code | package | path | filename
---|---|---|---|
import logging
import pathlib
import struct
import time
import serial.tools.list_ports
from construct.core import ConstructError
from tqdm import tqdm
from ..core.struct import struct_blit_meta_standalone
class BlitSerialException(Exception):
pass
class BlitSerial(serial.Serial):
def __init__(self, port):
super().__init__(port, timeout=5)
@classmethod
def find_comport(cls):
ret = []
for comport in serial.tools.list_ports.comports():
if comport.vid == 0x0483 and comport.pid == 0x5740:
logging.info(f'Found 32Blit on {comport.device}')
ret.append(comport.device)
if ret:
return ret
raise RuntimeError('Unable to find 32Blit')
@classmethod
def validate_comport(cls, device):
if device.lower() == 'auto':
return cls.find_comport()[:1]
if device.lower() == 'all':
return cls.find_comport()
for comport in serial.tools.list_ports.comports():
if comport.device == device:
if comport.vid == 0x0483 and comport.pid == 0x5740:
logging.info(f'Found 32Blit on {comport.device}')
return [device]
# if the user asked for a port with no vid/pid assume they know what they're doing
elif comport.vid is None and comport.pid is None:
logging.info(f'Using unidentified port {comport.device}')
return [device]
raise RuntimeError(f'Unable to find 32Blit on {device}')
@property
def status(self):
self.write(b'32BLINFO\x00')
response = self.read(8)
if response == b'':
raise RuntimeError('Timeout waiting for 32Blit status.')
return 'game' if response == b'32BL_EXT' else 'firmware'
def reset(self, timeout=5.0):
self.write(b'32BL_RST\x00')
self.flush()
self.close()
time.sleep(1.0)
t_start = time.time()
while time.time() - t_start < timeout:
try:
self.open()
return
except serial.SerialException:
time.sleep(0.1)
raise RuntimeError(f"Failed to connect to 32Blit on {serial.port} after reset")
def send_file(self, file, drive, directory=pathlib.PurePosixPath('/'), auto_launch=False):
sent_byte_count = 0
chunk_size = 64
file_name = file.name
file_size = file.stat().st_size
if drive == 'sd':
logging.info(f'Saving {file} ({file_size} bytes) as {file_name} in {directory}')
command = f'32BLSAVE{directory}/{file_name}\x00{file_size}\x00'
elif drive == 'flash':
logging.info(f'Flashing {file} ({file_size} bytes)')
command = f'32BLPROG{file_name}\x00{file_size}\x00'
else:
raise TypeError(f'Unknown destination {drive}.')
self.reset_output_buffer()
self.write(command.encode('ascii'))
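# stream the file in 64-byte chunks; any unsolicited data from the device mid-transfer indicates an error, otherwise it replies with 8 bytes ('32BL__OK') once the transfer is complete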
with open(file, 'rb') as input_file:
progress = tqdm(total=file_size, desc=f"Flashing {file.name}", unit_scale=True, unit_divisor=1024, unit="B", ncols=70, dynamic_ncols=True)
while sent_byte_count < file_size:
# if we received something, there was an error
if self.in_waiting:
progress.close()
break
data = input_file.read(chunk_size)
self.write(data)
self.flush()
sent_byte_count += len(data)
progress.update(len(data))
response = self.read(8)
if response == b'32BL__OK':
# get block for flash
if drive == 'flash':
block, = struct.unpack('<H', self.read(2))
# do launch if requested
if auto_launch:
self.launch(f'flash:/{block}')
elif auto_launch:
self.launch(f'{directory}/{file_name}')
logging.info(f'Wrote {file_name} successfully')
else:
response += self.read(self.in_waiting)
raise RuntimeError(f"Failed to save/flash {file_name}: {response.decode()}")
def erase(self, offset):
self.write(b'32BLERSE\x00')
self.write(struct.pack("<I", offset))
def list(self):
self.write(b'32BL__LS\x00')
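# each entry on the wire: 4-byte offset, 4-byte size, then a 10-byte meta header ('BLITMETA' magic + 2-byte meta size) followed by the metadata itself; an offset of 0xFFFFFFFF terminates the listing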
offset_str = self.read(4)
while offset_str != b'' and offset_str != b'\xff\xff\xff\xff':
offset, = struct.unpack('<I', offset_str)
size, = struct.unpack('<I', self.read(4))
meta_head = self.read(10)
meta_size, = struct.unpack('<H', meta_head[8:])
meta = None
if meta_size:
size += meta_size + 10
try:
meta = struct_blit_meta_standalone.parse(meta_head + self.read(meta_size))
except ConstructError:
pass
yield (meta, offset, size)
offset_str = self.read(4)
def launch(self, filename):
self.write(f'32BLLNCH{filename}\x00'.encode('ascii'))
| 32blit | /32blit-0.7.3-py3-none-any.whl/ttblit/core/blitserial.py | blitserial.py |
import logging
import pathlib
import yaml
class YamlLoader():
def setup_for_config(self, config_file, output_path, files=None):
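"""Populate self.config either from a YAML config file or, if none is given, from an explicit list of files mapped to output_path; also derive working_path and destination_path."""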
self.working_path = pathlib.Path('.')
if config_file is not None:
if config_file.is_file():
self.working_path = config_file.parent
else:
logging.warning(f'Unable to find config at {config_file}')
self.config = yaml.safe_load(config_file.read_text())
logging.info(f'Using config at {config_file}')
elif files is not None and output_path is not None:
self.config = {output_path: {file: {} for file in files}}
if output_path is not None:
self.destination_path = output_path
else:
self.destination_path = self.working_path
| 32blit | /32blit-0.7.3-py3-none-any.whl/ttblit/core/yamlloader.py | yamlloader.py |
import binascii
from construct import (Adapter, Bytes, Checksum, Const, GreedyBytes, Int8ul,
Int16ul, Int32ub, Int32ul, Optional, PaddedString,
Prefixed, PrefixedArray, RawCopy, Rebuild, Struct, len_,
this)
from .compression import ImageCompressor
class PaletteCountAdapter(Adapter):
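"""Maps a palette count of 256 to the stored value 0, since the count field is a single byte (Int8ul)."""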
def _decode(self, obj, context, path):
if obj == 0:
obj = 256
return obj
def _encode(self, obj, context, path):
if obj == 256:
obj = 0
return obj
class ImageSizeAdapter(Adapter):
"""
Adds the header and type size to the size field.
The size field itself is already counted.
"""
def _decode(self, obj, context, path):
return obj - 8
def _encode(self, obj, context, path):
return obj + 8
struct_blit_pixel = Struct(
'r' / Int8ul,
'g' / Int8ul,
'b' / Int8ul,
'a' / Int8ul
)
struct_blit_image_compressed = Struct(
'header' / Const(b'SPRITE'),
'type' / PaddedString(2, 'ASCII'),
'data' / Prefixed(ImageSizeAdapter(Int32ul), Struct(
'width' / Int16ul,
'height' / Int16ul,
'format' / Const(0x02, Int8ul),
'palette' / PrefixedArray(PaletteCountAdapter(Int8ul), struct_blit_pixel),
'pixels' / GreedyBytes,
), includelength=True)
)
struct_blit_image = ImageCompressor(struct_blit_image_compressed)
struct_blit_meta = Struct(
'header' / Const(b'BLITMETA'),
'data' / Prefixed(Int16ul, Struct(
'checksum' / Checksum(
Int32ul,
lambda data: binascii.crc32(data),
this._._.bin.data
),
'date' / PaddedString(16, 'ascii'),
'title' / PaddedString(25, 'ascii'),
'description' / PaddedString(129, 'ascii'),
'version' / PaddedString(17, 'ascii'),
'author' / PaddedString(17, 'ascii'),
Const(b'BLITTYPE'),
'category' / PaddedString(17, 'ascii'),
'url' / PaddedString(129, 'ascii'),
'filetypes' / PrefixedArray(Int8ul, PaddedString(5, 'ascii')),
'icon' / struct_blit_image,
'splash' / struct_blit_image
))
)
struct_blit_meta_standalone = Struct(
'header' / Const(b'BLITMETA'),
'data' / Prefixed(Int16ul, Struct(
'checksum' / Int32ul,
'date' / PaddedString(16, 'ascii'),
'title' / PaddedString(25, 'ascii'),
'description' / PaddedString(129, 'ascii'),
'version' / PaddedString(17, 'ascii'),
'author' / PaddedString(17, 'ascii'),
Const(b'BLITTYPE'),
'category' / PaddedString(17, 'ascii'),
'url' / PaddedString(129, 'ascii'),
'filetypes' / PrefixedArray(Int8ul, PaddedString(5, 'ascii')),
'icon' / struct_blit_image,
'splash' / struct_blit_image
))
)
struct_blit_bin = Struct(
'header' / Const(b'BLIT'),
'render' / Int32ul,
'update' / Int32ul,
'init' / Int32ul,
'length' / Int32ul,
# The length above is actually the _flash_end symbol from startup_user.s
# it includes the offset into 0x90000000 (external flash)
# we mask out the highest nibble to correct this into the actual bin length
# plus subtract 20 bytes for header, symbol and length dwords
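# e.g. a length of 0x90001234 masks to 0x1234 (4660), so 'bin' spans 4660 - 20 = 4640 bytes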
'bin' / Bytes((this.length & 0x0FFFFFFF) - 20)
)
struct_blit_relo = Struct(
'header' / Const(b'RELO'),
'relocs' / PrefixedArray(Int32ul, Struct(
'reloc' / Int32ul
))
)
blit_game = Struct(
'relo' / Optional(struct_blit_relo),
'bin' / RawCopy(struct_blit_bin),
'meta' / Optional(struct_blit_meta)
)
blit_game_with_meta = Struct(
'relo' / Optional(struct_blit_relo),
'bin' / RawCopy(struct_blit_bin),
'meta' / struct_blit_meta
)
blit_game_with_meta_and_relo = Struct(
'relo' / struct_blit_relo,
'bin' / RawCopy(struct_blit_bin),
'meta' / struct_blit_meta
)
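# Apple .icns container: a 4-byte 'icns' magic plus 4-byte total size (= data + 16),
# followed by a single 'ic07' entry whose own length field counts its 8-byte entry header (= data + 8)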
blit_icns = Struct(
'header' / Const(b'icns'),
'size' / Rebuild(Int32ub, len_(this.data) + 16),
'type' / Const(b'ic07'), # 128×128 icon in PNG format
'data_length' / Rebuild(Int32ub, len_(this.data) + 8),
'data' / Bytes(this.data_length - 8)
)
struct_blit_image_bi = Struct(
'type' / Const(1, Int16ul), # raw data
'tag' / Const(b'3B'), # 32blit tag
'data' / struct_blit_image
)
| 32blit | /32blit-0.7.3-py3-none-any.whl/ttblit/core/struct.py | struct.py |
import argparse
import logging
import pathlib
import re
import struct
from PIL import Image
class Colour():
def __init__(self, colour):
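# accepts another Colour, a 6-character hex string such as 'ff00ff', or a comma-separated triple such as '255,0,255'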
if type(colour) is Colour:
self.r, self.g, self.b = colour
elif len(colour) == 6:
self.r, self.g, self.b = tuple(bytes.fromhex(colour))
elif ',' in colour:
self.r, self.g, self.b = [int(c, 10) for c in colour.split(',')]
def __getitem__(self, index):
return [self.r, self.g, self.b][index]
def __repr__(self):
return f'Colour{self.r, self.g, self.b}'
class Palette():
def __init__(self, palette_file=None):
self.transparent = None
self.entries = []
if isinstance(palette_file, Palette):
self.transparent = palette_file.transparent
self.entries = palette_file.entries
return
if palette_file is not None:
palette_file = pathlib.Path(palette_file)
palette_type = palette_file.suffix[1:]
palette_loader = f'load_{palette_type}'
fn = getattr(self, palette_loader, None)
if fn is None:
self.load_image(palette_file)
else:
fn(palette_file, open(palette_file, 'rb').read())
self.extract_palette()
def extract_palette(self):
self.entries = []
for y in range(self.height):
for x in range(self.width):
self.entries.append(self.image.getpixel((x, y)))
def set_transparent_colour(self, r, g, b):
if (r, g, b, 0xff) in self.entries:
self.transparent = self.entries.index((r, g, b, 0xff))
self.entries[self.transparent] = (r, g, b, 0x00)
return self.transparent
return None
def load_act(self, palette_file, data):
# Adobe Colour Table .act
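# 772 bytes = 256 RGB triples (768 bytes) plus a 4-byte big-endian footer holding the colour count and transparency index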
palette = data
if len(palette) < 772:
raise ValueError(f'palette {palette_file} is not a valid Adobe .act (length {len(palette)} < 772)')
size, _ = struct.unpack('>HH', palette[-4:])
self.width = 1
self.height = size
self.image = Image.frombytes('RGB', (self.width, self.height), palette).convert('RGBA')
def load_pal(self, palette_file, data):
# Pro Motion NG .pal - MS Palette files and raw palette files share .pal suffix
# Raw files are just 768 bytes
palette = data
if len(palette) < 768:
raise ValueError(f'palette {palette_file} is not a valid Pro Motion NG .pal')
# There's no length in .pal files, so we just create a 16x16 256 colour palette
self.width = 16
self.height = 16
self.image = Image.frombytes('RGB', (self.width, self.height), palette).convert('RGBA')
def load_gpl(self, palette_file, data):
palette = data
palette = palette.decode('utf-8').strip().replace('\r\n', '\n')
if not palette.startswith('GIMP Palette'):
raise ValueError(f'palette {palette_file} is not a valid GIMP .gpl')
# Split the whole file into palette entries
palette = palette.split('\n')
# drop 'GIMP Palette' from the first entry
palette.pop(0)
# drop metadata/comments
while palette[0].startswith(('Name:', 'Columns:', '#')):
palette.pop(0)
# calculate our image width/height here because len(palette)
# equals the number of palette entries
self.width = 1
self.height = len(palette)
# Split out the palette entries into R, G, B and drop the hex colour
# This convoluted list comprehension does this while also flattening to a 1d array
palette = [int(c) for entry in palette for c in re.split(r'\s+', entry.strip())[0:3]]
self.image = Image.frombytes('RGB', (self.width, self.height), bytes(palette)).convert('RGBA')
def load_image(self, palette_file):
palette = Image.open(palette_file)
self.width, self.height = palette.size
if self.width * self.height > 256:
raise argparse.ArgumentError(None, f'palette {palette_file} has too many pixels {self.width}x{self.height}={self.width*self.height} (max 256)')
logging.info(f'Using palette {palette_file} {self.width}x{self.height}')
self.image = palette.convert('RGBA')
def quantize_image(self, image, transparent=None, strict=False):
if strict and len(self) == 0:
raise TypeError("Attempting to enforce strict colours with an empty palette, did you really want to do this?")
w, h = image.size
output_image = Image.new('P', (w, h))
for y in range(h):
for x in range(w):
r, g, b, a = image.getpixel((x, y))
if transparent is not None and (r, g, b) == tuple(transparent):
a = 0x00
index = self.get_entry(r, g, b, a, strict=strict)
output_image.putpixel((x, y), index)
return output_image
def get_entry(self, r, g, b, a, remap_transparent=True, strict=False):
if (r, g, b, a) in self.entries:
index = self.entries.index((r, g, b, a))
return index
# Noisy print
# logging.info(f'Re-mapping ({r}, {g}, {b}, {a}) at ({x}x{y}) to ({index})')
# Anything with 0 alpha that's not in the palette might as well be the transparent colour
elif a == 0 and self.transparent is not None:
return self.transparent
elif not strict:
if len(self.entries) < 256:
self.entries.append((r, g, b, a))
if a == 0:
# Set this as the transparent colour - if we had one we'd have returned it already.
self.transparent = len(self.entries) - 1
return len(self.entries) - 1
else:
raise TypeError('Out of palette entries')
else:
raise TypeError(f'Colour {r}, {g}, {b}, {a} does not exist in palette!')
def tostruct(self):
return [dict(zip('rgba', c)) for c in self.entries]
def __iter__(self):
return iter(self.entries)
def __len__(self):
return len(self.entries)
def __getitem__(self, index):
return self.entries[index]
| 32blit | /32blit-0.7.3-py3-none-any.whl/ttblit/core/palette.py | palette.py |
# -*- coding: utf-8 -*-
from setuptools import setup
version = '0.0.1'
setup(
name = "3301-calc",
packages = ["calc"],
entry_points = {
"console_scripts": ['calculate = calc.calc:main']
},
version = version,
description = "Python command line application bare bones template.",
long_description = "README File",
author = "Anurag Kumar Singh",
author_email = "anurag3301.0x0@gmail.com",
)
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/setup.py | setup.py |
# -*- coding: utf-8 -*-
from .calc import main
main()
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/calc/__main__.py | __main__.py |
# -*- coding: utf-8 -*-
def is_palindrom(num: int) -> bool:
int_str = str(num)
if int_str == int_str[::-1]:
return True
return False
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/calc/palindrom.py | palindrom.py |
# -*- coding: utf-8 -*-
"""Some Comment"""
from .odd import is_odd, is_even
from .prime import is_prime
from .palindrom import is_palindrom
def main():
while True:
print("\nEnter 0 to quit")
print("Enter 1 to check odd")
print("Enter 2 to check even")
print("Enter 3 to check prime")
print("Enter 4 to check palindrom")
ch = int(input("Enter choice: "))
if ch == 0:
exit(0)
num = int(input("Enter a number: "))
if ch == 1:
print(f"Number {num} {'is' if is_odd(num) else 'is not'} odd")
elif ch == 2:
print(f"Number {num} {'is' if is_even(num) else 'is not'} even")
elif ch == 3:
print(f"Number {num} {'is' if is_prime(num) else 'is not'} prime")
elif ch == 4:
print(f"Number {num} {'is' if is_palindrom(num) else 'is not'} palindrom")
else:
print("Invalid input")
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/calc/calc.py | calc.py |
# -*- coding: utf-8 -*-
def is_odd(num: int) -> bool:
if num % 2 != 0:
return True
else:
return False
def is_even(num: int) -> bool:
return not is_odd(num)
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/calc/odd.py | odd.py |
# -*- coding: utf-8 -*-
def is_prime(num: int) -> bool:
if num < 2:
return False
for i in range(2, num):
if num % i == 0:
return False
return True
| 3301-calc | /3301-calc-0.0.1.tar.gz/3301-calc-0.0.1/calc/prime.py | prime.py |
36-chambers
=====================================================================
36-chambers is a Python Library which adds common reversing methods and
functions for Binary Ninja. The library is designed to be used within Binary
Ninja in the Python console.
Installation
------------
Use pip to install or upgrade 36-chambers::
$ pip install 36-chambers [--upgrade]
Quick Example
-------------
Find and print all blocks with a "push" instruction:
.. code-block :: python
from chambers import Chamber
c = Chamber(bv=bv)
c.instructions('push')
Documentation
-------------
For more information you can find documentation on readthedocs_.
.. _readthedocs: https://36-chambers.readthedocs.org
| 36-chambers | /36-chambers-0.0.1.tar.gz/36-chambers-0.0.1/README.rst | README.rst |
#!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
description = f.read()
install_requires = []
setup(
name='36-chambers',
version='0.0.1',
description='Python Library for Binary Ninja',
long_description=long_description,
author='Mike Goffin',
author_email='mgoffin@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='binary ninja 36 chambers',
url='https://www.github.com/mgoffin/36-chambers',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=install_requires,
scripts=[],
)
| 36-chambers | /36-chambers-0.0.1.tar.gz/36-chambers-0.0.1/setup.py | setup.py |
#!/usr/bin/env python
class Chamber(object):
"""
The main chamber.
"""
bv = None
def __init__(self, bv=None):
"""
Do a crude check to see if `bv` is not some standard object. If it is,
note that it should be a `BinaryView` object.
If it's something we don't check for, or a true BinaryView object, set
the class attribute for use in methods. If a developer decides to pass
something that isn't checked by us and isn't a BinaryView object, they
can deal with the exceptions being thrown to figure it out.
"""
if bv is None or isinstance(bv, (basestring,
dict,
float,
int,
list,
long,
str,
)):
raise Exception(
"Must provide a valid Binary Ninja BinaryView object")
self.bv = bv
def instructions(self, instruction=None):
"""
Loop through all of the functions, then all of the blocks in the IL, and
for each one check to see if it contains the provided instruction. If it
does, print it and its instructions.
TODO: Figure out why Binja doesn't show some instructions, like `xor`.
:param instruction: The instruction to search for.
:type instruction: str
"""
for func in self.bv.functions:
for block in func.low_level_il:
f = False
for i in block:
if instruction in str(i):
f = True
break
if f:
print("\t{0}".format(block))
for insn in block:
print("\t\t{0}".format(insn))
| 36-chambers | /36-chambers-0.0.1.tar.gz/36-chambers-0.0.1/chambers/chamber.py | chamber.py |
from .chamber import Chamber
__title__ = '36-chambers'
__version__ = '0.0.1'
__author__ = 'Mike Goffin'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Mike Goffin'
__all__ = [
'Chamber'
]
| 36-chambers | /36-chambers-0.0.1.tar.gz/36-chambers-0.0.1/chambers/__init__.py | __init__.py |
from contextlib import contextmanager
import ctypes
import inspect
@contextmanager
def scope():
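"""Emulate block scoping for a `with` block: names first bound inside the block are deleted again on exit. The caller's frame is two levels up (past the contextmanager machinery); its locals are snapshotted before the yield, new keys are removed afterwards, and the CPython-specific PyFrame_LocalsToFast call writes the change back into the frame's fast locals."""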
frame = inspect.currentframe().f_back.f_back
loc = frame.f_locals
before_keys = set(loc.keys())
yield
loc = frame.f_locals
glob = frame.f_globals
new_keys = [k for k in loc.keys() if k not in before_keys]
for k in new_keys:
del loc[k]
ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), ctypes.c_int(1))
def main():
x = 3
with scope():
y = x + 4
print(y) # 7
x = 8
print(y) # UnboundLocalError
if __name__ == '__main__':
main()
| 360blockscope | /360blockscope-0.1.0-py3-none-any.whl/blockscope/scope.py | scope.py |
from .scope import scope
| 360blockscope | /360blockscope-0.1.0-py3-none-any.whl/blockscope/__init__.py | __init__.py |
# 360 Monitoring CLI
This repository contains a CLI script for 360 Monitoring that allows you to connect to your 360 Monitoring (https://360monitoring.com) account and list monitoring data, add, update or remove server or website monitors.
## Documentation
You can find the full documentation including the feature complete REST API at [docs.360monitoring.com](https://docs.360monitoring.com/docs) and [docs.360monitoring.com/docs/api](https://docs.360monitoring.com/docs/api).
## Preconditions
* Make sure to have an account at https://360monitoring.com or https://platform360.io
* 360 Monitoring CLI requires Python 3 or newer
## Install 360 Monitoring CLI as ready-to-use package
$ pip install 360monitoringcli
## Configure your account
First you need to connect your CLI to your existing 360 Monitoring account via your API KEY. If you don't have a 360 Monitoring account yet, please register for free at https://360monitoring.com. To create an API KEY you'll need to upgrade to at least a Pro plan.
$ 360monitoring config save --api-key KEY configure API KEY to connect to 360 Monitoring account
## Test 360 Monitoring CLI locally
### Test 360 Monitoring CLI with pre-configured Docker image
You can easily test and run 360 Monitoring CLI for production by running the pre-configured docker image
$ docker build -t 360monitoringcli .
$ docker run -it --rm 360monitoringcli /bin/bash
### Test 360 Monitoring CLI for specific staging version
To test a package from staging you can simply deploy a docker container:
$ docker run -it --rm ubuntu /bin/bash
$ apt-get update && apt-get install -y python3 && apt-get install -y pip
$ pip install -i https://test.pypi.org/simple/ --force-reinstall -v "360monitoringcli==1.0.19"
### For development, install required Python modules
To test the code locally, install the Python modules "requests", "configparser", "argparse" and "prettytable".
Use "pip install -e ." to use "360monitoring" command with latest dev build locally based on local code.
$ pip install requests
$ pip install configparser
$ pip install argparse
$ pip install prettytable
$ pip install -e .
#### Run tests to check each function works
Test the code:
$ ./test_cli.sh
## Usage
$ 360monitoring --help display general help
$ 360monitoring signup open the sign up page to get your 360 Monitoring account
$ 360monitoring config save --api-key KEY configure API KEY to connect to 360 Monitoring account (only for paid plans)
$ 360monitoring statistics display all assets of your account
$ 360monitoring servers list display all monitored servers
$ 360monitoring servers list --issues display monitored servers with issues only
$ 360monitoring servers list --tag cpanel display only servers with tag "cpanel"
$ 360monitoring sites list display all monitored sites
$ 360monitoring sites list --issues display monitored sites with issues only
$ 360monitoring sites list --sort 6 --limit 5 display worst 5 monitored sites by uptime
$ 360monitoring contacts list display all contacts
$ 360monitoring usertokens list display user tokens
$ 360monitoring config print display your current settings and where those are stored
$ 360monitoring recommendations display upgrade recommendations for servers that exceed their limits
$ 360monitoring magiclinks create and open a readonly dashboard for a single server only via magic link
$ 360monitoring wptoolkit display statistics of WP Toolkit if installed
$ 360monitoring sites add --url domain.tld start monitoring a new website
$ 360monitoring servers update --name cpanel123.hoster.com --tag production tag a specific server
$ 360monitoring contacts --help display specific help for a sub command
$ 360monitoring dashboard open 360 Monitoring in your Web Browser
## Updating 360 Monitoring CLI package
You can update the 360monitoringcli package to the latest version using the following command:
$ pip install 360monitoringcli --upgrade
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/360monitoringcli-1.0.19.data/data/share/doc/360monitoring-cli/README.md | README.md |
#!/usr/bin/env python3
import os
import argparse
import json
import subprocess
import sys
import webbrowser
from datetime import datetime, timedelta
# surprisingly this works in PyPI, but not locally. For local usage replace ".lib." with "lib."
# use "pip install -e ." to run the "360monitoring" command with the latest dev build based on your local code.
from .lib.config import Config
from .lib.contacts import Contacts
from .lib.incidents import Incidents
from .lib.magiclinks import MagicLinks
from .lib.nodes import Nodes
from .lib.recommendations import Recommendations
from .lib.servers import Servers
from .lib.servercharts import ServerCharts
from .lib.servernotifications import ServerNotifications
from .lib.sites import Sites
from .lib.sitecharts import SiteCharts
from .lib.sitenotifications import SiteNotifications
from .lib.statistics import Statistics
from .lib.usertokens import UserTokens
from .lib.wptoolkit import WPToolkit
__version__ = '1.0.19'
# only runs on Python 3.x; throw exception on 2.x
if sys.version_info[0] < 3:
raise Exception("360 Monitoring CLI requires Python 3.x")
cfg = Config(__version__)
cli = argparse.ArgumentParser(prog='360monitoring', description='CLI for 360 Monitoring')
cli_subcommands = dict()
def check_version():
"""Check PyPi if there is a newer version of the application, but only once every 24 hours"""
# some code parts have been introduced in Python 3.7 and are not supported on older versions
if sys.version_info >= (3, 7):
# skip version check if the last one was within 24 hours already
if cfg.last_version_check and datetime.fromisoformat(cfg.last_version_check) > (datetime.now() - timedelta(hours=24)):
return
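# requesting a non-existent version ('random') makes pip print the list of available versions; the last entry of that list is parsed as the latest release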
latest_version = str(subprocess.run([sys.executable, '-m', 'pip', 'install', '360monitoringcli==random'], capture_output=True, text=True))
latest_version = latest_version[latest_version.find('(from versions:')+15:]
latest_version = latest_version[:latest_version.find(')')]
latest_version = latest_version.replace(' ','').split(',')[-1]
cfg.last_version_check = datetime.now().isoformat()
cfg.saveToFile(False)
if latest_version > __version__:
print('Update available: Please upgrade from', __version__, 'to', latest_version, 'with: pip install 360monitoringcli --upgrade')
def check_columns(columns):
"""Show or hide columns in ASCII table view"""
for column in columns:
if '0id' == column.lower():
cfg.hide_ids = True
elif 'id' == column.lower():
cfg.hide_ids = False
# --- config functions ---
def config_print(args):
"""Sub command for config print"""
cfg.print()
def config_save(args):
"""Sub command for config save"""
if args.api_key:
cfg.api_key = args.api_key
if args.usertoken:
cfg.usertoken = args.usertoken
if args.debug:
cfg.debug = args.debug == 'on'
cfg.saveToFile()
def config(args):
"""Sub command for config"""
config_print(args)
# --- contacts functions ---
def contacts_add(args):
"""Sub command for contacts add"""
contacts = Contacts(cfg)
if args.file:
if os.path.isfile(args.file):
with open(args.file) as file:
lines = file.readlines()
for line in lines:
contacts.add(line.strip())
else:
print('ERROR: File', args.file, 'to import not found')
elif args.name:
contacts.add(name=args.name, email=args.email, sms=args.sms)
else:
print('You need to specify at least a name with --name [name]')
def contacts_list(args):
"""Sub command for contacts list"""
check_columns(args.columns)
contacts = Contacts(cfg, format=args.output)
contacts.list(id=args.id, name=args.name, email=args.email, phone=args.phone, sort=args.sort, reverse=args.reverse, limit=args.limit)
def contacts_remove(args):
"""Sub command for contacts remove"""
contacts = Contacts(cfg)
contacts.remove(id=args.id, name=args.name, email=args.email, phone=args.phone)
def contacts(args):
"""Sub command for contacts"""
cli_subcommands[args.subparser].print_help()
# --- dashboard functions ---
def dashboard(args):
"""Sub command for dashboard"""
webbrowser.open('https://monitoring.platform360.io/')
# --- incidents functions ---
def incidents_add(args):
"""Sub command for incidents add"""
incidents = Incidents(cfg)
incidents.add(page_id=args.page_id, name=args.name, body=args.body)
def incidents_list(args):
"""Sub command for incidents list"""
incidents = Incidents(cfg, format=args.output)
incidents.list(page_id=args.page_id, name=args.name)
def incidents_remove(args):
"""Sub command for incidents remove"""
incidents = Incidents(cfg)
incidents.remove(page_id=args.page_id, id=args.id, name=args.name)
def incidents(args):
"""Sub command for incidents"""
cli_subcommands[args.subparser].print_help()
# --- magiclink functions ---
def magiclinks_create(args):
"""Sub command for magiclink create"""
serverId = ''
usertoken = args.usertoken if args.usertoken else cfg.usertoken
if args.id:
serverId = args.id
elif args.name:
# find correct server id for the server with the specified name
servers = Servers(cfg)
serverId = servers.getServerId(args.name)
if usertoken and serverId:
magiclinks = MagicLinks(cfg)
magiclink = magiclinks.create(usertoken, serverId, 'Dashboard')
if magiclink and args.open:
webbrowser.open(magiclink)
# if usertoken is not yet stored in config, do it now
if not cfg.usertoken:
cfg.usertoken = usertoken
cfg.saveToFile(False)
else:
print('Please specify an existing server either by "--id id" or "--name hostname" and specifiy your user token "--usertoken token"')
def magiclinks(args):
"""Sub command for magiclink"""
cli_subcommands[args.subparser].print_help()
# --- nodes functions ---
def nodes(args):
"""Sub command for nodes"""
check_columns(args.columns)
nodes = Nodes(cfg, format=args.output)
nodes.list(id=args.id, name=args.name, sort=args.sort, reverse=args.reverse, limit=args.limit)
# --- recommendations functions ---
def recommendations(args):
"""Sub command for recommendations"""
recommendations = Recommendations(cfg)
recommendations.print(format=args.output)
# --- servers functions ---
def servers_add(args):
"""Sub command for servers add"""
usertokens = UserTokens(cfg)
token = usertokens.token()
if not token:
print('First create a user token by executing:')
print()
print('360monitoring usertokens create')
print('360monitoring usertokens list')
print()
token = '[YOUR_USER_TOKEN]'
print('Please login via SSH to each of the servers you would like to add and execute the following command:')
print()
print('wget -q -N monitoring.platform360.io/agent360.sh && bash agent360.sh', token)
def servers_charts(args):
"""Sub command for servers charts"""
cli_subcommands[args.subparser].print_help()
def servers_charts_create(args):
"""Sub command for servers charts"""
serverId = ''
startDate = datetime.strptime(args.start.strip('\"'), '%Y-%m-%d').timestamp() if args.start else 0
endDate = datetime.strptime(args.end.strip('\"'), '%Y-%m-%d').timestamp() if args.end else 0
if args.id:
serverId = args.id
elif args.name:
# find correct server id for the server with the specified name
servers = Servers(cfg)
serverId = servers.getServerId(args.name)
if serverId:
charts = ServerCharts(cfg)
chart_url = charts.create(serverId, args.metric, startDate, endDate)
if chart_url and args.open:
webbrowser.open(chart_url)
else:
print('Please specify an existing server either by "--id id" or "--name hostname"')
def servers_events(args):
"""Sub command for servers events"""
serverId = ''
startDate = datetime.strptime(args.start.strip('\"'), '%Y-%m-%d') if args.start else (datetime.today() - timedelta(days=365))
endDate = datetime.strptime(args.end.strip('\"'), '%Y-%m-%d') if args.end else datetime.now()
if args.id:
serverId = args.id
elif args.name:
servers = Servers(cfg)
serverId = servers.getServerId(args.name)
if serverId:
notifications = ServerNotifications(cfg, format=args.output)
notifications.list(serverId, startDate.timestamp(), endDate.timestamp(), args.sort, args.reverse, args.limit)
def servers_list(args):
"""Sub command for servers list"""
check_columns(args.columns)
servers = Servers(cfg, format=args.output)
servers.list(args.issues, args.sort, args.reverse, args.limit, args.tag)
def servers_remove(args):
"""Sub command for servers remove"""
print("Please login via SSH to each of the servers you would like to remove.")
print("First stop the monitoring agent by running \"service agent360 stop\" then run \"pip3 uninstall agent360\". After 15 minutes you are able to remove the server.")
def servers_update(args):
"""Sub command for servers update"""
servers = Servers(cfg)
pattern = ''
if args.id:
pattern = args.id
elif args.name:
pattern = args.name
servers.setTags(pattern, args.tag)
def servers(args):
"""Sub command for servers"""
cli_subcommands[args.subparser].print_help()
# --- signup functions ---
def signup(args):
"""Sub command for signup"""
webbrowser.open('https://360monitoring.com/monitoring-trial/')
# --- sites functions ---
def sites_add(args):
"""Sub command for sites add"""
sites = Sites(cfg)
nodeId = ''
if args.node_id:
nodeId = args.node_id
elif args.node_name:
nodes = Nodes(cfg)
nodeId = nodes.getNodeId(args.node_name)
if args.file:
if os.path.isfile(args.file):
with open(args.file) as file:
lines = file.readlines()
for line in lines:
sites.add(line.strip(), protocol=args.protocol, name=args.name, port=args.port, keyword=args.keyword, matchType=args.match_type, nodeId=nodeId, force=args.force)
else:
print('ERROR: File', args.file, 'to import not found')
elif args.url:
sites.add(args.url, protocol=args.protocol, name=args.name, port=args.port, keyword=args.keyword, matchType=args.match_type, nodeId=nodeId, force=args.force)
else:
print('You need to specify at least a name with --name [name]')
def sites_charts(args):
"""Sub command for sites charts"""
cli_subcommands[args.subparser].print_help()
def sites_charts_create(args):
"""Sub command for sites charts"""
siteId = ''
startDate = datetime.strptime(args.start.strip('\"'), '%Y-%m-%d').timestamp() if args.start else 0
endDate = datetime.strptime(args.end.strip('\"'), '%Y-%m-%d').timestamp() if args.end else 0
if args.id:
siteId = args.id
elif args.url:
# find correct server id for the server with the specified name
sites = Sites(cfg)
siteId = sites.getSiteId(args.url)
if siteId:
charts = SiteCharts(cfg)
chart_url = charts.create(siteId, startDate, endDate)
if chart_url and args.open:
webbrowser.open(chart_url)
else:
print('Please specify an existing site either by "--id id" or "--url url"')
def sites_events(args):
"""Sub command for sites events"""
siteId = ''
startDate = datetime.strptime(args.start.strip('\"'), '%Y-%m-%d') if args.start else (datetime.today() - timedelta(days=365))
endDate = datetime.strptime(args.end.strip('\"'), '%Y-%m-%d') if args.end else datetime.now()
if args.id:
siteId = args.id
elif args.url:
sites = Sites(cfg)
siteId = sites.getSiteId(args.url)
if siteId:
notifications = SiteNotifications(cfg, format=args.output)
notifications.list(siteId, startDate.timestamp(), endDate.timestamp(), args.sort, args.reverse, args.limit)
def sites_list(args):
"""Sub command for sites list"""
check_columns(args.columns)
sites = Sites(cfg, format=args.output)
sites.list(id=args.id, url=args.url, name=args.name, location=args.location, pattern=args.pattern, issuesOnly=args.issues, sort=args.sort, reverse=args.reverse, limit=args.limit)
def sites_remove(args):
"""Sub command for sites remove"""
sites = Sites(cfg)
sites.remove(id=args.id, url=args.url, name=args.name, location=args.location, pattern=args.pattern)
def sites_uptime(args):
"""Sub command for sites uptime"""
siteId = ''
startDate = datetime.strptime(args.start.strip('\"'), '%Y-%m-%d') if args.start else (datetime.today() - timedelta(days=365))
endDate = datetime.strptime(args.end.strip('\"'), '%Y-%m-%d') if args.end else datetime.now()
sites = Sites(cfg)
if args.id:
siteId = args.id
elif args.url:
siteId = sites.getSiteId(args.url)
elif args.name:
siteId = sites.getSiteId(args.name)
if siteId:
if args.daily:
periods = []
firstDate = startDate
while endDate > firstDate:
startDate = datetime(endDate.year, endDate.month, endDate.day, 0, 0, 0)
endDate = datetime(endDate.year, endDate.month, endDate.day, 23, 59, 59)
periods.append([startDate.timestamp(), endDate.timestamp()])
endDate = startDate - timedelta(days=1)
sites.listUptimes(siteId, periods)
elif args.monthly:
periods = []
firstDate = startDate
while endDate > firstDate:
startDate = datetime(endDate.year, endDate.month, 1, 0, 0, 0)
endDate = datetime(endDate.year, endDate.month, endDate.day, 23, 59, 59)
periods.append([startDate.timestamp(), endDate.timestamp()])
endDate = startDate - timedelta(days=1)
sites.listUptimes(siteId, periods, '%Y-%m')
elif args.start or args.end:
sites.listUptimes(siteId, [[startDate.timestamp(), endDate.timestamp()]])
else:
periods = []
periods.append([(datetime.now() - timedelta(days=1)).timestamp(), datetime.now().timestamp()])
periods.append([(datetime.now() - timedelta(days=7)).timestamp(), datetime.now().timestamp()])
periods.append([(datetime.now() - timedelta(days=30)).timestamp(), datetime.now().timestamp()])
periods.append([(datetime.now() - timedelta(days=90)).timestamp(), datetime.now().timestamp()])
periods.append([(datetime.now() - timedelta(days=365)).timestamp(), datetime.now().timestamp()])
sites.listUptimes(siteId, periods)
def sites(args):
"""Sub command for sites"""
cli_subcommands[args.subparser].print_help()
# --- statistics functions ---
def statistics(args):
"""Sub command for statistics"""
statistics = Statistics(cfg)
statistics.print(format=args.output)
# --- usertokens functions ---
def usertokens_create(args):
"""Sub command for usertokens create"""
usertokens = UserTokens(cfg)
usertokens.create(name=args.name, tags=args.tag)
def usertokens_list(args):
"""Sub command for usertokens list"""
usertokens = UserTokens(cfg)
usertokens.list(format=args.output)
def usertokens(args):
"""Sub command for usertokens"""
cli_subcommands[args.subparser].print_help()
# --- wptoolkit functions ---
def wptoolkit(args):
"""Sub command for wptoolkit"""
check_columns(args.columns)
wptoolkit = WPToolkit(cfg)
wptoolkit.print(format=args.output, issuesOnly=args.issues, sort=args.sort, reverse=args.reverse, limit=args.limit)
# --- configure & parse CLI ---
def performCLI():
"""Parse the command line parameters and call the related functions"""
subparsers = cli.add_subparsers(title='commands', dest='subparser')
cli.add_argument('-v', '--version', action='store_true', help='print CLI version')
# config
cli_config = subparsers.add_parser('config', help='configure connection to 360 Monitoring account')
cli_config.set_defaults(func=config)
cli_config_subparsers = cli_config.add_subparsers(title='commands', dest='subparser')
cli_config_print = cli_config_subparsers.add_parser('print', help='print current settings for 360 Monitoring')
cli_config_print.set_defaults(func=config_print)
cli_config_save = cli_config_subparsers.add_parser('save', help='save current settings for 360 Monitoring to ' + cfg.filename)
cli_config_save.set_defaults(func=config_save)
cli_config_save.add_argument('--api-key', metavar='key', help='specify your API KEY for 360 Monitoring')
cli_config_save.add_argument('--usertoken', metavar='token', help='specify your USERTOKEN for 360 Monitoring')
cli_config_save.add_argument('--debug', choices=['on', 'off'], type=str, help='switch debug mode to print all API calls on or off')
# contacts
cli_contacts = subparsers.add_parser('contacts', help='list and manage contacts')
cli_contacts.set_defaults(func=contacts)
cli_contacts_subparsers = cli_contacts.add_subparsers(title='commands', dest='subparser')
cli_contacts_add = cli_contacts_subparsers.add_parser('add', help='add a new contact')
cli_contacts_add.set_defaults(func=contacts_add)
cli_contacts_add.add_argument('--name', nargs='?', metavar='name', help='name of the new contact')
cli_contacts_add.add_argument('--email', nargs='?', default='', metavar='email', help='email address of the new contact')
cli_contacts_add.add_argument('--sms', nargs='?', default='', metavar='phone', help='mobile phone number of the new contact (optional)')
cli_contacts_add.add_argument('--file', nargs='?', default='', metavar='file', help='file containing one contact per line to add')
cli_contacts_list = cli_contacts_subparsers.add_parser('list', help='list contacts')
cli_contacts_list.set_defaults(func=contacts_list)
cli_contacts_list.add_argument('--id', nargs='?', default='', metavar='id', help='list contact with given ID')
cli_contacts_list.add_argument('--name', nargs='?', default='', metavar='name', help='list contact with given name')
cli_contacts_list.add_argument('--email', nargs='?', default='', metavar='email', help='list contact with given email address')
cli_contacts_list.add_argument('--phone', nargs='?', default='', metavar='phone', help='list contact with given phone number')
cli_contacts_list.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_contacts_list.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_contacts_list.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_contacts_list.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_contacts_list.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_contacts_list.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_contacts_list.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_contacts_list.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_contacts_remove = cli_contacts_subparsers.add_parser('remove', help='remove a contact')
cli_contacts_remove.set_defaults(func=contacts_remove)
cli_contacts_remove.add_argument('--id', nargs='?', default='', metavar='id', help='remove contact with given ID')
cli_contacts_remove.add_argument('--name', nargs='?', default='', metavar='name', help='remove contact with given name')
cli_contacts_remove.add_argument('--email', nargs='?', default='', metavar='email', help='remove contact with given email address')
cli_contacts_remove.add_argument('--phone', nargs='?', default='', metavar='phone', help='remove contact with given phone number')
# dashboard
cli_dashboard = subparsers.add_parser('dashboard', help='open 360 Monitoring Dashboard in your Web Browser')
cli_dashboard.set_defaults(func=dashboard)
# incidents
cli_incidents = subparsers.add_parser('incidents', help='list and manage incidents')
cli_incidents.set_defaults(func=incidents)
cli_incidents_subparsers = cli_incidents.add_subparsers(title='commands', dest='subparser')
cli_incidents_add = cli_incidents_subparsers.add_parser('add', help='add a new incident')
cli_incidents_add.set_defaults(func=incidents_add)
cli_incidents_add.add_argument('--page-id', required=True, metavar='id', help='add the incident to the status page with given ID')
cli_incidents_add.add_argument('--name', required=True, metavar='name', help='name of the new incident')
cli_incidents_add.add_argument('--body', metavar='body', help='text of the new incident')
cli_incidents_list = cli_incidents_subparsers.add_parser('list', help='list incidents')
cli_incidents_list.set_defaults(func=incidents_list)
cli_incidents_list.add_argument('--page-id', required=True, metavar='id', help='list incidents from status page with given ID')
cli_incidents_list.add_argument('--name', nargs='?', default='', metavar='name', help='list incidents with given name')
cli_incidents_list.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_incidents_list.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_incidents_list.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_incidents_list.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_incidents_remove = cli_incidents_subparsers.add_parser('remove', help='remove an incident')
cli_incidents_remove.set_defaults(func=incidents_remove)
cli_incidents_remove.add_argument('--page-id', required=True, metavar='id', help='remove incidents from status page with given ID')
cli_incidents_remove.add_argument('--id', nargs='?', default='', metavar='id', help='remove incident with given ID')
cli_incidents_remove.add_argument('--name', nargs='?', default='', metavar='name', help='remove incident with given name')
# magiclinks
cli_magiclinks = subparsers.add_parser('magiclinks', help='create a magic link for the dashboard of a specific server')
cli_magiclinks.set_defaults(func=magiclinks)
cli_magiclinks_subparsers = cli_magiclinks.add_subparsers(title='commands', dest='subparser')
cli_magiclinks_create = cli_magiclinks_subparsers.add_parser('create', help='create new magic link to access the (readonly) dashboard for a specific server only')
cli_magiclinks_create.set_defaults(func=magiclinks_create)
cli_magiclinks_create.add_argument('--id', nargs='?', default='', metavar='id', help='create magic link for server with given ID')
cli_magiclinks_create.add_argument('--name', nargs='?', default='', metavar='name', help='create magic link for server with given name')
cli_magiclinks_create.add_argument('--usertoken', nargs='?', default='', metavar='token', help='use this usertoken for authentication')
cli_magiclinks_create.add_argument('--open', action='store_true', help='open the server dashboard directly in the default web browser (optional)')
# nodes
cli_nodes = subparsers.add_parser('nodes', help='show monitoring locations')
cli_nodes.set_defaults(func=nodes)
cli_nodes.add_argument('--id', nargs='?', default='', metavar='id', help='show node with given ID')
cli_nodes.add_argument('--name', nargs='?', default='', metavar='name', help='show node with given name')
cli_nodes.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_nodes.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_nodes.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_nodes.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_nodes.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_nodes.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_nodes.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_nodes.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
# recommendations
cli_recommendations = subparsers.add_parser('recommendations', help='show upgrade recommendations for servers that exceed their limits')
cli_recommendations.set_defaults(func=recommendations)
cli_recommendations.add_argument('--output', choices=['csv', 'table'], default='table', help='output format for the data')
cli_recommendations.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_recommendations.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
# servers
cli_servers = subparsers.add_parser('servers', help='list and manage monitored servers')
cli_servers.set_defaults(func=servers)
cli_servers_subparsers = cli_servers.add_subparsers(title='commands', dest='subparser')
cli_servers_add = cli_servers_subparsers.add_parser('add', help='activate monitoring for a server')
cli_servers_add.set_defaults(func=servers_add)
cli_servers_charts = cli_servers_subparsers.add_parser('charts', help='create a metrics chart as PNG file and print its url or open it in your browser')
cli_servers_charts.set_defaults(func=servers_charts)
cli_servers_charts_subparsers = cli_servers_charts.add_subparsers(title='commands', dest='subparser')
cli_servers_charts_create = cli_servers_charts_subparsers.add_parser('create', help='create a metrics chart as PNG file and print its url or open it in your browser')
cli_servers_charts_create.set_defaults(func=servers_charts_create)
cli_servers_charts_create.add_argument('--id', nargs='?', default='', metavar='id', help='show metrics for server with given ID')
cli_servers_charts_create.add_argument('--name', nargs='?', default='', metavar='name', help='show metrics for server with given name')
cli_servers_charts_create.add_argument('--metric', nargs='?', default='cpu', metavar='metric', help='select the metric to chart, e.g. \"cpu\", \"mem\", \"nginx\", \"bitninja\", \"httpd\", \"network\", \"ping\", \"process\", \"swap\", \"uptime\", \"wp-toolkit\" (optional)')
cli_servers_charts_create.add_argument('--start', nargs='?', default='', metavar='start', help='select start date of chart period in form of \"yyyy-mm-dd\" (optional)')
cli_servers_charts_create.add_argument('--end', nargs='?', default='', metavar='end', help='select end date of chart period in form of \"yyyy-mm-dd\" (optional)')
cli_servers_charts_create.add_argument('--open', action='store_true', help='open the metrics chart directly in the default web browser (optional)')
cli_servers_events = cli_servers_subparsers.add_parser('events', help='list event notifications of a specified server')
cli_servers_events.set_defaults(func=servers_events)
cli_servers_events.add_argument('--id', nargs='?', default='', metavar='id', help='show event notifications for server with given ID')
cli_servers_events.add_argument('--name', nargs='?', default='', metavar='name', help='show event notifications for server with given name')
cli_servers_events.add_argument('--start', nargs='?', default='', metavar='start', help='select start date of notification period in form of yyyy-mm-dd')
cli_servers_events.add_argument('--end', nargs='?', default='', metavar='end', help='select end date of notification period in form of yyyy-mm-dd')
cli_servers_events.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_servers_events.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_servers_events.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_servers_events.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_servers_events.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_servers_events.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_servers_events.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_servers_events.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_servers_list = cli_servers_subparsers.add_parser('list', help='list monitored servers')
cli_servers_list.set_defaults(func=servers_list)
cli_servers_list.add_argument('--id', nargs='?', default='', metavar='id', help='list server with given ID')
cli_servers_list.add_argument('--name', nargs='?', default='', metavar='name', help='list server with given name')
cli_servers_list.add_argument('--tag', nargs='*', default='', metavar='tag', help='only list servers matching these tags')
cli_servers_list.add_argument('--issues', action='store_true', help='show only servers with issues')
cli_servers_list.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_servers_list.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_servers_list.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_servers_list.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_servers_list.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_servers_list.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_servers_list.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_servers_list.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_servers_remove = cli_servers_subparsers.add_parser('remove', help='remove monitoring for a server')
cli_servers_remove.set_defaults(func=servers_remove)
cli_servers_update = cli_servers_subparsers.add_parser('update', help='set tags for a server')
cli_servers_update.set_defaults(func=servers_update)
cli_servers_update.add_argument('--id', nargs='?', default='', metavar='id', help='update server with given ID')
cli_servers_update.add_argument('--name', nargs='?', default='', metavar='name', help='update server with given name')
cli_servers_update.add_argument('--tag', nargs='*', default='', metavar='tag', help='set these tags for one or more servers specified')
# signup
cli_signup = subparsers.add_parser('signup', help='sign up for 360 Monitoring')
cli_signup.set_defaults(func=signup)
# sites
cli_sites = subparsers.add_parser('sites', help='list and manage monitored websites')
cli_sites.set_defaults(func=sites)
cli_sites_subparsers = cli_sites.add_subparsers(title='commands', dest='subparser')
cli_sites_add = cli_sites_subparsers.add_parser('add', help='activate monitoring for a site')
cli_sites_add.set_defaults(func=sites_add)
cli_sites_add.add_argument('--url', nargs='?', metavar='url', help='url of site that should be monitored')
cli_sites_add.add_argument('--name', nargs='?', metavar='name', help='name of site that should be monitored (optional)')
cli_sites_add.add_argument('--protocol', choices=['http', 'https', 'icmp', 'tcp'], default='https', metavar='protocol', help='specify a different protocol than https')
cli_sites_add.add_argument('--port', nargs='?', default=443, type=int, metavar='port', help='specify a different port than 443')
cli_sites_add.add_argument('--keyword', nargs='?', metavar='keyword', help='alert when the specified keyword is found/not found in the page HTML (optional)')
cli_sites_add.add_argument('--match-type', choices=['', 'yes', 'no'], default='', metavar='type', help='if set to \"yes\" it will alert when keyword is found on page, if \"no\" alert when keyword not found')
cli_sites_add.add_argument('--node-id', nargs='?', metavar='id', help='id of the monitoring node location that should monitor the url (optional)')
cli_sites_add.add_argument('--node-name', nargs='?', metavar='name', help='name of the monitoring node location that should monitor the url. In doubt, take the first match. (optional)')
cli_sites_add.add_argument('--force', action='store_true', help='add new monitor even if already exists')
cli_sites_add.add_argument('--file', nargs='?', default='', metavar='file', help='file containing one URL per line to monitor')
cli_sites_charts = cli_sites_subparsers.add_parser('charts', help='create a metrics chart as PNG file and print its url or open it in your browser')
cli_sites_charts.set_defaults(func=sites_charts)
cli_sites_charts_subparsers = cli_sites_charts.add_subparsers(title='commands', dest='subparser')
cli_sites_charts_create = cli_sites_charts_subparsers.add_parser('create', help='create a metrics chart as PNG file and print its url or open it in your browser')
cli_sites_charts_create.set_defaults(func=sites_charts_create)
cli_sites_charts_create.add_argument('--id', nargs='?', default='', metavar='id', help='show metrics for site with given ID')
cli_sites_charts_create.add_argument('--url', nargs='?', default='', metavar='url', help='show metrics for site with given url')
cli_sites_charts_create.add_argument('--start', nargs='?', default='', metavar='start', help='select start date of chart period in form of \"yyyy-mm-dd\" (optional)')
cli_sites_charts_create.add_argument('--end', nargs='?', default='', metavar='end', help='select end date of chart period in form of \"yyyy-mm-dd\" (optional)')
cli_sites_charts_create.add_argument('--open', action='store_true', help='open the metrics chart directly in the default web browser (optional)')
cli_sites_events = cli_sites_subparsers.add_parser('events', help='list event notifications of a specified site')
cli_sites_events.set_defaults(func=sites_events)
cli_sites_events.add_argument('--id', nargs='?', default='', metavar='id', help='show event notifications for site with given ID')
cli_sites_events.add_argument('--url', nargs='?', default='', metavar='url', help='show event notifications for site with given url')
cli_sites_events.add_argument('--start', nargs='?', default='', metavar='start', help='select start date of notification period in form of yyyy-mm-dd')
cli_sites_events.add_argument('--end', nargs='?', default='', metavar='end', help='select end date of notification period in form of yyyy-mm-dd')
cli_sites_events.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_sites_events.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_sites_events.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_sites_events.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_sites_events.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_sites_events.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_sites_events.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_sites_events.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_sites_list = cli_sites_subparsers.add_parser('list', help='list sites')
cli_sites_list.set_defaults(func=sites_list)
cli_sites_list.add_argument('--id', nargs='?', default='', metavar='id', help='list site with given ID')
cli_sites_list.add_argument('--url', nargs='?', default='', metavar='url', help='list site with given url')
cli_sites_list.add_argument('--name', nargs='?', default='', metavar='name', help='list site with given name')
cli_sites_list.add_argument('--location', nargs='?', default='', metavar='location', help='list sites monitored from given location')
cli_sites_list.add_argument('--pattern', nargs='?', default='', metavar='pattern', help='list sites with pattern included in URL')
cli_sites_list.add_argument('--issues', action='store_true', help='show only sites with issues')
cli_sites_list.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_sites_list.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_sites_list.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_sites_list.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_sites_list.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_sites_list.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_sites_list.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_sites_list.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
cli_sites_remove = cli_sites_subparsers.add_parser('remove', help='remove monitoring for a site')
cli_sites_remove.set_defaults(func=sites_remove)
cli_sites_remove.add_argument('--id', nargs='?', default='', metavar='id', help='remove site with given ID')
cli_sites_remove.add_argument('--url', nargs='?', default='', metavar='url', help='remove site with given url')
cli_sites_remove.add_argument('--name', nargs='?', default='', metavar='name', help='remove site with given name')
cli_sites_remove.add_argument('--location', nargs='?', default='', metavar='location', help='remove sites monitored from given location')
cli_sites_remove.add_argument('--pattern', nargs='?', default='', metavar='pattern', help='remove sites with pattern included in URL')
cli_sites_uptime = cli_sites_subparsers.add_parser('uptime', help='show uptime')
cli_sites_uptime.set_defaults(func=sites_uptime)
cli_sites_uptime.add_argument('--id', nargs='?', default='', metavar='id', help='show uptime for site with given ID')
cli_sites_uptime.add_argument('--url', nargs='?', default='', metavar='url', help='show uptime for site with given url')
cli_sites_uptime.add_argument('--name', nargs='?', default='', metavar='name', help='show uptime for site with given name')
cli_sites_uptime.add_argument('--start', nargs='?', default='', metavar='start', help='select start date of uptime period in form of yyyy-mm-dd')
cli_sites_uptime.add_argument('--end', nargs='?', default='', metavar='end', help='select end date of uptime period in form of yyyy-mm-dd')
cli_sites_uptime.add_argument('--daily', action='store_true', help='show uptime per day')
cli_sites_uptime.add_argument('--monthly', action='store_true', help='show uptime per month')
# statistics
cli_statistics = subparsers.add_parser('statistics', help='print statistics')
cli_statistics.set_defaults(func=statistics)
cli_statistics.add_argument('--output', choices=['csv', 'table'], default='table', help='output format for the data')
cli_statistics.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_statistics.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
# user tokens
cli_usertokens = subparsers.add_parser('usertokens', help='list or create usertokens')
cli_usertokens.set_defaults(func=usertokens)
cli_usertokens_subparsers = cli_usertokens.add_subparsers(title='commands', dest='subparser')
cli_usertokens_create = cli_usertokens_subparsers.add_parser('create', help='create new user token')
cli_usertokens_create.set_defaults(func=usertokens_create)
cli_usertokens_create.add_argument('--name', nargs='?', default='', metavar='name', help='name of the new token (optional)')
cli_usertokens_create.add_argument('--tag', nargs='?', default='', metavar='tag', help='set a tag for the new token (optional)')
cli_usertokens_list = cli_usertokens_subparsers.add_parser('list', help='list usertokens')
cli_usertokens_list.set_defaults(func=usertokens_list)
cli_usertokens_list.add_argument('--output', choices=['json', 'csv', 'table'], default='table', help='output format for the data')
cli_usertokens_list.add_argument('--json', action='store_const', const='json', dest='output', help='print data in JSON format')
cli_usertokens_list.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_usertokens_list.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
# wptoolkit
cli_wptoolkit = subparsers.add_parser('wptoolkit', help='list statistics of WP Toolkit if installed')
cli_wptoolkit.set_defaults(func=wptoolkit)
cli_wptoolkit.add_argument('--issues', action='store_true', help='show only servers with WordPress issues')
cli_wptoolkit.add_argument('--columns', nargs='*', default='', metavar='col', help='specify columns to print in table view or remove columns with 0 as prefix e.g. "0id"')
cli_wptoolkit.add_argument('--sort', nargs='?', default='', metavar='col', help='sort by specified column. Reverse sort by adding --reverse')
cli_wptoolkit.add_argument('--reverse', action='store_true', help='show in descending order. Works only together with --sort')
cli_wptoolkit.add_argument('--limit', nargs='?', default=0, type=int, metavar='n', help='limit the number of printed items')
cli_wptoolkit.add_argument('--output', choices=['csv', 'table'], default='table', help='output format for the data')
cli_wptoolkit.add_argument('--csv', action='store_const', const='csv', dest='output', help='print data in CSV format')
cli_wptoolkit.add_argument('--table', action='store_const', const='table', dest='output', help='print data as ASCII table')
# Parse
cli_subcommands['config'] = cli_config
cli_subcommands['contacts'] = cli_contacts
cli_subcommands['dashboard'] = cli_dashboard
cli_subcommands['incidents'] = cli_incidents
cli_subcommands['magiclinks'] = cli_magiclinks
cli_subcommands['nodes'] = cli_nodes
cli_subcommands['recommendations'] = cli_recommendations
cli_subcommands['servers'] = cli_servers
cli_subcommands['signup'] = cli_signup
cli_subcommands['sites'] = cli_sites
cli_subcommands['statistics'] = cli_statistics
cli_subcommands['usertokens'] = cli_usertokens
cli_subcommands['wptoolkit'] = cli_wptoolkit
args = cli.parse_args()
if args.subparser == None:
if args.version:
print('360 Monitoring CLI Version:', __version__)
elif 'func' in args:
# nodes, recommendations, statistics, signup, wptoolkit and dashboard are shown directly without a subparser
if args.func == config:
cli_config.print_help()
elif args.func == contacts:
cli_contacts.print_help()
elif args.func == incidents:
cli_incidents.print_help()
elif args.func == magiclinks:
cli_magiclinks.print_help()
elif args.func == servers:
cli_servers.print_help()
elif args.func == servers_charts:
cli_servers_charts.print_help()
elif args.func == sites:
cli_sites.print_help()
elif args.func == usertokens:
cli_usertokens.print_help()
else:
cli.print_help()
else:
cli.print_help()
else:
args.func(args)
def main():
check_version()
performCLI()
if __name__ == '__main__':
main()
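# Example invocations (illustrative only; the "360monitoring" entry point name is taken from
# the hints printed by config.py, and all flags below are defined by the parsers above):
#
#   360monitoring config save --api-key YOUR_API_KEY
#   360monitoring sites add --url example.com --protocol https --port 443
#   360monitoring sites list --issues --limit 10
#   360monitoring servers update --name myserver --tag production
#   360monitoring statistics --csv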
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/monitoring.py | monitoring.py |
#!/usr/bin/env python3
from prettytable import PrettyTable
from .config import Config
from .servers import Servers
class Recommendations(object):
def __init__(self, config: Config):
self.config = config
self.table = PrettyTable(field_names=['Asset', 'Recommendation'])
self.table.align['Asset'] = 'l'
self.table.align['Recommendation'] = 'l'
def print(self, format: str = 'table'):
"""Iterate through all assets and check for which we have recommendations"""
servers = Servers(self.config)
if servers.fetchData():
for server in servers.servers:
if servers.hasIssue(server):
self.table.add_row([server['name'], servers.getRecommendation(server)])
if (format == 'table'):
print(self.table)
elif (format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
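# Usage sketch (illustrative only): assuming the package is installed as cli360monitoring and an
# API key has been saved via "360monitoring config save --api-key ...", the class above could be
# driven directly like this:
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.recommendations import Recommendations
#
#   config = Config('1.0.19')
#   Recommendations(config).print(format='table')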
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/recommendations.py | recommendations.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from datetime import datetime
from .api import apiGet, apiPost, apiDelete
from .config import Config
from .functions import printError, printWarn, formatDowntime, formatTimespan
from .bcolors import bcolors
class Sites(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.monitors = None
self.table = PrettyTable(field_names=['ID', 'URL', 'Status', 'Uptime %', 'Time to first Byte', 'Location'])
self.table.align['ID'] = 'l'
self.table.align['URL'] = 'l'
self.table.min_width['URL'] = 25
self.table.align['Status'] = 'l'
self.table.align['Uptime %'] = 'r'
self.table.align['Time to first Byte'] = 'c'
self.table.align['Location'] = 'c'
self.sum_uptime = 0
self.sum_ttfb = 0
self.num_monitors = 0
def fetchData(self):
"""Retrieve the list of all website monitors"""
# if data is already downloaded, use cached data
if self.monitors != None:
return True
response_json = apiGet('monitors', 200, self.config)
if response_json:
if 'monitors' in response_json:
self.monitors = response_json['monitors']
return True
else:
printWarn('No monitors found')
self.monitors = None
return False
else:
self.monitors = None
return False
def getSiteId(self, url: str):
"""Return Site Id for the monitor with the specified url or name. Only the first matching entry (exact match) is returned or empty string if not found"""
if url and self.fetchData():
# Iterate through list of monitors and find the specified one
for monitor in self.monitors:
if monitor['url'] == url or (monitor['name'] and monitor['name'] == url):
return monitor['id']
return ''
def getSiteUrl(self, id: str):
"""Return Site Url for the monitor with the specified id"""
if id and self.fetchData():
# Iterate through list of monitors and find the specified one
for monitor in self.monitors:
if monitor['id'] == id:
return monitor['url']
return ''
def list(self, id: str = '', url: str = '', name: str = '', location: str = '', pattern: str = '', issuesOnly: bool = False, sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through list of web monitors and print details"""
if self.fetchData():
# if JSON was requested and no filters, then just print it without iterating through
if (self.format == 'json' and not (id or url or name or location or pattern or issuesOnly or limit > 0)):
print(json.dumps(self.monitors, indent=4))
return
self.printHeader()
self.sum_uptime = 0
self.sum_ttfb = 0
self.num_monitors = 0
for monitor in self.monitors:
if (id or url or name or location or pattern):
if (id and monitor['id'] == id) \
or (url and monitor['url'] == url) \
or (name and 'name' in monitor and monitor['name'] == name) \
or (location and location in monitor['monitor']['name']) \
or (pattern and pattern in monitor['url']):
if (not issuesOnly) or self.hasIssue(monitor):
self.print(monitor)
else:
if (not issuesOnly) or self.hasIssue(monitor):
self.print(monitor)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def add(self, url: str, protocol: str = 'https', name: str = '', port: int = 443, keyword: str = '', matchType:str = '', nodeId: str = '', force: bool = False):
"""Add a monitor for the given URL"""
if url and self.fetchData():
# urls do not include the protocol
url = url.replace('https://', '').replace('http://', '')
# use the url as name if not specified otherwise
if not name:
name = url
# use https as protocol if not specified otherwise
if not protocol:
protocol = 'https'
# check if monitored url already exists unless --force is specified
if not force:
for monitor in self.monitors:
if monitor['url'] == url:
print(url, 'already exists and will not be added')
return
# other parameters:
# port: int (e.g. 443, 80)
# redirects: int (e.g. 3 max redirects; 0 for no redirects)
# timeout: int (e.g. 30 seconds)
# interval: int # How often to perform the check in seconds
# keyword: str # Alert when error on my page is found in the page HTML
# match_type: str 'yes' or 'no' # If set to yes it will alert when keyword is found on page, if no alert when keyword not found
# monitor: str
# protocol: str [http, https, icmp, tcp]
# Make request to API endpoint
data = {
'url': url,
'name': name,
'port': port,
'protocol': protocol,
'keyword': keyword,
'match_type': matchType,
'monitor': nodeId,
}
apiPost('monitors', self.config, data=data, expectedStatusCode=200, successMessage='Added site monitor: ' + url, errorMessage='Failed to add site monitor ' + url)
def remove(self, id: str = '', url: str = '', name: str = '', location: str = '', pattern: str = ''):
"""Remove the monitor for the given URL"""
removed = 0
if (id or url or name or location or pattern) and self.fetchData():
for monitor in self.monitors:
curr_id = monitor['id']
curr_url = monitor['url']
curr_name = monitor['name'] if 'name' in monitor else ''
curr_location = monitor['monitor']['name']
if (id == curr_id) \
or (url and url == curr_url) \
or (name and name == curr_name) \
or (location and location in curr_location) \
or (pattern and pattern in curr_url):
removed += 1
apiDelete('monitor/' + curr_id, self.config, expectedStatusCode=204, successMessage='Removed site monitor: ' + curr_url + ' [' + curr_id + ']', errorMessage='Failed to remove site monitor ' + curr_url + ' [' + curr_id + ']')
if removed == 0:
printWarn('No monitors with given pattern found: id=' + id, 'url=' + url, 'name=' + name, 'location=' + location, 'pattern=' + pattern)
def hasIssue(self, monitor):
"""Return True if the specified monitor has some issue by having a value outside of the expected threshold specified in config file"""
if float(monitor['uptime_percentage']) <= float(self.config.threshold_uptime):
return True
if 'last_check' in monitor and 'ttfb' in monitor['last_check']:
if float(monitor['last_check']['ttfb']) >= float(self.config.threshold_ttfb):
return True
return False
def getUptime(self, siteId: str, startTimestamp: float, endTimestamp: float):
"""Retrieve uptime for the specified site within the specified uptime period"""
# make sure all parameters are defined
if not (siteId and startTimestamp > 0 and endTimestamp > 0):
return None
params = self.config.params()
params['start'] = int(startTimestamp)
params['end'] = int(endTimestamp)
response_json = apiGet('monitor/' + siteId + '/uptime', 200, self.config, params=params)
if response_json:
if 'uptime_percentage' in response_json:
return response_json
else:
printWarn('No uptime information available for site', siteId)
return None
else:
return None
def listUptimes(self, siteId: str, periods, dateTimeFormat: str = '%Y-%m-%d'):
"""Retrieve uptime for the specified site within the specified uptime periods"""
table = PrettyTable(field_names=['Uptime in %', 'Period', 'Downtime', 'Events'])
table.align['Uptime in %'] = 'r'
table.align['Period'] = 'l'
table.align['Downtime'] = 'l'
table.align['Events'] = 'r'
for period in periods:
uptime_json = self.getUptime(siteId, period[0], period[1])
if uptime_json:
startDate = datetime.fromtimestamp(float(uptime_json['start']))
endDate = datetime.fromtimestamp(float(uptime_json['end']))
uptime_percentage = float(uptime_json['uptime_percentage'])
downtime_seconds = uptime_json['downtime_seconds']
events = uptime_json['events']
table.add_row(["{:.4f}%".format(uptime_percentage), formatTimespan(startDate, endDate, dateTimeFormat), formatDowntime(downtime_seconds), events])
print(table)
def printHeader(self):
"""Print CSV header if CSV format requested"""
if (self.format == 'csv'):
print(self.config.delimiter.join(['ID', 'URL', 'Name', 'Code', 'Status', 'Status Message', 'Uptime %', 'Time to first Byte', 'Location']))
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
avg_uptime = self.sum_uptime / self.num_monitors if self.sum_uptime > 0 and self.num_monitors > 0 else 0
avg_ttfb = self.sum_ttfb / self.num_monitors if self.sum_ttfb > 0 and self.num_monitors > 0 else 0
if avg_uptime <= float(self.config.threshold_uptime):
uptime_percentage_text = f"{bcolors.FAIL}" + "{:.4f}".format(avg_uptime) + f"{bcolors.ENDC}"
else:
uptime_percentage_text = "{:.4f}".format(avg_uptime)
if avg_ttfb >= float(self.config.threshold_ttfb):
ttfb_text = f"{bcolors.FAIL}" + "{:.2f}".format(avg_ttfb) + f"{bcolors.ENDC}"
else:
ttfb_text = "{:.2f}".format(avg_ttfb)
# add average row as table footer
self.table.add_row(['', 'Average of ' + str(self.num_monitors) + ' monitors', '', uptime_percentage_text, ttfb_text, ''])
# remove columns that should be excluded
if self.config.hide_ids:
self.table.del_column('ID')
# Get string to be printed and create list of elements separated by \n
list_of_table_lines = self.table.get_string().split('\n')
# remember summary row
summary_line = list_of_table_lines[-2]
# remove summary row again to allow sorting and limiting
self.table.del_row(len(self.table.rows)-1)
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit).split('\n')
else:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse).split('\n')
# Sorting by multiple columns could be done like this
# list_of_table_lines = self.table.get_string(sortby=("Col Name 1", "Col Name 2"), reversesort=reverse).split('\n')
# Print the table
print('\n'.join(list_of_table_lines))
print(summary_line)
print(list_of_table_lines[0])
# elif (self.format == 'csv'):
# print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, monitor):
"""Print the data of the specified web monitor"""
if (self.format == 'json'):
print(json.dumps(monitor, indent=4))
return
id = monitor['id']
url = monitor['url']
name = monitor['name'] if 'name' in monitor else ''
code = monitor['code'] if 'code' in monitor else ''
status = monitor['status'] if 'status' in monitor else ''
status_message = monitor['status_message'] if 'status_message' in monitor else ''
location = monitor['monitor']['name']
uptime_percentage = float(monitor['uptime_percentage'])
if 'last_check' in monitor and 'ttfb' in monitor['last_check']:
ttfb = float(monitor['last_check']['ttfb'])
self.sum_uptime = self.sum_uptime + uptime_percentage
self.sum_ttfb = self.sum_ttfb + ttfb
self.num_monitors = self.num_monitors + 1
else:
ttfb = -1
if (self.format == 'csv'):
print(self.config.delimiter.join([id, url, name, str(code), status, status_message, str(uptime_percentage) + '%', str(ttfb), location]))
else:
if uptime_percentage <= float(self.config.threshold_uptime):
uptime_percentage_text = f"{bcolors.FAIL}" + "{:.4f}".format(uptime_percentage) + f"{bcolors.ENDC}"
else:
uptime_percentage_text = "{:.4f}".format(uptime_percentage)
if ttfb >= float(self.config.threshold_ttfb):
ttfb_text = f"{bcolors.FAIL}" + "{:.2f}".format(ttfb) + f"{bcolors.ENDC}"
elif ttfb != -1:
ttfb_text = "{:.2f}".format(ttfb)
else:
ttfb_text = f"{bcolors.FAIL}n/a{bcolors.ENDC}"
self.table.add_row([id, url, status_message, uptime_percentage_text, ttfb_text, location])
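# Usage sketch (illustrative only; the URL and dates are made up): add a monitor, list all
# monitors with issues, and print the uptime for a one-month period:
#
#   from datetime import datetime
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.sites import Sites
#
#   config = Config('1.0.19')
#   sites = Sites(config, format='table')
#   sites.add('example.com', protocol='https', port=443)
#   sites.list(issuesOnly=True, limit=10)
#   start = datetime(2023, 5, 1).timestamp()
#   end = datetime(2023, 6, 1).timestamp()
#   sites.listUptimes(sites.getSiteId('example.com'), [(start, end)])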
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/sites.py | sites.py |
#!/usr/bin/env python3
import json
from datetime import datetime
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class SiteCharts(object):
def __init__(self, config: Config):
self.config = config
def create(self, siteId: str, startTimestamp: float = 0, endTimestamp: float = 0):
"""Create a site metrics chart for the specified site id"""
if not siteId:
printError('No site id specified')
return ''
params = self.config.params()
params['output'] = 'png'
if startTimestamp > 0:
params['start'] = int(startTimestamp)
if endTimestamp > 0:
params['end'] = int(endTimestamp)
response_json = apiGet('monitor/' + siteId + '/metrics', 200, self.config, params)
if response_json:
print(json.dumps(response_json, indent=4))
print()
print('Only JSON output supported currently. PNG export not yet implemented.')
'''
if 'uri' in response_json:
uri = response_json['uri']
print(uri)
return uri
else:
printWarn('Site with id', siteId, 'not found')
return ''
'''
else:
return ''
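# Usage sketch (illustrative only; 'YOUR_SITE_ID' is a placeholder): this currently prints the
# raw JSON metrics of the monitor, since PNG export is not implemented yet (see above):
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.sitecharts import SiteCharts
#
#   config = Config('1.0.19')
#   SiteCharts(config).create('YOUR_SITE_ID')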
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/sitecharts.py | sitecharts.py |
#!/usr/bin/env python3
import json
from datetime import datetime
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class ServerCharts(object):
def __init__(self, config: Config):
self.config = config
def create(self, serverId: str, metric: str = 'cpu', startTimestamp: float = 0, endTimestamp: float = 0, dataAsJSON: bool = False):
"""Create a server metrics chart for the specified server id"""
if not serverId:
printError('No server id specified')
return ''
if not metric:
printError('No metric specified: e.g. use \"cpu\"')
return ''
params = self.config.params()
if not dataAsJSON:
params['output'] = 'png'
# metrics can be one of [bitninja, cpu, httpd, mem, network, nginx, ping, process, swap, uptime, wp-toolkit]
params['metric'] = metric
if startTimestamp > 0:
params['start'] = int(startTimestamp)
if endTimestamp > 0:
params['end'] = int(endTimestamp)
response_json = apiGet('server/' + serverId + '/metrics', 200, self.config, params)
if response_json:
if dataAsJSON:
print(json.dumps(response_json, indent=4))
return ''
elif 'uri' in response_json:
uri = response_json['uri']
print(uri)
return uri
else:
printWarn('Server with id', serverId, 'not found')
return ''
else:
return ''
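# Usage sketch (illustrative only; 'YOUR_SERVER_ID' is a placeholder): request a memory chart
# and print its URL, or dump the raw CPU metrics as JSON instead:
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.servercharts import ServerCharts
#
#   config = Config('1.0.19')
#   charts = ServerCharts(config)
#   charts.create('YOUR_SERVER_ID', metric='mem')                     # prints the PNG URL
#   charts.create('YOUR_SERVER_ID', metric='cpu', dataAsJSON=True)    # prints raw JSON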
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/servercharts.py | servercharts.py |
#!/usr/bin/env python3
import os
import configparser
from .functions import printError
from .bcolors import bcolors
class Config(object):
def __init__(self, version: str):
self.version = version
self.filename = '360monitoring.ini'
self.endpoint = 'https://api.monitoring360.io/v1/'
self.api_key = ''
self.usertoken = ''
self.max_items = 5000
self.debug = False
self.readonly = False
self.hide_ids = False
self.delimiter = ','
self.last_version_check = ''
self.threshold_uptime = 99.0
self.threshold_ttfb = 1.0
self.threshold_free_diskspace = 20.0
self.threshold_cpu_usage = 80.0
self.threshold_mem_usage = 80.0
self.threshold_disk_usage = 80.0
self.loadFromFile()
def headers(self):
"""Set headers for http requests"""
if self.api_key:
return {
'Authorization': 'Bearer ' + self.api_key,
'User-Agent': '360 Monitoring CLI ' + self.version,
}
else:
printError('ERROR: No API key specified in ' + self.filename + ". Please run \"360monitoring config save --api-key YOUR_API_KEY\" to connect to your 360 Monitoring account.")
return {}
def params(self, tags:str = ''):
"""Set params for http requests"""
params = {
'perpage': self.max_items,
'api_mode': 'cli_' + self.version
}
if tags:
params['tags'] = tags
return params
def loadFromFile(self):
"""Read API endpoint and API key from config file"""
if os.path.isfile(self.filename):
parser = configparser.ConfigParser()
parser.read(self.filename)
if 'General' in parser.sections():
if 'last-version-check' in parser['General']:
self.last_version_check = parser['General']['last-version-check']
if 'Connection' in parser.sections():
if 'endpoint' in parser['Connection']:
self.endpoint = parser['Connection']['endpoint']
if 'api-key' in parser['Connection']:
self.api_key = parser['Connection']['api-key']
if 'usertoken' in parser['Connection']:
self.usertoken = parser['Connection']['usertoken']
if 'max-items' in parser['Connection']:
self.max_items = parser['Connection']['max-items']
if 'hide-ids' in parser['Connection']:
self.hide_ids = (parser['Connection']['hide-ids'] == 'True')
if 'debug' in parser['Connection']:
self.debug = (parser['Connection']['debug'] == 'True')
if 'readonly' in parser['Connection']:
self.readonly = (parser['Connection']['readonly'] == 'True')
if 'Thresholds' in parser.sections():
if 'min-uptime-percent' in parser['Thresholds']:
self.threshold_uptime = parser['Thresholds']['min-uptime-percent']
if 'max-time-to-first-byte' in parser['Thresholds']:
self.threshold_ttfb = parser['Thresholds']['max-time-to-first-byte']
if 'min-free-diskspace-percent' in parser['Thresholds']:
self.threshold_free_diskspace = parser['Thresholds']['min-free-diskspace-percent']
if 'max-cpu-usage-percent' in parser['Thresholds']:
self.threshold_cpu_usage = parser['Thresholds']['max-cpu-usage-percent']
if 'max-mem-usage-percent' in parser['Thresholds']:
self.threshold_mem_usage = parser['Thresholds']['max-mem-usage-percent']
if 'max-disk-usage-percent' in parser['Thresholds']:
self.threshold_disk_usage = parser['Thresholds']['max-disk-usage-percent']
def saveToFile(self, printInfo : bool = True):
"""Save settings to config file"""
parser = configparser.ConfigParser()
parser['General'] = {
'last-version-check': self.last_version_check,
}
parser['Connection'] = {
'api-key': self.api_key,
'usertoken': self.usertoken,
'endpoint': self.endpoint,
'max-items': self.max_items,
'hide-ids': self.hide_ids,
'debug': self.debug,
'readonly': self.readonly,
}
parser['Thresholds'] = {
'min-uptime-percent': self.threshold_uptime,
'max-time-to-first-byte': self.threshold_ttfb,
'min-free-diskspace-percent': self.threshold_free_diskspace,
'max-cpu-usage-percent': self.threshold_cpu_usage,
'max-mem-usage-percent': self.threshold_mem_usage,
'max-disk-usage-percent': self.threshold_disk_usage,
}
with open(self.filename, 'w') as config_file:
parser.write(config_file)
if printInfo:
print('Saved settings to', self.filename)
def print(self):
"""Print current settings"""
if os.path.isfile(self.filename):
print('config file:'.ljust(30), self.filename)
else:
print('config file:'.ljust(30) + f"{bcolors.WARNING}" + self.filename + f" does not exist. Please run \"360monitoring config save --api-key YOUR_API_KEY\" to configure.{bcolors.ENDC}")
print('CLI version:'.ljust(30), self.version)
print()
print('Connection')
print('----------')
print('endpoint:'.ljust(30), self.endpoint)
if self.api_key:
print('api-key:'.ljust(30), self.api_key)
else:
print('api-key:'.ljust(30) + f"{bcolors.FAIL}No API key specified in " + self.filename + f". Please run \"360monitoring config save --api-key YOUR_API_KEY\" to connect to your 360 Monitoring account.{bcolors.ENDC}")
if self.usertoken:
print('usertoken:'.ljust(30), self.usertoken)
else:
print('usertoken:'.ljust(30) + f"{bcolors.FAIL}No usertoken specified in " + self.filename + f". Please run \"360monitoring config save --usertoken YOUR_TOKEN\" to use it for creating magic links.{bcolors.ENDC}")
print('max items:'.ljust(30), self.max_items)
print('hide ids:'.ljust(30), self.hide_ids)
print('debug:'.ljust(30), self.debug)
print('readonly:'.ljust(30), self.readonly)
print()
print('Thresholds')
print('----------')
print('min-uptime-percent:'.ljust(30), self.threshold_uptime)
print('max-time-to-first-byte:'.ljust(30), self.threshold_ttfb)
print('min-free-diskspace-percent:'.ljust(30), self.threshold_free_diskspace)
print('max-cpu-usage-percent:'.ljust(30), self.threshold_cpu_usage)
print('max-mem-usage-percent:'.ljust(30), self.threshold_mem_usage)
print('max-disk-usage-percent:'.ljust(30), self.threshold_disk_usage)
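# Usage sketch (illustrative only): create a config, store an API key in 360monitoring.ini in the
# current working directory and print the resulting settings:
#
#   from cli360monitoring.lib.config import Config
#
#   config = Config('1.0.19')
#   config.api_key = 'YOUR_API_KEY'
#   config.saveToFile()
#   config.print()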
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/config.py | config.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from datetime import datetime
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class ServerNotifications(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.notifications = None
self.table = PrettyTable(field_names=['Start', 'End', 'Status', 'Summary'])
self.table.align['Start'] = 'c'
self.table.align['End'] = 'c'
self.table.align['Status'] = 'c'
self.table.align['Summary'] = 'l'
def fetchData(self, serverId: str, startTimestamp: float, endTimestamp: float):
"""Retrieve a list of all alerts of a specified server in the specified time period"""
# if data is already downloaded, use cached data
if self.notifications != None:
return True
params = self.config.params()
params['start'] = int(startTimestamp)
params['end'] = int(endTimestamp)
response_json = apiGet('server/' + serverId + '/notifications', 200, self.config, params)
if response_json:
if 'data' in response_json:
self.notifications = response_json['data']
return True
else:
printWarn('No notifications found for server', serverId)
self.notifications = None
return False
else:
self.notifications = None
return False
def list(self, serverId: str, startTimestamp: float, endTimestamp: float, sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through list of server notifications and print details"""
if self.fetchData(serverId, startTimestamp, endTimestamp):
# if JSON was requested and no filters, then just print it without iterating through
if self.format == 'json':
print(json.dumps(self.notifications, indent=4))
return
# Iterate through list of servers and print data, etc.
for notification in self.notifications:
self.print(notification)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
# if self.config.hide_ids:
# self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, notification):
"""Print the data of the specified contact"""
if (self.format == 'json'):
print(json.dumps(notification, indent=4))
return
startTimestamp = datetime.fromtimestamp(float(notification['start']))
endTimestamp = datetime.fromtimestamp(float(notification['end']))
status = notification['status']
summary = notification['summary']
self.table.add_row([startTimestamp.strftime('%Y-%m-%d %H:%M:%S'), endTimestamp.strftime('%Y-%m-%d %H:%M:%S'), status, summary])
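# Usage sketch (illustrative only; 'YOUR_SERVER_ID' is a placeholder): list all alerts of the
# last 24 hours for one server:
#
#   from datetime import datetime, timedelta
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.servernotifications import ServerNotifications
#
#   config = Config('1.0.19')
#   now = datetime.now()
#   ServerNotifications(config).list('YOUR_SERVER_ID', (now - timedelta(days=1)).timestamp(), now.timestamp())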
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/servernotifications.py | servernotifications.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from .api import apiGet, apiPost
from .config import Config
from .functions import printError, printWarn
class UserTokens(object):
def __init__(self, config: Config):
self.config = config
self.usertokens = None
self.table = PrettyTable(field_names=['Token', 'Name', 'Tags'])
self.table.align['Token'] = 'l'
self.table.align['Name'] = 'l'
self.table.align['Tags'] = 'l'
def fetchData(self):
"""Retrieve the list of all usertokens"""
# if data is already downloaded, use cached data
if self.usertokens != None:
return True
response_json = apiGet('usertoken', 200, self.config)
if response_json:
if 'tokens' in response_json:
self.usertokens = response_json['tokens']
return True
else:
printWarn('No usertokens found')
self.usertokens = None
return False
else:
self.usertokens = None
return False
def list(self, token: str = '', format: str = 'table'):
"""Iterate through list of usertokens and print details"""
if self.fetchData():
if self.usertokens != None:
# if JSON was requested and no filters, then just print it without iterating through
if (format == 'json' and not token):
print(json.dumps(self.usertokens, indent=4))
return
for usertoken in self.usertokens:
if token:
if usertoken['token'] == token:
self.print(usertoken)
break
else:
self.print(usertoken)
if (format == 'table'):
print(self.table)
elif (format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def token(self):
"""Print the data of first usertoken"""
if self.fetchData() and len(self.usertokens) > 0:
return self.usertokens[0]['token']
def create(self, name: str = '', tags: str = ''):
"""Create a new usertoken"""
data = {
'name': name,
'tags': tags
}
return apiPost('usertoken', self.config, data=data, expectedStatusCode=200, successMessage='Created usertoken', errorMessage='Failed to create usertoken')
def print(self, usertoken, format: str = 'table'):
"""Print the data of the specified usertoken"""
if (format == 'json'):
print(json.dumps(usertoken, indent=4))
return
token = usertoken['token']
name = usertoken['name'] if 'name' in usertoken and usertoken['name'] else ''
tags = ''
if 'tags' in usertoken and usertoken['tags']:
for tag in usertoken['tags']:
tags += ', ' + tag
self.table.add_row([token, name, tags.lstrip(', ')])
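# Usage sketch (illustrative only): create a token for automation and list all existing tokens:
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.usertokens import UserTokens
#
#   config = Config('1.0.19')
#   usertokens = UserTokens(config)
#   usertokens.create(name='ci-token')
#   usertokens.list(format='table')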
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/usertokens.py | usertokens.py |
#!/usr/bin/env python3
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
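# Usage sketch: the constants are plain ANSI escape sequences and are used throughout the
# package like this:
#
#   print(f"{bcolors.WARNING}disk space is low{bcolors.ENDC}")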
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/bcolors.py | bcolors.py |
#!/usr/bin/env python3
from prettytable import PrettyTable
from .config import Config
from .servers import Servers
class WPToolkit(object):
def __init__(self, config: Config):
self.config = config
self.table = PrettyTable(field_names=['ID', 'Server name', 'WP sites', 'Alive', 'Outdated', 'Outdated PHP', 'Broken'])
self.table.align['ID'] = 'l'
self.table.align['Server name'] = 'l'
self.table.min_width['Server name'] = 24
self.table.align['WP sites'] = 'r'
self.table.align['Alive'] = 'r'
self.table.align['Outdated'] = 'r'
self.table.align['Outdated PHP'] = 'r'
self.table.align['Broken'] = 'r'
self.num_servers_with_wpt = 0
self.sum_wp_sites_total = 0
self.sum_wp_sites_alive = 0
self.sum_wp_sites_outdated = 0
self.sum_wp_sites_outdated_php = 0
self.sum_wp_sites_broken = 0
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
# add summary row as table footer
self.table.add_row(['', 'Sum of ' + str(self.num_servers_with_wpt) + ' servers', self.sum_wp_sites_total, self.sum_wp_sites_alive, self.sum_wp_sites_outdated, self.sum_wp_sites_outdated_php, self.sum_wp_sites_broken])
if self.config.hide_ids:
self.table.del_column('ID')
# Get string to be printed and create list of elements separated by \n
list_of_table_lines = self.table.get_string().split('\n')
# remember summary row
summary_line = list_of_table_lines[-2]
# remove summary row again to allow sorting and limiting
self.table.del_row(len(self.table.rows)-1)
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit).split('\n')
else:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse).split('\n')
# Sorting by multiple columns could be done like this
# list_of_table_lines = self.table.get_string(sortby=("Col Name 1", "Col Name 2"), reversesort=reverse).split('\n')
# Print the table
print('\n'.join(list_of_table_lines))
print(summary_line)
print(list_of_table_lines[0])
def print(self, format: str = 'table', issuesOnly: bool = False, sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through all servers and aggregate metrics for those that have WP Toolkit installed"""
servers = Servers(self.config)
if servers.fetchData():
for server in servers.servers:
id = server['id']
name = server['name']
last_data = server['last_data']
if last_data and 'wp-toolkit' in last_data:
wpt_data = last_data['wp-toolkit']
if wpt_data:
wp_sites_total = wpt_data['WordPress Websites']
wp_sites_alive = wpt_data['WordPress Websites - Alive']
wp_sites_outdated = wpt_data['WordPress Websites - Outdated']
wp_sites_outdated_php = wpt_data['WordPress Websites - Outdated PHP']
wp_sites_broken = wpt_data['WordPress Websites - Broken']
self.num_servers_with_wpt += 1
self.sum_wp_sites_total += wp_sites_total
self.sum_wp_sites_alive += wp_sites_alive
self.sum_wp_sites_outdated += wp_sites_outdated
self.sum_wp_sites_outdated_php += wp_sites_outdated_php
self.sum_wp_sites_broken += wp_sites_broken
if wp_sites_outdated > 0 or wp_sites_outdated_php > 0 or wp_sites_broken > 0 or not issuesOnly:
self.table.add_row([id, name, wp_sites_total, wp_sites_alive, wp_sites_outdated, wp_sites_outdated_php, wp_sites_broken])
if (format == 'table'):
self.printFooter(sort=sort, reverse=reverse, limit=limit)
elif (format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
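# Usage sketch (illustrative only): print WP Toolkit statistics for servers that report outdated
# or broken WordPress sites:
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.wptoolkit import WPToolkit
#
#   config = Config('1.0.19')
#   WPToolkit(config).print(format='table', issuesOnly=True)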
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/wptoolkit.py | wptoolkit.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class Nodes(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.nodes = None
self.table = PrettyTable(field_names=['ID', 'Name'])
self.table.align['ID'] = 'l'
self.table.align['Name'] = 'l'
def fetchData(self):
"""Retrieve the list of all nodes"""
# if data is already downloaded, use cached data
if self.nodes != None:
return True
response_json = apiGet('nodes', 200, self.config)
if response_json:
if 'nodes' in response_json:
self.nodes = response_json['nodes']
return True
else:
printWarn('No nodes found')
self.nodes = None
return False
else:
self.nodes = None
return False
def list(self, id: str = '', name: str = '', sort: str = 'Name', reverse: bool = False, limit: int = 0):
"""Iterate through list of nodes and print details"""
if self.fetchData():
# if JSON was requested and no filters, then just print it without iterating through
if (self.format == 'json' and not (id or name or limit > 0)):
print(json.dumps(self.nodes, indent=4))
return
for node in self.nodes:
if (id or name):
if (id and 'id' in node and node['id'] == id) \
or (name and 'pretty_name' in node and name in node['pretty_name']):
self.print(node)
else:
self.print(node)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def getNodeId(self, name: str):
"""Return Node Id for the location with the specified name. Only the first matching entry (exact match) is returned or empty string if not found"""
if name and self.fetchData():
# Iterate through list of nodes and find the specified one
for node in self.nodes:
if name in node['pretty_name']:
return node['id']
return ''
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
if self.config.hide_ids:
self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, node):
"""Print the data of the specified node"""
if (self.format == 'json'):
print(json.dumps(node, indent=4))
return
id = node['id']
name = node['pretty_name']
'''
{
"pretty_name": "Nuremberg, DE",
"ip": "116.203.118.7",
"ipv6": "2a01:4f8:c0c:c52d::1",
"lastactive": 1682624703,
"password": "***********",
"username": "***********",
"geodata": {
"latitude": 49.460983,
"longitude": 11.061859
},
"id": "60e81944f401963e610a0623"
}
'''
self.table.add_row([id, name])
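# Usage sketch (illustrative only): list all monitoring nodes and look up the id of the first
# node whose name contains "Nuremberg" (the sample payload above shows such a node):
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.nodes import Nodes
#
#   config = Config('1.0.19')
#   nodes = Nodes(config, format='table')
#   nodes.list()
#   node_id = nodes.getNodeId('Nuremberg')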
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/nodes.py | nodes.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from datetime import datetime
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class SiteNotifications(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.notifications = None
self.table = PrettyTable(field_names=['Start', 'End', 'Status', 'Summary'])
self.table.align['Start'] = 'c'
self.table.align['End'] = 'c'
self.table.align['Status'] = 'c'
self.table.align['Summary'] = 'l'
def fetchData(self, siteId: str, startTimestamp: float, endTimestamp: float):
"""Retrieve a list of all alerts of a specified site in the specified time period"""
# if data is already downloaded, use cached data
if self.notifications != None:
return True
params = self.config.params()
params['start'] = int(startTimestamp)
params['end'] = int(endTimestamp)
response_json = apiGet('monitor/' + siteId + '/notifications', 200, self.config, params)
if response_json:
if 'data' in response_json:
self.notifications = response_json['data']
return True
else:
printWarn('No notifications found for site', siteId)
self.notifications = None
return False
else:
self.notifications = None
return False
def list(self, siteId: str, startTimestamp: float, endTimestamp: float, sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through list of site notifications and print details"""
if self.fetchData(siteId, startTimestamp, endTimestamp):
# if JSON was requested and no filters, then just print it without iterating through
if self.format == 'json':
print(json.dumps(self.notifications, indent=4))
return
# Iterate through list of sites and print data, etc.
for notification in self.notifications:
self.print(notification)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
# if self.config.hide_ids:
# self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, notification):
"""Print the data of the specified contact"""
if (self.format == 'json'):
print(json.dumps(notification, indent=4))
return
startTimestamp = datetime.fromtimestamp(float(notification['start']))
endTimestamp = datetime.fromtimestamp(float(notification['end']))
status = notification['status']
summary = notification['summary']
self.table.add_row([startTimestamp.strftime('%Y-%m-%d %H:%M:%S'), endTimestamp.strftime('%Y-%m-%d %H:%M:%S'), status, summary])
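# Usage sketch (illustrative only; 'YOUR_MONITOR_ID' is a placeholder): list the alerts of one
# website monitor for the last 7 days:
#
#   from datetime import datetime, timedelta
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.sitenotifications import SiteNotifications
#
#   config = Config('1.0.19')
#   now = datetime.now()
#   SiteNotifications(config).list('YOUR_MONITOR_ID', (now - timedelta(days=7)).timestamp(), now.timestamp())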
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/sitenotifications.py | sitenotifications.py |
#!/usr/bin/env python3
import json
from datetime import datetime
from prettytable import PrettyTable
from .api import apiGet, apiPost, apiDelete
from .config import Config
from .functions import printError, printWarn
class Incidents(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.incidents = None
self.table = PrettyTable(field_names=['ID', 'Name', 'Body', 'Status', 'Timestamp'])
self.table.align['ID'] = 'l'
self.table.align['Name'] = 'l'
self.table.align['Body'] = 'l'
self.table.align['Status'] = 'c'
self.table.align['Timestamp'] = 'c'
def fetchData(self, page_id: str):
"""Retrieve the list of all incidents"""
# if data is already downloaded, use cached data
if self.incidents != None:
return True
response_json = apiGet('page/' + page_id + '/incidents', 200, self.config)
if response_json:
if 'incidents' in response_json:
self.incidents = response_json['incidents']
return True
else:
printWarn('No incidents found for page', page_id)
self.incidents = None
return False
else:
self.incidents = None
return False
def list(self, page_id: str, id: str = '', name: str = ''):
"""Iterate through list of incidents and print details"""
if self.fetchData(page_id):
# if JSON was requested and no filters, then just print it without iterating through
if (self.format == 'json'):
print(json.dumps(self.incidents, indent=4))
return
for incident in self.incidents:
if id or name:
if (id and 'id' in incident and incident['id'] == id) \
or (name and 'name' in incident and incident['name'] == name):
self.print(incident)
else:
self.print(incident)
self.printFooter()
def add(self, page_id: str, name: str, body: str = ''):
"""Add a incident for the given name"""
if page_id and name:
if self.fetchData(page_id) and self.incidents:
for incident in self.incidents:
if incident['name'] == name and incident['body'] == body:
print('Incident \'' + name + '\' already exists with this text and will not be added')
return False
# Make request to API endpoint
data = {
'name': name,
'body': body
}
apiPost('page/' + page_id + '/incidents', self.config, data=data, expectedStatusCode=204, successMessage='Added incident: ' + name, errorMessage='Failed to add incident \"' + name + '\" to page \"' + page_id + '\"')
def remove(self, page_id: str, id: str = '', name: str = ''):
"""Remove the incident for the given name"""
if (id or name) and self.fetchData(page_id):
for incident in self.incidents:
curr_id = incident['id']
curr_name = incident['name']
curr_update_id = ''
if (id and id == curr_id) \
or (name and name == curr_name):
apiDelete('page/' + page_id + '/incident/' + curr_id + '/' + curr_update_id, self.config, expectedStatusCode=204, successMessage='Removed incident: ' + curr_name + ' [' + curr_id + ']', errorMessage='Failed to remove incident \"' + curr_name + '\" [' + curr_id + '] from page \"' + page_id + '\"')
return
printWarn('No incident with given pattern found on page \"' + page_id + '\": id=' + id, 'name=' + name)
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
if self.config.hide_ids:
self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, incident):
"""Print the data of the specified incident"""
if (self.format == 'json'):
print(json.dumps(incident, indent=4))
return
id = incident['id']
name = incident['name'] if 'name' in incident else ''
body = incident['body'] if 'body' in incident else ''
status = incident['status'] if 'status' in incident else ''
timestamp = datetime.fromtimestamp(incident['timestamp']) if 'timestamp' in incident else ''
self.table.add_row([id, name, body, status, timestamp])
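# Usage sketch (illustrative only; 'YOUR_PAGE_ID' is a placeholder): add an incident to a status
# page, list all incidents and remove it again by name:
#
#   from cli360monitoring.lib.config import Config
#   from cli360monitoring.lib.incidents import Incidents
#
#   config = Config('1.0.19')
#   incidents = Incidents(config, format='table')
#   incidents.add('YOUR_PAGE_ID', 'Scheduled maintenance', body='Upgrading the database cluster')
#   incidents.list('YOUR_PAGE_ID')
#   incidents.remove('YOUR_PAGE_ID', name='Scheduled maintenance')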
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/incidents.py | incidents.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from .api import apiGet, apiPut
from .config import Config
from .functions import printError, printWarn
from .bcolors import bcolors
class Servers(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.servers = None
self.table = PrettyTable(field_names=['ID', 'Server name', 'IP Address', 'Status', 'OS', 'CPU Usage %', 'Mem Usage %', 'Disk Usage %', 'Disk Info', 'Tags'])
self.table.align['ID'] = 'l'
self.table.align['Server name'] = 'l'
self.table.min_width['Server name'] = 24
self.table.align['Tags'] = 'l'
self.sum_cpu_usage = 0
self.sum_mem_usage = 0
self.sum_disk_usage = 0
self.num_servers = 0
def fetchData(self, tags: str = ''):
"""Retrieve a list of all monitored servers"""
# if data is already downloaded, use cached data
if self.servers != None:
return True
response_json = apiGet('servers', 200, self.config, self.config.params(tags))
if response_json:
if 'servers' in response_json:
self.servers = response_json['servers']
return True
else:
printWarn('No servers found for tags', tags)
self.servers = None
return False
else:
self.servers = None
return False
def update(self, serverId: str, tags):
"""Update a specific server and add specified tags to it"""
data = {
"tags": tags
}
apiPut('server/' + serverId, self.config, data=data, expectedStatusCode=200, successMessage='Updated tags of server ' + serverId + ' to ' + tags, errorMessage='Failed to update server ' + serverId)
def list(self, issuesOnly: bool, sort: str, reverse: bool, limit: int, tags):
"""Iterate through list of server monitors and print details"""
if self.fetchData(','.join(tags)):
# if JSON was requested and no filters, then just print it without iterating through
if (self.format == 'json' and not (issuesOnly or len(tags) > 0 or limit > 0)):
print(json.dumps(self.servers, indent=4))
return
self.printHeader()
self.sum_cpu_usage = 0
self.sum_mem_usage = 0
self.sum_disk_usage = 0
self.num_servers = 0
# Iterate through list of servers and print data, etc.
for server in self.servers:
if len(tags) == 0:
if (not issuesOnly) or self.hasIssue(server):
self.print(server)
elif 'tags' in server:
match = True
for tag in tags:
if not tag in server['tags']:
match = False
break
if match:
if (not issuesOnly) or self.hasIssue(server):
self.print(server)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def setTags(self, pattern: str, tags):
"""Set the tags for the server specified with pattern. Pattern can be either the server ID or its name"""
if pattern and len(tags) > 0 and self.fetchData():
for server in self.servers:
if pattern == server['id'] or pattern in server['name']:
return self.update(server['id'], tags)
printWarn('No server with given pattern found: ' + pattern)
def getServerId(self, name: str):
"""Return Server Id for the server with the specified name. Only the first matching entry (exact match) is returned or empty string if not found"""
if name and self.fetchData():
# Iterate through list of servers and find the specified one
for server in self.servers:
if server['name'] == name:
return server['id']
return ''
def getRecommendation(self, server):
"""Return recommendation text if the specified server has some issue by having a value outside of the expected threshold specified in config file"""
last_data = server['last_data']
mem_usage_percent = server['summary']['mem_usage_percent'] if 'summary' in server else 0
memory_total = last_data['memory']['total'] if 'memory' in last_data else 0
if memory_total > 0 and mem_usage_percent >= float(self.config.threshold_mem_usage):
memory_total_gb = memory_total / 1024 / 1024
memory_total_gb_recommended = round(memory_total_gb * 2)
if (memory_total_gb_recommended % 2) != 0:
memory_total_gb_recommended += 1
return 'Memory is too small for your workload. Please consider upgrading to a larger server with at least ' + str(memory_total_gb_recommended) + ' GB memory.'
cpu_usage_percent = server['summary']['cpu_usage_percent'] if 'summary' in server else 0
cores = last_data['cores'] if 'cores' in last_data else 0
if cores > 0 and cpu_usage_percent >= float(self.config.threshold_cpu_usage):
cores_recommended = round(cores * 2)
if (cores_recommended % 2) != 0:
cores_recommended += 1
return 'CPU is too small for your workload. Please consider upgrading to a larger server with at least ' + str(cores_recommended) + ' CPU cores.'
disk_usage_percent = server['summary']['disk_usage_percent'] if 'summary' in server else 0
last_data = server['last_data']
if 'df' in last_data: # disk_usage_percent >= float(self.config.threshold_disk_usage)
for disk in last_data['df']:
mount = disk['mount']
free_disk_space = disk['free_bytes']
used_disk_space = disk['used_bytes']
total_disk_space = free_disk_space + used_disk_space
free_disk_space_percent = free_disk_space / total_disk_space * 100
total_disk_space_gb = total_disk_space / 1024 / 1024 / 1024  # free_bytes/used_bytes are reported in bytes
total_disk_space_gb_recommended = round(total_disk_space_gb * 2)
if (total_disk_space_gb_recommended % 2) != 0:
total_disk_space_gb_recommended += 1
if free_disk_space_percent <= float(self.config.threshold_free_diskspace):
return 'Disk volume "' + mount + '" is almost exhausted. Please consider extending your storage volume or upgrading to a larger server with at least ' + str(total_disk_space_gb_recommended) + ' GB disk space for this volume.'
return ''
def hasIssue(self, server):
"""Return True if the specified server has some issue by having a value outside of the expected threshold specified in config file"""
if self.getRecommendation(server):
return True
else:
return False
def printHeader(self):
"""Print CSV if CSV format requested"""
if (self.format == 'csv'):
print(self.config.delimiter.join(self.table.field_names))
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
avg_cpu_usage = self.sum_cpu_usage / self.num_servers if self.sum_cpu_usage > 0 and self.num_servers > 0 else 0
avg_mem_usage = self.sum_mem_usage / self.num_servers if self.sum_mem_usage > 0 and self.num_servers > 0 else 0
avg_disk_usage = self.sum_disk_usage / self.num_servers if self.sum_disk_usage > 0 and self.num_servers > 0 else 0
if avg_cpu_usage >= float(self.config.threshold_cpu_usage):
avg_cpu_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_cpu_usage) + '%' + f"{bcolors.ENDC}"
else:
avg_cpu_usage_text = "{:.1f}".format(avg_cpu_usage) + '%'
if avg_mem_usage >= float(self.config.threshold_mem_usage):
avg_mem_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_mem_usage) + '%' + f"{bcolors.ENDC}"
else:
avg_mem_usage_text = "{:.1f}".format(avg_mem_usage) + '%'
if avg_disk_usage >= float(self.config.threshold_disk_usage):
avg_disk_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_disk_usage) + '%' + f"{bcolors.ENDC}"
else:
avg_disk_usage_text = "{:.1f}".format(avg_disk_usage) + '%'
# add average row as table footer
self.table.add_row(['', 'Average of ' + str(self.num_servers) + ' servers', '', '', '', avg_cpu_usage_text, avg_mem_usage_text, avg_disk_usage_text, '', ''])
if self.config.hide_ids:
self.table.del_column('ID')
# Get string to be printed and create list of elements separated by \n
list_of_table_lines = self.table.get_string().split('\n')
# remember summary row
summary_line = list_of_table_lines[-2]
# remove summary row again to allow sorting and limiting
self.table.del_row(len(self.table.rows)-1)
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
                    sort = self.table.field_names[int(sort) - 1]
else:
sort = None
if limit > 0:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit).split('\n')
else:
list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse).split('\n')
# Sorting by multiple columns could be done like this
            # list_of_table_lines = self.table.get_string(sortby=("Col Name 1", "Col Name 2"), reversesort=reverse).split('\n')
# Print the table
print('\n'.join(list_of_table_lines))
print(summary_line)
print(list_of_table_lines[0])
def print(self, server):
"""Print the data of the specified server monitor"""
if (self.format == 'json'):
print(json.dumps(server, indent=4))
return
id = server['id']
name = server['name']
os = server['os'] if 'os' in server else ''
agent_version = server['agent_version'] if 'agent_version' in server else ''
status = server['status'] if 'status' in server else ''
last_data = server['last_data']
uptime_seconds = last_data['uptime']['seconds'] if 'uptime' in last_data else 0
cores = last_data['cores'] if 'cores' in last_data else 0
memory_used = last_data['memory']['used'] if 'memory' in last_data else 0
memory_free = last_data['memory']['free'] if 'memory' in last_data else 0
memory_available = last_data['memory']['available'] if 'memory' in last_data else 0
memory_total = last_data['memory']['total'] if 'memory' in last_data else 0
connecting_ip = server['connecting_ip'] if 'connecting_ip' in server else ''
if 'ip_whois' in server and server['ip_whois']:
ip_whois = server['ip_whois']
ip_address = ip_whois['ip'] if 'ip' in ip_whois else ''
ip_country = ip_whois['country'] if 'country' in ip_whois else ''
ip_hoster = ip_whois['org'] if 'org' in ip_whois else ''
else:
ip_address = ''
ip_country = ''
ip_hoster = ''
cpu_usage_percent = server['summary']['cpu_usage_percent'] if 'summary' in server else 0
mem_usage_percent = server['summary']['mem_usage_percent'] if 'summary' in server else 0
disk_usage_percent = server['summary']['disk_usage_percent'] if 'summary' in server else 0
self.sum_cpu_usage = self.sum_cpu_usage + cpu_usage_percent
self.sum_mem_usage = self.sum_mem_usage + mem_usage_percent
self.sum_disk_usage = self.sum_disk_usage + disk_usage_percent
self.num_servers = self.num_servers + 1
if cpu_usage_percent >= float(self.config.threshold_cpu_usage):
cpu_usage_percent_text = f"{bcolors.FAIL}" + "{:.1f}".format(cpu_usage_percent) + '%' + f"{bcolors.ENDC}"
else:
cpu_usage_percent_text = "{:.1f}".format(cpu_usage_percent) + '%'
if mem_usage_percent >= float(self.config.threshold_mem_usage):
mem_usage_percent_text = f"{bcolors.FAIL}" + "{:.1f}".format(mem_usage_percent) + '%' + f"{bcolors.ENDC}"
else:
mem_usage_percent_text = "{:.1f}".format(mem_usage_percent) + '%'
if disk_usage_percent >= float(self.config.threshold_disk_usage):
disk_usage_percent_text = f"{bcolors.FAIL}" + "{:.1f}".format(disk_usage_percent) + '%' + f"{bcolors.ENDC}"
else:
disk_usage_percent_text = "{:.1f}".format(disk_usage_percent) + '%'
tags = ''
if 'tags' in server and server['tags']:
for tag in server['tags']:
if tags:
tags += ', ' + tag
else:
tags = tag
disk_info = ''
if 'df' in last_data:
for disk in last_data['df']:
free_disk_space = disk['free_bytes']
used_disk_space = disk['used_bytes']
total_disk_space = free_disk_space + used_disk_space
free_disk_space_percent = free_disk_space / total_disk_space * 100
mount = disk['mount']
# add separator
if disk_info:
disk_info += ', '
if free_disk_space_percent <= float(self.config.threshold_free_diskspace):
disk_info += f"{bcolors.FAIL}" + "{:.0f}".format(free_disk_space_percent) + "% free on " + mount + f"{bcolors.ENDC}"
else:
disk_info += "{:.0f}".format(free_disk_space_percent) + "% free on " + mount
if (self.format == 'csv'):
print(self.config.delimiter.join([id, name, ip_address, status, os, str(cpu_usage_percent) + '%', str(mem_usage_percent) + '%', str(disk_usage_percent) + '%', disk_info, tags]))
else:
self.table.add_row([id, name, ip_address, status, os, cpu_usage_percent_text, mem_usage_percent_text, disk_usage_percent_text, disk_info, tags])
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/servers.py | servers.py |
#!/usr/bin/env python3
import requests
import json
from .config import Config
from .functions import printError
def toParamString(params):
s = '?'
for k, v in params.items():
s += k + '=' + str(v) + '&'
return s.rstrip('&')
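# Illustrative example of toParamString (the token and page values below are made up):
#
#   toParamString({'token': 'abc123', 'page': 2})  ->  '?token=abc123&page=2'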
def apiGet(path: str, expectedStatusCode: int, config: Config, params: dict = None):
"""Do a GET request and return JSON from response if expected status code was returned"""
# check if headers are correctly set for authorization
if not config.headers():
return None
if not params:
params = config.params()
if config.debug:
print('GET', config.endpoint + path + toParamString(params))
# Make request to API endpoint
response = requests.get(config.endpoint + path, params=params, headers=config.headers())
# Check status code of response
if response.status_code == expectedStatusCode:
# Return json from response
return response.json()
else:
printError('An error occurred:', response.status_code)
return None
def apiPost(path: str, config: Config, params: dict = None, data: dict = None, expectedStatusCode: int = 200, successMessage: str = '', errorMessage: str = ''):
"""Do a POST request"""
# check if headers are correctly set for authorization
if not config.headers():
return False
if not params:
params = config.params()
dataStr = json.dumps(data) if data else ''
if config.debug:
print('POST', config.endpoint + path + toParamString(params), dataStr)
if config.readonly:
return False
# Make request to API endpoint
    response = requests.post(config.endpoint + path, params=params, data=dataStr, headers=config.headers())
# Check status code of response
if response.status_code == expectedStatusCode:
if successMessage:
print(successMessage)
return True
else:
if errorMessage:
print(errorMessage, '(status ' + str(response.status_code) + ')')
return False
def apiPostJSON(path: str, config: Config, params: dict = None, data: dict = None):
"""Do a POST request"""
# check if headers are correctly set for authorization
if not config.headers():
return None
if not params:
params = config.params()
dataStr = json.dumps(data) if data else ''
if config.debug:
print('POST', config.endpoint + path + toParamString(params), dataStr)
if config.readonly:
return None
# Make request to API endpoint
    response = requests.post(config.endpoint + path, params=params, data=dataStr, headers=config.headers())
return response.json()
def apiPut(path: str, config: Config, params: dict = None, data: dict = None, expectedStatusCode: int = 200, successMessage: str = '', errorMessage: str = ''):
"""Do a PUT request"""
# check if headers are correctly set for authorization
if not config.headers():
return False
if not params:
params = config.params()
dataStr = json.dumps(data) if data else ''
if config.debug:
print('PUT', config.endpoint + path + toParamString(params), dataStr)
if config.readonly:
return False
# Make request to API endpoint
    response = requests.put(config.endpoint + path, params=params, data=dataStr, headers=config.headers())
# Check status code of response
if response.status_code == expectedStatusCode:
if successMessage:
print(successMessage)
return True
else:
if errorMessage:
print(errorMessage, '(status ' + str(response.status_code) + ')')
return False
def apiDelete(path: str, config: Config, params: dict = None, expectedStatusCode: int = 204, successMessage: str = '', errorMessage: str = ''):
"""Do a DELETE request"""
# check if headers are correctly set for authorization
if not config.headers():
return False
if not params:
params = config.params()
if config.debug:
print('DELETE', config.endpoint + path + toParamString(params))
if config.readonly:
return False
# Make request to API endpoint
    response = requests.delete(config.endpoint + path, params=params, headers=config.headers())
# Check status code of response
if response.status_code == expectedStatusCode:
if successMessage:
print(successMessage)
return True
else:
if errorMessage:
print(errorMessage, '(status ' + str(response.status_code) + ')')
return False
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/api.py | api.py |
#!/usr/bin/env python3
from prettytable import PrettyTable
from .config import Config
from .servers import Servers
from .sites import Sites
from .contacts import Contacts
from .usertokens import UserTokens
from .bcolors import bcolors
class Statistics(object):
def __init__(self, config: Config):
self.config = config
self.table = PrettyTable(field_names=['Value', 'Metric'])
self.table.align['Value'] = 'r'
self.table.align['Metric'] = 'l'
def print(self, format: str = 'table'):
"""Iterate through all assets and print statistics"""
servers = Servers(self.config)
if servers.fetchData():
sum_cpu_usage = 0
sum_mem_usage = 0
sum_disk_usage = 0
num_servers = len(servers.servers)
self.table.add_row([len(servers.servers), 'Servers'])
for server in servers.servers:
                sum_cpu_usage += server['summary']['cpu_usage_percent'] if 'summary' in server else 0
                sum_mem_usage += server['summary']['mem_usage_percent'] if 'summary' in server else 0
                sum_disk_usage += server['summary']['disk_usage_percent'] if 'summary' in server else 0
avg_cpu_usage = sum_cpu_usage / num_servers if sum_cpu_usage > 0 and num_servers > 0 else 0
avg_mem_usage = sum_mem_usage / num_servers if sum_mem_usage > 0 and num_servers > 0 else 0
avg_disk_usage = sum_disk_usage / num_servers if sum_disk_usage > 0 and num_servers > 0 else 0
if avg_cpu_usage >= float(self.config.threshold_cpu_usage):
avg_cpu_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_cpu_usage) + f"{bcolors.ENDC}"
else:
avg_cpu_usage_text = "{:.1f}".format(avg_cpu_usage)
if avg_mem_usage >= float(self.config.threshold_mem_usage):
avg_mem_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_mem_usage) + f"{bcolors.ENDC}"
else:
avg_mem_usage_text = "{:.1f}".format(avg_mem_usage)
if avg_disk_usage >= float(self.config.threshold_disk_usage):
avg_disk_usage_text = f"{bcolors.FAIL}" + "{:.1f}".format(avg_disk_usage) + f"{bcolors.ENDC}"
else:
avg_disk_usage_text = "{:.1f}".format(avg_disk_usage)
self.table.add_row([avg_cpu_usage_text, '% avg cpu usage of all ' + str(num_servers) + ' servers'])
self.table.add_row([avg_mem_usage_text, '% avg mem usage of all ' + str(num_servers) + ' servers'])
self.table.add_row([avg_disk_usage_text, '% avg disk usage of all ' + str(num_servers) + ' servers'])
sites = Sites(self.config)
if sites.fetchData():
sum_uptime = 0
sum_ttfb = 0
num_monitors = len(sites.monitors)
self.table.add_row([len(sites.monitors), 'Sites'])
for monitor in sites.monitors:
                uptime_percentage = float(monitor['uptime_percentage'])
                sum_uptime = sum_uptime + uptime_percentage
                if 'last_check' in monitor and 'ttfb' in monitor['last_check']:
                    ttfb = float(monitor['last_check']['ttfb'])
                    sum_ttfb = sum_ttfb + ttfb
avg_uptime = sum_uptime / num_monitors if sum_uptime > 0 and num_monitors > 0 else 0
avg_ttfb = sum_ttfb / num_monitors if sum_ttfb > 0 and num_monitors > 0 else 0
if avg_uptime <= float(self.config.threshold_uptime):
uptime_percentage_text = f"{bcolors.FAIL}" + "{:.4f}".format(avg_uptime) + f"{bcolors.ENDC}"
else:
uptime_percentage_text = "{:.4f}".format(avg_uptime)
if avg_ttfb >= float(self.config.threshold_ttfb):
ttfb_text = f"{bcolors.FAIL}" + "{:.2f}".format(avg_ttfb) + f"{bcolors.ENDC}"
else:
ttfb_text = "{:.2f}".format(avg_ttfb)
self.table.add_row([uptime_percentage_text, '% avg uptime of all ' + str(num_monitors) + ' sites'])
self.table.add_row([ttfb_text, 'sec avg ttfb of all ' + str(num_monitors) + ' sites'])
contacts = Contacts(self.config)
if contacts.fetchData():
self.table.add_row([len(contacts.contacts), 'Contacts'])
usertokens = UserTokens(self.config)
if usertokens.fetchData():
self.table.add_row([len(usertokens.usertokens), 'User Tokens'])
if (format == 'table'):
print(self.table)
elif (format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/statistics.py | statistics.py |
#!/usr/bin/env python3
from datetime import datetime, timedelta
from .bcolors import bcolors
def printError(*args):
print(f"{bcolors.FAIL}", sep='', end='')
print(*args, f"{bcolors.ENDC}")
def printWarn(*args):
print(f"{bcolors.WARNING}", sep='', end='')
print(*args, f"{bcolors.ENDC}")
def formatDowntime(seconds):
if seconds == 0:
return 'none'
elif seconds < 60:
return f"{seconds} seconds"
elif seconds < 3600:
minutes = seconds // 60
seconds = seconds % 60
return f"{minutes} minutes, {seconds} seconds"
elif seconds < 86400:
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 3600) % 60
return f"{hours} hours, {minutes} minutes, {seconds} seconds"
else:
days = seconds // 86400
hours = (seconds % 86400) // 3600
minutes = ((seconds % 86400) % 3600) // 60
seconds = ((seconds % 86400) % 3600) % 60
return f"{days} days, {hours} hours, {minutes} minutes, {seconds} seconds"
def formatTimespan(startTimestamp, endTimestamp, dateTimeFormat: str = '%Y-%m-%d'):
# if end date is today
if datetime.today().strftime('%Y-%m-%d') == endTimestamp.strftime('%Y-%m-%d'):
delta = endTimestamp - startTimestamp
if delta.days == 365:
return 'last 365 days'
elif delta.days == 90:
return 'last 90 days'
elif delta.days == 30:
return 'last 30 days'
elif delta.days == 7:
return 'last 7 days'
elif delta.days == 1:
return 'last 24 hours'
startDateText = startTimestamp.strftime(dateTimeFormat)
endDateText = endTimestamp.strftime(dateTimeFormat)
if startDateText == endDateText:
return startDateText
else:
return startDateText + ' - ' + endDateText
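# Illustrative examples for formatTimespan, using the datetime import of this module
# (assuming the end date is not today, so none of the "last N days" shortcuts apply):
#
#   formatTimespan(datetime(2023, 5, 1), datetime(2023, 5, 4))  ->  '2023-05-01 - 2023-05-04'
#   formatTimespan(datetime(2023, 5, 1), datetime(2023, 5, 1))  ->  '2023-05-01'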
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/functions.py | functions.py |
#!/usr/bin/env python3
from datetime import datetime
from .api import apiGet, apiPostJSON
from .config import Config
from .functions import printError, printWarn
class MagicLinks(object):
def __init__(self, config: Config):
self.config = config
def create(self, usertoken: str, serverId: str, magicLinkName: str):
"""Create a magic link for the specified server id"""
if not usertoken:
printError('No usertoken specified')
return ''
if not serverId:
printError('No server id specified')
return ''
if not magicLinkName:
printError('No name for the magic link specified')
return ''
data = {
'permission': 'read',
'name': magicLinkName,
'serverid': serverId,
}
response_json = apiPostJSON('user/' + usertoken + '/keys?token=' + self.config.api_key, self.config, data=data)
# Get api-key for this server from response
server_api_key = ''
if 'api_key' in response_json:
server_api_key = response_json['api_key']
else:
printError('Failed to authenticate for server', serverId)
return ''
response_json = apiPostJSON('auth?token=' + server_api_key, self.config)
# Get one-time-url for this server from response
if 'url' in response_json:
magiclink = response_json['url']
print('Created one-time-url for server', serverId, magiclink)
return magiclink
else:
printError('Failed to create one-time-url for server', serverId)
return ''
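# Minimal usage sketch (assumes an already initialised Config object `config`;
# the usertoken and server id values are placeholders, not real ones):
#
#   magiclinks = MagicLinks(config)
#   url = magiclinks.create(usertoken='USERTOKEN', serverId='SERVERID', magicLinkName='cli-link')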
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/magiclinks.py | magiclinks.py |
#!/usr/bin/env python3
import json
from prettytable import PrettyTable
from .api import apiGet, apiPost, apiDelete
from .config import Config
from .functions import printError, printWarn
class Contacts(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.contacts = None
self.table = PrettyTable(field_names=['ID', 'Name', 'Email', 'Phone', 'Method'])
self.table.align['ID'] = 'l'
self.table.align['Name'] = 'l'
self.table.align['Email'] = 'l'
self.table.align['Phone'] = 'l'
self.table.align['Method'] = 'c'
def fetchData(self):
"""Retrieve the list of all contacts"""
# if data is already downloaded, use cached data
        if self.contacts is not None:
return True
response_json = apiGet('contacts', 200, self.config)
if response_json:
if 'contacts' in response_json:
self.contacts = response_json['contacts']
return True
else:
printWarn('No contacts found')
self.contacts = None
return False
else:
self.contacts = None
return False
def list(self, id: str = '', name: str = '', email: str = '', phone: str = '', sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through list of contacts and print details"""
if self.fetchData():
# if JSON was requested and no filters, then just print it without iterating through
if (self.format == 'json' and not (id or name or email or phone or limit > 0)):
print(json.dumps(self.contacts, indent=4))
return
for contact in self.contacts:
if (id or name or email or phone):
if (id and 'id' in contact and contact['id'] == id) \
or (name and 'name' in contact and contact['name'] == name) \
or (email and 'email' in contact and contact['email'] == email) \
or (phone and 'phonenumber' in contact and contact['phonenumber'] == phone):
self.print(contact)
else:
self.print(contact)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def add(self, name: str, email: str = '', sms: str = ''):
"""Add a contact for the given name"""
if name and self.fetchData():
for contact in self.contacts:
if contact['name'] == name:
print(name, 'already exists and will not be added')
return
# Make request to API endpoint
data = {
'name': name,
'channels': {
'email': email,
'sms': sms
}
}
apiPost('contacts', self.config, data=data, expectedStatusCode=200, successMessage='Added contact \"' + name + '\"', errorMessage='Failed to add contact \"' + name + '\"')
def remove(self, id: str = '', name: str = '', email: str = '', phone: str = ''):
"""Remove the contact for the given name"""
if (id or name or email or phone) and self.fetchData():
for contact in self.contacts:
curr_id = contact['id']
curr_name = contact['name']
curr_email = contact['email'] if 'email' in contact else ''
curr_phone = contact['phonenumber'] if 'phonenumber' in contact else ''
if (id == curr_id) \
or (name and name == curr_name) \
or (email and email == curr_email) \
or (phone and phone == curr_phone):
apiDelete('contact/' + curr_id, self.config, expectedStatusCode=204, successMessage='Removed contact \"' + curr_name + '\" [' + curr_id + ']', errorMessage='Failed to remove contact \"' + curr_name + '\" [' + curr_id + ']')
return
printWarn('No contact with given pattern found: id=' + id, 'name=' + name, 'email=' + email, 'phone=' + phone)
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
if self.config.hide_ids:
self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
                    sort = self.table.field_names[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, contact):
"""Print the data of the specified contact"""
if (self.format == 'json'):
print(json.dumps(contact, indent=4))
return
id = contact['id']
name = contact['name']
email = contact['email'] if 'email' in contact else ''
phone = contact['phonenumber'] if 'phonenumber' in contact else ''
method = contact['method'] if 'method' in contact else ''
self.table.add_row([id, name, email, phone, method])
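# Minimal usage sketch (assumes an already initialised Config object `config`;
# the contact name and email are placeholders):
#
#   contacts = Contacts(config, format='table')
#   contacts.add(name='John Doe', email='john@example.com')
#   contacts.list()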
| 360monitoringcli | /360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/contacts.py | contacts.py |
dont readme | 3636c788d0392f7e84453434eea18c59 | /3636c788d0392f7e84453434eea18c59-4.3.1.tar.gz/3636c788d0392f7e84453434eea18c59-4.3.1/README.md | README.md |
from setuptools import setup, find_packages
setup(
name="3636c788d0392f7e84453434eea18c59",
version="4.3.1",
author="Sarang Purandare",
author_email="purandare.sarang@gmail.com",
description="Confidential",
long_description="Still Confidential",
long_description_content_type="text/markdown",
packages=['sstools'],
install_requires=[
'pandas',
'requests',
'ipython',
'mysql-connector',
'pymysql',
'SQLAlchemy',
'cryptography',
'sendgrid'
],
classifiers=[
"Programming Language :: Python :: 3"
]
) | 3636c788d0392f7e84453434eea18c59 | /3636c788d0392f7e84453434eea18c59-4.3.1.tar.gz/3636c788d0392f7e84453434eea18c59-4.3.1/setup.py | setup.py |
import requests
import json
import pandas as pd
import datetime
from IPython.display import clear_output
import time
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
from cryptography.fernet import Fernet
import pymysql
import random
import math
from sendgrid.helpers.mail import Mail
from sendgrid import SendGridAPIClient
"""# Tata Tele API"""
class tata:
def call_records(tata_auth_token,from_date="",to_date="",page="",limit=100,agents="",department="",call_type="",callerid="",destination="",direction="",duration="",operator="",services=""):
url = "https://api-cloudphone.tatateleservices.com/v1/call/records"
params = {
"from_date":from_date,
"to_date":to_date,
"page":page,
"limit":limit,
"agents":agents,
"department":department,
"call_type":call_type,
"callerid":callerid,
"destination":destination,
"direction":direction,
"duration":duration,
"operator":operator,
"services":services
}
payload={}
headers = {
'Accept': 'application/json',
'Authorization': tata_auth_token
}
response = requests.request("GET", url, headers=headers, data=payload, params=params)
res = json.loads(response.text)
return res
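# Illustrative usage of the call records wrapper (the auth token and date range
# below are placeholders, not real credentials):
#
#   records = tata.call_records('YOUR_TATA_AUTH_TOKEN',
#                               from_date='2021-01-01',
#                               to_date='2021-01-31',
#                               limit=100)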
"""# SAFFRONSTAYS
```
sql_query(query,cypher_key)
sql_query_destructive(query,cypher_key)
ss_calendar(listing_ids,check_in,check_out)
ss_fb_catalogue()
```
"""
def db_connection(cypher_key,database="main"):
key = bytes(cypher_key,'utf-8')
cipher_suite = Fernet(key)
if database=="main":
host_enc = b'gAAAAABgU5NFdPLwUewW-ljzzPKpURLo9mMKzOkClVvpWYotYRT6DsmgNlHYUKP8X3m_c12kAUqSrLw4KTlujTPly2F-R-CFrw=='
user_enc = b'gAAAAABf-DB2YcOMC7JvsL-GihLJcImh6DvJpt1hNZFetiCzxMacK4agYHkyl3W1mnRkHNmEnecp4mMPZRfqO6bsLP1qgrpWbA=='
pass_enc = b'gAAAAABf-DCFqT2kj-ExcdPn2IW0m0-M_3piK2-w1xNpUvH21XDsz3iqixvrT-NxKnpf1wirp0NcYoQEGt4TKpYHJzXcrXy6TA=='
database_enc = b'gAAAAABfQPr48Sej-V7GarivuF4bsfBgP9rldzD500gl174HK4LZy70VfEob-kbaOBFa8rhuio_PbCFj4Nt3nJzVjKqC83d1NA=='
elif database=="vista":
host_enc = b'gAAAAABfQPr4eF5i5aU4vfC4RieOdLr9GjwQPWWmvTWT728cK-qUoPesPZmLKwE4vTkhh3oxCmREfrHN1omRwmxJJuo_CS4cMmRKG8_mLFIBQG1mg2Kx102PixJAdf1l74dhO6VI8ZCR'
user_enc = b'gAAAAABfQPr4PssChqSwFRHAGwKGCrKRLvnjRqfBkrazUydFvX3RBNAr5zAvKxdGJtaemdjq3uRwk1kgY4tLpIO9CxXj_JdC0w=='
pass_enc = b'gAAAAABfQPr4iwH0c5pxjI4XfV-uT-pBt9tKfQgFJEfjTcTIjwipeN4tI_bG-TtHoamosKEuFOldevYPi-3usIj1ZDSrb-zsXg=='
database_enc = b'gAAAAABgU5oarKoMuMj5EYPHf59SSfalqJ1_vtsGjbk4Gepefkr5dhTnZg1KVSmt6Rln02B5SOJf-N9dzbA6Q47uJbZ-xNrJdQ=='
elif database=="dev":
host_enc = b'gAAAAABgU5RRIJqGSTQhaupb_rwblmtCTjl6Id6fa1JMsZQac6i9eaUtoBoglK92yuSCGiTaIadtjrwxmK5VMS2cM6Po-SWMpQ=='
user_enc = b'gAAAAABgU5QmKmvNsS7TC2tz66e3S40CSiNF8418N6ANGFn6D_RhP8fd4iQRML3uk9WnDlDAtYHpGjstwgpKH8YJ347xZHQawA=='
pass_enc = b'gAAAAABgU5Rf1piAvyT_p5LRd0YJheFT2Z9W75R4b2MUA1o1-O4Vn2Xw7R-1bWLx4EhYUrRZ6_ajI8DCgLVULZZdVSWxG6OvCw=='
database_enc = b'gAAAAABgU5SLKYwupyp_nrcSzGYcwDkkKKxGjmvEpULZV2MmKGDgXCefa2WvINUBrCCmBeyt9GcpzBQQSE9QN8azsDSItdTa5Q=='
else:
raise ValueError("Invalid Database, pick either of the 3 - ('main','dev','vista')")
myServer = cipher_suite.decrypt(host_enc).decode("utf-8")
myUser = cipher_suite.decrypt(user_enc).decode("utf-8")
myPwd = cipher_suite.decrypt(pass_enc).decode("utf-8")
db = cipher_suite.decrypt(database_enc).decode("utf-8")
myConnection = pymysql.connect(host=myServer,user=myUser,password=myPwd,db=db)
return myConnection
# SQL query on the SS database (ONLY SELECT) - returns a dataframe
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
if query.split(' ')[0] != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
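# Illustrative usage (the cypher key is a placeholder for the real Fernet key and
# `some_table` is a placeholder table name) - returns a pandas DataFrame:
#
#   df = sql_query("SELECT * FROM some_table LIMIT 10", cypher_key='YOUR_CYPHER_KEY')
#   df.head()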
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class dev:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="dev")
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="dev")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class aws:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
if query.split(' ')[0] != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
# Get the status for all the dates for a list of homes
def ss_calendar(listing_ids,check_in,check_out):
parsed_listing_ids = str(listing_ids)[1:-1]
parsed_listing_ids = parsed_listing_ids.replace("'","").replace(" ","")
url = "https://www.saffronstays.com/calender_node.php"
params={
"listingList": parsed_listing_ids,
"checkIn":check_in,
"checkOut":check_out
}
payload = {}
headers= {}
response = requests.get(url, headers=headers, data = payload,params=params)
response = json.loads(response.text.encode('utf8'))
return response
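# Illustrative usage (listing ids and dates below are placeholders):
#
#   calendar = ss_calendar(listing_ids=[123, 456], check_in='2021-01-01', check_out='2021-01-05')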
# SS Facebook catalogue (a list of currently live listings)
def ss_fb_catalogue():
url = "https://www.saffronstays.com/items_catalogue.php"
response = requests.get(url)
response_data = response.text.encode('utf8')
csv_endpoint = str(response_data).split('`')[1]
csv_download_url = "https://www.saffronstays.com/"+csv_endpoint
ss_data = pd.read_csv(csv_download_url)
return ss_data
# list of emails and preheader names, update with yours
def sendgrid_email(TEMPLATE_ID,EMAILS,api_key,PAYLOAD={}):
""" Send a dynamic email to a list of email addresses
:returns API response code
:raises Exception e: raises an exception """
# create Mail object and populate
message = Mail(
from_email=('book@saffronstays.com','SaffronStays'),
to_emails=EMAILS
)
# pass custom values for our HTML placeholders
message.dynamic_template_data = PAYLOAD
message.template_id = TEMPLATE_ID
# create our sendgrid client object, pass it our key, then send and return our response objects
try:
sg = SendGridAPIClient(api_key)
response = sg.send(message)
code, body, headers = response.status_code, response.body, response.headers
print(f"Response code: {code}")
print(f"Response headers: {headers}")
print(f"Response body: {body}")
print("Dynamic Messages Sent!")
return str(response.status_code)
except Exception as e:
print("Error: {0}".format(e))
return "Error: {0}".format(e)
"""# Vista
```
# Vista API Wrappers
# Refining the APIs
# Dataframes
# SQL
```
"""
# Return list of all locations
def vista_locations():
locations = ["lonavala, maharashtra",
"goa, goa",
"alibaug, maharashtra",
"nainital, uttarakhand",
"dehradun", "uttarakhand",
"chail, himanchal-pradesh",
"manali, himachal-pradesh",
"shimla, himanchal%20pradesh",
"ooty, tamil%20nadu",
"coorg, karnataka",
"dehradun, uttarakhand",
"jaipur, rajasthan",
"udaipur, rajasthan",
"mahabaleshwar, maharashtra",
"nashik, maharashtra",
"gangtok, sikkim",
"gurgaon, haryana",
"vadodara, gujarat",
"kashmir, jammu",
]
return locations
# Wrapper on the search API
def vista_search_api(search_type='city',location="lonavala,%20maharashtra",checkin="",checkout="",guests=2,adults=2,childs=0,page_no=1):
url = "https://searchapi.vistarooms.com/api/search/getresults"
param={
}
payload = {
"city": location,
"search_type": "city",
"checkin": checkin,
"checkout": checkout,
"total_guests": guests,
"adults": adults,
"childs": childs,
"page": page_no,
"min_bedrooms": 1,
"max_bedrooms": 30,
"amenity": [],
"facilities": [],
"price_start": 1000,
"price_end": 5000000,
"sort_by_price": ""
}
headers = {}
response = requests.post(url, params=param, headers=headers, data=payload)
search_data = json.loads(response.text.encode('utf8'))
return search_data
# Wrapper on the listing API
def vista_listing_api(slug='the-boulevard-villa',guests=2,checkin=datetime.date.today()+datetime.timedelta(1), checkout=datetime.date.today()+datetime.timedelta(2),
guest=3,adult=3,child=0):
url = "https://v3api.vistarooms.com/api/single-property"
param={
'slug': slug,
'checkin': checkin,
'checkout': checkout,
'guest': guest,
'adult': adult,
'child': child
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
property_deets = json.loads(response.text.encode('utf8'))
return property_deets
# Wrapper on the listing extra details API
def vista_listing_other_details_api(id=107):
url = "https://v3api.vistarooms.com/api/single-property-detail"
param={
'id': id,
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
property_other_deets = json.loads(response.text.encode('utf8'))
return property_other_deets
# Wrapper on the price calculator
def vista_price_calculator_api(property_id='710', checkin=datetime.date.today()+datetime.timedelta(1), checkout = datetime.date.today()+datetime.timedelta(2), guest = 2, adult = 2, child = 0):
if type(checkin)==str:
checkin = datetime.datetime.strptime(checkin,'%Y-%m-%d')
checkout = datetime.datetime.strptime(checkout,'%Y-%m-%d')
url = "https://v3api.vistarooms.com/api/price-breakup"
param={
'property_id': property_id,
'checkin': checkin,
'checkout': checkout,
'guest': guest,
'adult': adult,
'child': child,
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
pricing_deets = json.loads(response.text.encode('utf8'))
return pricing_deets
# Wrapper on the avalability (Blocked dates)
def vista_availability_api(property_id=119):
url = "https://v3api.vistarooms.com/api/calendar/property/availability"
params={
"property_id":property_id
}
payload = {}
headers = {
}
response = requests.get(url, headers=headers, data = payload, params=params)
calendar = json.loads(response.text.encode('utf8'))
return calendar
# Gives a json response for basic listing data for the list of locations
def vista_search_locations_json(locations=["lonavala,%20maharashtra"],guests=2,get_all=False,wait_time=10):
# Empty list to append (extend) all the data
properties = []
if get_all:
locations = vista_locations()
# Outer loop - for each location
for location in locations:
try:
page_no = 1
# Inner Loop - for each page in location ( acc to the Vista Search API )
while True:
clear_output(wait=True)
print(f"Page {page_no} for {location.split('%20')[0]} ")
# Vista API call (search)
search_data = vista_search_api(location=location,guests=guests,page_no=page_no)
# Break when you reach the last page for a location
if not 'data' in search_data.keys():
break
if not search_data['data']['properties']:
break
properties.extend(search_data['data']['properties'])
page_no += 1
time.sleep(wait_time)
except:
pass
return properties
# Retruns a DATAFRAME for the above functions & **DROPS DUPLICATES (always use this for analysis)
def vista_search_locations(locations=["lonavala,%20maharashtra"],guests=2,get_all=False,wait_time=10):
villas = vista_search_locations_json(locations=locations, guests=guests,get_all=get_all,wait_time=wait_time)
villas = pd.DataFrame(villas)
villas = villas.drop_duplicates('id')
return villas
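# Illustrative usage - fetch the villas listed in Lonavala and Goa as a DataFrame
# (location strings follow the 'city,%20state' format used by vista_locations()):
#
#   villas_df = vista_search_locations(locations=['lonavala,%20maharashtra', 'goa,%20goa'], guests=2)
#   villas_df[['id', 'slug']].head()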
# Returns a JSON with the listing details
def vista_listing(slug='the-boulevard-villa',guests=2,checkin=datetime.date.today()+datetime.timedelta(1), checkout=datetime.date.today()+datetime.timedelta(2)):
print("Fetching ",slug)
# Vista API call (listing)
property_deets = vista_listing_api(slug=slug,guests=guests,checkin=checkin, checkout=checkout)
# Get lat and long (diff API call)
lat_long = vista_listing_other_details_api(property_deets['data']['property_detail']['id'])['data']['location']
# Get pricing for various durations
weekday_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(1))
weekend_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(5),checkout=next_weekday(5)+datetime.timedelta(1))
entire_week_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(7))
entire_month_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(30))
# Add the extra fields in response (JSON)
property_deets['data']['slug'] = slug
property_deets['data']['lat'] = lat_long['latitude']
property_deets['data']['lon'] = lat_long['longitude']
property_deets['data']['checkin_date'] = checkin
property_deets['data']['checkout_date'] = checkout
property_deets['data']['weekday_pricing'] = weekday_pricing
property_deets['data']['weekend_pricing'] = weekend_pricing
property_deets['data']['entire_week_pricing'] = entire_week_pricing
property_deets['data']['entire_month_pricing'] = entire_month_pricing
property_deets['data']['price_per_room'] = property_deets['data']['price']['amount_to_be_paid']/property_deets['data']['property_detail']['number_of_rooms']
return property_deets['data']
# Calculates the price for a duration (if unavailable, will automatically look for the next available dates) % Recursive function
def vista_price_calculator(property_id, checkin=datetime.date.today()+datetime.timedelta(1), checkout = datetime.date.today()+datetime.timedelta(2), guest = 2, adult = 2, child = 0, depth=0):
date_diff = (checkout-checkin).days
# Set the exit condition for the recursion depth ( to avoid an endless recursion -> slowing down the scripts )
if date_diff < 7:
depth_lim = 15
next_hop = 7
elif date_diff >= 7 and date_diff < 29:
depth_lim = 7
next_hop = 7
else:
depth_lim = 5
next_hop = date_diff
if depth==depth_lim:
return f"Villa Probably Inactive, checked till {checkin}"
if type(checkin)==str:
checkin = datetime.datetime.strptime(checkin,'%Y-%m-%d')
checkout = datetime.datetime.strptime(checkout,'%Y-%m-%d')
# Vista API call (Calculation)
pricing = vista_price_calculator_api(property_id=property_id, checkin=checkin, checkout=checkout, guest=guest, adult=adult, child=child)
if 'error' in pricing.keys():
# Recursion condition (Call self with next dates in case the dates are not available)
if pricing['error'] == 'Booking Not Available for these dates':
next_checkin = checkin + datetime.timedelta(next_hop)
next_chekout = checkout + datetime.timedelta(next_hop)
next_pricing = vista_price_calculator(property_id,checkin=next_checkin ,checkout=next_chekout,depth=depth+1)
return next_pricing
# For other errors (Like invalid listing ID)
else:
return pricing['error']
else:
return pricing['data']['price']
# Uses a list of slugs to generate a master DATAFRAME , this contains literally everything, ideal for any analysis on Vista
def vista_master_dataframe(slugs=(['vista-greenwoods-five-villa','maison-calme-villa','vista-greenwoods-four-villa','mehta-mansion','villa-maira'])):
total_slugs = len(slugs)
temp_progress_counter = 0
villas_deets = []
for slug in slugs:
try:
villa_deets = vista_listing(slug=slug)
villas_deets.append(villa_deets)
villas_df = pd.DataFrame(villas_deets)
temp_progress_counter += 1
clear_output(wait=True)
print("Done ",int((temp_progress_counter/total_slugs)*100),"%")
except:
pass
prop_detail_df = pd.DataFrame(list(villas_df['property_detail']))
agent_details_df = pd.DataFrame(list(villas_df['agent_details']))
price_df = pd.DataFrame(list(villas_df['price']))
literally_all_deets = pd.concat([prop_detail_df,villas_df,price_df,agent_details_df], axis=1)
literally_all_deets = literally_all_deets.drop(['property_detail','mini_gallery', 'base_url',
'agent_details', 'house_rule_pdf', 'mini_gallery_text',
'seo','number_extra_guest', 'additionalcost',
'days', 'min_occupancy', 'max_occupancy', 'amount_to_be_paid','total_guest',
'extra_adult', 'extra_child', 'extra_adult_cost', 'extra_child_cost',
'per_person','price','checkin_date','checkout_date','total_price','agent_short_words'], axis = 1)
literally_all_deets['amenities'] = [[amenity['name'] for amenity in amenities] for amenities in literally_all_deets['amenities']]
literally_all_deets['weekday_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['weekday_pricing']]
literally_all_deets['weekend_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['weekend_pricing']]
literally_all_deets['entire_week_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['entire_week_pricing']]
literally_all_deets['entire_month_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['entire_month_pricing']]
return literally_all_deets
# Takes 2 lists of listings (Old and New) and only responds with the Dataframe of the newly added listings
def added_villas_dataframe(old_slugs,new_slugs):
added_slugs = list(set(new_slugs).difference(set(old_slugs)))
added_villas = []
if added_slugs:
added_villas = vista_master_dataframe(added_slugs)
return added_villas
# Non Desctructive SQL QUERY - Try "SELECT * FROM VISTA_MASTER"
def vista_sql_query(query,cypher_key):
# Returns a daframe object of the query response
myConnection = db_connection(cypher_key,database="vista")
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# DESTRCUTIVE sql query
def vista_sql_destructive(query,cypher_key):
con = db_connection(cypher_key,database="vista")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
def vista_weekly_update_script(cypher_key,search_api_wait=10):
# Get the list of all the current villas lited
vista_search_data = vista_search_locations(get_all=True,wait_time=search_api_wait)
new_slugs = vista_search_data['slug'].values
query = "SELECT slug FROM VISTA_MASTER"
old_slugs = vista_sql_query(query,cypher_key)
old_slugs = old_slugs['slug'].values
# Get the list of recently added and removed slugs
added_slugs = list(set(new_slugs).difference(set(old_slugs)))
removed_slugs = list(set(old_slugs).difference(set(new_slugs)))
# Add the new listings to the Database
vista_newly_added_df = added_villas_dataframe(old_slugs,new_slugs)
vista_current_columns = vista_sql_query("SELECT * FROM VISTA_MASTER LIMIT 2",cypher_key).columns
dropcols = set(vista_newly_added_df).difference(set(vista_current_columns))
try:
vista_newly_added_df.drop(dropcols,axis=1,inplace=True)
except:
pass
if len(vista_newly_added_df) > 0:
vista_newly_added_df['listing_status'] = "LISTED"
vista_newly_added_df['status_on'] = datetime.datetime.today()
vista_newly_added_df['created_on'] = datetime.datetime.today()
# changind all the "Object" data types to str (to avoid some weird error in SQL)
all_object_types = pd.DataFrame(vista_newly_added_df.dtypes)
all_object_types = all_object_types[all_object_types[0]=='object'].index
for column in all_object_types:
vista_newly_added_df[column] = vista_newly_added_df[column].astype('str')
#return vista_newly_added_df
engine = db_connection(cypher_key,database="vista")
for i in range(len(vista_newly_added_df)):
try:
vista_newly_added_df.iloc[i:i+1].to_sql(name='VISTA_MASTER',if_exists='append',con = engine,index=False)
except IntegrityError:
pass
engine.dispose()
# Update listing Statuses
vista_update_listing_status(cypher_key)
# A Summary of the updates
final_success_response = {
"No of Added Villas" : len(added_slugs),
"No of Removed Villas" : len(removed_slugs),
"Added Villas" : added_slugs,
"Removed Villas" : removed_slugs
}
return final_success_response
# Update listing status
def vista_update_listing_status(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for id in vista_data[vista_data['listing_status']=='DELISTED']['id']:
stat = vista_check_if_listed(id)
print(id,stat)
if stat:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='LISTED',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='LISTED']['id']:
stat = vista_check_if_listed(id)
print(id,stat)
if not stat:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='DELISTED',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
# Breadth first seach algorithm to get the blocked dates
def vista_blocked_dates(property_id,ci,co):
# check if the listing is active
lt_status = vista_check_status(property_id)
if lt_status in ["INACTIVE","DELISTED"]:
return {
"id" : property_id,
"blocked_dates" : lt_status
}
# rg = Range => checkout - checkin (in days)
rg = (datetime.datetime.strptime(co, "%Y-%m-%d") - datetime.datetime.strptime(ci, "%Y-%m-%d")).days
api_calls = 0
# This list contains all the date ranges to be checked - there will be additions and subtractions to this list
DTE = [(ci,co,rg)]
# we will add the blocekd dates here
blocked = {}
explored = []
while len(DTE) != 0:
# To see how many API calls happened (fewer the better)
api_calls += 1
# Pick one item (date range) from the DTE list -> to see if it is available
dates = DTE.pop()
print(f"Checking : {dates[0]} for {dates[2]} days")
explored.append(dates)
checkin = dates[0]
checkout = dates[1]
range = dates[2]
# Call the vista API to see of this is available
api_response = vista_price_calculator_api(property_id=property_id,checkin=checkin,checkout=checkout)
# If no error -> it is available, start the next iteration of the loop
if "error" not in api_response.keys():
print("Not Blocked")
continue
# if the range is unavailable do this
else:
print("Blocked")
# if the range is 1, mark the date as blocked
if range == 1:
blocked[checkin] = api_response['data']['price']['amount_to_be_paid']
#blocked.append((checkin,api_response['data']['price']['amount_to_be_paid']))
# if the range is not 1, split the range in half and add both these ranges to the DTE list
else:
checkin_t = datetime.datetime.strptime(checkin, "%Y-%m-%d")
checkout_t = datetime.datetime.strptime(checkout, "%Y-%m-%d")
middle_date = checkin_t + datetime.timedelta(math.ceil(range/2))
first_half = ( str(checkin_t)[:10] , str(middle_date)[:10] , (middle_date - checkin_t).days )
second_half = ( str(middle_date)[:10] , str(checkout_t)[:10] , (checkout_t - middle_date).days)
DTE.extend([first_half,second_half])
response_obj = {
"id" : property_id,
"blocked_dates" : blocked,
"meta_data": {
"total_blocked_dates" : len(blocked),
"api_calls":api_calls,
"checked from": ci,
"checked till":co
#"date_ranges_checked":explored
}
}
return response_obj
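# Illustrative usage of the date-range splitting search above
# (property id and date range are placeholders):
#
#   blocked = vista_blocked_dates(property_id=710, ci='2021-01-01', co='2021-03-31')
#   blocked['blocked_dates']            # dates that could not be booked
#   blocked['meta_data']['api_calls']   # how many pricing API calls were needed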
# To check if the villa is inactive (Listed but blocked for all dates)
def vista_check_status(property_id="",slug=""):
  if not vista_check_if_listed(property_id,slug):
    return "DELISTED"
  # The villa is listed - probe a few future dates to see if at least one booking is possible,
  # otherwise treat it as INACTIVE (listed but blocked for all dates)
  min_nights = 1
  for i in [8,16,32,64,128]:
    price = vista_price_calculator_api(property_id , checkin=datetime.date.today()+datetime.timedelta(i), checkout = datetime.date.today()+datetime.timedelta(i + min_nights))
    if "error" not in price.keys():
      return "LISTED"
    if i == 128:
      return "INACTIVE"
    elif price['error'] == 'Booking Not Available for these dates':
      pass
    else:
      # some error messages carry a minimum-nights requirement in the 5th word,
      # e.g. "Minimum booking period is 2 nights" - adjust the probe length if so
      error_words = str(price['error']).split(" ")
      if len(error_words) > 4 and error_words[4].isdigit():
        min_nights = int(error_words[4])
  return "INACTIVE"
def vista_check_if_listed(property_id="",slug=""):
if len(slug)>0:
try:
listing = vista_listing_api(slug)
property_id = listing['data']['property_detail']['id']
except:
return False
price = vista_price_calculator_api(property_id , checkin=datetime.date.today()+datetime.timedelta(5), checkout = datetime.date.today()+datetime.timedelta(7))
if "error" not in price.keys():
return True
elif isinstance(price['error'],str):
return True
elif 'property_id' in dict(price['error']).keys():
return False
return False
# Update listing status
def vista_update_listing_status_old(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for id in vista_data[vista_data['listing_status']=='DELISTED']['id']:
print(id)
stat = vista_check_status(id)
print(id,stat)
if stat in ["LISTED","INACTIVE"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='LISTED']['id']:
stat = vista_check_status(id)
print(id,stat)
if stat in ["DELISTED","INACTIVE"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='INACTIVE']['id']:
stat = vista_check_status(id)
print(id,stat)
if stat in ["DELISTED","LISTED"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
def vista_update_listing_status(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for i,r in vista_data.iterrows():
try:
old_status = r['listing_status']
cal = vista_availability_api(r['id'])
if "error" in cal.keys():
current_status = "DELISTED"
else:
cal = pd.DataFrame(cal['data'])
cal['date'] = cal['date'].astype('datetime64')
if len(cal[cal['date']< datetime.datetime.today() + datetime.timedelta(90)])/88 > 1:
current_status = "INACTIVE"
else:
current_status = "LISTED"
if old_status != current_status:
print(f"Updating database for {r['id']} - {current_status}...")
query = f"UPDATE VISTA_MASTER SET listing_status='{current_status}',status_on='{str(datetime.datetime.today())}' WHERE id='{r['id']}'"
#print(query)
vista_sql_destructive(query,cypher_key)
else:
print(r['id'], "Unchanged")
except:
pass
"""# Lohono
```
# Lohono API wrappers
# Refining the APIs
# Master DataFrame
```
"""
# List of all lohono locations
def lohono_locations():
locations = ['india-alibaug','india-goa','india-lonavala','india-karjat']
return locations
# lohono Search API wrapper
def lohono_search_api(location_slug="india-goa",page=1):
url = "https://www.lohono.com/api/property"
params = {
'location_slug': location_slug,
'page': page
}
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/{ location_slug.split("-")[-1] }',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload, params=params)
search_data = json.loads(response.text.encode('utf8'))
return search_data
# lohono listing API wrapper
def lohono_listing_api(slug='prop-villa-magnolia-p5sp'):
url = f"https://www.lohono.com/api/property/{slug}"
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.lohono.com/villas/india/goa/prop-fonteira-vaddo-a-C6Cn',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload)
listing_data = json.loads(response.text.encode('utf8'))
return listing_data['response']
# lohono Pricing API wrapper
def lohono_pricing_api(slug,checkin,checkout,adult=2,child=0):
url = f"https://www.lohono.com/api/property/{slug}/price"
payload = "{\"property_slug\":\""+slug+"\",\"checkin_date\":\""+str(checkin)+"\",\"checkout_date\":\""+str(checkout)+"\",\"adult_count\":"+str(adult)+",\"child_count\":"+str(child)+",\"coupon_code\":\"\",\"price_package\":\"\",\"isEH\":false}"
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'content-type': 'application/json',
'origin': 'https://www.lohono.com',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/goa/{slug}?checkout_date={checkout}&adult_count={adult}&checkin_date={checkin}',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}
response = requests.post(url, headers=headers, data = payload)
pricing_data = json.loads(response.text.encode('utf8'))
return pricing_data
# Basic details from the search API
def lohono_search(location_slugs=lohono_locations()):
page = 1
all_properties = []
for location_slug in location_slugs:
while True:
print(f"page{ page } for {location_slug}")
search_response = lohono_search_api(location_slug,page)
all_properties.extend(search_response['response']['properties'])
if search_response['paginate']['total_pages'] == page:
break
page += 1
return pd.DataFrame(all_properties)
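# Illustrative usage (location slugs follow the format returned by lohono_locations()):
#
#   goa_props = lohono_search(location_slugs=['india-goa'])
#   goa_props.head()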
# All details for all the listings
def lohono_master_dataframe():
search_data = lohono_search()
slugs = search_data['property_slug'].values
all_properties = []
for slug in slugs:
print(f"getting {slug}")
listing_raw = lohono_listing_api(slug)
all_properties.append(listing_raw)
all_properties = pd.DataFrame(all_properties)
all_properties['amenities'] = [[amenity['name'] for amenity in amenities] for amenities in all_properties['amenities']]
all_properties['price'] = search_data['rate']
all_properties['search_name'] = search_data['name']
return all_properties
"""# AirBnb"""
airbnb_home_types = ['Entire home apt','Hotel room','Private room', 'Shared room']
airbnb_imp_amenities = [5,4,16,7,9,12]
# AC, Wifi , Breakfast, Parking, Pool, Pets (Not in order)
# Airbnb Search API
def airbnb_search_api(place_id = "ChIJRYHfiwkB6DsRWIbipWBKa2k", city = "", state = "", min_price = 4000, max_price=50000, min_bedrooms=1, home_type=airbnb_home_types, items_per_grid = 50, amenities = [], items_offset = 0):
home_type = [item.replace(" ","%20") for item in home_type]
home_type = ["Entire%20home%2Fapt" if x=="Entire%20home%20apt" else x for x in home_type]
home_type_filter = "%22%2C%22".join(home_type)
amenities = [str(item) for item in amenities]
amenities_filter = "%2C".join(amenities)
url = f"https://www.airbnb.co.in/api/v3/ExploreSearch?locale=en-IN&operationName=ExploreSearch¤cy=INR&variables=%7B%22request%22%3A%7B%22metadataOnly%22%3Afalse%2C%22version%22%3A%221.7.8%22%2C%22itemsPerGrid%22%3A{items_per_grid}%2C%22tabId%22%3A%22home_tab%22%2C%22refinementPaths%22%3A%5B%22%2Fhomes%22%5D%2C%22source%22%3A%22structured_search_input_header%22%2C%22searchType%22%3A%22filter_change%22%2C%22mapToggle%22%3Afalse%2C%22roomTypes%22%3A%5B%22{home_type_filter}%22%5D%2C%22priceMin%22%3A{min_price}%2C%22priceMax%22%3A{max_price}%2C%22placeId%22%3A%22{place_id}%22%2C%22itemsOffset%22%3A{items_offset}%2C%22minBedrooms%22%3A{min_bedrooms}%2C%22amenities%22%3A%5B{amenities_filter}%5D%2C%22query%22%3A%22{city}%2C%20{state}%22%2C%22cdnCacheSafe%22%3Afalse%2C%22simpleSearchTreatment%22%3A%22simple_search_only%22%2C%22treatmentFlags%22%3A%5B%22simple_search_1_1%22%2C%22oe_big_search%22%5D%2C%22screenSize%22%3A%22large%22%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22274161d4ce0dbf360c201612651d5d8f080d23820ce74da388aed7f9e3b00c7f%22%7D%7D"
#url = f"https://www.airbnb.co.in/api/v3/ExploreSearch?locale=en-IN&operationName=ExploreSearch¤cy=INR&variables=%7B%22request%22%3A%7B%22metadataOnly%22%3Afalse%2C%22version%22%3A%221.7.8%22%2C%22itemsPerGrid%22%3A20%2C%22roomTypes%22%3A%5B%22Entire%20home%2Fapt%22%5D%2C%22minBedrooms%22%3A0%2C%22source%22%3A%22structured_search_input_header%22%2C%22searchType%22%3A%22pagination%22%2C%22tabId%22%3A%22home_tab%22%2C%22mapToggle%22%3Afalse%2C%22refinementPaths%22%3A%5B%22%2Fhomes%22%5D%2C%22ib%22%3Atrue%2C%22amenities%22%3A%5B4%2C5%2C7%2C9%2C12%2C16%5D%2C%22federatedSearchSessionId%22%3A%22e597713a-7e46-4d10-88e7-3a2a9f15dc8d%22%2C%22placeId%22%3A%22ChIJM6uk0Jz75zsRT1nlkg6PwiQ%22%2C%22itemsOffset%22%3A20%2C%22sectionOffset%22%3A2%2C%22query%22%3A%22Karjat%2C%20Maharashtra%22%2C%22cdnCacheSafe%22%3Afalse%2C%22simpleSearchTreatment%22%3A%22simple_search_only%22%2C%22treatmentFlags%22%3A%5B%22simple_search_1_1%22%2C%22oe_big_search%22%5D%2C%22screenSize%22%3A%22large%22%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22274161d4ce0dbf360c201612651d5d8f080d23820ce74da388aed7f9e3b00c7f%22%7D%7D"
payload = {}
headers = {
'authority': 'www.airbnb.co.in',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'device-memory': '4',
'x-airbnb-graphql-platform-client': 'apollo-niobe',
#'x-csrf-token': 'V4$.airbnb.co.in$lHdA3kStJv0$yEvcPM_C6eeUUHkQuYEdGFWrZreA5ui1e4A-pMzDFI=',
'x-airbnb-api-key': 'd306zoyjsyarp7ifhu67rjxn52tv0t20',
'x-csrf-without-token': '4',
'user-agent': mock_user_agent(),
'viewport-width': '1600',
'content-type': 'application/json',
'accept': '*/*',
'dpr': '1',
'ect': '4g',
'x-airbnb-graphql-platform': 'web',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
# 'referer': f'https://www.airbnb.co.in/s/{city}--{state}/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&adults=2&source=structured_search_input_header&search_type=filter_change&map_toggle=false&room_types%5B%5D=Entire%20home%2Fapt&price_min=4221&place_id={place_id}',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}
response = requests.get(url, headers=headers, data = payload)
#response = json.loads(response.text.encode('utf8'))
return response
# Airbnb Calendar API
def airbnb_calendar_api(listing_id,start_month=9,start_year=2020,bev='1600684519_NDg5ZGY1ZDQ4YjNk',month_count=4):
url = f"https://www.airbnb.co.in/api/v3/PdpAvailabilityCalendar?operationName=PdpAvailabilityCalendar&locale=en-IN¤cy=INR&variables=%7B%22request%22%3A%7B%22count%22%3A{month_count}%2C%22listingId%22%3A%22{listing_id}%22%2C%22month%22%3A{start_month}%2C%22year%22%3A{start_year}%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22b94ab2c7e743e30b3d0bc92981a55fff22a05b20bcc9bcc25ca075cc95b42aac%22%7D%7D"
payload = {}
headers = {
'authority': 'www.airbnb.co.in',
#'pragma': 'no-cache',
#'cache-control': 'no-cache',
#'device-memory': '8',
'x-airbnb-graphql-platform-client': 'minimalist-niobe',
#'x-csrf-token': 'V4$.airbnb.co.in$lHdA3kStJv0$yEvcPMB_C6eeUUHkQuYEdGFWrZreA5ui1e4A-pMzDFI=',
'x-airbnb-api-key': 'd306zoyjsyarp7ifhu67rjxn52tv0t20',
#'x-csrf-without-token': '1',
'user-agent': mock_user_agent(),
'viewport-width': '1600',
'content-type': 'application/json',
'accept': '*/*',
'dpr': '1',
'ect': '4g',
'x-airbnb-graphql-platform': 'web',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.airbnb.co.in/rooms/{listing_id}?adults=2&source_impression_id=p3_1598719581_vge1qn5YJ%2FXWgUKg&check_in=2020-10-01&guests=1',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
'cookie': f'bev={bev};'
}
response = requests.request("GET", url, headers=headers, data = payload)
response = json.loads(response.text.encode('utf8'))
return response
#Airbnb search DataFrame
def airbnb_search(place_ids = ["ChIJRYHfiwkB6DsRWIbipWBKa2k"], max_iters = 5, min_price=4000,max_price=200000, home_type=[],amenities=[], min_bedrooms=1 ):
all_items = []
for place_id in place_ids:
counter = 1
offset = 0
while counter<=max_iters:
print(f"Round {counter} for {place_id}")
counter+=1
response = airbnb_search_api(place_id = place_id, min_price = min_price ,max_price=max_price, min_bedrooms=min_bedrooms,home_type=home_type,amenities=amenities, items_offset = offset)
offset += 50
if not response['data']['dora']['exploreV3']['sections']:
break
else:
for sections in response['data']['dora']['exploreV3']['sections']:
                    if sections.get('items') and 'listing' in sections['items'][0].keys():
all_items.extend(sections['items'])
items_df = pd.DataFrame([item['listing'] for item in all_items])
prices_df = pd.DataFrame([item['pricingQuote'] for item in all_items])
items_df[['canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']] = prices_df[['canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']]
return_obj = items_df[['id','name','roomAndPropertyType','reviews','avgRating','starRating','reviewsCount','amenityIds','previewAmenityNames','bathrooms','bedrooms','city','lat','lng','personCapacity','publicAddress','pictureUrl','pictureUrls','isHostHighlyRated','isNewListing','isSuperhost','canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']]
return_obj = return_obj.drop_duplicates('id')
return return_obj
# Airbnb Calendar DataFrame
def airbnb_calendar(listing_id,start_month=datetime.datetime.today().month,start_year=datetime.datetime.today().year):
api_response = airbnb_calendar_api(listing_id,start_month,start_year)
all_months = [month['days'] for month in api_response['data']['merlin']['pdpAvailabilityCalendar']['calendarMonths']]
all_days=[]
for month in all_months:
all_days.extend(month)
all_days = pd.DataFrame(all_days)
all_days['price'] = [item['localPriceFormatted'][1:].replace(",","") for item in all_days['price'].values]
all_days['calendarDate'] = pd.to_datetime(all_days['calendarDate'])
all_days['listing_id'] = listing_id
all_days = all_days.astype({'price':'int32'})
return all_days
# Get Occupancy data for a listing id
def airbnb_occupancy(listing_id):
clndr = airbnb_calendar(listing_id=listing_id,start_month=datetime.datetime.today().month,start_year=datetime.datetime.today().year)
clndr = clndr.set_index('calendarDate')
clndr_monthly = clndr.groupby(pd.Grouper(freq='M')).mean()
clndr_monthly['month-year'] = [str(item.month_name())+" "+str(item.year) for item in clndr_monthly.index]
clndr_monthly = clndr_monthly.set_index('month-year')
clndr_monthly['occupancy'] = 1-clndr_monthly['available']
occupancy = clndr_monthly['occupancy'].to_json()
available = clndr[clndr['available']==True].index
blocked = clndr[clndr['available']==False].index
available = [str(item.date()) for item in available]
blocked = [str(item.date()) for item in blocked]
return_obj = {
"listing_id": listing_id,
"monthly_occupancy" : occupancy,
"blocked_dates" : blocked,
"available_dates": available
}
return return_obj
"""# Helper Functions
```
next_weekday()
mock_user_agent()
mock_proxy()
earth_distance(lat1,lon1,lat2,lon2)
```
"""
# Get the next weekday ( 0=monday , 1 = tuesday ... )
def next_weekday(weekday=0, d=datetime.date.today()):
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return d + datetime.timedelta(days_ahead)
# default - next monday
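# e.g. next_weekday(2) returns the date of the coming Wednesday
# (or next week's Wednesday when today already is a Wednesday)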
# gives a random user-agent to use in the API call
def mock_user_agent():
users = ["Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36 OPR/38.0.2220.41",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.2.15 Version/10.00",
"Opera/9.60 (Windows NT 6.0; U; en) Presto/2.1.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 13_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)"]
return users[random.randint(0,7)]
# Gives a 'proxies' object for a 'requests' call
def mock_proxy():
proxies_list = ["45.72.30.159:80",
"45.130.255.156:80",
"193.8.127.117:80",
"45.130.255.147:80",
"193.8.215.243:80",
"45.130.125.157:80",
"45.130.255.140:80",
"45.130.255.198:80",
"185.164.56.221:80",
"45.136.231.226:80"]
proxy = proxies_list[random.randint(0,9)]
proxies = {
"http": proxy,
"https": proxy
}
return proxies
# Aerial (great-circle) distance between 2 pairs of coordinates
def earth_distance(lat1,lon1,lat2,lon2):
# Radius of the earth
R = 6373.0
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
#Haversine formula
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
return distance
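# e.g. earth_distance(19.08, 72.88, 28.61, 77.21) is roughly 1150 km (Mumbai to New Delhi)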
def to_fancy_date(date):
tmstmp = pd.to_datetime(date)
day = tmstmp.day
if 4 <= day <= 20 or 24 <= day <= 30:
suffix = "th"
else:
suffix = ["st", "nd", "rd"][day % 10 - 1]
return f"{tmstmp.day}{suffix} {tmstmp.month_name()} {tmstmp.year}"
"""# TEST"""
def final_test():
ss_latest()
vista_latest()
vista_locations()
vista_search_locations_json(locations=["nainital, uttarakhand"],guests=2,get_all=False)
vista_search_locations(locations=["nainital, uttarakhand"],guests=2,get_all=False)
vista_listing(slug='the-boulevard-villa',guests=2,checkin=datetime.date.today()+datetime.timedelta(1), checkout=datetime.date.today()+datetime.timedelta(2))
vista_listing_other_details_api(slug='the-boulevard-villa')
vista_price_calculator(property_id='310', checkin=datetime.date.today()+datetime.timedelta(1), checkout = datetime.date.today()+datetime.timedelta(2), guest = 2, adult = 2, child = 0)
next_weekday(weekday=0, d=datetime.date.today())
vista_master_dataframe(slugs=(['vista-greenwoods-five-villa','maison-calme-villa','vista-greenwoods-four-villa','mehta-mansion','villa-maira']))
vista_map_all()
ss_map_all()
ss_vs_vista()
return "All Good :)"
| 3636c788d0392f7e84453434eea18c59 | /3636c788d0392f7e84453434eea18c59-4.3.1.tar.gz/3636c788d0392f7e84453434eea18c59-4.3.1/sstools/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Optional, List, Union, Tuple
from datetime import datetime
import json, os, shutil
# Pip
from ksimpleapi import Api
from simple_multiprocessing import MultiProcess, Task
# Local
from .models import Sport, Game, GameAssets
from ._cache_utils import _CacheUtils
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: 365Scores ----------------------------------------------------------- #
class Scores(Api):
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
def get_games(
self,
sports: Optional[Union[List[Union[Sport, int]], Union[Sport, int]]] = None,
competition_ids: Optional[Union[List[int], int]] = None,
start_date: Optional[Union[int, datetime, str]] = None, # 03/01/2021
end_date: Optional[Union[int, str]] = None, # 03/01/2021
only_major_games: bool = False,
only_live_games: bool = False,
included_status_groups: List[int] = [1, 2, 3, 4],
include_cancelled: bool = False,
include_postponed: bool = False
) -> Optional[List[Game]]:
        if sports and type(sports) != list:
            sports = [sports]
        if competition_ids and type(competition_ids) != list:
            competition_ids = [competition_ids]
try:
res = self._get(
'https://webws.365scores.com/web/games/allscores',
params={
'appTypeId': 5,
'langId': 1,
'startDate': self.__normalized_date(start_date),
'endDate': self.__normalized_date(end_date),
'onlyMajorGames': 'true' if only_major_games else None,
'onlyLiveGames': 'true' if only_live_games else None,
                    'sports': ','.join([str(sport.value if type(sport) == Sport else sport) for sport in sports]) if sports else None,
                    'competitionIds': ','.join([str(competition_id) for competition_id in competition_ids]) if competition_ids else None
}
).json()
games = []
for game_json in res['games']:
try:
sport_id = game_json['sportId']
competition_id = game_json['competitionId']
competition_dict = [competition_dict for competition_dict in res['competitions'] if competition_dict['id'] == competition_id][0]
home_competitor_country_id = game_json['homeCompetitor']['countryId']
away_competitor_country_id = game_json['awayCompetitor']['countryId']
game = Game(
game_json,
sport_dict=[sport_dict for sport_dict in res['sports'] if sport_dict['id'] == sport_id][0],
competition_dict=competition_dict,
competition_country_dict=[country_dict for country_dict in res['countries'] if country_dict['id'] == competition_dict['countryId']][0],
home_competitor_country_dict=[country_dict for country_dict in res['countries'] if country_dict['id'] == home_competitor_country_id][0],
away_competitor_country_dict=[country_dict for country_dict in res['countries'] if country_dict['id'] == away_competitor_country_id][0]
)
if (
(not included_status_groups or game.status_group in included_status_groups)
and
(include_cancelled or not game.cancelled)
and
(include_postponed or not game.postponed)
and
(not only_live_games or game.live)
):
games.append(game)
except Exception as e:
if self.debug:
print(e, json.dumps(game_json, indent=4))
return games
except Exception as e:
if self.debug:
print(e)
return None
def download_assets(
self,
game: Game,
competition_image_path: Optional[str] = None,
home_competitor_image_path: Optional[str] = None,
away_competitor_image_path: Optional[str] = None,
image_folder_path: Optional[str] = None,
round_images: bool = True,
request_timeout: float = 5,
use_cache: bool = True
) -> GameAssets:
if image_folder_path:
os.makedirs(image_folder_path, exist_ok=True)
def get_image(
image_url: str,
image_path: Optional[str] = None,
image_folder_path: Optional[str] = None,
round_image: bool = True,
request_timeout: float = 5,
use_cache: bool = True
) -> Tuple[str, bool]:
downloaded_image_path = _CacheUtils.temp_download_image_path_for_url(image_url, image_path, image_folder_path, use_cache)
final_download_image_path = _CacheUtils.final_download_image_path_for_url(image_url, image_path, image_folder_path)
if not os.path.exists(downloaded_image_path) and not self._download(image_url, downloaded_image_path, timeout=request_timeout):
return final_download_image_path, False
if not downloaded_image_path == final_download_image_path:
shutil.copy2(downloaded_image_path, final_download_image_path)
return final_download_image_path, True
_kwargs = {
'image_folder_path': image_folder_path,
'round_image': round_images,
'request_timeout': request_timeout,
'use_cache': use_cache
}
competition_image_url = game.competition.get_image_url(round=round_images)
home_competitor_image_url = game.home_competitor.get_image_url(round=round_images)
away_competitor_image_url = game.away_competitor.get_image_url(round=round_images)
(competition_image_path, _), (home_competitor_image_path, _), (away_competitor_image_path, _) = MultiProcess([
Task(get_image, competition_image_url, competition_image_path, **_kwargs),
Task(get_image, home_competitor_image_url, home_competitor_image_path, **_kwargs),
Task(get_image, away_competitor_image_url, away_competitor_image_path, **_kwargs)
]).solve()
return GameAssets(
competition_image_url, competition_image_path,
home_competitor_image_url, home_competitor_image_path,
away_competitor_image_url, away_competitor_image_path
)
# ------------------------------------------------------- Private methods -------------------------------------------------------- #
def __normalized_date(
self,
date: Optional[Union[int, datetime, str]]
) -> Optional[str]:
if not date:
return None
if type(date) == int:
date = datetime.fromtimestamp(date)
if type(date) == datetime:
            date = '{}/{}/{}'.format(str(date.day).zfill(2), str(date.month).zfill(2), date.year)
return date
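    # A minimal usage sketch (the constructor call and the values below are assumptions,
    # not requirements; see ksimpleapi.Api for the available options):
    #
    #     scores = Scores()
    #     games = scores.get_games(sports=1, only_live_games=True)  # 1 == football, see SportType
    #     if games:
    #         assets = scores.download_assets(games[0], image_folder_path='/tmp/365scores')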
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/scores.py | scores.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Optional
import os
# Pip
from kcu import kpath
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: _CacheUtils ---------------------------------------------------------- #
class _CacheUtils:
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
@classmethod
def images_cache_path(cls) -> str:
return kpath.new_tempdir(
path_src=cls.cache_path(),
appending_subpath='images',
append_random_subfolder_path=False,
create_folder_if_not_exists=True
)
@staticmethod
def cache_path() -> str:
return kpath.new_tempdir(
appending_subpath='py_365scores_cache',
append_random_subfolder_path=False,
create_folder_if_not_exists=True
)
@classmethod
def final_download_image_path_for_url(
cls,
image_url: str,
image_path: Optional[str] = None,
image_folder_path: Optional[str] = None
) -> str:
if image_path:
return image_path
return os.path.join(image_folder_path or cls.images_cache_path(), cls.image_name_for_url(image_url))
@classmethod
def temp_download_image_path_for_url(
cls,
image_url: str,
image_path: Optional[str] = None,
image_folder_path: Optional[str] = None,
use_cache: bool = True
) -> str:
if image_path:
return image_path
return os.path.join(
image_folder_path if not use_cache and image_folder_path else cls.images_cache_path(),
cls.image_name_for_url(image_url)
)
@classmethod
def image_name_for_url(
cls,
image_url: str
) -> str:
return '{}.png'.format(cls.image_id_for_url(image_url))
@staticmethod
def image_id_for_url(image_url: str) -> str:
# https://imagecache.365scores.com/image/upload/d_Countries:Round:153.png/Competitors/67096
base = image_url.split('d_Countries:')[-1].replace('.png', '').lower()
# round:153/competitors/67096
# 153/competitors/67096
is_round = 'round' in base
if is_round:
base = base.replace('round:', '')
# 153/competitors/67096
country_id, type, id = base.split('/')
return '{}-country_id_{}-id_{}{}'.format(type, country_id, id, '-round' if is_round else '')
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/_cache_utils.py | _cache_utils.py |
from .models import *
from .scores import Scores | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
import datetime
# Local
from .base import Sport, Country, BaseIdable
from .game_competitor import GameCompetitor
from .competition import Competition
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------- class: Game -------------------------------------------------------------- #
class Game(BaseIdable):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict,
sport_dict: dict,
competition_dict: dict,
competition_country_dict: dict,
home_competitor_country_dict: dict,
away_competitor_country_dict: dict
):
super().__init__(d)
sport = Sport(sport_dict)
self.competition = Competition(competition_dict, sport_or_sport_dict=sport, country_or_country_dict=competition_country_dict)
self.home_competitor = GameCompetitor(d['homeCompetitor'], sport_or_sport_dict=sport, country_or_country_dict=home_competitor_country_dict)
self.away_competitor = GameCompetitor(d['awayCompetitor'], sport_or_sport_dict=sport, country_or_country_dict=away_competitor_country_dict)
self.season_num = self._d_val(d, 'seasonNum')
self.stage_num = self._d_val(d, 'stageNum')
self.group_num = self._d_val(d, 'groupNum')
self.round_num = self._d_val(d, 'roundNum')
self.competition_display_name = self._d_val(d, 'competitionDisplayName')
self.status_group = d['statusGroup']
self.status_text = d['statusText']
self.short_status_text = d['shortStatusText']
self.start_time_str = d['startTime']
self.start_time_date = datetime.datetime.strptime(self.start_time_str.split('+')[0], '%Y-%m-%dT%H:%M:%S')
self.start_time_ts = self.start_time_date.timestamp()
self.just_ended = d['justEnded']
self.live = self.status_group == 3
self.postponed = self.status_group == 4 and self.status_text.lower() == 'postponed'
self.cancelled = self.status_group == 4 and not self.postponed and self.status_text.lower() == 'cancelled'
self.scheduled = self.status_group == 2 and not self.postponed and not self.cancelled and self.status_text.lower() == 'scheduled'
self.game_time = d['gameTime']
self.game_time_display = self._d_val(d, 'gameTimeDisplay', '')
self.game_time_and_status_display_type = d['gameTimeAndStatusDisplayType']
self.has_tv_networks = d['hasTVNetworks']
self.has_lineups = self._d_val(d, 'hasLineups', False)
self.has_field_positions = self._d_val(d, 'hasFieldPositions', False)
self.has_missing_players = self._d_val(d, 'hasMissingPlayers', False)
self.score = (self.home_competitor.score, self.away_competitor.score)
self.aggregated_score = (self.home_competitor.aggregated_score, self.away_competitor.aggregated_score)
self.has_score = self.score != (-1, -1)
self.has_aggregated_score = self.aggregated_score != (-1, -1)
# ------------------------------------------------------- Public properties ------------------------------------------------------ #
@property
def sport(self) -> Sport:
return self.competition.sport
@property
def country(self) -> Country:
return self.competition.country
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/game.py | game.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Optional
# Pip
from jsoncodable import JSONCodable
# Local
from .game_asset import GameAsset
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: GameAssets ----------------------------------------------------------- #
class GameAssets(JSONCodable):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
competition_image_url: str,
competition_image_path: str,
home_competitor_image_url: str,
home_competitor_image_path: str,
away_competitor_image_url: str,
away_competitor_image_path: str,
):
self.competition = GameAsset(competition_image_url, competition_image_path)
self.home_competitor = GameAsset(home_competitor_image_url, home_competitor_image_path)
self.away_competitor = GameAsset(away_competitor_image_url, away_competitor_image_path)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/game_assets.py | game_assets.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Union
# Local
from .base import BaseCompetingObject, Country, Sport
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: Competitor ----------------------------------------------------------- #
class Competitor(BaseCompetingObject):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict,
sport_or_sport_dict: Union[Sport, dict],
country_or_country_dict: Union[Country, dict]
):
super().__init__(d, sport_or_sport_dict=sport_or_sport_dict, country_or_country_dict=country_or_country_dict)
self.type = d['type']
# ----------------------------------------------------------- Overrides ---------------------------------------------------------- #
def _image_url_key(self) -> str:
return 'Competitors'
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/competitor.py | competitor.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Union
# Local
from .competitor import Competitor
from .base import Sport, Country
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------- class: GameCompetitor --------------------------------------------------------- #
class GameCompetitor(Competitor):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict,
sport_or_sport_dict: Union[Sport, dict],
country_or_country_dict: Union[Country, dict]
):
super().__init__(d, sport_or_sport_dict=sport_or_sport_dict, country_or_country_dict=country_or_country_dict)
self.score = d['score']
if self.score == int(self.score):
self.score = int(self.score)
self.aggregated_score = d['aggregatedScore']
if self.aggregated_score == int(self.aggregated_score):
self.aggregated_score = int(self.aggregated_score)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/game_competitor.py | game_competitor.py |
from .base import *
from .game import Game
from .competition import Competition
from .game_competitor import GameCompetitor
from .game_asset import GameAsset
from .game_assets import GameAssets | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
import os
# Pip
from jsoncodable import JSONCodable
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: GameAsset ----------------------------------------------------------- #
class GameAsset(JSONCodable):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
url: str,
path: str
):
self.url = url
self.path = path
# ------------------------------------------------------ Public properties ------------------------------------------------------- #
@property
def is_donwloaded(self) -> bool:
return os.path.exists(self.path)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/game_asset.py | game_asset.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Union
# Local
from .base import BaseCompetingObject, Country, Sport
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: Competition ---------------------------------------------------------- #
class Competition(BaseCompetingObject):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict,
sport_or_sport_dict: Union[Sport, dict],
country_or_country_dict: Union[Country, dict]
):
super().__init__(d, sport_or_sport_dict=sport_or_sport_dict, country_or_country_dict=country_or_country_dict)
self.has_standings = self._d_val(d, 'hasStandings', False)
self.has_standings_groups = self._d_val(d, 'hasStandingsGroups', False)
self.has_active_games = self._d_val(d, 'hasActiveGames', False)
self.is_top = self._d_val(d, 'isTop', False)
self.total_games = self._d_val(d, 'totalGames', 0)
self.live_games = self._d_val(d, 'liveGames', 0)
# ----------------------------------------------------------- Overrides ---------------------------------------------------------- #
def _image_url_key(self) -> str:
return 'Competitions'
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/competition.py | competition.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Union
from abc import abstractmethod
# Local
from .core import BaseObject, SportType
from .sport import Sport
from .country import Country
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------ class: BaseCompetingObject ------------------------------------------------------ #
class BaseCompetingObject(BaseObject):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict,
sport_or_sport_dict: Union[Sport, dict],
country_or_country_dict: Union[Country, dict]
):
super().__init__(d)
self.sport = sport_or_sport_dict if isinstance(sport_or_sport_dict, Sport) else Sport(sport_or_sport_dict)
self.country = country_or_country_dict if isinstance(country_or_country_dict, Country) else Country(country_or_country_dict)
self.popularity_rank = d['popularityRank']
self.image_url = self.get_image_url(round=False)
self.image_url_round = self.get_image_url(round=True)
self.long_name = self._d_val(d, 'longName')
self.short_name = self._d_val(d, 'shortName')
# ------------------------------------------------------- Abstract methods ------------------------------------------------------- #
@abstractmethod
def _image_url_key(self) -> str:
pass
# ------------------------------------------------------- Public properties ------------------------------------------------------ #
@property
def sport_id(self) -> int:
return self.sport.id
@property
def sport_type(self) -> SportType:
return SportType(self.sport_id)
@property
def country_id(self) -> int:
return self.country.id
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
def get_name(self) -> str:
return self.long_name or self.name
def get_image_url(
self,
round: bool = False
) -> str:
return 'https://imagecache.365scores.com/image/upload/d_Countries{}:{}.png/{}/{}'.format(
':Round' if round else '',
self.country_id,
self._image_url_key(),
self.id
)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/base_competing_object.py | base_competing_object.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# Local
from .core import BaseObject, SportType
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------- class: Sport ------------------------------------------------------------- #
class Sport(BaseObject):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict
):
super().__init__(d)
self.type = SportType(self.id)
        self.draw_support = self._d_val(d, 'drawSupport', False)  # 'drawSupport' is the assumed field name for this flag
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/sport.py | sport.py |
from .base_competing_object import BaseCompetingObject
from .country import Country
from .sport import Sport
from .core import * | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# Local
from .core import BaseObject, SportType
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------ class: Country ------------------------------------------------------------ #
class Country(BaseObject):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict
):
super().__init__(d)
self.total_games = self._d_val(d, 'totalGames', 0)
self.live_games = self._d_val(d, 'liveGames', 0)
self.sport_types = [SportType(v) for v in d['sportTypes']]
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/country.py | country.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Optional, Hashable
# Pip
from jsoncodable import JSONCodable
from kcu import kjson
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------- class: BaseIdable ----------------------------------------------------------- #
class BaseIdable(JSONCodable):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict
):
self.id = d['id']
# ------------------------------------------------------- Internal methods ------------------------------------------------------- #
def _d_val(self, d: dict, key: Hashable, def_val: Optional[any] = None) -> Optional[any]:
return kjson.get_value(d, key=key, default_value=def_val)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/core/base_idable.py | base_idable.py |
from .base_object import BaseObject
from .base_idable import BaseIdable
from .enums import * | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/core/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# Local
from .base_idable import BaseIdable
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: BaseObject ---------------------------------------------------------- #
class BaseObject(BaseIdable):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict
):
super().__init__(d)
self.name = d['name']
self.name_for_url = d['nameForURL']
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/core/base_object.py | base_object.py |
from .sport_type import SportType | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/core/enums/__init__.py | __init__.py |
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from enum import Enum
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------------------------- class: SportType ----------------------------------------------------------- #
class SportType(Enum):
Football = 1
Basketball = 2
Tennis = 3
Hockey = 4
Handball = 5
AmericanFootball = 6
Baseball = 7
Volleyball = 8
Rugby = 9
Cricket = 11
TableTennis = 12
ESports = 13
# ---------------------------------------------------------------------------------------------------------------------------------------- # | 365scores | /365scores-1.0.7-py3-none-any.whl/scores/models/base/core/enums/sport_type.py | sport_type.py |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = '36ban_commons',
version = '0.0.1',
keywords = ('36ban', 'commons'),
description = 'eking commons',
license = 'MIT License',
url = 'http://liluo.org',
author = 'zhaowenlei',
author_email = 'zhaowenlei789@sina.com',
packages = find_packages(),
include_package_data = True,
platforms = 'any',
install_requires = [],
)
| 36ban_commons | /36ban_commons-0.0.1.tar.gz/36ban_commons-0.0.1/setup.py | setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import inflection
from app.commons.generate.generate_code_service import GenerateFileService
args = sys.argv
if __name__ == "__main__":
""" 数据库注释格式约定:第一行:名称+特殊字符(空格等)+其他注释信息
第二行:约定关键字
数据库注释关键字
编辑类型关键字:("input", "text", "radio", "checkbox", "select", "date", "datetime")
条件查询关键字:("==", "!=", "llike", "rlike", "like", "between")
条件查询排序关键字:("asc", "desc")
字段查询关键字:("get_by", "gets_by")
下拉类型:全大写字符串
"""
# table_name = "tests"
table_name = args[1]
table_name = table_name[table_name.find("=") + 1:]
gen_file = args[2]
gen_file = gen_file[gen_file.find("=") + 1:]
gfs = GenerateFileService(table_name=table_name)
dao_str = gfs.get_dao_string()
service_str = gfs.get_service_string()
handler_str = gfs.get_handler_string()
list_page_str = gfs.get_list_page_string()
add_edit_page_str = gfs.get_add_edit_page_string()
detail_page_str = gfs.get_detail_page_string()
dao_file_path = os.path.join(os.path.dirname(__file__), u"templates")
current_path = os.path.dirname(__file__)
app_path = current_path[:current_path.find("commons/")]
sing_table_name = inflection.singularize(table_name)
if gen_file == "dao" or not gen_file or gen_file == "all":
dao_file_name = sing_table_name + "_dao.py"
dao_file_path = os.path.join(app_path, u"daos/" + dao_file_name)
f = open(dao_file_path, 'w')
f.write(dao_str)
print dao_file_path
if gen_file == "service" or not gen_file or gen_file == "all":
service_file_name = sing_table_name + "_service.py"
service_file_path = os.path.join(app_path, u"services/" + service_file_name)
f = open(service_file_path, 'w')
f.write(service_str)
print service_file_path
if gen_file == "handler" or not gen_file or gen_file == "all":
handler_file_name = sing_table_name + ".py"
handler_file_path = os.path.join(app_path, u"handlers/" + handler_file_name)
f = open(handler_file_path, 'w')
f.write(handler_str)
print handler_file_path
if not os.path.exists(os.path.join(app_path, u"views/" + sing_table_name)):
os.mkdir(os.path.join(app_path, u"views/" + sing_table_name))
if gen_file == "list" or not gen_file or gen_file == "all":
list_page_file_name = table_name + ".html"
list_page_file_path = os.path.join(app_path, u"views/" + sing_table_name + "/" + list_page_file_name)
f = open(list_page_file_path, 'w')
f.write(list_page_str)
print list_page_file_path
if gen_file == "add" or not gen_file or gen_file == "all":
add_edit_page_file_name = sing_table_name + ".html"
add_edit_page_file_path = os.path.join(app_path, u"views/" + sing_table_name + "/" + add_edit_page_file_name)
f = open(add_edit_page_file_path, 'w')
f.write(add_edit_page_str)
print add_edit_page_file_path
if gen_file == "detail" or not gen_file or gen_file == "all":
detail_page_file_name = sing_table_name + "_detail.html"
detail_page_file_path = os.path.join(app_path, u"views/" + sing_table_name + "/" + detail_page_file_name)
f = open(detail_page_file_path, 'w')
f.write(detail_page_str)
print detail_page_file_path
print gfs.get_route_string()
| 36ban_commons | /36ban_commons-0.0.1.tar.gz/36ban_commons-0.0.1/generate/application.py | application.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import getpass
import os
import platform
import re
import time
import inflection
from tornado.template import Template, Loader
import yaml
from configs.settings import Settings
__author__ = 'wangshubin'
class MyLoader(Loader):
"""
    Do not strip whitespace, so the original file formatting is preserved.
    The stock Loader strips whitespace from html and js, which breaks the original formatting.
"""
def _create_template(self, name):
path = os.path.join(self.root, name)
with open(path, "rb") as f:
my_template = Template(f.read(), name=name, loader=self, compress_whitespace=False)
return my_template
class MyTemplate(object):
@staticmethod
def get_template_string(template_path, template_file_name, namespace):
loader = MyLoader(template_path)
t = loader.load(template_file_name)
return t.generate(**namespace)
class DBMessage(object):
"""
    Helper class that reads schema information from the database.
"""
runmod = "development"
database_config = None
def __init__(self, config_name=None):
self.conn = self.get_conn(config_name)
def get_conn(self, config_name=None):
"""获取数据库连接,当前使用的是MySql数据库,如果更换数据库,新建子类,重写该方法即可
:param config_name:
:return:
"""
if config_name is None:
config_name = "default"
if not self.database_config:
file_stream = file(os.path.join(Settings.SITE_ROOT_PATH, 'configs/databases.yaml'), 'r')
yml = yaml.load(file_stream)
databases_config = yml.get(self.runmod)
config = databases_config[config_name]
url = config["url"] # sqlA的连接字符串,要截取mysqlDB所要的信息
self.database_config = dict()
self.database_config["user"] = url[16:url.rindex(":")]
self.database_config["passwd"] = url[url.rindex(":") + 1:url.rindex("@")]
self.database_config["host"] = url[url.rindex("@") + 1:url.rindex("/")]
self.database_config["database"] = url[url.rindex("/") + 1:url.rindex("?")]
self.database_config["charset"] = url[url.rindex("charset=") + 8:]
else:
pass
        # open the connection to the database server
conn = MySQLdb.connect(host=self.database_config["host"], user=self.database_config["user"], passwd=self.database_config["passwd"], charset=self.database_config["charset"])
conn.select_db("information_schema")
return conn
def get_schema_names(self):
"""获取所有数据库名 获取数据库连接,当前使用的是MySql数据库,如果更换数据库,新建子类,重写该方法即可
:return:
"""
cursor = self.conn.cursor()
        # run the SQL that lists all schemas
cursor.execute("SELECT DISTINCT table_schema FROM columns ")
row = cursor.fetchall()
schema_names = list()
ignore_names = ("information_schema", "mysql")
for (table_schema,) in row:
if table_schema not in ignore_names:
schema_names.append(table_schema)
return tuple(schema_names)
def get_table_names(self, table_schema):
"""获取数据库中所有表名 获取数据库连接,当前使用的是MySql数据库,如果更换数据库,新建子类,重写该方法即可
:param table_schema:
:return:
"""
cursor = self.conn.cursor()
        # run the SQL that lists the tables of the schema
cursor.execute("SELECT DISTINCT table_name FROM columns WHERE table_schema='" + table_schema + "'")
row = cursor.fetchall()
table_names = list()
for (table_name,) in row:
table_names.append(table_name)
return tuple(table_names)
def get_columns(self, table_name):
"""根据表名,获取数据库字段信息 获取数据库连接,当前使用的是MySql数据库,如果更换数据库,新建子类,重写该方法即可
:return: 所有的字段信息
"""
cursor = self.conn.cursor()
        # run the SQL that lists all column information
cursor.execute(""" SELECT column_name,
column_default,
is_nullable,
data_type,
column_type,
column_comment
FROM columns
WHERE table_schema='""" + self.database_config["database"] + """' AND table_name = '""" + table_name + """' """)
row = cursor.fetchall()
method_keys = self.get_where_types()
order_by_keys = ("asc", "desc")
edit_keys = self.get_edit_types()
columns = list()
for (column_name, column_default, is_nullable, data_type, column_type, column_comment) in row:
column_length = self.get_column_length(column_type)
sql_a_type = self.get_sql_a_type(data_type)
column_title = self.get_first_word(column_comment)
col_msg = dict()
col_msg["column_name"] = column_name # 字段
col_msg["column_title"] = column_title # 字段对应的中文 默认为column_comment中截取;用于生成页面
col_msg["column_comment"] = "" # 数据库中的备注,用于生成title .py文件中的注释等
col_msg["data_type"] = data_type # sql类型
col_msg["column_default"] = column_default # 默认值
col_msg["is_nullable"] = is_nullable # 是否为空
col_msg["column_length"] = column_length # 字段长度
col_msg["sql_a_type"] = sql_a_type # 对应的sqlA类型
col_msg["condition_search"] = False # 是否参与条件查询 及 参与条件查询类型 where条件
col_msg["order_by"] = False # 是否参与排序 及 排序方式
col_msg["edit_type"] = self.get_sql_edit_type(data_type) # 编辑类型 默认数据库类型 或 input
col_msg["option_type"] = None # 下拉类型 大写字符串
col_msg["gets_by"] = False # 按字段查询所有
col_msg["get_by"] = False # 按字段查询第一个
# file_path="" # service handler view 子文件夹 这个应该在表注释中 好像没有表注释 通过命令参数形式给出吧
if column_comment:
comments = column_comment.split("\n")
col_msg["column_comment"] = comments[0] # 数据库中的备注,用于生成title .py文件中的注释等
if len(comments) > 1:
# 存在第二行的配置
config_str = comments[1]
configs = config_str.split(" ")
for conf in configs:
if conf in method_keys:
col_msg["condition_search"] = conf
if conf in order_by_keys:
col_msg["order_by"] = conf
if conf in edit_keys:
col_msg["edit_type"] = conf
if conf.isupper():
col_msg["option_type"] = conf
if conf == "get_by":
col_msg["get_by"] = True
if conf == "gets_by":
col_msg["gets_by"] = True
columns.append(col_msg)
return columns
def get_namespace(self, table_name, ext_namespace=None):
"""获取到文件魔板所需的变量
:return:
"""
columns = self.get_columns(table_name)
singular_table_name = inflection.singularize(table_name)
model_name = self.get_model_name(table_name)
sys_user_name = self.get_sys_user_name()
created_at = time.strftime('%Y-%m-%d %H:%M:%S')
sql_a_types = self.get_all_sql_a_type(columns)
mixins = self.get_all_mixin(columns)
condition_search_columns = list()
get_by_columns = list()
gets_by_columns = list()
order_by_columns = list()
for col in columns:
if col["condition_search"]:
condition_search_columns.append(col)
if col["get_by"]:
get_by_columns.append(col)
if col["gets_by"]:
gets_by_columns.append(col)
if col["order_by"]:
order_by_columns.append(col)
        namespace = dict(model_name=model_name, # SQLAlchemy model name, used to generate class names etc.
                         table_name=table_name, # table name (plural form), usable for naming list variables
                         singular_table_name=singular_table_name, # singular form of the table name, usable for naming single instances
                         sys_user_name=sys_user_name, # system user name, generally used in the file header comment
                         created_at=created_at, # current system time, generally used in the file header comment
                         sql_a_types=sql_a_types, # corresponding SQLAlchemy types (not limited to SQLAlchemy: to use another framework, change get_all_sql_a_type)
                         mixins=mixins, # related mixins, IdMixin etc.
                         columns=columns, # column information, custom edit information and other column-level details
                         condition_search_columns=condition_search_columns, # columns used in condition queries, for easier template handling
                         order_by_columns=order_by_columns, # ordering columns for condition queries, for easier template handling
                         gets_by_columns=gets_by_columns, # columns with gets_by finders, for easier template handling
                         get_by_columns=get_by_columns # columns with get_by finders, for easier template handling
)
        # custom parameters
if ext_namespace:
namespace.update(ext_namespace)
return namespace
@staticmethod
def get_dirs(dir_type):
dirs = dict(
dao=os.path.join(Settings.SITE_ROOT_PATH, 'app/daos'),
service=os.path.join(Settings.SITE_ROOT_PATH, 'app/services'),
handler=os.path.join(Settings.SITE_ROOT_PATH, 'app/handlers'),
view=os.path.join(Settings.SITE_ROOT_PATH, 'app/views')
)
dir_str_long = dirs[dir_type]
dir_str_shot = ""
return dir_str_long
def get_config_schema_name(self):
return self.database_config["database"]
@staticmethod
def get_edit_types():
"""所有编辑类型关键字
:return:
"""
return ("input", "text", "radio", "checkbox", "select", "date", "datetime", "time")
@staticmethod
def get_sql_edit_type(data_type):
"""数据库类型默认编辑类型
:return:
"""
types_map = dict(
datetime="datetime",
smallinteger="input",
numeric="input",
unicodetext="text",
varchar="input",
char="input",
pickletype="input",
bigint="input",
unicode="input",
binary="input",
enum="input",
date="date",
int="input",
interval="input",
time="time",
text="text",
float="input",
largebinary="input",
tinyint="radio")
if data_type in types_map:
return types_map[data_type]
else:
return "input"
@staticmethod
def get_where_types():
return ("==", "!=", "llike", "rlike", "like", "between")
@staticmethod
def get_first_word(word_str):
"""获取第一个特殊符号(非数字 字母 中文)前所有字符,待匹配字符为unicode
用于数据库注释中获取字段中文名
:param word_str:
:return:
"""
        r = re.compile(ur'[\w]*[\u4E00-\u9FA5]*[\w]*') # the ur prefix is required here
s_match = r.findall(word_str)
if s_match and len(s_match) > 0:
return s_match[0]
return ""
@staticmethod
def get_column_length(column_type):
"""获取数据库字段长度
:param column_type: 数据库字段类型(长度)
:return: 字段长度
"""
pattern = re.compile("\w*\((\d*)\)w*")
res = pattern.search(column_type)
if res is None:
return None
else:
return res.groups()[0]
@staticmethod
def get_sql_a_type(sql_type):
"""根据数据库字段类型获取对应的SQLA数据类型
这个类型对应还有待完善
:param sql_type:数据库字段类型
:return:SQLA数据类型字符串
"""
types_map = dict(
datetime="DateTime",
smallinteger="SmallInteger",
numeric="Numeric",
unicodetext="UnicodeText",
varchar="String",
char="String",
pickletype="PickleType",
bigint="BigInteger",
unicode="Unicode",
binary="Binary",
enum="Enum",
date="Date",
int="Integer",
interval="Interval",
time="Time",
text="Text",
float="Float",
largebinary="LargeBinary",
tinyint="Boolean")
if sql_type in types_map:
return types_map[sql_type]
else:
return None
@staticmethod
def get_model_name(table_name):
"""将‘user_categories’ 转换成‘UserCategory’,用作数据库表名转换成对应的实体名
:param table_name: 需要传入的表名
:return:
"""
model_name = inflection.singularize(inflection.camelize(table_name))
return model_name
@staticmethod
def get_all_sql_a_type(columns):
"""获取指定columns中所有的sqlA类型
:param columns:
:return:
"""
sql_a_types = set()
for col in columns:
            # skip missing types as well as the default columns (id, created_at, updated_at, is_deleted)
if col["sql_a_type"] and col["column_name"] not in ("id", "created_at", "updated_at", "is_deleted"):
sql_a_types.add(col["sql_a_type"])
return sql_a_types
@staticmethod
def get_all_mixin(columns):
"""获取指定columns中所有的 mixin
:param columns:
:return:
"""
mixins = dict()
for col in columns:
col_name = col["column_name"]
if col_name == "id":
mixins.setdefault("id", "IdMixin")
if col_name == "created_at":
mixins.setdefault("created_at", "CreatedAtMixin")
if col_name == "updated_at":
mixins.setdefault("updated_at", "UpdatedAtMixin")
if col_name == "is_deleted":
mixins.setdefault("is_deleted", "IsDeletedMixin")
return mixins
@staticmethod
def get_sys_user_name():
"""获取当前系统用户名
:return:
"""
return getpass.getuser()
class GenerateFileService(object):
"""
    Generic file-generation service: prepares template parameters, renders templates into strings, reads and writes files, etc.
"""
def __init__(self, table_name, columns=None):
self.table_name = table_name
self.dbm = DBMessage()
self.namespace = self.dbm.get_namespace(self.table_name)
if columns is not None:
self.namespace.update(dict(columns=columns))
self.template_path = os.path.join(os.path.dirname(__file__), u"templates")
self.templates = dict(dao="dao_template.txt",
service="service_template.txt",
handler="handler_template.txt",
list_page="list_page_template.txt",
add_edit_page="add_edit_page_template.txt",
detail_page="detail_page_template.txt",
route="route_template.txt"
)
def get_dao_string(self):
template_file_name = self.templates["dao"]
return MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
def get_service_string(self):
template_file_name = self.templates["service"]
return MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
def get_handler_string(self):
template_file_name = self.templates["handler"]
return MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
def get_route_string(self):
template_file_name = self.templates["route"]
return MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
def get_list_page_string(self):
template_file_name = self.templates["list_page"]
list_page_str = MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
list_page_str = list_page_str.replace("}&}", "}}")
return list_page_str
def get_add_edit_page_string(self):
template_file_name = self.templates["add_edit_page"]
add_edit_page_str = MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
add_edit_page_str = add_edit_page_str.replace("}&}", "}}")
return add_edit_page_str
def get_detail_page_string(self):
template_file_name = self.templates["detail_page"]
detail_page_str = MyTemplate.get_template_string(self.template_path, template_file_name, self.namespace)
detail_page_str = detail_page_str.replace("}&}", "}}")
return detail_page_str
if __name__ == "__main__":
gfs = GenerateFileService(table_name="tests")
print gfs.get_dao_string()
# print gfs.get_service_string()
# print gfs.get_handler_string()
# print gfs.get_route_string()
# print gfs.get_list_page_string()
# print gfs.get_add_edit_page_string()
# print gfs.get_detail_page_string()
| 36ban_commons | /36ban_commons-0.0.1.tar.gz/36ban_commons-0.0.1/generate/generate_code_service.py | generate_code_service.py |
__author__ = 'wangshubin'
| 36ban_commons | /36ban_commons-0.0.1.tar.gz/36ban_commons-0.0.1/generate/__init__.py | __init__.py |
def print_lol(the_list):
for each_item in the_list:
if isinstance(each_item,list):
print_lol(each_item)
else:
print(each_item)
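# Example: print_lol(["a", ["b", ["c", "d"]]]) prints a, b, c and d, one per line.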
| 3890612457908641 | /3890612457908641-1.0.0.tar.gz/3890612457908641-1.0.0/3890612457908641.py | 3890612457908641.py |
from distutils.core import setup
setup (
name = '3890612457908641',
version = '1.0.0',
py_modules = ['3890612457908641'],
author = 'Monicabear',
author_email = '118010331@link.cuhk.edu.cn',
url = 'https://i.cuhk.edu.cn',
    description = 'A simple function that shows elements in a list that may or may not contain a nested list.',
)
| 3890612457908641 | /3890612457908641-1.0.0.tar.gz/3890612457908641-1.0.0/setup.py | setup.py |
# Registration tools
__Before everything else, the current status of the whole thing here is that it only works on UNIX systems (e.g. Linux & MacOS) that have reasonable chips (e.g. not M1 chips).__
## Purpose and "history"
This repository provides two scripts for the spatial and temporal registration of 3D microscopy images.
It was initially developed to help friends with their ever moving embryos living under a microscope.
I found that actually quite a few people were interested so I made a version of it that is somewhat easier to use.
In theory, the main difficulty to make the whole thing work is to install the different libraries.
## Credits
The whole thing is just a wrapper around the amazing blockmatching algorithm developed by [S. Ourselin et al.] and currently maintained by Grégoire Malandain et al. at [Team Morphem - inria] (if I am not mistaken).
## Installation
[conda] and [pip] are required to install `registration-tools`
We recommend installing the registration tools in a dedicated environment (a [conda] one for example). It can be created the following way:
conda create -n registration python=3.10
You can then activate the environment the following way:
conda activate registration
From here onward we assume that you are running the commands from the `registration` [conda] environment.
Then, to install the whole thing, it is necessary to first install blockmatching. To do so you can run the following command:
conda install vt -c morpheme -c trcabel
Then, you can install the 3D-registration library either directly via pip:
pip install 3D-registration
Or, if you want the latest version, by specifying the git repository:
pip install git+https://github.com/GuignardLab/registration-tools.git
### Troubleshooting
- Windows:
If you are trying to run the script on Windows you might need to install `pthreadvse2.dll`.
It can be found there: https://www.pconlife.com/viewfileinfo/pthreadvse2-dll/ . Make sure to download the version that matches your operating system (32 or 64 bits, most likely 64).
- MacOs:
As there are no M1 binaries available yet, please use Rosetta or install an Intel version of conda.
## Usage
How to use the two scripts is mostly described in the [manual] (note that its installation section is quite outdated, the rest is still accurate).
That being said, once installed, one can run either of the scripts from anywhere in a terminal by typing:
time-registration
or
spatial-registration
You will be prompted for the location of the json parameter files (or of a folder containing them); once provided, the registration starts.
It is also possible to run the registration from a script/notebook the following way:
```python
from registrationtools import TimeRegistration
tr = TimeRegistration('path/to/param.json')
tr.run_trsf()
```
or
```python
from registrationtools import TimeRegistration
tr = TimeRegistration('path/to/folder/with/jsonfiles/')
tr.run_trsf()
```
or
```python
from registrationtools import TimeRegistration
tr = TimeRegistration()
tr.run_trsf()
```
and you will be prompted for a path.
### Example json files
Few example json files are provided to help the potential users. You can find informations about what they do in the [manual].
[S. Ourselin et al.]: http://www-sop.inria.fr/asclepios/Publications/Gregoire.Malandain/ourselin-miccai-2000.pdf
[Team Morphem - inria]: https://team.inria.fr/morpheme/
[conda]: https://conda.io/projects/conda/en/latest/user-guide/install/index.html
[pip]: https://pypi.org/project/pip/
[manual]: https://github.com/GuignardLab/registration-tools/blob/master/User-manual/user-manual.pdf | 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/README.md | README.md |
from os.path import (
exists,
splitext,
isdir,
split as psplit,
expanduser as expusr,
)
import os, fnmatch
import warnings
from struct import pack, unpack, calcsize
from pickle import dumps, loads
import numpy as np
from .spatial_image import SpatialImage
from .inrimage import read_inrimage, write_inrimage
try:
from .tif import read_tif, write_tif
except Exception as e:
warnings.warn("pylibtiff library is not installed")
try:
from .h5 import read_h5, write_h5
except Exception as e:
warnings.warn("h5py library is not installed")
try:
from .klb import read_klb, write_klb
except Exception as e:
warnings.warn("KLB library is not installed")
from .folder import read_folder
def imread(filename, parallel=True, SP_im=True):
"""Reads an image file completely into memory.
    It uses the file extension to determine how to read the file: Inrimage (.inr), TIFF (.tif/.tiff),
    HDF5 (.h5/.hdf5) and KLB (.klb) volumes are supported, as well as folders of 2D frames.
In all cases the returned image is 3D (2D is upgraded to single slice 3D).
If it has colour or is a vector field it is even 4D.
:Parameters:
- `filename` (str)
:Returns Type:
|SpatialImage|
"""
filename = expusr(filename)
if not exists(filename):
raise IOError("The requested file do not exist: %s" % filename)
root, ext = splitext(filename)
ext = ext.lower()
if ext == ".gz":
root, ext = splitext(root)
ext = ext.lower()
if ext == ".inr":
return read_inrimage(filename)
elif ext in [".tif", ".tiff"]:
return read_tif(filename)
elif ext in [".h5", ".hdf5"]:
return read_h5(filename)
elif ext == ".klb":
return read_klb(filename, SP_im)
elif isdir(filename):
return read_folder(filename, parallel)
def imsave(filename, img):
"""Save a |SpatialImage| to filename.
.. note: `img` **must** be a |SpatialImage|.
    The file writer is chosen according to the file extension. However, not every file extension
    matches the data held by img, in dimensionality or encoding, and a mismatch might raise `IOError`s.
    Currently supported formats are Inrimage (.inr, possibly gzipped), TIFF (.tif/.tiff), KLB (.klb)
    and HDF5 (.h5/.hdf5).
:Parameters:
- `filename` (str)
- `img` (|SpatialImage|)
"""
filename = expusr(filename)
root, ext = splitext(filename)
# assert isinstance(img, SpatialImage) or ext == '.klb'
# -- images are always at least 3D! If the size of dimension 3 (indexed 2) is 1, then it is actually
# a 2D image. If it is 4D it has vectorial or RGB[A] data. --
head, tail = psplit(filename)
head = head or "."
if not exists(head):
raise IOError("The directory do not exist: %s" % head)
# is2D = img.shape[2] == 1
ext = ext.lower()
if ext == ".gz":
root, ext = splitext(root)
ext = ext.lower()
if ext == ".inr":
write_inrimage(filename, img)
elif ext in [".tiff", ".tif"]:
write_tif(filename, img)
elif ext == ".klb":
write_klb(filename, img)
elif ext in [".h5", ".hdf5"]:
write_h5(filename, img)
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/IO.py | IO.py |
from pyklb import readfull, writefull, readheader
from .spatial_image import SpatialImage
import numpy as np
def read_klb(filename, SP_im=True):
tmp = readfull(filename)
if len(tmp.shape) == 2:
tmp = tmp.reshape((1,) + tmp.shape)
if SP_im:
im = SpatialImage(tmp.transpose(2, 1, 0), copy=False)
im.voxelsize = readheader(filename).get(
"pixelspacing_tczyx",
[
1.0,
]
* 5,
)[:1:-1]
else:
im = tmp.transpose(2, 1, 0)
return im
def write_klb(filename, im):
if np.isfortran(im):
if len(im.shape) == 3:
writefull(
im.transpose(2, 1, 0),
filename,
pixelspacing_tczyx=(1, 1) + tuple(im.resolution[::-1]),
)
else:
writefull(
im.transpose(1, 0),
filename,
pixelspacing_tczyx=(1, 1) + tuple(im.resolution[::-1]),
)
else:
writefull(im, filename)
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/klb.py | klb.py |
# -*- python -*-
#
# adapted from C. Pradal
#
# Copyright 2011 INRIA - CIRAD - INRA
#
# File author(s): Christophe Pradal <christophe.pradal@cirad.fr>
# Daniel Barbeau <daniel.barbeau@cirad.fr>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite : http://openalea.gforge.inria.fr
################################################################################
"""
This module reads 3D tiff format
"""
__license__ = "Cecill-C"
__revision__ = " $Id$ "
import numpy as np
from .spatial_image import SpatialImage
__all__ = []
import decimal
import numpy as np
from tifffile import TiffFile, imread, imwrite
import os, os.path, sys, time
__all__ += ["read_tif", "write_tif", "mantissa"]
def read_tif(filename, channel=0):
"""Read a tif image
:Parameters:
- `filename` (str) - name of the file to read
"""
def format_digit(v):
try:
v = eval(v)
except Exception as e:
pass
return v
with TiffFile(filename) as tif:
if "ImageDescription" in tif.pages[0].tags:
description = (
tif.pages[0].tags["ImageDescription"].value.split("\n")
)
separator = set.intersection(*[set(k) for k in description]).pop()
info_dict = {
v.split(separator)[0]: format_digit(v.split(separator)[1])
for v in description
}
else:
info_dict = {}
if "XResolution" in tif.pages[0].tags:
vx = tif.pages[0].tags["XResolution"].value
if vx[0] != 0:
vx = vx[1] / vx[0]
if isinstance(vx, list):
vx = vx[1] / vx[0]
else:
vx = 1.0
else:
vx = 1.0
if "YResolution" in tif.pages[0].tags:
vy = tif.pages[0].tags["YResolution"].value
if vy[0] != 0:
vy = vy[1] / vy[0]
if isinstance(vy, list):
vy = vy[1] / vy[0]
else:
vy = 1.0
else:
vy = 1.0
if "ZResolution" in tif.pages[0].tags:
vz = tif.pages[0].tags["ZResolution"].value
if vz[0] != 0:
if isinstance(vz, list):
vz = vz[1] / vz[0]
else:
vz = 1.0
elif "spacing" in info_dict:
vz = info_dict["spacing"]
else:
vz = 1.0
im = tif.asarray()
if len(im.shape) == 3:
im = np.transpose(im, (2, 1, 0))
elif len(im.shape) == 2:
im = np.transpose(im, (1, 0))
if 3 <= len(im.shape):
im = SpatialImage(im, (vx, vy, vz))
else:
print(im.shape, vx, vy)
im = SpatialImage(im, (vx, vy))
im.resolution = (vx, vy, vz)
return im
def mantissa(value):
"""Convert value to [number, divisor] where divisor is power of 10"""
# -- surely not the nicest thing around --
d = decimal.Decimal(str(value)) # -- lovely...
sign, digits, exp = d.as_tuple()
n_digits = len(digits)
dividend = int(
sum(v * (10 ** (n_digits - 1 - i)) for i, v in enumerate(digits))
* (1 if sign == 0 else -1)
)
divisor = int(10**-exp)
return dividend, divisor
def write_tif(filename, obj):
if len(obj.shape) > 3:
raise IOError(
"Vectorial images are currently unsupported by tif writer"
)
is3D = len(obj.shape) == 3
if hasattr(obj, "resolution"):
res = obj.resolution
elif hasattr(obj, "voxelsize"):
res = obj.resolution
else:
res = (1, 1, 1) if is3D else (1, 1)
vx = 1.0 / res[0]
vy = 1.0 / res[1]
if is3D:
spacing = res[2]
metadata = {"spacing": spacing, "axes": "ZYX"}
else:
metadata = {}
extra_info = {
"XResolution": res[0],
"YResolution": res[1],
"spacing": spacing
if is3D
else None, # : no way to save the spacing (no specific tag)
}
print(extra_info)
if obj.dtype.char in "BHhf":
return imwrite(
filename,
obj.T,
imagej=True,
resolution=(vx, vy),
metadata=metadata,
)
else:
return imwrite(filename, obj.T)
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/tif.py | tif.py |
# -*- python -*-
#
# spatial_image: spatial nd images
#
# Copyright 2006 INRIA - CIRAD - INRA
#
# File author(s): Jerome Chopard <jerome.chopard@sophia.inria.fr>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite : http://openalea.gforge.inria.fr
#
"""
This module create the main |SpatialImage| object
"""
__license__ = "Cecill-C"
__revision__ = " $Id: $ "
import numpy as np
from scipy import ndimage
import copy as cp
# -- deprecation messages --
# import warnings, exceptions
# msg = "SpatialImage.resolution is deprecated, use SpatialImage.voxelsize"
# rezexc = exceptions.PendingDeprecationWarning(msg)
class SpatialImage(np.ndarray):
"""
Associate meta data to np.ndarray
"""
def __new__(
cls,
input_array,
voxelsize=None,
vdim=None,
info=None,
dtype=None,
**kwargs
):
"""Instantiate a new |SpatialImage|
if voxelsize is None, vdim will be used to infer space size and affect
a voxelsize of 1 in each direction of space
.. warning :: `resolution` keyword is deprecated. Use `voxelsize` instead.
:Parameters:
- `cls` - internal python
- `input_array` (array) - data to put in the image
- `voxelsize` (tuple of float) - spatial extension in each direction
of space
- `vdim` (int) - size of data if vector data are used
- `info` (dict of str|any) - metainfo
"""
# if the input_array is 2D we can reshape it to 3D.
# ~ if input_array.ndim == 2: # Jonathan
# ~ input_array = input_array.reshape( input_array.shape+(1,) ) # Jonathan
# initialize datas. For some obscure reason, we want the data
# to be F-Contiguous in the NUMPY sense. I mean, if this is not
# respected, we will have problems when communicating with
# C-Code... yeah, that makes so much sense (fortran-contiguous
# to be c-readable...).
dtype = dtype if dtype is not None else input_array.dtype
if input_array.flags.f_contiguous:
obj = np.asarray(input_array, dtype=dtype).view(cls)
else:
obj = np.asarray(input_array, dtype=dtype, order="F").view(cls)
voxelsize = kwargs.get("resolution", voxelsize) # to manage transition
if voxelsize is None:
# ~ voxelsize = (1.,) * 3
voxelsize = (1.0,) * input_array.ndim # Jonathan
else:
# ~ if len(voxelsize) != 3 :
if (input_array.ndim != 4) and (
len(voxelsize) != input_array.ndim
): # Jonathan _ Compatibility with "champs_*.inr.gz" generated by Baloo & SuperBaloo
raise ValueError("data dimension and voxelsize mismatch")
obj.voxelsize = tuple(voxelsize)
obj.vdim = vdim if vdim else 1
# set metadata
if info is None:
obj.info = {}
else:
obj.info = dict(info)
# return
return obj
def _get_resolution(self):
# warnings.warn(rezexc)
return self.voxelsize
def _set_resolution(self, val):
# warnings.warn(rezexc)
self.voxelsize = val
resolution = property(_get_resolution, _set_resolution)
@property
def real_shape(self):
# ~ return np.multiply(self.shape[:3], self.voxelsize)
return np.multiply(self.shape, self.voxelsize) # Jonathan
    def invert_z_axis(self):
        """
        Return a copy of the image flipped along the 'Z' axis.
        (Re-assigning `self` inside the method would be a no-op, hence the flipped view is returned.)
        """
        return self[:, :, ::-1]
def __array_finalize__(self, obj):
if obj is None:
return
# assert resolution
res = getattr(obj, "voxelsize", None)
if res is None: # assert vdim == 1
res = (1.0,) * len(obj.shape)
self.voxelsize = tuple(res)
# metadata
self.info = dict(getattr(obj, "info", {}))
def clone(self, data):
"""Clone the current image metadata
on the given data.
.. warning:: vdim is defined according to self.voxelsize and data.shape
:Parameters:
- `data` - (array)
:Returns Type: |SpatialImage|
"""
if len(data.shape) == len(self.voxelsize):
vdim = 1
elif len(data.shape) - len(self.voxelsize) == 1:
vdim = data.shape[-1]
else:
raise UserWarning("unable to handle such data dimension")
return SpatialImage(data, self.voxelsize, vdim, self.info)
@classmethod
def valid_array(cls, array_like):
return (
isinstance(array_like, (np.ndarray, cls))
and array_like.flags.f_contiguous
)
def empty_image_like(spatial_image):
array = np.zeros(spatial_image.shape, dtype=spatial_image.dtype)
return SpatialImage(array, spatial_image.voxelsize, vdim=1)
def null_vector_field_like(spatial_image):
array = np.zeros(list(spatial_image.shape) + [3], dtype=np.float32)
return SpatialImage(array, spatial_image.voxelsize, vdim=3)
def random_vector_field_like(spatial_image, smooth=0, max_=1):
# ~ if spatial_image.vdim == 1:
# ~ shape = spatial_image.shape+(3,)
# ~ else:
# ~ shape = spatial_image.shape
shape = spatial_image.shape # Jonathan
array = np.random.uniform(-max_, max_, shape)
if smooth:
array = ndimage.gaussian_filter(array, smooth)
return SpatialImage(array, spatial_image.voxelsize, dtype=np.float32)
def checkerboard(
nx=9, ny=8, nz=5, size=10, vs=(1.0, 1.0, 1.0), dtype=np.uint8
):
"""Creates a 3D checkerboard image with `nx` squares in width,
`ny` squares in height and `nz` squares in depth. The length of the edge in real units
of each square is `size`."""
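    # e.g. checkerboard(nx=4, ny=4, nz=2, size=10, vs=(1.0, 1.0, 2.0)) gives a
    # (40, 40, 10)-voxel SpatialImage whose squares measure 10x10x10 in real units.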
    # cast to int: np.zeros and the slicing below require integer voxel counts
    sxv, syv, szv = (np.array([size] * 3) / np.array(vs)).astype(int)
array = np.zeros((sxv * nx, syv * ny, szv * nz), dtype=dtype, order="F")
typeinfo = np.iinfo(dtype)
# -- wooo surely not the most beautiful implementation out here --
for k in range(nz):
kval = typeinfo.max if (k % 2 == 0) else typeinfo.min
jval = kval
for j in range(ny):
ival = jval
for i in range(nx):
array[
i * sxv : i * sxv + sxv,
j * syv : j * syv + syv,
k * szv : k * szv + szv,
] = ival
ival = typeinfo.max if (ival == typeinfo.min) else typeinfo.min
jval = typeinfo.max if (jval == typeinfo.min) else typeinfo.min
kval = typeinfo.max if (kval == typeinfo.min) else typeinfo.min
return SpatialImage(array, vs, dtype=dtype)
def is2D(image):
"""
Test if the `image` (array) is in 2D or 3D.
Return True if 2D, False if not.
"""
if len(image.shape) == 2 or image.shape[2] == 1:
return True
else:
return False
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/spatial_image.py | spatial_image.py |
from h5py import File
from .spatial_image import SpatialImage
import numpy as np
def read_h5(filename):
"""Read an hdf5 file
:Parameters:
- `filename` (str) - name of the file to read
"""
data = File(filename.replace("\\", ""), "r")["Data"]
im_out = np.zeros(data.shape, dtype=data.dtype)
data.read_direct(im_out)
return SpatialImage(im_out.transpose(2, 1, 0))
def write_h5(filename, im):
"""Write an image to an hdf5
:Parameters:
- `filename` (str) - name of the file to write
- `im` SpatialImage - image to write
"""
f = File(filename, mode="w")
dset = f.create_dataset("Data", data=im.transpose(2, 1, 0), chunks=True)
f.close()
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/h5.py | h5.py |
from .IO import imread, imsave
from .spatial_image import SpatialImage
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/__init__.py | __init__.py |
# -*- python -*-
#
# image.serial: read/write spatial nd images
#
# Copyright 2006 - 2011 INRIA - CIRAD - INRA
#
# File author(s): Jerome Chopard <jerome.chopard@sophia.inria.fr>
# Eric Moscardi <eric.moscardi@sophia.inria.fr>
# Daniel Barbeau <daniel.barbeau@sophia.inria.fr>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# OpenAlea WebSite : http://openalea.gforge.inria.fr
################################################################################
"""
This module defines inrimage format
"""
__license__ = "Cecill-C"
__revision__ = " $Id$ "
import os
from os import path
import numpy as np
from struct import calcsize, pack, unpack
import gzip
from io import BytesIO
from .spatial_image import SpatialImage
__all__ = ["read_inriheader", "read_inrimage", "write_inrimage"]
specific_header_keys = (
"XDIM",
"YDIM",
"ZDIM",
"VDIM",
"TYPE",
"PIXSIZE",
"SCALE",
"CPU",
"VX",
"VY",
"VZ",
"TX",
"TY",
"TZ",
"#GEOMETRY",
)
def open_inrifile(filename):
"""Open an inrimage file
Manage the gz attribute
"""
if path.splitext(filename)[1] in (".gz", ".zip"):
with gzip.open(filename, "rb") as fzip:
f = BytesIO(fzip.read())
fzip.close()
else:
f = open(filename, "rb")
return f
def _read_header(f):
"""Extract header from a stream and return it
as a python dict
"""
# read header string
header = ""
while header[-4:] != "##}\n":
header += f.read(256).decode("latin-1")
# read infos in header
prop = {}
hstart = header.find("{\n") + 1
hend = header.find("##}")
infos = [gr for gr in header[hstart:hend].split("\n") if len(gr) > 0]
# format infos
for prop_def in infos:
key, val = prop_def.split("=")
prop[key] = val
# return
return prop
def read_inriheader(filename):
"""Read only the header of an inrimage
:Parameters:
- `filename` (str) - name of the file to read
"""
f = open_inrifile(filename)
prop = _read_header(f)
f.close()
return prop
def read_inrimage(filename):
"""Read an inrimage, either zipped or not according to extension
:Parameters:
- `filename` (str) - name of the file to read
"""
f = open_inrifile(filename)
# read header
prop = _read_header(f)
prop["filename"] = filename # Jonathan : 14.05.2012
# extract usefull infos to read image
zdim = int(prop.pop("ZDIM"))
ydim = int(prop.pop("YDIM"))
xdim = int(prop.pop("XDIM"))
vdim = int(prop.pop("VDIM", 1))
# if vdim != 1 :
# msg = "don't know how to handle vectorial pixel values"
# raise NotImplementedError(msg)
# find data type
pixsize = int(prop.pop("PIXSIZE", "0").split(" ")[0])
dtype = prop.pop("TYPE")
if dtype == "unsigned fixed":
if pixsize == 0:
            ntyp = np.dtype(int)  # np.int was removed from recent NumPy versions
else:
try:
ntyp = eval("np.dtype(np.uint%d)" % pixsize)
except AttributeError:
raise UserWarning("undefined pix size: %d" % pixsize)
elif dtype == "float":
if pixsize == 0:
            ntyp = np.dtype(float)  # np.float was removed from recent NumPy versions
else:
try:
ntyp = eval("np.dtype(np.float%d)" % pixsize)
except AttributeError:
raise UserWarning("undefined pix size: %d" % pixsize)
else:
msg = "unable to read that type of datas : %s" % dtype
raise UserWarning(msg)
# read datas
size = ntyp.itemsize * xdim * ydim * zdim * vdim
mat = np.frombuffer(f.read(size), ntyp)
if vdim != 1:
mat = mat.reshape((vdim, xdim, ydim, zdim), order="F")
mat = mat.transpose(1, 2, 3, 0)
else:
mat = mat.reshape((xdim, ydim, zdim), order="F")
# mat = mat.transpose(2,1,0)
# create SpatialImage
res = tuple(float(prop.pop(k)) for k in ("VX", "VY", "VZ"))
for k in ("TX", "TY", "TZ"):
prop.pop(k, None)
img = SpatialImage(mat, res, vdim, prop)
# return
f.close()
return img
def write_inrimage_to_stream(stream, img):
assert img.ndim in (3, 4)
# metadata
info = dict(getattr(img, "info", {}))
if "filename" in info:
info["filename"] = path.basename(info["filename"])
else:
info["filename"] = path.basename("img.inr")
# image dimensions
if img.ndim < 4:
info["XDIM"], info["YDIM"], info["ZDIM"] = (
"%d" % val for val in img.shape
)
info["VDIM"] = "1"
else:
info["XDIM"], info["YDIM"], info["ZDIM"], info["VDIM"] = (
"%d" % val for val in img.shape
)
# image resolution
res = getattr(img, "resolution", (1, 1, 1))
info["VX"], info["VY"], info["VZ"] = ("%f" % val for val in res)
# data type
if img.dtype == np.uint8:
info["TYPE"] = "unsigned fixed"
info["PIXSIZE"] = "8 bits"
elif img.dtype == np.uint16:
info["TYPE"] = "unsigned fixed"
info["PIXSIZE"] = "16 bits"
elif img.dtype == np.uint32:
info["TYPE"] = "unsigned fixed"
info["PIXSIZE"] = "32 bits"
elif img.dtype == np.uint64:
info["TYPE"] = "unsigned fixed"
info["PIXSIZE"] = "64 bits"
elif img.dtype == np.float32:
info["TYPE"] = "float"
info["PIXSIZE"] = "32 bits"
elif img.dtype == np.float64:
info["TYPE"] = "float"
info["PIXSIZE"] = "64 bits"
# elif img.dtype == np.float128 :
# info["TYPE"] = "float"
# info["PIXSIZE"] = "128 bits"
else:
msg = "unable to write that type of datas : %s" % str(img.dtype)
raise UserWarning(msg)
# mandatory else an error occurs when reading image
info["#GEOMETRY"] = "CARTESIAN"
info["CPU"] = "decm"
# write header
header = "#INRIMAGE-4#{\n"
for (
k
) in (
specific_header_keys
): # HACK pas bo to ensure order of specific headers
try:
header += "%s=%s\n" % (k, info[k])
except KeyError:
pass
for k in set(info) - set(specific_header_keys):
header += "%s=%s\n" % (k, info[k])
# fill header to be a multiple of 256
header_size = len(header) + 4
if (header_size % 256) > 0:
header += "\n" * (256 - header_size % 256)
header += "##}\n"
contiguous_rule = "C" if img.flags["C_CONTIGUOUS"] else "F"
    # the stream is binary (gzip or 'wb'), so the header must be encoded;
    # tobytes replaces the removed ndarray.tostring
    stream.write(header.encode("latin-1"))
    if img.ndim == 3:
        stream.write(img.tobytes(contiguous_rule))
    elif img.ndim == 4:
        mat = img.transpose(3, 0, 1, 2)
        stream.write(mat.tobytes(contiguous_rule))
else:
raise Exception("Unhandled image dimension %d." % img.ndim)
def write_inrimage(filename, img):
"""Write an inrimage zipped or not according to the extension
.. warning:: if img is not a |SpatialImage|, default values will be used
for the resolution of the image
:Parameters:
- `img` (|SpatialImage|) - image to write
- `filename` (str) - name of the file to read
"""
# open stream
zipped = path.splitext(filename)[1] in (".gz", ".zip")
if zipped:
f = gzip.GzipFile(filename, "wb")
# f = StringIO()
else:
f = open(filename, "wb")
try:
write_inrimage_to_stream(f, img)
except:
# -- remove probably corrupt file--
f.close()
if path.exists(filename) and path.isfile(filename):
os.remove(filename)
raise
else:
f.close()
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/inrimage.py | inrimage.py |
from os import path
import os
import numpy as np
from multiprocessing import Pool
def populate_im(parameters):
from .IO import imread, SpatialImage
path, pos = parameters
return imread(path)[:, :, 0]
def read_folder(folder, parallel):
from .IO import imread, SpatialImage
data_files = sorted(os.listdir(folder))
nb_images = len(data_files)
first_image = imread(path.join(folder, data_files[0]))
to_read = []
for i, p in enumerate(data_files):
to_read.append([path.join(folder, p), i]) # (i, path.join(folder, p)))
if parallel:
pool = Pool()
out = pool.map(populate_im, to_read)
pool.close()
pool.terminate()
else:
out = []
for param in to_read:
out.append(populate_im(param))
return SpatialImage(
np.array(out).transpose(1, 2, 0), voxelsize=first_image.voxelsize
)
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/IO/folder.py | folder.py |
#!/usr/bin/python
# This file is subject to the terms and conditions defined in
# file 'LICENCE', which is part of this source code package.
# Author: Leo Guignard (leo.guignard...@AT@...univ-amu.fr)
from pathlib import Path
from time import time
import os
from subprocess import call
import scipy as sp
from scipy import interpolate
import json
import numpy as np
from IO import imread, imsave, SpatialImage
from typing import List, Tuple
from statsmodels.nonparametric.smoothers_lowess import lowess
import sys
import xml.etree.ElementTree as ET
from xml.dom import minidom
from transforms3d.affines import decompose
from transforms3d.euler import mat2euler
if sys.version_info[0] < 3:
from future.builtins import input
try:
from pyklb import readheader
pyklb_found = True
except Exception as e:
pyklb_found = False
print("pyklb library not found, klb files will not be generated")
class trsf_parameters(object):
"""
Read parameters for the registration function from a preformated json file
"""
def check_parameters_consistancy(self) -> bool:
"""
        Function that checks parameter consistency
"""
correct = True
if not "path_to_data" in self.__dict__:
print('\n\t"path_to_data" is required')
correct = False
if not "file_name" in self.__dict__:
print('\n\t"file_name" is required')
correct = False
if not "trsf_folder" in self.__dict__:
print('\n\t"trsf_folder" is required')
correct = False
if not "voxel_size" in self.__dict__:
print('\n\t"voxel_size" is required')
correct = False
if not "ref_TP" in self.__dict__:
print('\n\t"ref_TP" is required')
correct = False
if not "projection_path" in self.__dict__:
print('\n\t"projection_path" is required')
correct = False
if self.apply_trsf and not "output_format" in self.__dict__:
print('\n\t"output_format" is required')
correct = False
if self.trsf_type != "translation" and (
self.lowess or self.trsf_interpolation
):
print("\n\tLowess or transformation interplolation")
print("\tonly work with translation")
correct = False
if self.lowess and not "window_size" in self.param_dict:
print('\n\tLowess smoothing "window_size" is missing')
print("\tdefault value of 5 will be used\n")
if self.trsf_interpolation and not "step_size" in self.param_dict:
print('\n\tTransformation interpolation "step_size" is missing')
print("\tdefault value of 100 will be used\n")
if self.trsf_type == "vectorfield" and self.ref_path is None:
print("\tNon-linear transformation asked with propagation.")
print("\tWhile working it is highly not recommended")
print(
"\tPlease consider not doing a registration from propagation"
)
print("\tTHIS WILL LITERALLY TAKE AGES!! IF IT WORKS AT ALL ...")
if (
not isinstance(self.spline, int)
or self.spline < 1
or 5 < self.spline
):
out = ("{:d}" if isinstance(self.spline, int) else "{:s}").format(
self.spline
)
print("The degree of smoothing for the spline interpolation")
print("Should be an Integer between 1 and 5, you gave " + out)
return correct
def __str__(self) -> str:
max_key = (
max([len(k) for k in self.__dict__.keys() if k != "param_dict"])
+ 1
)
max_tot = (
max(
[
len(str(v))
for k, v in self.__dict__.items()
if k != "param_dict"
]
)
+ 2
+ max_key
)
output = "The registration will run with the following arguments:\n"
output += "\n" + " File format ".center(max_tot, "-") + "\n"
output += "path_to_data".ljust(max_key, " ") + ": {}\n".format(
self.path_to_data
)
output += "file_name".ljust(max_key, " ") + ": {}\n".format(
self.file_name
)
output += "trsf_folder".ljust(max_key, " ") + ": {}\n".format(
self.trsf_folder
)
output += "output_format".ljust(max_key, " ") + ": {}\n".format(
self.output_format
)
output += "check_TP".ljust(max_key, " ") + ": {:d}\n".format(
self.check_TP
)
output += "\n" + " Time series properties ".center(max_tot, "-") + "\n"
output += "voxel_size".ljust(
max_key, " "
) + ": {:f}x{:f}x{:f}\n".format(*self.voxel_size)
output += "first".ljust(max_key, " ") + ": {:d}\n".format(self.first)
output += "last".ljust(max_key, " ") + ": {:d}\n".format(self.last)
output += "\n" + " Registration ".center(max_tot, "-") + "\n"
output += "compute_trsf".ljust(max_key, " ") + ": {:d}\n".format(
self.compute_trsf
)
if self.compute_trsf:
output += "ref_TP".ljust(max_key, " ") + ": {:d}\n".format(
self.ref_TP
)
if self.ref_path is not None:
output += "ref_path".ljust(max_key, " ") + ": {}\n".format(
self.ref_path
)
output += "trsf_type".ljust(max_key, " ") + ": {}\n".format(
self.trsf_type
)
if self.trsf_type == "vectorfield":
output += "sigma".ljust(max_key, " ") + ": {:.2f}\n".format(
self.sigma
)
output += "padding".ljust(max_key, " ") + ": {:d}\n".format(
self.padding
)
output += "lowess".ljust(max_key, " ") + ": {:d}\n".format(
self.lowess
)
if self.lowess:
output += "window_size".ljust(
max_key, " "
) + ": {:d}\n".format(self.window_size)
output += "trsf_interpolation".ljust(
max_key, " "
) + ": {:d}\n".format(self.trsf_interpolation)
if self.trsf_interpolation:
output += "step_size".ljust(max_key, " ") + ": {:d}\n".format(
self.step_size
)
output += "recompute".ljust(max_key, " ") + ": {:d}\n".format(
self.recompute
)
output += "apply_trsf".ljust(max_key, " ") + ": {:d}\n".format(
self.apply_trsf
)
if self.apply_trsf:
if self.projection_path is not None:
output += "projection_path".ljust(
max_key, " "
) + ": {}\n".format(self.projection_path)
output += "image_interpolation".ljust(
max_key, " "
) + ": {}\n".format(self.image_interpolation)
return output
def add_path_prefix(self, prefix: str):
"""
        Add a prefix to all the relevant paths (this is mainly for the unit tests)
Args:
prefix (str): The prefix to add in front of the path
"""
self.projection_path = (
os.path.join(prefix, self.projection_path) + os.path.sep
)
self.path_to_data = (
os.path.join(prefix, self.path_to_data) + os.path.sep
)
self.trsf_folder = os.path.join(prefix, self.trsf_folder) + os.path.sep
self.output_format = (
os.path.join(prefix, self.output_format) + os.path.sep
)
def __init__(self, file_name: str):
if not isinstance(file_name, dict):
with open(file_name) as f:
param_dict = json.load(f)
f.close()
else:
param_dict = {}
for k, v in file_name.items():
if isinstance(v, Path):
param_dict[k] = str(v)
else:
param_dict[k] = v
# Default parameters
self.check_TP = None
self.not_to_do = []
self.compute_trsf = True
self.ref_path = None
self.padding = 1
self.lowess = False
self.window_size = 5
self.step_size = 100
self.recompute = True
self.apply_trsf = True
self.sigma = 2.0
self.keep_vectorfield = False
self.trsf_type = "rigid"
self.image_interpolation = "linear"
self.path_to_bin = ""
self.spline = 1
self.trsf_interpolation = False
self.sequential = True
self.time_tag = "TM"
self.do_bdv = 0
self.bdv_voxel_size = None
self.bdv_unit = "microns"
self.projection_path = None
self.pre_2D = False
self.low_th = None
self.plot_trsf = False
self.param_dict = param_dict
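        # Backward compatibility: a single "registration_depth" entry in the json sets the
        # end level of the registration pyramid, while the start level defaults to 6.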
if "registration_depth" in param_dict:
self.__dict__["registration_depth_start"] = 6
self.__dict__["registration_depth_end"] = param_dict[
"registration_depth"
]
elif not "registration_depth_start" in param_dict:
self.__dict__["registration_depth_start"] = 6
self.__dict__["registration_depth_end"] = 3
self.__dict__.update(param_dict)
self.voxel_size = tuple(self.voxel_size)
if not hasattr(self, "voxel_size_out"):
self.voxel_size_out = self.voxel_size
else:
self.voxel_size_out = tuple(self.voxel_size_out)
self.origin_file_name = file_name
        if (
            self.projection_path is not None
            and 0 < len(self.projection_path)
            and self.projection_path[-1] != os.path.sep
        ):
            self.projection_path = self.projection_path + os.path.sep
self.path_to_data = os.path.join(self.path_to_data, "")
self.trsf_folder = os.path.join(self.trsf_folder, "")
self.path_to_bin = os.path.join(self.path_to_bin, "")
if 0 < len(self.path_to_bin) and not os.path.exists(self.path_to_bin):
print("Binary path could not be found, will try with global call")
self.path_to_bin = ""
class TimeRegistration:
@staticmethod
def read_trsf(path: str) -> np.ndarray:
"""
Read a transformation from a text file
Args:
path (str): path to a transformation
Returns
(np.ndarray): 4x4 ndarray matrix
"""
f = open(path)
if f.read()[0] == "(":
f.close()
f = open(path)
lines = f.readlines()[2:-1]
f.close()
return np.array([[float(v) for v in l.split()] for l in lines])
f.close()
return np.loadtxt(path)
def __produce_trsf(self, params: Tuple[trsf_parameters, int, int, bool]):
"""
Given a parameter object a reference time and two time points
compute the transformation that registers together two consecutive in time images.
Args:
params (tuple): a tuple with the parameter object, two consecutive time points
                to register, and a boolean to determine whether to apply the trsf or not
"""
(p, t1, t2, make) = params
if not p.sequential:
p_im_flo = p.A0.format(t=t2)
p_im_ref = p.ref_path
t_ref = p.ref_TP
t_flo = t2
if t1 == p.to_register[0]:
self.__produce_trsf((p, t2, t1, make))
else:
if t1 < p.ref_TP:
t_ref = t2
t_flo = t1
else:
t_ref = t1
t_flo = t2
p_im_ref = p.A0.format(t=t_ref)
p_im_flo = p.A0.format(t=t_flo)
if not make:
print("trsf tp %d-%d not done" % (t1, t2))
np.savetxt(
os.path.join(p.trsf_folder, "t%06d-%06d.txt" % (t_flo, t_ref)),
np.identity(4),
)
elif t_flo != t_ref and (
p.recompute
or not os.path.exists(
os.path.join(p.trsf_folder, "t%06d-%06d.txt" % (t_flo, t_ref))
)
):
if p.low_th is not None and 0 < p.low_th:
th = " -ref-lt {lt:f} -flo-lt {lt:f} -no-norma ".format(
lt=p.low_th
)
else:
th = ""
if p.trsf_type != "vectorfield":
if p.pre_2D == 1:
call(
self.path_to_bin
+ "blockmatching -ref "
+ p_im_ref
+ " -flo "
+ p_im_flo
+ " -reference-voxel %f %f %f" % p.voxel_size
+ " -floating-voxel %f %f %f" % p.voxel_size
+ " -trsf-type rigid2D -py-hl %d -py-ll %d"
% (
p.registration_depth_start,
p.registration_depth_end,
)
+ " -res-trsf "
+ os.path.join(
p.trsf_folder,
"t%06d-%06d-tmp.txt" % (t_flo, t_ref),
)
+ th,
shell=True,
)
pre_trsf = (
" -init-trsf "
+ os.path.join(
p.trsf_folder,
"t%06d-%06d-tmp.txt" % (t_flo, t_ref),
)
+ " -composition-with-initial "
)
else:
pre_trsf = ""
call(
self.path_to_bin
+ "blockmatching -ref "
+ p_im_ref
+ " -flo "
+ p_im_flo
+ pre_trsf
+ " -reference-voxel %f %f %f" % p.voxel_size
+ " -floating-voxel %f %f %f" % p.voxel_size
+ " -trsf-type %s -py-hl %d -py-ll %d"
% (
p.trsf_type,
p.registration_depth_start,
p.registration_depth_end,
)
+ " -res-trsf "
+ os.path.join(
p.trsf_folder, "t%06d-%06d.txt" % (t_flo, t_ref)
)
+ th,
shell=True,
)
else:
if p.apply_trsf:
res = " -res " + p.A0_out.format(t=t_flo)
else:
res = ""
if p.keep_vectorfield and pyklb_found:
res_trsf = (
" -composition-with-initial -res-trsf "
+ os.path.join(
p.trsf_folder, "t%06d-%06d.klb" % (t_flo, t_ref)
)
)
else:
res_trsf = ""
if p.keep_vectorfield:
print(
"The vectorfield cannot be stored without pyklb being installed"
)
call(
self.path_to_bin
+ "blockmatching -ref "
+ p_im_ref
+ " -flo "
+ p_im_flo
+ " -reference-voxel %f %f %f" % p.voxel_size
+ " -floating-voxel %f %f %f" % p.voxel_size
+ " -trsf-type affine -py-hl %d -py-ll %d"
% (p.registration_depth_start, p.registration_depth_end)
+ " -res-trsf "
+ os.path.join(
p.trsf_folder, "t%06d-%06d.txt" % (t_flo, t_ref)
)
+ th,
shell=True,
)
call(
self.path_to_bin
+ "blockmatching -ref "
+ p_im_ref
+ " -flo "
+ p_im_flo
+ " -init-trsf "
+ os.path.join(
p.trsf_folder, "t%06d-%06d.txt" % (t_flo, t_ref)
)
+ res
+ " -reference-voxel %f %f %f" % p.voxel_size
+ " -floating-voxel %f %f %f" % p.voxel_size
+ " -trsf-type %s -py-hl %d -py-ll %d"
% (
p.trsf_type,
p.registration_depth_start,
p.registration_depth_end,
)
+ res_trsf
+ (
" -elastic-sigma {s:.1f} {s:.1f} {s:.1f} "
+ " -fluid-sigma {s:.1f} {s:.1f} {s:.1f}"
).format(s=p.sigma)
+ th,
shell=True,
)
def run_produce_trsf(self, p: trsf_parameters):
"""
Parallel processing of the transformations from t to t-1/t+1 to t (depending on t<r)
The transformation is computed using blockmatching algorithm
Args:
            p (trsf_parameters): a trsf_parameters object
"""
mapping = [
(p, t1, t2, not (t1 in p.not_to_do or t2 in p.not_to_do))
for t1, t2 in zip(p.to_register[:-1], p.to_register[1:])
]
tic = time()
tmp = []
for mi in mapping:
tmp += [self.__produce_trsf(mi)]
tac = time()
whole_time = tac - tic
secs = whole_time % 60
whole_time = whole_time // 60
mins = whole_time % 60
hours = whole_time // 60
print("%dh:%dmin:%ds" % (hours, mins, secs))
def compose_trsf(
self, flo_t: int, ref_t: int, trsf_p: str, tp_list: List[int]
) -> str:
"""
        Recursively build the transformation that allows
to register time `flo_t` onto the frame of time `ref_t`
assuming that it exists the necessary intermediary transformations
Args:
flo_t (int): time of the floating image
ref_t (int): time of the reference image
trsf_p (str): path to folder containing the transformations
tp_list (list): list of time points that have been processed
Returns:
out_trsf (str): path to the result composed transformation
"""
out_trsf = trsf_p + "t%06d-%06d.txt" % (flo_t, ref_t)
if not os.path.exists(out_trsf):
flo_int = tp_list[tp_list.index(flo_t) + np.sign(ref_t - flo_t)]
# the call is recursive, to build `T_{flo\leftarrow ref}`
# we need `T_{flo+1\leftarrow ref}` and `T_{flo\leftarrow ref-1}`
trsf_1 = self.compose_trsf(flo_int, ref_t, trsf_p, tp_list)
trsf_2 = self.compose_trsf(flo_t, flo_int, trsf_p, tp_list)
call(
self.path_to_bin
+ "composeTrsf "
+ out_trsf
+ " -trsfs "
+ trsf_2
+ " "
+ trsf_1,
shell=True,
)
return out_trsf
@staticmethod
def __lowess_smooth(
X: np.ndarray, T: np.ndarray, frac: float
) -> np.ndarray:
"""
Smooth a curve using the lowess algorithm.
See:
Cleveland, W.S. (1979)
“Robust Locally Weighted Regression and Smoothing Scatterplots”.
Journal of the American Statistical Association 74 (368): 829-836.
Args:
X (np.ndarray): 1D array for the point positions
T (np.ndarray): 1D array for the times corresponding to the `X` positions
frac (float): Between 0 and 1. The fraction of the data used when estimating each value within X
Returns:
(np.ndarray): the smoothed X values
"""
return lowess(X, T, frac=frac, is_sorted=True, return_sorted=False)
@staticmethod
def interpolation(
p: trsf_parameters, X: np.ndarray, T: np.ndarray
) -> np.ndarray:
"""
        Interpolate positional values between missing timepoints using spline interpolation
Args:
p (trsf_parameters): parameters for the function
X (np.ndarray): 1D array for the point positions
T (np.ndarray): 1D array for the times corresponding to the `X` position
Returns:
            (scipy.interpolate.InterpolatedUnivariateSpline): the interpolated spline function
"""
return sp.interpolate.InterpolatedUnivariateSpline(T, X, k=p.spline)
@classmethod
def read_param_file(clf, p_param: str = None) -> List[trsf_parameters]:
"""
Asks for, reads and formats the parameter file
Args:
p_params (str | None): path to the json parameter files.
It can either be a json or a folder containing json files.
If it is None (default), a path is asked to the user.
Returns:
(list): list of `trsf_parameters` objects
"""
if not isinstance(p_param, dict):
if p_param is None:
if len(sys.argv) < 2 or sys.argv[1] == "-f":
p_param = input(
"\nPlease inform the path to the json config file:\n"
)
else:
p_param = sys.argv[1]
stable = False or isinstance(p_param, Path)
while not stable:
tmp = p_param.strip('"').strip("'").strip(" ")
stable = tmp == p_param
p_param = tmp
if os.path.isdir(p_param):
f_names = [
os.path.join(p_param, f)
for f in os.listdir(p_param)
if ".json" in f and not "~" in f
]
else:
f_names = [p_param]
else:
f_names = [p_param]
params = []
for file_name in f_names:
if isinstance(file_name, str):
print("")
print("Extraction of the parameters from file %s" % file_name)
p = trsf_parameters(file_name)
if not p.check_parameters_consistancy():
print("\n%s Failed the consistancy check, it will be skipped")
else:
params += [p]
print("")
return params
@staticmethod
def prepare_paths(p: trsf_parameters):
"""
Prepare the paths in a format that is usable by the algorithm
Args:
p (trsf_parameters): parameter object
"""
image_formats = [
".tiff",
".tif",
".inr",
".gz",
".klb",
".h5",
".hdf5",
]
### Check the file names and folders:
p.im_ext = p.file_name.split(".")[-1] # Image type
p.A0 = os.path.join(p.path_to_data, p.file_name) # Image path
# Output format
if p.output_format is not None:
# If the output format is a file alone
if os.path.split(p.output_format)[0] == "":
p.A0_out = os.path.join(p.path_to_data, p.output_format)
# If the output format is a folder alone
elif not os.path.splitext(p.output_format)[-1] in image_formats:
p.A0_out = os.path.join(p.output_format, p.file_name)
else:
p.A0_out = p.output_format
else:
p.A0_out = os.path.join(
p.path_to_data,
p.file_name.replace(p.im_ext, p.suffix + "." + p.im_ext),
)
# Time points to work with
p.time_points = np.array(
[i for i in np.arange(p.first, p.last + 1) if not i in p.not_to_do]
)
if p.check_TP:
missing_time_points = []
for t in p.time_points:
if not os.path.exists(p.A0.format(t=t)):
missing_time_points += [t]
if len(missing_time_points) != 0:
print("The following time points are missing:")
print("\t" + missing_time_points)
print("Aborting the process")
exit()
if not p.sequential:
p.ref_path = p.A0.format(t=p.ref_TP)
if p.apply_trsf:
for t in sorted(p.time_points):
folder_tmp = os.path.split(p.A0_out.format(t=t))[0]
if not os.path.exists(folder_tmp):
os.makedirs(folder_tmp)
max_t = max(p.time_points)
if p.trsf_interpolation:
p.to_register = sorted(p.time_points)[:: p.step_size]
if not max_t in p.to_register:
p.to_register += [max_t]
else:
p.to_register = p.time_points
if not hasattr(p, "bdv_im") or p.bdv_im is None:
p.bdv_im = p.A0
if not hasattr(p, "out_bdv") or p.out_bdv is None:
p.out_bdv = os.path.join(p.trsf_folder, "bdv.xml")
if p.bdv_voxel_size is None:
p.bdv_voxel_size = p.voxel_size
def lowess_filter(self, p: trsf_parameters, trsf_fmt: str) -> str:
"""
Apply a lowess filter on a set of ordered transformations (only works for translations).
It does it from already computed transformations and write new transformations on disk.
Args:
p (trsf_parameters): parameter object
trsf_fmt (srt): the string format for the initial transformations
Returns:
(str): output transformation string
"""
X_T = []
Y_T = []
Z_T = []
T = []
for t in p.to_register:
trsf_p = p.trsf_folder + trsf_fmt.format(flo=t, ref=p.ref_TP)
trsf = self.read_trsf(trsf_p)
T += [t]
X_T += [trsf[0, -1]]
Y_T += [trsf[1, -1]]
Z_T += [trsf[2, -1]]
frac = float(p.window_size) / len(T)
        X_smoothed = self.__lowess_smooth(X_T, T, frac=frac)
        Y_smoothed = self.__lowess_smooth(Y_T, T, frac=frac)
        Z_smoothed = self.__lowess_smooth(Z_T, T, frac=frac)
new_trsf_fmt = "t{flo:06d}-{ref:06d}-filtered.txt"
for i, t in enumerate(T):
mat = np.identity(4)
mat[0, -1] = X_smoothed[i]
mat[1, -1] = Y_smoothed[i]
mat[2, -1] = Z_smoothed[i]
np.savetxt(
p.trsf_folder + new_trsf_fmt.format(flo=t, ref=p.ref_TP), mat
)
return new_trsf_fmt
def interpolate(
self, p: trsf_parameters, trsf_fmt: str, new_trsf_fmt: str = None
) -> str:
"""
Interpolate a set of ordered transformations (only works for translations).
It does it from already computed transformations and write new transformations on disk.
Args:
p (trsf_parameters): parameter object
trsf_fmt (srt): the string format for the initial transformations
new_trsf_fmt (str | None): path to the interpolated transformations. If None,
the format will be "t{flo:06d}-{ref:06d}-interpolated.txt"
Returns:
(str): output transformation format
"""
if new_trsf_fmt is None:
new_trsf_fmt = "t{flo:06d}-{ref:06d}-interpolated.txt"
X_T = []
Y_T = []
Z_T = []
T = []
for t in p.to_register:
trsf_p = os.path.join(
p.trsf_folder, trsf_fmt.format(flo=t, ref=p.ref_TP)
)
trsf = self.read_trsf(trsf_p)
T += [t]
X_T += [trsf[0, -1]]
Y_T += [trsf[1, -1]]
Z_T += [trsf[2, -1]]
X_interp = self.interpolation(p, X_T, T)
Y_interp = self.interpolation(p, Y_T, T)
Z_interp = self.interpolation(p, Z_T, T)
for t in p.time_points:
mat = np.identity(4)
mat[0, -1] = X_interp(t)
mat[1, -1] = Y_interp(t)
mat[2, -1] = Z_interp(t)
            np.savetxt(
                os.path.join(
                    p.trsf_folder,
                    new_trsf_fmt.format(flo=t, ref=p.ref_TP),
                ),
                mat,
            )
trsf_fmt = new_trsf_fmt
return trsf_fmt
@staticmethod
def pad_trsfs(p: trsf_parameters, trsf_fmt: str):
"""
Pad transformations
Args:
p (trsf_parameters): parameter object
trsf_fmt (srt): the string format for the initial transformations
"""
if not p.sequential and p.ref_path.split(".")[-1] == "klb":
im_shape = readheader(p.ref_path)["imagesize_tczyx"][-1:-4:-1]
elif not p.sequential:
im_shape = imread(p.ref_path).shape
elif p.A0.split(".")[-1] == "klb":
im_shape = readheader(p.A0.format(t=p.ref_TP))["imagesize_tczyx"][
-1:-4:-1
]
else:
im_shape = imread(p.A0.format(t=p.ref_TP)).shape
im = SpatialImage(np.ones(im_shape), dtype=np.uint8)
im.voxelsize = p.voxel_size
if pyklb_found:
template = os.path.join(p.trsf_folder, "tmp.klb")
res_t = "template.klb"
else:
template = os.path.join(p.trsf_folder, "tmp.tif")
res_t = "template.tif"
imsave(template, im)
identity = np.identity(4)
trsf_fmt_no_flo = trsf_fmt.replace("{flo:06d}", "%06d")
new_trsf_fmt = "t{flo:06d}-{ref:06d}-padded.txt"
new_trsf_fmt_no_flo = new_trsf_fmt.replace("{flo:06d}", "%06d")
for t in p.not_to_do:
            np.savetxt(
                os.path.join(
                    p.trsf_folder,
                    trsf_fmt.format(flo=t, ref=p.ref_TP),
                ),
                identity,
            )
call(
p.path_to_bin
+ "changeMultipleTrsfs -trsf-format "
+ os.path.join(p.trsf_folder, trsf_fmt_no_flo.format(ref=p.ref_TP))
+ " -index-reference %d -first %d -last %d "
% (p.ref_TP, min(p.time_points), max(p.time_points))
+ " -template "
+ template
+ " -res "
+ os.path.join(
p.trsf_folder, new_trsf_fmt_no_flo.format(ref=p.ref_TP)
)
+ " -res-t "
+ os.path.join(p.trsf_folder, res_t)
+ " -trsf-type %s -vs %f %f %f" % ((p.trsf_type,) + p.voxel_size),
shell=True,
)
def compute_trsfs(self, p: trsf_parameters):
"""
Compute all the transformations from a given set of parameters
Args:
p (trsf_parameters): Parameter object
"""
        # Create the output folder for the transformations
if not os.path.exists(p.trsf_folder):
os.makedirs(p.trsf_folder)
trsf_fmt = "t{flo:06d}-{ref:06d}.txt"
try:
self.run_produce_trsf(p)
if p.sequential:
if min(p.to_register) != p.ref_TP:
self.compose_trsf(
min(p.to_register),
p.ref_TP,
p.trsf_folder,
list(p.to_register),
)
if max(p.to_register) != p.ref_TP:
self.compose_trsf(
max(p.to_register),
p.ref_TP,
p.trsf_folder,
list(p.to_register),
)
np.savetxt(
os.path.join("{:s}", trsf_fmt).format(
p.trsf_folder, flo=p.ref_TP, ref=p.ref_TP
),
np.identity(4),
)
except Exception as e:
print(p.trsf_folder)
print(e)
if p.lowess:
trsf_fmt = self.lowess_filter(p, trsf_fmt)
if p.trsf_interpolation:
            trsf_fmt = self.interpolate(p, trsf_fmt)
if p.padding:
self.pad_trsfs(p, trsf_fmt)
@staticmethod
def apply_trsf(p: trsf_parameters):
"""
Apply transformations to a movie from a set of computed transformations
Args:
p (trsf_parameters): Parameter object
"""
trsf_fmt = "t{flo:06d}-{ref:06d}.txt"
if p.lowess:
trsf_fmt = "t{flo:06d}-{ref:06d}-filtered.txt"
if p.trsf_interpolation:
trsf_fmt = "t{flo:06d}-{ref:06d}-interpolated.txt"
if p.padding:
trsf_fmt = "t{flo:06d}-{ref:06d}-padded.txt"
if pyklb_found:
template = os.path.join(p.trsf_folder, "template.klb")
X, Y, Z = readheader(template)["imagesize_tczyx"][-1:-4:-1]
else:
template = os.path.join(p.trsf_folder, "template.tif")
X, Y, Z = imread(template).shape
elif p.A0.split(".")[-1] == "klb":
X, Y, Z = readheader(p.A0.format(t=p.ref_TP))["imagesize_tczyx"][
-1:-4:-1
]
template = p.A0.format(t=p.ref_TP)
else:
X, Y, Z = imread(p.A0.format(t=p.ref_TP)).shape
template = p.A0.format(t=p.ref_TP)
if p.voxel_size != p.voxel_size_out:
before, after = os.path.splitext(template)
old_template = template
template = "".join((before, ".final_template", after))
call(
p.path_to_bin
+ f"applyTrsf {old_template} {template} "
+ "-vs %f %f %f" % p.voxel_size_out,
shell=True,
)
X, Y, Z = imread(template).shape
xy_proj = np.zeros((X, Y, len(p.time_points)), dtype=np.uint16)
xz_proj = np.zeros((X, Z, len(p.time_points)), dtype=np.uint16)
yz_proj = np.zeros((Y, Z, len(p.time_points)), dtype=np.uint16)
for i, t in enumerate(sorted(p.time_points)):
folder_tmp = os.path.split(p.A0_out.format(t=t))[0]
if not os.path.exists(folder_tmp):
os.makedirs(folder_tmp)
call(
p.path_to_bin
+ "applyTrsf %s %s -trsf "
% (p.A0.format(t=t), p.A0_out.format(t=t))
+ os.path.join(
p.trsf_folder, trsf_fmt.format(flo=t, ref=p.ref_TP)
)
+ " -template "
+ template
+ " -floating-voxel %f %f %f " % p.voxel_size
+ " -reference-voxel %f %f %f " % p.voxel_size_out
+ " -interpolation %s" % p.image_interpolation,
shell=True,
)
try:
im = imread(p.A0_out.format(t=t))
except Exception as e:
# print("applyTrsf failed at t=",str(t),", retrying now")
call(
p.path_to_bin
+ "applyTrsf %s %s -trsf "
% (p.A0.format(t=t), p.A0_out.format(t=t))
+ os.path.join(
p.trsf_folder, trsf_fmt.format(flo=t, ref=p.ref_TP)
)
+ " -template "
+ template
+ " -floating-voxel %f %f %f " % p.voxel_size
+ " -reference-voxel %f %f %f " % p.voxel_size_out
+ " -interpolation %s" % p.image_interpolation,
shell=True,
)
im = imread(p.A0_out.format(t=t))
if p.projection_path is not None:
xy_proj[..., i] = SpatialImage(np.max(im, axis=2))
xz_proj[..., i] = SpatialImage(np.max(im, axis=1))
yz_proj[..., i] = SpatialImage(np.max(im, axis=0))
if p.projection_path is not None:
if not os.path.exists(p.projection_path):
os.makedirs(p.projection_path)
p_to_data = p.projection_path
num_s = p.file_name.find("{")
num_e = p.file_name.find("}") + 1
f_name = p.file_name.replace(p.file_name[num_s:num_e], "")
if not os.path.exists(p_to_data.format(t=-1)):
os.makedirs(p_to_data.format(t=-1))
imsave(
os.path.join(
p_to_data, f_name.replace(p.im_ext, "xyProjection.tif")
),
SpatialImage(xy_proj),
)
imsave(
os.path.join(
p_to_data, f_name.replace(p.im_ext, "xzProjection.tif")
),
SpatialImage(xz_proj),
)
imsave(
os.path.join(
p_to_data, f_name.replace(p.im_ext, "yzProjection.tif")
),
SpatialImage(yz_proj),
)
@staticmethod
def inv_trsf(trsf: np.ndarray) -> np.ndarray:
"""
Inverse a transformation
Args:
trsf (np.ndarray): 4x4 transformation matrix
Returns
(np.ndarray): the inverse of the input transformation
"""
return np.linalg.lstsq(trsf, np.identity(4))[0]
@staticmethod
def prettify(elem: ET.Element) -> str:
"""
Return a pretty-printed XML string for the Element.
Args:
elem (xml.etree.ElementTree.Element): xml element
Returns:
(str): a nice version of our xml file
"""
rough_string = ET.tostring(elem, "utf-8")
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
@staticmethod
def do_viewSetup(
ViewSetup: ET.SubElement,
p: trsf_parameters,
im_size: Tuple[int, int, int],
i: int,
):
"""
Setup xml elements for BigDataViewer
Args:
ViewSetup (ET.SubElement): ...
p (trsf_parameters): Parameter object
im_size (tuple): tuple of 3 integers with the dimension of the image
i (int): angle id
"""
id_ = ET.SubElement(ViewSetup, "id")
id_.text = "%d" % i
name = ET.SubElement(ViewSetup, "name")
name.text = "%d" % i
size = ET.SubElement(ViewSetup, "size")
size.text = "%d %d %d" % tuple(im_size)
voxelSize = ET.SubElement(ViewSetup, "voxelSize")
unit = ET.SubElement(voxelSize, "unit")
unit.text = p.bdv_unit
size = ET.SubElement(voxelSize, "size")
size.text = "%f %f %f" % tuple(p.bdv_voxel_size)
attributes = ET.SubElement(ViewSetup, "attributes")
illumination = ET.SubElement(attributes, "illumination")
illumination.text = "0"
channel = ET.SubElement(attributes, "channel")
channel.text = "0"
tile = ET.SubElement(attributes, "tile")
tile.text = "0"
angle = ET.SubElement(attributes, "angle")
angle.text = "%d" % i
def do_ViewRegistration(
self,
ViewRegistrations: ET.SubElement,
p: trsf_parameters,
t: int,
a: int,
):
"""
Write the view registration for BigDataViewer
Args:
ViewSetup (ET.SubElement): ...
p (trsf_parameters): Parameter object
t (int): time point to treat
            a (int): angle to treat (unused for time registration)
"""
ViewRegistration = ET.SubElement(ViewRegistrations, "ViewRegistration")
ViewRegistration.set("timepoint", "%d" % t)
ViewRegistration.set("setup", "0")
ViewTransform = ET.SubElement(ViewRegistration, "ViewTransform")
ViewTransform.set("type", "affine")
affine = ET.SubElement(ViewTransform, "affine")
f = os.path.join(p.trsf_folder, "t%06d-%06d.txt" % (t, p.ref_TP))
trsf = self.read_trsf(f)
trsf = self.inv_trsf(trsf)
formated_trsf = tuple(trsf[:-1, :].flatten())
affine.text = ("%f " * 12) % formated_trsf
ViewTransform = ET.SubElement(ViewRegistration, "ViewTransform")
ViewTransform.set("type", "affine")
affine = ET.SubElement(ViewTransform, "affine")
affine.text = (
"%f 0.0 0.0 0.0 0.0 %f 0.0 0.0 0.0 0.0 %f 0.0" % p.voxel_size
)
def build_bdv(self, p: trsf_parameters):
"""
Build the BigDataViewer xml
Args:
p (trsf_parameters): Parameter object
"""
if not p.im_ext in ["klb"]: # ['tif', 'klb', 'tiff']:
print("Image format not adapted for BigDataViewer")
return
SpimData = ET.Element("SpimData")
SpimData.set("version", "0.2")
SpimData.set("encoding", "UTF-8")
base_path = ET.SubElement(SpimData, "BasePath")
base_path.set("type", "relative")
base_path.text = "."
SequenceDescription = ET.SubElement(SpimData, "SequenceDescription")
ImageLoader = ET.SubElement(SequenceDescription, "ImageLoader")
ImageLoader.set("format", p.im_ext)
Resolver = ET.SubElement(ImageLoader, "Resolver")
Resolver.set(
"type", "org.janelia.simview.klb.bdv.KlbPartitionResolver"
)
ViewSetupTemplate = ET.SubElement(Resolver, "ViewSetupTemplate")
template = ET.SubElement(ViewSetupTemplate, "template")
template.text = p.bdv_im.format(t=p.to_register[0])
timeTag = ET.SubElement(ViewSetupTemplate, "timeTag")
timeTag.text = p.time_tag
ViewSetups = ET.SubElement(SequenceDescription, "ViewSetups")
ViewSetup = ET.SubElement(ViewSetups, "ViewSetup")
if p.im_ext == "klb":
im_size = tuple(
readheader(p.A0.format(t=p.ref_TP))["imagesize_tczyx"][
-1:-4:-1
]
)
else:
im_size = tuple(imread(p.A0.format(t=p.ref_TP)).shape)
self.do_viewSetup(ViewSetup, p, im_size, 0)
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "illumination")
Illumination = ET.SubElement(Attributes, "Illumination")
id_ = ET.SubElement(Illumination, "id")
id_.text = "0"
name = ET.SubElement(Illumination, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "channel")
Channel = ET.SubElement(Attributes, "Channel")
id_ = ET.SubElement(Channel, "id")
id_.text = "0"
name = ET.SubElement(Channel, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "tile")
Tile = ET.SubElement(Attributes, "Tile")
id_ = ET.SubElement(Tile, "id")
id_.text = "0"
name = ET.SubElement(Tile, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "angle")
Angle = ET.SubElement(Attributes, "Angle")
id_ = ET.SubElement(Angle, "id")
id_.text = "0"
name = ET.SubElement(Angle, "name")
name.text = "0"
TimePoints = ET.SubElement(SequenceDescription, "Timepoints")
TimePoints.set("type", "range")
first = ET.SubElement(TimePoints, "first")
first.text = "%d" % min(p.to_register)
last = ET.SubElement(TimePoints, "last")
last.text = "%d" % max(p.to_register)
ViewRegistrations = ET.SubElement(SpimData, "ViewRegistrations")
b = min(p.to_register)
e = max(p.to_register)
for t in range(b, e + 1):
            self.do_ViewRegistration(ViewRegistrations, p, t, 0)  # single angle (id 0) for time registration
with open(p.out_bdv, "w") as f:
f.write(self.prettify(SpimData))
f.close()
def plot_transformations(self, p: trsf_parameters):
trsf_fmt = "t{flo:06d}-{ref:06d}.txt"
if p.lowess:
trsf_fmt = "t{flo:06d}-{ref:06d}-filtered.txt"
if p.trsf_interpolation:
trsf_fmt = "t{flo:06d}-{ref:06d}-interpolated.txt"
if p.padding:
trsf_fmt = "t{flo:06d}-{ref:06d}-padded.txt"
import matplotlib.pyplot as plt
tX, tY, tZ = [], [], []
rX, rY, rZ = [], [], []
for t in sorted(p.time_points):
trsf = self.read_trsf(
os.path.join(
p.trsf_folder, trsf_fmt.format(flo=t, ref=p.ref_TP)
)
)
(tx, ty, tz), M, *_ = decompose(trsf)
rx, ry, rz = mat2euler(M)
tX.append(tx)
tY.append(ty)
tZ.append(tz)
rX.append(np.rad2deg(rx))
rY.append(np.rad2deg(ry))
rZ.append(np.rad2deg(rz))
fig, ax = plt.subplots(3, 1, figsize=(8, 5), sharex=True, sharey=True)
ax[0].plot(p.time_points, tX, "o-")
ax[1].plot(p.time_points, tY, "o-")
ax[2].plot(p.time_points, tZ, "o-")
for axis, axi in zip(["X", "Y", "Z"], ax):
axi.set_ylabel(f"{axis} Translation [µm]")
ax[2].set_xlabel("Time")
fig.suptitle("Translations")
fig.tight_layout()
fig, ax = plt.subplots(3, 1, figsize=(8, 5), sharex=True, sharey=True)
ax[0].plot(p.time_points, rX, "o-")
ax[1].plot(p.time_points, rY, "o-")
ax[2].plot(p.time_points, rZ, "o-")
for axis, axi in zip(["X", "Y", "Z"], ax):
axi.set_ylabel(f"{axis} Rotation\nin degree")
ax[2].set_xlabel("Time")
fig.suptitle("Rotations")
fig.tight_layout()
plt.show()
def run_trsf(self):
"""
        Start the time registration once the parameter files have been loaded
"""
for p in self.params:
try:
print("Starting experiment")
print(p)
self.prepare_paths(p)
if p.compute_trsf:
self.compute_trsfs(p)
if p.plot_trsf:
self.plot_transformations(p)
if p.apply_trsf and p.trsf_type != "vectorfield":
self.apply_trsf(p)
if p.do_bdv:
self.build_bdv(p)
except Exception as e:
print("Failure of %s" % p.origin_file_name)
print(e)
def __init__(self, params=None):
if params is None:
self.params = self.read_param_file()
elif (
isinstance(params, str)
or isinstance(params, Path)
or isinstance(params, dict)
):
self.params = TimeRegistration.read_param_file(params)
else:
self.params = params
if self.params is not None and 0 < len(self.params):
self.path_to_bin = self.params[0].path_to_bin
def time_registration():
reg = TimeRegistration()
reg.run_trsf()
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/registrationtools/time_registration.py | time_registration.py |
import tifffile
import os
import registrationtools
import json
import numpy as np
from glob import glob
from ensure import check ##need to pip install !
from pathlib import Path
def get_paths() :
    #Ask the user for the folder containing their data, find the movies if they are in tiff format and put them in a list after confirmation
path_correct=False
while path_correct==False :
path_folder = input('Path to the folder of the movie(s) (in tiff format only) : \n ')
paths_movies = sorted(glob(rf'{path_folder}/*.tif'))
if len(paths_movies) > 0 :
print('You have',len(paths_movies),'movie(s), which is (are) : ')
for path_movie in paths_movies :
print('',Path(path_movie).stem) #the empty str at the begining is to align the answers one space after the questions, for readability
path_correct = int(input('Correct ? (1 for yes, 0 for no) \n '))
else :
print('There is no tiff file in the folder. \n')
return (paths_movies)
def dimensions(list_paths:list) :
    # Take the path to the data and return its size in the dimensions CTZXY. Raise an error if the other movies do not have the same C or T dimensions.
movie = tifffile.imread(list_paths[0])
name_movie = Path(list_paths[0]).stem
dim_correct = False
while dim_correct == False :
print('\nThe dimensions of ',name_movie, 'are ',movie.shape,'. \n')
dimensions=input('What is the order of the dimensions (for example TZCYX or XYZT) ? T stands for Time, C for channels if your image has multiple channels, Z for depth (or number or plans) and XY is your field of view. \n ')
sorted_axes = sorted(dimensions)
if ''.join(sorted_axes) == "CTXYZ":
number_channels = movie.shape[dimensions.find('C')]
dim_correct=True
elif ''.join(sorted_axes) == "TXYZ":
number_channels = 1
dim_correct=True
elif len(sorted_axes) != len(movie.shape) :
print('Error : Number of dimensions is incorrect. \n')
dim_correct = False
else:
            print(' \n The letters you chose have to be among X, Y, Z, C, T (XYZT are mandatory); no other letters are allowed and every letter can appear only once. \n')
number_timepoints = movie.shape[dimensions.find('T')]
if 'C' in dimensions :
number_channels = movie.shape[dimensions.find('C')]
else :
number_channels = 1
if dim_correct == True :
depth = movie.shape[dimensions.find('Z')]
size_X = movie.shape[dimensions.find('X')]
size_Y = movie.shape[dimensions.find('Y')]
print('\nSo',name_movie, 'has',number_channels,'channels, ',number_timepoints,'timepoints, the depth in z is',depth,'pixels and the XY plane measures ',size_X,'x',size_Y,'pixels.')
dim_correct = int(input('Correct ? (1 for yes, 0 for no) \n '))
#checking if the movies have the same dimensions in C and T, otherwise the registration cannot be computed.
if 'C' in dimensions : #have to check beforehand if multichannels, otherwise, dimensions.find('C') will return -1 and mov.shape[dimensions.find('C')] will probably return the dimension of the X axis
for path in list_paths :
mov = tifffile.imread(path)
check(mov.shape[dimensions.find('T')]).equals(number_timepoints).or_raise(Exception, 'These movies do not all have the same number of timepoints. \n')
check(mov.shape[dimensions.find('C')]).equals(number_channels).or_raise(Exception, 'These movies do not all have the same number of channels. \n')
#if XYZ are not the same thats ok ?
return(number_channels,number_timepoints,depth,size_X,size_Y)
def get_channels_name(number_channels:int) :
#Ask for the name of the channels, return a list of str containing all the channels
channels=[]
if number_channels == 1:
ch = str(input('Name of the channel : \n '))
channels.append(ch)
else : #if multichannels
for n in range(number_channels) :
ch = str(input('Name of channel n°'+str(n+1) +' : \n '))
channels.append(ch)
return(channels)
def reference_channel(channels:list) :
#Ask for the reference channel among the channels given (with safety check)
if len(channels) == 1 :
ch_ref=channels[0]
else :
channel_correct=False
print('\nAmong the channels'+ str(channels)+', you need a reference channel to compute the registration. A good option is generally a marker that is expressed ubiquitously\n')
while channel_correct==False :
ch_ref = input('Name of the reference channel : \n ')
if ch_ref not in channels :
print('The reference channel is not in', channels,'(do not put any other character than the name itself)')
channel_correct=False
else :
channel_correct=True
channels_float=channels.copy()
channels_float.remove(ch_ref)
return(channels_float,ch_ref)
def sort_by_channels_and_timepoints(list_paths:list,channels:list,number_timepoints:int):
#Take a list of movies and cut them into a timesequence of 3D stacks, one timesequence per channel. Works for one channel or multiple channels.
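    # Each movie/channel pair gets its own folder named "<movie>_<channel>"; the
    # per-timepoint 3D stacks are written into its "stackseq" subfolder by
    # cut_timesequence below.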
for path in list_paths :
for n in range(len(channels)) :
name_movie = Path(path).stem
directory = name_movie+'_'+channels[n]
cut_timesequence(path_to_movie = path,
directory=directory,
ind_current_channel=n,
number_timepoints = number_timepoints,
number_channels = len(channels))
def cut_timesequence (path_to_movie:str, directory:str, ind_current_channel:int, number_timepoints:int,number_channels:int):
#take a movie and cut it into a timesequence in the given directory. Creates the folder structure for the registration.
movie = tifffile.imread(path_to_movie)
position_t = np.argwhere(np.array(movie.shape)==number_timepoints)[0][0]
movie=np.moveaxis(movie,source=position_t,destination=0) #i did not test this
path_to_data=os.path.dirname(path_to_movie)
    path_dir = os.path.join(path_to_data, directory)
check(os.path.isdir(path_dir)).is_(False).or_raise(Exception,"Please delete the folder "+directory+" and run again.")
os.mkdir(os.path.join(path_to_data,directory))
os.mkdir(os.path.join(path_to_data+'/'+directory,"trsf"))
os.mkdir(os.path.join(path_to_data+'/'+directory,"output"))
os.mkdir(os.path.join(path_to_data+'/'+directory,"proj_output"))
os.mkdir(os.path.join(path_to_data+'/'+directory,"stackseq"))
if number_channels >1 :
position_c = np.argwhere(np.array(movie.shape)==number_channels)[0][0]
movie=np.moveaxis(movie,source=position_c,destination=2) #we artificially change to the format TZCXY (reference in Fiji). Its just to cut into timesequence. does not modify the data
for t in range(number_timepoints) :
stack = movie[t,:,ind_current_channel,:,:]
tifffile.imwrite(path_to_data+'/'+directory+"/stackseq/movie_t"+str(format(t,'03d')+'.tif'),stack)
else :
for t in range(number_timepoints) :
stack = movie[t,:,:,:]
tifffile.imwrite(path_to_data+'/'+directory+"/stackseq/movie_t"+str(format(t,'03d')+'.tif'),stack)
def get_voxel_sizes() :
    #ask the user for the voxel size of the input and what voxel size they want in output. Returns 2 tuples for these values.
print('\nTo register properly, you need to specify the voxel size of your input image. This can be found in Fiji, Image>Show Info. ')
print('Voxel size of your original image (XYZ successively) :\n ')
x= float(input('X :'))
y= float(input('Y :'))
z= float(input('Z :'))
voxel_size_input = [x,y,z]
print('Initial voxel size =',voxel_size_input)
change_voxel_size = int(input(' \nYou can choose to have another voxel size on the registered image , for example to have an isotropic output image (voxel size [1,1,1]), Or you can also choose to keep the same voxel size. \nDo you want to change the voxel size of your movies ? (1 for yes, 0 for no) : \n '))
if change_voxel_size==1 :
print('\nVoxel size of your image after transformation (XYZ): \n ')
x= float(input('X :'))
y= float(input('Y :'))
z= float(input('Z :'))
voxel_size_output = [x,y,z]
elif change_voxel_size==0 :
voxel_size_output = voxel_size_input
print('\nVoxel size after transformation =',voxel_size_output)
return(voxel_size_input,voxel_size_output)
def get_trsf_type() :
    #Ask for what transformation the user wants and return a string
list_trsf_types = ['rigid2D','rigid3D','translation2D','translation3D'] #needs to be completed
print('\nYou can choose to apply different transformation types depending on your data : ',list_trsf_types)
trsf_correct = False
while trsf_correct == False :
trsf_type = str(input('\nWhich one do you want to use ? (please enter the name of the transformation only, no other character) \n '))
if trsf_type in list_trsf_types :
trsf_correct = True
else :
print(('You can only choose a transformation that is in this list :',list_trsf_types))
return(trsf_type)
def data_preparation() :
#Gather all the functions asking the user for the parameters. Returns the useful ones for registration (not XYZ size and channels list)
list_paths = get_paths()
number_channels,number_timepoints,depth,size_X,size_Y = dimensions(list_paths)
channels=get_channels_name(number_channels)
channels_float,ch_ref = reference_channel(channels)
sort_by_channels_and_timepoints(list_paths = list_paths,
channels = channels,
number_timepoints = number_timepoints)
voxel_size_input, voxel_size_output = get_voxel_sizes()
trsf_type = get_trsf_type()
return(list_paths,
number_timepoints,
channels_float,ch_ref,
voxel_size_input, voxel_size_output,
trsf_type)
def run_registration (list_paths:list,channels_float:list, ch_ref:str, voxel_size_input:tuple, voxel_size_output:tuple, trsf_type:str) :
#Does the actual registration : Take the reference channel first to compute and apply the trsf, then loop on the float channels and only apply it
for path in list_paths :
json_string=[]
folder=os.path.dirname(path)
name_movie = Path(path).stem
movie = tifffile.imread(path)
directory = name_movie+'_'+ch_ref
data_ref = {
"path_to_data": rf'{folder}/{directory}/stackseq/',
"file_name": "movie_t{t:03d}.tif",
"trsf_folder": rf'{folder}/{directory}/trsf/',
"output_format": rf'{folder}/{directory}/output/',
"projection_path":rf'{folder}/{directory}/proj_output/',
"check_TP": 0,
"voxel_size": voxel_size_input,
"voxel_size_out" : voxel_size_output,
"first": 0,
"last": movie.shape[0]-1,
"not_to_do": [],
"compute_trsf": 1,
"ref_TP": int(movie.shape[0]/2),
"trsf_type": trsf_type,
"padding": 1,
"recompute": 1,
"apply_trsf":1,
"out_bdv": "",
"plot_trsf":0
}
json_string.append(json.dumps(data_ref))
tr = registrationtools.TimeRegistration(data_ref)
tr.run_trsf()
#registration rest of the channels
for c in channels_float :
directory = name_movie+'_'+c
data_float = {
"path_to_data": rf'{folder}/{directory}/stackseq/',
"file_name": "movie_t{t:03d}.tif",
"trsf_folder": rf'{folder}/{name_movie}_{ch_ref}/trsf/',
"output_format": rf'{folder}/{directory}/output/',
"projection_path":rf'{folder}/{directory}/proj_output/',
"check_TP": 0,
"voxel_size": voxel_size_input,
"voxel_size_out" : voxel_size_output,
"first": 0,
"last": movie.shape[0]-1,
"not_to_do": [],
"compute_trsf": 0,
"ref_TP": int(movie.shape[0]/2),
"trsf_type": trsf_type,
"padding": 1,
"recompute": 1,
"apply_trsf":1,
"out_bdv": "",
"plot_trsf":0
}
json_string.append(json.dumps(data_float))
tr = registrationtools.TimeRegistration(data_float)
tr.run_trsf()
return(json_string)
def save_sequences_as_stacks(list_paths:list,channels:list,number_timepoints:int) :
#save the timesequence as a hyperstack
for path in list_paths :
path_to_data=os.path.dirname(path)
name_movie = Path(path).stem
movie = tifffile.imread(path)
stack0 =tifffile.imread(rf"{path_to_data}/{name_movie}_{channels[0]}/output/movie_t000.tif") #we use the first image (3D) to know the dimensions
        registered_movie = np.zeros((number_timepoints,stack0.shape[0],len(channels),stack0.shape[1],stack0.shape[2]),dtype=np.float32) #one hyperstack per movie, of shape (t,z,c,y,x). Datatype uint16 or float32 is necessary to export as hyperstack
for ind_c,c in enumerate(channels) :
directory = name_movie+'_'+c
for t in range(movie.shape[0]) :
stack =tifffile.imread(rf"{path_to_data}/{directory}/output/movie_t{format(t,'03d')}.tif")
#we take each stack in a given timepoint
registered_movie[t,:,ind_c,:,:]=stack #and put it in a new hyperstack
tifffile.imwrite(path_to_data+rf"/{name_movie}_registered.tif",registered_movie.astype(np.float32),imagej=True) #write a hyperstack in the main folder
print('saved registered', name_movie, 'of size', registered_movie.shape)
def save_jsonfile(list_paths,json_string):
    path_to_data = os.path.dirname(list_paths[0])
    keep_same_dir = int(input(str('Do you want to save your json files in the same master directory, in '+path_to_data+'/jsonfiles ? (1 for yes, 0 for no)')))
    if keep_same_dir ==1 :
        path_to_json = os.path.join(path_to_data, 'jsonfiles')
        os.mkdir(os.path.join(path_to_data,'jsonfiles'))
else :
path_to_json = input('In which folder do you want to write your jsonfile ?').replace("\\", "/") #if single backslashes, fine. If double backslashes (when copy/paste the Windows path), compatibility problems, thats why we replace by single slashes.')
print('saving',len(json_string),'json files :')
    for ind_json,json_str in enumerate(json_string) :
        with open(os.path.join(path_to_json,'param'+str(ind_json)+'.json'),'w') as outfile : #maybe not the best name
            outfile.write(json_str)
print(path_to_json)
print('Done saving')
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/registrationtools/utils.py | utils.py |
#!/usr/bin/python
# This file is subject to the terms and conditions defined in
# file 'LICENCE', which is part of this source code package.
# Author: Leo Guignard (leo.guignard...@AT@...univ-amu.fr)
import numpy as np
import os
import sys
import json
from subprocess import call
import xml.etree.ElementTree as ET
from xml.dom import minidom
from shutil import copyfile
from pathlib import Path
from typing import Union, List, Tuple
from IO import imread, imsave, SpatialImage
class trsf_parameters(object):
"""
    Read parameters for the registration function from a preformatted json file
"""
def check_parameters_consistancy(self) -> bool:
"""
        Function that should check parameter consistency
        @TODO:
        write something that actually does the check
"""
correct = True
if not (
hasattr(self, "out_pattern") or hasattr(self, "output_format")
):
print("The output pattern cannot be an empty string")
correct = False
return correct
def __str__(self) -> str:
max_key = (
max([len(k) for k in self.__dict__.keys() if k != "param_dict"])
+ 1
)
output = "The registration will run with the following arguments:\n"
output += "\n" + " File format \n"
output += "path_to_data".ljust(max_key, " ") + ": {:s}\n".format(
self.path_to_data
)
output += "ref_im".ljust(max_key, " ") + ": {:s}\n".format(self.ref_im)
output += "flo_ims".ljust(max_key, " ") + ": "
tmp_just_len = len("flo_ims".ljust(max_key, " ") + ": ")
already = tmp_just_len + 1
for flo in self.flo_ims:
output += (" " * (tmp_just_len - already)) + "{:s}\n".format(flo)
already = 0
output += "init_trsfs".ljust(max_key, " ") + ": "
tmp_just_len = len("init_trsfs".ljust(max_key, " ") + ": ")
already = tmp_just_len + 1
for init_trsfs in self.init_trsfs:
if init_trsfs is not None and 0 < len(init_trsfs):
output += (" " * (tmp_just_len - already)) + "{}\n".format(
init_trsfs
)
already = 0
output += "trsf_types".ljust(max_key, " ") + ": "
tmp_just_len = len("trsf_types".ljust(max_key, " ") + ": ")
already = tmp_just_len + 1
for trsf_type in self.trsf_types:
output += (" " * (tmp_just_len - already)) + "{:s}\n".format(
trsf_type
)
already = 0
output += "ref_voxel".ljust(
max_key, " "
) + ": {:f} x {:f} x {:f}\n".format(*self.ref_voxel)
output += "flo_voxels".ljust(max_key, " ") + ": "
tmp_just_len = len("flo_voxels".ljust(max_key, " ") + ": ")
already = tmp_just_len + 1
for flo_voxel in self.flo_voxels:
output += (
" " * (tmp_just_len - already)
) + "{:f} x {:f} x {:f}\n".format(*flo_voxel)
already = 0
output += "out_voxel".ljust(
max_key, " "
) + ": {:f} x {:f} x {:f}\n".format(*self.out_voxel)
output += "out_pattern".ljust(max_key, " ") + ": {:s}\n".format(
self.out_pattern
)
return output
def add_path_prefix(self, prefix: str):
"""
        Add a prefix to all the relevant paths (this is mainly for the unit tests)
Args:
prefix (str): The prefix to add in front of the path
"""
self.path_to_data = str(Path(prefix) / self.path_to_data)
self.trsf_paths = [
str(Path(prefix) / trsf) for trsf in self.trsf_paths
]
def __init__(self, file_name: str):
if not isinstance(file_name, dict):
with open(file_name) as f:
param_dict = json.load(f)
f.close()
else:
param_dict = {}
for k, v in file_name.items():
if isinstance(v, Path):
param_dict[k] = str(v)
else:
param_dict[k] = v
# Default parameters
self.param_dict = param_dict
self.init_trsfs = [[], [], []]
self.path_to_bin = ""
self.registration_depth_start = 6
self.registration_depth_end = 3
self.init_trsf_real_unit = True
self.image_interpolation = "linear"
self.apply_trsf = True
self.compute_trsf = True
self.test_init = False
self.begin = None
self.end = None
self.trsf_types = []
self.time_tag = None
self.bdv_unit = "microns"
self.bdv_voxel_size = None
self.do_bdv = 0
self.flo_im_sizes = None
self.copy_ref = False
self.bbox_out = False
if "registration_depth" in param_dict:
self.__dict__["registration_depth_start"] = 6
self.__dict__["registration_depth_end"] = param_dict[
"registration_depth"
]
elif not "registration_depth_start" in param_dict:
self.__dict__["registration_depth_start"] = 6
self.__dict__["registration_depth_end"] = 3
self.__dict__.update(param_dict)
self.ref_voxel = tuple(self.ref_voxel)
self.flo_voxels = [tuple(vox) for vox in self.flo_voxels]
self.out_voxel = tuple(self.out_voxel)
self.origin_file_name = file_name
self.path_to_bin = os.path.join(self.path_to_bin, "")
if 0 < len(self.path_to_bin) and not os.path.exists(self.path_to_bin):
print("Binary path could not be found, will try with global call")
self.path_to_bin = ""
class SpatialRegistration:
@staticmethod
def axis_rotation_matrix(
axis: str,
angle: float,
min_space: Tuple[int, int, int] = None,
max_space: Tuple[int, int, int] = None,
) -> np.ndarray:
"""Return the transformation matrix from the axis and angle necessary
Args:
axis (str): axis of rotation ("X", "Y" or "Z")
angle (float) : angle of rotation (in degree)
min_space (tuple(int, int, int)): coordinates of the bottom point (usually (0, 0, 0))
max_space (tuple(int, int, int)): coordinates of the top point (usually im shape)
Returns:
(ndarray): 4x4 rotation matrix
"""
import math
I = np.linalg.inv
D = np.dot
if axis not in ["X", "Y", "Z"]:
raise Exception(f"Unknown axis: {axis}")
rads = math.radians(angle)
s = math.sin(rads)
c = math.cos(rads)
centering = np.identity(4)
if min_space is None and max_space is not None:
min_space = np.array([0.0, 0.0, 0.0])
if max_space is not None:
space_center = (max_space - min_space) / 2.0
offset = -1.0 * space_center
centering[:3, 3] = offset
rot = np.identity(4)
if axis == "X":
rot = np.array(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, c, -s, 0.0],
[0.0, s, c, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
elif axis == "Y":
rot = np.array(
[
[c, 0.0, s, 0.0],
[0.0, 1.0, 0.0, 0.0],
[-s, 0.0, c, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
elif axis == "Z":
rot = np.array(
[
[c, -s, 0.0, 0.0],
[s, c, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
return D(I(centering), D(rot, centering))
@staticmethod
def flip_matrix(axis: str, im_size: tuple) -> np.ndarray:
"""
Build a matrix to flip an image according to a given axis
Args:
            axis (str): axis along which the flip is done ("X", "Y" or "Z")
im_size (tuple(int, int, int)): coordinates of the top point (usually im shape)
Returns:
(ndarray): 4x4 flipping matrix
"""
out = np.identity(4)
if axis == "X":
out[0, 0] = -1
out[0, -1] = im_size[0]
if axis == "Y":
out[1, 1] = -1
out[1, -1] = im_size[1]
if axis == "Z":
out[2, 2] = -1
out[2, -1] = im_size[2]
return out
@staticmethod
def translation_matrix(axis: str, tr: float) -> np.ndarray:
"""
        Build a matrix to translate an image along a given axis
        Args:
            axis (str): axis along which the translation is done ("X", "Y" or "Z")
            tr (float): translation value
        Returns:
            (ndarray): 4x4 translation matrix
"""
out = np.identity(4)
if axis == "X":
out[0, -1] = tr
if axis == "Y":
out[1, -1] = tr
if axis == "Z":
out[2, -1] = tr
return out
@classmethod
def read_param_file(
clf, p_param: trsf_parameters = None
) -> trsf_parameters:
"""
Asks for, reads and formats the parameter file
"""
if not isinstance(p_param, dict):
if p_param is None:
if len(sys.argv) < 2 or sys.argv[1] == "-f":
p_param = input(
"\nPlease inform the path to the json config file:\n"
)
else:
p_param = sys.argv[1]
stable = False or isinstance(p_param, Path)
while not stable:
tmp = p_param.strip('"').strip("'").strip(" ")
stable = tmp == p_param
p_param = tmp
if os.path.isdir(p_param):
f_names = [
os.path.join(p_param, f)
for f in os.listdir(p_param)
if ".json" in f and not "~" in f
]
else:
f_names = [p_param]
else:
f_names = [p_param]
params = []
for file_name in f_names:
if isinstance(file_name, str):
print("")
print("Extraction of the parameters from file %s" % file_name)
p = trsf_parameters(file_name)
if not p.check_parameters_consistancy():
print("\n%s Failed the consistancy check, it will be skipped")
else:
params += [p]
print("")
return params
@staticmethod
def read_trsf(path: Union[str, Path]) -> np.ndarray:
"""
Read a transformation from a text file
Args:
path (str | Path): path to a transformation
Returns:
(ndarray): 4x4 transformation matrix
"""
f = open(path)
if f.read()[0] == "(":
f.close()
f = open(path)
lines = f.readlines()[2:-1]
f.close()
return np.array([[float(v) for v in l.split()] for l in lines])
else:
f.close()
return np.loadtxt(path)
@staticmethod
def prepare_paths(p: trsf_parameters):
"""
Prepare the paths in a format that is usable by the algorithm
Args:
            p (trsf_parameters): parameter object
"""
p.ref_A = os.path.join(p.path_to_data, p.ref_im)
p.flo_As = []
for flo_im in p.flo_ims:
p.flo_As += [os.path.join(p.path_to_data, flo_im)]
if os.path.split(p.out_pattern)[0] == "":
ext = p.ref_im.split(".")[-1]
p.ref_out = p.ref_A.replace(ext, p.out_pattern + "." + ext)
p.flo_outs = []
for flo in p.flo_As:
p.flo_outs += [flo.replace(ext, p.out_pattern + "." + ext)]
else:
if not os.path.exists(p.out_pattern):
os.makedirs(p.out_pattern)
p.ref_out = os.path.join(p.out_pattern, p.ref_im)
p.flo_outs = []
for flo in p.flo_ims:
p.flo_outs += [os.path.join(p.out_pattern, flo)]
if not hasattr(p, "trsf_paths"):
p.trsf_paths = [os.path.split(pi)[0] for pi in p.flo_outs]
p.trsf_names = ["A{a:d}-{trsf:s}.trsf" for _ in p.flo_outs]
else:
formated_paths = []
p.trsf_names = []
for pi in p.trsf_paths:
path, n = os.path.split(pi)
if os.path.splitext(n)[-1] == "":
n = ""
path = pi
if not os.path.exists(path):
os.makedirs(path)
if n == "":
n = "A{a:d}-{trsf:s}.trsf"
elif not "{a:" in n:
if not "{trsf:" in n:
n += "{a:d}-{trsf:s}.trsf"
else:
n += "{a:d}.trsf"
elif not "{trsf:" in n:
n += "{trsf:s}.trsf"
formated_paths += [path]
p.trsf_names += [n]
p.trsf_paths = formated_paths
if p.time_tag is not None:
s = p.ref_A.find(p.time_tag) + len(p.time_tag)
e = s
            while e < len(p.ref_A) and p.ref_A[e].isdigit():
e += 1
p.begin = p.end = int(p.ref_A[s:e])
else:
p.begin = p.end = None
if p.flo_im_sizes is None:
p.flo_im_sizes = []
for im_p in p.flo_As:
p.flo_im_sizes.append(imread(im_p).shape)
if (
not hasattr(p, "ref_im_size") or p.ref_im_size is None
) and p.flo_im_sizes is not None:
p.ref_im_size = p.flo_im_sizes[0]
else:
p.ref_im_size = imread(p.ref_A).shape
if not hasattr(p, "bdv_im") or p.bdv_im is None:
p.bdv_im = [p.ref_A] + p.flo_As
if not hasattr(p, "out_bdv") or p.out_bdv is None:
p.out_bdv = os.path.join(p.trsf_paths[0], "bdv.xml")
if p.bdv_voxel_size is None:
p.bdv_voxel_size = p.ref_voxel
@staticmethod
def inv_trsf(trsf: Union[np.ndarray, List[List]]) -> np.ndarray:
"""
Inverse a given transformation
Args:
trsf (np.ndarray): 4x4 ndarray
Returns:
(np.ndarray): the 4x4 inverted matrix
"""
return np.linalg.lstsq(trsf, np.identity(4))[0]
def vox_to_real(
self,
trsf: Union[np.ndarray, List[List]],
ref_vs: List[float],
) -> np.ndarray:
"""
        Transform a transformation from voxel units to physical units
Args:
trsf (ndarray): 4x4 matrix
ref_vs (list(float, float, float)): initial voxel size in each dimension
Returns:
(np.ndarray): new matrix in metric size
"""
H_ref = [
[ref_vs[0], 0, 0, 0],
[0, ref_vs[1], 0, 0],
[0, 0, ref_vs[2], 0],
[0, 0, 0, 1],
]
H_ref_inv = self.inv_trsf(H_ref)
return np.dot(trsf, H_ref_inv)
def compute_trsfs(self, p: trsf_parameters):
"""
Here is where the magic happens, give as an input the trsf_parameters object, get your transformations computed
Args:
p (trsf_parameters): parameters to compute the transformation
"""
for A_num, flo_A in enumerate(p.flo_As):
flo_voxel = p.flo_voxels[A_num]
init_trsf = p.init_trsfs[A_num]
trsf_path = p.trsf_paths[A_num]
trsf_name = p.trsf_names[A_num]
if isinstance(init_trsf, list):
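                # init_trsf is given as a flat list of (type, axis[, value]) entries,
                # e.g. ["rot", "X", 90, "flip", "Y", "trans", "Z", 100.0] (illustrative);
                # rotations and translations take a value, flips do not.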
i = 0
trsfs = []
im_size = (
np.array(p.flo_im_sizes[A_num], dtype=float) * flo_voxel
)
while i < len(init_trsf):
t_type = init_trsf[i]
i += 1
axis = init_trsf[i]
i += 1
if "rot" in t_type:
angle = init_trsf[i]
i += 1
trsfs += [
self.axis_rotation_matrix(
axis.upper(), angle, np.zeros(3), im_size
)
]
elif "flip" in t_type:
trsfs += [self.flip_matrix(axis.upper(), im_size)]
elif "trans" in t_type:
tr = init_trsf[i]
i += 1
trsfs += [self.translation_matrix(axis, tr)]
res = np.identity(4)
for trsf in trsfs:
res = np.dot(res, trsf)
if not os.path.exists(trsf_path):
os.makedirs(trsf_path)
init_trsf = os.path.join(
trsf_path, "A{:d}-init.trsf".format(A_num + 1)
)
np.savetxt(init_trsf, res)
elif not p.init_trsf_real_unit and init_trsf is not None:
tmp = self.vox_to_real(
self.inv_trsf(self.read_trsf(init_trsf)),
flo_voxel,
p.ref_voxel,
)
init_ext = init_trsf.split(".")[-1]
init_trsf = init_trsf.replace(init_ext, "real.txt")
np.savetxt(init_trsf, self.inv_trsf(tmp))
if init_trsf is not None:
init_trsf_command = " -init-trsf {:s}".format(init_trsf)
else:
init_trsf_command = ""
i = 0
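            # The transformation types in p.trsf_types are estimated successively by
            # the external `blockmatching` binary, each step initialised with the
            # result of the previous one; the last type is handled separately below
            # so that its inverse can also be written out.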
if not p.test_init:
for i, trsf_type in enumerate(p.trsf_types[:-1]):
if i != 0:
init_trsf_command = " -init-trsf {:s}".format(
os.path.join(trsf_path, res_trsf)
)
res_trsf = os.path.join(
trsf_path,
trsf_name.format(a=A_num + 1, trsf=trsf_type),
)
call(
p.path_to_bin
+ "blockmatching -ref "
+ p.ref_A.format(t=p.begin)
+ " -flo "
+ flo_A.format(t=p.begin)
+ " -reference-voxel %f %f %f" % p.ref_voxel
+ " -floating-voxel %f %f %f" % flo_voxel
+ " -trsf-type %s -py-hl %d -py-ll %d"
% (
trsf_type,
p.registration_depth_start,
p.registration_depth_end,
)
+ init_trsf_command
+ " -res-trsf "
+ res_trsf
+ " -composition-with-initial",
shell=True,
)
trsf_type = p.trsf_types[-1]
i = len(p.trsf_types) - 1
if i != 0:
init_trsf_command = " -init-trsf {:s}".format(
os.path.join(trsf_path, res_trsf)
)
res_trsf = os.path.join(
trsf_path, trsf_name.format(a=A_num + 1, trsf=trsf_type)
)
res_inv_trsf = os.path.join(
trsf_path,
("inv-" + trsf_name).format(a=A_num + 1, trsf=trsf_type),
)
call(
p.path_to_bin
+ "blockmatching -ref "
+ p.ref_A.format(t=p.begin)
+ " -flo "
+ flo_A.format(t=p.begin)
+ " -reference-voxel %f %f %f" % p.ref_voxel
+ " -floating-voxel %f %f %f" % flo_voxel
+ " -trsf-type %s -py-hl %d -py-ll %d"
% (
trsf_type,
p.registration_depth_start,
p.registration_depth_end,
)
+ init_trsf_command
+ " -res-trsf "
+ res_trsf
+ # ' -res-voxel-trsf ' + res_voxel_trsf + \
# ' -res ' + flo_out +\
" -composition-with-initial",
shell=True,
)
call(
p.path_to_bin + "invTrsf %s %s" % (res_trsf, res_inv_trsf),
shell=True,
)
@staticmethod
def pad_trsfs(p: trsf_parameters, t: int = None):
"""
Pad transformations
Args:
p (trsf_parameters): parameter object
            t (int): time point used to format the reference image path
"""
out_voxel = p.out_voxel
trsf_path = p.trsf_paths[0]
trsf_name = p.trsf_names[0]
trsf_type = p.trsf_types[-1]
im_shape = imread(p.ref_A.format(t=t)).shape
im = SpatialImage(np.ones(im_shape, dtype=np.uint8))
im.voxelsize = p.ref_voxel
template = os.path.join(trsf_path, "tmp.tif")
res_t = "template.tif"
imsave(template, im)
identity = np.identity(4)
if p.test_init:
trsf_name_only_a = "A{a:d}-init.trsf"
else:
where_a = trsf_name.find("{a:d}")
no_a = (trsf_name[:where_a] + trsf_name[where_a + 5 :]).format(
trsf=trsf_type
)
trsf_name_only_a = no_a[:where_a] + "{a:d}" + no_a[where_a:]
trsf_fmt = os.path.join(trsf_path, trsf_name_only_a)
trsf_fmt_no_flo = trsf_fmt.replace("{a:d}", "%d")
new_trsf_fmt = ".".join(trsf_fmt.split(".")[:-1]) + "-padded.txt"
new_trsf_fmt_no_flo = new_trsf_fmt.replace("{a:d}", "%d")
np.savetxt(trsf_fmt.format(a=0, trsf=trsf_type), identity)
call(
p.path_to_bin
+ "changeMultipleTrsfs -trsf-format "
+ trsf_fmt_no_flo
+ " -index-reference %d -first %d -last %d "
% (0, 0, len(p.trsf_paths))
+ " -template "
+ template
+ " -res "
+ new_trsf_fmt_no_flo
+ " -res-t "
+ os.path.join(trsf_path, res_t)
+ " "
+ " -trsf-type %s" % (trsf_type),
shell=True,
)
template = os.path.join(trsf_path, res_t)
return new_trsf_fmt, template
def apply_trsf(self, p, t=None):
"""
Apply the transformation according to `trsf_parameters`
Args:
            p (trsf_parameters): parameters for the transformation
            t (int): time point to process, used to format the image paths
        """
if p.bbox_out:
trsf_fmt, template = self.pad_trsfs(p, t)
if p.out_voxel != p.ref_voxel:
before, after = os.path.splitext(template)
old_template = template
template = "".join((before, ".final_template", after))
call(
p.path_to_bin
+ f"applyTrsf {old_template} {template} "
+ "-vs %f %f %f" % p.out_voxel,
shell=True,
)
A0_trsf = (
" -trsf " + trsf_fmt.format(a=0) + " -template " + template
)
else:
A0_trsf = ""
if p.out_voxel != p.ref_voxel or p.bbox_out:
call(
p.path_to_bin
+ "applyTrsf"
+ " -flo "
+ p.ref_A.format(t=t)
+ " -res "
+ p.ref_out.format(t=t)
+ " -floating-voxel %f %f %f" % p.ref_voxel
+ " -vs %f %f %f" % p.out_voxel
+ A0_trsf,
shell=True,
)
elif p.copy_ref:
copyfile(p.ref_A.format(t=t), p.ref_out.format(t=t))
else:
p.ref_out = p.ref_A
for A_num, flo_A in enumerate(p.flo_As):
flo_voxel = p.flo_voxels[A_num]
trsf_path = p.trsf_paths[A_num]
trsf_name = p.trsf_names[A_num]
init_trsf = p.init_trsfs[A_num]
if p.test_init:
if isinstance(init_trsf, list):
trsf = " -trsf " + os.path.join(
trsf_path, "A{:d}-init.trsf".format(A_num + 1)
)
else:
trsf = " -trsf " + init_trsf
elif not p.bbox_out:
t_type = "" if len(p.trsf_types) < 1 else p.trsf_types[-1]
trsf = " -trsf " + os.path.join(
trsf_path, trsf_name.format(a=A_num + 1, trsf=t_type)
)
else:
trsf = (
" -trsf "
+ trsf_fmt.format(a=A_num + 1)
+ " -template "
+ template
)
flo_out = p.flo_outs[A_num]
call(
p.path_to_bin
+ "applyTrsf"
+ " -flo "
+ flo_A.format(t=t)
+ " -floating-voxel %f %f %f" % flo_voxel
+ " -res "
+ flo_out.format(t=t)
+ " -ref "
+ p.ref_out.format(t=t)
+ " -reference-voxel %f %f %f" % p.out_voxel
+ trsf
+ " -interpolation %s" % p.image_interpolation,
shell=True,
)
@staticmethod
def prettify(elem: ET.Element) -> str:
"""
Return a pretty-printed XML string for the Element.
Args:
elem (xml.etree.ElementTree.Element): xml element
Returns:
(str): a nice version of our xml file
"""
rough_string = ET.tostring(elem, "utf-8")
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
@staticmethod
def do_viewSetup(
ViewSetup: ET.SubElement,
p: trsf_parameters,
im_size: Tuple[int, int, int],
i: int,
):
"""
Setup xml elements for BigDataViewer
Args:
ViewSetup (ET.SubElement): ...
p (trsf_parameters): Parameter object
im_size (tuple): tuple of 3 integers with the dimension of the image
i (int): angle id
"""
id_ = ET.SubElement(ViewSetup, "id")
id_.text = "%d" % i
name = ET.SubElement(ViewSetup, "name")
name.text = "%d" % i
size = ET.SubElement(ViewSetup, "size")
size.text = "%d %d %d" % tuple(im_size)
voxelSize = ET.SubElement(ViewSetup, "voxelSize")
unit = ET.SubElement(voxelSize, "unit")
unit.text = p.bdv_unit
size = ET.SubElement(voxelSize, "size")
size.text = "%f %f %f" % tuple(p.bdv_voxel_size)
attributes = ET.SubElement(ViewSetup, "attributes")
illumination = ET.SubElement(attributes, "illumination")
illumination.text = "0"
channel = ET.SubElement(attributes, "channel")
channel.text = "0"
tile = ET.SubElement(attributes, "tile")
tile.text = "0"
angle = ET.SubElement(attributes, "angle")
angle.text = "%d" % i
def do_ViewRegistration(
self,
ViewRegistrations: ET.SubElement,
p: trsf_parameters,
t: int,
a: int,
):
"""
Write the view registration for BigDataViewer
Args:
            ViewRegistrations (ET.SubElement): ...
p (trsf_parameters): Parameter object
t (int): time point to treat
a (int): angle to treat
"""
ViewRegistration = ET.SubElement(ViewRegistrations, "ViewRegistration")
ViewRegistration.set("timepoint", "%d" % t)
ViewRegistration.set("setup", "%d" % a)
ViewTransform = ET.SubElement(ViewRegistration, "ViewTransform")
ViewTransform.set("type", "affine")
if a != 0:
affine = ET.SubElement(ViewTransform, "affine")
trsf_path = p.trsf_paths[a - 1]
trsf_name = p.trsf_names[a - 1]
trsf_type = p.trsf_types[-1]
f = os.path.join(
trsf_path, ("inv-" + trsf_name).format(a=a, trsf=trsf_type)
)
trsf = self.read_trsf(f)
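            # BigDataViewer expects an affine as the first three rows of the 4x4
            # matrix, flattened row-major into 12 values.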
formated_trsf = tuple(trsf[:-1, :].flatten())
affine.text = ("%f " * 12) % formated_trsf
ViewTransform = ET.SubElement(ViewRegistration, "ViewTransform")
ViewTransform.set("type", "affine")
affine = ET.SubElement(ViewTransform, "affine")
affine.text = (
"%f 0.0 0.0 0.0 0.0 %f 0.0 0.0 0.0 0.0 %f 0.0"
% p.flo_voxels[a - 1]
)
else:
affine = ET.SubElement(ViewTransform, "affine")
affine.text = (
"%f 0.0 0.0 0.0 0.0 %f 0.0 0.0 0.0 0.0 %f 0.0" % p.ref_voxel
)
def build_bdv(self, p: trsf_parameters):
"""
Build the BigDataViewer xml
Args:
p (trsf_parameters): Parameter object
"""
SpimData = ET.Element("SpimData")
SpimData.set("version", "0.2")
base_path = ET.SubElement(SpimData, "BasePath")
base_path.set("type", "relative")
base_path.text = "."
SequenceDescription = ET.SubElement(SpimData, "SequenceDescription")
ImageLoader = ET.SubElement(SequenceDescription, "ImageLoader")
ImageLoader.set("format", "klb")
Resolver = ET.SubElement(ImageLoader, "Resolver")
Resolver.set(
"type", "org.janelia.simview.klb.bdv.KlbPartitionResolver"
)
for im_p in p.bdv_im:
ViewSetupTemplate = ET.SubElement(Resolver, "ViewSetupTemplate")
template = ET.SubElement(ViewSetupTemplate, "template")
template.text = im_p
timeTag = ET.SubElement(ViewSetupTemplate, "timeTag")
timeTag.text = p.time_tag
ViewSetups = ET.SubElement(SequenceDescription, "ViewSetups")
ViewSetup = ET.SubElement(ViewSetups, "ViewSetup")
i = 0
self.do_viewSetup(ViewSetup, p, p.ref_im_size, i)
for i, pi in enumerate(p.flo_voxels):
ViewSetup = ET.SubElement(ViewSetups, "ViewSetup")
self.do_viewSetup(ViewSetup, p, p.flo_im_sizes[i], i + 1)
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "illumination")
Illumination = ET.SubElement(Attributes, "Illumination")
id_ = ET.SubElement(Illumination, "id")
id_.text = "0"
name = ET.SubElement(Illumination, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "channel")
Channel = ET.SubElement(Attributes, "Channel")
id_ = ET.SubElement(Channel, "id")
id_.text = "0"
name = ET.SubElement(Channel, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "tile")
Tile = ET.SubElement(Attributes, "Tile")
id_ = ET.SubElement(Tile, "id")
id_.text = "0"
name = ET.SubElement(Tile, "name")
name.text = "0"
Attributes = ET.SubElement(ViewSetups, "Attributes")
Attributes.set("name", "angle")
for i in range(len(p.flo_voxels) + 1):
Angle = ET.SubElement(Attributes, "Angle")
id_ = ET.SubElement(Angle, "id")
id_.text = "%d" % i
name = ET.SubElement(Angle, "name")
name.text = "%d" % i
TimePoints = ET.SubElement(SequenceDescription, "Timepoints")
TimePoints.set("type", "range")
first = ET.SubElement(TimePoints, "first")
first.text = "%d" % p.begin
last = ET.SubElement(TimePoints, "last")
last.text = "%d" % p.end
ViewRegistrations = ET.SubElement(SpimData, "ViewRegistrations")
for t in range(p.begin, p.end + 1):
self.do_ViewRegistration(ViewRegistrations, p, t, 0)
for a in range(len(p.flo_voxels)):
self.do_ViewRegistration(ViewRegistrations, p, t, a + 1)
with open(p.out_bdv, "w") as f:
f.write(self.prettify(SpimData))
f.close()
def run_trsf(self):
"""
        Start the spatial registration once the parameter files have been provided
"""
for p in self.params:
try:
print("Starting experiment")
print(p)
self.prepare_paths(p)
if p.compute_trsf or p.test_init:
self.compute_trsfs(p)
if p.apply_trsf or p.test_init:
if not (p.begin is None and p.end is None):
for t in range(p.begin, p.end + 1):
self.apply_trsf(p, t)
else:
self.apply_trsf(p)
if p.do_bdv:
self.build_bdv(p)
except Exception as e:
print("Failure of %s" % p.origin_file_name)
print(e)
def __init__(self, params=None):
if params is None:
self.params = self.read_param_file()
elif (
isinstance(params, str)
or isinstance(params, Path)
or isinstance(params, dict)
):
self.params = SpatialRegistration.read_param_file(params)
else:
self.params = params
if self.params is not None and 0 < len(self.params):
self.path_to_bin = self.params[0].path_to_bin
def spatial_registration():
reg = SpatialRegistration()
reg.run_trsf()
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/registrationtools/spatial_registration.py | spatial_registration.py |
__version__ = "0.4.3"
from .time_registration import TimeRegistration, time_registration
from .spatial_registration import SpatialRegistration, spatial_registration
from .utils import *
import sys
if 8 < sys.version_info.minor:
import importlib.resources as importlib_resources
pkg = importlib_resources.files("registrationtools") / "data"
else:
from importlib_resources._legacy import path
pkg = path("registrationtools", "data").args[0]
image_path = pkg / "images"
json_path_spatial = pkg / "JSON_spatial"
json_path_time = pkg / "JSON_time"
| 3D-registration | /3D-registration-0.4.3.tar.gz/3D-registration-0.4.3/src/registrationtools/__init__.py | __init__.py |
3DCORE
========
3D Coronal Rope Ejection modelling technique for coronal mass ejection (CME) flux ropes.
Installation
------------
Install the latest version manually using `git` and `pip`:
git clone https://github.com/ajefweiss/py3DCORE
cd 3DCORE
pip install . | 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/README.md | README.md |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="3DCORE",
packages=find_packages(),
package_data={'': ['*.json']},
include_package_data=True,
version="1.1.4",
author="Andreas J. Weiss",
author_email="andreas.weiss@oeaw.ac.at",
description="3D Coronal Rope Ejection Model",
keywords=["astrophysics", "solar physics", "space weather"],
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
url="https://github.com/ajefweiss/py3DCORE",
install_requires=[
"heliosat",
"numba",
"numpy",
"scipy",
"spiceypy"
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
],
)
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/setup.py | setup.py |
# -*- coding: utf-8 -*-
"""util.py
3DCORE utility functions.
"""
import logging
import py3dcore
import sys
def configure_logging(debug=False, logfile=None, verbose=False):
"""Configures built in python logger.
Parameters
----------
debug : bool, optional
Enable debug logging.
logfile : None, optional
Logfile path.
verbose : bool, optional
Enable verbose logging.
"""
root = logging.getLogger()
root.setLevel(logging.DEBUG)
stream = logging.StreamHandler(sys.stdout)
if debug and verbose:
stream.setLevel(logging.DEBUG)
elif verbose:
stream.setLevel(logging.INFO)
else:
stream.setLevel(logging.WARNING)
stream.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(message)s"))
root.addHandler(stream)
if logfile:
file = logging.FileHandler(logfile, "a")
if debug:
file.setLevel(logging.DEBUG)
else:
file.setLevel(logging.INFO)
file.setFormatter(
logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
root.addHandler(file)
# disable annoying loggers
logging.getLogger("numba.byteflow").setLevel("WARNING")
logging.getLogger("numba.cuda.cudadrv.driver").setLevel("WARNING")
logging.getLogger("numba.interpreter").setLevel("WARNING")
def select_model(model):
if model.upper() == "TTGHV1" or model.upper() == "THIN_TORUS_GH" or model.upper() == "THINTORUSGH3DCOREMODEL":
return py3dcore.models.TTGHv1
elif model.upper() == "TTGHV2":
return py3dcore.models.TTGHv2
elif model.upper() == "TTGHV2T":
return py3dcore.models.TTGHv2T
elif model.upper() == "TTGHV3":
return py3dcore.models.TTGHv3
elif model.upper() == "TTNCV2":
return py3dcore.models.TTNCv2
else:
raise NotImplementedError("unkown model \"%s\"", model.upper())
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/util.py | util.py |
# -*- coding: utf-8 -*-
"""_extcuda.py
Utility functions for Numba CUDA.
"""
import logging
import numba.cuda as cuda
import numpy as np
from functools import lru_cache
@lru_cache()
def get_threads_blocks(length, max_thread_size=9):
"""Calculate thread/block sizes for launching CUDA kernels.
Parameters
----------
length : int
Number of threads.
max_thread_size : int
Maximum thread size in 2^N.
Returns
-------
(int, int)
Number of threads and thread blocks.
"""
logger = logging.getLogger(__name__)
counter = 0
_length = length
while True:
if _length % 2 == 1:
logger.warning("get_threads_blocks could not fully factorize the number of threads")
break
_length //= 2
counter += 1
if counter >= max_thread_size:
break
return 2**counter, _length
def initialize_array(shape, value, dtype=np.float32):
"""Create and initialize cuda device array.
Parameters
----------
shape : tuple
Array shape.
value: np.float32
Value to fill array with.
dtype: type, optional
Array data type, by default np.float32.
Returns
-------
Initialized cuda device array.
numba.cuda.cudadrv.devicearray.DeviceNDArray
"""
arr = cuda.device_array(shape, dtype=dtype).ravel()
threads, blocks = get_threads_blocks(len(arr))
_cuda_initialize_array[blocks, threads](arr, value)
return arr.reshape(shape)
@cuda.jit
def _cuda_initialize_array(array, value):
i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
array[i] = value
def extract_array_column(array, column):
"""Exctract column from array .
Parameters
----------
array : numba.cuda.cudadrv.devicearray.DeviceNDArray
Cuda Array
column: int, optional
Column to be extracted
Returns
-------
Extracted column as cuda device array.
numba.cuda.cudadrv.devicearray.DeviceNDArray
"""
_array = cuda.device_array((len(array),), dtype=array.dtype)
threads, blocks = get_threads_blocks(len(_array))
_cuda_extract_column[blocks, threads](array, column, _array)
return _array
@cuda.jit
def _cuda_extract_column(array, column, out):
i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
out[i] = array[i, column]
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/_extcuda.py | _extcuda.py |
# -*- coding: utf-8 -*-
"""params.py
Implements the base 3DCORE parameter class.
"""
import logging
import numba
import numpy as np
import scipy
import scipy.stats
class Base3DCOREParameters(object):
"""Base 3DCORE parameters class.
"""
params_dict = None
dtype = None
qindices = None
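    # Illustrative shape of a single params_dict entry (the key/name below are
    # hypothetical; the field names are the ones read by generate and _update_arr):
    #   {"cme_longitude": {"index": 1, "name": "cme_longitude", "active": 1,
    #                      "distribution": "uniform", "boundary": "periodic",
    #                      "minimum": 0, "maximum": 360}}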
def __init__(self, params_dict, **kwargs):
"""Initialize Base3DCOREParameters class.
Parameters
----------
params_dict : dict
Parameter dictionary.
Other Parameters
----------------
dtype: type
Numpy data type, by default np.float32.
qindices: np.ndarray
Quaternion array columns, by default [1, 2, 3].
"""
self.params_dict = params_dict
self.dtype = kwargs.get("dtype", np.float32)
self.qindices = np.array(
kwargs.get("qindices", np.array([1, 2, 3]))
).ravel().astype(int)
# update arrays
self._update_arr()
def __getitem__(self, key):
for dk in self.params_dict:
if self.params_dict[dk]["index"] == key or self.params_dict[dk]["name"] == key:
return self.params_dict[dk]
if isinstance(key, int):
raise KeyError("key %i is out of range", key)
else:
raise KeyError("key \"%s\" does not exist", key)
def __iter__(self):
self.iter_n = 0
return self
def __len__(self):
return len(self.params_dict.keys())
def __next__(self):
if self.iter_n < len(self):
result = self[self.iter_n]
self.iter_n += 1
return result
else:
raise StopIteration
def generate(self, iparams_arr, **kwargs):
"""Generate initial parameters
Parameters
----------
iparams : Union[np.ndarray, List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Initial parameters array.
Other Parameters
----------------
rng_states : List[numba.cuda.cudadrv.devicearray.DeviceNDArray]
CUDA rng state array.
use_cuda : bool
CUDA flag, by default False.
"""
size = len(iparams_arr)
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
for param in self:
index = param["index"]
if param["distribution"] == "fixed":
iparams_arr[:, index] = param["fixed_value"]
elif param["distribution"] == "uniform":
maxv = param["maximum"]
minv = param["minimum"]
iparams_arr[:, index] = np.random.rand(size) * (maxv - minv) + minv
elif param["distribution"] == "gaussian":
maxv = param["maximum"]
minv = param["minimum"]
kwargs = {
"loc": param["mean"],
"scale": param["std"]
}
iparams_arr[:, index] = draw_iparams(np.random.normal, maxv, minv, size,
**kwargs)
elif param["distribution"] == "gamma":
maxv = param["maximum"]
minv = param["minimum"]
kwargs = {
"loc": param["shape"],
"scale": param["scale"]
}
iparams_arr[:, index] = draw_iparams(np.random.gamma, maxv, minv, size,
**kwargs)
elif param["distribution"] == "log-uniform":
maxv = param["maximum"]
minv = param["minimum"]
kwargs = {
"a": minv,
"b": maxv
}
iparams_arr[:, index] = draw_iparams(scipy.stats.loguniform.rvs, maxv, minv,
size, **kwargs)
elif param["distribution"] == "exponential":
maxv = param["maximum"]
minv = param["minimum"]
kwargs = {
"scale": param["scale"]
}
iparams_arr[:, index] = draw_iparams(custom_exponential, maxv, minv,
size, **kwargs)
else:
raise NotImplementedError("%s distribution is not implemented",
param["distribution"])
self._update_arr()
def perturb(self, iparams_arr, particles, weights, kernels_lower, **kwargs):
"""Perburb parameters.
Parameters
----------
iparams : Union[np.ndarray, List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Initial parameters array.
particles : Union[np.ndarray, List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Particles.
weights : Union[np.ndarray, List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Weight array.
kernels_lower : Union[np.ndarray, List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Transition kernels in the form of a lower triangular matrix.
Number of dimensions determines the type of method used.
Other Parameters
----------------
rng_states : List[numba.cuda.cudadrv.devicearray.DeviceNDArray]
CUDA rng state array.
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
if kernels_lower.ndim == 1:
raise DeprecationWarning("1D transition kernels are no longer supported")
elif kernels_lower.ndim == 2:
# perturbation with one covariance matrix
_numba_perturb_with_kernel(iparams_arr, particles, weights, kernels_lower,
self.type_arr, self.bound_arr, self.maxv_arr,
self.minv_arr)
elif kernels_lower.ndim == 3:
# perturbation with local covariance matrices
_numba_perturb_with_kernels(iparams_arr, particles, weights, kernels_lower,
self.type_arr, self.bound_arr, self.maxv_arr,
self.minv_arr)
else:
raise ValueError("kernel array must be 3-dimensional or lower")
def weight(self, particles, particles_old, weights, weights_old, kernels, **kwargs):
"""Update particle weights.
Parameters
----------
particles : Union[np.ndarray,
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Particle array.
particles_old : Union[np.ndarray,
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Old particle array.
weights : Union[np.ndarray,
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
            Particle weights array.
weights_old : Union[np.ndarray,
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Old particle weights array.
kernels : Union[np.ndarray,
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
Transition kernels. Number of dimensions determines the type of method used.
Other Parameters
----------------
exclude_priors: bool
Exclude prior distributions from weighting, by default False.
use_cuda : bool
CUDA flag, by default False.
"""
logger = logging.getLogger(__name__)
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
if kernels.ndim == 1:
raise DeprecationWarning("1D transition kernels are no longer supported")
elif kernels.ndim == 2:
_numba_weights_with_kernel(particles, particles_old,
weights, weights_old, kernels)
elif kernels.ndim == 3:
_numba_weights_with_kernels(particles, particles_old,
weights, weights_old, kernels)
else:
raise ValueError("kernel array must be 3-dimensional or lower")
# include priors into weighting scheme
if not kwargs.get("exclude_priors", False):
_numba_calculate_weights_priors(
particles, weights, self.type_arr, self.dp1_arr, self.dp2_arr)
# a hack for abnormally big weights
quant = np.quantile(weights, 0.99)
for i in range(0, len(weights)):
if weights[i] > quant:
weights[i] = 0
# find and remove bad weights (NaN)
nanc = np.sum(np.isnan(weights))
infc = np.sum(weights == np.inf)
if nanc + infc > 0:
logger.warning("detected %i NaN/inf weights, setting to 0", nanc + infc)
weights[np.isnan(weights)] = 0
weights[weights == np.inf] = 0
# renormalize
wsum = np.sum(weights)
for i in range(0, len(weights)):
weights[i] /= wsum
def _update_arr(self):
"""Translate params_dict to arrays.
Following distributions are supported:
- 0 fixed value (delta)
- 1 uniform
- 2 gaussian, sigma
- 3 gamma, shape+scale
- 4 log-uniform
- 5 exponential, scale
"""
self.active = np.zeros((len(self), ), dtype=self.dtype)
self.bound_arr = np.zeros((len(self), ), dtype=self.dtype)
self.maxv_arr = np.zeros((len(self), ), dtype=self.dtype)
self.dp1_arr = np.zeros((len(self), ), dtype=self.dtype)
self.dp2_arr = np.zeros((len(self), ), dtype=self.dtype)
self.minv_arr = np.zeros((len(self), ), dtype=self.dtype)
self.type_arr = np.zeros((len(self), ), dtype=self.dtype)
for param in self:
index = param["index"]
self.active[index] = param.get("active", 1)
if param["distribution"] == "fixed":
self.type_arr[index] = 0
self.maxv_arr[index] = param["fixed_value"]
elif param["distribution"] == "uniform":
self.type_arr[index] = 1
self.maxv_arr[index] = param["maximum"]
self.minv_arr[index] = param["minimum"]
elif param["distribution"] == "gaussian":
self.type_arr[index] = 2
self.maxv_arr[index] = param["maximum"]
self.minv_arr[index] = param["minimum"]
self.dp1_arr[index] = param.get("mean", 0)
self.dp2_arr[index] = param.get("std", 0)
elif param["distribution"] == "gamma":
self.type_arr[index] = 3
self.maxv_arr[index] = param["maximum"]
self.minv_arr[index] = param["minimum"]
self.dp1_arr[index] = param.get("shape", 0)
self.dp2_arr[index] = param.get("scale", 0)
elif param["distribution"] == "log-uniform":
self.type_arr[index] = 4
self.maxv_arr[index] = param["maximum"]
self.minv_arr[index] = param["minimum"]
elif param["distribution"] == "exponential":
self.type_arr[index] = 5
self.maxv_arr[index] = param["maximum"]
self.minv_arr[index] = param["minimum"]
self.dp1_arr[index] = param.get("scale", 0)
else:
raise NotImplementedError("%s distribution is not implemented",
param["distribution"])
if param["boundary"] == "continuous":
self.bound_arr[index] = 0
elif param["boundary"] == "periodic":
self.bound_arr[index] = 1
else:
raise NotImplementedError("%s boundary condition is not implemented",
param["boundary"])
def draw_iparams(func, maxv, minv, size, **kwargs):
"""Draw random numbers from func, within the interval [minv, maxv].
"""
i = 0
numbers = func(size=size, **kwargs)
while True:
filter = ((numbers > maxv) | (numbers < minv))
if np.sum(filter) == 0:
break
numbers[filter] = func(size=len(filter), **kwargs)[filter]
i += 1
if i > 1000:
raise RuntimeError("drawing numbers inefficiently (%i/%i after 1000 iterations)",
len(filter), size)
return numbers
def custom_exponential(**kwargs):
numbers = np.random.exponential(**kwargs)
signs = np.random.randint(2, size=len(numbers))
return numbers * (1 - 2 * signs)
@numba.njit
def _numba_perturb_with_kernel(iparams_arr, particles, weights, kernel_lower, type_arr,
bound_arr, maxv_arr, minv_arr):
for i in range(len(iparams_arr)):
# particle selector si
r = np.random.rand(1)[0]
for j in range(len(weights)):
r -= weights[j]
if r <= 0:
si = j
break
# draw candidates until a candidite is within the valid range
c = 0
Nc = 25
perturbations = np.dot(kernel_lower, np.random.randn(len(particles[si]), Nc))
while True:
candidate = particles[si] + perturbations[:, c]
accepted = True
for k in range(len(candidate)):
if candidate[k] > maxv_arr[k]:
if bound_arr[k] == 0:
accepted = False
break
elif bound_arr[k] == 1:
candidate[k] = minv_arr[k] + candidate[k] - maxv_arr[k]
else:
raise NotImplementedError
if candidate[k] < minv_arr[k]:
if bound_arr[k] == 0:
accepted = False
break
elif bound_arr[k] == 1:
candidate[k] = maxv_arr[k] + candidate[k] - minv_arr[k]
else:
raise NotImplementedError
if accepted:
break
c += 1
if c >= Nc - 1:
c = 0
perturbations = np.dot(kernel_lower, np.random.randn(len(particles[si]), Nc))
iparams_arr[i] = candidate
@numba.njit
def _numba_perturb_with_kernels(iparams_arr, particles, weights, kernels_lower, type_arr,
bound_arr, maxv_arr, minv_arr):
for i in range(len(iparams_arr)):
# particle selector si
r = np.random.rand(1)[0]
for j in range(len(weights)):
r -= weights[j]
if r <= 0:
si = j
break
#print("selected candidate ", si)
# draw candidates until a candidate is within the valid range
c = 0
Nc = 25
perturbations = np.dot(kernels_lower[si], np.random.randn(len(particles[si]), Nc))
while True:
#print("iter ", c)
candidate = particles[si] + perturbations[:, c]
#print(candidate)
#print(bound_arr)
accepted = True
for k in range(len(candidate)):
#print("cand k ", k)
if candidate[k] > maxv_arr[k]:
if bound_arr[k] == 0:
accepted = False
#print("out of bounds")
break
elif bound_arr[k] == 1:
while candidate[k] > maxv_arr[k]:
candidate[k] = minv_arr[k] + candidate[k] - maxv_arr[k]
#print("redrawing")
else:
#print("error raise?")
raise NotImplementedError
if candidate[k] < minv_arr[k]:
if bound_arr[k] == 0:
accepted = False
break
elif bound_arr[k] == 1:
while candidate[k] < minv_arr[k]:
candidate[k] = maxv_arr[k] + candidate[k] - minv_arr[k]
else:
#print("error raise?")
raise NotImplementedError
if accepted:
break
c += 1
if c >= Nc - 1:
c = 0
perturbations = np.dot(kernels_lower[si], np.random.randn(len(particles[si]), Nc))
#print("bad drawing")
#print("saving candidate")
iparams_arr[i] = candidate
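# The helpers below compute ABC-SMC style importance weights,
# w_i = 1 / sum_j( w_prev_j * exp(-(x_i - x_j)^T K^-1 (x_i - x_j)) ),
# using either a single global kernel K or one kernel per previous particle;
# prior factors are applied afterwards in _numba_calculate_weights_priors.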
@numba.njit(parallel=True)
def _numba_weights_with_kernel(particles, particles_prev, weights, weights_prev, kernel):
inv_kernel = np.linalg.pinv(kernel).astype(particles.dtype)
for i in numba.prange(len(particles)):
nw = 0
for j in range(len(particles_prev)):
v = _numba_calculate_weights_reduce(particles[i], particles_prev[j], inv_kernel)
nw += weights_prev[j] * np.exp(-v)
weights[i] = 1 / nw
@numba.njit(parallel=True)
def _numba_weights_with_kernels(particles, particles_prev, weights, weights_prev, kernels):
inv_kernels = np.zeros_like(kernels).astype(particles.dtype)
# compute kernel inverses
for i in numba.prange(len(kernels)):
inv_kernels[i] = np.linalg.pinv(kernels[i])
for i in numba.prange(len(particles)):
nw = 0
for j in range(len(particles_prev)):
v = _numba_calculate_weights_reduce(particles[i], particles_prev[j], inv_kernels[j])
nw += weights_prev[j] * np.exp(-v)
weights[i] = 1 / nw
@numba.njit(inline="always")
def _numba_calculate_weights_reduce(x1, x2, A):
dx = (x1 - x2).astype(A.dtype)
return np.dot(dx, np.dot(A, dx))
# numba does not support scipy functions
# @numba.njit(parallel=True)
def _numba_calculate_weights_priors(particles, weights, type_arr, dp1_arr, dp2_arr):
for i in range(len(weights)):
for j in range(len(type_arr)):
if type_arr[j] <= 1:
# fixed or uniform, no weighting
pass
elif type_arr[j] == 2:
# gaussian distribution
weights[i] *= np.exp(-0.5 * (particles[i, j] -
dp1_arr[j])**2/dp2_arr[j]**2) / dp2_arr[j]
elif type_arr[j] == 3:
# gamma distribution
weights[i] *= np.exp(-particles[i, j]/dp2_arr[j]) * particles[i, j] ** (
dp1_arr[j] - 1) / scipy.special.gamma(dp1_arr[j]) / dp2_arr[j]**dp1_arr[j]
elif type_arr[j] == 4:
# log-uniform distribution
weights[i] *= 1 / particles[i, j]
elif type_arr[j] == 5:
# exponential distribution
value = np.abs(particles[i, j])
weights[i] *= np.exp(-value / dp1_arr[j])
else:
raise NotImplementedError("distribution #%i is not implemented",
type_arr[j])
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/params.py | params.py |
# -*- coding: utf-8 -*-
"""rotqs.py
Implements functions for using quaternions to rotate vectors.
"""
import numba
import numpy as np
from numba import guvectorize
def generate_quaternions(iparams_arr, qs_sx, qs_xs, use_cuda=False, indices=None):
"""Wrapper function for generating rotational quaternions from iparams array.
Parameters
----------
iparams_arr : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameters array.
qs_sx : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (s) -> (x) rotational quaternions.
qs_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
use_cuda : bool
CUDA flag, by default False.
indices : np.ndarray, optional
Lon/Lat/Inc indices in params, by default None.
"""
if indices is None:
indices = np.array([1, 2, 3])
else:
indices = np.array(indices)
if use_cuda:
raise NotImplementedError("CUDA functionality is not available yet")
else:
(i1, i2, i3) = (indices[0], indices[1], indices[2])
(lon, lat, inc) = (iparams_arr[:, i1], iparams_arr[:, i2], iparams_arr[:, i3])
ux = np.array([0, 1.0, 0, 0])
uy = np.array([0, 0, 1.0, 0])
uz = np.array([0, 0, 0, 1.0])
rlon = _numba_quaternion_create(lon, uz)
rlat = _numba_quaternion_create(-lat, quaternion_rotate(uy, rlon))
rinc = _numba_quaternion_create(
inc, quaternion_rotate(ux, _numba_quaternion_multiply(rlat, rlon)))
_numba_quaternion_multiply(rinc, _numba_quaternion_multiply(rlat, rlon), qs_xs)
_numba_quaternion_conjugate(qs_xs, qs_sx)
def quaternion_rotate(vec, q, use_cuda=False):
if use_cuda:
raise NotImplementedError("CUDA functionality is not available yet")
else:
return _numba_quaternion_multiply(
q, _numba_quaternion_multiply(vec, _numba_quaternion_conjugate(q)))
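# _numba_quaternion_rotate below rotates the vector part of `vec` by q, i.e. it
# returns the 3-vector of q * vec * conj(q); `vec` is passed in the 4-component
# form (w, x, y, z) and its scalar part is ignored.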
@numba.njit
def _numba_quaternion_rotate(vec, q):
qh = (q[0], -q[1], -q[2], -q[3])
# mul 1
ma = - vec[1] * qh[1] - vec[2] * qh[2] - vec[3] * qh[3]
mb = + vec[1] * qh[0] + vec[2] * qh[3] - vec[3] * qh[2]
mc = - vec[1] * qh[3] + vec[2] * qh[0] + vec[3] * qh[1]
md = + vec[1] * qh[2] - vec[2] * qh[1] + vec[3] * qh[0]
# mul 2
rb = q[0] * mb + q[1] * ma + q[2] * md - q[3] * mc
rc = q[0] * mc - q[1] * md + q[2] * ma + q[3] * mb
rd = q[0] * md + q[1] * mc - q[2] * mb + q[3] * ma
return np.array([rb, rc, rd])
@guvectorize([
"void(float32, float32[:], float32[:])",
"void(float64, float64[:], float64[:])"],
'(), (n) -> (n)')
def _numba_quaternion_create(rot, vec, res):
argument = np.radians(rot / 2)
res[0] = np.cos(argument)
# due to ufunc broadcasting rules vec must be of length 4, first entry is not used
res[1] = vec[1] * np.sin(argument)
res[2] = vec[2] * np.sin(argument)
res[3] = vec[3] * np.sin(argument)
@guvectorize([
"void(float32[:], float32[:])",
"void(float64[:], float64[:])"],
'(n) -> (n)')
def _numba_quaternion_conjugate(q, res):
res[0] = q[0]
res[1:] = -q[1:]
@guvectorize([
"void(float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:])"],
'(n), (n) -> (n)')
def _numba_quaternion_multiply(q1, q2, res):
(q1a, q1b, q1c, q1d) = (q1[0], q1[1], q1[2], q1[3])
(q2a, q2b, q2c, q2d) = (q2[0], q2[1], q2[2], q2[3])
res[0] = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
res[1] = q1a * q2b + q1b * q2a + q1c * q2d - q1d * q2c
res[2] = q1a * q2c - q1b * q2d + q1c * q2a + q1d * q2b
res[3] = q1a * q2d + q1b * q2c - q1c * q2b + q1d * q2a
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/rotqs.py | rotqs.py |
# -*- coding: utf-8 -*-
"""_extmath.py
Extra math functions.
"""
import numba.cuda as cuda
import numpy as np
from numba import guvectorize
def cholesky(mat, **kwargs):
"""Cholesky Decomposition.
Uses the LDL variant of the Cholesky decomposition to compute the lower triangular matrix of a
    positive semi-definite matrix.
Parameters
----------
mat : np.ndarray
Single or multiple positive semi-definite matrix/matrices.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
Returns
-------
np.ndarray
Lower triangular matrix/matrices.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
return _numba_ldl_lsqrtd(mat)
@guvectorize([
"void(float32[:, :], float32[:, :])",
"void(float64[:, :], float64[:, :])"],
'(n, n) -> (n, n)')
def _numba_ldl_lsqrtd(mat, res):
n = mat.shape[0]
_lmat = np.identity(n)
_dmat = np.zeros((n, n))
for i in range(n):
_dmat[i, i] = mat[i, i] - np.sum(_lmat[i, :i]**2 * np.diag(_dmat)[:i])
for j in range(i + 1, n):
if _dmat[i, i] == 0:
# zero pivot: the corresponding column is annihilated by sqrt(_dmat) anyway
_lmat[j, i] = 0
else:
_lmat[j, i] = mat[j, i] - np.sum(_lmat[j, :i] * _lmat[i, :i] * np.diag(_dmat)[:i])
_lmat[j, i] /= _dmat[i, i]
res[:] = np.dot(_lmat, np.sqrt(_dmat))[:]
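# Usage sketch: for a positive semi-definite matrix C the returned factor L
# satisfies L @ L.T == C, since the LDL pieces recombine as L_ldl * sqrt(D).
# The random test matrix below is an illustrative assumption.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    a = rng.normal(size=(4, 4))
    c = a @ a.T                               # positive semi-definite by construction
    lower = cholesky(c)
    print(np.allclose(lower @ lower.T, c))    # expected: True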
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/_extmath.py | _extmath.py |
# -*- coding: utf-8 -*-
"""_extnumba.py
Utility functions for Numba.
"""
import numba
import numpy as np
def set_random_seed(seed):
"""Sets python & numba seed to same value.
"""
np.random.seed(seed)
_numba_set_random_seed(seed)
@numba.njit
def _numba_set_random_seed(seed):
np.random.seed(seed)
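# Usage sketch: Numba's nopython mode keeps its own RNG state, so seeding only
# np.random.seed() from the interpreter would not make jitted draws reproducible.
if __name__ == "__main__":
    @numba.njit
    def _draw():
        return np.random.normal()

    set_random_seed(7)
    a = _draw()
    set_random_seed(7)
    b = _draw()
    print(a == b)   # expected: True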
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/_extnumba.py | _extnumba.py |
# -*- coding: utf-8 -*-
"""base.py
Implements the base 3DCORE model classes.
"""
import logging
import numba.cuda as cuda
import numpy as np
from itertools import product
from numba.cuda.random import create_xoroshiro128p_states as cxoro128p
from scipy.optimize import least_squares
from py3dcore._extnumba import set_random_seed
from py3dcore.rotqs import generate_quaternions
class Base3DCOREModel(object):
"""Base 3DCORE model class.
"""
parameters = None
dtype = None
launch = None
iparams_arr = sparams_arr = None
qs_sx = qs_xs = None
rng_states = None
g = f = h = p = None
runs = 0
use_cuda = False
# internal
_tva = _tvb = None
_b, _sp = None, None
def __init__(self, launch, functions, parameters, sparams_count, runs, **kwargs):
"""Initialize Base3DCOREModel.
Initial parameters must be generated separately (see generate_iparams).
Parameters
----------
launch : datetime.datetime
Initial datetime.
functions : dict
Propagation, magnetic and transformation functions as dict.
parameters : py3dcore.model.Base3DCOREParameters
Model parameters instance.
sparams_count: int
Number of state parameters.
runs : int
Number of parallel model runs.
Other Parameters
----------------
cuda_device: int
CUDA device, by default 0.
use_cuda : bool, optional
CUDA flag, by default False.
Raises
------
ValueError
If the number of model runs is not divisible by 256 and CUDA flag is set.
If cuda is selected but not available.
"""
self.launch = launch.timestamp()
self.g = functions["g"]
self.f = functions["f"]
self.h = functions["h"]
self.p = functions["p"]
self.parameters = parameters
self.dtype = parameters.dtype
self.runs = runs
self.use_cuda = kwargs.get("use_cuda", False)
if self.use_cuda and cuda.is_available():
raise NotImplementedError("CUDA functionality is not available yet")
# cuda.select_device(kwargs.get("cuda_device", 0))
# self.use_cuda = True
elif self.use_cuda:
raise ValueError("cuda is not available")
if self.use_cuda:
self.iparams_arr = cuda.device_array((self.runs, len(self.parameters)),
dtype=self.dtype)
self.sparams_arr = cuda.device_array((self.runs, sparams_count),
dtype=self.dtype)
self.qs_sx = cuda.device_array((self.runs, 4), dtype=self.dtype)
self.qs_xs = cuda.device_array((self.runs, 4), dtype=self.dtype)
self._tva = cuda.device_array((self.runs, 3), dtype=self.dtype)
self._tvb = cuda.device_array((self.runs, 3), dtype=self.dtype)
else:
self.iparams_arr = np.empty(
(self.runs, len(self.parameters)), dtype=self.dtype)
self.sparams_arr = np.empty(
(self.runs, sparams_count), dtype=self.dtype)
self.qs_sx = np.empty((self.runs, 4), dtype=self.dtype)
self.qs_xs = np.empty((self.runs, 4), dtype=self.dtype)
self._tva = np.empty((self.runs, 3), dtype=self.dtype)
self._tvb = np.empty((self.runs, 3), dtype=self.dtype)
def generate_iparams(self, seed):
"""Generate initial parameters.
Parameters
----------
seed : int
Random number seed.
"""
if self.use_cuda:
self.rng_states = cxoro128p(self.runs, seed=seed)
else:
set_random_seed(seed)
self.parameters.generate(
self.iparams_arr, use_cuda=self.use_cuda, rng_states=self.rng_states)
generate_quaternions(self.iparams_arr, self.qs_sx, self.qs_xs, use_cuda=self.use_cuda,
indices=self.parameters.qindices)
def perturb_iparams(self, particles, weights, kernels):
"""Generate initial parameters by perturbing given particles.
Parameters
----------
particles : np.ndarray
Particles.
weights : np.ndarray
Particle weights.
kernels : np.ndarray
Transition kernels.
"""
self.parameters.perturb(self.iparams_arr, particles, weights, kernels,
use_cuda=self.use_cuda, rng_states=self.rng_states)
generate_quaternions(self.iparams_arr, self.qs_sx, self.qs_xs, use_cuda=self.use_cuda,
indices=self.parameters.qindices)
def update_iparams(self, iparams_arr, seed=None):
"""Set initial parameters to given array. Array size must match the length of "self.runs".
Parameters
----------
iparams_arr : np.ndarray
Initial parameters array.
seed : int, optional
Random seed, by default None.
"""
if seed:
if self.use_cuda:
self.rng_states = cxoro128p(self.runs, seed=seed)
else:
set_random_seed(seed)
self.iparams_arr = iparams_arr.astype(self.dtype)
generate_quaternions(self.iparams_arr, self.qs_sx, self.qs_xs, use_cuda=self.use_cuda,
indices=self.parameters.qindices)
def propagate(self, t):
"""Propagate all model runs to datetime t.
Parameters
----------
t : datetime.datetime
Datetime.
"""
dt = self.dtype(t.timestamp() - self.launch)
self.p(dt, self.iparams_arr, self.sparams_arr, use_cuda=self.use_cuda)
def get_field(self, s, b):
"""Calculate magnetic field vectors at (s) coordinates.
Parameters
----------
s : np.ndarray
Position vector array.
b : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Magnetic field output array.
"""
self.transform_sq(s, q=self._tva)
self.h(self._tva, b, self.iparams_arr, self.sparams_arr, self.qs_xs,
use_cuda=self.use_cuda, rng_states=self.rng_states)
def get_sparams(self, sparams, sparams_out):
"""Get selected sparams values.
Parameters
----------
sparams : np.ndarray
Selected sparams indices.
sparams_out : np.ndarray
Output array.
"""
if self.use_cuda:
raise NotImplementedError
else:
for i in range(len(sparams)):
sparam = sparams[i]
# copy the selected state parameter for all model runs
sparams_out[:, i] = self.sparams_arr[:, sparam]
def sim_fields(self, *args, **kwargs):
"""Legacy dummy for simulate
"""
if "b" in kwargs:
kwargs["b_out"] = kwargs.pop("b")
return self.simulate(*args, **kwargs)
def simulate(self, t, pos, sparams=None, b_out=None, sparams_out=None):
"""Calculate magnetic field vectors at (s) coordinates and at times t (datetime timestamps).
Additionally returns any selected sparams.
Parameters
----------
t : list[datetime.datetime]
Evaluation datetimes.
pos : np.ndarray
Position vector array at evaluation datetimes.
sparams : list[int]
List of state parameters to return.
b_out : Union[List[np.ndarray],
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]], optional
Magnetic field temporary array, by default None.
sparams_out : Union[List[np.ndarray],
List[numba.cuda.cudadrv.devicearray.DeviceNDArray]], optional
State parameters temporary array, by default None.
Returns
-------
Union[list[np.ndarray], list[numba.cuda.cudadrv.devicearray.DeviceNDArray]],
Union[list[np.ndarray], list[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
List of magnetic field output and state parameters at evaluation datetimes.
Raises
------
ValueError
If pos array has invalid dimension (must be 1 or 2).
"""
logger = logging.getLogger(__name__)
if np.array(pos).ndim == 1:
pos = [np.array(pos, dtype=self.dtype) for _ in range(0, len(t))]
elif np.array(pos).ndim == 2:
pos = [np.array(p, dtype=self.dtype) for p in pos]
else:
logger.exception("position array has invalid dimension")
raise ValueError("position array has invalid dimension")
if self.use_cuda:
pos = [cuda.to_device(p) for p in pos]
if b_out is None:
if self.use_cuda:
if self._b is None or len(self._b[0]) != len(t) \
or self._b[0][0].shape != (self.runs, 3):
self._b = [cuda.device_array((self.runs, 3), dtype=self.dtype)
for _ in range(0, len(t))]
else:
if self._b is None or len(self._b) != len(t) or self._b[0].shape != (self.runs, 3):
self._b = [np.empty((self.runs, 3), dtype=self.dtype)
for _ in range(0, len(t))]
b_out = self._b
if sparams and len(sparams) > 0 and sparams_out is None:
if self.use_cuda:
if self._sp is None or len(self._sp[0]) != len(t) \
or self._sp[0][0].shape != (self.runs, len(sparams)):
self._sp = [cuda.device_array((self.runs, len(sparams)), dtype=self.dtype)
for _ in range(0, len(t))]
else:
if self._sp is None or len(self._sp) != len(t) or \
self._sp[0].shape != (self.runs, len(sparams)):
self._sp = [np.empty((self.runs, len(sparams)), dtype=self.dtype)
for _ in range(0, len(t))]
sparams_out = self._sp
for i in range(0, len(t)):
self.propagate(t[i])
self.get_field(pos[i], b_out[i])
if sparams and len(sparams) > 0:
self.get_sparams(sparams, sparams_out[i])
if sparams and len(sparams) > 0:
return b_out, sparams_out
else:
return b_out
def transform_sq(self, s, q):
"""Transform (s) coordinates to (q) coordinates.
Parameters
----------
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
"""
self.f(s, q, self.iparams_arr, self.sparams_arr, self.qs_sx,
use_cuda=self.use_cuda)
def transform_qs(self, s, q):
"""Transform (q) coordinates to (s) coordinates.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
"""
self.g(q, s, self.iparams_arr, self.sparams_arr, self.qs_xs,
use_cuda=self.use_cuda)
class Toroidal3DCOREModel(Base3DCOREModel):
def plot_fieldline(self, ax, fl, arrows=None, **kwargs):
"""Plot magnetic field line.
Parameters
----------
ax : matplotlib.axes.Axes
Matplotlib axes.
fl : np.ndarray
Field line in (s) coordinates.
"""
if arrows:
handles = []
for index in list(range(len(fl)))[::arrows][:-1]:
x, y, z = fl[index]
xv, yv, zv = fl[index + arrows // 2] - fl[index]
handles.append(ax.quiver(
x, y, z, xv, yv, zv, **kwargs
))
return handles
else:
return ax.plot(*fl.T, **kwargs)
def visualize_fieldline(self, q0, index=0, sign=1, steps=1000, step_size=0.005):
"""Integrates along the magnetic field lines starting at a point q0 in (q) coordinates and
returns the field lines in (s) coordinates.
Parameters
----------
q0 : np.ndarray
Starting point in (q) coordinates.
index : int, optional
Model run index, by default 0.
sign : int, optional
Integration direction, by default 1.
steps : int, optional
Number of integration steps, by default 1000.
step_size : float, optional
Integration step size, by default 0.005.
Returns
-------
np.ndarray
Integrated magnetic field lines in (s) coordinates.
"""
# only implemented in cpu mode
if self.use_cuda:
raise NotImplementedError("visualize_fieldline not supported in cuda mode")
if self.iparams_arr[index, -1] != 0:
raise Warning("cannot generate field lines with non-zero noise level")
_tva = np.empty((3,), dtype=self.dtype)
_tvb = np.empty((3,), dtype=self.dtype)
sign = sign / np.abs(sign)
self.g(q0, _tva, self.iparams_arr[index], self.sparams_arr[index], self.qs_xs[index],
use_cuda=False)
fl = [np.array(_tva, dtype=self.dtype)]
def iterate(s):
self.f(s, _tva, self.iparams_arr[index], self.sparams_arr[index], self.qs_sx[index],
use_cuda=False)
self.h(_tva, _tvb, self.iparams_arr[index], self.sparams_arr[index], self.qs_xs[index],
use_cuda=False, bounded=False)
return sign * _tvb / np.linalg.norm(_tvb)
while len(fl) < steps:
# use implicit method and least squares for calculating the next step
sol = getattr(least_squares(
lambda x: x - fl[-1] - step_size *
iterate((x.astype(self.dtype) + fl[-1]) / 2),
fl[-1]), "x")
fl.append(np.array(sol.astype(self.dtype)))
fl = np.array(fl, dtype=self.dtype)
return fl
def visualize_fieldline_dpsi(self, q0, dpsi=np.pi, index=0, sign=1, step_size=0.005):
"""Integrates along the magnetic field lines starting at a point q0 in (q) coordinates and
returns the field lines in (s) coordinates.
Parameters
----------
q0 : np.ndarray
Starting point in (q) coordinates.
dpsi: float
Delta psi to integrate by, default np.pi
index : int, optional
Model run index, by default 0.
sign : int, optional
Integration direction, by default 1.
step_size : float, optional
Integration step size, by default 0.005.
Returns
-------
np.ndarray
Integrated magnetic field lines in (s) coordinates.
"""
# only implemented in cpu mode
if self.use_cuda:
raise NotImplementedError("visualize_fieldline not supported in cuda mode")
if self.iparams_arr[index, -1] != 0:
raise Warning("cannot generate field lines with non-zero noise level")
_tva = np.empty((3,), dtype=self.dtype)
_tvb = np.empty((3,), dtype=self.dtype)
sign = sign / np.abs(sign)
self.g(q0, _tva, self.iparams_arr[index], self.sparams_arr[index], self.qs_xs[index],
use_cuda=False)
fl = [np.array(_tva, dtype=self.dtype)]
def iterate(s):
self.f(s, _tva, self.iparams_arr[index], self.sparams_arr[index], self.qs_sx[index],
use_cuda=False)
self.h(_tva, _tvb, self.iparams_arr[index], self.sparams_arr[index], self.qs_xs[index],
use_cuda=False, bounded=False)
return sign * _tvb / np.linalg.norm(_tvb)
psi_pos = q0[1]
dpsi_count = 0
while dpsi_count < dpsi:
# use implicit method and least squares for calculating the next step
sol = getattr(least_squares(
lambda x: x - fl[-1] - step_size *
iterate((x.astype(self.dtype) + fl[-1]) / 2),
fl[-1]), "x")
fl.append(np.array(sol.astype(self.dtype)))
self.f(fl[-1], _tva, self.iparams_arr[index], self.sparams_arr[index],
self.qs_sx[index], use_cuda=False)
dpsi_count += np.abs(psi_pos - _tva[1])
psi_pos = _tva[1]
fl = np.array(fl, dtype=self.dtype)
return fl
def visualize_wireframe(self, index=0, r=1.0, d=10):
"""Generate model wireframe.
Parameters
----------
index : int, optional
Model run index, by default 0.
r : float, optional
Surface radius (r=1 equals the boundary of the flux rope), by default 1.0.
d : int, optional
Angular grid step in degrees, by default 10.
Returns
-------
np.ndarray
Wireframe array (to be used with plot_wireframe).
"""
r = np.array([np.abs(r)], dtype=self.dtype)
c = 360 // d + 1
u = np.radians(np.r_[0:360. + d:d])
v = np.radians(np.r_[0:360. + d:d])
# combination of wireframe points in (q)
arr = np.array(list(product(r, u, v)), dtype=self.dtype).reshape(c ** 2, 3)
# only implemented in cpu mode
if self.use_cuda:
raise NotImplementedError("visualize_wireframe not supported in cuda mode")
for i in range(0, len(arr)):
self.g(arr[i], arr[i], self.iparams_arr[index], self.sparams_arr[index],
self.qs_xs[index], use_cuda=False)
return arr.reshape((c, c, 3))
def visualize_wireframe_dpsi(self, psi0, dpsi, index=0, r=1.0, d=10):
"""Generate model wireframe.
Parameters
----------
index : int, optional
Model run index, by default 0.
r : float, optional
Surface radius (r=1 equals the boundary of the flux rope), by default 1.0.
Returns
-------
np.ndarray
Wireframe array (to be used with plot_wireframe).
"""
r = np.array([np.abs(r)], dtype=self.dtype)
deg_min = 180 * psi0 / np.pi
deg_max = 180 * (psi0 + dpsi) / np.pi
u = np.radians(np.r_[deg_min:deg_max + d:d])
v = np.radians(np.r_[0:360. + d:d])
c1 = len(u)
c2 = len(v)
# combination of wireframe points in (q)
arr = np.array(list(product(r, u, v)), dtype=self.dtype).reshape(c1 * c2, 3)
for i in range(0, len(arr)):
self.g(arr[i], arr[i], self.iparams_arr[index], self.sparams_arr[index],
self.qs_xs[index], use_cuda=False)
return arr.reshape((c1, c2, 3))
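# Usage sketch: combining the wireframe and field line helpers with matplotlib.
# TTGHv0 is used here only as an example of a concrete Toroidal3DCOREModel
# subclass; the launch date, observation time and seed are illustrative assumptions.
if __name__ == "__main__":
    import datetime
    import matplotlib.pyplot as plt
    from py3dcore.models import TTGHv0

    launch = datetime.datetime(2020, 1, 1)
    model = TTGHv0(launch, runs=1)
    model.generate_iparams(seed=42)
    model.propagate(launch + datetime.timedelta(days=2))
    model.iparams_arr[:, -1] = 0      # field line tracing requires zero noise

    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")
    wf = model.visualize_wireframe(index=0)
    ax.plot_wireframe(*wf.T, linewidth=0.3)
    fl = model.visualize_fieldline(np.array([1.0, np.pi, 0.0], dtype=model.dtype), steps=250)
    model.plot_fieldline(ax, fl, color="red", linewidth=0.8)
    plt.show()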
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/model.py | model.py |
# -*- coding: utf-8 -*-
from . import fitting # noqa: F401
from . import models # noqa: F401
from . import raytracing # noqa: F401
__author__ = 'Andreas J. Weiss'
__copyright__ = 'Copyright (C) 2019 Andreas J. Weiss'
__license__ = 'MIT'
__version__ = '1.1.4'
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
"""raytracing.py
Implements basic ray tracing functionality
"""
import itertools
import multiprocessing
import numba
import numpy as np
class Raytracing(object):
def __init__(self, camera, target, angle, resolution, plane_d=0.1, mask_sun=None):
"""Initialize raytracing class
Parameters
----------
camera_position : np.ndarray
Camera position.
target : [np.ndarray
Camera target.
viewport_angle : np.ndarray
Viewport angle in degrees.
resolution : tuple
Image resolution (nx, ny).
"""
self.angle = np.pi * angle / 180
self.nx, self.ny = resolution
self.camera_pos = camera
self.target_pos = target
self.sun_pos = mask_sun
w = np.array([0, 0, -1])
t = target - camera
b = np.cross(w, t)
t_n = t / np.linalg.norm(t)
b_n = b / np.linalg.norm(b)
v_n = np.cross(t_n, b_n)
g_x = plane_d * np.tan(self.angle / 2)
g_y = g_x * (self.nx - 1) / (self.ny - 1)
self.q_x = 2 * g_x / (self.ny - 1) * b_n
self.q_y = 2 * g_y / (self.nx - 1) * v_n
self.p_1m = t_n * plane_d - g_x * b_n - g_y * v_n
def generate_image(self, model, iparams, t_launch, t_image, density, **kwargs):
"""Generate raytraced image of the 3DCORE model at a given datetime.
Parameters
----------
model : Base3DCOREModel
3DCORE model class.
iparams : np.ndarray
Initial parameters for the 3DCORE model.
t_launch : datetime.datetime
3DCORE launch datetime.
t_image : datetime.datetime
Datetime at which to generate the RT image.
density : func
Density function for the 3DCORE model in (q)-coordinates.
Returns
-------
np.ndarray
RT image.
"""
pool = multiprocessing.Pool(multiprocessing.cpu_count())
step_params = (kwargs.get("step_large", 0.05), kwargs.get("step_small", .0005),
kwargs.get("step_min", 0), kwargs.get("step_max", 2))
rt_params = (self.camera_pos, self.sun_pos, self.p_1m, self.q_x, self.q_y)
coords = itertools.product(list(range(self.nx)), list(range(self.ny)))
result = pool.starmap(worker_generate_image,
[(model, i, j, iparams, t_launch, t_image, step_params, rt_params,
density)
for (i, j) in coords])
image = np.zeros((self.nx, self.ny))
for pix in result:
image[pix[0], pix[1]] = pix[2]
return image
def worker_generate_image(model, i, j, iparams, t_launch, t_image, step_params, rt_params, density):
if i % 16 == 0 and j == 0:
print("tracing line", i)
step_large, step_small, step_min, step_max = step_params
e, s, p, q_x, q_y = rt_params
iparams_arr = iparams.reshape(1, -1)
model_obj = model(t_launch, runs=1, use_cuda=False)
model_obj.update_iparams(iparams_arr, seed=42)
model_obj.propagate(t_image)
rho_0, rho_1 = model_obj.sparams_arr[0, 2:4]
# large steps
step_large_cnt = int((step_max - step_min) // step_large)
rays_large = np.empty((step_large_cnt, 3), dtype=np.float32)
t_steps_large = np.linspace(step_min, step_max, step_large_cnt)
rt_rays(rays_large, t_steps_large, i, j, rt_params)
rays_large_qs = np.empty((len(rays_large), 3), dtype=np.float32)
model_obj.transform_sq(rays_large, rays_large_qs)
# detect if ray is close to 3DCORE structure or the origin (sun)
mask = generate_mask(model_obj, i, j, rays_large, rays_large_qs, step_large, step_large_cnt,
rho_0, rho_1)
if mask[0] == -1:
return i, j, np.nan
elif np.all(mask == 0):
return i, j, 0
arg_first = np.argmax(mask > 0) - 1
arg_last = len(mask) - np.argmax(mask[::-1] > 0)
if arg_last == len(mask):
arg_last -= 1
# small steps
step_small_cnt = int((t_steps_large[arg_last] - t_steps_large[arg_first]) // step_small)
rays_small = np.empty((step_small_cnt, 3), dtype=np.float32)
t_steps_small = np.linspace(t_steps_large[arg_first], t_steps_large[arg_last], step_small_cnt)
rt_rays(rays_small, t_steps_small, i, j, rt_params)
rays_small_qs = np.empty((len(rays_small), 3), dtype=np.float32)
model_obj.transform_sq(rays_small, rays_small_qs)
intensity = integrate_intensity(i, j, rays_small, rays_small_qs, step_small_cnt, rho_0, rho_1,
e, density)
return i, j, intensity
@numba.njit(inline="always")
def rt_p_ij(i, j, p, q_x, q_y):
return p + q_x * (i - 1) + q_y * (j - 1)
@numba.njit(inline="always")
def rt_ray(e, i, j, t, p, q_x, q_y):
ray = rt_p_ij(i, j, p, q_x, q_y)
ray = ray / np.linalg.norm(ray)
return e + t * ray
@numba.njit(parallel=True)
def rt_rays(rays_arr, t_steps, i, j, rt_params):
e, _, p, q_x, q_y = rt_params
for k in numba.prange(len(t_steps)):
rays_arr[k] = rt_ray(e, i, j, t_steps[k], p, q_x, q_y)
return rays_arr
def generate_mask(model_obj, i, j, rays_large, rays_large_qs, step_large, step_large_cnt, rho_0,
rho_1):
mask = np.zeros((len(rays_large)), dtype=np.float32)
fail_flag = False
for k in range(step_large_cnt):
q0, q1, q2 = rays_large_qs[k]
# check distance to sun
if np.linalg.norm(rays_large[k]) < 2 * step_large:
mask[k] = 1
if np.linalg.norm(rays_large[k]) < 0.00465047:
fail_flag = True
break
# compute rays surface points
rays_large_qs_surf = np.array(rays_large_qs)
rays_large_qs_surf[:, 0] = 1
rays_large_surf = np.empty_like(rays_large_qs_surf)
model_obj.transform_qs(rays_large_surf, rays_large_qs_surf)
for k in range(step_large_cnt):
# check distance to cme
if np.linalg.norm(rays_large[k] - rays_large_surf[k]) < 2 * step_large:
mask[k] = 1
if fail_flag:
return -1 * np.ones_like(mask)
else:
return mask
@numba.njit
def integrate_intensity(i, j, rays_small, rays_small_qs, step_small_cnt, rho_0, rho_1, e, density):
intensity = 0
fail_flag = False
for k in range(step_small_cnt):
q0, q1, q2 = rays_small_qs[k]
# check distance to sun
if np.linalg.norm(rays_small[k]) < 0.00465047:
fail_flag = True
break
# check distance to cme
if q0 < 3:
v1 = e - rays_small[k]
v2 = rays_small[k]
# cos^2 of the scattering angle: (v1 . v2)^2 / (|v1|^2 * |v2|^2)
thomson_cos_sq = np.dot(v1, v2)**2 / (np.linalg.norm(v1)**2 * np.linalg.norm(v2)**2)
intensity += thomson_cos_sq * density(q0, q1, q2, rho_0, rho_1)
if fail_flag:
return np.nan
else:
return intensity
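# Usage sketch: setting up the raytracer for a synthetic white-light image.
# The camera geometry, density function, model choice (TTGHv0) and parameter
# draw below are illustrative assumptions; the density must be a Numba-jitted
# function since it is called from within integrate_intensity, and a fork-based
# multiprocessing start method is assumed for the worker pool.
if __name__ == "__main__":
    import datetime
    from py3dcore.models import TTGHv0

    @numba.njit
    def uniform_density(q0, q1, q2, rho_0, rho_1):
        # constant density inside the flux rope boundary, vacuum outside
        return 1.0 if q0 <= 1 else 0.0

    t_launch = datetime.datetime(2020, 1, 1)
    t_image = t_launch + datetime.timedelta(days=2)

    seed_model = TTGHv0(t_launch, runs=1)
    seed_model.generate_iparams(seed=1)

    rt = Raytracing(camera=np.array([0.0, -0.75, 0.0]),
                    target=np.array([0.5, 0.0, 0.0]),
                    angle=45.0, resolution=(32, 32),
                    mask_sun=np.array([0.0, 0.0, 0.0]))
    image = rt.generate_image(TTGHv0, seed_model.iparams_arr[0], t_launch, t_image,
                              uniform_density)
    print(image.shape)   # (32, 32)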
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/raytracing/raytracing.py | raytracing.py |
# -*- coding: utf-8 -*-
from .raytracing import * # noqa: F401
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/raytracing/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
from .ttghv0 import TTGHv0 # noqa: F401
from .ttghv1 import TTGHv1 # noqa: F401
from .ttghv2 import TTGHv2 # noqa: F401
from .ttghv2t import TTGHv2T # noqa: F401
from .ttghv3 import TTGHv3 # noqa: F401
from .ttncv2 import TTNCv2 # noqa: F401
# legacy definition
ThinTorusGH3DCOREModel = TTGHv1 # noqa: F401
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
import numpy as np
from numba import guvectorize
from py3dcore.models.ttghv0.coordinates import _numba_jac
from py3dcore.rotqs import _numba_quaternion_rotate
# Wrappers
def h(q, b, iparams, sparams, q_xs, **kwargs):
"""Wrapper functions for calculating magnetic field vector at (q) coordinates in (s) according
to the gold hoyle solution using the tapered torus model.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
b : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Magnetic field output array.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameter array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameter array.
q_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
Other Parameters
----------------
bounded : bool
Return zero values if measurement is taken outside of the flux rope structure.
rng_states : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
CUDA rng state array.
use_cuda : bool, optional
CUDA flag, by default False.
Raises
------
NotImplementedError
If the method is called with the CUDA flag set.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
bounded = kwargs.get("bounded", True)
_numba_h(q, iparams, sparams, q_xs, bounded, b)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], boolean, float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], boolean, float64[:])"],
'(i), (j), (k), (l), () -> (i)')
def _numba_h(q, iparams, sparams, q_xs, bounded, b):
bsnp = np.empty((3,))
(q0, q1, q2) = (q[0], q[1], q[2])
if q0 <= 1 or bounded is False:
(_, _, _, _, _, delta, _, _, turns, bradius, _, _, _, noise) = iparams
(_, _, rho_0, rho_1, b_t) = sparams
# get normal vectors
(dr, dpsi, dphi) = _numba_jac(q0, q1, q2, rho_0, rho_1, delta)
# unit normal vectors
dr = dr / np.linalg.norm(dr)
dpsi_norm = np.linalg.norm(dpsi)
dpsi = dpsi / dpsi_norm
dphi_norm = np.linalg.norm(dphi)
dphi = dphi / dphi_norm
br = 0
fluxfactor = 1
h = (delta - 1)**2 / (1 + delta)**2
E = np.pi * (1 + delta) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
t = turns * rho_1 / rho_0 * E / 2 / np.pi
denom = (1 + t**2 * q0**2)
bpsi = b_t / denom * fluxfactor
bphi = b_t * t * q0 / denom / (1 + q0 * rho_1 / rho_0 * np.cos(q2)) * fluxfactor
# magnetic field in (x)
bsnp[0] = dr[0] * br + dpsi[0] * bpsi + dphi[0] * bphi
bsnp[1] = dr[1] * br + dpsi[1] * bpsi + dphi[1] * bphi
bsnp[2] = dr[2] * br + dpsi[2] * bpsi + dphi[2] * bphi
# magnetic field in (s)
bss = _numba_quaternion_rotate(np.array([0, bsnp[0], bsnp[1], bsnp[2]]), q_xs)
b[0] = bss[0] + np.random.normal(0, noise)
b[1] = bss[1] + np.random.normal(0, noise)
b[2] = bss[2] + np.random.normal(0, noise)
else:
b[0] = 0
b[1] = 0
b[2] = 0
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv0/magfield.py | magfield.py |
# -*- coding: utf-8 -*-
import numba
import numpy as np
# Wrappers
def p(t, iparams, sparams, **kwargs):
"""Wrapper function for propagating the flux rope state parameters.
Parameters
----------
t : float
Datetime timestamp.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameters array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameters array.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_p(t, iparams, sparams)
@numba.njit
def _numba_p(t, iparams, sparams):
for i in range(0, len(iparams)):
(t_i, _, _, _, d, _, r, v, _, _, b_i, bg_d, bg_v, _) = iparams[i]
# rescale parameters
bg_d = bg_d * 1e-7
r = r * 695510
dt = t - t_i
dv = v - bg_v
bg_sgn = int(-1 + 2 * int(v > bg_v))
rt = (bg_sgn / bg_d * np.log1p(bg_sgn * bg_d * dv * dt) + bg_v * dt + r) / 1.496e8
vt = dv / (1 + bg_sgn * bg_d * dv * dt) + bg_v
rho_1 = d * (rt ** 1.14) / 2
rho_0 = (rt - rho_1) / 2
b_t = b_i * (2 * rho_0) ** (-1.64)
sparams[i, 0] = t
sparams[i, 1] = vt
sparams[i, 2] = rho_0
sparams[i, 3] = rho_1
sparams[i, 4] = b_t
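# Worked example: the propagation above is a drag-based model. With the rescaled
# drag parameter gamma, dv = v - v_bg and sgn = sign(dv), it evaluates
#   r(t) = sgn / gamma * log(1 + sgn * gamma * dv * t) + v_bg * t + r0
#   v(t) = dv / (1 + sgn * gamma * dv * t) + v_bg
# The numbers below are illustrative assumptions, not values from the package.
if __name__ == "__main__":
    gamma = 1e-7                    # rescaled drag parameter [1 / km]
    r0 = 20 * 695510.0              # 20 solar radii [km]
    v0, v_bg = 800.0, 400.0         # initial and background speed [km / s]
    dt = 2 * 86400.0                # two days [s]
    dv = v0 - v_bg
    sgn = 1 if v0 > v_bg else -1
    r_au = (sgn / gamma * np.log1p(sgn * gamma * dv * dt) + v_bg * dt + r0) / 1.496e8
    v_t = dv / (1 + sgn * gamma * dv * dt) + v_bg
    print(r_au, v_t)                # roughly 0.7 AU and ~450 km/s after two days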
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv0/propagation.py | propagation.py |
# -*- coding: utf-8 -*-
import numba
import numpy as np
from numba import guvectorize
from py3dcore.rotqs import _numba_quaternion_rotate
# Wrappers
def g(q, s, iparams, sparams, q_xs, **kwargs):
"""Wrapper function for transforming (q) coordinates to (s) coordinates using the tapered torus
global shape.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The state parameter array.
qs_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
Raises
------
NotImplementedError
If the method is called with the CUDA flag set.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_g(q, iparams, sparams, q_xs, s)
def f(s, q, iparams, sparams, q_sx, **kwargs):
"""Wrapper function for transforming (s) coordinates to (q) coordinates using the tapered torus
global shape.
Parameters
----------
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The state parameter array.
qs_sx : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (s) -> (x) rotational quaternions.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
Raises
------
NotImplementedError
If the method is called with the CUDA flag set.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_f(s, iparams, sparams, q_sx, q)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], float64[:])"],
'(i), (j), (k), (l) -> (i)')
def _numba_g(q, iparams, sparams, q_xs, s):
(q0, q1, q2) = q
delta = iparams[5]
(_, _, rho_0, rho_1, _) = sparams
x = np.array([
0,
-(rho_0 + q0 * rho_1 * np.cos(q2)) * np.cos(q1) + rho_0,
(rho_0 + q0 * rho_1 * np.cos(q2)) * np.sin(q1),
q0 * rho_1 * np.sin(q2) * delta]
)
s[:] = _numba_quaternion_rotate(x, q_xs)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], float64[:])"],
'(i), (j), (k), (l) -> (i)')
def _numba_f(s, iparams, sparams, q_sx, q):
delta = iparams[5]
(_, _, rho_0, rho_1, _) = sparams
_s = np.array([0, s[0], s[1], s[2]]).astype(s.dtype)
xs = _numba_quaternion_rotate(_s, q_sx)
(x0, x1, x2) = xs
rd = np.sqrt((rho_0 - x0) ** 2 + x1 ** 2) - rho_0
if x0 == rho_0:
if x1 >= 0:
psi = np.pi / 2
else:
psi = 3 * np.pi / 2
else:
psi = np.arctan2(-x1, x0 - rho_0) + np.pi
if rd == 0:
if x2 >= 0:
phi = np.pi / 2
else:
phi = 3 * np.pi / 2
else:
if rd > 0:
phi = np.arctan(x2 / rd / delta)
else:
phi = -np.pi + np.arctan(x2 / rd / delta)
if phi < 0:
phi += 2 * np.pi
if phi == np.pi / 2 or phi == 3 * np.pi / 2:
r = x2 / delta / rho_1 / np.sin(phi)
else:
r = np.abs((np.sqrt((rho_0 - x0) ** 2 + x1 ** 2) - rho_0) / np.cos(phi) / rho_1)
q[0] = r
q[1] = psi
q[2] = phi
@numba.njit
def _numba_jac(q0, q1, q2, rho_0, rho_1, delta):
dr = np.array([
-rho_1 * np.cos(q2) * np.cos(q1),
rho_1 * np.cos(q2) * np.sin(q1),
rho_1 * np.sin(q2) * delta
])
dpsi = np.array([
rho_0 * np.sin(q1) + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * np.sin(q1),
rho_0 * np.cos(q1) + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * np.cos(q1),
0
])
dphi = np.array([
q0 * rho_1 * np.sin(q2) * np.cos(q1),
-q0 * rho_1 * np.sin(q2) * np.sin(q1),
q0 * rho_1 * np.cos(q2) * delta
])
return dr, dpsi, dphi
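# Consistency sketch: f should invert g, so mapping a (q) point to (s) and back
# recovers it. The torus radii, aspect ratio and zero rotation used here are
# illustrative assumptions.
if __name__ == "__main__":
    from py3dcore.rotqs import generate_quaternions

    iparams = np.zeros((1, 14))
    iparams[0, 5] = 1.0                               # delta (aspect ratio)
    sparams = np.array([[0.0, 0.0, 0.5, 0.1, 0.0]])   # rho_0 = 0.5, rho_1 = 0.1
    qs_sx = np.empty((1, 4))
    qs_xs = np.empty((1, 4))
    generate_quaternions(iparams, qs_sx, qs_xs)       # lon = lat = inc = 0
    q_in = np.array([0.5, 2.0, 1.0])                  # (r, psi, phi)
    s_out = np.empty(3)
    q_back = np.empty(3)
    g(q_in, s_out, iparams[0], sparams[0], qs_xs[0])
    f(s_out, q_back, iparams[0], sparams[0], qs_sx[0])
    print(np.allclose(q_in, q_back))                  # expected: True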
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv0/coordinates.py | coordinates.py |
# -*- coding: utf-8 -*-
import json
import numpy as np
import os
import py3dcore
from py3dcore.model import Toroidal3DCOREModel
from py3dcore.params import Base3DCOREParameters
from py3dcore.models.ttghv0.coordinates import g, f
from py3dcore.models.ttghv0.magfield import h
from py3dcore.models.ttghv0.propagation import p
class TTGHv0(Toroidal3DCOREModel):
"""Implements the torus gold-hoyle (v0) 3DCORE model.
Extended Summary
================
For this specific model there are a total of 14 initial parameters which are as follows:
0: t_i time offset
1: lon longitude
2: lat latitude
3: inc inclination
4: diameter cross section diameter at 1 AU
5: delta cross section aspect ratio
6: radius initial cme radius
7: velocity initial cme velocity
8: turns magnetic field turns over entire flux rope
9: m_coeff magnetic field coefficient needed for some magnetic models
10: b magnetic field strength at center at 1AU
11: bg_d solar wind background drag coefficient
12: bg_v solar wind background speed
13: noise instrument noise
There are 5 state parameters which are as follows:
0: t_t current time
1: v_t current velocity
2: rho_0 torus major radius
3: rho_1 torus minor radius
4: b magnetic field strength at center
"""
def __init__(self, launch, runs, **kwargs):
"""Initialize ThinTorusGH3DCOREModel model.
Parameters
----------
launch: datetime.datetime
Initial datetime.
runs : int
Number of parallel model runs.
Other Parameters
----------------
cuda_device: int
CUDA device, by default 0.
dtype: type
Data type, by default np.float32.
use_cuda : bool
CUDA flag, by default False.
"""
funcs = {
"g": g,
"f": f,
"h": h,
"p": p
}
dtype = kwargs.pop("dtype", np.float32)
parameters = kwargs.pop("parameters", self.default_parameters())
if isinstance(parameters, dict):
parameters = Base3DCOREParameters(parameters, dtype=dtype)
super(TTGHv0, self).__init__(
launch, funcs, parameters, sparams_count=5, runs=runs, **kwargs
)
@classmethod
def default_parameters(cls):
path = os.path.join(os.path.dirname(py3dcore.__file__), "models/ttghv0/parameters.json")
with open(path) as fh:
data = json.load(fh)
return data
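# Usage sketch: build the model, draw an initial parameter ensemble, propagate
# and evaluate the field at a synthetic observer position. The launch date,
# observation time, seed and position below are illustrative assumptions.
if __name__ == "__main__":
    import datetime

    launch = datetime.datetime(2020, 1, 1)
    model = TTGHv0(launch, runs=4)
    model.generate_iparams(seed=42)

    t_obs = launch + datetime.timedelta(days=2)
    pos = np.array([0.7, 0.0, 0.0])      # observer at 0.7 AU on the x-axis in (s) coordinates
    b = model.simulate([t_obs], pos)
    print(b[0].shape)                     # one (runs, 3) field array per evaluation time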
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv0/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
import numpy as np
from numba import guvectorize
from py3dcore.models.ttghv3.coordinates import _numba_jac
from py3dcore.rotqs import _numba_quaternion_rotate
# Wrappers
def h(q, b, iparams, sparams, q_xs, **kwargs):
"""Wrapper functions for calculating magnetic field vector at (q) coordinates in (s) according
to the gold hoyle solution using the tapered torus model.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
b : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Magnetic field output array.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameter array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameter array.
q_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
Other Parameters
----------------
bounded : bool
Return zero values if measurement is taken outside of the flux rope structure.
rng_states : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
CUDA rng state array.
use_cuda : bool, optional
CUDA flag, by default False.
Raises
------
NotImplementedError
If the method is called with the CUDA flag set.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
bounded = kwargs.get("bounded", True)
_numba_h(q, iparams, sparams, q_xs, bounded, b)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], boolean, float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], boolean, float64[:])"],
'(i), (j), (k), (l), () -> (i)')
def _numba_h(q, iparams, sparams, q_xs, bounded, b):
bsnp = np.empty((3,))
(q0, q1, q2) = (q[0], q[1], q[2])
if q0 <= 1 or bounded is False:
(_, _, _, _, _, w, _, _, _, turns, _, _, _, noise) = iparams
(_, _, rho_0, rho_1, delta, b_t) = sparams
# get normal vectors
(dr, dpsi, dphi) = _numba_jac(q0, q1, q2, rho_0, rho_1, delta, w)
# unit normal vectors
dr = dr / np.linalg.norm(dr)
dpsi_norm = np.linalg.norm(dpsi)
dpsi = dpsi / dpsi_norm
dphi_norm = np.linalg.norm(dphi)
dphi = dphi / dphi_norm
br = 0
fluxfactor = 1 / np.sin(q1 / 2)**2
h = (delta - 1)**2 / (1 + delta)**2
E = np.pi * (1 + delta) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
t = turns * rho_1 / rho_0 * E / 2 / np.pi * np.sin(q1 / 2)**2
denom = (1 + t**2 * q0**2)
bpsi = b_t / denom * fluxfactor
bphi = b_t * t * q0 / denom / (1 + q0 * rho_1 / rho_0 * np.cos(q2)) * fluxfactor
# magnetic field in (x)
bsnp[0] = dr[0] * br + dpsi[0] * bpsi + dphi[0] * bphi
bsnp[1] = dr[1] * br + dpsi[1] * bpsi + dphi[1] * bphi
bsnp[2] = dr[2] * br + dpsi[2] * bpsi + dphi[2] * bphi
# magnetic field in (s)
bss = _numba_quaternion_rotate(np.array([0, bsnp[0], bsnp[1], bsnp[2]]), q_xs)
b[0] = bss[0] + np.random.normal(0, noise)
b[1] = bss[1] + np.random.normal(0, noise)
b[2] = bss[2] + np.random.normal(0, noise)
else:
b[0] = 0
b[1] = 0
b[2] = 0
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv3/magfield.py | magfield.py |
# -*- coding: utf-8 -*-
import numba
import numpy as np
# Wrappers
def p(t, iparams, sparams, **kwargs):
"""Wrapper function for propagating the flux rope state parameters.
Parameters
----------
t : float
Datetime timestamp.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameters array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameters array.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_p(t, iparams, sparams)
@numba.njit
def _numba_p(t, iparams, sparams):
for i in range(0, len(iparams)):
(t_i, _, _, _, pa, _, f, r, v, _, b_i, bg_d, bg_v, _) = iparams[i]
# rescale parameters
bg_d = bg_d * 1e-7
r = r * 695510
dt = t - t_i
dv = v - bg_v
bg_sgn = int(-1 + 2 * int(v > bg_v))
rt = (bg_sgn / bg_d * np.log1p(bg_sgn * bg_d * dv * dt) + bg_v * dt + r) / 1.496e8
vt = dv / (1 + bg_sgn * bg_d * dv * dt) + bg_v
rho_1y = rt * np.sin(2 * np.pi * pa / 360 / 2)
delta = rt * (rho_1y / r * 1.496e8) / (rho_1y + f * (rt - r / 1.496e8))
rho_1 = rho_1y / delta
rho_0 = (rt - rho_1) / 2
b_t = b_i * (2 * rho_0) ** (-1.64)
sparams[i, 0] = t
sparams[i, 1] = vt
sparams[i, 2] = rho_0
sparams[i, 3] = rho_1
sparams[i, 4] = delta
sparams[i, 5] = b_t
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv3/propagation.py | propagation.py |
# -*- coding: utf-8 -*-
import numba
import numpy as np
from numba import guvectorize
from py3dcore.rotqs import _numba_quaternion_rotate
# Wrappers
def g(q, s, iparams, sparams, q_xs, **kwargs):
"""Wrapper function for transforming (q) coordinates to (s) coordinates using the tapered torus
global shape.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The state parameter array.
qs_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_g(q, iparams, sparams, q_xs, s)
def f(s, q, iparams, sparams, q_sx, **kwargs):
"""Wrapper function for transforming (s) coordinates to (q) coordinates using the tapered torus
global shape.
Parameters
----------
s : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (s) coordinate array.
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The state parameter array.
qs_sx : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (s) -> (x) rotational quaternions.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_f(s, iparams, sparams, q_sx, q)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], float64[:])"],
'(i), (j), (k), (l) -> (i)')
def _numba_g(q, iparams, sparams, q_xs, s):
(q0, q1, q2) = q
w = iparams[5]
(_, _, rho_0, rho_1, delta, _) = sparams
x = np.array([
0,
-(rho_0 + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2)) * np.cos(q1) + rho_0,
w * np.sin(q1 / 2)**2 * (rho_0 + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2)) * np.sin(q1),
q0 * rho_1 * np.sin(q1 / 2)**2 * np.sin(q2) * delta]
)
s[:] = _numba_quaternion_rotate(x, q_xs)
@numba.njit(["f4(f4, f4, f4, f4)", "f8(f8, f8, f8, f8)"])
def _numba_solve_psi(x0, x1, w, rho_0):
"""Search for psi within range [pi/2, 3 * pi /2].
Uses a simple damped newton method.
"""
func = lambda psi: x1 / (x0 - rho_0) + w * np.sin(psi / 2)**2 * np.tan(psi)
dfunc = lambda psi: w * np.sin(psi / 2) * np.cos(psi / 2) * np.tan(psi) + w * np.sin(psi / 2)**2 / np.cos(psi)**2
t2 = np.pi
alpha = 1
# max iter
N = 1e3
Nc = 0
while True:
if Nc >= N:
return np.nan
t1 = t2 - alpha * func(t2) / dfunc(t2)
if t1 > 3 * np.pi / 2 or t1 < np.pi / 2:
alpha /= 2
continue
if np.abs(func(t2)) < 1e-10:
break
t2 = t1
Nc += 1
return t2
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], float64[:])"],
'(i), (j), (k), (l) -> (i)')
def _numba_f(s, iparams, sparams, q_sx, q):
w = iparams[5]
(_, _, rho_0, rho_1, delta, _) = sparams
_s = np.array([0, s[0], s[1], s[2]]).astype(s.dtype)
xs = _numba_quaternion_rotate(_s, q_sx)
(x0, x1, x2) = xs
if x0 == rho_0:
if x1 >= 0:
psi = np.pi / 2
else:
psi = 3 * np.pi / 2
else:
psi = _numba_solve_psi(x0, x1, w, rho_0)
# abort if no solution for psi is found
if np.isnan(psi):
q[0] = 1e9
q[1] = 1e-9
q[2] = 1e-9
else:
g1 = np.cos(psi)**2 + w**2 * np.sin(psi / 2)**4 * np.sin(psi)**2
rd = np.sqrt(((rho_0 - x0) ** 2 + x1 ** 2) / g1) - rho_0
if rd == 0:
if x2 >= 0:
phi = np.pi / 2
else:
phi = 3 * np.pi / 2
else:
if rd > 0:
phi = np.arctan(x2 / rd / delta)
else:
phi = -np.pi + np.arctan(x2 / rd / delta)
if phi < 0:
phi += 2 * np.pi
if phi == np.pi / 2 or phi == 3 * np.pi / 2:
r = x2 / delta / rho_1 / np.sin(phi) / np.sin(psi / 2)**2
else:
r = np.abs(rd / np.cos(phi) / np.sin(psi / 2)**2 / rho_1)
q[0] = r
q[1] = psi
q[2] = phi
@numba.njit
def _numba_jac(q0, q1, q2, rho_0, rho_1, delta, w):
dr = np.array([
-rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * np.cos(q1),
w * np.sin(q1 / 2)**2 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * np.sin(q1),
rho_1 * np.sin(q1 / 2)**2 * np.sin(q2) * delta
])
dpsi = np.array([
rho_0 * np.sin(q1) + q0 * rho_1 * np.sin(q1 / 2)**2 *
np.cos(q2) * np.sin(q1) - q0
* rho_1 * np.cos(q1 / 2) * np.sin(q1 / 2) * np.cos(q2) * np.cos(q1),
w * (rho_0 * np.cos(q1) + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * np.cos(q1)
+ q0 * rho_1 * np.cos(q1 / 2) * np.sin(q1 / 2) * np.cos(q2) * np.sin(q1))
* np.sin(q1 / 2)**2 + np.sin(q1 / 2) * np.cos(q1 / 2) * w
* (rho_0 + q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2)) * np.sin(q1),
q0 * rho_1 * delta * np.cos(q1 / 2) * np.sin(q1 / 2) * np.sin(q2)
])
dphi = np.array([
q0 * rho_1 * np.sin(q1 / 2)**2 * np.sin(q2) * np.cos(q1),
-w * np.sin(q1 / 2)**2 * q0 * rho_1 * np.sin(q1 / 2)**2 * np.sin(q2) * np.sin(q1),
q0 * rho_1 * np.sin(q1 / 2)**2 * np.cos(q2) * delta
])
return dr, dpsi, dphi
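# Consistency sketch for the damped Newton solver above: the (x) coordinates of a
# point on the torus satisfy x1 / (x0 - rho_0) = -w * sin(psi/2)**2 * tan(psi),
# so psi can be reconstructed from that ratio alone. The values below are
# illustrative assumptions.
if __name__ == "__main__":
    w, rho_0 = 0.5, 0.6
    psi_true = 2.5
    ratio = -w * np.sin(psi_true / 2)**2 * np.tan(psi_true)
    x0 = rho_0 + 1.0                            # arbitrary offset from rho_0
    x1 = ratio * (x0 - rho_0)
    print(_numba_solve_psi(x0, x1, w, rho_0))   # expected: approximately 2.5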
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv3/coordinates.py | coordinates.py |
# -*- coding: utf-8 -*-
import json
import numpy as np
import os
import py3dcore
from py3dcore.model import Toroidal3DCOREModel
from py3dcore.params import Base3DCOREParameters
from py3dcore.models.ttghv3.coordinates import g, f
from py3dcore.models.ttghv3.magfield import h
from py3dcore.models.ttghv3.propagation import p
class TTGHv3(Toroidal3DCOREModel):
"""Implements the thin torus gold-hoyle (v3) 3DCORE model.
Extended Summary
================
For this specific model there are a total of 14 initial parameters which are as follows:
0: t_i time offset
1: lon longitude
2: lat latitude
3: inc inclination
4: pa pitch angle
5: w cme width ratio
6: f flattening factor
7: r0 initial cme radius
8: v0 initial cme velocity
9: tau magnetic field turns over entire flux rope
10: b magnetic field strength at center at 1AU
11: bg_d solar wind background drag coefficient
12: bg_v solar wind background speed
13: noise instrument noise
There are 6 state parameters which are as follows:
0: t_t current time
1: v_t current velocity
2: rho_0 torus major radius
3: rho_1 torus minor radius
4: delta aspect-ratio
5: b magnetic field strength at center
"""
def __init__(self, launch, runs, **kwargs):
"""Initialize the thin torus gold-hoyle (v3) 3DCORE model.
Parameters
----------
launch: datetime.datetime
Initial datetime.
runs : int
Number of parallel model runs.
Other Parameters
----------------
cuda_device: int
CUDA device, by default 0.
dtype: type
Data type, by default np.float32.
use_cuda : bool
CUDA flag, by default False.
"""
funcs = {
"g": g,
"f": f,
"h": h,
"p": p
}
dtype = kwargs.pop("dtype", np.float32)
parameters = kwargs.pop("parameters", self.default_parameters())
if isinstance(parameters, dict):
parameters = Base3DCOREParameters(parameters, dtype=dtype)
super(TTGHv3, self).__init__(
launch, funcs, parameters, sparams_count=6, runs=runs, **kwargs
)
@classmethod
def default_parameters(cls):
path = os.path.join(os.path.dirname(py3dcore.__file__), "models/ttghv3/parameters.json")
with open(path) as fh:
data = json.load(fh)
return data
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttghv3/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
import numpy as np
from numba import guvectorize
from py3dcore.models.ttncv2.coordinates import _numba_jac
from py3dcore.rotqs import _numba_quaternion_rotate
# Wrappers
def h(q, b, iparams, sparams, q_xs, **kwargs):
"""Wrapper functions for calculating magnetic field vector at (q) coordinates in (s) according
to the gold hoyle solution using the tapered torus model.
Parameters
----------
q : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
The (q) coordinate array.
b : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Magnetic field output array.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameter array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameter array.
q_xs : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Array for (x) -> (s) rotational quaternions.
Other Parameters
----------------
bounded : bool
Return zero values if measurement is taken outside of the flux rope structure.
rng_states : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
CUDA rng state array.
use_cuda : bool, optional
CUDA flag, by default False.
Raises
------
NotImplementedError
If the method is called with the CUDA flag set.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
bounded = kwargs.get("bounded", True)
_numba_h(q, iparams, sparams, q_xs, bounded, b)
@guvectorize([
"void(float32[:], float32[:], float32[:], float32[:], boolean, float32[:])",
"void(float64[:], float64[:], float64[:], float64[:], boolean, float64[:])"],
'(i), (j), (k), (l), () -> (i)')
def _numba_h(q, iparams, sparams, q_xs, bounded, b):
bsnp = np.empty((3,))
(q0, q1, q2) = (q[0], q[1], q[2])
if q0 <= 1 or bounded is False:
(t_i, _, _, _, _, w, delta, _, _, turns, _, _, _, _, _, noise) = iparams
(_, _, rho_0, rho_1, b_t) = sparams
# get normal vectors
(dr, dpsi, dphi) = _numba_jac(q0, q1, q2, rho_0, rho_1, delta, w)
# unit normal vectors
dr = dr / np.linalg.norm(dr)
dpsi_norm = np.linalg.norm(dpsi)
dpsi = dpsi / dpsi_norm
dphi_norm = np.linalg.norm(dphi)
dphi = dphi / dphi_norm
br = 0
fluxfactor = 1 / np.sin(q1 / 2)**2
hand = turns / np.abs(turns)
turns = np.abs(turns)
dinv = 1 / delta
b_10 = b_t * delta * 2
#h = (delta - 1)**2 / (1 + delta)**2
#E = np.pi * (1 + delta) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
#t = turns * rho_1 / rho_0 * E / 2 / np.pi * np.sin(q1 / 2)**2
bpsi = dinv * b_10 * (2 - q0**2) * fluxfactor
bphi = 2 * dinv * hand * b_10 * q0 / (dinv**2 + 1) / turns * fluxfactor
# magnetic field in (x)
bsnp[0] = dr[0] * br + dpsi[0] * bpsi + dphi[0] * bphi
bsnp[1] = dr[1] * br + dpsi[1] * bpsi + dphi[1] * bphi
bsnp[2] = dr[2] * br + dpsi[2] * bpsi + dphi[2] * bphi
# magnetic field in (s)
bss = _numba_quaternion_rotate(np.array([0, bsnp[0], bsnp[1], bsnp[2]]), q_xs)
b[0] = bss[0] + np.random.normal(0, noise)
b[1] = bss[1] + np.random.normal(0, noise)
b[2] = bss[2] + np.random.normal(0, noise)
else:
b[0] = 0
b[1] = 0
b[2] = 0
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttncv2/magfield.py | magfield.py |
# -*- coding: utf-8 -*-
import numba
import numpy as np
# Wrappers
def p(t, iparams, sparams, **kwargs):
"""Wrapper function for propagating the flux rope state parameters.
Parameters
----------
t : float
Datetime timestamp.
iparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
Initial parameters array.
sparams : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
State parameters array.
Other Parameters
----------------
use_cuda : bool
CUDA flag, by default False.
"""
if kwargs.get("use_cuda", False):
raise NotImplementedError("CUDA functionality is not available yet")
else:
_numba_p(t, iparams, sparams)
@numba.njit
def _numba_p(t, iparams, sparams):
for i in range(0, len(iparams)):
(t_i, _, _, _, d, _, _, r, v, _, n_a, n_b, b_i, bg_d, bg_v, _) = iparams[i]
# rescale parameters
bg_d = bg_d * 1e-7
r = r * 695510
dt = t - t_i
dv = v - bg_v
bg_sgn = int(-1 + 2 * int(v > bg_v))
rt = (bg_sgn / bg_d * np.log1p(bg_sgn * bg_d * dv * dt) + bg_v * dt + r) / 1.496e8
vt = dv / (1 + bg_sgn * bg_d * dv * dt) + bg_v
rho_1 = d * (rt ** n_a) / 2
rho_0 = (rt - rho_1) / 2
b_t = b_i * (2 * rho_0) ** (-n_b)
sparams[i, 0] = t
sparams[i, 1] = vt
sparams[i, 2] = rho_0
sparams[i, 3] = rho_1
sparams[i, 4] = b_t
| 3DCORE | /3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/models/ttncv2/propagation.py | propagation.py |