'ignored_channels_not_on_list', channel=channel.mention)
else:
channels.remove(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_changes_removed', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_changes.command("list")
async def ignored_channels_changes_list(self, ctx):
"""ignored_channels_list_help"""
await self.list_channels(ctx, "changes")
@staticmethod
async def list_channels(ctx, type):
channel_list = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", f'IGNORED_CHANNELS_{type.upper()}')
if len(channel_list) > 0:
channels = "\n".join(ctx.guild.get_channel(c).mention for c in channel_list)
else:
channels = Translator.translate('no_ignored_channels', ctx)
embed = discord.Embed(color=ctx.guild.roles[-1].color, description=channels)
embed.set_author(name=Translator.translate(f'ignored_channels_list_{type}', ctx, guild=ctx.guild.name),
icon_url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@ignored_channels.group("edits", aliases=["edit"], invoke_without_command=True)
@commands.guild_only()
async def ignored_channels_edits(self, ctx):
"""ignored_channels_edits_help"""
if ctx.invoked_subcommand is None:
await ctx.invoke(self.bot.get_command("help"), query="configure ignored_channels other")
@ignored_channels_edits.command("add")
async def ignored_channels_edits_add(self, ctx, channel: TextChannel):
"""ignored_channels_add_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_OTHER')
if channel.id in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_already_on_list', channel=channel.mention)
else:
channels.append(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_edits_added', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_edits.command("remove")
async def ignored_channels_edits_remove(self, ctx, channel: TextChannel):
"""ignored_channels_remove_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_OTHER')
if channel.id not in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_not_on_list', channel=channel.mention)
else:
channels.remove(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_edits_removed', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_edits.command("list")
async def ignored_channels_edits_list(self, ctx):
"""ignored_channels_list_help"""
await self.list_channels(ctx, "other")
@commands.group(invoke_without_command=True)
@commands.guild_only()
async def disable(self, ctx: commands.Context):
"""disable_help"""
pass
@disable.command()
async def mute(self, ctx: commands.Context):
"""disable_mute_help"""
# grab the active mutes first so their roles can be removed below, then mark them inactive
infractions = await Infraction.filter(type="Mute", guild_id=ctx.guild.id, active=True)
await Infraction.filter(type="Mute", guild_id=ctx.guild.id, active=True).update(active=False)
role = ctx.guild.get_role(Configuration.get_var(ctx.guild.id, "ROLES", "MUTE_ROLE"))
for i in infractions:
member = ctx.guild.get_member(i.user_id)
if member is not None:
await member.remove_roles(role, reason=f"Mute feature has been disabled")
Configuration.set_var(ctx.guild.id, "ROLES", "MUTE_ROLE", 0)
await ctx.send(
"Mute feature has been disabled, all people muted have been unmuted and the role can now be removed.")
async def dm_configure(self, ctx, kind, value):
config_key = f"DM_ON_{kind.upper()}"
current = Configuration.get_var(ctx.guild.id, "INFRACTIONS", config_key)
if value is None:
await MessageUtils.send_to(ctx, 'WRENCH', f'dm_on_{kind}_msg_is_' + ('enabled' if current else 'disabled'))
elif current != value:
Configuration.set_var(ctx.guild.id, "INFRACTIONS", config_key, value)
await MessageUtils.send_to(ctx, 'YES', f'dm_on_{kind}_msg_' + ('enabled' if value else 'disabled'))
else:
await MessageUtils.send_to(ctx, 'WARNING',
f'dm_on_{kind}_msg_already_' + ('enabled' if value else 'disabled'))
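# --- Illustrative note (not part of the original cog) ---
# dm_configure above backs all of the dm_on_* commands that follow: called with
# no value it reports the current DM_ON_<KIND> setting, otherwise it stores the
# new value and replies with a translation key such as dm_on_warn_msg_enabled
# or dm_on_warn_msg_already_enabled.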
@configure.command()
async def dm_on_warn(self, ctx, value: bool = None):
"""dm_on_warn_help"""
await self.dm_configure(ctx, 'warn', value)
@configure.command()
async def dm_on_kick(self, ctx, value: bool = None):
"""dm_on_kick_help"""
await self.dm_configure(ctx, 'kick', value)
@configure.command()
async def dm_on_ban(self, ctx, value: bool = None):
"""dm_on_ban_help"""
await self.dm_configure(ctx, 'ban', value)
@configure.command()
async def dm_on_tempban(self, ctx, value: bool = None):
"""dm_on_tempban_help"""
await self.dm_configure(ctx, 'tempban', value)
@configure.command()
async def dm_on_mute(self, ctx, value: bool = None):
"""dm_on_mute_help"""
await self.dm_configure(ctx, 'mute', value)
@configure.command()
async def dm_on_unmute(self, ctx, value: bool = None):
"""dm_on_unmute_help"""
await self.dm_configure(ctx, 'unmute', value)
@configure.command(aliases=["dm_on"])
async def dm_notifications(self, ctx):
"""dm_notifications_help"""
embed = discord.Embed(color=600870, title=Translator.translate('infraction_dm_settings', ctx))
enabled = f"{Emoji.get_chat_emoji('YES')} {Translator.translate('enabled', ctx)}"
disabled = f"{Emoji.get_chat_emoji('NO')} {Translator.translate('disabled', ctx)}"
for x in ["WARN", "UNMUTE", "MUTE", "KICK", "BAN", "TEMPBAN"]:
key = f"DM_ON_{x}"
v = Configuration.get_var(ctx.guild.id, "INFRACTIONS", key)
embed.add_field(name=key, value=enabled if v else disabled)
await ctx.send(embed=embed)
@configure.command()
async def log_embeds(self, ctx, value: bool):
Configuration.set_var(ctx.guild.id, "MESSAGE_LOGS", "EMBED", value)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('embed_log_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")
@configure.command(aliases=["log_message_id"])
async def log_message_ids(self, ctx, value: bool):
Configuration.set_var(ctx.guild.id, "MESSAGE_LOGS", "MESSAGE_ID", value)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('message_id_log_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")
@configure.group(aliases=["censorlist", "cl"], invoke_without_command=True)
async def censor_list(self, ctx):
"""censor_list_help"""
if ctx.invoked_subcommand is None:
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if len(censor_list) > 0:
pages = Pages.paginate("\n".join(censor_list))
else:
pages = [Translator.translate('censor_list_empty', ctx)]
content, view, page_num = SimplePager.get_parts(pages, 0, ctx.guild.id, 'censor_list')
await ctx.send(f"**{Translator.translate(f'censor_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```", view=view)
@censor_list.command("add")
async def censor_list_add(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if word in censor_list:
await MessageUtils.send_to(ctx, "NO", "already_censored", word=word)
else:
censor_list.append(word)
await MessageUtils.send_to(ctx, "YES", "entry_added", entry=word)
Configuration.save(ctx.guild.id)
@censor_list.command("remove")
async def censor_list_remove(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if word not in censor_list:
await MessageUtils.send_to(ctx, "NO", "not_censored", word=word)
else:
censor_list.remove(word)
await MessageUtils.send_to(ctx, "YES", "entry_removed", entry=word)
Configuration.save(ctx.guild.id)
@censor_list.command("get")
async def censor_list_get(self, ctx):
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if len(censor_list) > 0:
out = '\n'.join(censor_list)
buffer = io.BytesIO()
buffer.write(out.encode())
buffer.seek(0)
await MessageUtils.send_to(ctx, 'YES', 'censor_list_file',
attachment=discord.File(buffer, filename="token_censorlist.txt"),
server=ctx.guild.name)
else:
await MessageUtils.send_to(ctx, 'WARNING', 'word_censor_list_empty')
@censor_list.command("upload")
async def censor_list_upload(self, ctx):
await self.receive_list(ctx, "CENSORING", "TOKEN_CENSORLIST", "censor")
async def receive_list(self, ctx, target_cat, target_key, prefix):
if len(ctx.message.attachments) != 1:
await MessageUtils.send_to(ctx, 'NO', 'censor_attachment_required')
return
else:
attachment = ctx.message.attachments[0]
if not attachment.filename.endswith(".txt"):
await MessageUtils.send_to(ctx, 'NO', 'censor_attachment_required')
return
elif attachment.size > 1_000_000:
await MessageUtils.send_to(ctx, 'NO', 'attachment_too_big')
return
b = await attachment.read()
try:
content = b.decode('utf-8')
except Exception:
await MessageUtils.send_to(ctx, 'NO', 'list_parsing_failed')
return
new_list = content.splitlines()
if len(new_list) > 250:
await MessageUtils.send_to(ctx, 'NO', 'list_too_long')
return
Configuration.set_var(ctx.guild.id, target_cat, target_key, new_list)
if ctx.guild.id in self.bot.get_cog("Censor").regexes:
del self.bot.get_cog("Censor").regexes[ctx.guild.id]
await MessageUtils.send_to(ctx, 'YES', f'{prefix}_list_set')
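# --- Illustrative note (not part of the original cog) ---
# receive_list above expects the invoking message to carry exactly one .txt
# attachment of at most 1 MB and at most 250 lines, one entry per line, e.g.:
#
#   badword
#   another token
#   some spammy phrase
#
# The uploaded entries replace the stored list, and the guild's cached censor
# regex is invalidated so the new list takes effect immediately.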
@configure.group(aliases=["wordcensorlist", "wcl"], invoke_without_command=True)
async def word_censor_list(self, ctx):
if ctx.invoked_subcommand is None:
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
if len(censor_list) > 0:
pages = Pages.paginate("\n".join(censor_list))
else:
pages = [Translator.translate('word_censor_list_empty', ctx)]
content, view, page_num = SimplePager.get_parts(pages, 0, ctx.guild.id, 'word_censor_list')
await ctx.send(
f"**{Translator.translate(f'word_censor_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```",
view=view)
@word_censor_list.command("add")
async def word_censor_list_add(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
if word in censor_list:
await MessageUtils.send_to(ctx, "NO", "word_already_censored", word=word)
else:
censor_list.append(word)
await MessageUtils.send_to(ctx, "YES", "word_entry_added", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Censor").regexes:
del self.bot.get_cog("Censor").regexes[ctx.guild.id]
@word_censor_list.command("remove")
async def word_censor_list_remove(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
if word not in censor_list:
await MessageUtils.send_to(ctx, "NO", "word_not_censored", word=word)
else:
censor_list.remove(word)
await MessageUtils.send_to(ctx, "YES", "word_entry_removed", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Censor").regexes:
del self.bot.get_cog("Censor").regexes[ctx.guild.id]
@word_censor_list.command("get")
async def word_censor_list_get(self, ctx):
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
if len(censor_list) > 0:
out = '\n'.join(censor_list)
buffer = io.BytesIO()
buffer.write(out.encode())
buffer.seek(0)
await MessageUtils.send_to(ctx, 'YES', 'word_censor_list_file',
attachment=discord.File(buffer, filename="word_censorlist.txt"),
server=ctx.guild.name)
else:
await MessageUtils.send_to(ctx, 'WARNING', 'word_censor_list_empty')
@word_censor_list.command("upload")
async def word_censor_list_upload(self, ctx):
await self.receive_list(ctx, "CENSORING", "WORD_CENSORLIST", "word_censor")
@configure.group(aliases=["flaglist", "fl"], invoke_without_command=True)
async def flag_list(self, ctx):
"""flag_list_help"""
if ctx.invoked_subcommand is None:
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "TOKEN_LIST")
if len(censor_list) > 0:
pages = Pages.paginate("\n".join(censor_list))
else:
pages = [Translator.translate('flag_list_empty', ctx)]
content, view, page_num = SimplePager.get_parts(pages, 0, ctx.guild.id, 'flag_list')
await ctx.send(
f"**{Translator.translate(f'flagged_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```",
view=view)
@flag_list.command("add")
async def flag_list_add(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "TOKEN_LIST")
if word in censor_list:
await MessageUtils.send_to(ctx, "NO", "already_flagged", word=word)
else:
censor_list.append(word)
await MessageUtils.send_to(ctx, "YES", "flag_added", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Moderation").regexes:
del self.bot.get_cog("Moderation").regexes[ctx.guild.id]
@flag_list.command("remove")
async def flag_list_remove(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "TOKEN_LIST")
if word not in censor_list:
await MessageUtils.send_to(ctx, "NO", "not_flagged", word=word)
else:
censor_list.remove(word)
await MessageUtils.send_to(ctx, "YES", "flag_removed", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Moderation").regexes:
del self.bot.get_cog("Moderation").regexes[ctx.guild.id]
@flag_list.command("upload")
async def flag_list_upload(self, ctx):
await self.receive_list(ctx, "FLAGGING", "TOKEN_LIST", "flag")
@flag_list.command("get")
async def flag_list_get(self, ctx):
flag_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "TOKEN_LIST")
if len(flag_list) > 0:
out = '\n'.join(flag_list)
buffer = io.BytesIO()
buffer.write(out.encode())
buffer.seek(0)
await MessageUtils.send_to(ctx, 'YES', 'flag_list_file',
attachment=discord.File(buffer, filename="flag_list.txt"),
server=ctx.guild.name)
else:
await MessageUtils.send_to(ctx, 'WARNING', 'flag_list_empty')
@configure.group(aliases=["wordflaglist", "wfl"], invoke_without_command=True)
async def word_flag_list(self, ctx):
"""word_flag_list_help"""
if ctx.invoked_subcommand is None:
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "WORD_LIST")
if len(censor_list) > 0:
pages = Pages.paginate("\n".join(censor_list))
else:
pages = [Translator.translate('word_flag_list_empty', ctx)]
content, view, page_num = SimplePager.get_parts(pages, 0, ctx.guild.id, 'word_flag_list')
await ctx.send(
f"**{Translator.translate(f'flagged_word_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```",
view=view)
@word_flag_list.command("add")
async def word_flag_list_add(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "WORD_LIST")
if word in censor_list:
await MessageUtils.send_to(ctx, "NO", "word_already_flagged", word=word)
else:
censor_list.append(word)
await MessageUtils.send_to(ctx, "YES", "word_flag_added", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Moderation").regexes:
del self.bot.get_cog("Moderation").regexes[ctx.guild.id]
@word_flag_list.command("remove")
async def word_flag_list_remove(self, ctx, *, word: str):
word = word.lower()
censor_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "WORD_LIST")
if word not in censor_list:
await MessageUtils.send_to(ctx, "NO", "word_not_flagged", word=word)
else:
censor_list.remove(word)
await MessageUtils.send_to(ctx, "YES", "word_flag_removed", entry=word)
Configuration.save(ctx.guild.id)
if ctx.guild.id in self.bot.get_cog("Moderation").regexes:
del self.bot.get_cog("Moderation").regexes[ctx.guild.id]
@word_flag_list.command("upload")
async def word_flag_list_upload(self, ctx):
await self.receive_list(ctx, "FLAGGING", "WORD_LIST", "word_flag")
@word_flag_list.command("get")
async def word_flag_list_get(self, ctx):
flag_list = Configuration.get_var(ctx.guild.id, "FLAGGING", "WORD_LIST")
if len(flag_list) > 0:
out = '\n'.join(flag_list)
buffer = io.BytesIO()
buffer.write(out.encode())
buffer.seek(0)
await MessageUtils.send_to(ctx, 'YES', 'word_flag_list_file',
attachment=discord.File(buffer, filename="word_flag_list.txt"),
server=ctx.guild.name)
else:
await MessageUtils.send_to(ctx, 'WARNING', 'word_flag_list_empty')
@configure.group(invoke_without_command=True)
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def role_list(self, ctx):
"""configure_role_list_help"""
if ctx.invoked_subcommand is None:
items = Configuration.get_var(ctx.guild.id, "ROLES", f"ROLE_LIST")
mode = "allow" if Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE") else "block"
if len(items) == 0:
desc = Translator.translate(f"no_role_{mode}", ctx)
else:
desc = "\n".join(f"<@&{item}>" for item in items)
embed = discord.Embed(title=Translator.translate(f"current_role_{mode}_list", ctx), description=desc)
await ctx.send(embed=embed)
@role_list.command("add")
async def role_list_add(self, ctx, *, role: discord.Role):
"""configure_role_list_add"""
roles = Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST")
mode = "allow" if Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE") else "block"
if role == ctx.guild.default_role:
await MessageUtils.send_to(ctx, "NO", "default_role_forbidden")
elif role.id in roles:
await MessageUtils.send_to(ctx, "NO", f"role_list_add_fail", role=Utils.escape_markdown(role.name))
else:
roles.append(role.id)
Configuration.save(ctx.guild.id)
await MessageUtils.send_to(ctx, "YES", f"role_list_add_confirmation_{mode}",
#!/usr/bin/env python3
import os
import sys
import errno
import stat
from binascii import unhexlify, hexlify
from fuse import FUSE, FuseOSError, Operations
import struct
import os
import socket
import json
import tempfile
## Class used to mimic file access to external memory.
class Physmem(Operations):
## Functions exposed by each driver class on server.
order = ['install', 'map', 'read', 'uninstall']
## Class constructor.
#
# Install driver and recover data necessary to read memory.
# @param sock socket connection with server.
# @param mountpoint path to file with mounted memory.
# @param driver driver class used by server to expose memory.
# @param installArguments additional data defined by driver class.
# @exceptions Exception if server was unable to deliver basic data.
def __init__(self, sock, mountpoint, driver, installArguments):
self.FILENAME = "memimage.raw"
self.read_total = 0
self.read_stat_cached = 0
self.host = sock.getsockname()[0]
self.port = sock.getsockname()[1]
self.socket = sock
self.driver = driver
self.installArguments = installArguments
# send first order to install driver.
order = ('%s\n%s\n%s' % (self.driver, self.order[0], self.installArguments)).encode('utf-8')
msg = struct.pack("<I%ds" % len(order), len(order), order)
self.socket.sendall(msg)
received = self.socket.recv(4)
response = struct.unpack("<I", received)[0]
if (response):
raise Exception(struct.unpack("<%ds" % response, self.socket.recv(response))[0].decode('utf-8'))
print("[*] Driver installed")
# send map order.
order = ('%s\n%s\n' % (self.driver, self.order[1])).encode('utf-8')
msg = struct.pack("<I%ds" % len(order), len(order), order)
self.socket.sendall(msg)
# Receive map data from the server
received = self.socket.recv(32)
dtb, build, kernel_base, n = struct.unpack("<QQQQ", received)
#print("DTB", hex(dtb))
#print("build", build)
#print("kernel_base", hex(kernel_base))
if kernel_base == 0:
kernel_base = None
self.runs = []
for x in range(n):
received = self.socket.recv(16)
start, size = struct.unpack("<QQ", received)
#print(hex(start), size)
self.runs.append((start,size))
self.image_size = start + size
#print("Image size: %u MB" % (self.image_size/(1024*1024)))
self.read_progress = 0
self.read_stat_cached = 0
# Caching
self.PAGE_SIZE = 4096
self.cache = {}
self.queued_offset = None
self.queued_size = 0
self.gathered = []
self.extra = []
# Write the config to JSON file
config = dict(dtb=dtb, kernel_base=kernel_base, build=build, image=os.path.join(mountpoint, self.FILENAME))
#print(config)
with open('config.json', 'w') as f:
json.dump(config, f)
print("[*] Wrote config to config.json")
print("[*] Exposing the physical memory as a file")
## Destructor closing connection.
def __del__(self):
# Send command to unload the driver
order = ('%s\n%s\n' % (self.driver, self.order[3])).encode('utf-8')
msg = struct.pack("<I%ds" % len(order), len(order), order)
self.socket.sendall(msg)
received = self.socket.recv(4)
response = struct.unpack("<I", received)[0]
if (response):
raise Exception(struct.unpack("<%ds" % response, self.socket.recv(response))[0].decode('utf-8'))
# Send exit command
order = ('exit\n').encode('utf-8')
msg = struct.pack("<I%ds" % len(order), len(order), order)
self.socket.sendall(msg)
print("[*] Read %u MB, cached reads %u MB" % (self.read_total / (1024*1024), self.read_stat_cached / (1024*1024)))
self.socket.close()
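# --- Illustrative note on the wire protocol (inferred from __init__/__del__
# above and the read path below; hedged) ---
# Every request is a little-endian, length-prefixed payload:
#     struct.pack("<I%ds" % len(order), len(order), order)
# with order = b"<driver>\n<command>\n<arguments>". install/uninstall replies
# start with a 4-byte status; a non-zero status is followed by that many bytes
# of UTF-8 error text. read requests append two uint64 fields (offset, size)
# and the reply is the raw memory contents.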
## Fuse, read attributes of files/directories.
#
# @param path path of file/directory.
# @param fh flags, not used.
# @exceptions FuseOSError if path is neither the root directory nor the supported file.
def getattr(self, path, fh=None):
if path == "/":
dir = { 'st_mode' : stat.S_IFDIR | 0o555, 'st_nlink' : 2 }
return dir
elif path == os.path.join('/', self.FILENAME):
#size = os.stat(self.root).st_size
size = self.image_size
f = { 'st_mode' : stat.S_IFREG | 0o444, 'st_nlink' : 1, 'st_size' : size }
return f
raise FuseOSError(errno.ENOENT)
## Fuse, provide directory content.
#
# Only one file is supported.
# @param path path of file/directory, not used.
# @param fh flags, not used.
def readdir(self, path, fh):
dirents = ['.', '..', self.FILENAME]
for r in dirents:
yield r
## Fuse, open file.
#
# Always successful. __init__ ensures data is accessible.
def open(self, path, flags):
return 0
## Internal, retrieve page.
#
# Function retrieves page from cache, or adds page to queue.
# Can trigger fetching queued data from server.
# @param pagenum number of page to retrieve.
def _gather_page(self, pagenum):
#print("Gathering page %u (offset %x)" % (pagenum, pagenum*self.PAGE_SIZE))
if len(self.extra) > 0 or (self.queued_size != 0 and pagenum*self.PAGE_SIZE != self.queued_offset+self.queued_size):
#print("Fetching queued data (requested %x, queued %x-%x)" % (pagenum*self.PAGE_SIZE, self.queued_offset, self.queued_offset+self.queued_size))
#print("Fetching queued data")
self._recv_queued()
if self.read_progress > 1024*1024:
self.read_total += self.read_progress
self.read_progress = 0
#print("Read %u MB, cached reads %u MB" % (self.read_total / (1024*1024), self.read_stat_cached / (1024*1024)))
if pagenum in self.cache:
#print("Returning page %u (offset %x) from cache" % (pagenum, pagenum*self.PAGE_SIZE))
self.read_stat_cached += self.PAGE_SIZE
self.read_progress += self.PAGE_SIZE
#print("Appending cached")
#self.gathered.append(self.cache[pagenum])
self.extra.append(self.cache[pagenum])
return
requested_length = length = self.PAGE_SIZE
offset = pagenum*self.PAGE_SIZE
for start,size in self.runs:
if start <= offset < (start + size):
if (offset + length) > (start + size):
padlen = (offset + length) - (start + size)
#print("We have extra")
self.extra.append(b'\x00'*padlen)
length = requested_length - padlen
break
else:
# We don't want to cache these
#print("Appending zeros")
self.extra.append(b'\x00'*length)
return
self.queued_size += length
if self.queued_offset is None:
self.queued_offset = offset
self.read_progress += length
return
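# --- Illustrative note (inferred from _gather_page above; hedged) ---
# Consecutive uncached pages are not fetched one at a time: their range is
# coalesced into (queued_offset, queued_size) and pulled from the server in a
# single read by _recv_queued. Cache hits and zero-filled gaps outside the
# memory runs go to self.extra; _recv_queued appends the fetched bytes first
# and self.extra after them, which preserves request order because pages are
# gathered in ascending order.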
## Internal, retrieve queued data from server.
def _recv_queued(self):
# Is there anything to read from network?
if self.queued_size == 0:
# Add the stuff from extra anyway
if len(self.extra) > 0:
self.gathered.extend(self.extra)
self.extra = []
return
assert((self.queued_offset % self.PAGE_SIZE) == 0)
order = ('%s\n%s\n' % (self.driver, self.order[2])).encode('utf-8')
msg = struct.pack("<I%dsQQ" % len(order), len(order) + 16, order, self.queued_offset, self.queued_size)
self.socket.sendall(msg)
to_read = self.queued_size
blobs = []
while to_read > 0:
blob = self.socket.recv(to_read)
if not blob:
break
blobs.append(blob)
to_read -= len(blob)
# Add data to cache
# self.queued_offset is guaranteed to be a multiple of self.PAGE_SIZE
data = b''.join(blobs)
#print("Received %u bytes from offset %x" % (len(data), self.queued_offset))
for i in range(len(data)//self.PAGE_SIZE):
#print("Caching page %u" % (self.queued_offset//self.PAGE_SIZE))
assert((self.queued_offset//self.PAGE_SIZE + i) not in self.cache)
self.cache[self.queued_offset//self.PAGE_SIZE + i] = data[i*self.PAGE_SIZE:(i+1)*self.PAGE_SIZE]
#print("Items in gathered before: %u" % (len(self.gathered)))
self.gathered.extend(blobs)
self.gathered.extend(self.extra)
self.extra = []
self.queued_offset = None
self.queued_size = 0
return
## Internal, get all gathered data.
#
# Triggers fetching queued data.
def _get_all(self):
self._recv_queued()
buf = b''.join(self.gathered)
self.gathered = []
return buf
## Fuse, read uncached data.
#
# Function will fetch data, without checking cache. New data will not be cached.
# @param path path of file. Not used, only one file is supported.
# @param requested_length requested size of data.
# @param offset offset from file start.
# @param fh flags, not used.
def read_uncached(self, path, requested_length, offset, fh):
length = requested_length
extra = b''
for start,size in self.runs:
if start <= offset < (start + size):
if (offset + length) > (start + size):
padlen = (offset + length) - (start + size)
extra = b'\x00'*padlen
length = requested_length - padlen
break
else:
#print("Returning zeros")
return b'\x00'*length
#print("Reading %u bytes from 0x%x" % (length, offset))
self.read_progress += length
#print("Sending")
order = ('%s\n%s\n' % (self.driver, self.order[2])).encode('utf-8')
msg = struct.pack("<I%dsQQ" % len(order), len(order) + 16, order, offset, length)
self.socket.sendall(msg)
#print("Sent %u bytes. Receiving" % (sent))
amount_received = 0
to_read = length
blobs = []
while amount_received < length:
blob = self.socket.recv(to_read)
if not blob:
break
blobs.append(blob)
amount_received += len(blob)
to_read -= len(blob)
data = b''.join(blobs)
data += extra
#print("Received %u bytes" % (len(data)))
if self.read_progress > 1024*1024:
self.read_total += self.read_progress
self.read_progress = 0
#print("Read %u megabytes" % (self.read_total / (1024*1024)))
return data
## Fuse, read data.
#
# Function will first look in cache, missing data will be fetched. New data will be cached.
# @param path path of file. Not used, only one file is supported.
# @param requested_length requested size of data.
# @param offset offset from file start.
# @param fh flags, not used.
def read_cached(self, path, requested_length, offset, fh):
#print("[read] offset %x, length: %u" % (offset, requested_length))
for pagenum in range(offset // self.PAGE_SIZE, (offset+requested_length) // self.PAGE_SIZE+1, 1):
self._gather_page(pagenum)
buf = self._get_all()
#print("Len buf %u" % (len(buf)))
buf = buf[offset % self.PAGE_SIZE:-(self.PAGE_SIZE-((offset+requested_length) % self.PAGE_SIZE))]
#print("Len buf %u" % (len(buf)), hex(offset % self.PAGE_SIZE), hex(self.PAGE_SIZE-((offset+requested_length) % self.PAGE_SIZE)))
return buf
## Fuse, read data.
#
# Same as read_cached.
# Function will first look in cache, missing data will be fetched. New data will be cached.
# @param path path of file. Not used, only one file is supported.
# @param requested_length requested size of data.
# @param offset offset from file start.
# @param fh flags, not used.
def read(self, path, requested_length, offset, fh):
#print("[read] offset %x, length: %u" % (offset, requested_length))
#data1 = self.read_uncached(path,
Should return None if
the concrete agent does not require point name mapping and/or unit
conversion.
"""
def parse_point_name_mapping(self):
"""
Parses point name mapping, which should contain a mapping of service
points to standard points, with specified units.
"""
point_name_mapping = {}
try:
mapping_file = self.get_point_name_defs_file()
except Exception as e:
_log.warning("Error loading mapping file ({})".format(e))
return None
if mapping_file:
try:
if isinstance(mapping_file, str):
mapping_file = open(mapping_file)
# else assume it is a file like object
config_dict = csv.DictReader(mapping_file)
for map_item in config_dict:
service_point_name = map_item.get("Service_Point_Name")
if service_point_name:
standard_point_name = map_item.get(
"Standard_Point_Name", service_point_name)
if not len(standard_point_name):
standard_point_name = service_point_name
standard_units = map_item.get("Standard_Units")
service_units = map_item.get("Service_Units")
point_name_mapping[service_point_name] = \
{"Standard_Point_Name": standard_point_name,
"Standard_Units": standard_units,
"Service_Units": service_units}
except IOError as error:
_log.error("Error parsing standard point name mapping: "
"{}".format(error))
raise ValueError("Error parsing point name mapping from file "
"{}".format(error))
finally:
mapping_file.close()
return point_name_mapping
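# --- Hedged example of the point-name mapping CSV parsed above (the rows are
# made up; only the column names are taken from the code) ---
#
#   Service_Point_Name,Standard_Point_Name,Standard_Units,Service_Units
#   temperature,air_temperature,celsius,fahrenheit
#   windSpeed,wind_speed,meter/sec,km/h
#
# Service_Point_Name is required for a row to be used; Standard_Point_Name
# falls back to the service name and the two unit columns may be left blank.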
def _configure(self, config_name, actions, contents):
"""
Handles most of the configuration of weather agent implementations
:param config_name: unused parameter, required by config store
:param actions: unused parameter, required by config store
:param contents: Configuration dictionary used to specify operational
parameters for the agent.
"""
self.vip.heartbeat.start()
_log.info("Configuring weather agent.")
config = self._default_config.copy()
config.update(contents)
max_size_gb = config.get("max_size_gb")
try:
if max_size_gb is not None:
self._max_size_gb = float(max_size_gb)
except ValueError:
_log.warn("Invalid value for max_size_gb: {} "
"defaulting to 1GB".format(max_size_gb))
self._max_size_gb = 1
self._api_key = config.get("api_key")
self._api_calls_limit = config.get("api_calls_limit",
self._api_calls_limit)
self.poll_locations = config.get("poll_locations")
self.poll_interval = config.get("poll_interval")
self.poll_topic_suffixes = config.get("poll_topic_suffixes")
try:
self.validate_poll_config()
self.configure(config)
except Exception as e:
_log.error("Failed to load weather agent settings with error:"
"{}".format(e))
self.vip.health.set_status(STATUS_BAD,
"Configuration of weather agent failed "
"with error: {}".format(e))
else:
_log.debug("Configuration successful")
try:
self._cache = WeatherCache(self._database_file,
calls_period=self._api_calls_period,
calls_limit=self._api_calls_limit,
api_services=self._api_services,
max_size_gb=self._max_size_gb)
self.vip.health.set_status(STATUS_GOOD,
"Configuration of weather agent "
"successful")
except sqlite3.OperationalError as error:
_log.error("Error initializing cache: {}".format(error))
self.vip.health.set_status(STATUS_BAD, "Cache failed to start "
"during configuration")
if self.do_polling:
if self.poll_greenlet:
self.poll_greenlet.kill()
self.poll_greenlet = self.core.periodic(self.poll_interval,
self.poll_for_locations)
def validate_poll_config(self):
"""
Ensures that polling settings have been properly configured.
:return: boolean indicating whether the polling options provided were
properly formatted.
"""
if self.poll_locations:
if not self.poll_interval:
err_msg = "poll_interval is mandatory configuration when " \
"poll_locations are specified"
raise ValueError(err_msg)
if (self.poll_topic_suffixes is not None and
(not isinstance(self.poll_topic_suffixes, list) or
len(self.poll_topic_suffixes) < len(self.poll_locations))):
err_msg = "poll_topic_suffixes, if set, should be a list of " \
"string with the same length as poll_locations. If " \
"it is not set results for all locations will be " \
"published to a single topic(" \
"weather/poll/current/all). If it is a list, " \
"each location's result will be published to the " \
"corresponding topic (" \
"weather/poll/current/<topic_suffix>)"
raise ValueError(err_msg)
self.do_polling = True
def configure(self, configuration):
"""Optional, may be implemented by a concrete implementation to add
support for the configuration store.
Values should be stored in this function only.
The process thread is stopped before this is called if it is running.
It is started afterwards.
:param configuration:
"""
pass
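# --- Hedged configuration example (keys taken from _configure above; the
# values and the zipcode-style locations are made up) ---
#
#   {
#       "max_size_gb": 1,
#       "api_key": "<remote service API key>",
#       "api_calls_limit": 1000,
#       "poll_locations": [{"zipcode": "99353"}, {"zipcode": "99354"}],
#       "poll_interval": 3600,
#       "poll_topic_suffixes": ["richland", "pasco"]
#   }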
# RPC, helper and abstract methods to be used by concrete
# implementations of the weather agent
@RPC.export
def get_version(self):
"""
Provides the current version of the agent.
:return: current version number in string format.
"""
return __version__
@RPC.export
def get_api_features(self):
"""
Provides api features and corresponding descriptions for users of the
weather agent.
:return: dictionary formatted as {function call: description string}
"""
features = {}
for service_name in self._api_services:
features[service_name] = \
self._api_services[service_name]["description"]
return features
def api_calls_available(self, num_calls=1):
"""
Pass through to cache to check if there are api calls available to
remote API
:param num_calls: number of calls to request for API tracking
:return: whether the requested number of calls are available
"""
return self._cache.api_calls_available(num_calls=num_calls)
def add_api_call(self):
"""
Add API call timestamp entry to cache - this method is used by concrete
implementations for managing API call tracking for features not included
in the base weather agent
:return:
"""
return self._cache.add_api_call()
@RPC.export
def get_current_weather(self, locations):
"""
RPC method returning current weather data for each location provided.
Will provide cached data for efficiency if
available.
:param locations: List of location dictionary objects.
:return: list of dictionaries containing weather data for each location.
result dictionary would contain all location details passed
as input. Weather data results will be returned in the key
'weather_results'. In case of errors, error message will be
in the key 'weather_error'.
For example:
Input: [{"zipcode":"99353"}, {"zipcode":"invalid zipcode"},
{"zipcode":"99354"}]
Output:
[{'observation_time': '2018-11-15T20:53:00.000000+00:00',
'zipcode': '99353',
'weather_results':
{ 'dew_point_temperature': -6.099999999999966,
'wind_speed_of_gust': {'qualityControl': 'qc:Z',
'unitCode': 'unit:m_s-1',
'value': None
},
'textDescription': 'Mostly Cloudy',
'timestamp': '2018-11-15T20:53:00+00:00'
}
},
{'zipcode': 'invalid zipcode',
'weather_error': "Invalid location"
},
{'zipcode': '99354',
'weather_error': 'Remote API returned
invalid response (code 500)'
}
]
"""
result = []
for location in locations:
record_dict = self.validate_location_dict(SERVICE_CURRENT_WEATHER,
location)
if record_dict:
result.append(record_dict)
continue
# Attempt getting from cache
record_dict = self.get_cached_current_data(location)
cache_warning = record_dict.get(WEATHER_WARN)
# if there was no data in cache or if data is old, query api
if not record_dict.get(WEATHER_RESULTS):
_log.debug("Current weather data from api")
if self.api_calls_available():
record_dict = self.get_current_weather_remote(location)
# rework this check to catch specific problems (probably
# just weather_error)
if cache_warning:
warnings = record_dict.get(WEATHER_WARN, [])
warnings.extend(cache_warning)
record_dict[WEATHER_WARN] = warnings
else:
record_dict[WEATHER_ERROR] = "No calls currently " \
"available for the " \
"configured API key"
result.append(record_dict)
return result
def get_cached_current_data(self, location):
"""
Retrieves current weather data stored in cache if it exists and is
current (the timestamp is within the update
interval) for the location
:param location: location to retrieve current stored data for.
:return: current weather data dictionary
"""
result = location.copy()
try:
observation_time, data = \
self._cache.get_current_data(SERVICE_CURRENT_WEATHER,
jsonapi.dumps(location))
if observation_time and data:
interval = self._api_services[SERVICE_CURRENT_WEATHER][
"update_interval"]
# ts in cache is tz aware utc
current_time = get_aware_utc_now()
next_update_at = observation_time + interval
if current_time < next_update_at:
result["observation_time"] = \
format_timestamp(observation_time)
result[WEATHER_RESULTS] = jsonapi.loads(data)
except Exception as error:
bad_cache_message = "Weather agent failed to read from " \
"cache"
self.vip.health.set_status(STATUS_BAD,
bad_cache_message)
status = Status.from_json(self.vip.health.get_status_json())
self.vip.health.send_alert(CACHE_READ_ERROR, status)
_log.error("{}. Exception:{}".format(bad_cache_message,
error))
self.cache_read_error = True
result[WEATHER_WARN] = [bad_cache_message]
else:
if self.cache_read_error:
self.vip.health.set_status(STATUS_GOOD)
self.cache_read_error = False
return result
def get_current_weather_remote(self, location):
"""
Retrieves current weather data for a location from the remote api
service provider
:param location: location for which to retrieve current weather data
from the api
:return: dictionary of weather data, or containing an error message
if the api call failed. Example - input: output:
"""
result = location.copy()
try:
observation_time, data = self.query_current_weather(
location)
observation_time, oldtz = process_timestamp(
observation_time)
if self.point_name_mapping:
data = self.apply_mapping(data)
if observation_time is not None:
storage_record = [jsonapi.dumps(location),
observation_time,
jsonapi.dumps(data)]
try:
self.store_weather_records(SERVICE_CURRENT_WEATHER,
storage_record)
except Exception:
bad_cache_message = "Weather agent failed to write to " \
"cache"
result[WEATHER_WARN] = [bad_cache_message]
result["observation_time"] = \
format_timestamp(observation_time)
result[WEATHER_RESULTS] = data
else:
result[WEATHER_ERROR] = "Weather api did not " \
"return any records"
except Exception as error:
_log.error("Exception getting current weather from remote {}".format(error))
result[WEATHER_ERROR] = str(error)
return result
@abstractmethod
def query_current_weather(self, location):
"""
Abstract method for sending/receiving requests for current weather
data from an api service
:param location: location for which to query the remote api
:return: dictionary containing a single record of current weather data
"""
# TODO make sure we can extract the errors we get if there aren't
# api_calls_available
def get_forecast_by_service(self, locations, service, service_length,
quantity):
"""
RPC method returning hourly forecast weather data for each location
provided. Will provide cached data for
efficiency if available.
:param locations: list of location dictionaries for which to return
weather data
:param service_length: string representing the service interval
:param quantity: number of time series data points of data to include
with each location's records
:param service:
:return: list of dictionaries containing weather data for each location.
result dictionary would contain all location details passed
as input in addition to results. Weather data results will be
returned in the key 'weather_results'. value of
'weather_results' will be in the format
[[<forecast time>, <dictionary of data returned for
that forecast time>], [<forecast time>, <dictionary of data
returned for
# cbuildbot/stages/build_stages.py
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the build stages."""
from __future__ import print_function
import glob
import os
from chromite.cbuildbot import chroot_lib
from chromite.cbuildbot import commands
from chromite.cbuildbot import constants
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import repository
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import test_stages
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import portage_util
class CleanUpStage(generic_stages.BuilderStage):
"""Stages that cleans up build artifacts from previous runs.
This stage cleans up previous KVM state, temporary git commits,
clobbers, and wipes tmp inside the chroot.
"""
option_name = 'clean'
def _CleanChroot(self):
logging.info('Cleaning chroot.')
commands.CleanupChromeKeywordsFile(self._boards,
self._build_root)
chroot_dir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
chroot_tmpdir = os.path.join(chroot_dir, 'tmp')
if os.path.exists(chroot_tmpdir):
osutils.RmDir(chroot_tmpdir, ignore_missing=True, sudo=True)
cros_build_lib.SudoRunCommand(['mkdir', '--mode', '1777', chroot_tmpdir],
print_cmd=False)
# Clear out the incremental build cache between runs.
cache_dir = 'var/cache/portage'
d = os.path.join(chroot_dir, cache_dir)
osutils.RmDir(d, ignore_missing=True, sudo=True)
for board in self._boards:
d = os.path.join(chroot_dir, 'build', board, cache_dir)
osutils.RmDir(d, ignore_missing=True, sudo=True)
def _DeleteChroot(self):
logging.info('Deleting chroot.')
chroot = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
if os.path.exists(chroot):
# At this stage, it's not safe to run the cros_sdk inside the buildroot
# itself because we haven't sync'd yet, and the version of the chromite
# in there might be broken. Since we've already unmounted everything in
# there, we can just remove it using rm -rf.
osutils.RmDir(chroot, ignore_missing=True, sudo=True)
def _DeleteArchivedTrybotImages(self):
"""Clear all previous archive images to save space."""
logging.info('Deleting archived trybot images.')
for trybot in (False, True):
archive_root = self._run.GetArchive().GetLocalArchiveRoot(trybot=trybot)
osutils.RmDir(archive_root, ignore_missing=True)
def _DeleteArchivedPerfResults(self):
"""Clear any previously stashed perf results from hw testing."""
logging.info('Deleting archived perf results.')
for result in glob.glob(os.path.join(
self._run.options.log_dir,
'*.%s' % test_stages.HWTestStage.PERF_RESULTS_EXTENSION)):
os.remove(result)
def _DeleteChromeBuildOutput(self):
logging.info('Deleting Chrome build output.')
chrome_src = os.path.join(self._run.options.chrome_root, 'src')
for out_dir in glob.glob(os.path.join(chrome_src, 'out_*')):
osutils.RmDir(out_dir)
def _BuildRootGitCleanup(self):
logging.info('Cleaning up buildroot git repositories.')
# Run git gc --auto --prune=all on all repos in CleanUpStage
commands.BuildRootGitCleanup(self._build_root, prune_all=True)
def _DeleteAutotestSitePackages(self):
"""Clears any previously downloaded site-packages."""
logging.info('Deleting autotest site packages.')
site_packages_dir = os.path.join(self._build_root, 'src', 'third_party',
'autotest', 'files', 'site-packages')
# Note that these shouldn't be recreated but might be around from stale
# builders.
osutils.RmDir(site_packages_dir, ignore_missing=True)
def _WipeOldOutput(self):
logging.info('Wiping old output.')
commands.WipeOldOutput(self._build_root)
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
if (not (self._run.options.buildbot or self._run.options.remote_trybot)
and self._run.options.clobber):
if not commands.ValidateClobber(self._build_root):
cros_build_lib.Die("--clobber in local mode must be approved.")
# If we can't get a manifest out of it, then it's not usable and must be
# clobbered.
manifest = None
if not self._run.options.clobber:
try:
manifest = git.ManifestCheckout.Cached(self._build_root, search=False)
except (KeyboardInterrupt, MemoryError, SystemExit):
raise
except Exception as e:
# Either there is no repo there, or the manifest isn't usable. If the
# directory exists, log the exception for debugging reasons. Either
# way, the checkout needs to be wiped since it's in an unknown
# state.
if os.path.exists(self._build_root):
logging.warning("ManifestCheckout at %s is unusable: %s",
self._build_root, e)
# Clean mount points first to be safe about deleting.
commands.CleanUpMountPoints(self._build_root)
if manifest is None:
self._DeleteChroot()
repository.ClearBuildRoot(self._build_root,
self._run.options.preserve_paths)
else:
tasks = [self._BuildRootGitCleanup,
self._WipeOldOutput,
self._DeleteArchivedTrybotImages,
self._DeleteArchivedPerfResults,
self._DeleteAutotestSitePackages]
if self._run.options.chrome_root:
tasks.append(self._DeleteChromeBuildOutput)
if self._run.config.chroot_replace and self._run.options.build:
tasks.append(self._DeleteChroot)
else:
tasks.append(self._CleanChroot)
parallel.RunParallelSteps(tasks)
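# --- Illustrative aside (not chromite code): parallel.RunParallelSteps above
# fans the independent cleanup callables out and waits for all of them;
# chromite's implementation uses separate processes, so the thread-based helper
# below only mirrors the fan-out/join shape. The name is made up and nothing in
# cbuildbot calls it.
def _example_run_steps_in_parallel(steps):
  """Run independent zero-argument callables concurrently and wait for all."""
  from concurrent.futures import ThreadPoolExecutor
  with ThreadPoolExecutor() as pool:
    futures = [pool.submit(step) for step in steps]
    for future in futures:
      future.result()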
class InitSDKStage(generic_stages.BuilderStage):
"""Stage that is responsible for initializing the SDK."""
option_name = 'build'
def __init__(self, builder_run, chroot_replace=False, **kwargs):
"""InitSDK constructor.
Args:
builder_run: Builder run instance for this run.
chroot_replace: If True, force the chroot to be replaced.
"""
super(InitSDKStage, self).__init__(builder_run, **kwargs)
self.force_chroot_replace = chroot_replace
def PerformStage(self):
chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
replace = self._run.config.chroot_replace or self.force_chroot_replace
pre_ver = post_ver = None
if os.path.isdir(self._build_root) and not replace:
try:
pre_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
commands.RunChrootUpgradeHooks(
self._build_root, chrome_root=self._run.options.chrome_root,
extra_env=self._portage_extra_env)
except failures_lib.BuildScriptFailure:
logging.PrintBuildbotStepText('Replacing broken chroot')
logging.PrintBuildbotStepWarnings()
else:
# Clear the chroot manifest version as we are in the middle of building.
chroot_manager = chroot_lib.ChrootManager(self._build_root)
chroot_manager.ClearChrootVersion()
if not os.path.isdir(chroot_path) or replace:
use_sdk = (self._run.config.use_sdk and not self._run.options.nosdk)
pre_ver = None
commands.MakeChroot(
buildroot=self._build_root,
replace=replace,
use_sdk=use_sdk,
chrome_root=self._run.options.chrome_root,
extra_env=self._portage_extra_env)
post_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
if pre_ver is not None and pre_ver != post_ver:
logging.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
else:
logging.PrintBuildbotStepText(post_ver)
commands.SetSharedUserPassword(
self._build_root,
password=self._run.config.shared_user_password)
class SetupBoardStage(generic_stages.BoardSpecificBuilderStage, InitSDKStage):
"""Stage that is responsible for building host pkgs and setting up a board."""
option_name = 'build'
def PerformStage(self):
# We need to run chroot updates on most builders because they uprev after
# the InitSDK stage. For the SDK builder, we can skip updates because uprev
# is run prior to InitSDK. This is not just an optimization: it helps
# work around http://crbug.com/225509
if self._run.config.build_type != constants.CHROOT_BUILDER_TYPE:
usepkg_toolchain = (self._run.config.usepkg_toolchain and
not self._latest_toolchain)
commands.UpdateChroot(
self._build_root, toolchain_boards=[self._current_board],
usepkg=usepkg_toolchain)
# Always update the board.
usepkg = self._run.config.usepkg_build_packages
commands.SetupBoard(
self._build_root, board=self._current_board, usepkg=usepkg,
chrome_binhost_only=self._run.config.chrome_binhost_only,
force=self._run.config.board_replace,
extra_env=self._portage_extra_env, chroot_upgrade=False,
profile=self._run.options.profile or self._run.config.profile)
class BuildPackagesStage(generic_stages.BoardSpecificBuilderStage,
generic_stages.ArchivingStageMixin):
"""Build Chromium OS packages."""
option_name = 'build'
def __init__(self, builder_run, board, suffix=None, afdo_generate_min=False,
afdo_use=False, update_metadata=False, **kwargs):
if afdo_use:
suffix = self.UpdateSuffix(constants.USE_AFDO_USE, suffix)
super(BuildPackagesStage, self).__init__(builder_run, board, suffix=suffix,
**kwargs)
self._afdo_generate_min = afdo_generate_min
self._update_metadata = update_metadata
assert not afdo_generate_min or not afdo_use
useflags = self._portage_extra_env.get('USE', '').split()
if afdo_use:
useflags.append(constants.USE_AFDO_USE)
if useflags:
self._portage_extra_env['USE'] = ' '.join(useflags)
def VerifyChromeBinpkg(self, packages):
# Sanity check: If we didn't check out Chrome (and we're running on ToT),
# we should be building Chrome from a binary package.
if (not self._run.options.managed_chrome and
self._run.manifest_branch == 'master'):
commands.VerifyBinpkg(self._build_root,
self._current_board,
constants.CHROME_CP,
packages,
extra_env=self._portage_extra_env)
def GetListOfPackagesToBuild(self):
"""Returns a list of packages to build."""
if self._run.config.packages:
# If the list of packages is set in the config, use it.
return self._run.config.packages
# TODO: the logic below is duplicated from the build_packages
# script. Once we switch to `cros build`, we should consolidate
# the logic in a shared location.
packages = ['virtual/target-os']
# Build Dev packages by default.
packages += ['virtual/target-os-dev']
# Build test packages by default.
packages += ['virtual/target-os-test']
# Build factory packages if requested by config.
if self._run.config.factory:
packages += ['virtual/target-os-factory',
'virtual/target-os-factory-shim']
if self._run.ShouldBuildAutotest():
packages += ['chromeos-base/autotest-all']
return packages
def RecordPackagesUnderTest(self, packages_to_build):
"""Records all packages that may affect the board to BuilderRun."""
deps = dict()
# Include packages that are built in chroot because they can
# affect any board.
packages = ['virtual/target-sdk']
# Include chromite because we are running cbuildbot.
packages += ['chromeos-base/chromite']
try:
deps.update(commands.ExtractDependencies(self._build_root, packages))
# Include packages that will be built as part of the board.
deps.update(commands.ExtractDependencies(self._build_root,
packages_to_build,
board=self._current_board))
except Exception as e:
# Dependency extraction may fail due to bad ebuild changes. Let
# the build continue because we have logic to triage build
# packages failures separately. Note that we only categorize CLs
# on the package-level if dependencies are extracted
# successfully, so it is safe to ignore the exception.
logging.warning('Unable to gather packages under test: %s', e)
else:
logging.info('Recording packages under test')
self.board_runattrs.SetParallel('packages_under_test', set(deps.keys()))
def PerformStage(self):
# If we have rietveld patches, always compile Chrome from source.
noworkon = not self._run.options.rietveld_patches
packages = self.GetListOfPackagesToBuild()
self.VerifyChromeBinpkg(packages)
self.RecordPackagesUnderTest(packages)
commands.Build(self._build_root,
self._current_board,
build_autotest=self._run.ShouldBuildAutotest(),
usepkg=self._run.config.usepkg_build_packages,
chrome_binhost_only=self._run.config.chrome_binhost_only,
packages=packages,
skip_chroot_upgrade=True,
chrome_root=self._run.options.chrome_root,
noworkon=noworkon,
noretry=self._run.config.nobuildretry,
extra_env=self._portage_extra_env)
if self._update_metadata:
# TODO: Consider moving this into its own stage if there are other similar
# things to do after build_packages.
# Extract firmware version information from the newly created updater.
main, ec = commands.GetFirmwareVersions(self._build_root,
self._current_board)
update_dict = {'main-firmware-version': main, 'ec-firmware-version': ec}
self._run.attrs.metadata.UpdateBoardDictWithDict(
self._current_board, update_dict)
# Write board metadata update to cidb
build_id, db = self._run.GetCIDBHandle()
if db:
db.UpdateBoardPerBuildMetadata(build_id, self._current_board,
update_dict)
class BuildImageStage(BuildPackagesStage):
"""Build standard Chromium OS images."""
option_name = 'build'
config_name = 'images'
def _BuildImages(self):
# We only build base, dev, and test images from this stage.
if self._afdo_generate_min:
images_can_build = set(['test'])
else:
images_can_build = set(['base', 'dev', 'test'])
images_to_build = set(self._run.config.images).intersection(
images_can_build)
version = self._run.attrs.release_tag
disk_layout = self._run.config.disk_layout
if self._afdo_generate_min and version:
version = '%s-afdo-generate' % version
rootfs_verification = self._run.config.rootfs_verification
commands.BuildImage(self._build_root,
self._current_board,
sorted(images_to_build),
rootfs_verification=rootfs_verification,
version=version,
disk_layout=disk_layout,
extra_env=self._portage_extra_env)
# Update link to latest image.
latest_image = os.readlink(self.GetImageDirSymlink('latest'))
cbuildbot_image_link = self.GetImageDirSymlink()
if os.path.lexists(cbuildbot_image_link):
os.remove(cbuildbot_image_link)
os.symlink(latest_image, cbuildbot_image_link)
self.board_runattrs.SetParallel('images_generated', True)
parallel.RunParallelSteps(
[self._BuildVMImage, lambda: self._GenerateAuZip(cbuildbot_image_link),
self._BuildGceTarballs])
def _BuildVMImage(self):
if self._run.config.vm_tests and not self._afdo_generate_min:
commands.BuildVMImageForTesting(
self._build_root,
self._current_board,
extra_env=self._portage_extra_env)
def _GenerateAuZip(self, image_dir):
"""Create au-generator.zip."""
if not self._afdo_generate_min:
commands.GenerateAuZip(self._build_root,
image_dir,
extra_env=self._portage_extra_env)
def _BuildGceTarballs(self):
"""Creates .tar.gz files that can be converted to GCE images.
These files will be
# pytorch_fid_wrapper/fid_score.py
"""
# ----------------------------
# ----- pfw docstrings -----
# ----------------------------
Adapted from:
https://github.com/mseitzer/pytorch-fid/blob/4d7695b39764ba1d54ab6639e0695e5c4e6f346a/pytorch_fid/fid_score.py
Modifications are:
* modify calculate_activation_statistics to handle in-memory N x C x H x W tensors
instead of file lists with a dataloader
* add fid() and get_stats()
# ---------------------------------------------
# ----- pytorch-fid original docstrings -----
# ---------------------------------------------
Calculates the Frechet Inception Distance (FID) to evaluate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from pytorch_fid_wrapper.inception import InceptionV3
from pytorch_fid_wrapper import params as pfw_params
def get_activations(images, model, batch_size=50, dims=2048, device="cpu"):
"""
Calculates the activations of the pool_3 layer for all images.
Args:
images ([type]): Tensor of images N x C x H x W
model ([type]): Instance of inception model
batch_size (int, optional): Batch size of images for the model to process at
once. Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This behavior is
retained to match the original FID score implementation. Defaults to 50.
dims (int, optional): Dimensionality of features returned by Inception.
Defaults to 2048.
device (str | torch.device, optional): Device to run calculations.
Defaults to "cpu".
Returns:
np.ndarray: A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the query
tensor.
"""
model.eval()
n_batches = len(images) // batch_size
assert n_batches > 0, (
"Not enough images to make at least 1 full batch. "
+ "Provide more images or decrease batch_size"
)
pred_arr = np.empty((len(images), dims))
start_idx = 0
for b in range(n_batches):
batch = images[b * batch_size : (b + 1) * batch_size].to(device)
if batch.nelement() == 0:
continue
with torch.no_grad():
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred = pred.squeeze(3).squeeze(2).cpu().numpy()
pred_arr[start_idx : start_idx + pred.shape[0]] = pred
start_idx = start_idx + pred.shape[0]
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""
Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Args:
mu1 (np.ndarray): Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_activations')
for generated samples.
sigma1 (np.ndarray): The covariance matrix over activations for generated
samples.
mu2 (np.ndarray): The sample mean over activations, precalculated on a
representative data set.
sigma2 (np.ndarray): The covariance matrix over activations, precalculated on a
representative data set.
eps (float, optional): Fallback in case of infinite covariance.
Defaults to 1e-6.
Returns:
float: The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert (
mu1.shape == mu2.shape
), "Training and test mean vectors have different lengths"
assert (
sigma1.shape == sigma2.shape
), "Training and test covariances have different dimensions"
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; "
"adding %s to diagonal of cov estimates"
) % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
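# Minimal illustrative sketch (hedged; not from the original pytorch-fid code):
# calling calculate_frechet_distance() on toy Gaussian statistics. The 8-dim
# features and random activations below are assumptions for demonstration only;
# real usage feeds (mu, sigma) pairs computed from Inception pool_3 activations.
def _example_frechet_distance():
    dims = 8  # toy dimensionality; the real pool_3 features use 2048
    rng = np.random.RandomState(0)
    act1 = rng.randn(100, dims)        # stand-in for "generated" activations
    act2 = rng.randn(100, dims) + 0.5  # stand-in for "real" activations, shifted
    mu1, sigma1 = np.mean(act1, axis=0), np.cov(act1, rowvar=False)
    mu2, sigma2 = np.mean(act2, axis=0), np.cov(act2, rowvar=False)
    return calculate_frechet_distance(mu1, sigma1, mu2, sigma2)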
def calculate_activation_statistics(
images, model, batch_size=50, dims=2048, device="cpu"
):
"""
Calculation of the statistics used by the FID.
Args:
images (torch.Tensor): Tensor of images N x C x H x W
model (torch.nn.Module): Instance of inception model
batch_size (int, optional): The images tensor is split into batches with
batch size batch_size. A reasonable batch size depends on the hardware.
Defaults to 50.
dims (int, optional): Dimensionality of features returned by Inception.
Defaults to 2048.
device (str | torch.device, optional): Device to run calculations.
Defaults to "cpu".
Returns:
tuple(np.ndarray, np.ndarray): (mu, sigma)
mu => The mean over samples of the activations of the pool_3 layer of
the inception model.
sigma => The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(images, model, batch_size, dims, device)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def get_stats(images, model=None, batch_size=None, dims=None, device=None):
"""
Get the InceptionV3 activation statistics (mu, sigma) for a batch of `images`.
If `model` (InceptionV3) is not provided, it will be instantiated according
to `dims`.
Other arguments are optional and will be inherited from `pfw.params` if not
provided. Use `pfw.set_config` to change those params globally for future calls.
Args:
images (torch.Tensor): The images to compute the statistics for. Must be
N x C x H x W
model (torch.nn.Module, optional): InceptionV3 model. Defaults to None.
batch_size (int, optional): Inception inference batch size.
Will use `pfw.params.batch_size` if not provided. Defaults to None.
dims (int, optional): which inception block to select. See
InceptionV3.BLOCK_INDEX_BY_DIM. Will use pfw.params.dims if not provided.
Defaults to None.
device (str | torch.device, optional): PyTorch device for inception inference.
Will use pfw.params.device if not provided. Defaults to None.
Returns:
tuple(np.ndarray, np.ndarray): (mu, sigma)
mu => The mean over samples of the activations of the pool_3 layer of
the inception model.
sigma => The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
if batch_size is None:
batch_size = pfw_params.batch_size
if dims is None:
dims = pfw_params.dims
if device is None:
device = pfw_params.device
if model is None:
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
else:
assert isinstance(model, InceptionV3)
return calculate_activation_statistics(images, model, batch_size, dims, device)
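# Minimal illustrative sketch (hedged; not from the original wrapper code):
# precompute real-data statistics once with get_stats() and reuse them for
# repeated FID evaluations via fid(real_m=..., real_s=...) defined below.
# The random tensor and the 299x299 resolution are assumptions for illustration.
def _example_get_stats():
    real_images = torch.rand(64, 3, 299, 299)  # placeholder for real data
    real_m, real_s = get_stats(real_images, batch_size=16, dims=2048, device="cpu")
    return real_m, real_s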
def fid(
fake_images,
real_images=None,
real_m=None,
real_s=None,
batch_size=None,
dims=None,
device=None,
):
"""
Computes the FID score of `fake_images` w.r.t. either precomputed stats on real
data, or another batch of images (typically real ones).
If `real_images` is `None`, you must provide `real_m` **and** `real_s` with
matching dimensions to `fake_images`.
If `real_images` is not `None`, it will prevail over `real_m` and `real_s`,
which will be ignored.
Other arguments are optional and will be inherited from `pfw.params` if not
provided. Use `pfw.set_config` to change those params globally for future calls.
Args:
fake_images (torch.Tensor): N x C x H x W tensor.
real_images (torch.Tensor, optional): N x C x H x W tensor. If provided,
stats will be computed from it, ignoring real_s and real_m.
Defaults to None.
real_m (np.ndarray, optional): Mean of a previous activation stats computation,
typically on real data. Defaults to None.
real_s (np.ndarray, optional): Covariance of a previous activation stats
computation, typically on real data. Defaults to None.
batch_size (int, optional): Inception inference batch_size.
Will use pfw.params.batch_size if not provided. Defaults to None.
dims (int, optional): which inception block to select.
See InceptionV3.BLOCK_INDEX_BY_DIM. Will use pfw.params.dims
if not provided. | |
received_damage = random.randint(r_monster[0].min_damage,
r_monster[0].max_damage)
player_hp = player_hp - received_damage
if player_hp <= 0:
player_hp = 0
await message.channel.send(
":shield:\n```ini\n{} received [{}] damage! Your HP:\n[{}/{}]```"
.format(message.author.name, received_damage, player_hp,
max_hp))
await asyncio.sleep(wait_time)
if player_hp <= 0:
player_hp = 0
await message.channel.send(
"{} has been killed by the *{}*! RIP :skull:"
.format(message.author.mention,
r_monster[0].name))
await asyncio.sleep(wait_time)
lost_gems = int(p['gem'] * .4)
p['gem'] = int(p['gem'] * .6)
await message.channel.send("*{} died and lost **{}** :gem:*"
.format(message.author.mention,
lost_gems))
p['hp'] = p['max_hp'] # resetting to full hp after death
with open('./resources/battle/log.json', 'w') as f:
json.dump(contents, f, indent=4)
someone_died = 1
return
elif msg.content.lower() == 'run' or msg.content.lower() == prefix + 'run':
escape = [
':man_running: {} got away!'.format(message.author.mention),
':man_running: {} ran away!'.format(message.author.mention),
':man_running: {} barely escaped!'.format(message.author.mention),
':man_running: {} ran like a {}!'.format(message.author.mention,
generate_insult())
]
response = random.choice(escape)
await message.channel.send(response)
return
else:
await message.channel.send(
'{} did not choose fight or run! The fight was cancelled.'.format(
message.author.mention))
return
# Gems check
if 'gems' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
await message.channel.send(
'{} has **{}** :gem:'.format(message.author.mention, p['gem']))
# HP check
elif 'hp' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
await message.channel.send(
'{} has **{}**/**{}** HP'.format(message.author.mention, p['hp'], p['max_hp']))
# Level check
elif 'lvl' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
if p['lvl'] >= max_player_level:
await message.channel.send(
":dart: *{} has reached* **max level**! :dart:\n```cs\nLEVEL {}```"
.format(message.author.mention, max_player_level))
else:
await message.channel.send("{}\n```cs\nLEVEL {}```"
.format(message.author.mention, p['lvl']))
# Sword check
elif 'sword' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
for l in p['inv']:
await message.channel.send("{} has a {}{}"
.format(message.author.mention, l['modifier'],
l['sword']))
# Sword crafting
elif 'craft' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
if p['gem'] < crafting_cost:
await message.channel.send("{} doesn't have enough :gem:"
.format(message.author.mention))
return
else:
await message.channel.send(
"{}, crafting costs **{}** :gem:\nDo you wish to continue? **yes**/**no**"
.format(message.author.mention, crafting_cost))
def check(msg):
return msg.author == message.author
try:
msg = await client.wait_for('message', check=check, timeout=timeout_time)
except asyncio.TimeoutError:
await message.channel.send(
"{} didn't respond in time, you didn't craft anything:zzz:"
.format(message.author.mention))
return
else:
if msg.content.lower() == 'yes':
p['gem'] = p['gem'] - crafting_cost
with open('./resources/battle/log.json', 'w') as f:
json.dump(contents, f, indent=4)
r_modifier = random.choices(modifiers, modifiers_weight)
r_sword = random.choices(swords, swords_weight)
await message.channel.send(
"{} made a {}{}".format(message.author.mention, r_modifier[0].name,
r_sword[0].name))
for p in contents['players']:
if str(message.author.id) == str(p['name']):
for l in p['inv']:
await message.channel.send(
"{}, you can only have *one* sword. Do you want to sell your: "
"{}{}**yes**/**no** "
.format(message.author.mention, l['modifier'],
l['sword']))
def check(msg):
return msg.author == message.author
try:
msg = await client.wait_for('message', check=check,
timeout=timeout_time)
except asyncio.TimeoutError:
await message.channel.send(
"{} didn't respond in time, you lost the sword!:zzz:"
.format(message.author.mention))
return
else:
for p in contents['players']:
if str(message.author.id) == str(p['name']):
for l in p['inv']:
if msg.content.lower() == 'yes':
old_modifier = l['modifier']
old_sword = l['sword']
l['modifier'] = r_modifier[0].name
l['sword'] = r_sword[0].name
sell_value = 0
for m in modifiers:
if old_modifier in str(m.name):
sell_value = sell_value + m.price
break
for s in swords:
if old_sword in str(s.name):
sell_value = sell_value + s.price
break
await message.channel.send(
"{}, you sold your {}{} for **{}** :gem:\n Now you have a {}{}"
.format(message.author.mention,
old_modifier, old_sword, sell_value,
l['modifier'],
l['sword']))
if p['gem'] >= max_gems:
p['gem'] = max_gems
await message.channel.send(
"*But {} can't hold any more :gem:!*".format(
message.author.mention))
else:
p['gem'] = p['gem'] + sell_value
with open('./resources/battle/log.json', 'w') as f:
json.dump(contents, f, indent=4)
elif msg.content.lower() == 'no':
await message.channel.send(
"{}, you kept your {}{}"
.format(message.author.mention, l['modifier'],
l['sword']))
elif msg.content.lower() == 'no':
await message.channel.send(
"{} didn't craft anything.".format(message.author.mention))
else:
await message.channel.send(
"{} didn't answer **yes** or **no**.".format(message.author.mention))
# Items check
elif 'items' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
if str(p['items']) == '[]':
await message.channel.send(
"{} has no items in their inventory!".format(message.author.mention))
return
else:
v = 0
player_item_list = []
player_item_list_hp = []
player_item_list_sell = []
for g in p['items']:
player_item_list.append(p['items'][v]['name'])
for i in items:
if str(p['items'][v]['name']) in str(i.name):
player_item_list_hp.append(i.heal)
player_item_list_sell.append(i.sell_price)
player_item_list.append(
' - **' + str(player_item_list_hp[v]) + '** :sparkling_heart:')
player_item_list.append(' - **' + str(player_item_list_sell[v]) + '** :gem:\n')
if v < max_items:
v = v + 1
player_item_list_msg = "".join(map(str, player_item_list))
await message.channel.send(
'{} has the following items: \n{}'.format(message.author.mention,
player_item_list_msg))
# Admin Shop restock (hidden from help)
elif 'restock' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
# loading shop log
if not os.path.isfile('./resources/battle/shop.json'):
# if file not found, make log.json with create_json_player function
create_json_shop()
await message.channel.send('Shop.json has been created. Please try again.')
else:
with open('./resources/battle/shop.json') as shop_log:
if 'Administrator' in str(message.author.roles):
contents_shop = json.load(shop_log)
create_json_shop()
del (contents_shop['shop_items'][0])
with open('./resources/battle/shop.json', 'w') as f:
json.dump(contents_shop, f, indent=4)
contents_shop['shop_items'].append({'items': []})
with open('./resources/battle/shop.json', 'w') as f:
json.dump(contents_shop, f, indent=4)
for c in contents_shop['shop_items']:
# Generate new items in shop if empty
await message.channel.send(
"Shop has been restocked!")
shop_list = []
for s in range(max_shop_items):
r_item = random.choices(items, items_weight)
shop_list.append(r_item)
for h in shop_list:
c['items'].append({
'name': '{}'.format(h[0].name)
})
with open('./resources/battle/shop.json', 'w') as f:
json.dump(contents_shop, f, indent=4)
return
else:
await message.channel.send('{} is not an admin!'.format(message.author.mention))
# Shop
elif 'shop' in '{}'.format(message.content.lower()):
for p in contents['players']:
if str(message.author.id) == str(p['name']):
# loading shop log
if not os.path.isfile('./resources/battle/shop.json'):
# if file not found, make log.json with create_json_player function
create_json_shop()
await message.channel.send('Shop.json has been created. Please try again.')
else:
with open('./resources/battle/shop.json') as shop_log:
if len(message.content.split()) == 3:
split = message.content.split()[2].lower()
if 'buy' in str(split):
await message.channel.send(
"{}, tell me which *item* you want to **buy** after the command!".format(
message.author.mention))
elif 'sell' in str(split):
await message.channel.send(
"{}, tell me which *item* you want to **sell** after the command!".format(
message.author.mention))
else:
await message.channel.send(
"{}, you can only **buy**/**sell** in the shop.".format(
message.author.mention))
elif len(message.content.split()) > 3:
split = message.content.split()[2].lower()
split_item = message.content.split()[3].lower()
if 'buy' in str(split):
contents_shop = json.load(shop_log)
if str(contents_shop) == '' or str(contents_shop) == '[]':
create_json_shop()
await message.channel.send(
'Shop.json was empty and has been fixed. Please try again.')
else:
for c in contents_shop['shop_items']:
if str(split_item) in str(c).lower():
for i in items:
if str(split_item) in str(i.name).lower():
if p['gem'] < i.buy_price:
await message.channel.send(
"{} doesn't have enough :gem:".format(
message.author.mention))
elif len(p['items']) >= max_items:
await message.channel.send(
"{} can't go above the item limit! "
"*(You have {} items)*".format(
message.author.mention, max_items))
else:
for t in c['items']:
if str(split_item) in str(t).lower():
number = c['items'].index(t)
del (c['items'][number])
r_item = random.choices(items,
items_weight)
c['items'].append({
'name': '{}'.format(r_item[0].name)
})
p['items'].append({
'name': '{}'.format(i.name)
})
p['gem'] = p['gem'] - i.buy_price
with open(
'./resources/battle/shop.json',
'w') as f:
json.dump(contents_shop, f,
indent=4)
with open('./resources/battle/log.json',
'w') as f:
json.dump(contents, f, indent=4)
await message.channel.send(
"{} bought the {}!".format(
message.author.mention,
split_item))
return
else:
await message.channel.send(
"{}, that item isn't in the shop!".format(
message.author.mention))
elif 'sell' in str(split):
if str(p['items']) == '[]':
await message.channel.send(
"{} has no items in their inventory!".format(
message.author.mention))
return
else:
for v in range(max_items):
for g in p['items']:
# If item entered does not exist in item list
if str(split_item) not in str(item_list).lower():
await message.channel.send(
"{}, that item does not exist!".format(
message.author.mention))
return
# If item entered DOES exist but not in player inv
else:
if str(split_item) not in str(
p['items']).lower():
await message.channel.send(
"{}, that item isn't in your inventory!".format(
message.author.mention))
return
# If item entered is found in player inventory
if str(split_item) in str(p['items']).lower():
for i in items:
if str(split_item) in str(i.name).lower():
await message.channel.send(
"{} sold the {} for **{}** :gem:".format(
message.author.mention, i.name,
i.sell_price))
for t in p['items']:
if str(split_item) in str(t).lower():
number = p['items'].index(t)
del (p['items'][number])
break
if p['gem'] >= max_gems:
p['gem'] = max_gems
await message.channel.send(
"*But {} can't hold any more :gem:!*".format(
message.author.mention))
else:
p['gem'] = p['gem'] + i.sell_price
with open('./resources/battle/log.json',
'w') as f:
json.dump(contents, f, indent=4)
return
else:
contents_shop = json.load(shop_log)
if str(contents_shop) == '' or str(contents_shop) == '[]':
create_json_shop()
await message.channel.send(
'Shop.json was empty and has been fixed. Please try again.')
else:
for c in contents_shop['shop_items']:
# Generate new items in shop if empty
if str(c['items']) == '[]':
await message.channel.send(
"Shop now has new items! Please try again.")
shop_list = []
for s in range(max_shop_items):
r_item = random.choices(items, items_weight)
shop_list.append(r_item)
for h in | |
# tests/bugs/core_5501_test.py
#coding:utf-8
#
# id: bugs.core_5501
# title: Unclear gstat's diagnostic when damaged page in DB file appears encrypted
# decription:
# Test creates table 'TEST' with varchar and blob fields, + index on varchar, and add some data to it.
#                  Blob field is filled with long values in order to prevent accommodation of its content within data pages.
# As result, this table should have pages of three different types: DataPage, BTreePage and BlobPage.
#
# Then we find number of first PP of this table by scrolling RDB$PAGES join RDB$RELATIONS result set.
# After this we:
# * define type of every page starting from first PP for 'TEST' table and up to total pages of DB,
# and doing this for each subsequent page, until ALL THREE different page types will be detected:
# 1) data page, 2) index B-Tree and 3) blob page.
# These page numbers are stored in variables: (brk_datapage, brk_indxpage, brk_blobpage).
# When all three page numbers are found, loop is terminated;
# * close connection and open dB as binary file for reading and writing;
# * store previous content of .fdb in variable 'raw_db_content' (for further restore);
# * move file seek pointer at the beginning of every page from list: (brk_datapage, brk_indxpage, brk_blobpage);
# * BREAK page content by writing invalid binary data in the header of page;
# This invalid data are: bytes 0...7 ==> 0xFFAACCEEBB0000CC; bytes 8...15 ==> 0xDDEEAADDCC00DDEE;
# * Close DB file handle and:
# ** 1) run 'gstat -e';
# ** 2) run online validation;
#                  * open DB file again as binary and restore its content from var. 'raw_db_content' so that the
#                    fbtest framework can finish this test (by making connect and drop this database);
#
# KEY POINTS:
# * report of 'gstat -e' should contain line with text 'ENCRYPTED 3 (DB problem!)'
#                    (number '3' should be present because we damaged pages of THREE diff. types: DP, BTree and Blob).
# * report of online validation should contain lines with info about three diff. page types which have problems.
#
# Checked on 3.0.2.32702 (CS/SC/SS), 4.0.0.563 (CS/SC/SS)
#
# tracker_id: CORE-5501
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.2
# resources: None
substitutions_1 = [('total \\d+,', 'total'), ('non-crypted \\d+', 'non-crypted'), ('crypted \\d+', 'crypted')]
init_script_1 = """
alter database drop linger;
commit;
create table test(s varchar(1000) unique using index test_s_unq, b blob);
commit;
set count on;
insert into test(s, b)
select
rpad( '',1000, uuid_to_char(gen_uuid()) ),
rpad( '',
10000, -- NB: blob should have a big size! It should NOT be stored within a data page.
'qwertyuioplkjhgfdsazxcvbnm0987654321')
from rdb$types
rows 100;
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
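# Hedged sketch (illustration only, not part of the original test logic): the
# page-damaging step described in the header, i.e. seek to the start of a page
# and overwrite the first 16 bytes of its header with the invalid patterns
# 0xFFAACCEEBB0000CC / 0xDDEEAADDCC00DDEE. 'page_size' and 'page_number' are
# assumed to be supplied by the caller (page_size comes from the DB header).
def _break_page_header(db_file_name, page_number, page_size):
    bad_header = bytes([0xff, 0xaa, 0xcc, 0xee, 0xbb, 0x00, 0x00, 0xcc,
                        0xdd, 0xee, 0xaa, 0xdd, 0xcc, 0x00, 0xdd, 0xee])
    with open(db_file_name, 'r+b') as w:
        w.seek(page_size * page_number)
        w.write(bad_header)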
# test_script_1
#---
#
# import os
# import fdb
# import re
# import subprocess
# import time
# from fdb import services
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = <PASSWORD>
# dbnm = db_conn.database_name
#
# so=sys.stdout
# se=sys.stderr
#
# map_dbo={}
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# def fill_dbo(con, map_dbo):
# cur=con.cursor()
# sql='''
# select rel_id, rel_name, idx_id, idx_name
# from (
# select
# rr.rdb$relation_id rel_id, -- 0
# rr.rdb$relation_name rel_name, -- 1
# -1 idx_id, -- 2
# '' idx_name, -- 3
# rr.rdb$relation_type rel_type,
# rr.rdb$system_flag sys_flag
# from rdb$relations rr
#
# union all
#
# select
# rr.rdb$relation_id rel_id, -- 0
# rr.rdb$relation_name rel_name, -- 1
# coalesce(ri.rdb$index_id-1,-1) idx_id, -- 2
# coalesce(ri.rdb$index_name,'') idx_name, -- 3
# rr.rdb$relation_type rel_type,
# rr.rdb$system_flag sys_flag
# from rdb$relations rr
# join rdb$indices ri on
# rr.rdb$relation_name = ri.rdb$relation_name
# ) r
# where
# coalesce(r.rel_type,0) = 0 -- exclude views, GTT and external tables
# and r.sys_flag is distinct from 1
# '''
# cur.execute(sql)
# for r in cur:
# map_dbo[ r[0], r[2] ] = ( r[1].strip(), r[3].strip() )
#
# #--------------------------------------------
#
# def parse_page_header(con, page_number, map_dbo):
#
# from struct import unpack_from
#
# global PAGE_TYPES
#
# page_buffer = con.get_page_contents( page_number )
#
# # dimitr, 20.01.2017 ~13:00
# # all *CHAR = 1 byte, *SHORT = 2 bytes, *LONG = 4 bytes.
#
# # https://docs.python.org/2/library/struct.html
# # struct.unpack_from(fmt, buffer[, offset=0])
# # Unpack the buffer according to the given format.
# # The result is a tuple even if it contains exactly one item.
# # The buffer must contain at least the amount of data required by the format
# # len(buffer[offset:]) must be at least calcsize(fmt).
# # First character of the format string can be used to indicate the byte order,
# # size and alignment of the packed data
# # Native byte order is big-endian or little-endian:
# # < little-endian
# # > big-endian
# # Intel x86 and AMD64 (x86-64) are little-endian
# # Use sys.byteorder to check the endianness of your system:
# # https://docs.python.org/2/library/struct.html#format-characters
# # c char string of length 1
# # b signed char
# # B unsigned char
# # h short
# # H unsigned short integer
# # i int integer 4
# # I unsigned int integer 4
# # l long (4)
# # L unsigned long (4)
# # q long long (8)
# # Q unsigned long long
#
# (page_type,) = unpack_from('<b',page_buffer)
#
# relation_id=-1
# index_id=-1
# segment_cnt=-1 # for Data page: number of record segments on page
#
# if page_type == 4:
# # POINTER page:
# # *pag* dpg_header=16, SLONG dpg_sequence=4, SLONG ppg_next=4, USHORT ppg_count=2 ==> 16+4+4+2=26
# # struct pointer_page
# # {
# # pag ppg_header;
# # SLONG ppg_sequence; // Sequence number in relation
# # SLONG ppg_next; // Next pointer page in relation
# # USHORT ppg_count; // Number of slots active
# # USHORT ppg_relation; // Relation id
# # USHORT ppg_min_space; // Lowest slot with space available
# # USHORT ppg_max_space; // Highest slot with space available
# # SLONG ppg_page[1]; // Data page vector
# # };
# (relation_id,) = unpack_from('<H',page_buffer,26) # 'H' ==> USHORT
#
# # ------------------------------------------------------------------------------------------------------
#
#
# if page_type == 5:
# # DATA page:
# # *pag* dpg_header=16, SLONG dpg_sequence=4 ==> 16+4 = 20:
# # struct data_page
# # {
# # 16 pag dpg_header;
# # 4 SLONG dpg_sequence; // Sequence number in relation
# # 2 USHORT dpg_relation; // Relation id
# # 2 USHORT dpg_count; // Number of record segments on page
# # struct dpg_repeat
# # {
# # USHORT dpg_offset; // Offset of record fragment
# # USHORT dpg_length; // Length of record fragment
# # } dpg_rpt[1];
# # };
# (relation_id,) = unpack_from('<H',page_buffer,20) # 'H' ==> USHORT
# (segment_cnt,) = unpack_from('<H',page_buffer,22)
#
#
# # ------------------------------------------------------------------------------------------------------
#
#
# if page_type == 6:
# # Index root page
# # struct index_root_page
# # {
# # pag irt_header;
# # USHORT irt_relation; // relation id (for consistency)
# (relation_id,) = unpack_from('<H',page_buffer,16) # 'H' ==> USHORT
#
#
# # ------------------------------------------------------------------------------------------------------
#
# index_id=-1
# ix_level=-1
# btr_len=-1
#
# if page_type == 7:
# # B-tree page ("bucket"):
# # struct btree_page
# # {
# # 16 pag btr_header;
# # 4 SLONG btr_sibling; // right sibling page
# # 4 SLONG btr_left_sibling; // left sibling page
# # 4 SLONG btr_prefix_total; // sum of all prefixes on page
# # 2 USHORT btr_relation; // relation id for consistency
# # 2 USHORT btr_length; // length of data in bucket
# # 1 UCHAR btr_id; // index id for consistency
# # 1 UCHAR btr_level; // index level (0 = leaf)
# # btree_nod btr_nodes[1];
# # };
# (relation_id,) = unpack_from('<H',page_buffer,28) # 'H' ==> USHORT
# (btr_len,) = unpack_from('<H',page_buffer,30) # 'H' ==> USHORT // length of data in bucket
# (index_id,) = unpack_from('<B',page_buffer,32) # 'B' => UCHAR
# (ix_level,) = unpack_from('<B',page_buffer,33)
#
# #----------------------------------------------------------------------------------------------------------
#
# if index_id>=0 and (relation_id, index_id) in map_dbo:
# u = map_dbo[ relation_id, index_id ]
# page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9), | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from six import iteritems
import os
import sys
import copy
import numpy
import platform
from dcase_util.containers import DictContainer, ListDictContainer, OneToOneMappingContainer
from dcase_util.utils import VectorRecipeParser, Path, ApplicationPaths, FileFormat
class ParameterContainer(DictContainer):
"""Parameter container class for parameters, inherited from DictContainer class."""
valid_formats = [FileFormat.YAML] #: Valid file formats
class AppParameterContainer(ParameterContainer):
"""Parameter container class for application parameters, inherited from ParameterContainer."""
def __init__(self, data=None, app_base=None, application_directory_parameter_filename='parameters.yaml', **kwargs):
"""Constructor
Parameters
----------
data : dict
Dictionary to initialize container
Default value None
app_base : str
Absolute path to the project root
Default value None
section_process_order : list, optional
Parameter section processing order. Given list is used to override internal default list.
Default value None
path_structure : dict of lists, optional
Defines how paths are created, section hash is used to create unique folder names. Given dict is used to
override internal default list.
method_dependencies : dict of dicts, optional
Given dict is used to override internal default list.
magic_field : dict, optional
Dict of field names for specific tasks. Given dict is used to override internal default list.
non_hashable_fields : list, optional
List of fields skipped when parameter hash for the section is calculated. Given list is used to override
internal default list.
control_sections : list, optional
List of top level sections used for framework control, for these section no hash is calculated. Given list
is used to override internal default list.
"""
# Run DictContainer init
DictContainer.__init__(self, data, **kwargs)
super(ParameterContainer, self).__init__(**kwargs)
# Defaults
# Map for field names
self.default_field_labels = OneToOneMappingContainer({
'DEFAULT-PARAMETERS': 'defaults',
'SET-LIST': 'sets',
'SET-ID': 'set_id',
'ACTIVE-SET': 'active_set',
'PARAMETERS': 'parameters',
'LABEL': 'method',
'RECIPE': 'recipe',
'CHAIN': 'chain',
'STACKING_RECIPE': 'stacking_recipe',
'BASE': 'base',
'ENABLE': 'enable',
'METHOD_PARAMETERS': 'method_parameters',
'DEPENDENCY_PARAMETERS': 'dependency_parameters',
'DEPENDENCY_LABEL': 'dependency_method',
})
# Map for Section names
self.default_section_labels = OneToOneMappingContainer({
'GENERAL': 'general',
'PATH': 'path',
'APPLICATION_PATHS': 'application',
'EXTERNAL_PATHS': 'external',
'FLOW': 'flow',
'LOGGING': 'logging',
})
# Section processing order
self.default_section_process_order = [
'FLOW',
'GENERAL',
'LOGGING',
'PATH',
]
# Define how paths are constructed from section hashes
self.default_path_structure = DictContainer({})
self.default_method_dependencies = DictContainer({})
# Fields to be skipped when parameter hash is calculated
self.default_non_hashable_fields = [
'_hash',
'verbose',
'print_system_progress',
'log_system_parameters',
'log_system_progress',
'log_learner_status',
'show_model_information',
'use_ascii_progress_bar',
'label',
'active_scenes',
'active_events',
'plotting_rate',
'focus_span',
'output_format',
]
self.default_non_hashable_sections = [
'FLOW',
'PATH',
'LOGGING',
'GENERAL'
]
# Mark container non-processed, allow processing only once
self.processed = False
# Application base path
if app_base is not None:
self.app_base = app_base
else:
self.app_base = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.split(self.app_base)[1] == 'src':
# If we are in 'src' folder remove one level
self.app_base = os.path.join(os.path.split(self.app_base)[0])
self.application_directory_parameter_filename = application_directory_parameter_filename
self.field_labels = self.default_field_labels
self.section_labels = self.default_section_labels
self.section_process_order = self.default_section_process_order
self.path_structure = self.default_path_structure
self.method_dependencies = self.default_method_dependencies
self.non_hashable_sections = self.default_non_hashable_sections
# Reset container and inject parameters
self.reset(**kwargs)
def reset(self,
field_labels=None,
section_labels=None, section_process_order=None,
path_structure=None,
method_dependencies=None,
non_hashable_fields=None, non_hashable_sections=None, **kwargs):
# Mark container non-processed, allow processing only once
self.processed = False
# Map for field names
self.field_labels = self.default_field_labels
if field_labels is not None:
self.field_labels.update(field_labels)
# Map for Section names
self.section_labels = self.default_section_labels
if section_labels is not None:
self.section_labels.update(section_labels)
# Define section processing order
self.section_process_order = self.default_section_process_order
if section_process_order is not None:
self.section_process_order = section_process_order  # plain list has no update(); override as documented
# Translate section_process_order
for order_id, section_label in enumerate(self.section_process_order):
if section_label in self.section_labels:
self.section_process_order[order_id] = self.section_labels[section_label]
# Define how paths are constructed from section hashes
self.path_structure = self.default_path_structure
if path_structure is not None:
self.path_structure.update(path_structure)
# Translate path_structure
path_structure_tmp = copy.deepcopy(self.path_structure)
for key, structure in iteritems(path_structure_tmp):
for part_id, part in enumerate(structure):
split = part.split('.')
# Translate two first levels
# First level with section_labels
if split[0] in self.section_labels:
split[0] = self.section_labels[split[0]]
# Second level with field_labels
if len(split) > 1 and split[1] in self.field_labels:
split[1] = self.field_labels[split[1]]
structure[part_id] = '.'.join(split)
self.path_structure[key] = structure
# Translate key
if key in self.section_labels:
new_key = self.section_labels[key]
self.path_structure[new_key] = self.path_structure.pop(key)
# Method dependencies map
self.method_dependencies = self.default_method_dependencies
if method_dependencies is not None:
self.method_dependencies.update(method_dependencies)
# Fields to be skipped when parameter hash is calculated
self.non_hashable_fields = self.default_non_hashable_fields
if non_hashable_fields is not None:
self.non_hashable_fields = non_hashable_fields  # plain list has no update(); override as documented
# Parameters sections which will not be included in the master parameter hash
self.non_hashable_sections = self.default_non_hashable_sections
if non_hashable_sections is not None:
self.non_hashable_sections = non_hashable_sections  # plain list has no update(); override as documented
# Translate non_hashable_sections
for order_id, section_label in enumerate(self.non_hashable_sections):
if section_label in self.section_labels:
self.non_hashable_sections[order_id] = self.section_labels[section_label]
def process(self, create_paths=True, create_parameter_hints=True):
"""Process parameters
Parameters
----------
create_paths : bool
Create paths
Default value True
create_parameter_hints : bool
Create parameters files to all data folders
Default value True
Raises
------
ValueError:
No valid active set given
Returns
-------
self
"""
# Check for empty parameter container
if len(self) == 0:
message = '{name}: Parameter container empty, cannot be processed'.format(
name=self.__class__.__name__
)
self.logger.exception(message)
raise IOError(message)
# Process only once
if not self.processed:
# Translate non hashable section names
for section_id, section in enumerate(self.non_hashable_sections):
if section in self.field_labels:
self.non_hashable_sections[section_id] = self.field_labels[section]
if self.field_labels['SET-LIST'] in self:
for set_id, set_defined_parameters in enumerate(self[self.field_labels['SET-LIST']]):
# Get default parameters
set_params = DictContainer(copy.deepcopy(self[self.field_labels['DEFAULT-PARAMETERS']]))
set_params.merge(override=set_defined_parameters)
self.process_set(
parameters=set_params,
create_paths=create_paths,
create_parameter_hints=create_parameter_hints
)
self[self.field_labels['SET-LIST']][set_id] = set_params
if (self.field_labels['DEFAULT-PARAMETERS'] in self and
self.field_labels['SET-LIST'] in self and
self.field_labels['ACTIVE-SET'] in self):
# Active set ID
active_set_id = self[self.field_labels['ACTIVE-SET']]
active_set = ListDictContainer(self[self.field_labels['SET-LIST']]).search(
key=self.field_labels['SET-ID'],
value=active_set_id
)
if not active_set:
message = '{name}: No valid active set given [{set_name}]'.format(
name=self.__class__.__name__,
set_name=active_set_id
)
self.logger.exception(message)
raise ValueError(message)
self.merge(override=active_set)
elif self.field_labels['DEFAULT-PARAMETERS'] in self:
# Only default parameter set is given, make it only one.
self.merge(
override=copy.deepcopy(self[self.field_labels['DEFAULT-PARAMETERS']])
)
else:
# No sets used
self.process_set(
parameters=self,
create_paths=create_paths,
create_parameter_hints=create_parameter_hints
)
self.processed = True
# 8. Clean up
# self._clean_unused_parameters()
return self
def process_set(self, parameters, create_paths=True, create_parameter_hints=True):
"""Process parameter set
Parameters
----------
parameters : dict
Dictionary to process
create_paths : bool
Create paths
Default value True
create_parameter_hints : bool
Create parameters files to all data folders
Default value True
Returns
-------
self
"""
# Get processing order for sections
section_list = []
for section in self.section_process_order + list(set(list(parameters.keys())) - set(self.section_process_order)):
if section in parameters:
section_list.append(section)
# Convert all main level sections to DictContainers
self._convert_main_level_to_containers(
parameters=parameters
)
# Prepare paths
self._prepare_paths(
parameters=parameters
)
# 1. Process parameters
for section in section_list:
# Reverse translated section name
section_name = self.section_labels.flipped.map(section)
# Get processing function
field_process_func = getattr(
self,
'_process_{SECTION_NAME}'.format(SECTION_NAME=section_name),
None
)
if field_process_func is not None:
# Call processing function if it exists
field_process_func(
parameters=parameters
)
# Call processing function to method section related to the current section
section_method_parameters = section + '_' + self.field_labels['METHOD_PARAMETERS']
if section in parameters and isinstance(parameters[section],
dict) and section_method_parameters in parameters:
if self.field_labels['LABEL'] in parameters[section] or self.field_labels['RECIPE'] in parameters[section]:
field_process_parameters_func = getattr(
self,
'_process_{SECTION_NAME}_METHOD_PARAMETERS'.format(SECTION_NAME=section_name),
None
)
if field_process_parameters_func is not None:
field_process_parameters_func(parameters=parameters)
# 2. Add parameter hash to methods
self._add_hash_to_method_parameters(
parameters=parameters
)
# 3. Parse recipes
recipe_paths = parameters.get_leaf_path_list(target_field_endswith=self.field_labels['RECIPE'])
for recipe_path in recipe_paths:
parameters.set_path(
path=recipe_path,
new_value=VectorRecipeParser().parse(
recipe=parameters.get_path(path=recipe_path)
)
)
# 4. Process methods
for section in section_list:
self._process_method_parameters(
parameters=parameters,
section=section
)
# 5. Inject dependencies
for section in section_list:
section_name = self.section_labels.flipped.map(section)
if section_name:
# Apply only named sections
if self.get_path_translated(parameters=parameters, path=[section, 'PARAMETERS']):
for key, item in iteritems(self.get_path_translated(parameters=parameters, path=[section, 'PARAMETERS'])):
if self.method_dependencies.get_path([section_name, key]):
dependency_path = self._translated_path(
self.method_dependencies.get_path([section_name, key]).split('.')
)
if len(dependency_path) == 1:
section_method_parameters = section + '_' + self.field_labels['METHOD_PARAMETERS']
item[self.field_labels['DEPENDENCY_PARAMETERS']] = copy.deepcopy(
parameters.get_path([section_method_parameters] + dependency_path[1:])
)
item[self.field_labels['DEPENDENCY_LABEL']] = dependency_path[-1]
elif len(dependency_path) == 2:
section_method_parameters = dependency_path[0] + '_' + self.field_labels[
'METHOD_PARAMETERS']
item[self.field_labels['DEPENDENCY_PARAMETERS']] = copy.deepcopy(
parameters.get_path([section_method_parameters] + dependency_path[1:])
)
item[self.field_labels['DEPENDENCY_LABEL']] = dependency_path[-1]
# 6. Add hash
self._add_hash_to_main_parameters(
parameters=parameters
)
self._add_main_hash(
parameters=parameters
)
# 7. Post process paths
self._process_application_paths(
parameters=parameters,
create_paths=create_paths,
create_parameter_hints=create_parameter_hints
)
return self
def get_path_translated(self, path, parameters=None):
"""Get data with path, path can contain string constants which will be translated.
Parameters
----------
path : list of str
Path parts
parameters : dict
Parameter dictionary. If none given self is used.
Default value None
Returns
-------
dict
"""
if parameters is None:
parameters = self
return parameters.get_path(
path=self._translated_path(path)
)
def set_path_translated(self, path, new_value, parameters=None):
"""Set data with path, path can contain string constants which will be translated.
Parameters
----------
path : list of str
Path parts
new_value : various
Value to be set
parameters : dict
Parameter dictionary. If none given self is used.
Default | |
formed by the UV locations, and if `max_l1_interp` is
# provided, also the pixels whose interpolation is too much
# of a stretch to be trusted. In the context of "canvas
# painting," this will be the canvas' base color.
'max_l1_interp': np.inf, # trust/accept all interpolations
# Maximum L1 distance, which we can trust in interpolation,
# to pixels that have values. Interpolation across a longer
# range will not be trusted, and hence will be filled with
# `fill_value`.
}
.. code-block:: python
method = {
'func': 'rbf',
# Which SciPy function to call.
'func_underlying': 'linear',
# Fed to `Rbf` as the `method` parameter.
'smooth': 0, # no smoothing
# Fed to `Rbf` as the `smooth` parameter.
}
Returns:
numpy.ndarray: Interpolated values at query locations, of shape
``grid_res`` for single-channel input or ``(grid_res[0], grid_res[1],
values.shape[2])`` for multi-channel input.
"""
if values.ndim == 1:
values = values.reshape(-1, 1)
assert values.ndim == 2 and values.shape[0] == uvs.shape[0]
if method is None:
method = {'func': 'griddata'}
h, w = grid_res
# Generate query coordinates
grid_x, grid_y = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# +---> x
# |
# v y
grid_u, grid_v = grid_x, 1 - grid_y
# ^ v
# |
# +---> u
if method['func'] == 'griddata':
from scipy.interpolate import griddata
cv2 = preset_import('cv2', assert_success=True)
func_underlying = method.get('func_underlying', 'linear')
fill_value = method.get('fill_value', (0,))
max_l1_interp = method.get('max_l1_interp', np.inf)
fill_value = np.array(fill_value)
if len(fill_value) == 1:
fill_value = np.tile(fill_value, values.shape[1])
assert len(fill_value) == values.shape[1]
if max_l1_interp is None:
max_l1_interp = np.inf # trust everything
# Figure out which pixels can be trusted
has_value = np.zeros((h, w), dtype=np.uint8)
ri = ((1 - uvs[:, 1]) * (h - 1)).astype(int).ravel()
ci = (uvs[:, 0] * (w - 1)).astype(int).ravel()
in_canvas = np.logical_and.reduce(
(ri >= 0, ri < h, ci >= 0, ci < w)) # to ignore out-of-canvas points
has_value[ri[in_canvas], ci[in_canvas]] = 1
dist2val = cv2.distanceTransform(1 - has_value, cv2.DIST_L1, 3)
trusted = dist2val <= max_l1_interp
# Process each color channel separately
interps = []
for ch_i in range(values.shape[1]):
v_fill = fill_value[ch_i]
v = values[:, ch_i]
interp = griddata(uvs, v, (grid_u, grid_v),
method=func_underlying,
fill_value=v_fill)
interp[~trusted] = v_fill
interps.append(interp)
interps = np.dstack(interps)
elif method['func'] == 'rbf':
from scipy.interpolate import Rbf
func_underlying = method.get('func_underlying', 'linear')
smooth = method.get('smooth', 0)
# Process each color channel separately
interps = []
for ch_i in range(values.shape[1]):
v = values[:, ch_i]
rbfi = Rbf(uvs[:, 0], uvs[:, 1], v,
function=func_underlying,
smooth=smooth)
interp = rbfi(grid_u, grid_v)
interps.append(interp)
interps = np.dstack(interps)
else:
raise NotImplementedError(method['func'])
if interps.shape[2] == 1:
return interps[:, :, 0].squeeze()
return interps
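# Illustrative sketch (hedged; not part of the original library): the
# 'griddata' branch above reduces to scipy.interpolate.griddata evaluated on a
# regular UV query grid. The stand-alone toy example below reproduces that core
# step; the point count and value function are assumptions for demonstration.
def _example_griddata_uv(grid_res=(64, 64), n_pts=200):
    from scipy.interpolate import griddata
    rng = np.random.RandomState(0)
    uvs = rng.rand(n_pts, 2)                       # scattered UV locations
    vals = np.sin(uvs[:, 0] * np.pi) * uvs[:, 1]   # scalar values at those UVs
    h, w = grid_res
    grid_x, grid_y = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
    grid_u, grid_v = grid_x, 1 - grid_y            # same UV convention as above
    return griddata(uvs, vals, (grid_u, grid_v), method='linear', fill_value=0)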
def find_local_extrema(im, want_maxima, kernel_size=3):
"""Finds local maxima or minima in an image.
Args:
im (numpy.ndarray): H-by-W if single-channel (e.g., grayscale)
or H-by-W-by-C for multi-channel (e.g., RGB) images. Extrema
are found independently for each of the C channels.
want_maxima (bool): Whether maxima or minima are wanted.
kernel_size (int, optional): Side length of the square window under
consideration. Must be larger than 1.
Returns:
numpy.ndarray: Binary map indicating if each pixel is a local extremum.
"""
from scipy.ndimage.filters import minimum_filter, maximum_filter
logger.error("find_local_extrema() not tested yet!")
# Figure out image size and number of channels
if im.ndim == 3:
h, w, c = im.shape
expanded = False
elif im.ndim == 2:
h, w = im.shape
c = 1
im = np.expand_dims(im, axis=2) # adds singleton dimension
expanded = True
else:
raise ValueError("'im' must have either two or three dimensions")
kernel = np.ones((kernel_size, kernel_size)).astype(bool)
is_extremum = np.zeros((h, w, c), dtype=bool)
for i in range(c):
z = im[:, :, i]
if want_maxima:
equals_extremum = maximum_filter(z, footprint=kernel) == z
else:
equals_extremum = minimum_filter(z, footprint=kernel) == z
is_extremum[:, :, i] = equals_extremum
if expanded:
is_extremum = is_extremum[:, :, 0]
return is_extremum
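# Illustrative sketch (hedged; not part of the original library): mark local
# maxima of a toy grayscale image within a 3x3 window. The random input is an
# assumption for demonstration only.
def _example_local_maxima():
    rng = np.random.RandomState(0)
    im = rng.rand(32, 32)
    return find_local_extrema(im, want_maxima=True, kernel_size=3)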
def compute_gradients(im):
"""Computes magnitudes and orientations of image gradients.
With Scharr operators:
.. code-block:: none
[ 3 0 -3 ] [ 3 10 3]
[10 0 -10] and [ 0 0 0]
[ 3 0 -3 ] [-3 -10 -3]
Args:
im (numpy.ndarray): H-by-W if single-channel (e.g., grayscale) or
H-by-W-by-C if multi-channel (e.g., RGB) images. Gradients are
computed independently for each of the C channels.
Returns:
tuple:
- **grad_mag** (*numpy.ndarray*) -- Magnitude image of the
gradients.
- **grad_orient** (*numpy.ndarray*) -- Orientation image of the
gradients (in radians).
.. code-block:: none
y ^ pi/2
|
pi |
--------+--------> 0
-pi | x
| -pi/2
"""
cv2 = preset_import('cv2', assert_success=True)
# Figure out image size and number of channels
if im.ndim == 3:
h, w, c = im.shape
expanded = False
elif im.ndim == 2:
h, w = im.shape
c = 1
im = np.expand_dims(im, axis=2) # adds singleton dimension
expanded = True
else:
raise ValueError("'im' must have either two or three dimensions")
grad_mag = np.zeros((h, w, c))
grad_orient = np.zeros((h, w, c))
for i in range(c):
z = im[:, :, i]
ddepth = -1 # same depth as the source
# Along horizontal direction
xorder, yorder = 1, 0
grad_h = cv2.Sobel(z, ddepth, xorder, yorder, ksize=-1) # 3x3 Scharr
grad_h = grad_h.astype(float)
# Along vertical direction
xorder, yorder = 0, 1
grad_v = cv2.Sobel(z, ddepth, xorder, yorder, ksize=-1) # 3x3 Scharr
grad_v = grad_v.astype(float)
# Magnitude
grad_mag[:, :, i] = np.sqrt(np.square(grad_h) + np.square(grad_v))
# Orientation
grad_orient[:, :, i] = np.arctan2(grad_v, grad_h)
if expanded:
grad_mag = grad_mag[:, :, 0]
grad_orient = grad_orient[:, :, 0]
return grad_mag, grad_orient
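# Illustrative sketch (hedged; not part of the original library): Scharr
# gradient magnitude and orientation of a synthetic horizontal ramp, whose
# gradients point along +x. The input image is made up for demonstration.
def _example_gradients():
    im = np.tile(np.linspace(0, 1, 64, dtype=np.float32), (64, 1))
    grad_mag, grad_orient = compute_gradients(im)
    return grad_mag, grad_orient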
def gamma_correct(im, gamma=2.2):
r"""Applies gamma correction to an ``uint`` image.
Args:
im (numpy.ndarray): H-by-W if single-channel (e.g., grayscale) or
H-by-W-by-C multi-channel (e.g., RGB) ``uint`` images.
gamma (float, optional): Gamma value :math:`< 1` shifts image towards
the darker end of the spectrum, while value :math:`> 1` towards
the brighter.
Returns:
numpy.ndarray: Gamma-corrected image.
"""
cv2 = preset_import('cv2', assert_success=True)
assert im.dtype in ('uint8', 'uint16')
# Don't correct alpha channel, if exists
alpha = None
if im.ndim == 3 and im.shape[2] == 4:
alpha = im[:, :, 3]
im = im[:, :, :3]
# Correct with lookup table
type_max = np.iinfo(im.dtype).max
table = np.array([
((x / type_max) ** (1 / gamma)) * type_max
for x in np.arange(0, type_max + 1)
]).astype(im.dtype)
im_corrected = cv2.LUT(im, table)
# Concat alpha channel back
if alpha is not None:
im_corrected = np.dstack((im_corrected, alpha))
return im_corrected
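# Illustrative sketch (hedged; not part of the original library): gamma-correct
# an 8-bit image; the random uint8 array stands in for a real photograph.
def _example_gamma():
    rng = np.random.RandomState(0)
    im = (rng.rand(16, 16, 3) * 255).astype(np.uint8)
    return gamma_correct(im, gamma=2.2)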
def rgb2lum(im):
"""Converts RGB to relative luminance (if input is linear RGB) or luma
(if input is gamma-corrected RGB).
Args:
im (numpy.ndarray): RGB array of shape ``(..., 3)``.
Returns:
numpy.ndarray: Relative luminance or luma array.
"""
assert im.shape[-1] == 3, "Input's last dimension must hold RGB"
lum = 0.2126 * im[..., 0] + 0.7152 * im[..., 1] + 0.0722 * im[..., 2]
return lum
def _assert_float_0to1(arr):
if arr.dtype.kind != 'f':
raise TypeError("Input must be float (is %s)" % arr.dtype)
if (arr < 0).any() or (arr > 1).any():
raise ValueError("Input image has pixels outside [0, 1]")
def _assert_3ch(arr):
if arr.ndim != 3:
raise ValueError("Input image is not even 3D (H-by-W-by-3)")
n_ch = arr.shape[2]
if n_ch != 3:
raise ValueError("Input image must have 3 channels, but has %d" % n_ch)
srgb_linear_thres = 0.0031308
srgb_linear_coeff = 12.92
srgb_exponential_coeff = 1.055
srgb_exponent = 2.4
def linear2srgb(im, clip=False):
r"""Converts an image from linear RGB values to sRGB.
Args:
im (numpy.ndarray): Of type ``float``, and all pixels must be
:math:`\in [0, 1]`.
clip (bool, optional): Whether to clip values to :math:`[0,1]`.
Defaults to ``False``.
Returns:
numpy.ndarray: Converted image in sRGB.
"""
if clip:
im = np.clip(im, 0, 1)
_assert_float_0to1(im)
im_ = deepcopy(im)
# Guaranteed to be [0, 1] floats
linear_ind = im_ <= srgb_linear_thres
nonlinear_ind = im_ > srgb_linear_thres
im_[linear_ind] = im_[linear_ind] * srgb_linear_coeff
im_[nonlinear_ind] = srgb_exponential_coeff * (
np.power(im_[nonlinear_ind], 1 / srgb_exponent)
) - (srgb_exponential_coeff - 1)
return im_
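# Illustrative sketch (hedged; not part of the original library): encode
# linear-light values into sRGB. Inputs must be floats in [0, 1]; the gradient
# below is a stand-in for a rendered linear image.
def _example_linear2srgb():
    lin = np.linspace(0, 1, 256).reshape(16, 16)
    return linear2srgb(lin, clip=True)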
def srgb2linear(im, clip=False):
r"""Converts an image from sRGB values to linear RGB.
Args:
im (numpy.ndarray): Of type ``float``, and all pixels must be
:math:`\in [0, 1]`.
clip (bool, optional): Whether to clip values to :math:`[0,1]`.
Defaults to ``False``.
Returns:
numpy.ndarray: Converted image in linear RGB.
"""
if clip:
im = np.clip(im, 0, 1)
| |
1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# compute ifdentry and ifdvalue bytes from code, dtype, count, value
# append (code, ifdentry, ifdvalue, writeonce) to tags list
code = tifftags[code] if code in tifftags else int(code)
if dtype not in tifftypes:
raise ValueError("unknown dtype %s" % dtype)
tifftype = tifftypes[dtype]
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format, pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if software:
addtag('software', 's', 0, software, writeonce=True)
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
addtag('planar_configuration', 'H', 1, 1 if planarconfig=='contig'
else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
addtag('extra_samples', 'H', 1, 1) # alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
addtag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
addtag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
with open(filename, 'wb') as fh:
seek = fh.seek
tell = fh.tell
def write(arg, *args):
fh.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write ifdentries
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(t[1] for t in tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = tell()
seek(tag_offset + tagindex*tag_size + offset_size + 4)
write(offset_format, pos)
seek(pos)
if tag[0] == 273:
strip_offsets_offset = pos
elif tag[0] == 279:
strip_byte_counts_offset = pos
write(tag[2])
# write image data
data_offset = tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
# if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip_offsets and strip_byte_counts if necessary
pos = tell()
for tagindex, tag in enumerate(tags):
if tag[0] == 273: # strip_offsets
if tag[2]:
seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
else:
seek(tag_offset + tagindex*tag_size + offset_size + 4)
write(offset_format, data_offset)
elif tag[0] == 279: # strip_byte_counts
if compress:
if tag[2]:
seek(strip_byte_counts_offset)
for size in strip_byte_counts:
write(offset_format, size)
else:
seek(tag_offset + tagindex*tag_size +
offset_size + 4)
write(offset_format, strip_byte_counts[0])
break
seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
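# Illustrative sketch (hedged): assuming the writer defined above is exposed as
# ``imsave`` (its ``def`` line lies outside this excerpt), a minimal write/read
# round trip with ``imread`` below could look like this. The file name and the
# random RGB volume are made up for demonstration.
def _example_roundtrip(filename='_tmp_example.tif'):
    import numpy as np
    data = (np.random.rand(4, 64, 64, 3) * 255).astype('uint8')  # 4 RGB pages
    imsave(filename, data)
    return imread(filename)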
def imread(files, *args, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
Examples
--------
>>> im = imread('test.tif', 0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(*args, **kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(*args, **kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
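# Usage sketch (illustrative names, not from this file): the first access to the attribute
# runs the decorated function, setattr then caches the result on the instance, so all later
# accesses bypass the descriptor entirely.
#
#   class Page(object):
#       @lazyattr
#       def tags(self):
#           return parse_tags_somehow()   # hypothetical expensive call, runs only once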
class TiffFile(object):
"""Read image and meta-data from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if it exists.
All attributes are read-only.
Examples
--------
>>> tif = TiffFile('test.tif')
>>> try:
... images = tif.asarray()
... except Exception as e:
... print(e)
... finally:
... tif.close()
"""
def __init__(self, arg, name=None, multifile=False):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Human readable label of open file.
multifile : bool
If True, series may include pages from multiple files.
"""
if isinstance(arg, basestring):
filename = os.path.abspath(arg)
self._fh = open(filename, 'rb')
else:
filename = str(name)
self._fh = arg
self._fh.seek(0, 2)
self._fsize = self._fh.tell()
self._fh.seek(0)
self.fname = os.path.basename(filename)
self.fpath = os.path.dirname(filename)
self._tiffs = {self.fname: self} # cache of TiffFiles
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
try:
self._fromfile()
except Exception:
self._fh.close()
raise
def close(self):
"""Close open file handle(s)."""
for tif in self._tiffs.values():
if tif._fh:
tif._fh.close()
tif._fh = None
self._tiffs = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
| |
self.mq_matching_id_points_9.append(" ")
if "9" in self.ilias_test_question_type_collection_mq_answers[i]:
self.mq_ilias_response_text_10.append(self.mattText_text_all_mq_answers[t + 9])
self.mq_ilias_response_img_label_10.append(self.mq_response_img_label[t + 9])
self.mq_ilias_response_img_string_base64_encoded_10.append(self.mq_response_img_data[t + 9])
self.mq_matching_id_10.append(self.mq_matching_ids[t + 9])
self.mq_matching_id_points_10.append(self.mq_matching_ids_points[t + 9])
else:
self.mq_ilias_response_text_10.append(" ")
self.mq_ilias_response_img_label_10.append(" ")
self.mq_ilias_response_img_string_base64_encoded_10.append(" ")
self.mq_matching_id_10.append(" ")
self.mq_matching_id_points_10.append(" ")
t += int(max(self.ilias_test_question_type_collection_mq_answers[i])) + 1
####### Miscellaneous functions
def split_description_main_from_img(self, ilias_test_question_description):
self.ilias_test_question_description = ilias_test_question_description
self.test_list1 = []
self.test_list1_l_join = []
for i in range(len(self.ilias_test_question_description)):
# Take the text from the field
self.test_neu1 = self.ilias_test_question_description[i]
# Split the text into description and IMG parts
self.test_list1 = self.test_neu1.split('</p>')
# Delete the IMG part
for i in range(len(self.test_list1)):
if "img" in self.test_list1[i]:
self.test_list1.pop(i)
break
self.test_list1_l_join.append('</p>'.join(self.test_list1))
for i in range(len(self.test_list1_l_join)):
self.test_list1_l_join[i] = self.test_list1_l_join[i].replace('<p>', "")
self.test_list1_l_join[i] = self.test_list1_l_join[i].replace('</p>', "")
return self.test_list1_l_join
###### TAXONOMY FUNCTIONS ###############
# Write the taxonomy from the DB
def set_taxonomy_for_question(self, id_nr, number_of_entrys, item, question_type_pool_qpl_file_path_template, question_type_pool_qpl_file_path_output):
# Additions for the taxonomy settings
self.number_of_entrys = number_of_entrys
self.question_type_pool_qpl_file_path_template = question_type_pool_qpl_file_path_template
self.question_type_pool_qpl_file_path_output = question_type_pool_qpl_file_path_output
self.id_int_numbers = 400000 + id_nr
self.number_of_entrys.append(format(self.id_int_numbers, '06d')) # The number sequence must be 6 digits long.
item.set('ident', "il_0_qst_" + str(self.id_int_numbers))
# The QPL file is edited here - taxonomy
self.mytree = ET.parse(self.question_type_pool_qpl_file_path_template)
self.myroot = self.mytree.getroot()
# Add Question QRef entries to the qpl file
for i in range(id_nr):
ContentObject = ET.Element('ContentObject')
MetaData = ET.SubElement(ContentObject, 'MetaData')
Settings = ET.SubElement(ContentObject, 'Settings')
PageObject = ET.SubElement(ContentObject, 'PageObject')
PageContent = ET.SubElement(PageObject, 'PageContent')
Question = ET.SubElement(PageContent, 'Question')
Question.set('QRef', "il_0_qst_" + self.number_of_entrys[i])
print("------->","il_0_qst_" + self.number_of_entrys[i] )
QuestionSkillAssignments = ET.SubElement(ContentObject, 'QuestionSkillAssignments')
TriggerQuestion = ET.SubElement(QuestionSkillAssignments, 'TriggerQuestion')
TriggerQuestion.set('Id', self.number_of_entrys[i])
self.myroot.append(PageObject)
# self.myroot.append(QuestionSkillAssignments)
self.mytree.write(self.question_type_pool_qpl_file_path_output)
# Add TriggerQuestion IDs to the qpl file
for i in range(id_nr):
ContentObject = ET.Element('ContentObject')
MetaData = ET.SubElement(ContentObject, 'MetaData')
Settings = ET.SubElement(ContentObject, 'Settings')
PageObject = ET.SubElement(ContentObject, 'PageObject')
PageContent = ET.SubElement(PageObject, 'PageContent')
Question = ET.SubElement(PageContent, 'Question')
Question.set('QRef', "il_0_qst_" + self.number_of_entrys[i])
QuestionSkillAssignments = ET.SubElement(ContentObject, 'QuestionSkillAssignments')
TriggerQuestion = ET.SubElement(QuestionSkillAssignments, 'TriggerQuestion')
TriggerQuestion.set('Id', self.number_of_entrys[i])
self.myroot.append(QuestionSkillAssignments)
self.mytree.write(self.question_type_pool_qpl_file_path_output)
def taxonomy_file_refresh(self, file_location):
self.file_location = file_location
# print("refresh_file_location: " + str(self.file_location))
with open(self.file_location, 'r') as xml_file:
xml_str = xml_file.read()
xml_str = xml_str.replace('ns0:', 'exp:')
xml_str = xml_str.replace('ns2:', 'ds:')
xml_str = xml_str.replace('ns3:', '') # strip the ns3: namespace prefix
xml_str = xml_str.replace(
'<exp:Export xmlns:ns0="http://www.ilias.de/Services/Export/exp/4_1" xmlns:ns2="http://www.ilias.de/Services/DataSet/ds/4_3" xmlns:ns3="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd">',
'<exp:Export InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:exp="http://www.ilias.de/Services/Export/exp/4_1" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd" xmlns="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:ds="http://www.ilias.de/Services/DataSet/ds/4_3">')
xml_str = xml_str.replace(
'<exp:Export xmlns:ns0="http://www.ilias.de/Services/Export/exp/4_1" xmlns:ns2="http://www.ilias.de/Services/DataSet/ds/4_3" xmlns:ns3="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Entity="tax" InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" SchemaVersion="4.3.0" TargetRelease="5.4.0" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd">',
'<exp:Export InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:exp="http://www.ilias.de/Services/Export/exp/4_1" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd" xmlns="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:ds="http://www.ilias.de/Services/DataSet/ds/4_3">')
with open(self.file_location, 'w') as replaced_xml_file:
replaced_xml_file.write(xml_str)
###### ADDITIONAL FUNCTIONS ###############
# List the folders in the folder path: pool_directory_output
def find_max_id_in_dir(self, directory_path, question_type):
self.list_of_directories = []
self.list_of_file_IDs = []
self.filename_with_zip_index = []
self.list_of_directories = os.listdir(directory_path)
self.question_type = question_type
# If a file ending in "*.zip" is found in the list, store its index
for i in range(len(self.list_of_directories)):
if ".zip" in self.list_of_directories[i]:
self.filename_with_zip_index.append(i)
# Remove all entries from the *.zip list from the file list
# Afterwards the file list no longer contains any names ending in ".zip"
# .pop removes one entry from the list and shifts the remaining entries back together
# If several entries are removed, the index of the remaining entries changes each time
# e.g.: list with 5 entries: list[0,1,2,3,4] -> list.pop(0) -> list[1,2,3,4]
# If several entries are to be removed, the index shifts by the number of entries already deleted
# That is why .pop(x)-j is used here ("j" is the loop counter), since the index shifts by 1 with every ".pop()" - see the worked example after the loop below
for j in range(len(self.filename_with_zip_index)):
self.list_of_directories.pop(self.filename_with_zip_index[j]-j)
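# Worked example (illustrative data) of the ".pop(x) - j" index shift handled above:
#   list_of_directories = ['a.zip', 'b', 'c.zip', 'd']  ->  filename_with_zip_index = [0, 2]
#   j = 0: pop(0 - 0) removes 'a.zip'  ->  ['b', 'c.zip', 'd']
#   j = 1: pop(2 - 1) removes 'c.zip'  ->  ['b', 'd']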
# Put the last seven (7) characters of each folder name into a list. The last 7 characters are the ID of the question pool
# The folder names for ILIAS always have the format, e.g.: 1604407426__0__tst_2040314
# The ID is incremented by "1" afterwards
for k in range(len(self.list_of_directories)):
self.list_of_file_IDs.append(self.list_of_directories[k][-7:])
# If no folders are found, take the ID from the template
if len(self.list_of_directories) == 0:
# If there are no *.zip folders in "ilias_pool_abgabe", the ID is determined from a (hard-coded) template.
# The number must be 7 digits long!
self.pool_id_file_zip_template = ""
if self.question_type == "formelfrage":
self.pool_id_file_zip_template = "1115532"
elif self.question_type == "singlechoice":
self.pool_id_file_zip_template = "2225532"
elif self.question_type == "multiplechoice":
self.pool_id_file_zip_template = "3335532"
elif self.question_type == "zuordnungsfrage":
self.pool_id_file_zip_template = "4445532"
else:
self.pool_id_file_zip_template = "6665532"
self.list_of_file_IDs.append(self.pool_id_file_zip_template)
# Convert all string entries to int in order to pick out the highest ID with the max() function
self.list_of_file_IDs = list(map(int, self.list_of_file_IDs))
self.file_max_id = str(max(self.list_of_file_IDs)+1)
return self.file_max_id
# Create a folder
def createFolder(self, directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
def create_pool_dir_from_template(self, pool_directory_output):
# Specifies the path to the folder in which the pool is created
# --> ILIAS-Formelfrage\ff_ilias_pool_abgabe
self.pool_directory_output = pool_directory_output
# Create a new folder
XML_Interface.createFolder(self, os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir)))
#print("======", os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir)))
# The directory is copied here in order to preserve the structure of the question pool folder
# The structure comes from a template folder. The required XML files are then replaced or updated with values
XML_Interface.copytree(self, os.path.normpath(os.path.join(self.project_root_path, "Vorlage_für_Fragenpool", 'Vorlage_1596569820__0__qpl_2074808')),
os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir)))
# Since "copytree" copies all files, the qpl.xml and the qti.xml are renamed here to the current number and are overwritten later
# Adjust the ID for the "qti".xml
os.rename(os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir, "1596569820__0__qti_2074808.xml")),
os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir, self.ilias_id_pool_qti_xml)))
# Adjust the ID for the "qpl".xml
os.rename(os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir, "1596569820__0__qpl_2074808.xml")),
os.path.normpath(os.path.join(self.pool_directory_output, self.ilias_id_pool_qpl_dir, self.ilias_id_pool_qpl_xml)))
# Copies the complete folder structure and contents. Used for pool creation
def copytree(self, src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def add_dir_for_images(self, orig_img_file_path, dir_path, img_name, img_format, id_nr):
# Path to the target folder (test or pool folder) into which the image is to be inserted
self.orig_img_file_path = orig_img_file_path
self.dir_path = dir_path
self.img_name = img_name
self.img_format = img_format
if self.orig_img_file_path != "":
# Place a new folder in the object folder
self.object_dir = 'il_0_mob_000000' + str(id_nr)
XML_Interface.createFolder(self, self.dir_path + '/' + self.object_dir + '/')
self.object_img_dir_output_path = os.path.join(self.dir_path, self.object_dir, self.img_name + "." +self.img_format)
# Copy the image file
print("++++++++++++++++++++++",self.orig_img_file_path, self.object_img_dir_output_path )
shutil.copyfile(self.orig_img_file_path, self.object_img_dir_output_path)
# if question_pool_img_path != "ilias_id_pool_img_dir_not_used_for_ilias_test":
# if test_or_pool == "ilias_test":
#
# if self.description_img_name_var != "" and self.description_img_name_var != "EMPTY":
# XML_Interface.createFolder(self, self.question_test_img_path + '/' + 'il_0_mob_000000' + str(id_nr) + '/')
#
# # The image is always stored as a PNG file.
# with open(os.path.join(self.question_test_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"), 'wb') as image_file:
# image_file.write(self.description_img_data_var)
#
# self.image = Image.open(os.path.join(self.question_test_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"))
# self.image.save(os.path.join(self.question_test_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"))
#
# else: # image pool
# if self.description_img_name_var != "" and self.description_img_name_var != "EMPTY":
# XML_Interface.createFolder(self, self.question_pool_img_path + '/' + 'il_0_mob_000000' + str(id_nr) + '/')
#
# # The image is always stored as a PNG file.
# # with open(self.question_pool_img_path + "\\il_0_mob_000000" + str(id_nr) + "\\" + self.description_img_name_var + ".png", 'wb') as image_file:
# with open(os.path.join(self.question_pool_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"), 'wb') as image_file:
# image_file.write(self.description_img_data_var)
#
# self.image = Image.open(os.path.join(self.question_pool_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"))
# self.image.save(os.path.join(self.question_pool_img_path, "il_0_mob_000000" + str(id_nr), self.description_img_name_var + ".png"))
def get_img_name_and_format_from_path(self, img_path):
# The folder name must not contain any umlauts
self.char_to_replace_dict = {'Ä': 'AE',
'Ö': 'OE',
'Ü': 'UE',
'ä': 'ae',
'ö': 'oe',
'ü': 'ue',
'ß': 'ss',
}
self.img_path = img_path
self.find_img_name_index = self.img_path.rfind("/") # Index at which the last "/" occurs. "rfind" searches the string starting from the right
self.find_img_format_index = self.img_path.rfind(".") # Index at which the last "." occurs. "rfind" searches the string starting from the right
self.img_name = self.img_path[int(self.find_img_name_index) + 1: int(self.find_img_format_index)] # the name sits between the last "/" and the last ".", e.g. "Testbild" from Testbild.jpg
self.img_format = self.img_path[int(self.find_img_format_index)+1:]
for key, value in self.char_to_replace_dict.items():
self.img_name = self.img_name.replace(key, value)
return self.img_name, self.img_format
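# Example (illustrative path): for img_path = "C:/img/Prüfstand.jpg", rfind("/") and rfind(".")
# bound the slice, giving img_name = "Pruefstand" (umlauts replaced via the dict above)
# and img_format = "jpg".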
def add_picture_to_description_main(self, description_img_path_1, description_img_path_2, description_img_path_3, img_file_path_output,
question_description_main, question_description_mattext, question_description_material, id_nr):
self.description_img_path_1 = description_img_path_1
self.description_img_path_2 = description_img_path_2
self.description_img_path_3 = description_img_path_3
self.img_file_path_output = img_file_path_output
self.question_description_main = question_description_main
self.question_description_mattext = question_description_mattext
self.description_img_name_1, self.description_img_format_1 = XML_Interface.get_img_name_and_format_from_path(self, self.description_img_path_1)
self.description_img_name_2, self.description_img_format_2 = XML_Interface.get_img_name_and_format_from_path(self, self.description_img_path_2)
self.description_img_name_3, self.description_img_format_3 = XML_Interface.get_img_name_and_format_from_path(self, self.description_img_path_3)
# Create the folder and store the image
XML_Interface.add_dir_for_images(self, self.description_img_path_1, self.img_file_path_output, self.description_img_name_1, self.description_img_format_1, id_nr)
XML_Interface.add_dir_for_images(self, self.description_img_path_2, self.img_file_path_output, self.description_img_name_2, self.description_img_format_2, id_nr)
XML_Interface.add_dir_for_images(self, self.description_img_path_3, self.img_file_path_output, self.description_img_name_3, self.description_img_format_3, id_nr)
self.picture_string_name_replace_1 = "%Bild1%"
self.picture_string_name_replace_2 = "%Bild2%"
self.picture_string_name_replace_3 = "%Bild3%"
self.check_img_1_exists = False
| |
+ l0_4 * 2.844781386 + l0_5 * 2.4526730903 + l0_6 * -1.9175165077 + l0_7 * -0.7443755288 + l0_8 * -3.1591419438 + l0_9 * 0.8441602697 + l0_10 * 1.1979484448 + l0_11 * 2.138098544 + l0_12 * 0.9274159536 + l0_13 * -2.1573448803 + l0_14 * -3.7698356464)
l1_21 = self.activation_tanh(l0_0 * 5.187120117 + l0_1 * -7.7525670576 + l0_2 * 1.9008346975 + l0_3 * -1.2031603996 + l0_4 * 5.917669142 + l0_5 * -3.1878682719 + l0_6 * 1.0311747828 + l0_7 * -2.7529484612 + l0_8 * -1.1165884578 + l0_9 * 2.5524942323 + l0_10 * -0.38623241 + l0_11 * 3.7961317445 + l0_12 * -6.128820883 + l0_13 * -2.1470707709 + l0_14 * 2.0173792965)
l1_22 = self.activation_tanh(l0_0 * -6.0241676562 + l0_1 * 0.7474455584 + l0_2 * 1.7435724844 + l0_3 * 0.8619835076 + l0_4 * -0.1138406797 + l0_5 * 6.5979359352 + l0_6 * 1.6554154348 + l0_7 * -3.7969458806 + l0_8 * 1.1139097376 + l0_9 * -1.9588417 + l0_10 * 3.5123392221 + l0_11 * 9.4443103128 + l0_12 * -7.4779291395 + l0_13 * 3.6975940671 + l0_14 * 8.5134262747)
l1_23 = self.activation_tanh(l0_0 * -7.5486576471 + l0_1 * -0.0281420865 + l0_2 * -3.8586839454 + l0_3 * -0.5648792233 + l0_4 * -7.3927282026 + l0_5 * -0.3857538046 + l0_6 * -2.9779885698 + l0_7 * 4.0482279965 + l0_8 * -1.1522499578 + l0_9 * -4.1562500212 + l0_10 * 0.7813134307 + l0_11 * -1.7582667612 + l0_12 * 1.7071109988 + l0_13 * 6.9270873208 + l0_14 * -4.5871357362)
l1_24 = self.activation_tanh(l0_0 * -5.3603442228 + l0_1 * -9.5350611629 + l0_2 * 1.6749984422 + l0_3 * -0.6511065892 + l0_4 * -0.8424823239 + l0_5 * 1.9946675213 + l0_6 * -1.1264361638 + l0_7 * 0.3228676616 + l0_8 * 5.3562230396 + l0_9 * -1.6678168952 + l0_10 * 1.2612580068 + l0_11 * -3.5362671399 + l0_12 * -9.3895191366 + l0_13 * 2.0169228673 + l0_14 * -3.3813191557)
l1_25 = self.activation_tanh(l0_0 * 1.1362866429 + l0_1 * -1.8960071702 + l0_2 * 5.7047307243 + l0_3 * -1.6049785053 + l0_4 * -4.8353898931 + l0_5 * -1.4865381145 + l0_6 * -0.2846893475 + l0_7 * 2.2322095997 + l0_8 * 2.0930488668 + l0_9 * 1.7141411002 + l0_10 * -3.4106032176 + l0_11 * 3.0593289612 + l0_12 * -5.0894813904 + l0_13 * -0.5316299133 + l0_14 * 0.4705265416)
l1_26 = self.activation_tanh(l0_0 * -0.9401400975 + l0_1 * -0.9136086957 + l0_2 * -3.3808688582 + l0_3 * 4.7200776773 + l0_4 * 3.686296919 + l0_5 * 14.2133723935 + l0_6 * 1.5652940954 + l0_7 * -0.2921139433 + l0_8 * 1.0244504511 + l0_9 * -7.6918299134 + l0_10 * -0.594936135 + l0_11 * -1.4559914156 + l0_12 * 2.8056435224 + l0_13 * 2.6103905733 + l0_14 * 2.3412348872)
l1_27 = self.activation_tanh(l0_0 * 1.1573980186 + l0_1 * 2.9593661909 + l0_2 * 0.4512594325 + l0_3 * -0.9357210858 + l0_4 * -1.2445804495 + l0_5 * 4.2716471631 + l0_6 * 1.5167912375 + l0_7 * 1.5026853293 + l0_8 * 1.3574772038 + l0_9 * -1.9754386842 + l0_10 * 6.727671436 + l0_11 * 8.0145772889 + l0_12 * 7.3108970663 + l0_13 * -2.5005627841 + l0_14 * 8.9604502277)
l1_28 = self.activation_tanh(l0_0 * 6.3576350212 + l0_1 * -2.9731672725 + l0_2 * -2.7763558082 + l0_3 * -3.7902984555 + l0_4 * -1.0065574585 + l0_5 * -0.7011836061 + l0_6 * -1.0298068578 + l0_7 * 1.201007784 + l0_8 * -0.7835862254 + l0_9 * -3.9863597435 + l0_10 * 6.7851825502 + l0_11 * 1.1120256721 + l0_12 * -2.263287351 + l0_13 * 1.8314374104 + l0_14 * -2.279102097)
l1_29 = self.activation_tanh(l0_0 * -7.8741911036 + l0_1 * -5.3370618518 + l0_2 * 11.9153868964 + l0_3 * -4.1237170553 + l0_4 * 2.9491152758 + l0_5 * 1.0317132502 + l0_6 * 2.2992199883 + l0_7 * -2.0250502364 + l0_8 * -11.0785995839 + l0_9 * -6.3615588554 + l0_10 * -1.1687644976 + l0_11 * 6.3323478015 + l0_12 * 6.0195076962 + l0_13 * -2.8972208702 + l0_14 * 3.6107747183)
l2_0 = self.activation_tanh(l1_0 * -0.590546797 + l1_1 * 0.6608304658 + l1_2 * -0.3358268839 + l1_3 * -0.748530283 + l1_4 * -0.333460383 + l1_5 * -0.3409307681 + l1_6 * 0.1916558198 + l1_7 * -0.1200399453 + l1_8 * -0.5166151854 + l1_9 * -0.8537164676 + l1_10 * -0.0214448647 + l1_11 * -0.553290271 + l1_12 * -1.2333302892 + l1_13 * -0.8321813811 + l1_14 * -0.4527761741 + l1_15 * 0.9012545631 + l1_16 * 0.415853215 + l1_17 * 0.1270548319 + l1_18 * 0.2000460279 + l1_19 * -0.1741942671 + l1_20 * 0.419830522 + l1_21 * -0.059839291 + l1_22 * -0.3383001769 + l1_23 * 0.1617814073 + l1_24 * 0.3071848006 + l1_25 * -0.3191182045 + l1_26 * -0.4981831822 + l1_27 * -1.467478375 + l1_28 * -0.1676432563 + l1_29 * 1.2574849126)
l2_1 = self.activation_tanh(l1_0 * -0.5514235841 + l1_1 * 0.4759190049 + l1_2 * 0.2103576983 + l1_3 * -0.4754377924 + l1_4 * -0.2362941295 + l1_5 * 0.1155082119 + l1_6 * 0.7424215794 + l1_7 * -0.3674198672 + l1_8 * 0.8401574461 + l1_9 * 0.6096563193 + l1_10 * 0.7437935674 + l1_11 * -0.4898638101 + l1_12 * -0.4168668092 + l1_13 * -0.0365111095 + l1_14 * -0.342675224 + l1_15 * 0.1870268765 + l1_16 * -0.5843050987 + l1_17 * -0.4596547471 + l1_18 * 0.452188522 + l1_19 * -0.6737126684 + l1_20 * 0.6876072741 + l1_21 * -0.8067776704 + l1_22 * 0.7592979467 + l1_23 * -0.0768239468 + l1_24 * 0.370536097 + l1_25 * -0.4363884671 + l1_26 * -0.419285676 + l1_27 * 0.4380251141 + l1_28 * 0.0822528948 + l1_29 * -0.2333910809)
l2_2 = self.activation_tanh(l1_0 * -0.3306539521 + l1_1 * -0.9382247194 + l1_2 * 0.0746711276 + l1_3 * -0.3383838985 + l1_4 * -0.0683232217 + l1_5 * -0.2112358049 + l1_6 * -0.9079234054 + l1_7 * 0.4898595603 + l1_8 * -0.2039825863 + l1_9 * 1.0870698641 + l1_10 * -1.1752901237 + l1_11 * 1.1406403923 + l1_12 * -0.6779626786 + l1_13 * 0.4281048906 + l1_14 * -0.6327670055 + l1_15 * -0.1477678844 + l1_16 * 0.2693637584 + l1_17 * 0.7250738509 + l1_18 * 0.7905904504 + l1_19 * -1.6417250883 + l1_20 * -0.2108095534 + l1_21 * -0.2698557472 + l1_22 * -0.2433656685 + l1_23 * -0.6289943273 + l1_24 * 0.436428207 + l1_25 * -0.8243825184 + l1_26 * -0.8583496686 + l1_27 * 0.0983131026 + l1_28 * -0.4107462518 + l1_29 * 0.5641683087)
l2_3 = self.activation_tanh(l1_0 * 1.7036869992 + l1_1 * -0.6683507666 + l1_2 * 0.2589197112 + l1_3 * 0.032841148 + l1_4 * -0.4454796342 + l1_5 * -0.6196149423 + l1_6 * -0.1073622976 + l1_7 * -0.1926393101 + l1_8 * 1.5280232458 + l1_9 * -0.6136527036 + l1_10 * -1.2722934357 + l1_11 * 0.2888655811 + l1_12 * -1.4338638512 + l1_13 * -1.1903556863 + l1_14 * -1.7659663905 + l1_15 * 0.3703086867 + l1_16 * 1.0409140889 + l1_17 * 0.0167382209 + l1_18 * 0.6045646461 + l1_19 * 4.2388788116 + l1_20 * 1.4399738234 + l1_21 * 0.3308571935 + l1_22 * 1.4501137667 + l1_23 * 0.0426123724 + l1_24 * -0.708479795 + l1_25 * -1.2100800732 + l1_26 * -0.5536278651 + l1_27 * 1.3547250573 + l1_28 * 1.2906250286 + l1_29 * 0.0596007114)
l2_4 = self.activation_tanh(l1_0 * -0.462165126 + l1_1 * -1.0996742176 + l1_2 * 1.0928262999 + l1_3 * 1.806407067 + l1_4 * 0.9289147669 + l1_5 * 0.8069022793 + l1_6 * 0.2374237802 + l1_7 * -2.7143979019 + l1_8 * -2.7779203877 + l1_9 * 0.214383903 + l1_10 * -1.3111536623 + l1_11 * -2.3148813568 + l1_12 * -2.4755355804 + l1_13 * -0.6819733236 + l1_14 * 0.4425615226 + l1_15 * -0.1298218043 + l1_16 * -1.1744832824 + l1_17 * -0.395194848 + l1_18 * -0.2803397703 + l1_19 * -0.4505071197 + l1_20 * -0.8934956598 + l1_21 * 3.3232916348 + l1_22 * -1.7359534851 + l1_23 * 3.8540421743 + l1_24 * 1.4424032523 + l1_25 * 0.2639823693 + l1_26 * 0.3597053634 + l1_27 * -1.0470693728 + l1_28 * 1.4133480357 + l1_29 * 0.6248098695)
l2_5 = self.activation_tanh(l1_0 * 0.2215807411 + l1_1 * -0.5628295071 + l1_2 * -0.8795982905 + l1_3 * 0.9101585104 + l1_4 * -1.0176831976 + l1_5 * -0.0728884401 + l1_6 * 0.6676331658 + l1_7 * -0.7342174108 + l1_8 * 9.4428E-4 + l1_9 * 0.6439774272 + l1_10 * -0.0345236026 + l1_11 * 0.5830977027 + l1_12 * -0.4058921837 + l1_13 * -0.3991888077 + l1_14 * -1.0090426973 + l1_15 * -0.9324780698 + l1_16 * -0.0888749165 + l1_17 * 0.2466351736 + l1_18 * 0.4993304601 + l1_19 * -1.115408696 + | |
# 2018-07-30 For some reason, cannot use super() in h1text or h2text, as this
# leads to infinite recursion in javascript...
class h1text(h1):
"""A predefined h1 text element."""
def __init__(self, parent: base_element, h1text: str) -> None:
idstr = ""
h1.__init__(self, parent, idstr, {}, None)
self._textnode = textnode(self, h1text)
class h2text(h2):
"""A predefined h2 text element."""
def __init__(self, parent: base_element, h2text: str) -> None:
idstr = ""
h2.__init__(self, parent, idstr, {}, None)
self._textnode = textnode(self, h2text)
class h3(element):
"""An HTML h3 (header level 3) element
Note:
See https://www.w3schools.com/html/html_headings.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'h3', parent, idstr, attrdct, jsel)
class p(element):
"""A paragraph element.
Note:
See https://www.w3schools.com/html/html_paragraphs.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'p', parent, idstr, attrdct, jsel)
class div(element):
"""A HTML div element.
Note:
See https://www.w3schools.com/html/html_blocks.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'div', parent, idstr, attrdct, jsel)
class span(element):
"""A span element.
Note:
See https://www.w3schools.com/html/html_blocks.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'span', parent, idstr, attrdct, jsel)
class header(element):
"""A header element.
Note:
See https://www.w3schools.com/html/html_blocks.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'header', parent, idstr, attrdct, jsel)
class footer(element):
"""A footer element.
Note:
See https://www.w3schools.com/html/html_blocks.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'footer', parent, idstr, attrdct, jsel)
class spantext(element):
"""A predefined span element used to display text in forms.
The visual appearance of the text is determined by HTML style
sheets (the html class is set via attrdct).
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, text: str) -> None:
generic_element.__init__(self, 'span', parent, idstr, attrdct, None)
self._textnode = textnode(self, text)
def set_text(self, newtext: str) -> None:
self._textnode.set_text(newtext)
class spanhelptext(spantext):
"""A predefined span element used to display help text in forms.
The visual appearance of the text is determined by HTML style
sheets (the html class is set to 'helptext').
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, helptext: str) -> None:
super().__init__(parent, idstr, attrdct, helptext)
self.addClass('helptext')
class spanerrortext(spantext):
"""A predefined span element used to display error text in forms.
The visual appearance of the text is determined by HTML style
sheets (the html class is set to 'w3-pink').
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, errtext: str) -> None:
super().__init__(parent, idstr, attrdct, errtext)
self.addClass('w3-pink')
class a(element):
"""A link element.
Note:
Set the href attribute in attrdct for the link destination.
Note: https://www.w3schools.com/html/html_links.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'a', parent, idstr, attrdct, jsel)
class img(element):
"""An image element.
Note:
See https://www.w3schools.com/html/html_images.asp
Note:
Set the following attributes:
src with image URL,
alt text to display when no image can be displayed
style describe height, and width OR alternatively, set
width height attributes directly.
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'img', parent, idstr, attrdct, jsel)
class Img(img):
def __init__(self, parent: base_element, idstr: str,
urlstr: str, altstr: str, attrdct: dict, jsel) -> None:
attrdct = attrdct or {}
attrdct['src'] = urlstr
attrdct['alt'] = altstr
super().__init__(parent, idstr, attrdct, jsel)
# tables and table elements
class table(element):
"""A table element. With the appropriate HTML definitions, clicking on the
table headers will sort according to that column.
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'table', parent, idstr, attrdct, jsel)
self.sort_colnum = None
self._header_cells: typing.Optional[list] = None
for colnum, pyth in enumerate(self.get_header_cells()):
pyth.setAttribute(base_element.STARATTR_ONCLICK, {'cmd': 'tablesort',
'colnum': colnum})
pyth.addObserver(self, base.MSGD_BUTTON_CLICK)
def get_header_cells(self) -> list:
if self._header_cells is None:
jsrow, rownum = self._el.rows[0], 0
idstub = self.getID()
pyrow = typing.cast(tr, getPyElementByJsEl("{}{}".format(idstub, rownum),
jsrow,
tr,
None))
if pyrow is None:
print("columnsort: pyrow is None")
return []
self._header_cells = pyrow.getcells()
return self._header_cells
def getrows(self) -> typing.List['tr']:
"""Return a list of python tr objects from the HTML-defined table.
New python objects are created if they do not already exist.
"""
idstub = self.getID()
return [typing.cast(tr, getPyElementByJsEl("{}{}".format(idstub, rownum),
jsrow,
tr,
None)) for rownum, jsrow in enumerate(self._el.rows)]
def columnsort(self, colnum: int) -> None:
"""Sort the rows of the table using the javascript onclick() attributes
in the header row.
NOTE: if these attributes are not set, this will fail.
"""
hc = self.get_header_cells()
if colnum >= len(hc):
print("illegal sort colnum {} {}".format(colnum, len(hc)))
return
hc[colnum]._el.onclick()
def rcvMsg(self, whofrom: base.base_obj,
msgdesc: base.MSGdesc_Type,
msgdat: typing.Optional[base.MSGdata_Type]) -> None:
if msgdesc == base.MSGD_BUTTON_CLICK:
print("table GOT BUTTON CLICK")
if msgdat is None:
print("msgdat is None")
return
cmd = msgdat.get("cmd", None)
print("table GOT BUTTON CLICK CMD {}".format(cmd))
if cmd == 'tablesort':
colnum = msgdat.get("colnum", None)
print("GOT table sort colnum {}".format(colnum))
self.sort_colnum = colnum
else:
print('webclient: unrecognised cmd')
return
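# Flow sketch of the click-to-sort wiring above: __init__ tags every header cell with a
# 'tablesort' onclick payload ({'cmd': 'tablesort', 'colnum': n}) and registers this table
# as an observer; a click on a header arrives as MSGD_BUTTON_CLICK and rcvMsg stores the
# clicked column number in self.sort_colnum.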
class tr(element):
"""A table row element
Note:
See https://www.w3schools.com/html/html_tables.asp
"""
def __init__(self, parent: base_element,
idstr: str,
attrdct: typing.Optional[dict],
jsel) -> None:
generic_element.__init__(self, 'tr', parent, idstr, attrdct, jsel)
def getcells(self) -> list:
"""Return a list of cells (th or td elements) in this row as python objects.
New python objects are created if they do not already exist.
idstub is used to generate id strings for the newly created objects
if the objects do not already have ids.
"""
idstub = self.getID()
retlst, rownum = [], 0
for jscell in self._el.cells:
nn = jscell.nodeName
if nn == 'TD':
cell = getPyElementByJsEl("{}{}".format(idstub, rownum),
jscell,
td,
None)
retlst.append(cell)
rownum += 1
elif nn == 'TH':
cell = getPyElementByJsEl("{}{}".format(idstub, rownum),
jscell,
th,
None)
retlst.append(cell)
rownum += 1
else:
print('getcells: unexpected nodename {}'.format(nn))
return retlst
class th(element):
"""A table header element.
Note:
See https://www.w3schools.com/html/html_tables.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'th', parent, idstr, attrdct, jsel)
class td(element):
"""A table data cell element.
Note:
See https://www.w3schools.com/html/html_tables.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: typing.Optional[dict], jsel) -> None:
generic_element.__init__(self, 'td', parent, idstr, attrdct, jsel)
# orders and unordered lists, and list items
class ol(element):
"""An ordered list of li items.
Note:
See https://www.w3schools.com/html/html_lists.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'ol', parent, idstr, attrdct, jsel)
class ul(element):
"""An unordered list (bullet list) of list items.
Note:
See https://www.w3schools.com/html/html_lists.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'ul', parent, idstr, attrdct, jsel)
class li(element):
"""A list item element"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'li', parent, idstr, attrdct, jsel)
class label(element):
"""A label element.
A label tag defines a label (i.e. some text that accompanies another element) for
another element such as a button, input or output element etc.
Note:
See https://www.w3schools.com/tags/tag_label.asp
"""
def __init__(self,
parent: base_element,
idstr: str,
attrdct: dict,
labeltext: str,
jsel) -> None:
generic_element.__init__(self, 'label', parent, idstr, attrdct, jsel)
self.set_text(labeltext)
def set_text(self, labeltext: str) -> None:
self.setInnerHTML(labeltext)
self._mytext = labeltext
def get_text(self) -> str:
return self._mytext
class option(element):
"""An option element that goes inside a select element.
Note:
See https://www.w3schools.com/html/html_form_elements.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'option', parent, idstr, attrdct, jsel)
def set_selected(self, on: bool) -> None:
"""Set the selected flag of the option.
Args:
on: whether this option is selected or not.
"""
self._el.selected = on
class select(element, OnChangeMixin):
"""A select element. We also keep a list of options (python objects).
Note:
See https://www.w3schools.com/html/html_form_elements.asp
"""
def __init__(self, parent: base_element, idstr: str, attrdct: dict, jsel) -> None:
generic_element.__init__(self, 'select', parent, idstr, attrdct, jsel)
OnChangeMixin.__init__(self)
self._optlst: typing.List[option] = []
self._optdct: typing.Dict[str, option] = {}
def get_selected(self) -> typing.Tuple[typing.Optional[int], typing.Optional[str]]:
"""
Returns:
the currently selected index and value string.
It may be that no element is selected. In this case, selectedIndex will
return a value of -1. Return None, None in this case.
"""
sel_ndx = self._el.selectedIndex
if sel_ndx == -1:
return (None, None)
val = self._el.options[sel_ndx].value
return (int(sel_ndx), val)
def set_selected(self, idstr: str) -> None:
"""Make the option with the provided idstr the currently selected
option.
This is achieved by setting the selected attribute of the designated
option item.
If no idstr is found, | |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
This module implements various equation of states.
Note: Most of the code were initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
from copy import deepcopy
import six
from abc import ABCMeta, abstractmethod
import logging
import warnings
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import pretty_plot
__author__ = "<NAME>, <NAME>"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(six.with_metaclass(ABCMeta)):
"""
Abstract class that must be subclassed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
# minimum energy (e0), bulk modulus (b0),
# derivative of bulk modulus wrt pressure (b1), minimum volume (v0)
self._params = None
# the eos function parameters. It is the same as _params except for
# equations of state that use polynomial fits (deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not vmin < v0 < vmax:
raise EOSError('The minimum volume of a fitted parabola is '
'not in the input volumes.')
return e0, b0, b1, v0
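# Sketch of where these values come from (with E(V) = a*V**2 + b*V + c from the polyfit above):
#   dE/dV = 2*a*V + b = 0          ->  v0 = -b / (2*a)
#   e0 = E(v0)
#   B0 = V * d2E/dV2 evaluated at v0  ->  b0 = 2*a*v0
#   b1 = dB0/dP carries no information in a parabola, hence the typical value 4.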
def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
objective_func = lambda pars, x, y: y - self._func(x, pars)
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
objective_func, self._params, args=(self.volumes, self.energies))
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
pass
def func(self, volume):
"""
The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
Note: the unit of the bulk modulus is energy per unit volume (eV/Ang^3 here).
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
dpi:
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
(self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o",
color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color,
label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
class Murnaghan(EOSBase):
def _func(self, volume, params):
"""
From PRB 28,5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return (e0 +
b0 * volume / b1 * (((v0 / volume)**b1) / (b1 - 1.0) + 1.0) -
v0 * b0 / (b1 - 1.0))
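# Minimal usage sketch (hypothetical numbers; every EOSBase subclass is used the same way):
#
#   volumes = [13.7, 14.0, 14.3, 14.6, 14.9]                # Ang^3
#   energies = [-56.20, -56.29, -56.33, -56.31, -56.26]     # eV
#   eos = Murnaghan(volumes, energies)
#   eos.fit()                 # least-squares fit of e0, b0, b1, v0
#   eos.results               # dict with e0, b0, b1, v0
#   eos.b0_GPa                # bulk modulus converted to GPa
#   eos.plot()                # matplotlib plot of the data points and the fit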
class Birch(EOSBase):
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by <NAME>. <NAME>,
<NAME>los.
case where n=0
"""
e0, b0, b1, v0 = tuple(params)
return (e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume)**(2.0/3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.) *
((v0 / volume)**(2.0/3.0) - 1.0) ** 3)
class BirchMurnaghan(EOSBase):
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 +
9. * b0 * v0 / 16. * (eta ** 2 - 1)**2 *
(6 + b1 * (eta ** 2 - 1.) - 4. * eta ** 2))
class PourierTarantola(EOSBase):
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
squiggle = -3.*np.log(eta)
return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2))
class Vinet(EOSBase):
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 + 2. * b0 * v0 / (b1 - 1.) ** 2
* (2. - (5. + 3. * b1 * (eta - 1.) - 3. * eta)
* np.exp(-3. * (b1 - 1.) * (eta - 1.) / 2.)))
class PolynomialEOS(EOSBase):
"""
Derives from EOSBase. Polynomial based equations of states must subclass
this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
Use the fit polynomial to compute the parameter e0, b0, b1 and v0
and set to the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
# the volume at min energy, used as the initial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
# db/dp
b1 = - v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
def _func(self, volume, params):
x = volume**(-2. / 3.)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
Overridden since this EOS works with volume**(-2/3) instead of volume.
"""
x = self.volumes**(-2./3.)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
Overridden to account for the fact that the fit uses volume**(-2/3)
instead of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x**(-3./2.)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4./9. * x**5. * deriv2(x)
derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. *
x**(15./2.) * deriv3(x))
b0 = derivV2 / | |
"""Use EDIA to assess quality of model fitness to electron density."""
import numpy as np
from . import Structure, XMap, ElectronDensityRadiusTable
from . import ResolutionBins, BondLengthTable
import argparse
import logging
import os
import time
logger = logging.getLogger(__name__)
class ediaOptions:
def __init__(self):
# General options
self.directory = '.'
self.debug = False
# Density creation options
self.map_type = None
self.resolution = None
self.resolution_min = None
self.scattering = 'xray'
def apply_command_args(self, args):
for key, value in vars(args).items():
if hasattr(self, key):
setattr(self, key, value)
return self
class Weight():
def __init__(self, radius):
# By definition:
self.b1 = 1.0 #(maximum of first parabola)
self.b2 = -0.4 #(minimum of second parabola)
self.b3 = 0.0 #(maximum of third parabola)
self.c1 = 0.0 # (we want the first parabola to have its maximum at x=0)
self.m1 = -1.0/(radius**2) # This ensures that the density becomes superfluous if p is in d(a)
self.c3 = 2 * radius # (we want the third parabola to have its maximum at x=2*r)
self.r0 = 1.0822*radius # The point where P1 and P2 intersect (pre-defined)
self.r2 = 2*radius # The point where P3 becomes 0.
# Calculate unknowns:
# Unknowns: r1,m2,m3,c2
self.c2 = -(self.b1-self.b2)/(self.m1*self.r0)
self.m2 = (self.r0**2) * (self.m1**2) / ((self.r0**2)*self.m1 - self.b2 + self.b1)
self.r1 = (self.m2*self.c2*self.c3 - self.m2*self.c2*self.c2 -self.b2)/ (self.m2*self.c3 - self.m2*self.c2)
self.m3 = self.m2*(self.r1-self.c2) / (self.r1 - self.c3)
self.P = lambda x,m,c,b: m*(x-c)**2 + b
def __call__(self, dist):
# Calculate the weight:
if(dist<self.r0):
return self.P(dist,self.m1,self.c1,self.b1)
elif(dist<self.r1):
return self.P(dist,self.m2,self.c2,self.b2)
elif(dist<self.r2):
return self.P(dist,self.m3,self.c3,self.b3)
else:
return 0.0
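# Quick sanity sketch of the piecewise weight (illustrative values for radius = 1.0):
#   w = Weight(1.0)
#   w(0.0)     -> 1.0      # P1 has its maximum b1 at the atom centre
#   w(1.0822)  -> ~ -0.17  # at r0 the first and second parabola meet (weight is negative)
#   w(2.0)     -> 0.0      # at and beyond 2*radius the grid point no longer contributes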
class Point():
def __init__(self,coor):
self.coor = coor
self.S=[]
self.D=[]
def set_Point(self, new_point):
self.coor = new_point.coor
self.S=new_point.S
self.D=new_point.D
class _BaseEDIA():
def __init__(self, conformer, structure, xmap, options):
self.structure = structure
self.conformer = conformer
self.residue = conformer
self.xmap = xmap
self.options = options
self._coor_set = [self.conformer.coor]
self._voxel_volume = self.xmap.unit_cell.calc_volume() / self.xmap.array.size
self.weighter = Weight(1.0)
# Calculate space diagonal and the partitioning factor p
self.d = np.linalg.norm(xmap.voxelspacing)
self.p = np.ceil(self.d/0.7)
abc = np.asarray([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c])
self.grid_to_cartesian = np.transpose( ( self.xmap.unit_cell.frac_to_orth / abc ) * self.xmap.voxelspacing )
self.cartesian_to_grid = np.linalg.inv(self.grid_to_cartesian)
self.Grid = np.zeros_like(xmap.array, dtype=object)
self.mean = xmap.array.mean()
self.sigma = xmap.array.std()
self.Populate_Grid(self.residue)
def Populate_Grid(self,target_residue=None):
for chain in self.structure:
for residue in chain:
for ind in range(len(residue.name)):
atom,element,charge,coor,icode,record,occ,resi = residue.name[ind],residue.e[ind],residue.charge[ind],residue.coor[ind],residue.icode[ind],residue.record[ind],residue.q[ind],residue.resi[ind]
if target_residue!=None:
flag=0
for idx in range(len(target_residue.name)):
if np.linalg.norm(coor-target_residue.coor[idx])<2.16*2+0.2:
flag=1
break
if flag == 0:
continue
grid = np.dot(coor,self.cartesian_to_grid).astype(int) - np.asarray(self.xmap.offset) # (i,j,k)
if element == "H":
continue
if charge == '':
ed_radius = self.calculate_density_radius(element, self.options.resolution,int(residue.b[ind]))
else:
ed_radius = self.calculate_density_radius(element, self.options.resolution,int(residue.b[ind]),charge)
box = (np.ceil(ed_radius*2/self.xmap.voxelspacing)).astype(int)
# Iterate over all grid points in the box and calculate their ownership
for i in range(grid[2]-box[2],grid[2]+box[2]):
for j in range(grid[1]-box[1],grid[1]+box[1]):
for k in range(grid[0]-box[0],grid[0]+box[0]):
try:
dist = np.linalg.norm(coor-self.Grid[i][j][k].coor)
except:
self.Grid[i][j][k]=Point(np.dot(np.asarray([k,j,i])+np.asarray(self.xmap.offset),self.grid_to_cartesian))
dist = np.linalg.norm(coor-self.Grid[i][j][k].coor)
if(dist<ed_radius):
self.Grid[i][j][k].S.append([coor,atom,element,occ,resi])
elif(dist<ed_radius*2):
self.Grid[i][j][k].D.append([coor,atom,element,occ,resi])
# Calculates the atomic radius based on the table
def calculate_density_radius(self,atom, resolution,bfactor,charge="0"):
a = int(np.floor(resolution/0.5)-1)
b = int(np.ceil(resolution/0.5)-1)
if atom not in ElectronDensityRadiusTable.keys():
atom = atom[0]+atom[1:].lower()
if charge not in ElectronDensityRadiusTable[atom].keys():
charge = charge[::-1]
if a == b:
radius = ElectronDensityRadiusTable[atom][charge][a]
else:
radius = ElectronDensityRadiusTable[atom][charge][a]+(ElectronDensityRadiusTable[atom][charge][b]-ElectronDensityRadiusTable[atom][charge][a])*(resolution - ResolutionBins[a])/(ResolutionBins[b] - ResolutionBins[a])
return np.asarray(radius)
def ownership(self, p, dist, ed_radius,S,D,I):
if (dist/ed_radius >= 2.0): # Grid point p is too far from the atom...
o=0.0
elif (dist/ed_radius >= 1.0): # Grid point p is in d(atom)
if len(S)> 0: # Another atom owns the grid point
o=0.0
else:
if len(D)==1: # No other atom owns the grid point, target atom is the only atom in D.
o=1.0
else: # Ownership of the atom is adjusted by the contribution of all atoms in D.
o = 1 - dist/sum([ np.linalg.norm(p-atom[0]) for atom in D ])
else:
if len(I)==1: # Target atom is the only atom that owns the grid point.
o=1.0
else: # Ownership of the atom is adjusted by the contribution of other atoms that own the point.
o = 1 - dist/sum([ np.linalg.norm(p-atom[0]) for atom in I ])
return o
def print_density(self,contour=1.0):
for i in range(0,len(self.xmap.array)):
for j in range(0,len(self.xmap.array[i])):
for k in range(0,len(self.xmap.array[i][j])):
if(self.xmap.array[i][j][k] - self.mean >contour*self.sigma):
coor = np.dot(np.asarray([k,j,i])+np.asarray(self.xmap.offset),self.grid_to_cartesian)
print("HETATM {0:4d} H HOH A {0:3d} {1:8.3f}{2:8.3f}{3:8.3f} 1.00 37.00 H".format(1,coor[0],coor[1],coor[2]))
def print_stats(self):
# Note that values of the offset are based on C,R,S - these are not always ordered like x,y,z
offset=self.xmap.offset
voxelspacing=self.xmap.voxelspacing # These ARE ordered (x,y,z)
print("Unit cell shape:", self.xmap.unit_cell.shape) # These are ordered (z,y,x)
print("Unit cell a,b,c: {0:.2f} {1:.2f} {2:.2f}".format(self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c)) # These ARE ordered (x,y,z)
print("Unit cell alpha,beta,gamma: {0:.2f} {1:.2f} {2:.2f}".format(self.xmap.unit_cell.alpha,self.xmap.unit_cell.beta,self.xmap.unit_cell.gamma))
print("XMap array dimensions: ", [len(self.xmap.array),len(self.xmap.array[0]),len(self.xmap.array[0][0])]) # These are ordered (z,y,x)
abc = np.asarray([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c])
print("abc/voxelspacing:",abc/self.xmap.voxelspacing)
print("Offset: ",offset)
# Returns 1 if atom_a is covalently bonded to atom_b, 0 otherwise.
def covalently_bonded(self,atom_a,atom_b):
error = 2*0.06 # two standard deviations from the largest observed standard deviation for protein bond lengths
try:
if np.linalg.norm(np.asarray(atom_a[0])-np.asarray(atom_b[0])) < float(BondLengthTable[atom_a[2]][atom_b[2]]) + error:
return 1
except:
return 0
return 0
# Find all atoms in 'set' that are not covalently bonded to 'atom_a'
def calculate_non_bonded(self,atom_a,set):
I = []
for atom_b in set:
if atom_a[4] == atom_b[4] and atom_a[3]!=atom_b[3] and atom_a[3]<1.0:
continue
if not self.covalently_bonded(atom_a,atom_b) or np.linalg.norm(np.asarray(atom_a[0])-np.asarray(atom_b[0])) <0.01:
I.append(atom_b)
return I
def calc_edia(self,atom,element,charge,coor,occ,resi,bfactor):
# Identify the closest grid point to the cartesian coordinates of the atom
grid = np.dot(coor,self.cartesian_to_grid).astype(int) - np.asarray(self.xmap.offset) # (x,y,z)
# Look up the electron density radius on the lookup table:
if charge == '':
ed_radius = self.calculate_density_radius(element, self.options.resolution,bfactor)
else:
ed_radius = self.calculate_density_radius(element, self.options.resolution,bfactor,charge)
# Update the parabolas used for Weighing
self.weighter = Weight(ed_radius)
# Define a box of grid points that inscribes the sphere of interest
box = (np.ceil(ed_radius*2/self.xmap.voxelspacing)).astype(int) # (x,y,z)
sum_pos_weights = sum_neg_weights = sum_product = sum_pos_product = sum_neg_product = 0.0
# Iterate over all grid points in the box and calculate their contribution to the EDIA score.
for i in range(grid[2]-box[2],grid[2]+box[2]): # z
for j in range(grid[1]-box[1],grid[1]+box[1]): # y
for k in range(grid[0]-box[0],grid[0]+box[0]): # x
# Identify the coordinates of grid point (k,j,i) of density self.xmap.array[i][j][k]
p = self.Grid[i][j][k].coor
#if(self.xmap.array[i][j][k] - self.mean > 1.2*self.sigma):
# print("HETATM {0:4d} H HOH A {0:3d} {1:8.3f}{2:8.3f}{3:8.3f} 1.00 37.00 H".format(1,p[0],p[1],p[2]))
#continue
dist = np.linalg.norm(coor-p)
# Calculate the distance-dependent weighting factor w
weight = self.weighter(dist)
# Calculate the ownership value o
I = self.calculate_non_bonded([coor,atom,element,occ,resi],self.Grid[i][j][k].S)
o = self.ownership(p, dist, ed_radius,self.Grid[i][j][k].S,self.Grid[i][j][k].D,I)
# Calculate the density score z(p) truncated at 1.2σs
z=min(max((self.xmap.array[i][j][k]-self.mean)/self.sigma,0.0),1.2)
#print(atom,dist,weight,o,z)
# Calculate the sums for EDIA
if weight > 0.0:
sum_pos_weights += weight
sum_pos_product += weight*o*z
else:
sum_neg_weights += weight
sum_neg_product += weight*o*z
sum_product += weight*o*z
return sum_pos_product/sum_pos_weights,sum_neg_product/sum_neg_weights,sum_product/sum_pos_weights
def calc_edia_residue(self,residue):
length={}
ediasum={}
occupancy={}
# Create arrays to store the EDIA components of each atom in the residue
edia = np.zeros(len(residue.name))
edia_plus = np.zeros(len(residue.name))
edia_minus = np.zeros(len(residue.name))
prev_altloc=residue.altloc[0]
# For each atom in the residue:
for ind in range(len(residue.name)):
atom,element,charge,coor,icode,record,occ = residue.name[ind],residue.e[ind],residue.charge[ind],residue.coor[ind],residue.icode[ind],residue.record[ind],residue.q[ind]
# By default, Hydrogens are excluded from the calculation!
if element == "H":
continue
# Store the values of the negative, positive, and full component in the atomic arrays:
edia_plus[ind],edia_minus[ind],edia[ind] = self.calc_edia(atom,element,charge,coor,occ,residue.resi[ind],residue.b[ind])
# Currently, we are truncating the negative values of EDIA at 0.
if edia[ind] < 0.0:
edia[ind] = 0.0
if residue.altloc[ind] not in ediasum:
ediasum[residue.altloc[ind]]=0.0
length[residue.altloc[ind]]=0.0
occupancy[residue.altloc[ind]]=residue.q[ind]
ediasum[residue.altloc[ind]]+=(edia[ind]+0.1)**(-2)
length[residue.altloc[ind]]+=1
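# The per-altloc accumulators above feed the EDIAm power mean computed below:
#   EDIAm = ( (1/n) * sum_i (EDIA_i + 0.1)^(-2) )^(-1/2) - 0.1
# A mean with exponent -2, so a single poorly supported atom drags the residue
# score down far more than a well supported atom can raise it.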
EDIAm_Comb=0.0
for key in ediasum:
if length[key] > 0:
if key != "" and "" in ediasum:
flag=1
ediasum[key] += ediasum[""]
length[key] += length[""]
EDIAm = ( ediasum[key] / length[key] ) ** (-0.5) - 0.1
OPIA = self.calc_opia_residue(residue,edia,key)
if key != "":
EDIAm_Comb+=occupancy[key]*EDIAm
print("{0} {1} {2:.2f} {3:.2f} {4:.2f}".format(residue.resi[0],key,occupancy[key],EDIAm,OPIA))
try:
print("{0} Comb {1:.2f} {2:.2f} {3:.2f}".format(residue.resi[0],sum(occupancy.values())-occupancy[""],EDIAm_Comb,OPIA))
except:
print("{0} Comb {1:.2f} {2:.2f} {3:.2f}".format(residue.resi[0],sum(occupancy.values()),EDIAm_Comb,OPIA))
if "" in ediasum and len(list(set(ediasum.keys()))) == 1:
if length[""] > 0:
key=""
EDIAm = ( ediasum[key] / length[key] ) ** (-0.5) - 0.1
OPIA = self.calc_opia_residue(residue,edia,"")
print("{0} A 1.0 {2:.2f} {3:.2f}".format(residue.resi[0],EDIAm,OPIA))
return EDIAm,OPIA
def calc_opia_residue(self,residue,edia,key):
altloc = [ x for i,x in enumerate(residue.altloc) if x==key or x==""]
index_altloc = [ i for i,x in enumerate(residue.altloc) if x==key or x==""]
self.adj_matrix = np.zeros( ( len(altloc),len(altloc) ),dtype=int)
# Calculate adjacency matrix
for x,i in enumerate(index_altloc):
atom_a = [residue.coor[i],residue.name[i],residue.e[i]]
if edia[i] >= 0.8:
for y,j in enumerate(index_altloc):
atom_b = [residue.coor[j],residue.name[j],residue.e[j]]
if self.covalently_bonded(atom_a,atom_b):
self.adj_matrix[x][y]=1
self.adj_matrix[y][x]=1
# Initialize all vertices as not visited
self.visited
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_lib
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with ops.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
[loc, scale]):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(loc.dtype.as_numpy_dtype)
probs = probs.astype(loc.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
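# (Gauss-Hermite nodes x_i target integrals of the form int exp(-x^2) f(x) dx, so
# the log-rate is substituted as log(rate) = loc + sqrt(2) * scale * x, which is
# exactly the affine map applied to the grid below; the L1-normalization of `probs`
# above absorbs the remaining 1/sqrt(pi) factor.)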
grid = (loc[..., array_ops.newaxis]
+ np.sqrt(2.) * scale[..., array_ops.newaxis] * grid)
return grid, probs
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with ops.name_scope(name, "quadrature_scheme_lognormal_quantiles",
[loc, scale]):
# Create a LogNormal distribution.
dist = transformed_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=loc, scale=scale),
bijector=Exp(event_ndims=0),
validate_args=validate_args)
batch_ndims = dist.batch_shape.ndims
if batch_ndims is None:
batch_ndims = array_ops.shape(dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
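# (The edges above are the interior quantiles at probabilities 1/(quadrature_size+2),
# ..., (quadrature_size+1)/(quadrature_size+2), so consecutive quantiles bound
# equal-probability slices of the LogNormal; each midpoint stands in for one slice,
# which is why a constant 1/quadrature_size weight is assigned below.)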
# Set shape hints.
grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
parameterized by `loc`, `scale` and the `prob` vector is
`[1. / quadrature_size]*quadrature_size`.
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
based on `LogNormal` quantiles) we can redefine the distribution to be a
parameter-less convex combination of `deg` different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
: d=0, ..., deg-1 }
```
#### Examples
```python
tfd = tf.contrib.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = -0.5`. In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_size=10,
validate_args=True)
"""
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
"""Constructs the PoissonLogNormalQuadratureCompound`.
Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
correspondence with the returned `grid`. (I.e., broadcasting is only
partially supported.)
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
quadrature_fn: Python callable taking `loc`, `scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
representing the LogNormal grid and corresponding normalized weight.
Default value: `quadrature_scheme_lognormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `quadrature_grid` and `quadrature_probs` have different base
`dtype`.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]):
if loc is not None:
loc = ops.convert_to_tensor(loc, name="loc")
if scale is not None:
scale = ops.convert_to_tensor(
scale, dtype=None if loc is None else loc.dtype, name="scale")
self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
loc, scale, quadrature_size, validate_args))
dt = self._quadrature_grid.dtype
if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
"probs dtype ({}).".format(
dt.name, self._quadrature_probs.dtype.name))
self._distribution = poisson_lib.Poisson(
log_rate=self._quadrature_grid,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._loc = loc
self._scale = scale
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dt,
reparameterization_type=distribution_lib.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
"""Distribution which randomly selects a Poisson with quadrature param."""
return self._mixture_distribution
@property
def distribution(self):
"""Base Poisson parameterized by a quadrature grid."""
return self._distribution
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
array_ops.shape(self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
# We need to "sample extra" from the | |
import re
import math
import logging
import datetime
import pysolr
from urllib.parse import urlencode, unquote
from django import urls
from django.core.paginator import Paginator, Page
from django.db import connection, reset_queries
from django.http import QueryDict
from django.conf import settings
from core import models
from core.utils.utils import fulltext_range
from core.utils import utils
from core.title_loader import _normal_lccn
_log = logging.getLogger(__name__)
PROX_DISTANCE_DEFAULT = 5
ESCAPE_CHARS_RE = re.compile(r'(?<!\\)(?P<char>[&|+\-!(){}[\]^"~*?:])')
def conn():
return pysolr.Solr(settings.SOLR)
def page_count():
return conn().search(q='type:page', rows=0).hits
def _solr_escape(value):
"""
Escape un-escaped special characters and return escaped value.
>>> _solr_escape(r'foo+') == r'foo\+'
True
>>> _solr_escape(r'foo\+') == r'foo\+'
True
>>> _solr_escape(r'foo\\+') == r'foo\\+'
True
"""
return ESCAPE_CHARS_RE.sub(r'\\\g<char>', value)
def _sorted_facet_counts(solr_counts, field):
"""
Convert the raw solr facet data (counts, ranges, etc.) from a flat array
into a two-dimensional list sorted by the number of hits. The result will
look something like this: [('field1', count1), ('field2', count2), ...]
"""
raw = solr_counts.get(field, ())
items = []
for i in range(0, len(raw), 2):
items.append((raw[i], raw[i + 1]))
return sorted(items, key = lambda item: int(item[1]), reverse = True)
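# Illustrative sketch (hypothetical solr payload): given
#   solr_counts = {'city': ['boston', 3, 'austin', 5]}
# _sorted_facet_counts(solr_counts, 'city') pairs the flat values up and returns
# [('austin', 5), ('boston', 3)].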
def title_count():
return conn().search(q='type:title', rows=0).hits
class SolrPaginator(Paginator):
"""
SolrPaginator takes a QueryDict object, builds and executes a solr query for
newspaper pages, and returns a paginator for the search results for use in
an HTML form.
"""
def __init__(self, query):
self.query = query.copy()
# remove words from query as it's not part of the solr query.
if 'words' in self.query:
del self.query['words']
self._q, self.facet_params = page_search(self.query)
try:
self._cur_page = int(self.query.get('page'))
except:
self._cur_page = 1 # _cur_page is 1-based
try:
self._cur_index = int(self.query.get('index'))
except:
self._cur_index = 0
try:
rows = int(self.query.get('rows'))
except:
rows = 10
# set up some bits that the Paginator expects to be able to use
Paginator.__init__(self, None, per_page=rows, orphans=0)
self.overall_index = (self._cur_page - 1) * self.per_page + self._cur_index
self._ocr_list = ['ocr',]
self._ocr_list.extend(['ocr_%s' % l for l in settings.SOLR_LANGUAGES])
def _get_count(self):
"Returns the total number of objects, across all pages."
if not hasattr(self, '_count'):
self._count = conn().search(self._q, rows=0).hits
return self._count
count = property(_get_count)
def highlight_url(self, url, words):
q = QueryDict(None, True)
if words:
q["words"] = " ".join(words)
return url + "#" + q.urlencode()
else:
return url
def pagination_url(self, url, words, page, index):
q = self.query.copy()
q["words"] = " ".join(words)
q["page"] = page
q["index"] = index
return url + "#" + q.urlencode()
def _get_previous(self):
previous_overall_index = self.overall_index - 1
if previous_overall_index >= 0:
p_page = previous_overall_index // self.per_page + 1
p_index = previous_overall_index % self.per_page
o = self.page(p_page).object_list[p_index]
q = self.query.copy()
return self.pagination_url(o.url, o.words, p_page, p_index)
else:
return None
previous_result = property(_get_previous)
def _get_next(self):
next_overall_index = self.overall_index + 1
if next_overall_index < self.count:
n_page = next_overall_index // self.per_page + 1
n_index = next_overall_index % self.per_page
o = self.page(n_page).object_list[n_index]
return self.pagination_url(o.url, o.words, n_page, n_index)
else:
return None
next_result = property(_get_next)
def page(self, number):
"""
Override the page method in Paginator since Solr has already
paginated stuff for us.
"""
number = self.validate_number(number)
# figure out the solr query and execute it
start = self.per_page * (number - 1)
params = {
'fl': 'id,title,date,month,day,sequence,edition_label,section_label',
'hl': 'true',
'hl.snippets': 100, # TODO: make this unlimited
'hl.requireFieldMatch': 'true', # limits highlighting slop
'hl.maxAnalyzedChars': '102400', # increased from default 51200
'hl.fl': ','.join(self._ocr_list),
'rows': self.per_page,
'start': start,
}
params.update(self.facet_params)
sort_field, sort_order = _get_sort(self.query.get('sort'), in_pages=True)
if sort_field and sort_order:
params['sort'] = '%s %s' % (sort_field, sort_order)
solr_response = conn().search(self._q, **params)
# Gather facet data from the solr response
solr_facets = solr_response.facets
field_counts = solr_facets.get('facet_fields')
facets = {
'city': _sorted_facet_counts(field_counts, 'city'),
'county': _sorted_facet_counts(field_counts, 'county'),
'frequency': _sorted_facet_counts(field_counts, 'frequency'),
'language': _sorted_facet_counts(field_counts, 'language'),
'state': _sorted_facet_counts(field_counts, 'state'),
}
# sort by year (desc)
facets['year'] = _sorted_facet_counts(solr_facets['facet_ranges']['year'], 'counts')
facet_gap = self.facet_params['f.year.facet.range.gap']
if facet_gap > 1:
facets['year'] = [('%s-%d' % (y[0], int(y[0])+facet_gap-1), y[1])
for y in facets['year']]
pages = []
for result in solr_response.docs:
page = models.Page.lookup(result['id'])
if not page:
continue
words = set()
coords = solr_response.highlighting[result['id']]
for ocr in self._ocr_list:
for s in coords.get(ocr) or []:
words.update(find_words(s))
page.words = sorted(words, key=lambda v: v.lower())
page.highlight_url = self.highlight_url(page.url, page.words)
pages.append(page)
solr_page = Page(pages, number, self)
solr_page.facets = facets
return solr_page
def pages(self):
"""
pages creates a list of two element tuples (page_num, url)
rather than displaying all the pages for large result sets
it provides windows into the results like digg:
1 2 3 ... 8 9 10 11 12 13 14 ... 87 88 89
"""
pages = []
# build up the segments
before = []
middle = []
end = []
for p in self.page_range:
if p <= 3:
before.append(p)
elif self._num_pages - p <= 3:
end.append(p)
elif abs(p - self._cur_page) < 5:
middle.append(p)
# create the list with '...' where the sequence breaks
last = None
q = self.query.copy()
for p in before + middle + end:
if last and p - last > 1:
pages.append(['...', None])
else:
q['page'] = p
pages.append([p, urlencode(q)])
last = p
return pages
def englishify(self):
"""
Returns some pseudo english text describing the query.
"""
d = self.query
parts = []
if d.get('ortext', None):
parts.append(' OR '.join(d['ortext'].split(' ')))
if d.get('andtext', None):
parts.append(' AND '.join(d['andtext'].split(' ')))
if d.get('phrasetext', None):
parts.append('the phrase "%s"' % d['phrasetext'])
if d.get('proxtext', None):
proxdistance = d.get('proxdistance', PROX_DISTANCE_DEFAULT)
parts.append(d['proxtext'])
return parts
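# Illustrative sketch (hypothetical form data): for a query with andtext="cat dog"
# and phrasetext="front page", englishify() returns
# ['cat AND dog', 'the phrase "front page"'].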
def page_search(d):
"""
Pass in form data for a given page search, and get back
a corresponding solr query.
"""
q = ['+type:page']
simple_fields = ['city', 'county', 'frequency',
'state', 'lccn'
]
for field in simple_fields:
if d.get(field, None):
q.append(query_join(d.getlist(field), field))
ocrs = ['ocr_%s' % l for l in settings.SOLR_LANGUAGES]
lang_req = d.get('language', None)
language = models.Language.objects.get(name=lang_req) if lang_req else None
lang = language.code if language else None
if language:
q.append('+language:%s' % language.name)
ocr_lang = 'ocr_' + lang if lang else 'ocr'
if d.get('ortext', None):
q.append('+((' + query_join(_solr_escape(d['ortext']).split(' '), "ocr"))
if lang:
q.append(' AND ' + query_join(_solr_escape(d['ortext']).split(' '), ocr_lang))
q.append(') OR ' + query_join(_solr_escape(d['ortext']).split(' '), ocr_lang))
else:
q.append(')')
for ocr in ocrs:
q.append('OR ' + query_join(_solr_escape(d['ortext']).split(' '), ocr))
q.append(')')
if d.get('andtext', None):
q.append('+((' + query_join(_solr_escape(d['andtext']).split(' '), "ocr", and_clause=True))
if lang:
q.append('AND ' + query_join(_solr_escape(d['andtext']).split(' '), ocr_lang, and_clause=True))
q.append(') OR ' + query_join(_solr_escape(d['andtext']).split(' '), ocr_lang, and_clause=True))
else:
q.append(')')
for ocr in ocrs:
q.append('OR ' + query_join(_solr_escape(d['andtext']).split(' '), ocr, and_clause=True))
q.append(')')
if d.get('phrasetext', None):
phrase = _solr_escape(d['phrasetext'])
q.append('+((' + 'ocr' + ':"%s"^10000' % (phrase))
if lang:
q.append('AND ocr_' + lang + ':"%s"' % (phrase))
q.append(') OR ocr_' + lang + ':"%s"' % (phrase))
else:
q.append(')')
for ocr in ocrs:
q.append('OR ' + ocr + ':"%s"' % (phrase))
q.append(')')
if d.get('proxtext', None):
distance = d.get('proxdistance', PROX_DISTANCE_DEFAULT)
prox = _solr_escape(d['proxtext'])
q.append('+((' + 'ocr' + ':("%s"~%s)^10000' % (prox, distance))
if lang:
q.append('AND ocr_' + lang + ':"%s"~%s' % (prox, distance))
q.append(') OR ocr_' + lang + ':"%s"~%s' % (prox, distance))
else:
q.append(')')
for ocr in ocrs:
q.append('OR ' + ocr + ':"%s"~%s' % (prox, distance))
q.append(')')
if d.get('sequence', None):
q.append('+sequence:"%s"' % d['sequence'])
if d.get('issue_date', None):
q.append('+month:%d +day:%d' % (int(d['date_month']), int(d['date_day'])))
# yearRange supersedes date1 and date2
year1, year2 = None, None
year_range = d.get('yearRange', None)
if year_range:
split = year_range.split("-")
if len(split) == 2:
year1 = int(split[0])
year2 = int(split[1])
else:
year1 = int(split[0])
year2 = int(split[0])
q.append('+year:[%d TO %d]' % (year1, year2))
else:
date_boundaries = fulltext_range()
date1 = d.get('date1', None)
date2 = d.get('date2', None)
if date1 or date2:
# do NOT apply year min / max to solr query
# do apply it to facets since they require a specific begin / end year
d1 = _solrize_date(str(date1), 'start')
d2 = _solrize_date(str(date2), 'end')
q.append('+date:[%s TO %s]' % (d1, d2))
year1 = date_boundaries[0] if d1 == "*" else int(str(d1)[:4])
year2 = date_boundaries[1] if d2 == "*" else int(str(d2)[:4])
else:
# do not pass any query parameters to solr if no date requested
# but do set the year range for faceting purposes
year1 = date_boundaries[0]
year2 = date_boundaries[1]
# choose a facet range gap such that the number of date ranges returned
# is <= 10. These would be used to populate a select dropdown on search
# results page.
gap = max(1, int(math.ceil((year2 - year1) / 10.0)))
# increment year range end by 1 to
"220 pm")
embed20.add_field(name = "❯ **Isotopes**", value = "K (mass = 39, 93.25%), K (mass = 41, 6.73%), K (mass = 40, Unstable)",inline = False)
embed20.set_thumbnail(url = "https://thumbs.dreamstime.com/b/potassium-k-chemical-element-d-rendering-potassium-k-chemical-element-d-rendering-isolated-black-background-110410463.jpg")
await ctx.send(embed = embed20)
elif element == "Calcium" or element == "calcium" or element == "20":
embed21 = discord.Embed(title = "`CALCIUM`", color = discord.Colour.blue())
embed21.add_field(name = "❯ **Name**", value = "Calcium")
embed21.add_field(name = "❯ **Atomic Symbol**", value = "Ca",inline = True)
embed21.add_field(name = "❯ **Atomic Number**", value = "20")
embed21.add_field(name = "❯ **Atomic Mass**", value = "40.078u",inline = True)
embed21.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed21.add_field(name = "❯ **Colour**", value = "White",inline = True)
embed21.add_field(name = "❯ **Valency**", value = "2")
embed21.add_field(name = "❯ **First Ionization energy**", value = "589.8 KJ/mol",inline = True)
embed21.add_field(name = "❯ **Atomic Radius**", value = "180 pm")
embed21.add_field(name = "❯ **Isotopes**", value = "Ca (mass = 40, 96.94%), Ca (mass = 48, Unstable)",inline = False)
embed21.set_thumbnail(url = "https://t3.ftcdn.net/jpg/01/93/19/54/360_F_193195436_00B9Ub0sYEC9jK2lp9qL3bAUZX22djwU.jpg")
await ctx.send(embed = embed21)
elif element == "Scandium" or element == "scandium" or element == "21":
embed22 = discord.Embed(title = "`SCANDIUM`", color = discord.Colour.blue())
embed22.add_field(name = "❯ **Name**", value = "Scandium")
embed22.add_field(name = "❯ **Atomic Symbol**", value = "Sc",inline = True)
embed22.add_field(name = "❯ **Atomic Number**", value = "21")
embed22.add_field(name = "❯ **Atomic Mass**", value = "44.955u",inline = True)
embed22.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed22.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed22.add_field(name = "❯ **Valency**", value = "3")
embed22.add_field(name = "❯ **First Ionization energy**", value = "633.1 KJ/mol",inline = True)
embed22.add_field(name = "❯ **Atomic Radius**", value = "160 pm")
embed22.add_field(name = "❯ **Isotopes**", value = "Sc(mass = 45, 100%), Sc (mass = 46, Unstable),",inline = False)
embed22.set_thumbnail(url = "https://as2.ftcdn.net/jpg/02/63/83/37/500_F_263833761_TPsGPcPpWaWBdWXtjUPuTYdI39pwU8bo.jpg")
await ctx.send(embed = embed22)
elif element == "Titanium" or element == "titanium" or element == "22":
embed23 = discord.Embed(title = "`TITANIUM`", color = discord.Colour.blue())
embed23.add_field(name = "❯ **Name**", value = "Titanium")
embed23.add_field(name = "❯ **Atomic Symbol**", value = "Ti",inline = True)
embed23.add_field(name = "❯ **Atomic Number**", value = "22")
embed23.add_field(name = "❯ **Atomic Mass**", value = "47.867u",inline = True)
embed23.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed23.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed23.add_field(name = "❯ **Valency**", value = "4")
embed23.add_field(name = "❯ **First Ionization energy**", value = "658.8 KJ/mol",inline = True)
embed23.add_field(name = "❯ **Atomic Radius**", value = "140 pm")
embed23.add_field(name = "❯ **Isotopes**", value = "Ti (mass = 48, 73.72%), Ti (mass = 44, unstable)",inline = False)
embed23.set_thumbnail(url = "https://theodoregray.com/periodictable/Tiles/022/s14.JPG")
await ctx.send(embed = embed23)
elif element == "Vanadium" or element == "vanadium" or element == "23":
embed24 = discord.Embed(title = "`VANADIUM`", color = discord.Colour.blue())
embed24.add_field(name = "❯ **Name**", value = "Vanadium")
embed24.add_field(name = "❯ **Atomic Symbol**", value = "V",inline = True)
embed24.add_field(name = "❯ **Atomic Number**", value = "23")
embed24.add_field(name = "❯ **Atomic Mass**", value = "50.94u",inline = True)
embed24.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed24.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed24.add_field(name = "❯ **Valency**", value = "5")
embed24.add_field(name = "❯ **First Ionization energy**", value = "650.9 KJ/mol",inline = True)
embed24.add_field(name = "❯ **Atomic Radius**", value = "135 pm")
embed24.add_field(name = "❯ **Isotopes**", value = "V (mass = 50, 99.75%), V (mass = 51)",inline = False)
embed24.set_thumbnail(url = "https://as1.ftcdn.net/jpg/01/57/16/88/500_F_157168886_00kyiPcfpW21eK88YVxehWEkYVeg0Kor.jpg")
await ctx.send(embed = embed24)
elif element == "Chromium" or element == "chromium" or element == "24":
embed25 = discord.Embed(title = "`CHROMIUM`", color = discord.Colour.blue())
embed25.add_field(name = "❯ **Name**", value = "Chormium")
embed25.add_field(name = "❯ **Atomic Symbol**", value = "Cr",inline = True)
embed25.add_field(name = "❯ **Atomic Number**", value = "24")
embed25.add_field(name = "❯ **Atomic Mass**", value = "51.9966u",inline = True)
embed25.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed25.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed25.add_field(name = "❯ **Valency**", value = "6")
embed25.add_field(name = "❯ **First Ionization energy**", value = "652.9 KJ/mol",inline = True)
embed25.add_field(name = "❯ **Atomic Radius**", value = "140 pm")
embed25.add_field(name = "❯ **Isotopes**", value = "Cr (mass = 51, 83.78%), Cr (mass = 52, unstable)",inline = False)
embed25.set_thumbnail(url = "https://thumbs.dreamstime.com/b/chromium-cr-chemical-element-d-rendering-chromium-cr-chemical-element-d-rendering-isolated-black-background-110410337.jpg")
await ctx.send(embed = embed25)
elif element == "Manganese" or element == "manganese" or element == "25":
embed26 = discord.Embed(title = "`MANGANESE`", color = discord.Colour.blue())
embed26.add_field(name = "❯ **Name**", value = "Manganese")
embed26.add_field(name = "❯ **Atomic Symbol**", value = "Mn",inline = True)
embed26.add_field(name = "❯ **Atomic Number**", value = "25")
embed26.add_field(name = "❯ **Atomic Mass**", value = "54.9386u",inline = True)
embed26.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed26.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed26.add_field(name = "❯ **Valency**", value = "4")
embed26.add_field(name = "❯ **First Ionization energy**", value = "717.3 KJ/mol",inline = True)
embed26.add_field(name = "❯ **Atomic Radius**", value = "140 pm")
embed26.add_field(name = "❯ **Isotopes**", value = "Mn (mass = 55, 100%), Mn (mass = 53)",inline = False)
embed26.set_thumbnail(url = "https://www.sciencepicture.co/_img/search/Manganese-Chemical-Element_spc-id-5048.jpg")
await ctx.send(embed = embed26)
elif element == "Iron" or element == "iron" or element == "26":
embed27 = discord.Embed(title = "`IRON`", color = discord.Colour.blue())
embed27.add_field(name = "❯ **Name**", value = "Iron")
embed27.add_field(name = "❯ **Atomic Symbol**", value = "Fe",inline = True)
embed27.add_field(name = "❯ **Atomic Number**", value = "26")
embed27.add_field(name = "❯ **Atomic Mass**", value = "55.845u",inline = True)
embed27.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed27.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed27.add_field(name = "❯ **Valency**", value = "3")
embed27.add_field(name = "❯ **First Ionization energy**", value = "762.5 KJ/mol",inline = True)
embed27.add_field(name = "❯ **Atomic Radius**", value = "140 pm")
embed27.add_field(name = "❯ **Isotopes**", value = "Fe (mass = 56, 91.75%), Fe (mass = 54, 5.845%), Fe (mass = 57, 2.119%), Fe (mass = 60, unstable)",inline = False)
embed27.set_thumbnail(url = "https://t4.ftcdn.net/jpg/01/93/19/59/360_F_193195925_7VFTJEUgqhemWUDvtQqJDFYCFFEOQ6MN.jpg")
await ctx.send(embed = embed27)
elif element == "Cobalt" or element == "cobalt" or element == "27":
embed28 = discord.Embed(title = "`COBALT`", color = discord.Colour.blue())
embed28.add_field(name = "❯ **Name**", value = "Cobalt")
embed28.add_field(name = "❯ **Atomic Symbol**", value = "Co",inline = True)
embed28.add_field(name = "❯ **Atomic Number**", value = "27")
embed28.add_field(name = "❯ **Atomic Mass**", value = "58.983u",inline = True)
embed28.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed28.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed28.add_field(name = "❯ **Valency**", value = "4")
embed28.add_field(name = "❯ **First Ionization energy**", value = "760.4 KJ/mol",inline = True)
embed28.add_field(name = "❯ **Atomic Radius**", value = "135 pm")
embed28.add_field(name = "❯ **Isotopes**", value = "Co (mass = 59, 100%), Co (mass = 60, unstable)",inline = False)
embed28.set_thumbnail(url = "https://w7.pngwing.com/pngs/75/665/png-transparent-periodic-table-chemical-element-iron-symbol-lawrencium-tin-chemical-element-electronics-chemistry.png")
await ctx.send(embed = embed28)
elif element == "Nickel" or element == "nickel" or element == "28":
embed29 = discord.Embed(title = "`NICKEL`", color = discord.Colour.blue())
embed29.add_field(name = "❯ **Name**", value = "Nickel")
embed29.add_field(name = "❯ **Atomic Symbol**", value = "Ni",inline = True)
embed29.add_field(name = "❯ **Atomic Number**", value = "28")
embed29.add_field(name = "❯ **Atomic Mass**", value = "58.693u",inline = True)
embed29.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed29.add_field(name = "❯ **Colour**", value = "Silver",inline = True)
embed29.add_field(name = "❯ **Valency**", value = "2")
embed29.add_field(name = "❯ **First Ionization energy**", value = "727.1 KJ/mol",inline = True)
embed29.add_field(name = "❯ **Atomic Radius**", value = "135 pm")
embed29.add_field(name = "❯ **Isotopes**", value = "Ni (mass = 58, 68.07%),Ni (mass = 60, 26.223%), Ni (mass = 59, unstable)",inline = False)
embed29.set_thumbnail(url = "https://media.istockphoto.com/vectors/nickel-chemical-element-vector-id1127314361")
await ctx.send(embed = embed29)
elif element == "Copper" or element == "copper" or element == "29":
embed30 = discord.Embed(title = "`COPPER`", color = discord.Colour.blue())
embed30.add_field(name = "❯ **Name**", value = "Copper")
embed30.add_field(name = "❯ **Atomic Symbol**", value = "Cu",inline = True)
embed30.add_field(name = "❯ **Atomic Number**", value = "29")
embed30.add_field(name = "❯ **Atomic Mass**", value = "63.546u",inline = True)
embed30.add_field(name = "❯ **Phase at STP**", value = "Solid")
embed30.add_field(name = "❯ **Colour**", value = "Copper",inline = True)
embed30.add_field(name = "❯ **Valency**", value = "2")
embed30.add_field(name = "❯ **First Ionization eZnrgy**", value = "745.5 KJ/mol",inline = True)
embed30.add_field(name = "❯ **Atomic Radius**", value = "135 pm")
embed30.add_field(name =
#!/usr/bin/python2
'''
This is a python module to operate on call files.
#File examples:
#Two-character code:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A T/A ./. ./. A/A ./. ./. ./. ./.
chr_1 2 C T/C T/C ./. C/C C/C ./. C/C ./.
chr_1 3 C C/GCC C/C ./. C/C C/C C/C C/C C/C
chr_1 4 T T/T T/T ./. T/T T/T T/T T/T T/T
chr_2 1 A A/A A/A ./. A/A A/A A/A A/A A/A
chr_2 2 C C/C C/C ./. C/C C/C C/C C/C C/C
chr_2 3 C AT/AT AT/AT AT/AT AT/AT AT/AT AT/AT AT/AT AT/AT
chr_2 4 C C/C T/T C/C C/C C/C C/C C/C C/C
chr_2 5 T T/T C/C T/T C/T T/T C/T T/T T/T
chr_3 1 G G/G ./. ./. G/G ./. ./. ./. ./.
chr_3 2 C G/C C/C ./. C/C C/C ./. C/C ./.
chr_3 3 CTT CTT/CTT CTT/C CTT/C CTT/CTT CTT/CTT CTT/CTT CTT/CTT CTT/CTT
chr_3 4 TA T/T T/T ./. T/T T/T T/T T/T T/TA
chr_3 5 G */* G/* ./. G/G G/G G/G C/C G/G
#One-character code:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A W N N A N N N N
chr_1 2 C Y Y N C C N C N
chr_1 3 C N C N C C C C C
chr_1 4 T T T N T T T T T
chr_2 1 A A A N A A A A A
chr_2 2 C C C N C C C C C
chr_2 3 C N N N N N N N N
chr_2 4 C C T C C C C C C
chr_2 5 T T C T Y T Y T T
chr_3 1 G G N N G N N N N
chr_3 2 C S C N C C N C N
chr_3 3 N N N N N N N N N
chr_3 4 N T T N T T T T N
chr_3 5 G - N N G G G C G
Phased:
Note! Some functions assume chromosome numbers are separated by "_",
for example "chr_1". The format "chr1" may not always work.
I am still fixing this issue.
'''
############################# modules #############################
import argparse, sys # for input options
import collections # to perform counting
import random # for randomization
############################# classes ############################
class CommandLineParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2) # command line syntax errors
class callsParser(object):
'''
Parse calls table with genotypes to an object to access
chromosomes/positions/sequences easily.
'''
def __init__(self, filename, samples):
self.filename = filename
self.samples = samples
self.names = []
self.chrmosomes = []
self.positions = []
self.snps = {}
# read file
callsFile = open(self.filename, 'r')
# save samples' names
header_line = callsFile.readline().split()
indexS = indexSamples(self.samples, header_line)
self.names = selectSamples(indexS, header_line)
# make sequence list
self.sequences = [[] for i in range(len(self.names))]
# append sequences
for line in callsFile:
words = line.split()
self.chrmosomes.append(str(words[0]))
self.positions.append(words[1])
GT = selectSamples(indexS, words)
for i in range(len(self.sequences)):
self.sequences[i].append(GT[i])
self.snps[words[0]+':'+words[1]] = GT
callsFile.close()
def __getitem__(self, i):
'''
Enables iteration through chromosomes/positions/sequences by
index and sample names.
'''
if isinstance(i, int): # if index
if i > len(self.names) or i < 0:
raise IndexError("Index is out of range")
return self.sequences[i]
else: # if name
if i not in self.names:
raise KeyError("No sequence with name %s", i)
seqIndex = self.names.index(i)
return self.sequences[seqIndex]
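# Illustrative sketch (hypothetical file and sample names):
#   calls = callsParser('calls.tab', ['sample1', 'sample2'])
#   calls['sample1']        # full genotype sequence for sample1
#   calls.snps['chr_1:2']   # genotypes of the selected samples at chr_1, position 2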
############################# functions ###########################
def flattenList(complexList):
'''
Makes a flat list out of list of lists.
'''
flat_list = []
for sublist in complexList:
for i in sublist:
flat_list.append(i)
return flat_list
def all_missing(genotypes):
'''
Check if all genotypes are missing.
'''
return all(gt == 'N' for gt in genotypes)
def any_missing(genotypes):
'''
Check if any genotype is missing.
'''
return any(gt == 'N' for gt in genotypes)
def checkSampleNames(sampleNames, inputFileName):
'''
Check if samples names are given and if all sample names are
present in a header.
'''
inputFile = open(inputFileName, 'r')
inputFile_header = inputFile.readline().split()
# if no samples specified, use all:
if sampleNames:
sampNames = sampleNames.split(',')
# check if all samples are present in a header
for sample in sampNames:
if sample not in inputFile_header:
raise IOError(
'Sample name "%s" is not found in the header' % (sample))
else:
sampNames = inputFile_header[2:]
print 'Sample names are not specified, all will be used ...'
inputFile.close()
return sampNames
def indexSamples(sampNames, header_words):
'''
extract the index of a given list of sample names.
'''
sampIndex = []
for i in sampNames:
indnumber = header_words.index(i)
sampIndex.append(indnumber)
return sampIndex
def selectSamples(sampIndex, words):
'''
extracts column values for given list of indexes.
'''
sampWords = []
for el in sampIndex:
sampWords.append(words[el])
return sampWords
def if_all_gt_correct(gt, line):
'''
Check if there is any unrecognised genotype.
'''
allowed_states = 'AGCTRYMKSWN-*'
if any(j not in allowed_states for j in gt):
print('WARNING: unrecognised character in the line -> %s' % line)
def countPerPosition(sampWords, characterToCount):
'''
Counts given allele in each position along the genome.
'''
count = collections.Counter(sampWords)
characterCount = count[characterToCount]
return characterCount
def countHeteroPerPosition(sampWords):
'''
Counts heterozygosty in each position along the genome in unphased data.
'''
Hcount = 0.0
for gt in sampWords:
if gt in "RYSWKM":
Hcount += 1.0
elif gt in "ACGTN-":
continue
else:
print('WARNING: character "%s" is not recognized' % gt)
return Hcount
def if_FixedHetero(sampWords):
'''
Returns True if the position is a fixed heterozygote (as exists in hybrids) and
False if not.
'''
sampWordsNoNs = []
for gt in sampWords: # filter out the missing data
if gt in "ACGT-RYSWKM":
sampWordsNoNs.append(gt)
elif gt == "N":
continue
else:
print('WARNING: character "%s" is not recognized' % gt)
if all(gt in 'RYMKSW' and gt == sampWordsNoNs[0]
for gt in sampWordsNoNs): # check if fixed
return True
else:
return False
def countPerSample(sampWords, countList, characterToCount):
'''
Counts the given character (e.g. N for missing data) in each sample.
'''
for i in range(len(sampWords)):
if sampWords[i] == characterToCount:
countList[i] += 1
def is_polymorphic(sampWords):
'''
Check if the set of genotypes is polymorphic.
'''
# fist skip missing data
noNsGT = []
for i in (sampWords):
if i != 'N':
noNsGT.append(i)
# check if there is polymorphism:
return any(x in 'RYMKSW' or x != noNsGT[0] for x in noNsGT)
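# Illustrative sketch: is_polymorphic(['A', 'A', 'N']) -> False (missing data is
# skipped and the remaining calls agree), while is_polymorphic(['A', 'T', 'N']) and
# is_polymorphic(['R', 'R']) -> True (different alleles / an IUPAC heterozygote code).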
def twoToOne(GT):
'''
Converts two character coded genotypes to one character code.
'''
GTone = []
for g in GT:
if '/' not in g: # if one character, e.g. the reference column (REF)
if len(g) != 1: # if indel
g = 'N'
else: # two character
if len(g) != 3: # if indel except single site deletion
g = 'N'
elif g[0] == g[2] and g[0] != '.': #if homozygote and not missing
if g[0] == '*': #recode a single site deletion from '*' to '-'
g = '-'
else:
g = g[0]
# single character heterozygotes:
elif g == 'G/A' or g == 'A/G':
g = 'R'
elif g == 'T/C' or g == 'C/T':
g = 'Y'
elif g == 'A/C' or g == 'C/A':
g = 'M'
elif g == 'G/T' or g == 'T/G':
g = 'K'
elif g == 'G/C' or g == 'C/G':
g = 'S'
elif g == 'A/T' or g == 'T/A':
g = 'W'
else:
g = 'N'
GTone.append(g)
return GTone
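# Illustrative sketch: twoToOne(['T/A', './.', 'C/C', '*/*']) -> ['W', 'N', 'C', '-']
# (heterozygotes collapse to IUPAC codes, missing calls to 'N', single-site deletions to '-').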
def OneToTwo(GT):
'''
Converts one character coded genotypes to two character code.
'''
GTtwo = []
for g in GT:
if '/' not in g: # if one character, e.g. the reference column (REF)
if len(g) != 1: # if indel
g = './.'
# one-character codes to two-character genotypes:
elif g == 'A':
g = 'A/A'
elif g == 'G':
g = 'G/G'
elif g == 'C':
g = 'C/C'
elif g == 'T':
g = 'T/T'
elif g == 'R':
g = 'A/G'
elif g == 'Y':
g = 'C/T'
elif g == 'M':
g = 'C/A'
elif g == 'K':
g = 'T/G'
elif g == 'S':
g = 'C/G'
elif g == 'W':
g = 'T/A'
elif g == 'N':
g = './.'
else:
print(
'WARNING: character "%s" is not recognized.'
' It will be replaced with "./.".' % g)
g = './.'
GTtwo.append(g)
return GTtwo
def
# jarvis.py
import pyttsx3 #pip install pyttsx3 (For Speak)
import datetime
import speech_recognition as sr #pip install SpeechRecognition
import wikipedia #pip install wikipedia
import smtplib
import webbrowser as wb
import os
import pyautogui #pip install pyautogui (For Screenshot)
import psutil #pip install psutil
import winshell #pip install winshell (used by the 'empty recycle bin' command)
import pyjokes #pip install pyjokes
import random
import operator
import json
import wolframalpha
import time
from urllib.request import urlopen
import requests
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def time_():
Time=datetime.datetime.now().strftime("%H:%M:%S") #for 24 hour clock
speak("the current time is")
speak(Time)
Time=datetime.datetime.now().strftime("%I:%M:%S") # for 12-hour clock
speak(Time)
def date():
year = (datetime.datetime.now().year)
month = (datetime.datetime.now().month)
date = (datetime.datetime.now().day)
speak("the current date is")
speak(date)
speak(month)
speak(year)
def wishme():
speak("Welcome back MAK!")
time_()
date()
hour = datetime.datetime.now().hour
if hour >=6 and hour<12:
speak("Good Morning Sir")
elif hour >=12 and hour<18:
speak("Good Afternoon Sir!")
elif hour >=18 and hour <24:
speak("Good Evening Sir!")
else:
speak("Good Night Sir!")
speak("Jarvis at your service. Please tell me how can I help you?")
def TakeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-pk')
print(query)
except Exception as e:
print(e)
print("Say that again please...")
return "None"
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
# Enable low security in gmail
server.login('Your email', 'Your password')
server.sendmail('Your email', to, content)
server.close()
def screenshot():
img = pyautogui.screenshot()
img.save("path to save image")
def cpu():
usage = str(psutil.cpu_percent())
speak('CPU is at'+ usage)
battery = psutil.sensors_battery()
speak("Battery is at")
speak(battery.percent)
def jokes():
speak(pyjokes.get_joke())
def Introduction():
speak("I am JARVIS 1.0 , Personal AI assistant , "
"I am created by MAK , "
"I can help you in various regards , "
"I can search for you on the Internet , "
"I can also grab definitions for you from wikipedia , "
"In layman terms , I can try to make your life a bed of roses , "
"Where you just have to command me , and I will do it for you , ")
def Creator():
speak("MAK is an extra-ordinary person ,"
"He has a passion for Robotics, Artificial Intelligence and Machine Learning ,"
"He is very co-operative ,"
"If you are facing any problem regarding the 'Jarvis', He will be glad to help you ")
if __name__ == '__main__':
clear = lambda: os.system('cls')
# This Function will clean any
# command before execution of this python file
clear()
wishme()
while True:
query = TakeCommand().lower()
# All the commands said by user will be
# stored here in 'query' and will be
# converted to lower case for easily
# recognition of command
if 'time' in query:
time_()
elif 'date' in query:
date()
elif 'how are you' in query:
speak("I am fine, Sir Thanks for asking")
speak("How are you Sir?")
if 'fine' in query or "good" in query:
speak("It's good to know that your fine")
else:
speak("I hope you get well soon.")
elif 'wikipedia' in query:
speak("Searching...")
query = query.replace("wikipedia","")
result = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(result)
speak(result)
elif 'open youtube' in query:
speak("What should I search?")
Search_term = TakeCommand().lower()
speak("Here we go to Youtube\n")
wb.open("https://www.youtube.com/results?search_query="+Search_term)
time.sleep(5)
elif 'search google' in query:
speak("What should I search?")
Search_term = TakeCommand().lower()
wb.open('https://www.google.com/search?q='+Search_term)
#elif 'search' in query:
#query = query.replace("query","")
#wb.open(query)
elif "who am i" in query:
speak("If you can talk, then definitely you are a human")
elif "why you came to this world" in query:
speak("Thanks to MAK. further it is a secret")
elif 'word' in query:
speak("opening MS Word")
word = r'Word path'
os.startfile(word)
elif 'what is love' in query or 'tell me about love' in query:
speak("It is 7th sense that destroy all other senses , "
"And I think it is just a mere illusion , "
"It is waste of time")
elif 'empty recycle bin' in query:
winshell.recycle_bin().empty(confirm = False, show_progress = False, sound = True)
speak("Recycle Bin Recycled")
elif 'send email' in query:
try:
speak("What should I say?")
content = TakeCommand()
speak("Who is the Reciever?")
reciept = input("Enter recieptant's name: ")
to = (reciept)
sendEmail(to,content)
speak(content)
speak("Email has been sent.")
except Exception as e:
print(e)
speak("Unable to send the email.")
elif 'search in chrome' in query:
speak("What should I search ?")
chromepath = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
search = TakeCommand().lower()
wb.get(chromepath).open_new_tab(search+'.com')
elif 'log out' in query:
os.system("shutdown -l")
elif 'restart' in query:
os.system("shutdown /r /t 1")
elif 'shutdown' in query:
os.system("shutdown /s /t 1")
elif 'play songs' in query:
video ='songs path'
audio = 'Songs path'
speak("What songs should i play? Audio or Video")
ans = (TakeCommand().lower())
while(ans != 'audio' and ans != 'video'):
speak("I could not understand you. Please Try again.")
ans = (TakeCommand().lower())
if 'audio' in ans:
songs_dir = audio
songs = os.listdir(songs_dir)
print(songs)
elif 'video' in ans:
songs_dir = video
songs = os.listdir(songs_dir)
print(songs)
speak("select a random number")
rand = (TakeCommand().lower())
while('number' not in rand and rand != 'random'): # keep prompting until the reply is either 'number <n>' or 'random'
speak("I could not understand you. Please Try again.") # 'rand' was read once before the loop so it is defined; re-read it until it matches an expected form
rand = (TakeCommand().lower())
if 'number' in rand:
rand = int(rand.replace("number ",""))
os.startfile(os.path.join(songs_dir,songs[rand]))
continue # move straight on to listening for the next command once the selected song starts playing
elif 'random' in rand:
rand = random.randint(0, len(songs) - 1)
os.startfile(os.path.join(songs_dir,songs[rand]))
continue
elif 'remember that' in query:
speak("What should I remember ?")
memory = TakeCommand()
speak("You asked me to remember that"+memory)
remember = open('memory.txt','w')
remember.write(memory)
remember.close()
elif 'do you remember anything' in query:
remember =open('memory.txt', 'r')
speak("You asked me to remeber that"+remember.read())
elif "write a note" in query:
speak("What should i write, sir")
note = TakeCommand()
file = open('note.txt', 'w')
speak("Sir, Should i include date and time")
dt = TakeCommand()
if 'yes' in dt or 'sure' in dt:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
file.write(strTime)
file.write(" :- ")
file.write(note)
speak('done')
else:
file.write(note)
elif "show note" in query:
speak("Showing Notes")
file = open("note.txt", "r")
print(file.read())
speak(file.read())
elif "weather" in query:
# Google Open weather website
# to get API of Open weather
api_key = "open weather api"
base_url = "http://api.openweathermap.org/data /2.5/weather?q="
speak(" City name ")
print("City name : ")
city_name = TakeCommand()
complete_url = base_url + "appid =" + api_key + "&q =" + city_name
response = requests.get(complete_url)
x = response.json()
if x["cod"] != "404":
y = x["main"]
current_temperature = y["temp"]
current_pressure = y["pressure"]
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
print(" Temperature (in kelvin unit) = " +str(current_temperature)+"\n atmospheric pressure (in hPa unit) ="+str(current_pressure) +"\n humidity (in percentage) = " +str(current_humidiy) +"\n description = " +str(weather_description))
else:
speak(" City Not Found ")
elif 'news' in query:
try:
jsonObj = urlopen('''news api link''')
data = json.load(jsonObj)
i = 1
speak('here are some top news from the times of india')
print('''=============== TOP HEADLINES ============'''+ '\n')
for item in data['articles']:
print(str(i) + '. ' + item['title'] + '\n')
print(item['description'] + '\n')
speak(str(i) + '. ' + item['title'] + '\n')
i += 1
except Exception as e:
print(str(e))
elif 'take screenshot' in query:
screenshot()
speak("Done!")
elif 'cpu' in query:
cpu()
elif 'joke' in query:
jokes()
elif 'tell me about yourself' in query or 'who are you' in query:
Introduction()
elif 'tell me about mac' in query or 'creator' in query:
Creator()
| |
assert len(items) >= 3
def test_project_id_query_filter(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "project_id:1",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200
def test_latest_release_query_filter(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "release:latest",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200
def test_simple_multiple_yaxis(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": ["user_count", "event_count"],
},
format="json",
)
assert response.status_code == 200, response.content
response.data["user_count"]["order"] == 0
assert [attrs for time, attrs in response.data["user_count"]["data"]] == [
[{"count": 1}],
[{"count": 1}],
]
response.data["event_count"]["order"] == 1
assert [attrs for time, attrs in response.data["event_count"]["data"]] == [
[{"count": 1}],
[{"count": 2}],
]
@mock.patch("sentry.snuba.discover.timeseries_query", return_value={})
def test_multiple_yaxis_only_one_query(self, mock_query):
with self.feature("organizations:discover-basic"):
self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": ["user_count", "event_count", "rpm()", "rps()"],
},
format="json",
)
assert mock_query.call_count == 1
def test_invalid_interval(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=24)),
"query": "",
"interval": "1s",
"yAxis": "count()",
},
)
assert response.status_code == 400
def test_out_of_retention(self):
with self.options({"system.event-retention-days": 10}):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(before_now(days=20)),
"end": iso_format(before_now(days=15)),
"query": "",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 400
class OrganizationEventsStatsTopNEvents(APITestCase, SnubaTestCase):
def setUp(self):
super(OrganizationEventsStatsTopNEvents, self).setUp()
self.login_as(user=self.user)
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.project = self.create_project()
self.project2 = self.create_project()
self.user2 = self.create_user()
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=4))
self.event_data = [
{
"data": {
"message": "poof",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"user": {"email": self.user.email},
"fingerprint": ["group1"],
},
"project": self.project2,
"count": 7,
},
{
"data": {
"message": "voof",
"timestamp": iso_format(self.day_ago + timedelta(hours=1, minutes=2)),
"fingerprint": ["group2"],
"user": {"email": self.user2.email},
},
"project": self.project2,
"count": 6,
},
{
"data": {
"message": "very bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group3"],
"user": {"email": "<EMAIL>"},
},
"project": self.project,
"count": 5,
},
{
"data": {
"message": "oh no",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group4"],
"user": {"email": "<EMAIL>"},
},
"project": self.project,
"count": 4,
},
{"data": transaction_data, "project": self.project, "count": 3},
# Not in the top 5
{
"data": {
"message": "sorta bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group5"],
"user": {"email": "<EMAIL>"},
},
"project": self.project,
"count": 2,
},
{
"data": {
"message": "not so bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group6"],
"user": {"email": "<EMAIL>"},
},
"project": self.project,
"count": 1,
},
]
self.events = []
for index, event_data in enumerate(self.event_data):
data = event_data["data"].copy()
for i in range(event_data["count"]):
data["event_id"] = "{}{}".format(index, i) * 16
event = self.store_event(data, project_id=event_data["project"].id)
self.events.append(event)
self.transaction = self.events[4]
self.url = reverse(
"sentry-api-0-organization-events-stats",
kwargs={"organization_slug": self.project.organization.slug},
)
def test_simple_top_events(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "user.email"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_limits(self):
data = {
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "user.email"],
}
with self.feature("organizations:discover-basic"):
data["topEvents"] = 50
response = self.client.get(self.url, data, format="json")
assert response.status_code == 400
data["topEvents"] = 0
response = self.client.get(self.url, data, format="json")
assert response.status_code == 400
data["topEvents"] = "a"
response = self.client.get(self.url, data, format="json")
assert response.status_code == 400
def test_top_events_with_projects(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "project"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[",".join([message, event.project.slug])]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_with_issue(self):
# delete a group to make sure the value becomes "unknown" when its group is missing
event_group = self.events[0].group
event_group.delete()
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "issue"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
# Because we deleted the group for event 0
if index == 0 or event.group is None:
issue = "unknown"
else:
issue = event.group.qualified_short_id
results = data[",".join([issue, message])]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_with_functions(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
results = data[self.transaction.transaction]
assert results["order"] == 0
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
def test_top_events_with_functions_on_different_transactions(self):
""" Transaction2 has less events, but takes longer so order should be self.transaction then transaction2 """
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=6))
transaction_data["transaction"] = "/foo_bar/"
transaction2 = self.store_event(transaction_data, project_id=self.project.id)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 2
results = data[self.transaction.transaction]
assert results["order"] == 1
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
results = data[transaction2.transaction]
assert results["order"] == 0
assert [attrs for time, attrs in results["data"]] == [
[{"count": 1}],
[{"count": 0}],
]
def test_top_events_with_query(self):
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=6))
transaction_data["transaction"] = "/foo_bar/"
self.store_event(transaction_data, project_id=self.project.id)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"query": "transaction:/foo_bar/",
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
transaction2_data = data["/foo_bar/"]
assert transaction2_data["order"] == 0
assert [attrs for time, attrs in transaction2_data["data"]] == [
[{"count": 1}],
[{"count": 0}],
]
def test_top_events_with_rpm(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "rpm()",
"orderby": ["-count()"],
"field": ["message", "user.email", "count()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"] / (3600.0 / 60.0)}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_with_multiple_yaxis(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": ["rpm()", "count()"],
"orderby": ["-count()"],
"field": ["message", "user.email", "count()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert results["rpm()"]["order"] == 0
assert results["count()"]["order"] == 1
assert [{"count": self.event_data[index]["count"] / (3600.0 / 60.0)}] in [
attrs for time, attrs in results["rpm()"]["data"]
]
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["count()"]["data"]
]
def test_top_events_with_boolean(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=1, minutes=59)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "device.charging"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[",".join(["False", message])]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_with_timestamp(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + | |
+ f"j={self.lattice.j.__str__()}, "
+ f"field={self.lattice.field}, "
+ f"time_series={self.time_series}, "
+ f"interval={self.interval}, "
+ f"frames={self.frames})"
)
@property
def time(self):
return self.gen * self.interval / 1000
def update(self):
"""
Updates the system to the next generation, appending new values to
the history list of each physical quantity. This function is automatically
called by the animation attribute to render the next frame.
"""
super().update()
self.time_hist.append(self.time)
def __set_axes(self):
for ax in self.ax[1:]:
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:.1e}"))
ax.set(
xlim=(0, self.frames * self.interval / 1000),
xlabel=self.axes_labels["time"],
)
ax.grid(linestyle=":")
self.ax[0].set(ylabel="i", xlabel="j")
self.ax[1].set(ylabel=self.axes_labels["energy"])
self.ax[2].set(ylabel=self.axes_labels["magnet"])
self.ax[3].set(ylabel=self.axes_labels["specific_heat"])
self.ax[4].set(ylabel=self.axes_labels["susceptibility"])
def __init_ani_time_series(self):
self.__set_axes()
self.ax[0].imshow(self.lattice.state, norm=Normalize(vmin=-1.0, vmax=1.0))
def __update_ani_time_series(self, frame):
for ax in self.ax:
ax.clear()
self.update()
self.__set_axes()
self.fig.suptitle(self.__str__())
self.ax[0].imshow(self.lattice.state, norm=Normalize(vmin=-1.0, vmax=1.0))
self.ax[1].plot(self.time_hist, self.mean_energy_hist, color="purple")
self.ax[2].plot(self.time_hist, self.magnet_hist, color="purple")
self.ax[3].plot(self.time_hist, self.specific_heat_hist, color="purple")
self.ax[4].plot(self.time_hist, self.susceptibility_hist, color="purple")
def __init_ani_no_time_series(self):
self.ax.set(ylabel="i", xlabel="j")
self.ax.imshow(self.lattice.state, norm=Normalize(vmin=-1.0, vmax=1.0))
def __update_ani_no_time_series(self, frame):
self.ax.clear()
self.update()
self.ax.set(ylabel="i", xlabel="j")
self.fig.suptitle(self.__str__())
self.ax.imshow(self.lattice.state, norm=Normalize(vmin=-1.0, vmax=1.0))
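# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of building and saving the animation. The keyword
# arguments are inferred from the ones CoolingAnimatedIsing forwards to this
# class below; the output filename is made up.
def _demo_animated_ising(outfile="ising.gif"):
    ising = AnimatedIsing(
        shape=(64, 64),
        temp=2.5,
        j=(1.0, 1.0),
        field=0.0,
        init_state="random",
        time_series=True,
        interval=100,
        frames=60,
    )
    # each rendered frame calls the update methods defined above
    ising.animation.save(outfile)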
class CoolingAnimatedIsing(AnimatedIsing):
"""
Animation of the Ising Model at constant external magnetic field,
but with its temperature growing (or decaing) exponentially, given
initial and target values.
Args
------------------
shape : 2-tuple of ints
the shape of the lattice of spins. Default is (128, 128).
temp : float. Default is 5.0
the initial temperature of the lattice as a whole.
final_temp : float. Default is 1.0
the final temperature of the system.
cooling_rate : float. Default is 0.5
the rate at which the temperature decays toward final_temp.
(1 / cooling_rate) is the time (in seconds) it takes the temperature to cover
about 63% of the gap to final_temp.
j : float or 2-tuple of floats. Default is (1.0, 1.0)
the coefficient of interaction between neighboring spins in the lattice.
when a tuple is supplied, the first value is the coefficient for row neighbors
and the second value is the coefficient for column neighbors.
field : float. Default is 0.0
the initial value for the external magnetic field.
init_state : {"random", "down", "up"}. Default is "random"
the initial configuration of the spins in the lattice.
time_series : bool. Default is False
whether or not to include the time series of the macroscopic
physical quantities in the animation
interval : int. Default is 100
the interval between each frame in the animation, in milliseconds
frames : int. Default is 60
the number of frames to include in the animation
Attributes
------------------
gen : int
the current generation of the system. Starts at 0 and is
incremented by a call to update method
init_state : {"random", "down", "up"}; Default is "random"
the initial configuration of the spins in the lattice.
spins : int
the number of the spins in the lattice
lattice : Lattice
an instance of a Lattice object that describes the
current state of the system
animation : FuncAnimation
a matplotlib.animation.FuncAnimation object. The animation is saved
with a call to animation.save("outfile.gif"). More info at
https://matplotlib.org/stable/api/_as_gen/matplotlib.animation.FuncAnimation.html
fig : Figure
a matplotlib.figure.Figure object in which the animation takes place. More info at
https://matplotlib.org/stable/api/figure_api.html?highlight=figure#matplotlib.figure.Figure
ax : Axes or list of Axes
single instance or list of matplotlib.axes.Axes objects. These are the axes in the
figures in the animation. More info at
https://matplotlib.org/stable/api/axes_api.html?highlight=axes#module-matplotlib.axes
init_temp : float
the initial temperature of the lattice as a whole.
temp : float
the current temperature of the lattice, in energy units.
A new value can be assigned anytime.
final_temp : float
the final temperature of the system.
cooling_rate : float
the rate at which the temperature decays toward final_temp.
(1 / cooling_rate) is the time (in seconds) it takes the temperature to cover
about 63% of the gap to final_temp.
field : float
the current value of the external magnetic field, oriented
perpendicularly to the lattice. A positive value represents
an up-oriented field. A new value can be assigned anytime.
energy : float
the total energy of the lattice in its current generation.
mag_mom : float
the total magnetic moment of the lattice in its current generation.
mean_energy_hist : list[float]
a list with the values of the mean energy for each past generation.
New values are appended by a call to the update function
magnet_hist : list[float]
a list with the values of the magnetization per spin
for each past generation. New values are appended by a call to
the update function
specific_heat_hist : list[float]
a list with the values of the specific heat per spin
for each past generation. New values are appended by a call to
the update function
susceptibility_hist : list[float]
a list with the values of the magnetic susceptibility
for each past generation. New values are appended by a call to
the update function
"""
def __init__(
self,
shape=(128, 128),
temp=5,
final_temp=1,
cooling_rate=0.5,
j=(1, 1),
field=0,
init_state="random",
time_series=False,
interval=100,
frames=100,
) -> None:
super().__init__(
shape=shape,
temp=temp,
j=j,
field=field,
init_state=init_state,
time_series=time_series,
interval=interval,
frames=frames,
)
self._init_temp = abs(float(self.temp))
self._final_temp = abs(float(final_temp))
self._cooling_rate = abs(float(cooling_rate))
def __repr__(self) -> str:
return (
f"CoolingAnimatedIsing(shape={self.lattice.shape.__str__()}, "
+ f"temp={self.lattice.temp}, "
+ f"final_temp={self.final_temp}, "
+ f"cooling_rate={self.cooling_rate}, "
+ f"j={self.lattice.j.__str__()}, "
+ f"field={self.lattice.field}, "
+ f"time_series={self.time_series}, "
+ f"interval={self.interval}, "
+ f"frames={self.frames})"
)
@property
def init_temp(self):
return self._init_temp
@property
def final_temp(self):
return self._final_temp
@property
def cooling_rate(self):
return self._cooling_rate
def update(self):
"""
Updates the system to the next generation, appending new values to
the history list of each physical quantity. This function is automatically
called by the animation attribute to render the next frame.
"""
super().update()
self.temp = self.final_temp + (self.init_temp - self.final_temp) * exp(
-self.cooling_rate * self.time
)
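# --- Illustration of the cooling schedule (hypothetical values) ---
# With init_temp=5.0, final_temp=1.0 and cooling_rate=0.5, at
# t = 1 / cooling_rate = 2 s the temperature is
#   1.0 + (5.0 - 1.0) * exp(-0.5 * 2) ~= 2.47,
# i.e. about 63% of the way from 5.0 down to 1.0, as the docstring states.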
class DynamicAnimatedIsing(Ising):
"""
Animation of the Ising Model with both temperature and external magnetic
field varying as functions of time
Args
------------------
shape : 2-tuple of ints; Default is (128, 128)
the shape of the lattice of spins.
temp : callable; Default is lambda t: 2.0
a real valued one variable function that describes the temperature in
the interval [0, interval * frames / 1000]
j : float or 2-tuple of floats; Default is 1.0
the coefficient of interaction between neighboring spins in the lattice.
when a tuple is supplied, the first value is the coefficient for row neighbors
and the second value is the coefficient for column neighbors.
field : callable; Default is lambda t: math.sin(t)
a real valued one variable function that describes the external
magnetic field in the interval [0, interval * frames / 1000]
init_state : {"random", "down", "up"}; Default is "random"
the initial configuration of the spins in the lattice.
time_series : bool. Default is False
whether or not to include the time series of the macroscopic
physical quantities in the animation
interval : int. Default is 100
the interval between each frame in the animation, in milliseconds
frames : int. Default is 60
the number of frames to include in the animation
Attributes
------------------
gen : int
the current generation of the system. Starts at 0 and is
incremented by a call to update method
init_state : {"random", "down", "up"}; Default is "random"
the initial configuration of the spins in the lattice.
spins : int
the number of the spins in the lattice
lattice : Lattice
an instance of a Lattice object that describes the
current state of the system
animation : FuncAnimation
a matplotlib.animation.FuncAnimation object. The animation is saved
with a call to animation.save("outfile.gif"). More info at
https://matplotlib.org/stable/api/_as_gen/matplotlib.animation.FuncAnimation.html
fig : Figure
a matplotlib.figure.Figure object in which the animation takes place. More info at
https://matplotlib.org/stable/api/figure_api.html?highlight=figure#matplotlib.figure.Figure
ax : Axes or list of Axes
single instance or list of matplotlib.axes.Axes objects. These are the axes in the
figures in the animation. More info at
https://matplotlib.org/stable/api/axes_api.html?highlight=axes#module-matplotlib.axes
temp : float
the current temperature of the lattice, in energy units.
A new value can be assigned anytime.
field : float
the current value of the external magnetic field, oriented
perpendicularly to the lattice. A positive value represents
an up-oriented field. A new value can be assigned anytime.
temp_func : callable
the function passed as temp | |
good value and some bad values.
request.md5checksum = obj.getMd5Checksum()
badAccessions = [
"no such accession", objectList[0].getSourceAccessions()[0]]
for accession in badAccessions:
request.accession = accession
self.verifySearchResultsEmpty(request, path, responseClass)
request.accession = ""
if hasAssemblyId:
badAssemblyIds = [
"no such asssembly", objectList[0].getAssemblyId()]
for assemblyId in badAssemblyIds:
request.assembly_id = assemblyId
self.verifySearchResultsEmpty(request, path, responseClass)
request.assembly_id = ""
def testReferencesSearchFilters(self):
path = '/references/search'
for referenceSet in self.dataRepo.getReferenceSets():
def requestFactory():
request = protocol.SearchReferencesRequest()
request.reference_set_id = referenceSet.getId()
return request
self.verifyReferenceSearchFilters(
referenceSet.getReferences(), False, path, requestFactory,
protocol.SearchReferencesResponse, self.verifyReferencesEqual)
def testReferenceSetsSearchFilters(self):
path = '/referencesets/search'
def requestFactory():
return protocol.SearchReferenceSetsRequest()
self.verifyReferenceSearchFilters(
self.dataRepo.getReferenceSets(), True, path, requestFactory,
protocol.SearchReferenceSetsResponse,
self.verifyReferenceSetsEqual)
def testGetVariantSet(self):
path = "/variantsets"
for dataset in self.dataRepo.getDatasets():
for variantSet in dataset.getVariantSets():
responseObject = self.sendGetObject(
path, variantSet.getId(), protocol.VariantSet)
self.verifyVariantSetsEqual(responseObject, variantSet)
for badId in self.getBadIds():
variantSet = variants.AbstractVariantSet(dataset, badId)
self.verifyGetMethodFails(path, variantSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testGetVariantAnnotationSet(self):
path = "/variantannotationsets"
for dataset in self.dataRepo.getDatasets():
for variantSet in dataset.getVariantSets():
for vas in variantSet.getVariantAnnotationSets():
responseObject = self.sendGetObject(
path, vas.getId(), protocol.VariantAnnotationSet)
self.assertEqual(
vas.getId(), responseObject.id,
"The requested ID should match the returned")
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testGetVariant(self):
# get a variant from the search method
referenceName = '1'
start = 2**15
request = protocol.SearchVariantsRequest()
request.variant_set_ids.append(self.variantSet.getId())
request.reference_name = referenceName
request.start = start
request.end = 2**16
path = '/variants/search'
responseData = self.sendSearchRequest(
path, request, protocol.SearchVariantsResponse)
variants = responseData.variants[:10]
# get 'the same' variant using the get method
for variant in variants:
path = '/variants'
responseObject = self.sendGetObject(
path, variant.id, protocol.Variant)
self.assertEqual(responseObject, variant)
def testGetReferenceSet(self):
path = "/referencesets"
for referenceSet in self.dataRepo.getReferenceSets():
responseObject = self.sendGetObject(
path, referenceSet.getId(), protocol.ReferenceSet)
self.verifyReferenceSetsEqual(responseObject, referenceSet)
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testGetReference(self):
path = "/references"
for referenceSet in self.dataRepo.getReferenceSets():
for reference in referenceSet.getReferences():
responseObject = self.sendGetObject(
path, reference.getId(), protocol.Reference)
self.verifyReferencesEqual(responseObject, reference)
for badId in self.getBadIds():
referenceSet = references.AbstractReferenceSet(badId)
self.verifyGetMethodFails(path, referenceSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testGetCallSet(self):
path = "/callsets"
for dataset in self.dataRepo.getDatasets():
for variantSet in dataset.getVariantSets():
for callSet in variantSet.getCallSets():
responseObject = self.sendGetObject(
path, callSet.getId(), protocol.CallSet)
self.verifyCallSetsEqual(responseObject, callSet)
for badId in self.getBadIds():
callSet = variants.CallSet(variantSet, badId)
self.verifyGetMethodFails(path, callSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testGetReadGroup(self):
path = "/readgroups"
for dataset in self.dataRepo.getDatasets():
for readGroupSet in dataset.getReadGroupSets():
for readGroup in readGroupSet.getReadGroups():
responseObject = self.sendGetObject(
path, readGroup.getId(), protocol.ReadGroup)
self.verifyReadGroupsEqual(responseObject, readGroup)
for badId in self.getBadIds():
readGroup = reads.AbstractReadGroup(readGroupSet, badId)
self.verifyGetMethodFails(path, readGroup.getId())
for badId in self.getBadIds():
readGroupSet = reads.AbstractReadGroupSet(dataset, badId)
self.verifyGetMethodFails(path, readGroupSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testVariantsSearch(self):
referenceName = '1'
request = protocol.SearchVariantsRequest()
request.reference_name = referenceName
request.start = 0
request.end = 0
request.variant_set_ids.append(self.variantSet.getId())
# Request window is too small, no results
path = '/variants/search'
responseData = self.sendSearchRequest(
path, request, protocol.SearchVariantsResponse)
self.assertEqual("", responseData.next_page_token)
self.assertEqual(0, len(responseData.variants))
# Larger request window, expect results
request.end = 2 ** 15
responseData = self.sendSearchRequest(
path, request, protocol.SearchVariantsResponse)
self.assertTrue(protocol.validate(
protocol.toJson(responseData), protocol.SearchVariantsResponse))
self.assertGreater(len(responseData.variants), 0)
# Verify all results are in the correct range, set and reference
for variant in responseData.variants:
self.assertGreaterEqual(variant.start, 0)
self.assertLessEqual(variant.end, 2 ** 15)
self.assertEqual(variant.variant_set_id, self.variantSet.getId())
self.assertEqual(variant.reference_name, referenceName)
# TODO: Add more useful test scenarios, including some covering
# pagination behavior.
def testVariantAnnotationSetsSearch(self):
self.assertIsNotNone(self.variantAnnotationSet)
request = protocol.SearchVariantAnnotationSetsRequest()
request.variant_set_id = "b4d=="
path = '/variantannotationsets/search'
response = self.sendJsonPostRequest(path, protocol.toJson(request))
responseData = self.deserialize(response.data, protocol.GAException)
self.assertTrue(protocol.validate(protocol.toJson(responseData),
protocol.GAException))
self.assertEqual(responseData.error_code, 758389611)
self.assertEqual(responseData.message,
"Either the resources you are looking for don't exist, or you don't have access to them.")
request.variant_set_id = self.variantSet.getId()
response = self.sendJsonPostRequest(path, protocol.toJson(request))
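# The search endpoint nests its payload under a 'results' key, so pull that
# sub-object out and re-serialize it before deserializing into the protocol class.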
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationSetsResponse)
self.assertTrue(protocol.validate(
protocol.toJson(responseData),
protocol.SearchVariantAnnotationSetsResponse))
self.assertGreater(len(responseData.variant_annotation_sets), 0,
"Expect some results for a known good ID")
# TODO check the instance variables; we should be able to match
# the values from the protocol object we get back with the values
# in the original variantAnnotationSet.
def testVariantAnnotationsSearch(self):
self.assertIsNotNone(self.variantAnnotationSet)
request = protocol.SearchVariantAnnotationsRequest()
# TODO split these into separate tests, and factor out the duplicated
# code.
path = '/variantannotations/search'
request.start = 2**15
request.end = 2**16
request.page_size = 1
request.reference_name = "1"
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertGreater(len(responseData.variant_annotations), 0)
self.assertIsNotNone(
responseData.next_page_token,
"Expected more than one page of results")
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 10
request.reference_name = "1"
request.effects.add().term_id = "ThisIsNotAnEffect"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertEqual(
len(responseData.variant_annotations), 0,
"There should be no results for a nonsense effect")
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 10
request.reference_name = "1"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertGreater(len(responseData.variant_annotations), 0)
for ann in responseData.variant_annotations:
self.assertGreater(
len(ann.transcript_effects), 0,
("When no effects are requested ensure "
"some transcript effects are still present"))
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 5
request.reference_name = "1"
request.effects.add().term_id = "SO:0001627"
request.effects.add().term_id = "B4DID"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
responseLength = len(responseData.variant_annotations)
self.assertGreater(
responseLength, 0,
"There should be some results for a known effect")
for ann in responseData.variant_annotations:
effectPresent = False
for effect in ann.transcript_effects:
for featureType in effect.effects:
if featureType.term_id in [e.term_id for e in request.effects]:
effectPresent = True
self.assertEqual(
True, effectPresent,
"The ontology term should appear at least once")
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 5
request.reference_name = "1"
request.effects.add().term_id = "B4DID"
request.effects.add().term_id = "SO:0001627"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertEqual(
len(responseData.variant_annotations),
responseLength,
"Order shall not affect results")
for ann in responseData.variant_annotations:
effectPresent = False
for effect in ann.transcript_effects:
for featureType in effect.effects:
if featureType.term_id in [e.term_id for e in request.effects]:
effectPresent = True
self.assertEqual(
True,
effectPresent,
"The ontology term should appear at least once")
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 5
request.reference_name = "1"
request.effects.add().term_id = "SO:0001627"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertGreater(len(responseData.variant_annotations), 0,
"There should be some results for a good effect ID")
for ann in responseData.variant_annotations:
effectPresent = False
txIds = [t.id for t in ann.transcript_effects]
self.assertEqual(len(txIds), len(set(txIds)),
"Transcript effects should be unique")
for effect in ann.transcript_effects:
for featureType in effect.effects:
if featureType.term_id in [e.term_id for e in request.effects]:
effectPresent = True
self.assertEqual(True, effectPresent,
"The ontology term should appear at least once")
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSet.getId()
request.start = 0
request.end = 10
request.reference_name = "1"
request.effects.add().term_id = "SO:0001627"
request.effects.add().term_id = "SO:0001791"
response = self.sendJsonPostRequest(path, protocol.toJson(request))
response_data = json.loads(response.data)
response = json.dumps(response_data.get('results', {}))
responseData = self.deserialize(response, protocol.
SearchVariantAnnotationsResponse)
self.assertGreater(len(responseData.variant_annotations), 0)
def testGetFeatureSet(self):
path = "/featuresets"
for dataset in self.dataRepo.getDatasets():
for featureSet in dataset.getFeatureSets():
responseObject = self.sendGetObject(
path, featureSet.getId(), protocol.FeatureSet)
self.verifyFeatureSetsEqual(responseObject, featureSet)
for badId in self.getBadIds():
featureSet = sequence_annotations.AbstractFeatureSet(
dataset, badId)
self.verifyGetMethodFails(path, featureSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
def testFeatureSetsSearch(self):
path = '/featuresets/search'
for dataset in self.dataRepo.getDatasets():
featureSets = dataset.getFeatureSets()
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = dataset.getId()
self.verifySearchMethod(
request, path, protocol.SearchFeatureSetsResponse, featureSets,
self.verifyFeatureSetsEqual)
for badId in self.getBadIds():
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = badId
self.verifySearchMethodFails(request, path)
@unittest.skip("Disabled")
def testGetContinuousSet(self):
path = "/continuoussets"
for dataset in self.dataRepo.getDatasets():
for continuousSet in dataset.getContinuousSets():
responseObject = self.sendGetObject(
path, continuousSet.getId(), protocol.ContinuousSet)
self.verifyContinuousSetsEqual(responseObject, continuousSet)
for badId in self.getBadIds():
continuousSet = continuous.AbstractContinuousSet(
dataset, badId)
self.verifyGetMethodFails(path, continuousSet.getId())
for badId in self.getBadIds():
self.verifyGetMethodFails(path, badId)
@unittest.skip("Disabled")
def testContinuousSetsSearch(self):
path = '/continuoussets/search'
for dataset in self.dataRepo.getDatasets():
continuousSets = dataset.getContinuousSets()
request = protocol.SearchContinuousSetsRequest()
request.dataset_id = dataset.getId()
self.verifySearchMethod(
request, path, protocol.SearchContinuousSetsResponse,
continuousSets, self.verifyContinuousSetsEqual)
for badId in self.getBadIds():
request = protocol.SearchContinuousSetsRequest()
request.dataset_id = badId
self.verifySearchMethodFails(request, path)
def testGetFeature(self):
dataset = self.dataRepo.getDatasets()[0]
featureSet = dataset.getFeatureSets()[0]
request = protocol.SearchFeaturesRequest()
request.feature_set_id = featureSet.getId()
request.reference_name = "chr1"
request.start = 0
request.end = 2**16
path = '/features/search'
responseData = self.sendSearchRequest(
path, request, protocol.SearchFeaturesResponse)
features = responseData.features[:10]
# get 'the same' feature using the get method
for feature in features:
path = '/features'
responseObject = self.sendGetObject(
path, feature.id, protocol.Feature)
self.verifyFeaturesEquivalent(responseObject, feature)
def testFeaturesSearch(self):
dataset = self.dataRepo.getDatasets()[0]
featureSet = dataset.getFeatureSets()[0]
referenceName = 'chr1'
request = | |
import itertools
from datetime import datetime
from numpy import nan
import numpy as np
from pandas.core.common import _possibly_downcast_to_dtype, isnull
from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.expressions as expressions
from pandas.tslib import Timestamp
from pandas.util import py3compat
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
is_numeric = False
is_bool = False
is_object = False
_can_hold_na = False
_downcast_dtype = None
def __init__(self, values, items, ref_items, ndim=2):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
if values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
if len(items) != len(values):
raise ValueError('Wrong number of items passed %d, indices imply %d'
% (len(items), len(values)))
self._ref_locs = None
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
def _gi(self, arg):
return self.values[arg]
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
indexer = com._ensure_platform_int(indexer)
if (indexer == -1).any():
raise AssertionError('Some block items were not in block '
'ref_items')
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, this block's items need to be re-derived from ref_items.
"""
if not isinstance(ref_items, Index):
raise AssertionError('block ref_items must be an Index')
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
name = type(self).__name__
result = '%s: %s, %s, dtype %s' % (
name, com.pprint_thing(self.items), shape, self.dtype)
if py3compat.PY3:
return unicode(result)
return com.console_encode(result)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
if not self.ref_items.equals(other.ref_items):
raise AssertionError('Merge operands must have same ref_items')
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
masked_idx = indexer[indexer != -1]
new_values = com.take_nd(self.values, masked_idx, axis=0,
allow_fill=False)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block into zero or more blocks around columns with given label,
for "deleting" a column without having to copy data by returning views
on the original array.
Returns
-------
generator of Block
"""
loc = self.items.get_loc(item)
if type(loc) == slice or type(loc) == int:
mask = [True] * len(self)
mask[loc] = False
else: # already a mask, inverted
mask = -loc
for s, e in com.split_ranges(mask):
yield make_block(self.values[s:e],
self.items[s:e].copy(),
self.ref_items)
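# Hypothetical usage sketch (illustration only; the values below are made up):
#   blk = make_block(np.arange(6).reshape(3, 2), ['a', 'b', 'c'], ['a', 'b', 'c'])
#   pieces = list(blk.split_block_at('b'))
#   # -> two blocks holding the rows for 'a' and for 'c'; no data is copied
#   #    because each piece slices self.values and reuses ref_items.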
def fillna(self, value, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
new_values = self.values if inplace else self.values.copy()
mask = com.isnull(new_values)
np.putmask(new_values, mask, value)
block = make_block(new_values, self.items, self.ref_items)
if downcast:
block = block.downcast()
return block
def downcast(self, dtypes = None):
""" try to downcast each item to the dict of dtypes if present """
if dtypes is None:
dtypes = dict()
values = self.values
blocks = []
for i, item in enumerate(self.items):
dtype = dtypes.get(item,self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i])
blocks.append(make_block(nv, [ item ], self.ref_items))
continue
nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype))
nv = _block_shape(nv)
blocks.append(make_block(nv, [ item ], self.ref_items))
return blocks
def astype(self, dtype, copy = True, raise_on_error = True):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
try:
newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
self.items, self.ref_items)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if (newb.shape != self.shape or
(not copy and newb.itemsize < self.itemsize)):
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name, newb.itemsize))
return newb
def convert(self, copy = True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
return result
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def to_native_types(self, slicer=None, na_rep='', **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
return values.tolist()
def replace(self, to_replace, value, inplace=False, filter=None):
""" replace the to_replace value with value, possible to create new blocks here
this is just a call to putmask """
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
for i, item in enumerate(self.items):
if item not in filter:
mask[i] = False
if not mask.any():
if inplace:
return [ self ]
return [ self.copy() ]
return self.putmask(mask, value, inplace=inplace)
def putmask(self, mask, new, inplace=False):
""" putmask the data to the block; it is possible that we may create a new dtype of block
return the resulting block(s) """
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
axis = getattr(new, '_het_axis', 0)
new = new.reindex_axis(self.items, axis=axis, copy=False).values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
axis = getattr(mask, '_het_axis', 0)
mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T
if self._can_hold_element(new):
new = self._try_cast(new)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
for i, item in enumerate(self.items):
m = mask[i]
# need a new block
if m.any():
n = new[i] if isinstance(new, np.ndarray) else new
# type of the new block
dtype, _ = com._maybe_promote(np.array(n).dtype)
# we need to explicitly astype here to make a copy
nv = new_values[i].astype(dtype)
# we create a new block type
np.putmask(nv, m, n)
else:
nv = new_values[i] if inplace else new_values[i].copy()
nv = _block_shape(nv)
new_blocks.append(make_block(nv, [ item ], self.ref_items))
return new_blocks
if inplace:
return [ self ]
return [ make_block(new_values, self.items, self.ref_items) ]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, missing=None, coerce=False):
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
values = self.values if inplace else self.values.copy()
if values.ndim != 2:
raise NotImplementedError
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
if missing is None:
| |
#!/usr/bin/env python
__description__ = \
"""
Tool for randomly creating groups/zoom breakout rooms for groups of students
weighted by a proficiency score.
"""
__author__ = "<NAME>"
__date__ = "2020-03-30"
import numpy as np
import pandas as pd
import json, random, re, random, argparse, sys, os
import urllib.request
class GroupNameGenerator:
"""
Generate random group names by combining adjective/noun pairs.
Filters for words less than or equal to max_word_len and discards
expletives.
"""
def __init__(self, max_word_len=8, loop_timeout=100, all_nouns=False):
"""
max_word_len: longest word length allowed
loop_timeout: after loop_timeout tries, give up making a new unique name
all_nouns: by default, this uses an internal list of animals for the
names. if all_nouns is True, a list of nouns is downloaded.
Warning: this can lead to some strangely inappropriate/
uncomfortable names, even with expletive filtering...
"""
self._loop_timeout = loop_timeout
# characters we don't want in names in case they come in from one of our
# servers
bad_char = re.compile(r"\W")
# Download adjectives
adj = self._download_json("https://github.com/dariusk/corpora/raw/master/data/words/adjs.json")
adj = [a.lower() for a in adj["adjs"] if len(a) <= max_word_len]
# Either use a list of animals included with scripts or random nouns
if all_nouns:
noun = self._download_json("https://github.com/dariusk/corpora/raw/master/data/words/nouns.json")["nouns"]
else:
animal_file = os.path.join(os.path.split(__file__)[0],"data","animals.txt")
noun = []
with open(animal_file,'r') as f:
for line in f.readlines():
noun.append(line.strip().lower())
# Clean up noun list
noun = [n for n in noun if len(n) <= max_word_len]
# Remove expletives
expletives = self._download_json("https://github.com/dariusk/corpora/raw/master/data/words/expletives.json")
expletives = [e.lower() for e in expletives]
expletives.extend(["genitals","genitalia","puberty","virgin"])
# Final, cleaned up list of words
self.noun = [n for n in noun if not bad_char.search(n) and n not in expletives]
self.adj = [a for a in adj if not bad_char.search(a) and a not in expletives]
self._groups_generated = {}
def _download_json(self,url):
"""
Download a json from the url
"""
response = urllib.request.urlopen(url)
return json.loads(response.read().decode('ascii'))
@property
def current_group(self):
"""
Return a new, unique group name.
"""
counter = 0
while True:
# Grab random adjective and noun with same first letter
adj = random.choice(self.adj)
noun = random.choice([n for n in self.noun if n.startswith(adj[0])])
group = "{}_{}".format(adj,noun)
# Make sure that the newest group is unique
try:
self._groups_generated[group]
except KeyError:
self._groups_generated[group] = None
break
# If we've tried a bunch of times, die
if counter > self._loop_timeout:
err = "could not find another unique name ({} total generated)\n".format(len(self._groups_generated))
raise ValueError(err)
counter += 1
return group
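# --- Hypothetical usage sketch (not part of the original script) ---
# Every read of current_group yields a new unique "adjective_noun" name;
# constructing the generator downloads the word lists, so network access is needed.
def _demo_group_names(n=3):
    namer = GroupNameGenerator(max_word_len=8)
    return [namer.current_group for _ in range(n)]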
def create_partners(scores, score_noise=2.0, num_chunks=4):
"""
Create random partners in a class, with pairing biased such that the
highest-scoring students are paired with the lowest-scoring students. This is done by
sorting the class based on the array scores, breaking the class into
num_chunks chunks, and then making pairs by moving in from outermost to
innermost chunks to create pairs. If there is an odd number of students, one
group of three is created.
arguments
---------
scores: array of scores
score_noise: standard deviation of noise to add to scores. noise ensures that the
same students aren't always at the top and the bottom, and thus that they
don't always get paired.
num_chunks: how many chunks to break the class into for pairing. a value of 4 would
break the class into quartiles, then randomly assign pairs from the
1st and 4th quartile, then from the 2nd and 3rd quartile. This value
must be even.
returns
-------
list of lists of integers indicating group assignments
"""
# Names is list of integers corresponding to the order in which the
# scores were fed in.
names = range(len(scores))
# Add gaussian noise to the scores, using score_noise as the standard deviation
noisy_scores = np.array(scores) + np.random.normal(0, score_noise, len(scores))
# Sort students by score from lowest to highest
score_name = []
for i in range(len(names)):
score_name.append((noisy_scores[i],names[i]))
score_name.sort()
# number of ways to split the class. force to be even
if num_chunks % 2 != 0:
num_chunks = num_chunks - 1
if num_chunks > len(score_name):
err = "Number of chunks exceeds the number of students.\n"
raise ValueError(err)
partners = []
# Deal with the fact that number of students might not be divisible by
# num_chunks by shaving the top and the bottom students and making them
# partners until it's properly divisible.
remainder = len(score_name) % num_chunks
while remainder > 1:
partners.append([score_name[0][1],score_name[-1][1]])
score_name = score_name[1:-1]
remainder = remainder - 2
# If we've got a student leftover, there are an odd number of students.
# Store lowest student for later
spare_student = None
if remainder == 1:
spare_student = score_name[0]
score_name = score_name[1:]
# Now create chunks
chunk_size = int(len(score_name)/num_chunks)
chunks = [score_name[i:i+chunk_size]
for i in range(0,len(score_name),chunk_size)]
# Now make partners moving from outside chunks to inside chunks
for i in range(int(len(chunks)/2)):
lower_edge = chunks[i][:]
upper_edge = chunks[len(chunks)-1-i][:]
# randomize within chunks
random.shuffle(lower_edge)
random.shuffle(upper_edge)
# Create partners
for j in range(len(lower_edge)):
partners.append([lower_edge[j][1],upper_edge[j][1]])
# If there was a spare student, add them to a random group to make a triple
if spare_student is not None:
index = random.choice(range(len(partners)))
partners[index].append(spare_student[1])
# Shuffle the partners so the lowest student doesn't always appear first
random.shuffle(partners)
return partners
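# --- Hypothetical demo (not part of the original script) ---
# Shows how create_partners biases pairing: with the made-up scores below,
# high scorers tend to be paired with low scorers. The result is a list of
# index pairs into demo_scores; the exact pairing varies run to run.
def _demo_create_partners():
    demo_scores = [55, 92, 78, 61, 88, 70, 95, 49]
    return create_partners(demo_scores, score_noise=2.0, num_chunks=4)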
def simple_break(scores, group_size, score_noise=None):
"""
Break a vector of scores into groups of group_size. Tries to assign
one person from each score category. (For a group size of 4, this would
mean low, okay, good, great members of a group).
score_noise specifies how much noise to add to the score. This is useful,
particularly for relatively small groups, because it means you end up
with different groups each time you run it. If None -> 0.1*std_dev(score);
otherwise, it is interpreted as the std_dev of the noise to add. If 0, no
noise is added.
For groups of two, you might consider create_partners rather than
simple_break.
"""
# Figure out what sort of noise to add to the scores. If None,
# give 0.1*standard deviation of scores as noise. If 0, add no noise.
# otherwise, use score_noise as the standard deviation for the noisy
# generator.
if score_noise is None:
score_noise = np.random.normal(0,np.std(scores)/10.,len(scores))
else:
if score_noise == 0:
score_noise = np.zeros(len(scores))
else:
score_noise = np.random.normal(0,score_noise,len(scores))
# Add the gaussian noise prepared above to the scores
noisy_scores = np.array(scores) + score_noise
# Figure out how many groups to include
num_groups = len(scores) // group_size
# Sort names by scores
to_chop = [(s,i) for i, s in enumerate(noisy_scores)]
to_chop.sort()
# Find extras that don't fit into the groups
num_extras = len(scores) % num_groups
# If there are extra people, rip them from the exact center of the
# score list and stick them on the very end.
extra_peeps = []
if num_extras > 0:
center_start = (len(to_chop) // 2) + (num_extras // 2)
for c in range(center_start,center_start-num_extras,-1):
extra_peeps.append(to_chop.pop(c))
to_chop.extend(extra_peeps)
# Create list of break groups
break_groups = []
for b in range(group_size):
break_groups.append(to_chop[(b*num_groups):((b+1)*num_groups)])
if num_extras > 0:
break_groups.append(to_chop[((b+1)*num_groups):])
# Shuffle within each break group
for bg in break_groups:
random.shuffle(bg)
final_groups = []
for i in range(num_groups):
final_groups.append([])
for j in range(len(break_groups)):
try:
member = break_groups[j][i][1]
final_groups[i].append(member)
except IndexError:
pass
return final_groups
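# --- Hypothetical demo (not part of the original script) ---
# With nine made-up scores and group_size=3, simple_break returns three groups
# of three indices, each mixing a low, a middle and a high scorer.
def _demo_simple_break():
    demo_scores = [40, 55, 62, 70, 74, 81, 88, 93, 99]
    return simple_break(demo_scores, group_size=3, score_noise=0)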
def assign_groups(df, score_column=None, out_column="group_assignment", group_size=2, all_nouns=False):
"""
Assign students in a dataframe into groups. The group assignment will be
added as a column in the data frame.
df: data frame containing student scores
score_column: column with scores. If None, assign all students the same score
out_column: column to write group assignment to.
group_size: group size
all_nouns: whether or not to use all_nouns (rather than just animals) for
group names. This makes number of possible groups larger, but
can also lead to some alarming names.
"""
# If score column is not specified, assign everyone a score of 1
if score_column is None:
score = np.ones(len(df.iloc[:,0]))
else:
try:
score = df[score_column]
except KeyError:
err = "input dataframe does not have column '{}'\n".format(score_column)
raise ValueError(err)
# Sanity check on group size
if group_size < 1 or group_size > len(score) // 2:
err = "group_size must be between 1 and num_students/2\n"
raise ValueError(err)
# Assign groups
if group_size == 2:
groups = create_partners(score)
else:
groups = simple_break(score,group_size)
# Give groups names
final_groups = [None for _ in range(len(score))]
G = GroupNameGenerator(all_nouns=all_nouns)
for group in | |
= self.GetPatches(2)
self.SetPatchDeps(patch2, [patch1.id])
self._SetQuery(series, patch1).AndReturn(patch1)
self.mox.ReplayAll()
self.assertResults(series, [patch2],
[], [patch2])
self.mox.VerifyAll()
def testApplyWithCommittedDeps(self):
"""Test that we apply a change with dependency already committed."""
series = self.GetPatchSeries()
# Use for basic commit check.
patch1 = self.GetPatches(1, is_merged=True)
patch2 = self.GetPatches(1)
self.SetPatchDeps(patch2, [patch1.id])
self._SetQuery(series, patch1).AndReturn(patch1)
self.SetPatchApply(patch2)
# Used to ensure that an uncommitted change put in the lookup cache
# isn't invalidly pulled into the graph...
patch3, patch4, patch5 = self.GetPatches(3)
self._SetQuery(series, patch3).AndReturn(patch3)
self.SetPatchDeps(patch4, [patch3.id])
self.SetPatchDeps(patch5, [patch3.id])
self.mox.ReplayAll()
self.assertResults(series, [patch2, patch4, patch5], [patch2],
[patch4, patch5])
self.mox.VerifyAll()
def testCyclicalDeps(self):
"""Verify that the machinery handles cycles correctly."""
series = self.GetPatchSeries()
patch1, patch2, patch3 = patches = self.GetPatches(3)
self.SetPatchDeps(patch1, [patch2.id])
self.SetPatchDeps(patch2, cq=[patch3.id])
self.SetPatchDeps(patch3, [patch1.id])
self.SetPatchApply(patch1)
self.SetPatchApply(patch2)
self.SetPatchApply(patch3)
self.mox.ReplayAll()
self.assertResults(series, patches, [patch2, patch1, patch3])
self.mox.VerifyAll()
def testComplexCyclicalDeps(self, fail=False):
"""Verify handling of two interdependent cycles."""
series = self.GetPatchSeries()
# Create two cyclically interdependent patch chains.
# Example: Two patch series A1<-A2<-A3<-A4 and B1<-B2<-B3<-B4. A1 has a
# CQ-DEPEND on B4 and B1 has a CQ-DEPEND on A4, so all of the patches must
# be committed together.
chain1, chain2 = chains = self.GetPatches(4), self.GetPatches(4)
for chain in chains:
(other_chain,) = [x for x in chains if x != chain]
self.SetPatchDeps(chain[0], [], cq=[other_chain[-1].id])
for i in range(1, len(chain)):
self.SetPatchDeps(chain[i], [chain[i-1].id])
# Apply the second-last patch first, so that the last patch in the series
# will be pulled in via the CQ-DEPEND on the other patch chain.
to_apply = [chain1[-2]] + [x for x in (chain1 + chain2) if x != chain1[-2]]
# All of the patches but chain[-1] were applied successfully.
for patch in chain1[:-1] + chain2:
self.SetPatchApply(patch)
if fail:
# Pretend that chain[-1] failed to apply.
res = self.SetPatchApply(chain1[-1])
res.AndRaise(cros_patch.ApplyPatchException(chain1[-1]))
applied = []
failed_tot = to_apply
else:
# We apply the patches in this order since the last patch in chain1
# is pulled in via CQ-DEPEND.
self.SetPatchApply(chain1[-1])
applied = chain1[:-1] + chain2 + [chain1[-1]]
failed_tot = []
self.mox.ReplayAll()
self.assertResults(series, to_apply, applied=applied, failed_tot=failed_tot)
self.mox.VerifyAll()
def testFailingComplexCyclicalDeps(self):
"""Verify handling of failing interlocked cycles."""
self.testComplexCyclicalDeps(fail=True)
def testApplyPartialFailures(self):
"""Test that can apply changes correctly when one change fails to apply.
This tests a simple change order where 1 depends on 2 and 1 fails to apply.
Only 1 should get tried as 2 will abort once it sees that 1 can't be
applied. 3 with no dependencies should go through fine.
Since patch1 fails to apply, we should also get a call to handle the
failure.
"""
series = self.GetPatchSeries()
patch1, patch2, patch3, patch4 = patches = self.GetPatches(4)
self.SetPatchDeps(patch1)
self.SetPatchDeps(patch2, [patch1.id])
self.SetPatchDeps(patch3)
self.SetPatchDeps(patch4)
self.SetPatchApply(patch1).AndRaise(
cros_patch.ApplyPatchException(patch1))
self.SetPatchApply(patch3)
self.SetPatchApply(patch4).AndRaise(
cros_patch.ApplyPatchException(patch1, inflight=True))
self.mox.ReplayAll()
self.assertResults(series, patches,
[patch3], [patch2, patch1], [patch4])
self.mox.VerifyAll()
def testComplexApply(self):
"""More complex deps test.
This tests a total of 2 change chains where the first change we see
only has a partial chain with the 3rd change having the whole chain i.e.
1->2, 3->1->2. Since we get these in the order 1,2,3,4,5 the order we
should apply is 2,1,3,4,5.
This test also checks the patch order to verify that Apply re-orders
correctly based on the chain.
"""
series = self.GetPatchSeries()
patch1, patch2, patch3, patch4, patch5 = patches = self.GetPatches(5)
self.SetPatchDeps(patch1, [patch2.id])
self.SetPatchDeps(patch2)
self.SetPatchDeps(patch3, [patch1.id, patch2.id])
self.SetPatchDeps(patch4, cq=[patch5.id])
self.SetPatchDeps(patch5)
for patch in (patch2, patch1, patch3, patch4, patch5):
self.SetPatchApply(patch)
self.mox.ReplayAll()
self.assertResults(
series, patches, [patch2, patch1, patch3, patch4, patch5])
self.mox.VerifyAll()
def testApplyStandalonePatches(self):
"""Simple apply of two changes with no dependent CL's."""
series = self.GetPatchSeries()
patches = self.GetPatches(3)
for patch in patches:
self.SetPatchDeps(patch)
for patch in patches:
self.SetPatchApply(patch)
self.mox.ReplayAll()
self.assertResults(series, patches, patches)
self.mox.VerifyAll()
def MakePool(overlays=constants.PUBLIC_OVERLAYS, build_number=1,
builder_name='foon', is_master=True, dryrun=True, **kwargs):
"""Helper for creating ValidationPool objects for tests."""
kwargs.setdefault('changes', [])
build_root = kwargs.pop('build_root', '/fake_root')
pool = validation_pool.ValidationPool(
overlays, build_root, build_number, builder_name, is_master,
dryrun, **kwargs)
return pool
class MockPatchSeries(partial_mock.PartialMock):
"""Mock the PatchSeries functions."""
TARGET = 'chromite.cbuildbot.validation_pool.PatchSeries'
ATTRS = ('GetDepsForChange', '_GetGerritPatch', '_LookupHelper')
def __init__(self):
partial_mock.PartialMock.__init__(self)
self.deps = {}
self.cq_deps = {}
def SetGerritDependencies(self, patch, deps):
"""Add |deps| to the Gerrit dependencies of |patch|."""
self.deps[patch] = deps
def SetCQDependencies(self, patch, deps):
"""Add |deps| to the CQ dependencies of |patch|."""
self.cq_deps[patch] = deps
def GetDepsForChange(self, _inst, patch):
return self.deps.get(patch, []), self.cq_deps.get(patch, [])
def _GetGerritPatch(self, _inst, dep, **_kwargs):
return dep
_LookupHelper = mock.MagicMock()
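# Illustrative sketch (added for this write-up, not part of the original
# suite): how a test might wire fake dependencies into MockPatchSeries. The
# patch arguments are assumed to come from self.GetPatches() in a test case.
def _ExampleMockPatchSeriesWiring(patch1, patch2, patch3):
  patch_mock = MockPatchSeries()
  patch_mock.SetGerritDependencies(patch2, [patch1])
  patch_mock.SetCQDependencies(patch1, [patch3])
  # The mocked GetDepsForChange() now reports these dependencies to the code
  # under test; the first argument stands in for the patched instance.
  return patch_mock.GetDepsForChange(None, patch2)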
class TestSubmitChange(MoxBase):
"""Test suite related to submitting changes."""
def setUp(self):
self.orig_timeout = validation_pool.SUBMITTED_WAIT_TIMEOUT
validation_pool.SUBMITTED_WAIT_TIMEOUT = 4
def tearDown(self):
validation_pool.SUBMITTED_WAIT_TIMEOUT = self.orig_timeout
def _TestSubmitChange(self, results):
"""Test submitting a change with the given results."""
results = [cros_test_lib.EasyAttr(status=r) for r in results]
change = self.MockPatch(change_id=12345, patch_number=1)
pool = self.mox.CreateMock(validation_pool.ValidationPool)
pool.dryrun = False
pool._metadata = metadata_lib.CBuildbotMetadata()
pool._helper_pool = self.mox.CreateMock(validation_pool.HelperPool)
helper = self.mox.CreateMock(validation_pool.gerrit.GerritHelper)
# Prepare replay script.
pool._helper_pool.ForChange(change).AndReturn(helper)
helper.SubmitChange(change, dryrun=False)
for result in results:
helper.QuerySingleRecord(change.gerrit_number).AndReturn(result)
self.mox.ReplayAll()
# Verify results.
retval = validation_pool.ValidationPool._SubmitChange(pool, change)
self.mox.VerifyAll()
return retval
def testSubmitChangeMerged(self):
"""Submit one change to gerrit, status MERGED."""
self.assertTrue(self._TestSubmitChange(['MERGED']))
def testSubmitChangeSubmitted(self):
"""Submit one change to gerrit, stuck on SUBMITTED."""
# The query will be retried 1 more time than query timeout.
results = ['SUBMITTED' for _i in
xrange(validation_pool.SUBMITTED_WAIT_TIMEOUT + 1)]
self.assertTrue(self._TestSubmitChange(results))
def testSubmitChangeSubmittedToMerged(self):
"""Submit one change to gerrit, status SUBMITTED then MERGED."""
results = ['SUBMITTED', 'SUBMITTED', 'MERGED']
self.assertTrue(self._TestSubmitChange(results))
def testSubmitChangeFailed(self):
"""Submit one change to gerrit, reported back as NEW."""
self.assertFalse(self._TestSubmitChange(['NEW']))
class ValidationFailureOrTimeout(MoxBase):
"""Tests that HandleValidationFailure and HandleValidationTimeout functions.
These tests check that HandleValidationTimeout and HandleValidationFailure
reject (i.e. zero out the CQ field) of the correct number of patches, under
various circumstances.
"""
_PATCH_MESSAGE = 'Your patch failed.'
_BUILD_MESSAGE = 'Your build failed.'
def setUp(self):
self._patches = self.GetPatches(3)
self._pool = MakePool(changes=self._patches)
self.PatchObject(
validation_pool.ValidationPool, 'GetCLStatus',
return_value=validation_pool.ValidationPool.STATUS_PASSED)
self.PatchObject(
validation_pool.CalculateSuspects, 'FindSuspects',
return_value=self._patches)
self.PatchObject(
validation_pool.ValidationPool, '_CreateValidationFailureMessage',
return_value=self._PATCH_MESSAGE)
self.PatchObject(validation_pool.ValidationPool, 'SendNotification')
self.PatchObject(validation_pool.ValidationPool, 'RemoveCommitReady')
self.PatchObject(validation_pool.ValidationPool, 'UpdateCLStatus')
self.PatchObject(validation_pool.ValidationPool, 'ReloadChanges',
return_value=self._patches)
self.PatchObject(validation_pool.CalculateSuspects, 'OnlyLabFailures',
return_value=False)
self.PatchObject(validation_pool.CalculateSuspects, 'OnlyInfraFailures',
return_value=False)
self.StartPatcher(parallel_unittest.ParallelMock())
def testPatchesWereRejectedByFailure(self):
"""Tests that all patches are rejected by failure."""
self._pool.HandleValidationFailure([self._BUILD_MESSAGE])
self.assertEqual(
len(self._patches), self._pool.RemoveCommitReady.call_count)
def testPatchesWereRejectedByTimeout(self):
self._pool.HandleValidationTimeout()
self.assertEqual(
len(self._patches), self._pool.RemoveCommitReady.call_count)
def testNoSuspectsWithFailure(self):
"""Tests no change is blamed when there is no suspect."""
self.PatchObject(validation_pool.CalculateSuspects, 'FindSuspects',
return_value=[])
self._pool.HandleValidationFailure([self._BUILD_MESSAGE])
self.assertEqual(0, self._pool.RemoveCommitReady.call_count)
def testPreCQ(self):
self._pool.pre_cq = True
self._pool.HandleValidationFailure([self._BUILD_MESSAGE])
self.assertEqual(0, self._pool.RemoveCommitReady.call_count)
def testPatchesWereNotRejectedByInsaneFailure(self):
self._pool.HandleValidationFailure([self._BUILD_MESSAGE], sanity=False)
self.assertEqual(0, self._pool.RemoveCommitReady.call_count)
class TestCoreLogic(MoxBase):
"""Tests resolution and applying logic of validation_pool.ValidationPool."""
def setUp(self):
self.mox.StubOutWithMock(validation_pool.PatchSeries, 'Apply')
self.mox.StubOutWithMock(validation_pool.PatchSeries, 'ApplyChange')
self.patch_mock = self.StartPatcher(MockPatchSeries())
funcs = ['SendNotification', '_SubmitChange']
for func in funcs:
self.mox.StubOutWithMock(validation_pool.ValidationPool, func)
self.PatchObject(validation_pool.ValidationPool, 'ReloadChanges',
side_effect=lambda x: x)
self.StartPatcher(parallel_unittest.ParallelMock())
def MakePool(self, *args, **kwargs):
"""Helper for creating ValidationPool objects for Mox tests."""
handlers = kwargs.pop('handlers', False)
kwargs['build_root'] = self.build_root
pool = MakePool(*args, **kwargs)
funcs = ['_HandleApplySuccess', '_HandleApplyFailure',
'_HandleCouldNotApply', '_HandleCouldNotSubmit']
if handlers:
for func in funcs:
self.mox.StubOutWithMock(pool, func)
return pool
def MakeFailure(self, patch, inflight=True):
return cros_patch.ApplyPatchException(patch, inflight=inflight)
def GetPool(self, changes, applied=(), tot=(), inflight=(), **kwargs):
pool = self.MakePool(changes=changes, **kwargs)
applied = list(applied)
tot = [self.MakeFailure(x, inflight=False) for x in tot]
inflight = [self.MakeFailure(x, inflight=True) for x in inflight]
# pylint: disable=E1120,E1123
validation_pool.PatchSeries.Apply(
changes, manifest=mox.IgnoreArg()
).AndReturn((applied, tot, inflight))
for patch in applied:
pool._HandleApplySuccess(patch).AndReturn(None)
if tot:
pool._HandleApplyFailure(tot).AndReturn(None)
# We stash this on the pool object so we can reuse it during validation.
# We could stash this in the test instances, but that would break
# for any tests that do multiple pool instances.
pool._test_data = (changes, applied, tot, inflight)
return pool
def testApplySlavePool(self):
"""Verifies that slave calls ApplyChange() directly for each patch."""
slave_pool = self.MakePool(is_master=False)
patches = self.GetPatches(3)
slave_pool.changes = patches
for patch in patches:
# pylint: disable=E1120, E1123
validation_pool.PatchSeries.ApplyChange(patch, manifest=mox.IgnoreArg())
self.mox.ReplayAll()
self.assertEqual(True, slave_pool.ApplyPoolIntoRepo())
self.mox.VerifyAll()
def runApply(self, pool, result):
self.assertEqual(result, pool.ApplyPoolIntoRepo())
self.assertEqual(pool.changes, pool._test_data[1])
failed_inflight = pool.changes_that_failed_to_apply_earlier
expected_inflight = set(pool._test_data[3])
    # Intersect the results, since it's possible there were failed results
    # that weren't related to the ApplyPoolIntoRepo call.
self.assertEqual(set(failed_inflight).intersection(expected_inflight),
expected_inflight)
self.assertEqual(pool.changes, pool._test_data[1])
def testPatchSeriesInteraction(self):
"""Verify the interaction between PatchSeries and ValidationPool.
Effectively, this validates data going into PatchSeries, and coming back
out; verifies the hand off to _Handle* functions, but no deeper.
"""
patches = self.GetPatches(3)
apply_pool = self.GetPool(patches, applied=patches, handlers=True)
all_inflight = self.GetPool(patches, inflight=patches, handlers=True)
all_tot = self.GetPool(patches, tot=patches, handlers=True)
mixed = self.GetPool(patches, tot=patches[0:1], inflight=patches[1:2],
applied=patches[2:3], handlers=True)
self.mox.ReplayAll()
self.runApply(apply_pool, True)
self.runApply(all_inflight, False)
self.runApply(all_tot, False)
self.runApply(mixed, True)
self.mox.VerifyAll()
def testHandleApplySuccess(self):
"""Validate steps taken for successfull application."""
patch = self.GetPatches(1)
pool = self.MakePool()
    pool.SendNotification(patch, mox.StrContains('has picked up your
as ex:
            if str(ex).find('tsk_fs_dir_open: path not found') >= 0:
log.debug("Path not found : " + path)
else:
log.debug("Exception details:\n", exc_info=True) #traceback.print_exc()
log.error("Failed to get dir info!")
return items
def Open(self, path):
'''Open files less than 200 MB, returns open file handle'''
if self.use_native_hfs_parser:
return self.hfs_native.Open(path)
try:
log.debug("Trying to open file : " + path)
tsk_file = self.macos_FS.open(path)
size = tsk_file.info.meta.size
if size > 209715200:
raise ValueError('File size > 200 MB, use direct TSK file functions!')
f = tempfile.SpooledTemporaryFile(max_size=209715200)
BUFF_SIZE = 20 * 1024 * 1024
offset = 0
while offset < size:
available_to_read = min(BUFF_SIZE, size - offset)
data = tsk_file.read_random(offset, available_to_read)
if not data: break
offset += len(data)
f.write(data)
f.seek(0)
return f
except Exception as ex:
if str(ex).find('tsk_fs_file_open: path not found:') > 0:
log.error("Open() returned 'Path not found' error for path: {}".format(path))
elif str(ex).find('tsk_fs_attrlist_get: Attribute 4352 not found') > 0 or \
(str(ex).find('Read error: Invalid file offset') > 0 and self._IsFileCompressed(tsk_file)) or \
str(ex).find('Read error: Error in metadata') > 0:
log.debug("Known TSK bug caused Error: Failed to open file {}".format(path))
log.debug("Trying to open with Native HFS parser")
try:
if not self.hfs_native.initialized:
self.hfs_native.Initialize(self.pytsk_image, self.macos_partition_start_offset)
return self.hfs_native.Open(path)
except (OSError, ValueError):
log.error("Failed to open file: " + path)
log.debug("Exception details:\n", exc_info=True)
else:
log.exception("Failed to open file {}".format(path))
return None
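    # Illustrative sketch (not from the original source): how a plugin might
    # read a small file via Open(); the path below is only an example.
    def _ExampleReadSmallFile(self, path='/private/etc/hosts'):
        '''Returns file content as bytes, or None if the file cannot be read.'''
        f = self.Open(path)
        if f is None:
            return None
        data = f.read()
        f.close()
        return data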
def ExtractFile(self, tsk_path, destination_path):
'''Extract a file from image to provided destination path'''
if self.use_native_hfs_parser:
return self.hfs_native.ExtractFile(tsk_path, destination_path)
try:
tsk_file = self.macos_FS.open(tsk_path)
size = tsk_file.info.meta.size
BUFF_SIZE = 20 * 1024 * 1024
offset = 0
try:
with open(destination_path, 'wb') as f:
while offset < size:
available_to_read = min(BUFF_SIZE, size - offset)
try:
data = tsk_file.read_random(offset, available_to_read)
if not data: break
offset += len(data)
f.write(data)
except Exception as ex:
if str(ex).find('tsk_fs_attrlist_get: Attribute 4352 not found') > 0 or \
(str(ex).find('Read error: Invalid file offset') > 0 and self._IsFileCompressed(tsk_file)) or \
str(ex).find('Read error: Error in metadata') > 0:
log.debug("Known TSK bug caused Error: Failed to read file {}".format(tsk_path))
log.debug("Trying to read with Native HFS parser")
try:
f.close()
os.remove(destination_path)
if not self.hfs_native.initialized:
self.hfs_native.Initialize(self.pytsk_image, self.macos_partition_start_offset)
return self.hfs_native.ExtractFile(tsk_path,destination_path)
except Exception as ex2:
log.error("Failed to export file: " + tsk_path)
log.debug("Exception details:\n", exc_info=True)
return False
else:
log.exception("Failed to read file {}".format(tsk_path))
return False
f.flush()
f.close()
return True
except Exception as ex:
log.error (" Failed to create file for writing - " + destination_path + "\n" + str(ex))
log.debug("Exception details:", exc_info=True)
except Exception as ex:
if str(ex).find('tsk_fs_file_open: path not found:') > 0:
log.debug("Open() returned 'Path not found' error for path: {}".format(tsk_path))
else:
log.error("Failed to open/find file: " + tsk_path)
return False
def GetArrayFirstElement(self, array, error=''):
'''Safely return zero'th element'''
try:
return array[0]
except IndexError:
pass
return error
def GetVersionDictionary(self):
'''Returns macOS version as dictionary {major:10, minor:5 , micro:0}'''
version_dict = { 'major':0, 'minor':0, 'micro':0 }
info = self.os_version.split(".")
try:
version_dict['major'] = int(info[0])
try:
version_dict['minor'] = int(info[1])
try:
version_dict['micro'] = int(info[2])
except Exception:
pass
except Exception:
pass
except Exception:
pass
return version_dict
def GetUserAndGroupIDForFolder(self, path):
'''
Returns tuple (success, UID, GID) for folder identified by path
If failed to get values, success=False
UID & GID are returned as strings
'''
success, uid, gid = False, 0, 0
try:
path_dir = self.macos_FS.open_dir(path)
uid = str(path_dir.info.fs_file.meta.uid)
gid = str(path_dir.info.fs_file.meta.gid)
success = True
except Exception as ex:
log.error("Exception trying to get uid & gid for folder " + path + ' Exception details: ' + str(ex))
return success, uid, gid
def GetUserAndGroupIDForFile(self, path):
'''
Returns tuple (success, UID, GID) for file identified by path
If failed to get values, success=False
UID & GID are returned as strings
'''
success, uid, gid = False, 0, 0
try:
path_file = self.macos_FS.open(path)
uid = str(path_file.info.meta.uid)
gid = str(path_file.info.meta.gid)
success = True
except Exception as ex:
log.error("Exception trying to get uid & gid for file " + path + ' Exception details: ' + str(ex))
return success, uid, gid
# Private (Internal) functions, plugins should not use these
def _GetSafeFilename(self, name):
'''
Removes illegal characters from filenames
Eg: Windows does not like ?<>/\:*"! in filename
'''
try:
            unsafe_chars = '?<>/\\:*"!\r\n' if self.is_windows else '/'
return ''.join([c for c in name if c not in unsafe_chars])
except:
pass
return "_error_no_name_"
def _IsFileCompressed(self, tsk_file):
'''For a pytsk3 file entry, determines if a file is compressed'''
try:
return int(tsk_file.info.meta.flags) & pytsk3.TSK_FS_META_FLAG_COMP
except Exception as ex:
log.error (" Unknown exception from _IsFileCompressed() " + str(ex))
return False
def _GetSize(self, entry):
'''For a pytsk3 file entry, gets logical file size, or 0 if error'''
try:
return entry.info.meta.size
except Exception as ex:
log.error (" Unknown exception from _GetSize() " + str(ex))
return 0
def _GetName(self, entry):
'''Return utf8 filename from pytsk entry object'''
try:
return entry.info.name.name.decode("utf8", "ignore")
except UnicodeError:
#log.debug("UnicodeError getting name ")
pass
except Exception as ex:
log.error (" Unknown exception from GetName:" + str(ex))
return ""
def _CheckFileContents(self, f):
f.seek(0)
header = f.read(4)
if len(header) == 4 and header == b'\0\0\0\0':
log.error('File header was zeroed out. If the source is an E01 file, this may be a libewf problem.'\
' Try to use a different version of libewf. Read more about this here:'\
' https://github.com/ydkhatri/mac_apt/wiki/Known-issues-and-Workarounds')
def _IsValidFileOrFolderEntry(self, entry):
try:
if entry.info.name.type == pytsk3.TSK_FS_NAME_TYPE_REG:
return True
elif entry.info.name.type == pytsk3.TSK_FS_NAME_TYPE_DIR:
return True
else:
log.warning(" Found invalid entry - " + self._GetName(entry) + " " + str(entry.info.name.type) )
except Exception:
log.error(" Unknown exception from _IsValidFileOrFolderEntry:" + self._GetName(entry))
log.debug("Exception details:\n", exc_info=True)
return False
def _GetDomainUserInfo(self):
'''Populates self.users with data from /Users/'''
log.debug('Trying to get domain profiles from /Users/')
users_folder = self.ListItemsInFolder('/Users/', EntryType.FOLDERS)
for folder in users_folder:
folder_path = '/Users/' + folder['name']
success, uid, gid = self.GetUserAndGroupIDForFolder(folder_path)
if success:
found_user = False
for user in self.users:
if user.UID == uid:
found_user = True
break
if found_user: continue
else:
target_user = UserInfo()
self.users.append(target_user)
target_user.UID = uid
target_user.GID = gid
#target_user.UUID = unknown
target_user.home_dir = folder_path
target_user.user_name = folder['name']
target_user.real_name = folder['name']
target_user._source = folder_path
def _ReadPasswordPolicyData(self, password_policy_data, target_user):
try:
plist2 = biplist.readPlistFromString(password_policy_data[0])
target_user.failed_login_count = plist2.get('failedLoginCount', 0)
target_user.failed_login_timestamp = plist2.get('failedLoginTimestamp', None)
target_user.last_login_timestamp = plist2.get('lastLoginTimestamp', None)
target_user.password_last_set_time = plist2.get('passwordLastSetTime', None)
except (biplist.InvalidPlistException, biplist.NotBinaryPlistException):
log.exception('Error reading password_policy_data embedded plist')
def _ReadAccountPolicyData(self, account_policy_data, target_user):
try:
plist2 = biplist.readPlistFromString(account_policy_data[0])
target_user.creation_time = CommonFunctions.ReadUnixTime(plist2.get('creationTime', None))
target_user.failed_login_count = plist2.get('failedLoginCount', 0)
target_user.failed_login_timestamp = CommonFunctions.ReadUnixTime(plist2.get('failedLoginTimestamp', None))
target_user.password_last_set_time = CommonFunctions.ReadUnixTime(plist2.get('passwordLastSetTime', None))
except (biplist.InvalidPlistException, biplist.NotBinaryPlistException):
            log.exception('Error reading account_policy_data embedded plist')
def _GetUserInfo(self):
'''Populates user info from plists under: /private/var/db/dslocal/nodes/Default/users/'''
#TODO - make a better plugin that gets all user & group info
users_path = '/private/var/db/dslocal/nodes/Default/users'
user_plists = self.ListItemsInFolder(users_path, EntryType.FILES)
for plist_meta in user_plists:
if plist_meta['size'] > 0:
try:
user_plist_path = users_path + '/' + plist_meta['name']
f = self.Open(user_plist_path)
                    if f is not None:
self.ExportFile(user_plist_path, 'USERS', '', False)
try:
plist = biplist.readPlist(f)
home_dir = self.GetArrayFirstElement(plist.get('home', ''))
if home_dir != '':
#log.info('{} : {}'.format(plist_meta['name'], home_dir))
if home_dir.startswith('/var/'): home_dir = '/private' + home_dir # in mac /var is symbolic link to /private/var
target_user = UserInfo()
self.users.append(target_user)
target_user.UID = str(self.GetArrayFirstElement(plist.get('uid', '')))
target_user.GID = str(self.GetArrayFirstElement(plist.get('gid', '')))
target_user.UUID = self.GetArrayFirstElement(plist.get('generateduid', ''))
target_user.home_dir = home_dir
target_user.user_name = self.GetArrayFirstElement(plist.get('name', ''))
target_user.real_name = self.GetArrayFirstElement(plist.get('realname', ''))
target_user.pw_hint = self.GetArrayFirstElement(plist.get('hint', ''))
target_user._source = user_plist_path
os_version = self.GetVersionDictionary()
if os_version['major'] == 10 and os_version['minor'] <= 9: # Mavericks & earlier
password_policy_data = plist.get('passwordpolicyoptions', None)
if password_policy_data == None:
log.debug('Could not find passwordpolicyoptions for user {}'.format(target_user.user_name))
else:
self._ReadPasswordPolicyData(password_policy_data, target_user)
else: # 10.10 - Yosemite & higher
account_policy_data = plist.get('accountPolicyData', None)
if account_policy_data == None:
pass #log.debug('Could not find accountPolicyData for user {}'.format(target_user.user_name))
else:
self._ReadAccountPolicyData(account_policy_data, target_user)
else:
log.error('Did not find \'home\' in ' + plist_meta['name'])
except (biplist.InvalidPlistException, biplist.NotBinaryPlistException):
log.exception("biplist failed to read plist " + user_plist_path)
self._CheckFileContents(f)
f.close()
except (OSError, KeyError, ValueError, IndexError, TypeError):
log.exception ("Could not open/read plist " + user_plist_path)
self._GetDomainUserInfo()
self._GetDarwinFoldersInfo() # This probably does not apply to OSX < Mavericks !
def _GetDarwinFoldersInfo(self):
'''Gets DARWIN_*_DIR paths by looking up folder permissions'''
users_dir = self.ListItemsInFolder('/private/var/folders', EntryType.FOLDERS)
for unknown1 in users_dir:
unknown1_name = unknown1['name']
unknown1_dir = self.ListItemsInFolder('/private/var/folders/' + unknown1_name, EntryType.FOLDERS)
for unknown2 in unknown1_dir:
unknown2_name = unknown2['name']
                path
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, OMRON SINIC X
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of OMRON SINIC X nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>, <NAME>
import actionlib
import rospy
import o2ac_msgs.msg
from o2ac_routines.helpers import check_for_real_robot, wait_for_UR_program
import std_srvs.srv # toggleCollisions_client
import geometry_msgs.msg # urscript_client
class SkillServerClient():
def __init__(self):
self.pick_screw_from_feeder_client = actionlib.SimpleActionClient('/o2ac_skills/pick_screw_from_feeder', o2ac_msgs.msg.pickScrewFromFeederAction)
self.place_client = actionlib.SimpleActionClient('/o2ac_skills/place', o2ac_msgs.msg.placeAction)
self.regrasp_client = actionlib.SimpleActionClient('/o2ac_skills/regrasp', o2ac_msgs.msg.regraspAction)
self.screw_client = actionlib.SimpleActionClient('/o2ac_skills/screw', o2ac_msgs.msg.screwAction)
self.change_tool_client = actionlib.SimpleActionClient('/o2ac_skills/change_tool', o2ac_msgs.msg.changeToolAction)
self.publishMarker_client = rospy.ServiceProxy('/o2ac_skills/publishMarker', o2ac_msgs.srv.publishMarker)
self.disable_markers = True
self.toggleCollisions_client = rospy.ServiceProxy('/o2ac_skills/toggleCollisions', std_srvs.srv.SetBool)
self.urscript_client = rospy.ServiceProxy('/o2ac_skills/sendScriptToUR', o2ac_msgs.srv.sendScriptToUR)
self.use_real_robot = rospy.get_param("use_real_robot", False)
@check_for_real_robot
def pick_screw_from_feeder(self, robot_name, screw_size, realign_tool_upon_failure=True):
"""
Picks a screw from one of the feeders. The screw tool already has to be equipped!
Use this command to equip the screw tool: do_change_tool_action(self, "b_bot", equip=True, screw_size = 4)
"""
goal = o2ac_msgs.msg.pickScrewFromFeederGoal()
goal.robot_name = robot_name
goal.screw_size = screw_size
rospy.loginfo("Sending pickScrewFromFeeder action goal")
rospy.logdebug(goal)
self.pick_screw_from_feeder_client.send_goal(goal)
rospy.logdebug("Waiting for result")
self.pick_screw_from_feeder_client.wait_for_result(rospy.Duration(60.0))
rospy.logdebug("Getting result")
return self.pick_screw_from_feeder_client.get_result()
def do_place_action(self, robot_name, pose_stamped, tool_name="", screw_size=0):
# Call the place action
goal = o2ac_msgs.msg.placeGoal()
goal.robot_name = robot_name
goal.item_pose = pose_stamped
goal.tool_name = tool_name
goal.screw_size = screw_size
rospy.loginfo("Sending place action goal")
rospy.logdebug(goal)
self.place_client.send_goal(goal)
rospy.logdebug("Waiting for result")
self.place_client.wait_for_result(rospy.Duration(90.0))
rospy.logdebug("Getting result")
return self.place_client.get_result()
def do_regrasp(self, giver_robot_name, receiver_robot_name, grasp_distance=.02):
"""The item goes from giver to receiver."""
goal = o2ac_msgs.msg.regraspGoal()
goal.giver_robot_name = giver_robot_name
goal.receiver_robot_name = receiver_robot_name
goal.grasp_distance = grasp_distance
self.regrasp_client.send_goal(goal)
rospy.loginfo("Performing regrasp with grippers " + giver_robot_name + " and " + receiver_robot_name)
self.regrasp_client.wait_for_result(rospy.Duration(90.0))
return self.regrasp_client.get_result()
@check_for_real_robot
def do_screw_action(self, robot_name, target_hole, screw_height=0.02,
screw_size=4, stay_put_after_screwing=False, loosen_and_retighten_when_done=True):
goal = o2ac_msgs.msg.screwGoal()
goal.target_hole = target_hole
goal.screw_height = screw_height
goal.screw_size = screw_size
goal.robot_name = robot_name
goal.stay_put_after_screwing = stay_put_after_screwing
goal.loosen_and_retighten_when_done = loosen_and_retighten_when_done
rospy.loginfo("Sending screw action goal.")
self.screw_client.send_goal(goal)
self.screw_client.wait_for_result()
res = self.screw_client.get_result()
try:
return res.success
except:
print("failed to return screw result")
print(res)
return False
def do_change_tool_action(self, robot_name, equip=True, screw_size=4):
# DEPRECATED
goal = o2ac_msgs.msg.changeToolGoal()
goal.robot_name = robot_name
goal.equip_the_tool = equip
goal.screw_size = screw_size
rospy.loginfo("Sending changeTool action goal.")
self.change_tool_client.send_goal(goal)
self.change_tool_client.wait_for_result()
return self.change_tool_client.get_result()
def publish_marker(self, pose_stamped, marker_type):
# Publishes a marker to Rviz for visualization
if self.disable_markers:
return True
req = o2ac_msgs.srv.publishMarkerRequest()
req.marker_pose = pose_stamped
req.marker_type = marker_type
self.publishMarker_client.call(req)
return True
def toggle_collisions(self, collisions_on):
"""Turns collisions in MoveIt on and off. Use with caution!"""
req = std_srvs.srv.SetBoolRequest()
req.data = collisions_on
res = self.toggleCollisions_client.call(req)
return res.success
##### URScript with skill server? #####
def move_lin_rel(self, robot_name, relative_translation=[0, 0, 0], relative_rotation=[0, 0, 0], acceleration=0.5, velocity=.03, relative_to_robot_base=False, wait=True, max_wait=30.0):
'''
Does a lin_move relative to the current position of the robot. Uses the robot's TCP.
robot_name = "b_bot" for example
relative_translation: translatory movement relative to current tcp position, expressed in robot's own base frame
relative_rotation: rotatory movement relative to current tcp position, expressed in robot's own base frame
relative_to_robot_base: If true, uses the robot_base coordinates for the relative motion (not workspace_center!)
'''
if rospy.is_shutdown():
return False
# Uses UR coordinates
if not self.use_real_robot:
return True
# Directly calls the UR service
req = o2ac_msgs.srv.sendScriptToURRequest()
req.robot_name = robot_name
req.relative_translation.x = relative_translation[0]
req.relative_translation.y = relative_translation[1]
req.relative_translation.z = relative_translation[2]
req.relative_rotation.x = relative_rotation[0]
req.relative_rotation.y = relative_rotation[1]
req.relative_rotation.z = relative_rotation[2]
req.acceleration = acceleration
req.velocity = velocity
req.lin_move_rel_in_base_csys = relative_to_robot_base
req.program_id = "lin_move_rel"
res = self.urscript_client.call(req)
if wait:
rospy.sleep(1.0)
wait_for_UR_program("/" + robot_name, rospy.Duration.from_sec(max_wait))
return res.success
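    def example_move_lin_rel_usage(self):
        """Illustrative sketch only (not part of the original client): nudge
        b_bot's TCP 5 cm straight up in its own base frame and wait for the
        UR program to finish. The values are placeholders, not tuned motions."""
        return self.move_lin_rel("b_bot",
                                 relative_translation=[0, 0, 0.05],
                                 relative_rotation=[0, 0, 0],
                                 acceleration=0.1,
                                 velocity=0.05,
                                 relative_to_robot_base=True,
                                 wait=True)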
def move_joints(self, group_name, joint_pose_goal, speed, acceleration):
rospy.logdebug("Real robot is being used. Send joint command to robot controller directly via URScript.")
req = o2ac_msgs.srv.sendScriptToURRequest()
req.program_id = "move_j"
req.robot_name = group_name
req.joint_positions = joint_pose_goal
req.velocity = speed
req.acceleration = acceleration
res = self.urscript_client.call(req)
wait_for_UR_program("/" + group_name, rospy.Duration.from_sec(20.0))
return res.success
def move_lin(self, group_name, pose_goal_stamped, end_effector_link, speed, acceleration, listener):
rospy.logdebug("Real robot is being used. Send linear motion to robot controller directly via URScript.")
req = o2ac_msgs.srv.sendScriptToURRequest()
req.program_id = "lin_move"
req.robot_name = group_name
req.target_pose = self.transformTargetPoseFromTipLinkToURTCP(pose_goal_stamped, group_name, end_effector_link, listener)
req.velocity = speed
req.acceleration = acceleration
res = self.urscript_client.call(req)
wait_for_UR_program("/" + group_name, rospy.Duration.from_sec(30.0))
return res.success
def transformTargetPoseFromTipLinkToURTCP(self, ps, robot_name, end_effector_link, listener):
# This transforms a pose from the end_effector_link set in MoveIt to the TCP used in the UR controller.
# It is used when sending commands to the UR controller directly, without MoveIt/ROS controllers.
rospy.logdebug("Received pose to transform to TCP link:")
rospy.logdebug(str(ps.pose.position.x) + ", " + str(ps.pose.position.y) + ", " + str(ps.pose.position.z))
rospy.logdebug(str(ps.pose.orientation.x) + ", " + str(ps.pose.orientation.y) + ", " + str(ps.pose.orientation.z) + ", " + str(ps.pose.orientation.w))
t = listener.lookupTransform(end_effector_link, robot_name + "_tool0", rospy.Time())
m = geometry_msgs.msg.TransformStamped()
m.header.frame_id = ps.header.frame_id
m.child_frame_id = "temp_goal_pose__"
m.transform.translation.x = ps.pose.position.x
m.transform.translation.y = ps.pose.position.y
m.transform.translation.z = ps.pose.position.z
m.transform.rotation.x = ps.pose.orientation.x
m.transform.rotation.y = ps.pose.orientation.y
m.transform.rotation.z = ps.pose.orientation.z
m.transform.rotation.w = ps.pose.orientation.w
listener.setTransform(m)
m.header.frame_id = "temp_goal_pose__"
m.child_frame_id = "temp_wrist_pose__"
m.transform.translation.x = t[0][0]
m.transform.translation.y = t[0][1]
m.transform.translation.z = t[0][2]
m.transform.rotation.x = t[1][0]
m.transform.rotation.y = t[1][1]
m.transform.rotation.z = t[1][2]
m.transform.rotation.w = t[1][3]
listener.setTransform(m)
ps_wrist = geometry_msgs.msg.PoseStamped()
ps_wrist.header.frame_id = "temp_wrist_pose__"
ps_wrist.pose.orientation.w = 1.0
ps_new = listener.transformPose(ps.header.frame_id, ps_wrist)
rospy.logdebug("New pose:")
rospy.logdebug(str(ps_new.pose.position.x) + ", " + str(ps_new.pose.position.y) + ", " + str(ps_new.pose.position.z))
rospy.logdebug(str(ps_new.pose.orientation.x) + ", " + str(ps_new.pose.orientation.y) + ", " + str(ps_new.pose.orientation.z) + ", " + str(ps_new.pose.orientation.w))
return ps_new
def horizontal_spiral_motion(self, robot_name, max_radius=.01, radius_increment=.001, speed=0.02, spiral_axis="Z"):
if rospy.is_shutdown():
return False
rospy.loginfo("Performing horizontal spiral motion at speed " + str(speed) + " and radius " + str(max_radius))
if not self.use_real_robot:
return True
req = o2ac_msgs.srv.sendScriptToURRequest()
req.program_id = "spiral_motion"
req.robot_name = robot_name
req.max_radius = max_radius
req.radius_increment = radius_increment
req.velocity = speed
req.spiral_axis = spiral_axis
res = self.urscript_client.call(req)
wait_for_UR_program("/" + robot_name, rospy.Duration.from_sec(30.0))
return res.success
def do_insertion(self, robot_name, max_insertion_distance=0.0,
max_approach_distance=0.0, max_force=.0,
max_radius=0.0, radius_increment=.0,
peck_mode=False,
wait=True, horizontal=False):
if not self.use_real_robot:
return True
# Directly calls the UR service rather than the action of the skill_server
req = o2ac_msgs.srv.sendScriptToURRequest()
req.robot_name = robot_name
req.program_id = "insert"
if horizontal:
req.program_id = "horizontal_insertion"
# Original defaults:
# max_approach_distance = .1, max_force = 5,
# max_radius = .001, radius_increment = .0001,
req.max_insertion_distance = max_insertion_distance
req.max_approach_distance = max_approach_distance
req.max_force = max_force
req.peck_mode = peck_mode
req.max_radius = max_radius
req.radius_increment = radius_increment
res = self.urscript_client.call(req)
if wait:
rospy.sleep(2.0)
wait_for_UR_program("/" + robot_name, rospy.Duration.from_sec(30.0))
return res.success
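    def example_do_insertion_usage(self):
        """Illustrative sketch only (not part of the original client): start a
        vertical insertion with a small spiral search, using values close to
        the 'Original defaults' noted above; they are placeholders, not tuned
        parameters."""
        return self.do_insertion("b_bot",
                                 max_insertion_distance=0.02,
                                 max_approach_distance=0.1,
                                 max_force=5,
                                 max_radius=0.001,
                                 radius_increment=0.0001,
                                 peck_mode=False,
                                 wait=True,
                                 horizontal=False)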
def do_spiral_search(self, robot_name, max_insertion_distance=0.0,
max_approach_distance=0.0, max_force=.0,
max_radius=0.0, radius_increment=.0,
peck_mode=False, wait=True):
if not self.use_real_robot:
return True
# Directly calls the UR service rather than the action of the skill_server
req = o2ac_msgs.srv.sendScriptToURRequest()
req.robot_name = robot_name
req.program_id = "spiral"
# Original defaults:
# max_approach_distance = .1, max_force = 5,
# max_radius = .001, radius_increment = .0001,
req.max_insertion_distance = max_insertion_distance
req.max_approach_distance = max_approach_distance
req.max_force = max_force
req.peck_mode = peck_mode
req.max_radius = max_radius
req.radius_increment = radius_increment
res = self.urscript_client.call(req)
if wait:
rospy.sleep(2.0)
wait_for_UR_program("/" + robot_name, rospy.Duration.from_sec(30.0))
return res.success
def do_helix_motion(self, robot_name, max_force=50,
helix_forward_axis="Z+",
helix_forward_increment=0.01, helix_forward_limit=0.1,
max_radius=0.005, radius_increment=.005,
wait=True):
if not self.use_real_robot:
return True
rospy.loginfo("Performing helix motion with radius " + str(max_radius) + " and forward limit " + str(helix_forward_limit))
req = o2ac_msgs.srv.sendScriptToURRequest()
req.robot_name = robot_name
req.program_id = "helix_motion"
req.helix_forward_axis = helix_forward_axis
req.helix_forward_increment = helix_forward_increment
req.helix_forward_limit = helix_forward_limit
        req.max_force
the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, default=True
If ``'False'``, the ``cv_results_`` attribute will not include training
scores.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=..., return_train_score=...,
scoring=..., verbose=...)
>>> sorted(clf.cv_results_.keys())
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'mean_train_score', 'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split0_train_score', 'split1_test_score', 'split1_train_score',...
'split2_test_score', 'split2_train_score',...
'std_fit_time', 'std_score_time', 'std_test_score', 'std_train_score'...]
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise',
return_train_score=True):
super(GridSearchCVfastr, self).__init__(
scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score, fastr_plugin=None)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, groups=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, groups, ParameterGrid(self.param_grid))
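# Hedged usage sketch (added for illustration): GridSearchCVfastr is intended
# to follow the sklearn GridSearchCV interface documented above, so a search
# can be set up the same way. This assumes a working fastr execution backend
# is configured in the environment.
def _example_grid_search_fastr():
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
    search = GridSearchCVfastr(svm.SVC(), parameters)
    search.fit(iris.data, iris.target)
    return search.best_params_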
class RandomizedSearchCVJoblib(BaseSearchCVJoblib):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        A object of
1)
name = fh.readline().decode("utf-8").strip().lower()
return name[:1].upper() + name[1:]
def set_inputfunc(func):
"""
Set function to be used for blocking on input from the user. The default
if unset is to use a prompt session from prompt-toolkit reading from stdin
of the process where text_game_maker is running. The provided function is
responsible for blocking until user input is available, and must return the
user input as a string
:param func: function to use for reading user input
"""
info['inputfunc'] = func
def inputfunc(prompt):
"""
Block until user input is available
:param str prompt: string to prompt user for input
:return: user input
:rtype: str
"""
if info['inputfunc'] is None:
history = InMemoryHistory()
session = PromptSession(history=history, enable_history_search=True)
info['prompt_session'] = session
info['inputfunc'] = session.prompt
return info['inputfunc'](prompt)
def set_printfunc(func):
"""
Set function to be used for displaying game output. The default if unset is
to use standard python "print".
:param func: function to pass game output text to be displayed
"""
info['printfunc'] = func
def printfunc(text):
"""
Display game output
:param str text: text to display
:return: value returned by print function
"""
return info['printfunc'](text)
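# Illustrative sketch (not part of the original module): routing game I/O
# through custom callables, e.g. to drive the game from another front end.
# The callables below are placeholders.
def _example_redirect_io(output_buffer):
    def scripted_input(prompt):
        # A real front end would block here until the user types something.
        return "look"
    set_inputfunc(scripted_input)
    set_printfunc(output_buffer.append)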
def get_random_name():
"""
Get a random first and second name from old US census data, as a string
e.g. "<NAME>"
:return: random name
:rtype: str
"""
return '%s %s' % (_rand_line(_first_names), _rand_line(_middle_names))
def get_builder_instance():
return info['instance']
def set_builder_instance(ins):
info['instance'] = ins
def get_full_import_name(classobj):
module = classobj.__module__
if module is None or module == str.__class__.__module__:
return classobj.__name__ # Avoid reporting __builtin__
else:
return module + '.' + classobj.__name__
def register_serializable_class(classobj, name):
_serializable_classes[name] = classobj
class SubclassTrackerMetaClass(type):
def __init__(cls, name, bases, clsdict):
if get_serializable_class(name):
raise RuntimeError("There is already a game object class called "
"'%s', please choose a different class name" % name)
cls.full_class_name = get_full_import_name(cls)
register_serializable_class(cls, cls.full_class_name)
super(SubclassTrackerMetaClass, cls).__init__(name, bases, clsdict)
def get_serializable_class(name):
if name not in _serializable_classes:
return None
return _serializable_classes[name]
def add_serializable_callback(callback):
"""
Add a callback object to registry of callbacks that can be securely
referenced in a save file. An ID will be assigned to represent this
callback in save files. When loading a save file, whatever object you
pass here will be used when the same ID is seen.
:param callback: callback object to register
"""
_serializable_callbacks[get_full_import_name(callback)] = callback
def serializable_callback(callback):
"""
Decorator version of add_serializable_callback. Example:
::
from text_game_maker.player.player import serializable_callback
@serializable_callback
def my_callback_function():
pass
"""
add_serializable_callback(callback)
return callback
def serialize_callback(callback):
cb_name = get_full_import_name(callback)
if cb_name not in _serializable_callbacks:
raise RuntimeError("Not allowed to serialize callback '%s'. See"
" text_game_maker.utils.utils.add_serializable_callback"
% cb_name)
return cb_name
def deserialize_callback(callback_name):
if callback_name not in _serializable_callbacks:
raise RuntimeError("Cannot deserialize callback '%s'" % callback_name)
return _serializable_callbacks[callback_name]
def import_module_attribute(fullname):
fields = fullname.split(".")
modname = ".".join(fields[:-1])
classname = fields[-1]
return getattr(importlib.import_module(modname), classname)
def set_sequence_count(count):
info['sequence_count'] = count
def get_sequence_count():
return info['sequence_count']
def set_chardelay(delay):
info['chardelay'] = delay
def get_chardelay():
return info['chardelay']
def set_slow_printing(val):
info['slow_printing'] = val
def get_slow_printing():
return info['slow_printing']
def set_last_command(cmd):
info['last_command'] = cmd
def get_last_command():
return info['last_command']
def find_item_class(player, classobj, locations=None, ignore_dark=False):
"""
Find the first item that is an instance of a specific class in the provided
locations
:param text_game_maker.player.player.Player player: player object
:param str name: name of item to find
:param [[text_game_maker.game_objects.items.Item]] locations: location\
lists to search
:return: found item (None if no matching item is found)
:rtype: text_game_maker.items.Item
"""
if (not ignore_dark) and (not player.can_see()):
return None
if locations is None:
locations = player.current.items.values()
for itemlist in locations:
for item in itemlist:
if isinstance(item, classobj):
return item
return None
def find_item(player, name, locations=None, ignore_dark=False):
"""
Find an item by name in the provided locations
:param text_game_maker.player.player.Player player: player object
:param str name: name of item to find
:param [[text_game_maker.game_objects.items.Item]] locations: location\
lists to search. If None, the item list of the current tile is used
:return: found item (None if no matching item is found)
:rtype: text_game_maker.items.Item
"""
if (not ignore_dark) and (not player.can_see()):
return None
if locations is None:
locations = player.current.items.values()
for itemlist in locations:
for item in itemlist:
if item.matches_name(name):
return item
return None
def is_location(player, name):
"""
Checks if text matches the name of an adjacent tile that is connected to
the current tile
:param text_game_maker.player.player.Player player: player object
:param str name: text to check
:return: True if text matches adjacent tile name
:rtype: bool
"""
for direction in player.current.iterate_directions():
if direction and direction.matches_name(name):
return True
for loc in player.current.items:
if name in loc:
return True
return False
def get_all_items(player, locations=None, except_item=None):
"""
Retrieves all items from specified locations
:param text_game_maker.player.player.Player player: player object
:param [[text_game_maker.game_objects.items.Item]] locations: location lists to search.\
If None, the item list of the current room/tile is used
    :param object except_item: do not retrieve item from location if it is the\
        same memory object as except_item. If None, no items are ignored.
    :return: list of retrieved items
:rtype: [text_game_maker.game_objects.items.Item]
"""
if not player.can_see():
return []
if not locations:
locations = player.current.items.values()
ret = []
for loc in locations:
for item in loc:
if (not except_item is None) and (except_item is item):
continue
if item.scenery:
continue
ret.append(item)
return ret
def find_item_wildcard(player, name, locations=None):
"""
Find the first item whose name matches a wildcard pattern ('*') in specific
locations.
:param text_game_maker.player.player.Player player: player object
:param str name: wildcard pattern
:param [[text_game_maker.game_objects.items.Item]] locations: location\
lists to search. If None, the item list of the current tile is used
:return: found item. If no matching item is found, None is returned.
:rtype: text_game_maker.game_objects.items.Item
"""
if name.startswith('the '):
name = name[4:]
if locations is None:
locations = player.current.items.values()
for loc in locations:
for item in loc:
if (not item.scenery) and fnmatch.fnmatch(item.name, name):
return item
return None
def find_person(player, name):
"""
Find a person by name in the current tile
:param text_game_maker.player.player.Player player: player object
:param str name: name of person to search for
:return: found person. If no matching person is found, None is returned.
:rtype: text_game_maker.game_objects.person.Person
"""
for loc in player.current.people:
itemlist = player.current.people[loc]
for item in itemlist:
if (item.name.lower().startswith(name.lower())
or name.lower() in item.name.lower()):
return item
return None
def _inventory_search(player, cmpfunc):
# Return the first item among the player's pockets, inventory and equipped
# item for which cmpfunc(item) is truthy, or None if nothing matches.
items = []
items.extend(player.pockets.items)
if player.inventory:
items.extend(player.inventory.items)
if player.equipped:
items.append(player.equipped)
for item in items:
if cmpfunc(item):
return item
return None
def find_inventory_item(player, name):
"""
Find an item by name in player's inventory
:param text_game_maker.player.player.Player player: player object
:param str name: name of item to search for
:return: found item. If no matching item is found, None is returned.
:rtype: text_game_maker.game_objects.items.Item
"""
return _inventory_search(player,
lambda x: x.matches_name(name))
def find_any_item(player, name):
"""
Find an item by name in either the player's inventory or in the current tile
:param text_game_maker.player.player.Player player: player object
:param str name: name of item to search for
:return: found item. If no matching item is found, None is returned.
:rtype: text_game_maker.game_objects.items.Item
"""
ret = find_inventory_item(player, name)
if not ret:
return find_item(player, name)
return ret
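# Illustrative use ('lighter' is a placeholder item name):
#   find_any_item(player, 'lighter')
# first checks the player's pockets, inventory and equipped item (via
# find_inventory_item), and only falls back to the current tile's items
# (via find_item) if nothing matched.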
def find_inventory_item_class(player, classobj):
"""
Find first item in player's inventory which is an instance of a specific
class
:param text_game_maker.player.player.Player player: player object
:param classobj: class to check for instances of
:return: found item. If no matching item is found, None is returned.
:rtype: text_game_maker.game_objects.items.Item
"""
return _inventory_search(player, lambda x: isinstance(x, classobj))
def find_inventory_wildcard(player, name):
"""
Find the first item in player's inventory whose name matches a wildcard
pattern ('*').
:param text_game_maker.player.player.Player player: player object
:param str name: wildcard pattern
:return: found item. If no matching item is found, None is returned.
:rtype: text_game_maker.game_objects.items.Item
"""
# player.inventory may be unset (see _inventory_search), so guard against it
if not player.inventory:
    return None
for item in player.inventory.items:
if fnmatch.fnmatch(item.name, name):
return item
return None
def find_tile(player, name):
"""
Find an adjacent tile that is connected to the current tile by name
:param text_game_maker.player.player.Player player: player object
:param str name: name of adjacent tile to search for
:return: adjacent matching tile. If no matching tiles are found, None is\
returned
:rtype: text_game_maker.tile.tile.Tile
"""
for tile in player.current.iterate_directions():
if tile and (name in tile.name):
return tile
return None
def add_format_token(token, func):
"""
Add a format token
:param str token: token string to look for
:param func: function to call to obtain replacement text for format token
"""
_format_tokens[token] = func
def replace_format_tokens(text):
"""
Replace format tokens in string (if any)
:param str text: text that may contain format tokens to be replaced
# encoding: utf-8
# module _socket
# from (pre-generated)
# by generator 1.147
"""
Implementation module for socket operations.
See the socket module for documentation.
"""
# no imports
# Variables with simple values
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
has_ipv6 = True
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836L
SIO_RCVALL = 2550136833L
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
TCP_MAXSEG = 4
TCP_NODELAY = 1
# functions
def getaddrinfo(host, port, family=None, socktype=None, proto=None, flags=None): # real signature unknown; restored from __doc__
"""
getaddrinfo(host, port [, family, socktype, proto, flags])
-> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct.
"""
return []
def getdefaulttimeout(): # real signature unknown; restored from __doc__
"""
getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None.
"""
return timeout
def gethostbyaddr(host): # real signature unknown; restored from __doc__
"""
gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
"""
pass
def gethostbyname(host): # real signature unknown; restored from __doc__
"""
gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host.
"""
pass
def gethostbyname_ex(host): # real signature unknown; restored from __doc__
"""
gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
"""
pass
def gethostname(): # real signature unknown; restored from __doc__
"""
gethostname() -> string
Return the current host name.
"""
return ""
def getnameinfo(sockaddr, flags): # real signature unknown; restored from __doc__
"""
getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr.
"""
pass
def getprotobyname(name): # real signature unknown; restored from __doc__
"""
getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)
"""
return 0
def getservbyname(servicename, protocolname=None): # real signature unknown; restored from __doc__
"""
getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match.
"""
return 0
def getservbyport(port, protocolname=None): # real signature unknown; restored from __doc__
"""
getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match.
"""
return ""
def htonl(integer): # real signature unknown; restored from __doc__
"""
htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order.
"""
return 0
def htons(integer): # real signature unknown; restored from __doc__
"""
htons(integer) -> integer
Convert a 16-bit integer from host to network byte order.
"""
return 0
def inet_aton(string): # real signature unknown; restored from __doc__
"""
inet_aton(string) -> packed 32-bit IP representation
Convert an IP address in string format (192.168.127.12) to the 32-bit packed
binary format used in low-level network functions.
"""
pass
def inet_ntoa(packed_ip): # real signature unknown; restored from __doc__
"""
inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format
"""
pass
def ntohl(integer): # real signature unknown; restored from __doc__
"""
ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order.
"""
return 0
def ntohs(integer): # real signature unknown; restored from __doc__
"""
ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order.
"""
return 0
def setdefaulttimeout(timeout): # real signature unknown; restored from __doc__
"""
setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None.
"""
pass
# classes
class error(IOError):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class gaierror(error):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
class herror(error):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
class SocketType(object):
"""
socket([family[, type[, proto]]]) -> socket object
Open a socket of the given type. The family argument specifies the
address family; it defaults to AF_INET. The type argument specifies
whether this is a stream (SOCK_STREAM, this is the default)
or datagram (SOCK_DGRAM) socket. The protocol argument defaults to 0,
specifying the default protocol. Keyword arguments are accepted.
A socket object represents one endpoint of a network connection.
Methods of socket objects (keyword arguments not allowed):
accept() -- accept a connection, returning new socket and client address
bind(addr) -- bind the socket to a local address
close() -- close the socket
connect(addr) -- connect the socket to a remote address
connect_ex(addr) -- connect, return an error code instead of an exception
dup() -- return a new socket object identical to the current one [*]
fileno() -- return underlying file descriptor
getpeername() -- return remote address [*]
getsockname() -- return local address
getsockopt(level, optname[, buflen]) -- get socket options
gettimeout() -- return timeout or None
listen(n) -- start listening for incoming connections
makefile([mode, [bufsize]]) -- return a file object for the socket [*]
recv(buflen[, flags]) -- receive data
recv_into(buffer[, nbytes[, flags]]) -- receive data (into a buffer)
recvfrom(buflen[, flags]) -- receive data and sender's address
recvfrom_into(buffer[, nbytes, [, flags])
-- receive data and sender's address (into a buffer)
sendall(data[, flags]) -- send all data
send(data[, flags]) -- send data, may not send all of it
sendto(data[, flags], addr) -- send data to a given address
setblocking(0 | 1) -- set or clear the blocking I/O flag
setsockopt(level, optname, value) -- set socket options
settimeout(None | float) -- set or clear the timeout
shutdown(how) -- shut down traffic in one or both directions
[*] not available on all platforms!
"""
def accept(self): # real signature unknown; restored from __doc__
"""
accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket representing the
connection, and the address of the client. For IP sockets, the address
info is a pair (hostaddr, port).
"""
pass
def bind(self, address): # real signature unknown; restored from __doc__
"""
bind(address)
Bind the socket to a local address. For IP sockets, the address is a
pair (host, port); the host must refer to the local host.
@pulumi.getter
def description(self) -> Optional[str]:
"""
Permission help text that appears in the admin app assignment and consent experiences.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Display name for the permission that appears in the admin consent and app assignment experiences.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Is this permission enabled?
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The unique identifier for one of the `OAuth2Permission`.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[bool]:
"""
Is this permission enabled?
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The name of this permission.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ServicePrincipalOauth2Permission(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "adminConsentDescription":
suggest = "admin_consent_description"
elif key == "adminConsentDisplayName":
suggest = "admin_consent_display_name"
elif key == "isEnabled":
suggest = "is_enabled"
elif key == "userConsentDescription":
suggest = "user_consent_description"
elif key == "userConsentDisplayName":
suggest = "user_consent_display_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServicePrincipalOauth2Permission. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServicePrincipalOauth2Permission.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServicePrincipalOauth2Permission.__key_warning(key)
return super().get(key, default)
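# Note (sketch; obj is a placeholder instance): prefer the snake_case property
# getters defined below, e.g. obj.is_enabled. Dict-style lookup with the
# provider's camelCase key, e.g. obj["isEnabled"], goes through __key_warning
# above, which logs a suggestion to use the 'is_enabled' property instead.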
def __init__(__self__, *,
admin_consent_description: Optional[str] = None,
admin_consent_display_name: Optional[str] = None,
id: Optional[str] = None,
is_enabled: Optional[bool] = None,
type: Optional[str] = None,
user_consent_description: Optional[str] = None,
user_consent_display_name: Optional[str] = None,
value: Optional[str] = None):
"""
:param str admin_consent_description: The description of the admin consent.
:param str admin_consent_display_name: The display name of the admin consent.
:param str id: The unique identifier for one of the `OAuth2Permission`.
:param bool is_enabled: Is this permission enabled?
:param str type: The type of the permission.
:param str user_consent_description: The description of the user consent.
:param str user_consent_display_name: The display name of the user consent.
:param str value: The name of this permission.
"""
if admin_consent_description is not None:
pulumi.set(__self__, "admin_consent_description", admin_consent_description)
if admin_consent_display_name is not None:
pulumi.set(__self__, "admin_consent_display_name", admin_consent_display_name)
if id is not None:
pulumi.set(__self__, "id", id)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if type is not None:
pulumi.set(__self__, "type", type)
if user_consent_description is not None:
pulumi.set(__self__, "user_consent_description", user_consent_description)
if user_consent_display_name is not None:
pulumi.set(__self__, "user_consent_display_name", user_consent_display_name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="adminConsentDescription")
def admin_consent_description(self) -> Optional[str]:
"""
The description of the admin consent.
"""
return pulumi.get(self, "admin_consent_description")
@property
@pulumi.getter(name="adminConsentDisplayName")
def admin_consent_display_name(self) -> Optional[str]:
"""
The display name of the admin consent.
"""
return pulumi.get(self, "admin_consent_display_name")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The unique identifier for one of the `OAuth2Permission`.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[bool]:
"""
Is this permission enabled?
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type of the permission.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userConsentDescription")
def user_consent_description(self) -> Optional[str]:
"""
The description of the user consent.
"""
return pulumi.get(self, "user_consent_description")
@property
@pulumi.getter(name="userConsentDisplayName")
def user_consent_display_name(self) -> Optional[str]:
"""
The display name of the user consent.
"""
return pulumi.get(self, "user_consent_display_name")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The name of this permission.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ServicePrincipalOauth2PermissionScope(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "adminConsentDescription":
suggest = "admin_consent_description"
elif key == "adminConsentDisplayName":
suggest = "admin_consent_display_name"
elif key == "userConsentDescription":
suggest = "user_consent_description"
elif key == "userConsentDisplayName":
suggest = "user_consent_display_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServicePrincipalOauth2PermissionScope. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServicePrincipalOauth2PermissionScope.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServicePrincipalOauth2PermissionScope.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
admin_consent_description: Optional[str] = None,
admin_consent_display_name: Optional[str] = None,
enabled: Optional[bool] = None,
id: Optional[str] = None,
type: Optional[str] = None,
user_consent_description: Optional[str] = None,
user_consent_display_name: Optional[str] = None,
value: Optional[str] = None):
"""
:param str admin_consent_description: The description of the admin consent.
:param str admin_consent_display_name: The display name of the admin consent.
:param bool enabled: Is this permission enabled?
:param str id: The unique identifier for one of the `OAuth2Permission`.
:param str type: The type of the permission.
:param str user_consent_description: The description of the user consent.
:param str user_consent_display_name: The display name of the user consent.
:param str value: The name of this permission.
"""
if admin_consent_description is not None:
pulumi.set(__self__, "admin_consent_description", admin_consent_description)
if admin_consent_display_name is not None:
pulumi.set(__self__, "admin_consent_display_name", admin_consent_display_name)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if id is not None:
pulumi.set(__self__, "id", id)
if type is not None:
pulumi.set(__self__, "type", type)
if user_consent_description is not None:
pulumi.set(__self__, "user_consent_description", user_consent_description)
if user_consent_display_name is not None:
pulumi.set(__self__, "user_consent_display_name", user_consent_display_name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="adminConsentDescription")
def admin_consent_description(self) -> Optional[str]:
"""
The description of the admin consent.
"""
return pulumi.get(self, "admin_consent_description")
@property
@pulumi.getter(name="adminConsentDisplayName")
def admin_consent_display_name(self) -> Optional[str]:
"""
The display name of the admin consent.
"""
return pulumi.get(self, "admin_consent_display_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Is this permission enabled?
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The unique identifier for one of the `OAuth2Permission`.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type of the permission.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userConsentDescription")
def user_consent_description(self) -> Optional[str]:
"""
The description of the user consent.
"""
return pulumi.get(self, "user_consent_description")
@property
@pulumi.getter(name="userConsentDisplayName")
def user_consent_display_name(self) -> Optional[str]:
"""
The display name of the user consent.
"""
return pulumi.get(self, "user_consent_display_name")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
The name of this permission.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class GetApplicationApiResult(dict):
def __init__(__self__, *,
oauth2_permission_scopes: Sequence['outputs.GetApplicationApiOauth2PermissionScopeResult']):
pulumi.set(__self__, "oauth2_permission_scopes", oauth2_permission_scopes)
@property
@pulumi.getter(name="oauth2PermissionScopes")
def oauth2_permission_scopes(self) -> Sequence['outputs.GetApplicationApiOauth2PermissionScopeResult']:
return pulumi.get(self, "oauth2_permission_scopes")
@pulumi.output_type
class GetApplicationApiOauth2PermissionScopeResult(dict):
def __init__(__self__, *,
admin_consent_description: str,
admin_consent_display_name: str,
enabled: bool,
id: str,
is_enabled: bool,
type: str,
user_consent_description: str,
user_consent_display_name: str,
value: str):
"""
:param str admin_consent_description: The description of the admin consent
:param str admin_consent_display_name: The display name of the admin consent
:param bool enabled: (Optional) Determines if the permission scope is enabled.
:param str id: The unique identifier for one of the `OAuth2Permission` or `AppRole` instances that the resource application exposes.
:param bool is_enabled: Is this permission enabled?
:param str type: Specifies whether the `id` property references an `OAuth2Permission` or an `AppRole`. Possible values are `Scope` or `Role`.
:param str user_consent_description: The description of the user consent
:param str user_consent_display_name: The display name of the user consent
:param str value: The name of this permission
"""
pulumi.set(__self__, "admin_consent_description", admin_consent_description)
pulumi.set(__self__, "admin_consent_display_name", admin_consent_display_name)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "user_consent_description", user_consent_description)
pulumi.set(__self__, "user_consent_display_name", user_consent_display_name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="adminConsentDescription")
def admin_consent_description(self) -> str:
"""
The description of the admin consent
"""
return pulumi.get(self, "admin_consent_description")
@property
@pulumi.getter(name="adminConsentDisplayName")
def admin_consent_display_name(self) -> str:
"""
The display name of the admin consent
"""
return pulumi.get(self, "admin_consent_display_name")
@property
@pulumi.getter
def enabled(self) -> bool:
"""
(Optional) Determines if the permission scope is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
The unique identifier for one of the `OAuth2Permission` or `AppRole` instances that the resource application exposes.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
"""
Is this permission enabled?
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies whether the `id` property references an `OAuth2Permission` or an `AppRole`. Possible values are `Scope` or `Role`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userConsentDescription")
def user_consent_description(self) -> str:
"""
The description of the user consent
"""
return pulumi.get(self, "user_consent_description")
@property
@pulumi.getter(name="userConsentDisplayName")
def user_consent_display_name(self) -> str:
"""
The display name of the user consent
"""
return pulumi.get(self, "user_consent_display_name")
@property
@pulumi.getter
def value(self) -> str:
"""
The name of this permission
"""
return pulumi.get(self, "value")
@pulumi.output_type
class GetApplicationAppRoleResult(dict):
def __init__(__self__, *,
allowed_member_types: Sequence[str],
description: str,
display_name: str,
enabled: bool,
id: str,
is_enabled: bool,
value: str):
"""
:param Sequence[str] allowed_member_types: Specifies whether this app role definition can be assigned to users and groups, or to other applications (that are accessing this application in daemon service scenarios), or to both.
#!/usr/bin/env ipython
import numpy as np
import re
from GDSII import GDSII
class GDSII_ARef(GDSII):
'''
GDSII_ARef class : subclass of GDSII
GDSII Stream file format release 6.0
Array of structure reference (ARef) Element
The ARef element references a cell and repeats it along an array. A cell
can be referenced before it is defined. The cells are spaced according
to the pitchX, pitchY parameters and the number of repeats is specified
by the nX, nY parameters. By default, the array extends along the
positive X and positive Y axis. However, it is possible to rotate the
array vector counterclockwise by specifying the xRot and yRot parameters.
Alternatively, the array parameters can be specified by nX, nY, xxy, yxy
where xxy is the endpoint for the x array vector and yxy is the endpoint
for the y array vector.
When a cell is referenced it can be subjected to 3 transformation:
reflection about the x axis, magnification and rotation. These
transformations are applied to the cell within its coordinate system.
Therefore, rotation is centered at the origin, magnification simply scales
the value of all vertices and reflection mirrors the layout about the x
axis.
The functions of this class are:
setARef = Adds an array of structure reference
genRecord = Generate the record binary
readRecord = Reads an array of structure reference element record
<NAME>, UH, May 2013
'''
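# Illustrative usage sketch (cell name and pitch values are placeholders;
# genRecord is assumed to take no arguments, as suggested by the summary
# above):
#   aref = GDSII_ARef()
#   aref.setARef('UNIT_CELL', xy=[0, 0], pitchX=2000, pitchY=2000,
#                nX=10, nY=5, reflection=0, mag=1, angle=0)
#   record = aref.genRecord()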
def __init__(self):
super(GDSII_ARef,self).__init__()
self._referenceName = ''
self._reflection = 0
self._mag = 0
self._angle = 0
self._pitchX = 0
self._pitchY = 0
self._nX = 0
self._nY = 0
self._xRot = 0
self._yRot = 0
self._xy = np.array([0,0],dtype=np.int32)
self._xx = np.array([0,0],dtype=np.int32)
self._yy = np.array([0,0],dtype=np.int32)
self._strans = 0
self._cARef = 0x0B00 #Array reference element begin
self._cELFLAG = 0x2601 #ELFLAG property (optional)
self._cPLEX = 0x2F03 #PLEX property (optional)
self._cReferenceName = 0x1206 #Structure name
self._cSTrans = 0x1A01 #Strans property
self._cMag = 0x1B05 #Magnification property
self._cAngle = 0x1C05 #Angle property
self._cColRow = 0x1302 #Colrow property
self._cXY = 0x1003 #XY property
self._cEnd = 0x1100 #Element end
def __repr__(self):
print 'Array reference element'
print 'referenceName: ' , self.referenceName
print 'xy: ' , self.xy[0] , ',' , self.xy[1]
print 'pitchX: ' , self.pitchX
print 'pitchY: ' , self.pitchY
print 'nX: ' , self.nX
print 'nY: ' , self.nY
print 'xRot: ' , self.xRot
print 'yRot: ' , self.yRot
print 'reflection: ' , self.reflection
print 'mag: ' , self.mag
print 'angle: ' , self.angle
return ''
@property
def referenceName(self):
'''
referenceName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
'''
return self._referenceName
@referenceName.setter
def referenceName(self, val):
if not isinstance(val,str):
raise TypeError('GDSII_ARef.referenceName : This parameter must be of type str')
if len(val) > 32:
raise ValueError('GDSII_ARef.referenceName : This parameter cannot be longer than 32 characters')
# reject any character outside the allowed set [A-Z, a-z, 0-9, _, ?, $]
regex = re.compile(r'[^\w?$]')
if not regex.search(val) == None:
raise ValueError('GDSII_ARef.referenceName : This parameter must contain only the following characters: A-Z, a-z, 0-9, _, ? and $')
self._referenceName = val
@property
def xy(self):
'''
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the array reference
'''
return self._xy
@xy.setter
def xy(self, val):
if isinstance(val,list):
val = np.array(val,dtype=np.int32)
elif not isinstance(val,np.ndarray):
raise TypeError('GDSII_ARef.xy : This parameter must be of type numpy.ndarray')
if not val.size == 2:
raise TypeError('GDSII_ARef.xy : This parameter must have only 2 elements')
self._xy = val
@property
def xx(self):
'''
xx : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The endpoint, [x y], of the x array vector
'''
return self._xx
@xx.setter
def xx(self, val):
if isinstance(val,list):
val = np.array(val,dtype=np.int32)
elif not isinstance(val,np.ndarray):
raise TypeError('GDSII_ARef.xx : This parameter must be of type numpy.ndarray')
if not val.size == 2:
raise TypeError('GDSII_ARef.xx : This parameter must have only 2 elements')
self._xx = val
@property
def yy(self):
'''
yy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The endpoint, [x y], of the y array vector
'''
return self._yy
@yy.setter
def yy(self, val):
if isinstance(val,list):
val = np.array(val,dtype=np.int32)
elif not isinstance(val,np.ndarray):
raise TypeError('GDSII_ARef.yy : This parameter must be of type numpy.ndarray')
if not val.size == 2:
raise TypeError('GDSII_ARef.yy : This parameter must have only 2 elements')
self._yy = val
@property
def reflection(self):
'''
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
'''
return self._reflection
@reflection.setter
def reflection(self, val):
if not val in [0,1]:
raise ValueError('GDSII_ARef.reflection : This parameter must be either 0 or 1')
self._reflection = val
self._strans = self._reflection*32768 + int(not self._mag == 1)*4 + int(not self._angle == 0)*2
@property
def mag(self):
'''
mag : float
Magnification factor used to scaled the referenced structure
'''
return self._mag
@mag.setter
def mag(self, val):
self._mag = val
self._strans = self._reflection*32768 + int(not self._mag == 1)*4 + int(not self._angle == 0)*2
@property
def angle(self):
'''
angle : float
Angle in degrees counterclockwise used to rotate the referenced
structure about the origin
'''
return self._angle
@angle.setter
def angle(self, val):
self._angle = val
self._strans = self._reflection*32768 + int(not self._mag == 1)*4 + int(not self._angle == 0)*2
@property
def strans(self):
'''
strans : integer
Enables the transformation of referenced structure by setting
specific bits
Bit Number (0-15) Transformation Enable
0 Reflection about X axis before rotation
13 Absolute magnification
14 Absolute rotation
Others Set to 0
'''
return self._strans
@strans.setter
def strans(self, val):
self._strans = val
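# Worked example of the value maintained by the reflection/mag/angle setters
# above. Bit 0 is the most significant bit of the 16-bit STRANS word, so:
#   reflection enabled  -> 32768 (0x8000, bit 0)
#   mag != 1            -> 4     (0x0004, bit 13)
#   angle != 0          -> 2     (0x0002, bit 14)
# e.g. reflection=1, mag=2.0, angle=90 gives strans = 32768 + 4 + 2 = 32774.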
@property
def pitchX(self):
'''
pitchX = integer
Array pitch or step along X
'''
return self._pitchX
@pitchX.setter
def pitchX(self, val):
self._pitchX = int(val)
@property
def pitchY(self):
'''
pitchY = integer
Array pitch or step along Y
'''
return self._pitchY
@pitchY.setter
def pitchY(self, val):
self._pitchY = int(val)
@property
def nX(self):
'''
nX = integer
Array repeats along X
'''
return self._nX
@nX.setter
def nX(self, val):
if val < 0 or val >= 32768:
raise ValueError('GDSII_ARef.nX : This parameter must range from 0 to 32767')
self._nX = val
@property
def nY(self):
'''
nY = integer
Array repeats along Y
'''
return self._nY
@nY.setter
def nY(self, val):
if val < 0 or val >= 32768:
raise ValueError('GDSII_ARef.nY : This parameter must range from 0 to 32767')
self._nY = val
@property
def xRot(self):
'''
xRot = float
Array x angle in units of [degrees]
'''
return self._xRot
@xRot.setter
def xRot(self, val):
self._xRot = val
@property
def yRot(self):
'''
yRot = float
Array y angle in units of [degrees]
'''
return self._yRot
@yRot.setter
def yRot(self, val):
self._yRot = val
def setARef(self, referenceName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0):
'''
setARef(referenceName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0)
Adds an array reference element
Parameters
----------
referenceName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the array reference
pitchX : integer
Array pitch or step along X
pitchY : integer
Array pitch or step along Y
nX : integer
Array repeats along X
nY : integer
Array repeats along Y
xRot : float
Array x angle in units of [degrees]
yRot : float
Array y angle in units of [degrees]
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
Magnification factor used to scaled the referenced structure
angle : float
Angle in units of [degrees] used to rotate the referenced structure
counterclockwise about the origin
Args:
tensor: A tensor.
axis: The desired axis.
Returns:
tensor: A tensor with the new axis inserted.
"""
return numpy.expand_dims(tensor, axis)
def dtype(tensor: T.Tensor) -> type:
"""
Return the type of the tensor.
Args:
tensor: A tensor.
Returns:
type: The type of the elements in the tensor.
"""
return tensor.dtype
def mix(w: T.Tensor, x: T.Tensor, y: T.Tensor) -> T.Tensor:
"""
Compute a weighted average of two matrices (x and y) and return the result.
Multilinear interpolation.
Note:
Returns a new tensor; x and y are not modified.
Args:
w: The mixing coefficient tensor between 0 and 1 .
x: A tensor.
y: A tensor:
Returns:
tensor = w * x + (1-w) * y
"""
return ne.evaluate('w*x + (1-w)*y')
def mix_(w: T.Tensor, x: T.Tensor, y: T.Tensor) -> None:
"""
Compute a weighted average of two matrices (x and y) and store the results in x.
Useful for keeping track of running averages during training.
x <- w * x + (1-w) * y
Note:
Modifies x in place.
Args:
w: The mixing coefficient tensor between 0 and 1 .
x: A tensor.
y: A tensor:
Returns:
None
"""
ne.evaluate('w*x + (1-w)*y', out=x)
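# Illustrative use: keep an exponential moving average of a batch statistic.
# running_mean and batch_mean are assumed to be same-shape numpy arrays, and
# w a matching array (or a scalar broadcast by numexpr) of mixing weights:
#   mix_(w, running_mean, batch_mean)  # running_mean <- w*running_mean + (1-w)*batch_mean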
def square_mix_(w: T.Tensor, x: T.Tensor, y: T.Tensor) -> None:
"""
Compute a weighted average of two matrices (x and y^2) and store the results in x.
Useful for keeping track of running averages of squared matrices during training.
x <- w x + (1-w) * y**2
Note:
Modifies x in place.
Args:
w: The mixing coefficient tensor between 0 and 1.
x: A tensor.
y: A tensor:
Returns:
None
"""
ne.evaluate('w*x + (1-w)*y*y', out=x)
def sqrt_div(x: T.Tensor, y: T.Tensor) -> T.Tensor:
"""
Elementwise division of x by sqrt(y).
Args:
x: A tensor:
y: A non-negative tensor.
Returns:
tensor: Elementwise division of x by sqrt(y).
"""
z = T.EPSILON + y
return ne.evaluate('x/sqrt(z)')
def normalize(x: T.Tensor) -> T.Tensor:
"""
Divide x by its sum.
Args:
x: A non-negative tensor.
Returns:
tensor: A tensor normalized by its sum.
"""
y = T.EPSILON + x
return x/numpy.sum(y)
def norm(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the L2 norm of a tensor.
Args:
x: A tensor.
axis (optional): the axis for taking the norm
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is none:
float: The L2 norm of the tensor
(i.e., the sqrt of the sum of the squared elements).
else:
tensor: The L2 norm along the specified axis.
"""
return numpy.linalg.norm(x, axis=axis, keepdims=keepdims)
def tmax(x: T.Tensor, axis: int=None, keepdims: bool=False)-> T.FloatingPoint:
"""
Return the elementwise maximum of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the maximum.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall maximum of the elements in the tensor
else:
tensor: The maximum of the tensor along the specified axis.
"""
return numpy.max(x, axis=axis, keepdims=keepdims)
def tmin(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the elementwise minimum of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the minimum.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall minimum of the elements in the tensor
else:
tensor: The minimum of the tensor along the specified axis.
"""
return numpy.min(x, axis=axis, keepdims=keepdims)
def mean(x: T.Tensor, axis: int = None, keepdims: bool = False) -> T.FloatingPoint:
"""
Return the mean of the elements of a tensor along the specified axis.
Args:
x: A float or tensor of rank=2.
axis (optional): The axis for taking the mean.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall mean of the elements in the tensor
else:
tensor: The mean of the tensor along the specified axis.
"""
return numpy.mean(x, axis=axis, keepdims=keepdims)
def center(x: T.Tensor, axis: int=0) -> T.Tensor:
"""
Remove the mean along axis.
Args:
tensor (num_samples, num_units): the array to center
axis (int; optional): the axis to center along
Returns:
tensor (num_samples, num_units)
"""
return subtract(mean(x, axis=axis, keepdims=True), x)
def var(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the variance of the elements of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the variance.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall variance of the elements in the tensor
else:
tensor: The variance of the tensor along the specified axis.
"""
return numpy.var(x, axis=axis, keepdims=keepdims, ddof=1)
def std(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the standard deviation of the elements of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the standard deviation.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall standard deviation of the elements in the tensor
else:
tensor: The standard deviation of the tensor along the specified axis.
"""
return numpy.std(x, axis=axis, keepdims=keepdims, ddof=1)
def cov(x: T.Tensor, y: T.Tensor) -> T.Tensor:
"""
Compute the cross covariance between tensors x and y.
Args:
x (tensor (num_samples, num_units_x))
y (tensor (num_samples, num_units_y))
Returns:
tensor (num_units_x, num_units_y)
"""
# divide by N - 1 for the unbiased (Bessel-corrected) estimate
num_samples = len(x) - 1
return batch_outer(center(x), center(y)) / num_samples
def corr(x: T.Tensor, y: T.Tensor) -> T.Tensor:
"""
Compute the cross correlation between tensors x and y.
Args:
x (tensor (num_samples, num_units_x))
y (tensor (num_samples, num_units_y))
Returns:
tensor (num_units_x, num_units_y)
"""
covariance = cov(x, y)
std_x = std(x, axis=0) + T.EPSILON
std_y = std(y, axis=0) + T.EPSILON
return divide(outer(std_x, std_y), covariance)
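# Illustrative shapes (x and y are placeholder arrays, not defined here):
#   x = numpy.random.randn(100, 3)   # 100 samples, 3 units
#   y = numpy.random.randn(100, 5)   # 100 samples, 5 units
#   cov(x, y).shape   # -> (3, 5)
#   corr(x, y).shape  # -> (3, 5); entries are Pearson correlation coefficients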
def tsum(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the sum of the elements of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the sum.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall sum of the elements in the tensor
else:
tensor: The sum of the tensor along the specified axis.
"""
return numpy.sum(x, axis=axis, keepdims=keepdims)
def tprod(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.FloatingPoint:
"""
Return the product of the elements of a tensor along the specified axis.
Args:
x: A float or tensor.
axis (optional): The axis for taking the product.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
Returns:
if axis is None:
float: The overall product of the elements in the tensor
else:
tensor: The product of the tensor along the specified axis.
"""
return numpy.prod(x, axis=axis, keepdims=keepdims)
def tany(x: T.Tensor, axis: int=None, keepdims: bool=False) -> T.Boolean:
"""
Return True if any elements of the input tensor are true along the
specified axis.
Args:
x: A float or tensor.
axis (optional): The axis of interest.
keepdims (optional): If this is set to true, the dimension of the tensor
is unchanged. Otherwise, the reduced axis is removed
and the dimension of the array is 1 less.
{'name': mocked.APIC_NETWORK_EDGE_NAT + '-name',
'admin_state_up': True, 'shared': True,
'status': n_constants.NET_STATUS_ACTIVE,
'tenant_id': 'onetenant',
'router:external': True}}
db_net = self.driver.db_plugin.create_network(ctx, args)
net_ctx = self._get_network_context(self.actual_core_plugin,
ctx.tenant_id,
db_net['id'],
TEST_SEGMENT1, external=True)
self.driver.create_network_postcommit(net_ctx)
snat_networks = self.driver.db_plugin.get_networks(
ctx, filters={'name': [self.driver._get_snat_db_network_name(
db_net)]})
snat_network_id = snat_networks[0]['id']
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=True,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='10.0.0.0/24',
ip_version=4, is_admin_context=True)
host_arg = {'binding:host_id': 'h2'}
with self.port(subnet=sub, tenant_id='anothertenant',
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
# We need the db_plugin to get invoked from the code being
# tested. However, this was earlier mocked out in the setup,
# hence we reset it here.
self.driver.db_plugin._device_to_port_id = (
self.actual_core_plugin._device_to_port_id)
self.driver.db_plugin.get_bound_port_context = (
self.actual_core_plugin.get_bound_port_context)
self.driver.db_plugin.get_agents = (
self.actual_core_plugin.get_agents)
self.driver.db_plugin.create_or_update_agent = (
self.actual_core_plugin.create_or_update_agent)
self.driver._is_nat_enabled_on_ext_net = mock.Mock()
self.driver._is_connected_to_ext_net = mock.Mock()
self.driver.agent_type = 'Open vSwitch agent'
details = self.driver.get_gbp_details(
ctx, device='tap%s' % p1['id'], host='h1')
host_snat_ips = details['host_snat_ips']
self.assertEqual(0, len(host_snat_ips))
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': ['h1']})
self.assertEqual(0, len(snat_ports))
ipms = details['ip_mapping']
self.assertEqual(0, len(ipms))
def _create_snat_network(self, ctx, tenant_id):
args = {'network': {'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
'admin_state_up': True, 'shared': True,
'status': n_constants.NET_STATUS_ACTIVE,
'tenant_id': tenant_id,
'router:external': True}}
db_net = self.driver.db_plugin.create_network(ctx, args)
net_ctx = self._get_network_context(self.actual_core_plugin,
ctx.tenant_id,
db_net['id'],
TEST_SEGMENT1, external=True)
self.driver.create_network_postcommit(net_ctx)
return db_net, net_ctx
def _snat_mock_setup(self, tenant_id):
self.driver._is_edge_nat = mock.Mock(return_value=True)
self.driver._is_pre_existing = mock.Mock(return_value=False)
self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
return_value=tenant_id)
# We need the db_plugin to get invoked from the code being
# tested. However, this was earlier mocked out in the setup,
# hence we reset it here.
self.driver.db_plugin._device_to_port_id = (
self.actual_core_plugin._device_to_port_id)
self.driver.db_plugin.get_bound_port_context = (
self.actual_core_plugin.get_bound_port_context)
self.driver.db_plugin.get_agents = (
self.actual_core_plugin.get_agents)
self.driver.db_plugin.create_or_update_agent = (
self.actual_core_plugin.create_or_update_agent)
self.driver._is_nat_enabled_on_ext_net = mock.Mock()
self.driver._is_connected_to_ext_net = mock.Mock()
self.driver.agent_type = 'Open vSwitch agent'
def test_1_snat_ip_created_for_vrf_edge_nat(self):
# This test case is more of a functional test and should be revisited.
TEST_TENANT_ID1 = 'onetenant'
TEST_TENANT_ID2 = 'anothertenant'
self._snat_mock_setup(TEST_TENANT_ID1)
ctx = context.get_admin_context()
agent = {'host': 'h1'}
agent.update(AGENT_CONF)
self.actual_core_plugin.create_or_update_agent(ctx, agent)
db_net, net_ctx = self._create_snat_network(ctx, TEST_TENANT_ID1)
snat_networks = self.driver.db_plugin.get_networks(
ctx, filters={'name': [self.driver._get_snat_db_network_name(
db_net)]})
snat_network_id = snat_networks[0]['id']
net = self.create_network(
tenant_id=TEST_TENANT_ID1, expected_res_status=201, shared=True,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='10.0.0.0/24',
ip_version=4, is_admin_context=True)
host_arg = {'binding:host_id': 'h2'}
hcidr = self.driver.apic_manager.ext_net_dict[
db_net['name']]['host_pool_cidr']
# Create port with a different tenant
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
port1 = p1['port']
self.assertEqual(net['id'], port1['network_id'])
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID1, db_net)
# Verify that the port has an SNAT IP, which is
# allocated in the SNAT network tenant ID
self.assertEqual(db_net['name'],
details['external_segment_name'])
self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1', details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID1]})
self.assertEqual(1, len(snat_ports))
# Simulate a second event on the same host with the same VRF for
# the same external network to check if the earlier allocated SNAT
# IP is returned
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p2:
port2 = p2['port']
self.assertEqual(net['id'], port2['network_id'])
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID1,
db_net)
self.assertEqual(db_net['name'],
details['external_segment_name'])
self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1', details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID1]})
self.assertEqual(1, len(snat_ports))
# Now simulate event of a second host with same VRF
host_arg = {'binding:host_id': 'h2'}
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p3:
port3 = p3['port']
self.assertEqual(net['id'], port3['network_id'])
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID1,
db_net)
self.assertEqual(db_net['name'],
details['external_segment_name'])
self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1',
details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID1]})
self.assertEqual(1, len(snat_ports))
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id]})
self.assertEqual(1, len(snat_ports))
self.driver.delete_network_postcommit(net_ctx)
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id]})
self.assertEqual(0, len(snat_ports))
snat_networks = self.driver.db_plugin.get_networks(
ctx, filters={'name': [self.driver._get_snat_db_network_name(
db_net)]})
self.assertEqual(0, len(snat_networks))
subnets = self.driver.db_plugin.get_subnets(
ctx, filters={'name': [acst.HOST_SNAT_POOL]})
self.assertEqual(0, len(subnets))
def test_2_snat_ips_created_for_2_vrfs_edge_nat(self):
# This test case is more of a functional test and should be revisited.
TEST_TENANT_ID1 = 'onetenant'
TEST_TENANT_ID2 = 'anothertenant'
self._snat_mock_setup(TEST_TENANT_ID1)
ctx = context.get_admin_context()
agent = {'host': 'h1'}
agent.update(AGENT_CONF)
self.actual_core_plugin.create_or_update_agent(ctx, agent)
db_net, net_ctx = self._create_snat_network(ctx, TEST_TENANT_ID1)
snat_networks = self.driver.db_plugin.get_networks(
ctx, filters={'name': [self.driver._get_snat_db_network_name(
db_net)]})
snat_network_id = snat_networks[0]['id']
net = self.create_network(
tenant_id=TEST_TENANT_ID1, expected_res_status=201, shared=True,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='10.0.0.0/24',
ip_version=4, is_admin_context=True)
host_arg = {'binding:host_id': 'h2'}
# Create port with a different tenant
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
port1 = p1['port']
self.assertEqual(net['id'], port1['network_id'])
self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
return_value=TEST_TENANT_ID2)
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID2, db_net)
# Verify that the port has an SNAT IP, which is
# allocated in the SNAT network tenant ID
self.assertEqual(db_net['name'],
details['external_segment_name'])
hcidr = self.driver.apic_manager.ext_net_dict[
db_net['name']]['host_pool_cidr']
self._check_ip_in_cidr(
details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1',
details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID2]})
self.assertEqual(1, len(snat_ports))
# Simulate a second event on the same host with the a different VRF
# for the same external network to check if the earlier allocated
# SNAT IP is returned
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID1,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p2:
port2 = p2['port']
self.assertEqual(net['id'], port2['network_id'])
self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
return_value=TEST_TENANT_ID1)
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID1,
db_net)
self.assertEqual(db_net['name'],
details['external_segment_name'])
self._check_ip_in_cidr(
details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1',
details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID1]})
self.assertEqual(1, len(snat_ports))
# Now simulate event of a second host with same VRF
host_arg = {'binding:host_id': 'h2'}
with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
device_owner='compute:', device_id='someid',
arg_list=(portbindings.HOST_ID,), **host_arg) as p3:
port3 = p3['port']
self.assertEqual(net['id'], port3['network_id'])
self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
return_value=TEST_TENANT_ID2)
details = self.driver.get_snat_ip_for_vrf(ctx,
TEST_TENANT_ID2,
db_net)
self.assertEqual(db_net['name'],
details['external_segment_name'])
self._check_ip_in_cidr(
details['host_snat_ip'], hcidr)
self.assertEqual('192.168.0.1',
details['gateway_ip'])
self.assertEqual(
netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
details['prefixlen'])
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id],
'device_id': [TEST_TENANT_ID2]})
self.assertEqual(1, len(snat_ports))
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id]})
self.assertEqual(2, len(snat_ports))
self.driver.delete_network_postcommit(net_ctx)
snat_ports = self.driver.db_plugin.get_ports(
ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
'network_id': [snat_network_id]})
self.assertEqual(0, len(snat_ports))
snat_networks = self.driver.db_plugin.get_networks(
ctx, filters={'name': [self.driver._get_snat_db_network_name(
db_net)]})
self.assertEqual(0, len(snat_networks))
subnets = self.driver.db_plugin.get_subnets(
ctx, filters={'name': [acst.HOST_SNAT_POOL]})
self.assertEqual(0, len(subnets))
def test_create_external_network_postcommit(self):
ctx = context.get_admin_context()
args = {'network': {'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
'admin_state_up': True, 'shared': True,
'tenant_id': 'onetenant',
'status': n_constants.NET_STATUS_ACTIVE}}
db_net = self.driver.db_plugin.create_network(ctx, args)
net_ctx = self._get_network_context(self.actual_core_plugin,
ctx.tenant_id,
db_net['id'],
TEST_SEGMENT1, external=True)
self.driver.create_network_postcommit(net_ctx)
snat_networks = self.driver.db_plugin.get_networks(
ctx,
filters={'name': [self.driver._get_snat_db_network_name(db_net)]})
snat_net_id = snat_networks[0]['id']
self.assertEqual(1, len(snat_networks))
seg = ml2_db.get_network_segments(ctx.session, snat_net_id)
self.assertEqual(1, len(seg))
subnets = self.driver.db_plugin.get_subnets(
ctx, filters={'name': [acst.HOST_SNAT_POOL]})
self.assertEqual(1, len(subnets))
self.driver.delete_network_postcommit(net_ctx)
snat_networks = self.driver.db_plugin.get_networks(
ctx,
filters={'name': [self.driver._get_snat_db_network_name(db_net)]})
self.assertEqual(0, len(snat_networks))
seg = ml2_db.get_network_segments(ctx.session, snat_net_id)
self.assertEqual(0, len(seg))
subnets = self.driver.db_plugin.get_subnets(
ctx, filters={'name': [acst.HOST_SNAT_POOL]})
self.assertEqual(0, len(subnets))
class TestCiscoApicMechDriverNoFabricL3(TestApicML2IntegratedPhysicalNode):
def setUp(self, service_plugins=None, ml2_opts=None):
# Mock out HA scheduler notifications
# (they raise irrelevant exceptions if left unmocked)
self._update_notify = mock.patch(
'neutron.db.l3_hascheduler_db._notify_l3_agent_ha_port_update')
self._update_notify.start()
# Configure reference L3 implementation, which
# disables routing and subnet configuration in the ACI fabric
super(TestCiscoApicMechDriverNoFabricL3, self).setUp(
service_plugins={
'L3_ROUTER_NAT': 'router',
'flavors_plugin_name': 'neutron.services.flavors.'
'flavors_plugin.FlavorsPlugin'})
def tearDown(self):
self._update_notify.stop()
super(TestCiscoApicMechDriverNoFabricL3, self).tearDown()
def test_create_subnet_no_l3(self):
ctx = context.get_admin_context()
tenant1 = self._tenant(neutron_tenant='onetenant')
app_prof1 = self._app_profile(neutron_tenant='onetenant')
self.mgr.ensure_bd_created_on_apic = mock.Mock()
self._register_agent('h1', agent_cfg=AGENT_CONF_OPFLEX)
# Create a network with a subnet, then add
# a port to the subnet and bind it to a host
# (e.g. as if Nova did a port binding for a VM port)
net = self.create_network(
tenant_id='onetenant', is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
with self.port(subnet=sub, tenant_id='onetenant') as p1:
p1 = p1['port']
self.mgr.ensure_bd_created_on_apic.assert_called_once_with(
tenant1, mock.ANY,
ctx_owner=mock.ANY,
ctx_name=self._network_vrf_name(net_name=net['id']),
transaction='transaction', unicast_route=False)
# bind to VM-host
self._bind_port_to_host(p1['id'], 'h1')
bseg_p1, bdriver = self._get_bound_seg(p1['id'])
self.assertEqual(bseg_p1['network_type'], 'opflex')
self.assertEqual('cisco_apic_ml2', bdriver)
# We shouldn't be using a PhysDom for opflex network
# type ports -- make sure we didn't use one
self.mgr.ensure_path_created_for_port.assert_not_called()
# Create a router and add an interface from
# the subnet we created above. Make explicit call for
# port binding for the newly created router port as well.
router = self.create_router(api=self.ext_api,
tenant_id='onetenant',
is_admin_context=True)['router']
self.l3_plugin.add_router_interface(ctx,
router['id'],
{'subnet_id': sub['subnet']['id']})
self.mgr.ensure_subnet_created_on_apic.assert_not_called()
router_port = self.driver.db_plugin.get_ports(
ctx, filters={'device_id': [router['id']],
'network_id': [net['id']]})[0]
self.mgr.ensure_path_created_for_port.reset_mock()
# Bind the port to a host that's not running OpFlex.
self._bind_port_to_host(p1['id'], 'lb-app-01')
self.mgr.ensure_path_created_for_port.assert_called()
bseg_p1, bdriver = self._get_bound_seg(p1['id'])
self.assertEqual(1, len(self._query_dynamic_seg(net['id'])))
self.mgr.ensure_path_created_for_port.assert_called_once_with(
tenant1, net['id'], 'lb-app-01', bseg_p1['segmentation_id'],
bd_name=None, app_profile_name=app_prof1, transaction=mock.ANY)
self.l3_plugin.remove_router_interface(ctx,
router['id'],
{'port_id': router_port['id']})
self.delete_port(p1['id'], tenant_id=p1['tenant_id'])
self.assertEqual(0, len(self._query_dynamic_seg(net['id'])))
self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
tenant1, net['id'], 'lb-app-01', app_profile_name=app_prof1)
def test_no_l3_in_gbp_details(self):
self._register_agent('h1')
self.driver._get_tenant_vrf = mock.MagicMock()
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=True,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
with self.port(subnet=sub, tenant_id='anothertenant') as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
# Bind port to trigger path binding
self._bind_port_to_host(p1['id'], 'h1')
self.driver._add_ip_mapping_details = mock.Mock()
details = self._get_gbp_details(p1['id'], 'h1')
self.assertEqual(self._tenant(neutron_tenant='onetenant'),
details['ptg_tenant'])
self.assertEqual(self._app_profile(neutron_tenant='onetenant'),
details['app_profile_name'])
self.assertEqual('onetenant',
details['tenant_id'])
self.assertTrue(details['enable_dhcp_optimization'])
self.assertEqual(1, len(details['subnets']))
self.assertEqual(sub['subnet']['id'], details['subnets'][0]['id'])
self.assertIsNone(details.get('ip_mapping'))
self.assertIsNone(details.get('floating_ip'))
self.assertIsNone(details.get('host_snat_ips'))
self.driver._get_tenant_vrf.assert_called_with(net['tenant_id'])
def test_phys_port_on_shared_public_opflex_network(self):
pass
class TestExtensionAttributes(ApicML2IntegratedTestBase):
def test_route_leak_network_lifecycle(self):
net = self.create_network(
tenant_id='onetenant', expected_res_status=201)['network']
self.assertEqual(False,
net[ALLOW_ROUTE_LEAK])
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Entropy conversion functions.
Depending on the function, input entropy can be expressed
as raw (i.e. binary 0/1 string), bytes, or integer
and their equivalent representations.
Leading zeros in raw or bytes entropy
are never considered redundant padding.
Output entropy is always raw.
"""
import math
import secrets
from hashlib import sha512
from typing import Iterable, List, Optional, Tuple, Union
from .alias import BinStr, Entropy, Octets
from .utils import bytes_from_octets
_bits = 128, 160, 192, 224, 256, 512
_dice_sides = (4, 6, 8, 12, 20, 24, 30, 48, 60, 120)
def _indexes_from_entropy(entropy: BinStr, base: int) -> List[int]:
"""Return the digit indexes for the provided raw entropy.
Return the list of integer indexes into a digit set,
usually a language word-list,
for the provided raw (i.e. binary 0/1 string) entropy;
leading zeros are not considered redundant padding.
"""
bits = len(entropy)
int_entropy = int(entropy, 2)
indexes = []
while int_entropy:
int_entropy, index = divmod(int_entropy, base)
indexes.append(index)
    # do not lose the entropy carried by leading zeros
bits_per_digit = int(math.log(base, 2))
nwords = math.ceil(bits / bits_per_digit)
while len(indexes) < nwords:
indexes.append(0)
return list(reversed(indexes))
def _entropy_from_indexes(indexes: List[int], base: int) -> BinStr:
"""Return the raw entropy from a list of word-list indexes.
Return the raw (i.e. binary 0/1 string) entropy
from the provided list of integer indexes into
a given language word-list.
"""
entropy = 0
for index in indexes:
entropy = entropy * base + index
binentropy = bin(entropy)[2:] # remove '0b'
    # do not lose the entropy carried by leading zeros
bits_per_digit = int(math.log(base, 2))
bits = len(indexes) * bits_per_digit
binentropy = binentropy.zfill(bits)
return binentropy
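# Illustrative round trip between the two helpers above (not part of the
# original module); base=16 is chosen so that each digit index carries 4 bits,
# and leading zeros survive because one side pads the index list and the
# other zero-fills the binary string.
#
#   >>> _indexes_from_entropy("00001111", 16)
#   [0, 15]
#   >>> _entropy_from_indexes([0, 15], 16)
#   '00001111'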
OneOrMoreInt = Union[int, Iterable[int]]
def binstr_from_entropy(entr: Entropy, bits: OneOrMoreInt = _bits) -> BinStr:
"""Return raw entropy from the input entropy.
Input entropy can be expressed as:
- raw (i.e. binary 0/1 string) entropy
- bytes (no hex-string, as they would conflict with
raw entropy representation)
- integer (int, no string starting with "0b"/"0x")
In the case of raw entropy and bytes,
entropy is never padded to satisfy the bit-size requirement;
instead,
    integer entropy is front-padded with zero digits
as much as necessary to satisfy the bit-size requirement.
In all cases if more bits than required are provided,
the leftmost ones are retained.
Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
"""
if isinstance(entr, str):
return binstr_from_binstr(entr, bits)
elif isinstance(entr, bytes):
return binstr_from_bytes(entr, bits)
elif isinstance(entr, int):
return binstr_from_int(entr, bits)
m = "Entropy must be raw binary 0/1 string, bytes, or int; "
m += f"not '{type(entr).__name__}'"
raise TypeError(m)
def binstr_from_bytes(bytes_entropy: Octets, bits: OneOrMoreInt = _bits) -> BinStr:
"""Return raw entropy from the input Octets entropy.
Input entropy can be expressed as hex-string or bytes;
it is never padded to satisfy the bit-size requirement.
If more bits than required are provided,
the leftmost ones are retained.
Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
"""
bytes_entropy = bytes_from_octets(bytes_entropy)
# if a single int, make it a tuple
if isinstance(bits, int):
bits = (bits,)
# ascending unique sorting of allowed bits
bits = sorted(set(bits))
n_bits = len(bytes_entropy) * 8
if n_bits > bits[-1]:
n_bits = bits[-1]
if n_bits not in bits:
m = f"Wrong number of bits: {n_bits} instead of {bits}"
raise ValueError(m)
int_entropy = int.from_bytes(bytes_entropy, "big")
# only the leftmost bits will be retained
return binstr_from_int(int_entropy, n_bits)
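# Minimal usage sketch (illustrative values): hex-string input with the allowed
# bit-sizes overridden so the output stays short; when more bits than required
# are supplied, only the leftmost ones are kept.
#
#   >>> binstr_from_bytes("800f", 16)
#   '1000000000001111'
#   >>> binstr_from_bytes("800f", 8)
#   '10000000'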
def binstr_from_int(int_entropy: Union[int, str], bits: OneOrMoreInt = _bits) -> BinStr:
"""Return raw entropy from the input integer entropy.
Input entropy can be expressed as int
or string starting with "0x"/"0b";
    it is front-padded with zero digits
as much as necessary to satisfy the bit-size requirement.
If more bits than required are provided,
the leftmost ones are retained.
Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
"""
if isinstance(int_entropy, str):
int_entropy = int_entropy.strip().lower()
if int_entropy[:2] == "0b":
int_entropy = int(int_entropy, 2)
elif int_entropy[:2] == "0x":
int_entropy = int(int_entropy, 16)
if not isinstance(int_entropy, int):
m = "Entropy must be an int, not "
m += f"{type(int_entropy).__name__}"
raise TypeError(m)
if int_entropy < 0:
raise ValueError(f"Negative entropy: {int_entropy}")
# if a single int, make it a tuple
if isinstance(bits, int):
bits = (bits,)
# ascending unique sorting of allowed bits
bits = sorted(set(bits))
# convert to binary string and remove leading '0b'
bin_str = bin(int_entropy)[2:]
n_bits = len(bin_str)
if n_bits > bits[-1]:
# only the leftmost bits are retained
return bin_str[: bits[-1]]
# pad up to the next allowed bit length
n_bits = next(v for i, v in enumerate(bits) if v >= n_bits)
return bin_str.zfill(n_bits)
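# Illustrative sketch (not part of the original module): unlike the raw/bytes
# variants, integer entropy is zero-padded up to the next allowed bit-size.
#
#   >>> binstr_from_int(3, (8, 16))
#   '00000011'
#   >>> binstr_from_int("0b11", (8, 16))
#   '00000011'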
def binstr_from_binstr(str_entropy: str, bits: OneOrMoreInt = _bits) -> BinStr:
"""Return raw entropy from the input raw entropy.
Input entropy must be expressed as raw entropy;
it is never padded to satisfy the bit-size requirement.
If more bits than required are provided,
the leftmost ones are retained.
Default bit-sizes are 128, 160, 192, 224, 256, or 512 bits.
"""
if not isinstance(str_entropy, str):
m = "Entropy must be a str, not "
m += f"{type(str_entropy).__name__}"
raise TypeError(m)
# check if it is a valid binary string
int(str_entropy, 2)
# if a single int, make it a tuple
if isinstance(bits, int):
bits = (bits,)
# ascending unique sorting of allowed bits
bits = sorted(set(bits))
n_bits = len(str_entropy)
if n_bits > bits[-1]:
# only the leftmost bits are retained
return str_entropy[: bits[-1]]
if n_bits not in bits:
m = f"Wrong number of bits: {n_bits} instead of {bits}"
raise ValueError(m)
return str_entropy
def collect_rolls(bits: int) -> Tuple[int, List[int]]:
dice_sides = 0
while dice_sides not in _dice_sides:
automate = False
msg = f"{_dice_sides}"
msg = "dice sides " + msg[:-1]
msg += "; prefix with 'a' to automate rolls, hit enter for 'a6'): "
dice_sides_str = input(msg)
dice_sides_str = dice_sides_str.lower()
if dice_sides_str in ["", "a"]:
automate = True
dice_sides = 6
else:
if dice_sides_str.startswith("a"):
automate = True
dice_sides_str = dice_sides_str[1:]
try:
dice_sides = int(dice_sides_str)
except Exception:
dice_sides = 0
bits_per_roll = math.floor(math.log2(dice_sides))
base = 2 ** bits_per_roll
if not automate:
print(f"rolls are used only if in 1..{base}")
rolls: List[int] = []
min_roll_number = math.ceil(bits / bits_per_roll)
for i in range(min_roll_number):
x = 0
while x < 1 or x > base:
try:
if automate:
x_str = str(1 + secrets.randbelow(dice_sides))
else:
x_str = input(f"roll #{i+1}/{min_roll_number}: ")
x = int(x_str)
except Exception:
x = 0
rolls.append(x)
print(f"collected {min_roll_number} usable D{dice_sides} rolls")
return dice_sides, rolls
def binstr_from_rolls(
bits: int, dice_sides: int, rolls: List[int], shuffle: bool = True
) -> BinStr:
"""Return raw entropy from the input dice rolls.
Dice rolls are represented by integers in the [1-dice_sides] range;
there must be enough rolls to satisfy the bit-size requirement.
Only rolls having value in the [1-base] range are used,
with base being the highest power of 2 that is lower than the
dice_sides (e.g. for a traditional D6 dice, only rolls having value
in [1-4] are used; for a D20 dice, only rolls having value in
[1-16] are used; etc.). Rolls can also be shuffled.
If more bits than required are provided,
the leftmost ones are retained.
"""
if dice_sides < 2:
raise ValueError(f"invalid dice base: {dice_sides}, must be >= 2")
bits_per_roll = math.floor(math.log2(dice_sides))
# used base
base = 2 ** bits_per_roll
if shuffle:
secrets.SystemRandom().shuffle(rolls)
min_roll_number = math.ceil(bits / bits_per_roll)
i = 0
for r in rolls:
        # collect only usable rolls in [1-base]
if 0 < r and r <= base:
i *= base
i += r - 1
min_roll_number -= 1
        # reject invalid rolls not in [1-dice_sides]
elif r < 1 or r > dice_sides:
msg = f"invalid roll: {r} is not in [1-{dice_sides}]"
raise ValueError(msg)
if min_roll_number > 0:
msg = f"Too few rolls in the usable [1-{base}] range, missing {min_roll_number} rolls"
COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# SegmentedBluePaletteColorLookupTableData
0x00281223L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
},
# ReplacedProcedureStepSequence
0x00741224L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
None: ['Unified Procedure Step'],
},
# DistanceSourceToEntrance
0x00400306L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Modality Performed Procedure Step', 'Image'],
},
# EffectiveDateTime
0x00686226L: {
'IMPLANT TEMPLATE GROUP IOD': ['Implant Template Group'],
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Implant Template Group', 'Implant Assembly', 'Implant Template'],
},
# MemoryAllocation
0x20000060L: {
'FILM SESSION IOD': ['Film Session'],
None: ['Film Session'],
},
# ConcatenationFrameOffsetNumber
0x00209228L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment', 'Segmentation'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# SharedFunctionalGroupsSequence
0x52009229L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment', 'Segmentation'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# TotalCollimationWidth
0x00189307L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# LabelText
0x22000002L: {
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
None: ['Image', 'Media Creation Management'],
},
# PixelIntensityRelationship
0x00281040L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# PerFrameFunctionalGroupsSequence
0x52009230L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment', 'Segmentation'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# WaveformDisplayBackgroundCIELabValue
0x003A0231L: {
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
None: ['Waveform'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'HEMODYNAMIC IOD': ['Waveform'],
'GENERAL ECG IOD': ['Waveform'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'AMBULATORY ECG IOD': ['Waveform'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'12-LEAD ECG IOD': ['Waveform'],
},
# DateTime
0x0040A120L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# WaveformSequence
0x54000100L: {
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
None: ['Waveform'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'HEMODYNAMIC IOD': ['Waveform'],
'GENERAL ECG IOD': ['Waveform'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'AMBULATORY ECG IOD': ['Waveform'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'12-LEAD ECG IOD': ['Waveform'],
},
# ReferencedPresentationStateSequence
0x00089237L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
},
# IlluminationBandwidth
0x00220057L: {
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
None: ['Image'],
},
# TotalNumberOfExposures
0x00400301L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
None: ['Modality Performed Procedure Step'],
},
# SeriesType
0x00541000L: {
'PET IMAGE IOD': ['Series'],
None: ['Series'],
},
# SecondaryCaptureDeviceID
0x00181010L: {
'SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Equipment'],
None: ['Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED CDA IOD': ['Equipment'],
},
# Modality
0x00080060L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series', 'Equipment'],
'RT BRACHY TREATMENT RECORD IOD': ['Series'],
'RT STRUCTURE SET IOD': ['Series'],
'RT PLAN IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'RT TREATMENT SUMMARY RECORD IOD': ['Series'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series', 'Equipment'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Series'],
'BASIC TEXT SR IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'ENCAPSULATED PDF IOD': ['Series', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'CHEST CAD SR IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'X-RAY RADIATION DOSE SR IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'PROCEDURE LOG IOD': ['Series'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series', 'Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series', 'Equipment'],
'COMPREHENSIVE SR IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'RT ION PLAN IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'RT DOSE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'MAMMOGRAPHY CAD SR IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'RT BEAMS TREATMENT RECORD IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'RT IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series', 'Equipment'],
None: ['Series', 'Equipment', 'Modality Performed Procedure Step'],
'SEGMENTATION IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'COLON CAD SR IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
# env/lib/python3.4/site-packages/bulbs/base/client.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 <NAME> (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
Bulbs supports pluggable backends. These are the abstract base classes that
provides the server-client interface. Implement these to create a new client.
"""
import inspect
from bulbs.config import Config, DEBUG
from bulbs.registry import Registry
from bulbs.utils import get_logger
from .typesystem import TypeSystem
SERVER_URI = "http://localhost"
log = get_logger(__name__)
# TODO: Consider making these real Python Abstract Base Classes (import abc)
class Request(object):
def __init__(self, config, content_type):
"""
        Initializes a Request object.
        :param config: Config object.
        :param content_type: Content type to use for requests.
"""
self.config = config
self.content_type = content_type
self._initialize()
def _initialize(self):
pass
class Result(object):
"""
Abstract base class for a single result, not a list of results.
:param result: The raw result.
:type result: dict
:param config: The graph Config object.
:type config: Config
:ivar raw: The raw result.
:ivar data: The data in the result.
"""
def __init__(self, result, config):
self.config = config
# The raw result.
self.raw = result
# The data in the result.
self.data = None
def get_id(self):
"""
Returns the element ID.
:rtype: int
"""
raise NotImplementedError
def get_type(self):
"""
Returns the element's base type, either "vertex" or "edge".
:rtype: str
"""
raise NotImplementedError
def get_data(self):
"""
Returns the element's property data.
:rtype: dict
"""
raise NotImplementedError
def get_uri(self):
"""
Returns the element URI.
:rtype: str
"""
raise NotImplementedError
def get_outV(self):
"""
Returns the ID of the edge's outgoing vertex (start node).
:rtype: int
"""
raise NotImplementedError
def get_inV(self):
"""
Returns the ID of the edge's incoming vertex (end node).
:rtype: int
"""
raise NotImplementedError
def get_label(self):
"""
Returns the edge label (relationship type).
:rtype: str
"""
raise NotImplementedError
def get_index_name(self):
"""
Returns the index name.
:rtype: str
"""
raise NotImplementedError
def get_index_class(self):
"""
Returns the index class, either "vertex" or "edge".
:rtype: str
"""
raise NotImplementedError
def get(self, attribute):
"""
Returns the value of a client-specific attribute.
:param attribute: Name of the attribute:
:type attribute: str
:rtype: str
"""
return self.raw[attribute]
class Response(object):
"""
Abstract base class for the response returned by the request.
:param response: The raw response.
:type response: Depends on Client.
:param config: Config object.
:type config: bulbs.config.Config
:ivar config: Config object.
:ivar headers: Response headers.
:ivar content: A dict containing the response content.
    :ivar results: A generator of Result objects, a single Result object,
        or None, depending on the number of results returned.
:ivar total_size: The number of results returned.
:ivar raw: Raw HTTP response. Only set when log_level is DEBUG.
"""
result_class = Result
def __init__(self, response, config):
self.config = config
self.handle_response(response)
self.headers = self.get_headers(response)
self.content = self.get_content(response)
self.results, self.total_size = self.get_results()
self.raw = self._maybe_get_raw(response, config)
    def _maybe_get_raw(self, response, config):
"""Returns the raw response if in DEBUG mode."""
# don't store raw response in production else you'll bloat the obj
if config.log_level == DEBUG:
return response
def handle_response(self, response):
"""
Check the server response and raise exception if needed.
:param response: Raw server response.
:type response: Depends on Client.
:rtype: None
"""
raise NotImplementedError
def get_headers(self, response):
"""
Returns a dict containing the headers from the response.
:param response: Raw server response.
:type response: tuple
:rtype: httplib2.Response
"""
raise NotImplementedError
def get_content(self, response):
"""
Returns a dict containing the content from the response.
:param response: Raw server response.
:type response: tuple
:rtype: dict or None
"""
raise NotImplementedError
def get_results(self):
"""
Returns the results contained in the response.
        :return: A tuple containing two items: 1. Either a generator of Result objects,
            a single Result object, or None, depending on the number of results
            returned; 2. An int representing the number of results returned.
:rtype: tuple
"""
raise NotImplementedError
def get(self, attribute):
"""Return a client-specific attribute."""
return self.content[attribute]
def one(self):
"""
Returns one result or raises an error if there is more than one result.
:rtype: Result
"""
# If you're using this utility, that means the results attribute in the
# Response object should always contain a single result object,
# not multiple items. But gremlin returns all results as a list
# even if the list contains only one element. And the Response class
# converts all lists to a generator of Result objects. Thus in that case,
# we need to grab the single Result object out of the list/generator.
if self.total_size > 1:
log.error('resp.results contains more than one item.')
raise ValueError
if inspect.isgenerator(self.results):
result = next(self.results)
else:
result = self.results
return result
class Client(object):
"""
Abstract base class for the low-level server client.
:param config: Optional Config object. Defaults to default Config.
:type config: bulbs.config.Config
:cvar default_uri: Default URI for the database.
:cvar request_class: Request class for the Client.
:ivar config: Config object.
:ivar registry: Registry object.
:ivar type_system: TypeSystem object.
:ivar request: Request object.
Example:
>>> from bulbs.neo4jserver import Neo4jClient
>>> client = Neo4jClient()
>>> script = client.scripts.get("get_vertices")
>>> response = client.gremlin(script, params=None)
>>> result = response.results.next()
"""
default_uri = SERVER_URI
request_class = Request
def __init__(self, config=None):
self.config = config or Config(self.default_uri)
self.registry = Registry(self.config)
self.type_system = TypeSystem()
self.request = self.request_class(self.config, self.type_system.content_type)
# Vertex Proxy
def create_vertex(self, data):
"""
Creates a vertex and returns the Response.
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def get_vertex(self, _id):
"""
Gets the vertex with the _id and returns the Response.
        :param _id: Vertex ID.
        :type _id: int
:rtype: Response
"""
raise NotImplementedError
def get_all_vertices(self):
"""
Returns a Response containing all the vertices in the Graph.
:rtype: Response
"""
raise NotImplementedError
def update_vertex(self, _id, data):
"""
Updates the vertex with the _id and returns the Response.
:param _id: Vertex ID.
        :type _id: int
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def delete_vertex(self, _id):
"""
Deletes a vertex with the _id and returns the Response.
:param _id: Vertex ID.
        :type _id: int
:rtype: Response
"""
raise NotImplementedError
# Edge Proxy
def create_edge(self, outV, label, inV, data=None):
"""
        Creates an edge and returns the Response.
:param outV: Outgoing vertex ID.
:type outV: int
:param label: Edge label.
:type label: str
:param inV: Incoming vertex ID.
:type inV: int
:param data: Property data.
:type data: dict or None
:rtype: Response
"""
raise NotImplementedError
def get_edge(self, _id):
"""
Gets the edge with the _id and returns the Response.
        :param _id: Edge ID.
        :type _id: int
:rtype: Response
"""
raise NotImplementedError
def get_all_edges(self):
"""
Returns a Response containing all the edges in the Graph.
:rtype: Response
"""
raise NotImplementedError
def update_edge(self, _id, data):
"""
Updates the edge with the _id and returns the Response.
:param _id: Edge ID.
        :type _id: int
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def delete_edge(self, _id):
"""
        Deletes an edge with the _id and returns the Response.
:param _id: Edge ID.
        :type _id: int
:rtype: Response
"""
raise NotImplementedError
# Vertex Container
def outE(self, _id, label=None):
"""
Returns the outgoing edges of the vertex.
:param _id: Vertex ID.
        :type _id: int
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def inE(self, _id, label=None):
"""
Returns the incoming edges of the vertex.
:param _id: Vertex ID.
        :type _id: int
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def bothE(self, _id, label=None):
"""
Returns the incoming and outgoing edges of the vertex.
:param _id: Vertex ID.
        :type _id: int
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def outV(self, _id, label=None):
"""
Returns the out-adjacent vertices of the vertex.
:param _id: Vertex ID.
        :type _id: int
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
| |
from __future__ import absolute_import
import torch
import numpy as np
import pandas as pd
import scipy
import copy
from pysurvival import HAS_GPU
from pysurvival import utils
from pysurvival.utils import neural_networks as nn
from pysurvival.utils import optimization as opt
from pysurvival.models import BaseModel
from pysurvival.models._coxph import _CoxPHModel
from pysurvival.models._coxph import _baseline_functions
class CoxPHModel(BaseModel):
""" Cox proportional hazards model:
-------------------------------
The purpose of the model is to evaluate simultaneously
the effect of several factors on survival.
In other words, it allows us to examine how specified factors
influence the rate of a particular event happening
at a particular point in time.
The Cox model is expressed by the hazard function h(t)
(the risk of dying at time t. )
It can be estimated as follow:
h(t, x)=h_0(t)*exp(<x, W>)
Then the Survival function can be calculated as follow:
H(t, x) = cumsum( h(t, x) )
S(t, x) = exp( -H(t, x) )
Reference:
* http://www.sthda.com/english/wiki/cox-proportional-hazards-model
"""
def get_summary(self, alpha = 0.95, precision=3):
""" Providing the summary of the regression results:
* standard errors
* z-score
* p-value
"""
# Flattening the coef
W_flat = self.weights.flatten()
# calculating standard error
self.std_err = np.sqrt(self.inv_Hessian.diagonal())/self.std_scale
# Confidence Intervals
alpha = scipy.stats.norm.ppf((1. + alpha) / 2.)
lower_ci = np.round( W_flat - alpha * self.std_err, precision)
upper_ci = np.round( W_flat + alpha * self.std_err, precision)
z = np.round(W_flat / self.std_err , precision)
p_values = np.round(scipy.stats.chi2.sf( np.square(z), 1), precision)
W = np.round(W_flat, precision)
std_err = np.round(self.std_err, precision)
# Creating summary
df = np.c_[self.variables, W, std_err,
lower_ci, upper_ci, z, p_values]
df = pd.DataFrame(data = df,
columns = ['variables', 'coef', 'std. err',
'lower_ci', 'upper_ci',
'z', 'p_values'])
self.summary = df
return df
def fit(self, X, T, E, init_method='glorot_normal', lr = 1e-2,
max_iter = 100, l2_reg = 1e-2, alpha = 0.95,
tol = 1e-3, verbose = True ):
"""
Fitting a proportional hazards regression model using
the Efron's approximation method to take into account tied times.
As the Hessian matrix of the log-likelihood can be
calculated without too much effort, the model parameters are
computed using the Newton_Raphson Optimization scheme:
W_new = W_old - lr*<Hessian^(-1), gradient>
Arguments:
---------
* `X` : **array-like**, *shape=(n_samples, n_features)* --
The input samples.
* `T` : **array-like** --
The target values describing when the event of interest or
censoring occurred.
* `E` : **array-like** --
The values that indicate if the event of interest occurred
i.e.: E[i]=1 corresponds to an event, and E[i] = 0 means censoring,
for all i.
        * `init_method` : **str** *(default = 'glorot_normal')* --
Initialization method to use. Here are the possible options:
* `glorot_uniform`: Glorot/Xavier uniform initializer
* `he_uniform`: He uniform variance scaling initializer
* `uniform`: Initializing tensors with uniform (-1, 1) distribution
* `glorot_normal`: Glorot normal initializer,
* `he_normal`: He normal initializer.
* `normal`: Initializing tensors with standard normal distribution
* `ones`: Initializing tensors to 1
* `zeros`: Initializing tensors to 0
            * `orthogonal`: Initializing tensors with an orthogonal matrix,
        * `lr`: **float** *(default=1e-2)* --
learning rate used in the optimization
* `max_iter`: **int** *(default=100)* --
The maximum number of iterations in the Newton optimization
        * `l2_reg`: **float** *(default=1e-2)* --
L2 regularization parameter for the model coefficients
* `alpha`: **float** *(default=0.95)* --
Confidence interval
* `tol`: **float** *(default=1e-3)* --
Tolerance for stopping criteria
* `verbose`: **bool** *(default=True)* --
Whether or not producing detailed logging about the modeling
Example:
--------
#### 1 - Importing packages
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from pysurvival.models.simulations import SimulationModel
from pysurvival.models.semi_parametric import CoxPHModel
from pysurvival.utils.metrics import concordance_index
from pysurvival.utils.display import integrated_brier_score
#%pylab inline # To use with Jupyter notebooks
#### 2 - Generating the dataset from a Log-Logistic parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'log-logistic',
risk_type = 'linear',
censored_parameter = 10.1,
alpha = 0.1, beta=1.2 )
# Generating N random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features = 3)
#### 3 - Creating the modeling dataset
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Creating an instance of the Cox PH model and fitting the data.
# Building the model
coxph = CoxPHModel()
coxph.fit(X_train, T_train, E_train, lr=0.5, l2_reg=1e-2,
init_method='zeros')
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(coxph, X_test, T_test, E_test) #0.92
print('C-index: {:.2f}'.format(c_index))
ibs = integrated_brier_score(coxph, X_test, T_test, E_test, t_max=10,
figure_size=(20, 6.5) )
References:
-----------
* https://en.wikipedia.org/wiki/Proportional_hazards_model#Tied_times
        * <NAME> (1977). "The Efficiency of Cox's Likelihood
Function for Censored Data". Journal of the American Statistical
Association. 72 (359): 557-565.
"""
# Collecting features names
N, self.num_vars = X.shape
if isinstance(X, pd.DataFrame):
self.variables = X.columns.tolist()
else:
self.variables = ['x_{}'.format(i) for i in range(self.num_vars)]
# Checking the format of the data
X, T, E = utils.check_data(X, T, E)
order = np.argsort(-T)
T = T[order]
E = E[order]
X = self.scaler.fit_transform( X[order, :] )
self.std_scale = np.sqrt( self.scaler.var_ )
# Initializing the model
self.model = _CoxPHModel()
# Creating the time axis
self.model.get_times(T, E)
# Initializing the parameters
W = np.zeros(self.num_vars)
W = opt.initialization(init_method, W, False).flatten()
W = W.astype(np.float64)
# Optimizing to find best parameters
epsilon=1e-9
self.model.newton_optimization(X, T, E, W, lr, l2_reg, tol, epsilon,
max_iter, verbose)
# Saving the Cython attributes in the Python object
self.weights = np.array( self.model.W )
self.loss = self.model.loss
self.times = np.array( self.model.times)
self.gradient = np.array( self.model.gradient )
self.Hessian = np.array( self.model.Hessian )
self.inv_Hessian = np.array( self.model.inv_Hessian )
self.loss_values = np.array( self.model.loss_values )
self.grad2_values = np.array( self.model.grad2_values )
# Computing baseline functions
score = np.exp( np.dot(X, self.weights) )
baselines = _baseline_functions(score, T, E)
# Saving the Cython attributes in the Python object
self.baseline_hazard = np.array( baselines[1] )
self.baseline_survival = np.array( baselines[2] )
del self.model
self.get_time_buckets()
# Calculating summary
self.get_summary(alpha)
return self
def predict(self, x, t = None):
"""
Predicting the hazard, density and survival functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
* t: float (default=None)
Time at which hazard, density and survival functions
should be calculated. If None, the method returns
the functions for all times t.
"""
# Convert x into the right format
x = utils.check_data(x)
        # Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
# Calculating risk_score, hazard, density and survival
phi = np.exp( np.dot(x, self.weights) )
hazard = self.baseline_hazard*phi.reshape(-1, 1)
survival = np.power(self.baseline_survival, phi.reshape(-1, 1))
density = hazard*survival
if t is None:
return hazard, density, survival
else:
min_index = [ abs(a_j_1-t) for (a_j_1, a_j) in self.time_buckets ]
index = np.argmin(min_index)
return hazard[:, index], density[:, index], survival[:, index]
def predict_risk(self, x, use_log = False):
"""
Predicting the risk score functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
# Calculating risk_score
risk_score = np.exp( np.dot(x, self.weights) )
if not use_log:
risk_score | |
#!/usr/bin/env python3
import unittest as ut
import subtest_fix
import os
import sys
import glob
import argparse
import copy
import tempfile
from itertools import combinations
import c4.cmany as cmany
import c4.cmany.util as util
import c4.cmany.main as main
import c4.cmany.cmake as cmake
from multiprocessing import cpu_count as cpu_count
srcdir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src')
sys.path.insert(0, srcdir)
maincmd = [sys.executable, '-m', 'c4.cmany.main', '--show-args']
projdir = os.path.dirname(__file__)
compiler_set = os.environ.get('CMANY_TEST_COMPILERS', None)
build_types = os.environ.get('CMANY_TEST_BUILDTYPES', 'Debug,Release')
test_projs = os.environ.get('CMANY_TEST_PROJS', 'hello,libhello')
proj_targets = {
'hello': {
'lib': [],
'exe': ['hello'],
},
'libhello': {
'lib': ['hello', 'hello_static'],
'exe': ['test_hello', 'test_hello_static'],
},
}
flag_bundle_set = {
'none': {
'spec': 'none',
'expected': {
'none': {'vars': [], 'defines': [], 'cxxflags': [], 'flags': [], },
},
},
'foo': {
'spec': '\'foo: -V FOO_VAR=1 -D FOO_DEF=1 -X "wall" -C "wall"\'',
'expected': {
'foo': {'vars': ['FOO_VAR=1'], 'defines': ['FOO_DEF=1'], 'cxxflags': ['wall'], 'flags': ['wall'], },
},
},
'bar': {
'spec': '\'bar: -V BAR_VAR=1 -D BAR_DEF=1 -X "g3" -C "g3"\'',
'expected': {
'bar': {'vars': ['BAR_VAR=1'], 'defines': ['BAR_DEF=1'], 'cxxflags': ['g3'], 'flags': ['g3'], },
},
},
}
variant_set = [flag_bundle_set[v]['spec'] for v in ('none', 'foo', 'bar')]
variant_tests = {
'variant_test00-null':[],
'variant_test01-none_explicit':['none'],
'variant_test10-foo_only':['foo'],
'variant_test11-none_foo':['none', 'foo'],
'variant_test20-bar_only':['bar'],
'variant_test21-none_bar':['none', 'bar'],
'variant_test30-foobar_only':['foo', 'bar'],
'variant_test31-foobar_only':['none', 'foo', 'bar'],
}
def _get_variant_spec(test_name):
blueprint = variant_tests[test_name]
if not blueprint:
return []
li = ['-v'] + [','.join(flag_bundle_set[v]['spec']) for v in blueprint]
variants = cmany.Variant.create_variants(li)
return li, variants
# unset environment variables which affect the behaviour of child invokations
# of cmany
os.environ['CMANY_ARGS'] = ''
os.environ['CMANY_PFX_ARGS'] = ''
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CMakeTestProj:
def __init__(self, proj):
self.proj = proj
self.root = util.chkf(projdir, proj)
if proj_targets.get(proj) is None:
raise Exception("no target info for project " + proj)
self.libs = proj_targets[proj]['lib']
self.exes = proj_targets[proj]['exe']
self.targets = self.libs + self.exes
self.multi_target = (len(self.targets) > 1)
# http://stackoverflow.com/questions/17176887/python-get-all-permutation-of-a-list-w-o-repetitions
self.target_combinations = []
for i in range(1, len(self.targets) + 1):
self.target_combinations += list(combinations(self.targets, i))
def run(self, args_, custom_root=None):
args = copy.deepcopy(args_)
root = self.root
if custom_root is not None:
with util.setcwd(self.root):
root = os.path.abspath(custom_root)
if not os.path.exists(root):
os.makedirs(root)
projdir = os.path.abspath('.')
args.append(projdir)
args = maincmd + args
with util.setcwd(root):
tmpfile, tmpname = tempfile.mkstemp(prefix="_cmany_tmp.out.")
with util.stdout_redirected(tmpfile):
#print("----->run():", self.proj, "at", os.getcwd(), " ".join(args))
util.runsyscmd(args)
#print("----->finished run():", self.proj, "at", os.getcwd(), " ".join(args))
# close the mkstemp handle
outsock = os.fdopen(tmpfile, "r")
outsock.close()
# read the input
with open(tmpname, "r") as fh:
output = fh.read()
# remove the tmpfile
os.remove(tmpname)
#print("\n"*2, self.root, args[4:], "output len=", len(output), output[:min(len(output), 256)]+".................\n\n")
return output
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# prepare inputs
test_projs = util.splitesc(test_projs, ',')
projs = [CMakeTestProj(p) for p in test_projs]
if compiler_set is None:
compiler_set = [cmany.Compiler.default()]
else:
compiler_set = [cmany.Compiler(c) for c in util.splitesc(compiler_set, ',')]
build_types = [cmany.BuildType(b) for b in util.splitesc(build_types, ',')]
variant_set = cmany.Variant.create_variants(variant_set)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def run_projs(testobj, args, check_fn=None):
numbuilds = len(compiler_set) * len(build_types) * len(variant_set)
#
# run with default parameters
bd = '.test/0--default--build'
id = '.test/0--default--install'
for p in projs:
with testobj.subTest(msg="default parameters", proj=p.proj):
p.run(args + ['--build-dir', bd, '--install-dir', id])
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=cmany.Compiler.default(),
build_type=cmany.BuildType.default(),
variant=cmany.Variant.default(),
numbuilds=1)
check_fn(tb)
#
# run with default parameters in a non root dir
rd = '.test/1--non_root_dir'
for p in projs:
with testobj.subTest(msg="run in a non root dir", proj=p.proj):
p.run(args, custom_root=rd)
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=cmany.Compiler.default(),
build_type=cmany.BuildType.default(),
variant=cmany.Variant.default(),
numbuilds=1)
check_fn(tb)
#
if numbuilds == 1:
return
#
# run all sys,arch,compiler,buildtype,variant combinations at once
bd = '.test/2.1--comps{}--types{}--variants{}--build'.format(len(compiler_set), len(build_types), len(variant_set))
id = '.test/2.1--comps{}--types{}--variants{}--install'.format(len(compiler_set), len(build_types), len(variant_set))
for p in projs:
with testobj.subTest(msg="run all combinations at once", proj=p.proj):
p.run(args + ['--build-dir', bd,
'--install-dir', id,
'-c', ','.join([c.name if c.is_msvc else c.path for c in compiler_set]),
'-t', ','.join([str(b) for b in build_types]),
'-v', ','.join([v.full_specs for v in variant_set])
])
if check_fn:
for c in compiler_set:
for t in build_types:
for v in variant_set:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=numbuilds)
check_fn(tb)
#
# run all sys,arch,compiler,buildtype,variant combinations at once - envargs
bd = '.test/2.2--comps{}--types{}--variants{}--build'.format(len(compiler_set), len(build_types), len(variant_set))
id = '.test/2.2--comps{}--types{}--variants{}--install'.format(len(compiler_set), len(build_types), len(variant_set))
for p in projs:
with testobj.subTest(msg="run all combinations at once", proj=p.proj):
os.environ['CMANY_ARGS'] = '-c {} -t {} -v {}'.format(
','.join([c.name if c.is_msvc else c.path for c in compiler_set]),
','.join([str(b) for b in build_types]),
','.join([v.full_specs for v in variant_set])
)
#util.logwarn('export CMANY_ARGS={}'.format(os.environ['CMANY_ARGS']))
p.run(args + ['--build-dir', bd,
'--install-dir', id,
])
os.environ['CMANY_ARGS'] = ''
if check_fn:
for c in compiler_set:
for t in build_types:
for v in variant_set:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=numbuilds)
check_fn(tb)
#
# run sys,arch,compiler,buildtype combinations individually
for p in projs:
for c in compiler_set:
for t in build_types:
for v in variant_set:
with testobj.subTest(msg="run all combinations individually",
proj=p.proj, compiler=c, build_type=t, variant=v):
bd = '.test/3.1--{}--{}--{}--build'.format(c, t, v.name)
id = '.test/3.1--{}--{}--{}--install'.format(c, t, v.name)
p.run(args + ['--build-dir', bd,
'--install-dir', id,
'-c', c.name if c.is_msvc else c.path,
'-t', str(t),
'-v', v.full_specs,
])
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=1)
check_fn(tb)
#
# run sys,arch,compiler,buildtype combinations individually - envargs
for p in projs:
for c in compiler_set:
for t in build_types:
for v in variant_set:
with testobj.subTest(msg="run all combinations individually - envargs",
proj=p.proj, compiler=c, build_type=t, variant=v):
bd = '.test/3.2--envargs--{}--{}--{}--build'.format(c, t, v.name)
id = '.test/3.2--envargs--{}--{}--{}--install'.format(c, t, v.name)
os.environ['CMANY_ARGS'] = '-c {} -t {} -v {}'.format(
c.name if c.is_msvc else c.path,
str(t),
v.full_specs)
#util.logwarn('export CMANY_ARGS={}'.format(os.environ['CMANY_ARGS']))
p.run(args + ['--build-dir', bd, '--install-dir', id])
os.environ['CMANY_ARGS'] = ''
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=1)
check_fn(tb)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class TestBuild:
def __init__(self, proj, buildroot, installroot, compiler, build_type, variant, numbuilds):
self.proj = proj
self.buildroot = buildroot
self.installroot = installroot
self.compiler = compiler
self.build_type = build_type
self.variant = variant
self.numbuilds = numbuilds
self.flags = cmany.BuildFlags('all_builds')
self.build_obj = cmany.Build(proj_root=self.proj.root,
build_root=os.path.join(self.proj.root, self.buildroot),
install_root=os.path.join(self.proj.root, self.installroot),
system=cmany.System.default(),
arch=cmany.Architecture.default(),
build_type=build_type,
compiler=compiler,
variant=variant,
flags=self.flags,
num_jobs=cpu_count(),
kwargs={}
)
def checkc(self, tester):
tester.assertEqual(self.nsiblings(self.buildroot), self.numbuilds, msg=self.buildroot + str(self.siblings(self.buildroot)))
build_type = cmake.getcachevar(self.build_obj.builddir, 'CMAKE_BUILD_TYPE')
tester.assertEqual(build_type, str(self.build_type))
def checkv(self, tester):
pass
def checkb(self, tester):
self.checkc(tester)
def checki(self, tester):
tester.assertEqual(self.nsiblings(self.installroot), self.numbuilds)
def nsiblings(self, dir):
return len(self.siblings(dir))
def siblings(self, dir):
res = os.path.join(self.proj.root, dir, '*')
ch = glob.glob(res)
return ch
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Outputs(dict):
"a class to store several outputs which should be the same"
def add_one(self, k, kk, vv):
l = self.get(k)
if l is None:
l = []
self[k] = l
l.append((kk, vv))
def compare_outputs(self, test):
for k, outs in self.items():
rk, rv = outs[0]
rv = self._filter_output(rv)
for kk, vv in outs[1:]:
vv = self._filter_output(vv)
test.assertEqual(rv, vv, "{}: refkey: '{}' vs key '{}'".format(k, rk, kk))
def _filter_output(self, s):
# the first three lines contain the command, so skip them
out = "\n".join(s.split("\n")[3:])
return out
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test00Help(ut.TestCase):
# TODO: grab the output and compare it to make sure it is the same
def setUp(self):
super().setUp()
self.maxDiff = None
        # make sure --show-args is not passed to projs.run()
global maincmd
self.maincmd = maincmd
maincmd = [c for c in maincmd if c != '--show-args']
def tearDown(self):
super().tearDown()
global maincmd
maincmd = self.maincmd
cmany_help = Outputs()
def test00_cmany_help_short(self):
out = projs[0].run(['-h'])
__class__.cmany_help.add_one('-h', '-h', out)
def test01_cmany_help_long(self):
out = projs[0].run(['--help'])
__class__.cmany_help.add_one('-h', '--help', out)
def test0x_cmany_help_compare(self):
__class__.cmany_help.compare_outputs(self)
sc_help_short = Outputs()
def test10_subcommand_help_short(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run([c, '-h'])
__class__.sc_help_short.add_one(c, c, out)
def test11_subcommand_help_short_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run([a, '-h'])
__class__.sc_help_short.add_one(c, a, out)
def test1x_subcommand_help_compare(self):
__class__.sc_help_short.compare_outputs(self)
sc_help_short_rev = Outputs()
def test20_subcommand_help_short_rev(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run(['h', c])
__class__.sc_help_short_rev.add_one(c, c, out)
def test21_subcommand_help_short_rev_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run(['h', a])
__class__.sc_help_short_rev.add_one(c, a, out)
def test2x_subcommand_help_compare(self):
__class__.sc_help_short_rev.compare_outputs(self)
sc_help_long = Outputs()
def test30_subcommand_help_long(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run([c, '--help'])
__class__.sc_help_long.add_one(c, c, out)
def test31_subcommand_help_long_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run([a, '--help'])
__class__.sc_help_long.add_one(c, a, out)
def test3x_subcommand_help_long_compare(self):
__class__.sc_help_long.compare_outputs(self)
sc_help_long_rev = Outputs()
def test40_subcommand_help_long_rev(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run(['help', c])
__class__.sc_help_long_rev.add_one(c, c, out)
def test41_subcommand_help_long_rev_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run(['help', a])
__class__.sc_help_long_rev.add_one(c, a, out)
def test4x_subcommand_help_long_compare(self):
__class__.sc_help_long_rev.compare_outputs(self)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class | |
print("tmp scores = ", scores)
# normalize scores, so that they sume up to explained_var
sum_scores = scores.sum()
residual = max(explained_var, 0.0) - sum_scores
if residual > 0.0:
correction = residual / (iteration + 1)
scores[taken] += correction
scores[max_explained_var_index_long] += correction
# scores = scores * explained_var / (sum_scores+1e-6) #TODO:CORRECT THIS; INSTEAD OF FACTOR USE ADDITIVE TERM
if verbose:
print("normalized scores = ", scores, "sum to:", scores.sum(), "explained_var =", explained_var)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
last_score = scores[max_explained_var_index_long]
last_explained_var = explained_var
# handle variables not used, assign equal scores to all of them
preserve_last_evaluation = True
if preserve_last_evaluation and max_comp < dim_out:
# The score of the last feature found will be modified, as well as of not yet found features
# TODO: Take care of negative values
if last_score <= 0.0:
last_score = 0.01 # Just some value is needed here
remaining_output_features = len(temp_explained_vars) # including feature already processed
remaining_ordered_explained_variances_short_index = numpy.argsort(temp_explained_vars)[::-1]
remaining_ordered_explained_variances_long_index = indices_available[
remaining_ordered_explained_variances_short_index]
remaining_ordered_explained_variances = temp_explained_vars[
remaining_ordered_explained_variances_short_index] + 0.0
remaining_total_contribution = last_score
print("last_score=", last_score)
beta = 0.95
remaining_ordered_explained_variances[
remaining_ordered_explained_variances <= 0.0] = 0.0001 # To avoid division over zero, numerical hack
# numpy.clip(remaining_ordered_explained_variances, 0.0, None) fails here!!!!
print("remaining_ordered_explained_variances=", remaining_ordered_explained_variances)
minimum = remaining_ordered_explained_variances.min() # first element
ev_sum = remaining_ordered_explained_variances.sum()
normalized_scores = (remaining_total_contribution / (ev_sum - remaining_output_features * minimum) * beta) * \
(remaining_ordered_explained_variances - minimum) + \
((1.0 - beta) / remaining_output_features) * remaining_total_contribution
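# Sanity check on the redistribution above (illustrative, using the code's own quantities):
# write C = remaining_total_contribution, S = ev_sum, n = remaining_output_features and
# m = minimum. Summing normalized_scores over all remaining features gives
#     (C / (S - n*m) * beta) * (S - n*m) + n * ((1 - beta) / n) * C = beta*C + (1 - beta)*C = C,
# so the redistributed scores add up exactly to last_score, with beta controlling how much of
# that mass is spread proportionally to explained variance versus uniformly.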
print("normalized_scores=", normalized_scores)
print("remaining_ordered_explained_variances_long_index=", remaining_ordered_explained_variances_long_index)
print(scores.dtype)
print(normalized_scores.dtype)
scores[remaining_ordered_explained_variances_long_index] = normalized_scores
else:
# rest_explained_variance = total_variance-last_explained_var
sum_scores = scores.sum()
rest_explained_variance = total_variance - sum_scores
if verbose:
print("rest_explained_variance=", rest_explained_variance)
correction = rest_explained_variance / dim_out
scores += correction
if (scores == 0.0).any():
print("WARNING, removing 0.0 scores!")
scores += 0.0001
# num_unused = dim_out - max_comp
# scores[available_mask] = min(rest_explained_variance / num_unused, last_score)
# sum_scores = scores.sum()
# scores = scores * explained_var / (sum_scores+1e-6)
if verbose:
print("final scores: ", scores)
if verbose and linear and False:
for i in indices_available:
taken.append(i)
scores[taken] = numpy.arange(dim_out - 1, -1, -1) # **2 #WARNING!!! QUADRATIC SCORES!!!
scores = scores * total_variance / scores.sum()
print("Overriding with linear scores:", scores)
return scores
# TODO: Remove this node, it is now obsolete
class IEVMNode(mdp.Node):
""" Node implementing simple Incremental Explained Variance Maximization.
Extracted features are moderately useful for reconstruction, although this node does not
itself provide reconstruction.
The expansion function is optional, as well as performing PCA on the scores.
The added variance of the first k-outputs is equal to the explained variance of such k-outputs.
"""
def __init__(self, input_dim=None, output_dim=None, expansion_funcs=None, k=5, max_comp=None,
max_num_samples_for_ev=None, max_test_samples_for_ev=None, use_pca=False, use_sfa=False,
max_preserved_sfa=2.0, second_weighting=False, operation="average", out_sfa_filter=False, **argv):
super(IEVMNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
if expansion_funcs is not None:
self.exp_node = GeneralExpansionNode(funcs=expansion_funcs)
else:
self.exp_node = None
self.sfa_node = None
self.second_weighting = second_weighting
self.use_pca = use_pca
self.use_sfa = use_sfa
if use_sfa and not use_pca:
er = "Combination of use_sfa and use_pca not considered. Please activate use_pca or deactivate use_sfa"
raise Exception(er)
self.k = k
self.max_comp = max_comp
self.max_num_samples_for_ev = max_num_samples_for_ev
self.max_test_samples_for_ev = max_test_samples_for_ev
self.feature_scaling_factor = 0.5 # Factor that prevents amplitudes of features from growing across the network
self.exponent_variance = 0.5
self.operation = operation
self.max_preserved_sfa = max_preserved_sfa
self.out_sfa_filter = out_sfa_filter
@staticmethod
def is_trainable():
return True
def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None,
n_parallel=None, **argv):
num_samples, self.input_dim = x.shape
if self.output_dim is None:
self.output_dim = self.input_dim
if self.max_comp is None:
self.max_comp = min(self.input_dim, self.output_dim)
else:
self.max_comp = min(self.max_comp, self.input_dim, self.output_dim)
print("Training IEVMNode...")
self.x_mean = x.mean(axis=0) # Remove mean before expansion
x = x - self.x_mean
if self.exp_node is not None: # Expand data
print("expanding x...")
exp_x = self.exp_node.execute(x)
else:
exp_x = x
self.expanded_dim = exp_x.shape[1]
self.exp_x_mean = exp_x.mean(axis=0)
self.exp_x_std = exp_x.std(axis=0)
print("self.exp_x_mean=", self.exp_x_mean)
print("self.exp_x_std=", self.exp_x_std)
if (self.exp_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std # Remove mean and variance from the expanded data
print("ranking n_exp_x ...")
rankings = rank_expanded_signals_max(x, n_exp_x, x, n_exp_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=True)
rankings *= self.feature_scaling_factor
print("rankings=", rankings)
if (rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1 = numpy.argsort(rankings)[::-1] # Sort in decreasing ranking
self.magn1 = rankings
print("self.perm1=", self.perm1)
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
print("ranking s_x_1 ...")
rankings_B = rank_expanded_signals_max(x, s_x_1, x, s_x_1, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=False)
print("rankings_B=", rankings_B)
if (rankings_B == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1_B = numpy.argsort(rankings_B)[::-1] # Sort in decreasing ranking
self.magn1_B = rankings_B
print("self.perm1_B=", self.perm1_B)
# WARNING, this only works for normalized s_x_1
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
else:
s_x_1B = s_x_1
if self.use_sfa:
self.sfa_node = mdp.nodes.SFANode()
# TODO: Preserve amplitude
self.sfa_node.train(s_x_1B, block_size=block_size, train_mode=train_mode)
# , node_weights=None, edge_weights=None, scheduler = None, n_parallel=None)
self.sfa_node.stop_training()
print("self.sfa_node.d", self.sfa_node.d)
# Adaptive mechanism based on delta values
if isinstance(self.max_preserved_sfa, float):
self.num_sfa_features_preserved = (self.sfa_node.d <= self.max_preserved_sfa).sum()
elif isinstance(self.max_preserved_sfa, int):
self.num_sfa_features_preserved = self.max_preserved_sfa
else:
ex = "Cannot handle type of self.max_preserved_sfa"
print(ex)
raise Exception(ex)
# self.num_sfa_features_preserved = 10
sfa_x = self.sfa_node.execute(s_x_1B)
# TODO: Change internal variables of SFANode, so that we do not need to zero some components
# TODO: Is this equivalent to truncation of the matrices??? PERHAPS IT IS NOT !!!
sfa_x[:, self.num_sfa_features_preserved:] = 0.0
proj_sfa_x = self.sfa_node.inverse(sfa_x)
sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved]
# Notice that sfa_x has WEIGHTED zero-mean, thus we correct this here?
self.sfa_x_mean = sfa_x.mean(axis=0)
self.sfa_x_std = sfa_x.std(axis=0)
print("self.sfa_x_mean=", self.sfa_x_mean)
print("self.sfa_x_std=", self.sfa_x_std)
sfa_x -= self.sfa_x_mean
sfa_removed_x = s_x_1B - proj_sfa_x # Remove sfa projection of data
else:
self.num_sfa_features_preserved = 0
sfa_x = numpy.ones((num_samples, 0))
sfa_removed_x = s_x_1B
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
self.pca_node = mdp.nodes.PCANode(output_dim=pca_out_dim)
self.pca_node.train(sfa_removed_x)
# TODO:check that pca_out_dim > 0
pca_x = self.pca_node.execute(sfa_removed_x)
self.pca_x_mean = pca_x.mean(axis=0)
self.pca_x_std = pca_x.std(axis=0)
print("self.pca_x_std=", self.pca_x_std)
if (self.pca_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
# TODO: Is this step needed? if heuristic works well this weakens algorithm
n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std
else:
n_pca_x = sfa_removed_x[:, 0:pca_out_dim]
# Concatenate SFA and PCA signals and rank them preserving SFA components in ordering
if self.use_pca or self.use_sfa:
# TODO: Either both signals conserve magnitudes or they are both normalized
sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1)
sfa_pca_rankings = rank_expanded_signals_max(x, sfa_pca_x, x, sfa_pca_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev,
verbose=False)
sfa_pca_rankings *= self.feature_scaling_factor
# Only one magnitude normalization by node, but where should it be done? I guess after last transformation
print("sfa_pca_rankings=", sfa_pca_rankings)
if (sfa_pca_rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.magn2 = sfa_pca_rankings
perm2a = numpy.arange(self.num_sfa_features_preserved, dtype="int")
perm2b = numpy.argsort(sfa_pca_rankings[self.num_sfa_features_preserved:])[::-1]
self.perm2 = numpy.concatenate((perm2a, perm2b + self.num_sfa_features_preserved))
print("second permutation=", self.perm2)
# WARNING, this only works for normalized sfa_pca_x
s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking
s_x_2 = s_x_2[:, self.perm2] # Permute with slow features first, and then most important signal first
else:
s_x_2 = n_pca_x
# Truncating to output_dim components
s_x_2_truncated = s_x_2[:, 0:self.output_dim]
# Filtering output through SFA
if self.out_sfa_filter:
self.out_sfa_node = mdp.nodes.SFANode()
self.out_sfa_node.train(s_x_2_truncated, block_size=block_size, train_mode=train_mode)
self.out_sfa_node.stop_training()
sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated)
else:
sfa_filtered = s_x_2_truncated
self.stop_training()
# def __init__(self, funcs, input_dim = None, dtype = None, \
# use_pseudoinverse=True, use_hint=False, max_steady_factor=1.5, \
# delta_factor=0.6, min_delta=0.00001):
#
#
#
# self.sfa_node.train(x, **argv)
def _is_invertible(self):
return True
def _execute(self, x):
x_orig = x + 0.0
num_samples = x.shape[0]
zm_x = x - self.x_mean
if self.exp_node:
exp_x = self.exp_node.execute(zm_x)
else:
exp_x = zm_x
n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std
if numpy.isnan(n_exp_x).any() or numpy.isinf(n_exp_x).any():
print("n_exp_x=", n_exp_x)
quit()
n_exp_x[numpy.isnan(n_exp_x)] = 0.0
if numpy.isnan(self.magn1).any():
print("self.magn1=", self.magn1)
quit()
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking_B
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
+ " - img_data_string_placeholder"
self.sc_picture_answer_index += 1
excel_sheet.write(row_index, column_index, column_data, body_cell_format)
# Images for the question text
if isinstance(column_data, byteobj.ByteString) == True:
column_data = str(row[self.db_entry_to_index_dict[
'description_img_name_' + str(self.picture_index)]]) + " - img_data_string_placeholder"
image_data = row[
self.db_entry_to_index_dict['description_img_data_' + str(self.picture_index)]]
excel_sheet.write(row_index, column_index, column_data, body_cell_format)
# column_index += 1
# Here the images are physically written into the folders
# The additional check is unfortunately necessary because plain strings may be classified as 'TRUE' by the "isinstance(column_data, byteobj.ByteString)" check
# These plain strings, of course, cannot be written out as images
if row[
self.db_entry_to_index_dict['description_img_data_' + str(self.picture_index)]] != "":
with open(os.path.normpath(
os.path.join(self.project_root_path, "Datenbank_Export", "image_files",
self.database_dir_name, str(row[self.db_entry_to_index_dict[
'description_img_name_' + str(self.picture_index)]]) + '.png')),
'wb') as image_file:
image_file.write(image_data)
self.picture_index += 1
column_index += 1
row_index += 1
# Reset variables for the next question/row
self.picture_index = 1
self.picture_definitions_answer_index = 1
self.picture_terms_answer_index = 1
self.sc_picture_answer_index = 1
# Closing workbook
excel.close()
print(" abgeschlossen!")
print(str(row_index) + ' Zeilen exportiert ---> ' + excel.filename)
print("________________________________________________")
print(self.export_filetype_choice)
# Exports the database as ".xlsx" and converts the file to ".ods"
if self.export_filetype_choice == "no":
dataframe = pd.read_excel(os.path.normpath(
os.path.join(self.project_root_path, "Datenbank_Export", self.xlsx_workbook_name)))
with ExcelWriter(os.path.normpath(
os.path.join(self.project_root_path, "Datenbank_Export", self.ods_workbook_name)).format(
'ods')) as writer:
dataframe.to_excel(writer, engine='ods')
messagebox.showinfo("Datenbank exportieren", "Datenbank wurde exportiert!")
###### ILIAS: IMPORT OF AN EXISTING POOL OR TEST INTO THE DB
def import_illias_pool_oder_test_in_db(self):
# Paths for the databases
#self.project_root_path = project_root_path
#self.database_formelfrage_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", "ilias_formelfrage_db.db"))
#self.database_singlechoice_path = os.path.normpath(os.path.join(self.project_root_path,"Test_Generator_Datenbanken", "ilias_singlechoice_db.db"))
#self.database_multiplechoice_path = os.path.normpath(os.path.join(self.project_root_path,"Test_Generator_Datenbanken", "ilias_multiplechoice_db.db"))
#self.database_zuordnungsfrage_path = os.path.normpath(os.path.join(self.project_root_path,"Test_Generator_Datenbanken", "ilias_zuordnungsfrage_db.db"))
self.ilias_question_type = []
self.ilias_question_title = []
self.ilias_question_description_title = []
self.ilias_question_description_main = []
# SINGLE CHOICE
self.ilias_response_text = []
self.ilias_response_pts = []
self.ilias_response_img_label = []
self.ilias_response_img_string_base64_encoded = []
self.ilias_response_img_path = []
self.ilias_picture_preview_pixel = []
##########
# MULTIPLE CHOICE
self.mc_ilias_response_text = []
self.mc_ilias_response_pts = []
self.mc_ilias_response_img_label = []
self.mc_ilias_response_img_string_base64_encoded = []
self.mc_ilias_response_img_path = []
self.mc_ilias_picture_preview_pixel = []
##########
# Up to three images are captured from the question text
self.ilias_test_question_description_image_name_1 = []
self.ilias_test_question_description_image_data_1 = []
self.ilias_test_question_description_image_uri_1 = []
self.ilias_test_question_description_image_name_2 = []
self.ilias_test_question_description_image_data_2 = []
self.ilias_test_question_description_image_uri_2 = []
self.ilias_test_question_description_image_name_3 = []
self.ilias_test_question_description_image_data_3 = []
self.ilias_test_question_description_image_uri_3 = []
self.ilias_test_duration = []
self.ilias_question_author = []
self.description_singlechoice_del_index = []
self.description_multiplechoice_del_index = []
self.description_matchedquestion_del_index = []
self.all_sc_questions_points = []
self.mattext_text_all_mc_answers = []
self.all_mc_questions_points = []
self.mc_questions_correct_points = []
self.mc_questions_false_points = []
self.mattext_text_all_mq_answers = []
self.mattext_text_all_mq_answers_collection = []
self.mattText_text_all_mq_answers = []
self.sc_answer_list_nr = ""
self.mc_answer_list_nr = ""
self.mq_answer_list_nr = ""
self.mq_answer_matchings = []
self.mq_number_of_answers_per_question = []
self.mq_number_of_answers_per_question_temp = []
self.mq_answer_matchings_points = []
self.mq_answer_matching_per_question = []
self.mq_response_img_label = []
self.mq_response_img_data = []
self.mq_response_img_path = []
self.mq_matching_ids = []
self.mq_matching_ids_points = []
self.mq_len_list = []
self.number_of_answers_per_question_sc = []
self.number_of_answers_per_question_mc = []
self.number_of_answers_per_question_mq = []
self.ilias_question_title_sc = []
self.ilias_question_author_sc = []
self.ilias_question_type_ff_question_index = []
self.ilias_question_type_sc_question_index = []
self.ilias_question_type_mc_question_index = []
self.ilias_question_type_mq_question_index = []
### The selected XML is searched for possible question types and the corresponding flags are set
self.formelfrage_flag = 0
self.singlechoice_flag = 0
self.multiplechoice_flag = 0
self.matchingquestion_flag = 0
self.formelfrage_number_of_questions = 0
self.singlechoice_number_of_questions = 0
self.multiplechoice_number_of_questions = 0
self.matchingquestion_number_of_questions = 0
# Select the file to be processed
filename = filedialog.askdirectory(initialdir=pathlib.Path().absolute(), title="Select a File")
self.select_test_import_file = filename
# Split the folder name so that the contained qti.xml file can be read in automatically
self.ilias_folder_name = self.select_test_import_file.rsplit('/', 1)[-1]
self.ilias_folder_name_split1 = self.ilias_folder_name[:15]
self.ilias_folder_name_split2 = self.ilias_folder_name.rsplit('_', 1)[-1]
self.ilias_test_qti_file = os.path.normpath(os.path.join(self.select_test_import_file, self.ilias_folder_name_split1 + "qti_" + self.ilias_folder_name_split2 + ".xml"))
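# Illustrative example (hypothetical folder name): for an export folder named
# "1596456007__0__tst_1234567", split1 is its first 15 characters ("1596456007__0__"),
# split2 is the part after the last "_" ("1234567"), and the resolved qti file is
# "1596456007__0__qti_1234567.xml".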
# Read in the XML file -> determine the root element
self.mytree = ET.parse(self.ilias_test_qti_file)
self.myroot = self.mytree.getroot()
# Collect all question types from the XML file
for qtimetadatafield in self.myroot.iter('qtimetadatafield'):
if qtimetadatafield.find('fieldlabel').text == "QUESTIONTYPE":
self.ilias_question_type.append(qtimetadatafield.find('fieldentry').text)
#################### ASSIGN ALL QUESTION INDICES TO THEIR QUESTION TYPES
for i in range(len(self.ilias_question_type)):
if self.ilias_question_type[i] == "assFormulaQuestion":
self.ilias_question_type_ff_question_index.append(str(i))
self.formelfrage_flag = 1
self.formelfrage_number_of_questions += 1
elif self.ilias_question_type[i] == "SINGLE CHOICE QUESTION":
self.ilias_question_type_sc_question_index.append(str(i))
self.singlechoice_flag = 1
self.singlechoice_number_of_questions += 1
elif self.ilias_question_type[i] == "MULTIPLE CHOICE QUESTION":
self.ilias_question_type_mc_question_index.append(str(i))
self.multiplechoice_flag = 1
self.multiplechoice_number_of_questions += 1
elif self.ilias_question_type[i] == "MATCHING QUESTION":
self.ilias_question_type_mq_question_index.append(str(i))
self.matchingquestion_flag = 1
self.matchingquestion_number_of_questions += 1
else:
print("Keine Fragen gefunden")
print("Anzahl Formelfrage: " + str(self.formelfrage_number_of_questions))
print("Anzahl SingleChoice: " + str(self.singlechoice_number_of_questions))
print("Anzahl MultipleChoice: " + str(self.multiplechoice_number_of_questions))
print("Anzahl Zuordnungsfrage: " + str(self.matchingquestion_number_of_questions))
################# COLLECT QUESTION DESCRIPTIONS (QUESTION TEXT)
# Collect the description text of all questions
for flow in self.myroot.iter('flow'):
for material in flow.iter('material'):
if "" in material.find('mattext').text:
# If the question text contains "img", there is always an image attached to the question
if "il_0_mob_" in material.find('mattext').text:
self.ilias_question_description_main.append(material.find('mattext').text)
# Add image name
if material.find('matimage').attrib.get('label'):
self.ilias_test_question_description_image_name_1.append(material.find('matimage').attrib.get('label'))
# Add image path
if material.find('matimage').attrib.get('uri'):
self.ilias_test_question_description_image_uri_1.append(material.find('matimage').attrib.get('uri'))
else:
self.ilias_question_description_main.append(material.find('mattext').text)
self.ilias_test_question_description_image_name_1.append("EMPTY")
self.ilias_test_question_description_image_uri_1.append("EMPTY")
self.ilias_test_question_description_image_name_2.append("EMPTY")
self.ilias_test_question_description_image_uri_2.append("EMPTY")
self.ilias_test_question_description_image_name_3.append("EMPTY")
self.ilias_test_question_description_image_uri_3.append("EMPTY")
################# READ OUT THE MAIN QUESTION ATTRIBUTES
# The main attributes include, e.g., "question title", "question description", "author", etc.
# Read out the question titles
for item in self.myroot.iter('item'):
self.ilias_question_title.append(item.get('title'))
# Read out the question introduction text
# If the entry does not exist, create it and insert ""
for qticomment in self.myroot.iter('qticomment'):
if qticomment.text == None:
qticomment.text = ""
for item in self.myroot.iter('item'):
if "" in item.find('qticomment').text:
self.ilias_question_description_title.append(item.find('qticomment').text)
# Read out the test duration (if the entry exists)
for item in self.myroot.iter('item'):
if "" in item.find('duration').text:
self.ilias_test_duration.append(item.find('duration').text)
# Read out the question author
for qtimetadatafield in self.myroot.iter('qtimetadatafield'):
if qtimetadatafield.find('fieldlabel').text == "AUTHOR":
self.ilias_question_author.append(qtimetadatafield.find('fieldentry').text)
########### READ OUT QUESTIONS ACCORDING TO QUESTION TYPE
# Read out questions: Single Choice
if self.singlechoice_flag == 1:
XML_Interface.read_singlechoice_questions(self)
# Read out questions: formula question (Formelfrage)
if self.formelfrage_flag == 1:
XML_Interface.read_formelfrage_questions(self)
# Read out questions: Multiple Choice
if self.multiplechoice_flag == 1:
XML_Interface.read_multiplechoice_questions(self)
# Read out questions: Matching Question
if self.matchingquestion_flag == 1:
XML_Interface.read_matching_questions(self)
################ FILTER QUESTION DESCRIPTIONS (QUESTION TEXT)
# Remove Single Choice answers
for i in range(len(self.ilias_question_description_main)):
for j in range(len(self.ilias_response_text)):
if self.ilias_question_description_main[i] == self.ilias_response_text[j]:
self.description_singlechoice_del_index.append(i)
# Remove any duplicates; dicts cannot hold an element more than once, so all duplicate entries are removed
# Duplicate entries occur when an answer and a description read exactly the same,
# e.g. "Zeigerdiagramm", "Zeigerdiagramm"
self.description_singlechoice_del_index = list(dict.fromkeys(self.description_singlechoice_del_index))
for i in range(len(self.description_singlechoice_del_index)):
if len(self.description_singlechoice_del_index) > 0:
self.ilias_question_description_main.pop(self.description_singlechoice_del_index[i]-i)
self.ilias_test_question_description_image_name_1.pop(self.description_singlechoice_del_index[i]-i)
self.ilias_test_question_description_image_uri_1.pop(self.description_singlechoice_del_index[i]-i)
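# Note on the "-i" offset above (illustrative): each pop() shrinks the list by one, so the
# stored indices must be shifted left by the number of elements already removed, e.g.
# removing the original indices [2, 5] is done via pop(2) followed by pop(5 - 1).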
# Remove Multiple Choice answers
for i in range(len(self.ilias_question_description_main)):
for j in range(len(self.mattext_text_all_mc_answers)):
if self.ilias_question_description_main[i] == self.mattext_text_all_mc_answers[j]:
self.description_multiplechoice_del_index.append(i)
for i in range(len(self.description_multiplechoice_del_index)):
if len(self.description_multiplechoice_del_index) > 0:
self.ilias_question_description_main.pop(self.description_multiplechoice_del_index[i]-i)
self.ilias_test_question_description_image_name_1.pop(self.description_multiplechoice_del_index[i]-i)
self.ilias_test_question_description_image_uri_1.pop(self.description_multiplechoice_del_index[i]-i)
# Remove Matching Question answers
for i in range(len(self.ilias_question_description_main)):
for j in range(len(self.mattText_text_all_mq_answers)):
if self.ilias_question_description_main[i] == self.mattText_text_all_mq_answers[j]:
self.description_matchedquestion_del_index.append(i)
# Remove any duplicates; dicts cannot hold an element more than once, so all duplicate entries are removed
# Duplicate entries occur when an answer and a description read exactly the same,
# e.g. "Zeigerdiagramm", "Zeigerdiagramm"
self.description_matchedquestion_del_index = list(dict.fromkeys(self.description_matchedquestion_del_index))
for i in range(len(self.description_matchedquestion_del_index)):
if len(self.description_matchedquestion_del_index) > 0:
self.ilias_question_description_main.pop(self.description_matchedquestion_del_index[i]-i)
self.ilias_test_question_description_image_name_1.pop(self.description_matchedquestion_del_index[i]-i)
self.ilias_test_question_description_image_uri_1.pop(self.description_matchedquestion_del_index[i]-i)
########### WRITE QUESTIONS TO THE DATABASE
# Write
# if self.singlechoice_flag == 1:
# XML_Interface.write_data_to_database_sc(self)
if self.formelfrage_flag == 1:
XML_Interface.write_data_to_database_ff(self)
# if self.multiplechoice_flag == 1:
# XML_Interface.write_data_to_database_mc(self)
# if self.matchingquestion_flag == 1:
# XML_Interface.write_data_to_database_mq(self)
####### Single Choice questions
def read_singlechoice_questions(self):
# SINGLE CHOICE: points for answers
for respcondition in self.myroot.iter('respcondition'):
for varequal in respcondition.iter('varequal'):
if varequal.attrib.get('respident') == "MCSR":
for setvar in respcondition.iter('setvar'):
self.ilias_response_pts.append(setvar.text)
# SINGLE CHOICE: answers and images
for response_lid in self.myroot.iter('response_lid'):
if response_lid.attrib.get('ident') == "MCSR": # SR -> Single Choice
for render_choice in response_lid.iter('render_choice'):
for response_label in render_choice.iter('response_label'):
for material in response_label.iter('material'):
if material.find('matimage') == None:
self.ilias_response_img_label.append("EMPTY")
self.ilias_response_img_string_base64_encoded.append("EMPTY")
else:
self.ilias_response_img_label.append(material.find('matimage').attrib.get('label'))
self.ilias_response_img_string_base64_encoded.append(material.find('matimage').text)
for mattext in material.iter('mattext'):
self.ilias_response_text.append(mattext.text)
self.count=[]
##################################### Number of answers per SC question
# This iteration, filtered on MCSR (= Single Choice), lists all answers of the SC questions
for response_lid in self.myroot.iter('response_lid'):
if response_lid.attrib.get('ident') == 'MCSR':
for render_choice in response_lid.iter('render_choice'):
# A "$" is written at the beginning of each answer block so the string can be split afterwards
self.sc_answer_list_nr += "$"
for response_label in render_choice.iter('response_label'):
self.sc_answer_list_nr += str(response_label.attrib.get('ident'))
self.ilias_test_question_type_collection_sc_answers = self.sc_answer_list_nr.split("$")
self.ilias_test_question_type_collection_sc_answers.pop(0) # Because of split() the first field contains no data
for i in range(len(self.ilias_test_question_type_collection_sc_answers)):
self.number_of_answers_per_question_sc.append(str( int(max(self.ilias_test_question_type_collection_sc_answers[i]))+1))
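# Worked example (illustrative): two SC questions with answer idents "0","1","2" and "0","1"
# produce sc_answer_list_nr == "$012$01"; after split("$") and pop(0) this yields
# ['012', '01'], so the answer counts become ['3', '2']. Note that this counting scheme
# appears to rely on single-character answer idents (i.e. at most 10 answers per question).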
#################################### Points for questions
##################################### Enumerate the main question text
self.ilias_number_of_response_variables = 10
self.ilias_response_text_1, self.ilias_response_pts_1, self.ilias_response_img_label_1, self.ilias_response_img_string_base64_encoded_1 = [], [], [], []
self.ilias_response_text_2, self.ilias_response_pts_2, self.ilias_response_img_label_2, self.ilias_response_img_string_base64_encoded_2 = [], [], [], []
self.ilias_response_text_3, self.ilias_response_pts_3, self.ilias_response_img_label_3, self.ilias_response_img_string_base64_encoded_3 = [], [], [], []
self.ilias_response_text_4, self.ilias_response_pts_4, self.ilias_response_img_label_4, self.ilias_response_img_string_base64_encoded_4 = [], [], [], []
self.ilias_response_text_5, self.ilias_response_pts_5, self.ilias_response_img_label_5, self.ilias_response_img_string_base64_encoded_5 = [], [], [], []
self.ilias_response_text_6, self.ilias_response_pts_6, self.ilias_response_img_label_6, self.ilias_response_img_string_base64_encoded_6 = [], [], [], []
self.ilias_response_text_7, self.ilias_response_pts_7, self.ilias_response_img_label_7, self.ilias_response_img_string_base64_encoded_7 = [], [], [], []
self.ilias_response_text_8, self.ilias_response_pts_8, self.ilias_response_img_label_8, self.ilias_response_img_string_base64_encoded_8 = [], [], [], []
self.ilias_response_text_9, self.ilias_response_pts_9, self.ilias_response_img_label_9, self.ilias_response_img_string_base64_encoded_9 = [], [], [], []
self.ilias_response_text_10, self.ilias_response_pts_10, self.ilias_response_img_label_10, self.ilias_response_img_string_base64_encoded_10 = [], [], [], []
t = 0
for i in range(len(self.ilias_test_question_type_collection_sc_answers)):
if i == 1:
t = int(max(self.ilias_test_question_type_collection_sc_answers[0])) + 1
self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/servicebindingmaps".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def servicebindingmaps_query(self, data, tenant_id=None, api_version="v2.1"):
"""
Queries the DB for a limited number of service binding maps that match the query parameters.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/servicebindingmaps/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
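# Illustrative usage (assumes an authenticated parent SDK object, here called `sdk`,
# that exposes this namespace as `sdk.post`; the query body is a placeholder):
#   resp = sdk.post.servicebindingmaps_query({"query_params": {"limit": 10}})
#   if resp.cgx_status:
#       print(resp.cgx_content)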
def serviceendpoints(self, data, tenant_id=None, api_version="v2.2"):
"""
Create a new Service Endpoint
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.2)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/serviceendpoints".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def serviceendpoints_query(self, data, tenant_id=None, api_version="v2.2"):
"""
Queries the DB for a limited number of service endpoints that match the query parameters.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.2)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/serviceendpoints/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def servicelabels(self, data, tenant_id=None, api_version="v2.0"):
"""
Create a new Service Label
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/servicelabels".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def servicelabels_query(self, data, tenant_id=None, api_version="v2.0"):
"""
Queries the DB for a limited number of service labels that match the query parameters.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/servicelabels/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def signup(self, data, tenant_id=None, api_version="v2.0"):
"""
Signup new operators
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/signup".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_correlationevents_query(self, data, tenant_id=None, api_version="v2.0"):
"""
POST Query_Correlationevents_S API Function
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/correlationevents/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_extensions(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create site level extension configuration
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/extensions".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_extensions_query(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Query site level extensions that match query params
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/extensions/query".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_natlocalprefixes(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create an association between site and NAT Prefix.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/natlocalprefixes".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_networkpolicylocalprefixes(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create an association between site and Network local Prefix.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/networkpolicylocalprefixes".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def site_prioritypolicylocalprefixes(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create an association between site and Priority local Prefix.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/prioritypolicylocalprefixes".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def sites(self, data, tenant_id=None, api_version="v4.5"):
"""
Create a new site
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v4.5)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2015-2021 UT-BATTELLE, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Perturbation Growth Test:
This tests the null hypothesis that the reference (n) and modified (m) model
ensembles represent the same atmospheric state after each physics parameterization
is applied within a single time-step using the two-sample (n and m) T-test for equal
averages at a 95% confidence level. Ensembles are generated by repeating the
simulation for many initial conditions, with each initial condition subject to
multiple perturbations.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import math
import argparse
# import logging
from pprint import pprint
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats
from netCDF4 import Dataset
import livvkit
from livvkit.util import elements as el
from livvkit.util import functions as fn
from evv4esm.utils import bib2html
# logger = logging.getLogger(__name__)
def parse_args(args=None):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config',
type=fn.read_json,
default='test/pge_pc0101123.json',
help='A JSON config file containing a `pg` dictionary defining ' +
'the options.')
args = parser.parse_args(args)
name = list(args.config.keys())[0]  # list() needed on Python 3, where dict views are not indexable
config = args.config[name]
return name, config
def _instance2sub(instance_number, total_perturbations):
"""
Converts an instance number (ii) to initial condition index (ci) and
perturbation index (pi) subscripts
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
perturbation_index = (instance_number - 1) % total_perturbations
initial_condition = (instance_number - 1 - perturbation_index) // total_perturbations
return initial_condition, perturbation_index
def _sub2instance(initial_condition, perturbation_index, total_perturbations):
"""
Converts initial condition index (ci) and perturbation index (pi) subscripts
to an instance number (ii)
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
instance = initial_condition * total_perturbations + perturbation_index + 1
return instance
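# Worked example (illustrative, assuming 3 perturbations per initial condition):
#   >>> _instance2sub(5, 3)
#   (1, 1)
#   >>> _sub2instance(1, 1, 3)
#   5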
def rmse_writer(file_name, rmse, perturbation_names, perturbation_variables, init_file_template, model_name):
"""
Opens and writes a netcdf file for PGE curves
This function is here purely to avoid duplicate
codes so that it is easy to maintain code longterm
"""
with Dataset(file_name, 'w') as nc:
ninit, nprt_m1, nvars = rmse.shape
nc.createDimension('ninit', ninit)
nc.createDimension('nprt', nprt_m1 + 1)
nc.createDimension('nprt_m1', nprt_m1)
nc.createDimension('nvars', nvars)
nc_init_cond = nc.createVariable('init_cond_files', str, 'ninit')
nc_perturbation = nc.createVariable('perturbation_names', str, 'nprt')
nc_variables = nc.createVariable('perturbation_variables', str, 'nvars')
nc_rmse = nc.createVariable('rmse', 'f8', ('ninit', 'nprt_m1', 'nvars'))
# NOTE: Assignment to netcdf4 variable length string array can be done
# via numpy arrays, or in a for loop using integer indices.
# NOTE: Numpy arrays can't be created from a generator for some dumb reason,
# so protect with list
nc_perturbation[:] = np.array(list(perturbation_names))
nc_variables[:] = np.array(list(perturbation_variables))
nc_rmse[:] = rmse[:]
for icond in range(0, ninit):
# NOTE: Zero vs One based indexing
nc_init_cond[icond] = init_file_template.format(model_name, 'i', icond+1)
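# Illustrative call (hypothetical shapes and names): write a (2 initial conditions x
# 2 perturbations-excluding-control x 3 variables) RMSE array. The 'nprt' dimension in
# the file is one larger than rmse.shape[1] because it also counts the unperturbed member.
#   rmse_writer('comp_cld.nc', np.zeros((2, 2, 3)), ['woprt', 'posprt', 'negprt'],
#               ['T', 'Q', 'U'], '{}_{}_{:04d}.nc', 'cam')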
def variables_rmse(ifile_test, ifile_cntl, var_list, var_pefix=''):
"""
Compute RMSE difference between perturbation and control for a set of
variables
Args:
ifile_test: Path to a NetCDF dataset for a perturbed simulation
ifile_cntl: Path to a NetCDF dataset for the control simulation
var_list (list): List of all variables to analyze
var_pefix: Optional prefix (e.g., t_, qv_) to apply to the variable
returns:
rmse (pandas.DataFrame): A dataframe containing the RMSE and maximum
difference details between the perturbed and control simulation
"""
with Dataset(ifile_test) as ftest, Dataset(ifile_cntl) as fcntl:
lat = ftest.variables['lat']
lon = ftest.variables['lon']
rmse = pd.DataFrame(columns=('RMSE', 'max diff', 'i', 'j', 'control', 'test', 'lat', 'lon'), index=var_list)
# reshape for RMSE
dims = len(ftest.variables[var_pefix + var_list[0]].dimensions)
if dims == 3: # see if it is SE grid
nx, ny = ftest.variables[var_pefix + var_list[0]][0, ...].shape
nz = 1
else:
nx, ny, nz = ftest.variables[var_pefix + var_list[0]][0, ...].shape
for ivar, vvar in enumerate(var_list):
var = var_pefix + vvar
if var in ftest.variables:
vtest = ftest.variables[var.strip()][0, ...] # first dimension is time (=0)
vcntl = fcntl.variables[var.strip()][0, ...] # first dimension is time (=0)
vrmse = math.sqrt(((vtest - vcntl)**2).mean()) / np.mean(vcntl)
diff = abs(vtest[...] - vcntl[...])
ind_max = np.unravel_index(diff.argmax(), diff.shape)
rmse.loc[vvar] = (vrmse, diff[ind_max], ind_max[0], ind_max[1],
vcntl[ind_max], vtest[ind_max],
lat[ind_max[1]], lon[ind_max[1]])
return rmse
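# Illustrative call (hypothetical file names): compute the normalized RMSE between one
# perturbed instance and the unperturbed control for two temperature-prefixed fields:
#   rmse_df = variables_rmse('case.cam_0002.nc', 'case.cam_0001.nc', ['T', 'Q'], 't_')
#   print(rmse_df[['RMSE', 'max diff']])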
def _print_details(details):
for set_ in details:
print('-' * 80)
print(set_)
print('-' * 80)
pprint(details[set_])
def main(args):
nvar = len(args.variables)
nprt = len(args.perturbations)
# for test cases (new environment etc.)
# logger.debug("PGN_INFO: Test case comparison...")
rmse_prototype = {}
for icond in range(args.ninit):
prt_rmse = {}
for iprt, prt_name in enumerate(args.perturbations):
if prt_name == 'woprt':
continue
iinst_ctrl = _sub2instance(icond, 0, nprt)
ifile_ctrl = os.path.join(args.ref_dir,
args.instance_file_template.format('', args.component, iinst_ctrl, '_woprt'))
# logger.debug("PGN_INFO:CNTL_TST:" + ifile_cntl)
iinst_test = _sub2instance(icond, iprt, nprt)
ifile_test = os.path.join(args.test_dir,
args.instance_file_template.format(
args.test_case + '.', args.component, iinst_test, '_' + prt_name))
# logger.debug("PGN_INFO:TEST_TST:" + ifile_test)
prt_rmse[prt_name] = variables_rmse(ifile_test, ifile_ctrl, args.variables, 't_')
rmse_prototype[icond] = pd.concat(prt_rmse)
rmse = pd.concat(rmse_prototype)
comp_rmse = np.reshape(rmse.RMSE.values, (args.ninit, nprt-1, nvar))
rmse_writer(os.path.join(args.test_dir, 'comp_cld.nc'),
comp_rmse, args.perturbations.keys(), args.variables, args.init_file_template, args.init_model)
details = OrderedDict()
with Dataset(os.path.join(args.ref_dir, args.pge_cld)) as ref_cld:
ref_dims = ref_cld.variables['rmse'].shape
cmp_dims = (args.ninit, nprt - 1, nvar)
try:
assert(ref_dims == cmp_dims)
except AssertionError as e:
be = BaseException(
'PGE curve dimensions (ninit, nptr, nvar) should be the same:\n'
' CLD:{} COMP:{}'.format(ref_dims, cmp_dims))
six.raise_from(be, e)
ref_rmse = ref_cld.variables['rmse'][...]
details['ref. data'] = ref_rmse
pge_ends_cld = ref_rmse[:, :, -1]
pge_ends_comp = comp_rmse[:, :, -1]
# run the t-test
pge_ends_cld = pge_ends_cld.flatten()
pge_ends_comp = pge_ends_comp.flatten()
t_stat, p_val = stats.ttest_ind(pge_ends_cld, pge_ends_comp)
if np.isnan((t_stat, p_val)).any() or np.isinf((t_stat, p_val)).any():
details['T test (t, p)'] = (None, None)
else:
details['T test (t, p)'] = '({:.3f}, {:.3f})'.format(t_stat, p_val)
# logger.warn(" T value:" + str(t_stat))
# logger.warn(" P value:" + str(p_val))
crit = 0.05
if t_stat is None:
details['h0'] = '-'
elif p_val < crit:
details['h0'] = 'reject'
else:
details['h0'] = 'accept'
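# Worked example of the decision rule above (illustrative numbers, crit = 0.05):
#   >>> from scipy import stats
#   >>> stats.ttest_ind([1.0, 1.1, 0.9], [1.0, 1.05, 0.95]).pvalue > 0.05
#   True    # -> 'accept': the two ensembles are statistically indistinguishable
# whereas a p-value below 0.05 would lead to 'reject' (the ensembles differ).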
# logger.debug("PGN_INFO: POST PROCESSING PHASE ENDS")
details['test data'] = rmse
ref_max_y = ref_rmse.max(axis=(0, 1)).astype(np.double)
ref_min_y = ref_rmse.min(axis=(0, 1)).astype(np.double)
cmp_max_y = comp_rmse.max(axis=(0, 1)).astype(np.double)
cmp_min_y = comp_rmse.min(axis=(0, 1)).astype(np.double)
img_file = os.path.relpath(os.path.join(args.img_dir, 'plot_comp.png'), os.getcwd())
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 8), sharey='all', gridspec_kw={'width_ratios': [3, 1]})
plt.rc('font', family='serif')
ax1.semilogy(ref_max_y, color='C0')
ax1.semilogy(ref_min_y, color='C0')
ax1.fill_between(range(ref_dims[-1]), ref_min_y, ref_max_y, color='C0', alpha=0.5)
ax1.semilogy(cmp_max_y, color='C1')
ax1.semilogy(cmp_min_y, color='C1')
ax1.fill_between(range(cmp_dims[-1]), cmp_min_y, cmp_max_y, color='C1', alpha=0.5)
ax1.set_xticks(range(len(args.variables)))
ax1.set_xticklabels(args.variables, rotation=45, ha='right')
ax1.set_ylabel('Temperature RMSE (K)')
patch_list = [mpatches.Patch(color='C0', alpha=0.5, label='Ref.'),
mpatches.Patch(color='C1', alpha=0.5, label='Test')]
ax1.legend(handles=patch_list, loc='upper left')
scale_std = 1/np.sqrt(len(pge_ends_comp))
tval_crit = stats.t.ppf(1 - crit, df=len(pge_ends_comp) - 1)
ax2.errorbar(1, pge_ends_cld.mean(), xerr=np.stack([[0.1, 0.1]]).T,
fmt='none', ecolor='C0')
# Note: Because these are so close to zero, but are best plotted on a
# semilogy plot, the mean ± 2*σ/√N range or the mean ± Tc*σ/√N, where
# Tc is the critical t test value, can cross zero.
ax2.errorbar(1, pge_ends_comp.mean(), yerr=pge_ends_comp.std() * tval_crit * scale_std,
fmt='oC1', elinewidth=20, ecolor='C1', alpha=0.5)
# ax2.errorbar(0.5, pge_ends_comp.mean(), yerr=pge_ends_comp.std() * 2 * scale_std,
# fmt='k.', elinewidth=20, ecolor='C1', alpha=0.5)
ax2.set_xlim([0.8, 1.2])
ax2.set_xticks([1])
ax2.set_xticklabels([args.variables[-1]], rotation=45, ha='right')
plt.tight_layout()
plt.savefig(img_file, bbox_inches='tight')
plt.close(fig)
img_desc = 'Left: The evolution of the maximum temperature (K) RMSE over a ' \
'single time
# Source repository: pllim/halotools
"""
Module containing the `~halotools.mock_observables.mean_delta_sigma` function
used to calculate galaxy-galaxy lensing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from functools import partial
import multiprocessing
from .engines import mean_delta_sigma_engine
from ..mock_observables_helpers import (get_num_threads, get_separation_bins_array,
get_period, enforce_sample_respects_pbcs, enforce_sample_has_correct_shape)
from ..pair_counters.rectangular_mesh_2d import RectangularDoubleMesh2D
from ..pair_counters.mesh_helpers import _set_approximate_2d_cell_sizes
from ..pair_counters.mesh_helpers import _cell1_parallelization_indices
from ..pair_counters.mesh_helpers import _enclose_in_square
from ...utils.array_utils import custom_len
__all__ = ('mean_delta_sigma', )
__author__ = ('<NAME>', '<NAME>')
def mean_delta_sigma(galaxies, particles, effective_particle_masses,
rp_bins, period=None, verbose=False, num_threads=1,
approx_cell1_size=None, approx_cell2_size=None,
per_object=False):
r"""
Calculate :math:`\Delta\Sigma(r_p)`, the galaxy-galaxy lensing signal
as a function of projected distance.
The `mean_delta_sigma` function calculates :math:`\Delta\Sigma(r_p)` by calculating
the excess surface density of particles in cylinders surrounding the input galaxies.
The input particles should be a random downsampling of particles in the
same simulation snapshot as the model galaxies.
By using the ``effective_particle_masses`` argument, the function works equally well
with DM-only simulations as with hydro simulations that include
multiple species of particles with different masses and/or different downsampling rates.
Example calls to this function appear in the documentation below.
See also :ref:`galaxy_catalog_analysis_tutorial3`.
Parameters
----------
galaxies : array_like
Numpy array of shape (num_gal, 3) containing 3-d positions of galaxies.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
See the :ref:`mock_obs_pos_formatting` documentation page for
instructions on how to transform your coordinate position arrays into the
format accepted by the ``galaxies`` and ``particles`` arguments.
particles : array_like
Numpy array of shape (num_ptcl, 3) containing 3-d positions of particles.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
effective_particle_masses : float or ndarray
Float or array storing the effective mass of each particle in units of Msun with h=1 units.
If passing in an ndarray, must be of shape (num_ptcl, ),
one array element for every particle.
If passing in a single float, it will be assumed that every particle
has the same mass (as is the case in a typical DM-only simulation).
The effective mass is simply the actual mass multiplied by the downsampling rate.
For example, if your simulation has a particle mass of 10**8 and you are using a
sample of particles that have been randomly downsampled at a 1% rate, then
your effective particle mass will be 10**10.
See the Examples section below for how this can be calculated
from Halotools-provided catalogs.
rp_bins : array_like
Numpy array of shape (num_rbins, ) of projected radial boundaries
defining the bins in which the result is calculated.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
period : array_like
Length-3 sequence defining the periodic boundary conditions
in each dimension. If you instead provide a single scalar, Lbox,
period is assumed to be the same in all Cartesian directions.
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
per_object : bool, optional
Boolean flag specifying whether the function will return the per-object
lensing signal. Default is False, in which the returned array will be
an average over the entire sample. If True, the returned ndarray will
have shape (num_gal, num_rbins)
num_threads : int, optional
Number of threads to use in calculation, where parallelization is performed
using the python ``multiprocessing`` module. Default is 1 for a purely serial
calculation, in which case a multiprocessing Pool object will
never be instantiated. A string 'max' may be used to indicate that
the pair counters should use all available cores on the machine.
approx_cell1_size : array_like, optional
Length-3 array serving as a guess for the optimal manner by which points
will be apportioned into subvolumes of the simulation box.
The optimum choice unavoidably depends on the specs of your machine.
Default choice is to use Lbox/10 in each dimension,
which gives reasonable performance for most use-cases.
Performance can vary sensitively with this parameter, so it is highly
recommended that you experiment with this parameter when carrying out
performance-critical calculations.
approx_cell2_size : array_like, optional
Analogous to ``approx_cell1_size``, but for the ``particles`` argument. See comments for
``approx_cell1_size`` for details.
Returns
-------
Delta_Sigma : array_like
Numpy array of shape (num_rbins-1, ) storing :math:`\Delta\Sigma(r_p)`
in comoving units of :math:`h M_{\odot} / {\rm Mpc}^2` assuming h=1.
If per_object is True, Delta_Sigma will instead have shape (num_gal, num_rbins)
Examples
--------
For demonstration purposes we will calculate `mean_delta_sigma` using a mock
catalog based on the `~halotools.sim_manager.FakeSim` class,
which generates a fake simulation on-the-fly.
>>> from halotools.sim_manager import FakeSim
>>> halocat = FakeSim()
Now let's populate this halo catalog with mock galaxies.
>>> from halotools.empirical_models import PrebuiltHodModelFactory
>>> model = PrebuiltHodModelFactory('leauthaud11', threshold = 11.)
>>> model.populate_mock(halocat)
Now we retrieve the positions of our mock galaxies and transform the arrays
into the shape of the ndarray expected by the `~halotools.mock_observables.mean_delta_sigma`
function. We transform our *x, y, z* points into the array shape used by the pair-counter by
taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
is used throughout the `~halotools.mock_observables` sub-package:
>>> x = model.mock.galaxy_table['x']
>>> y = model.mock.galaxy_table['y']
>>> z = model.mock.galaxy_table['z']
>>> galaxies = np.vstack((x, y, z)).T
The `~halotools.mock_observables.return_xyz_formatted_array` function
also performs this same transformation, and can also be used to place mock
galaxies into redshift-space for additional observational realism.
Let's do the same thing for a set of particle data:
>>> px = model.mock.ptcl_table['x']
>>> py = model.mock.ptcl_table['y']
>>> pz = model.mock.ptcl_table['z']
>>> particles = np.vstack((px, py, pz)).T
The default Halotools catalogs come with ~1e6 particles.
Using this many particles may be overkill: in many typical use-cases,
the `mean_delta_sigma` function converges at the percent-level using
an order of magnitude fewer particles.
The code below shows how to (optionally) downsample these particles
using a Halotools convenience function.
>>> from halotools.utils import randomly_downsample_data
>>> num_ptcls_to_use = int(1e4)
>>> particles = randomly_downsample_data(particles, num_ptcls_to_use)
>>> particle_masses = np.zeros(num_ptcls_to_use) + halocat.particle_mass
>>> total_num_ptcl_in_snapshot = halocat.num_ptcl_per_dim**3
>>> downsampling_factor = total_num_ptcl_in_snapshot/float(len(particles))
>>> effective_particle_masses = downsampling_factor * particle_masses
>>> rp_bins = np.logspace(-1, 1, 10)
>>> period = model.mock.Lbox
>>> ds = mean_delta_sigma(galaxies, particles, effective_particle_masses, rp_bins, period)
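The same calculation can optionally return one profile per galaxy rather than
the sample average by using the ``per_object`` argument documented above.
The call below is a sketch of that usage with the arrays defined in this example:
>>> ds_per_gal = mean_delta_sigma(galaxies, particles, effective_particle_masses, rp_bins, period, per_object=True)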
Take care with the units. The values for :math:`\Delta\Sigma` returned by
the `mean_delta_sigma` functions are in *comoving* units of
:math:`h M_{\odot} / {\rm Mpc}^2` assuming h=1,
whereas the typical units used to plot :math:`\Delta\Sigma` are in
*physical* units of :math:`M_{\odot} / {\rm pc}^2` using the value of
little h appropriate for your assumed cosmology.
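As a rough sketch of that conversion (the values of little h and the snapshot
redshift z below are illustrative assumptions, not quantities read from the
catalog used above):
>>> h, z = 0.7, 0.0  # assumed cosmology and redshift, for illustration only
>>> ds_physical = ds * h * (1 + z)**2 / 1e12  # approximate physical Msun/pc**2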
The code shown above demonstrates how to calculate :math:`\Delta\Sigma` via the excess
surface density of mass using the z-axis as the axis of projection. However, it may be useful
to project along the other Cartesian axes, for example to help beat down sample variance.
While the `mean_delta_sigma` function is written to always use the "third" dimension as the
projection axis, you can easily hack the code to project along, say, the y-axis by simply
swapping your y- and z-coordinates when you pack them into a 2-d array:
>>> particles = np.vstack((px, pz, py)).T
>>> galaxies = np.vstack((x, z, y)).T
Using the above ``particles`` and ``galaxies`` and otherwise calling the `mean_delta_sigma`
function as normal will instead calculate the surface mass density by projecting
along the y-axis.
See also
--------
:ref:`galaxy_catalog_analysis_tutorial3`
"""
# Process the inputs with the helper function
result = _mean_delta_sigma_process_args(
galaxies, particles, effective_particle_masses, rp_bins,
period, num_threads, approx_cell1_size, approx_cell2_size)
x1in, y1in, x2in, y2in, w2in = result[0:5]
rp_bins, period, num_threads, PBCs, approx_cell1_size, approx_cell2_size = result[5:]
xperiod, yperiod = period[:2]
rp_max = np.max(rp_bins)
search_xlength, search_ylength = rp_max, rp_max
# Compute the estimates for the cell sizes
approx_cell1_size, approx_cell2_size = (_set_approximate_2d_cell_sizes(
approx_cell1_size, approx_cell2_size, period))
approx_x1cell_size, approx_y1cell_size = approx_cell1_size
approx_x2cell_size, approx_y2cell_size = approx_cell2_size
# Build the rectangular mesh
double_mesh = RectangularDoubleMesh2D(
x1in, y1in, x2in, y2in,
approx_x1cell_size, approx_y1cell_size,
approx_x2cell_size, approx_y2cell_size,
search_xlength, search_ylength, xperiod, yperiod, PBCs)
# Create a function object that has a single argument, for parallelization
# purposes
counting_engine = partial(mean_delta_sigma_engine, double_mesh, x1in, y1in,
x2in, y2in, w2in, rp_bins)
# Calculate the cell1 indices that will be looped over by the engine
num_threads, cell1_tuples = _cell1_parallelization_indices(
double_mesh.mesh1.ncells, num_threads)
if num_threads | |
the maximum number of terminal nodes
if len(terminal_nodes) >= plot_n_taxa:
break
# Now we need to go through and make sure to add any taxon which is >= 25% of the selected CAGs
for tax_id in cag_taxa_df.query(
"prop_specific >= 0.25"
)["tax_id"].drop_duplicates().values:
add_taxon(tax_id, terminal_nodes, included_nodes, tax)
# Make a DataFrame with all of the included taxa for the betta results
betta_taxa_df = pd.DataFrame({
tax.name(terminal_tax_id): {
rank: tax.anc_at_rank(terminal_tax_id, rank)
for rank in rank_order
}
for terminal_tax_id in list(terminal_nodes)
}).T
# Sort alphabetically by scientific name
betta_taxa_df = betta_taxa_df.reindex(
index=betta_taxa_df.applymap(
tax.name
).sort_values(
by=rank_order
).index
)
# To make plotting more understandable, we can fill the most specific taxonomic rank
# with the name of whatever organism has been measured immediately above it
betta_taxa_df = betta_taxa_df.apply(
lambda c: c.fillna(
betta_taxa_df.apply(
lambda r: r.dropna(
).values[-1] if r.dropna().shape[0] > 0 else "",
axis=1
).apply(
lambda s: " {} ".format(s)
)
) if c.name == rank_order[-1] else c
)
# Drop any rows with no taxa assigned
betta_taxa_df = betta_taxa_df.reindex(
index=[
i
for i in betta_taxa_df.index.values
if not pd.isnull(i)
]
)
# For each taxon, figure out the x- and y-coordinates from the taxonomy table
plot_dat = []
x_pos = 0
for rank_name, rank_taxa in betta_taxa_df.items():
x_pos -= 2
for org_name in rank_taxa.dropna().unique():
# The index position is the start of the vertical range
y_start = rank_taxa.tolist().index(org_name)
# The number of cells with this organism in the height of the vertical range
y_height = (rank_taxa == org_name).sum()
# The y position is in the middle of the vertical range
y_pos = y_start + (y_height / 2)
# Get the first non-null ancestor for this node
if rank_order.index(rank_name) == 0:
ancestor = None
else:
ancestor = betta_taxa_df.loc[
betta_taxa_df[rank_name] == org_name, :rank_name
].iloc[
0
].dropna()
if ancestor.shape[0] > 1:
ancestor = ancestor.values[-2]
else:
ancestor = None
plot_dat.append({
"name": tax.name(org_name.strip(" ")),
"tax_id": org_name,
"x": x_pos,
"y": y_pos,
"ancestor": ancestor if ancestor != org_name else None,
"rank": rank_name
})
plot_dat = pd.DataFrame(plot_dat)
# Add the wald statistic
plot_dat = plot_dat.assign(
Wald=plot_dat["name"].fillna(
""
).apply(
lambda s: s.strip(" ")
).apply(
betta.set_index("label")["wald"].get
).fillna(0)
).set_index(
"tax_id"
)
# SETTING UP THE PLOT AREA
# SET UP DIFFERENTLY DEPENDING ON WHETHER THE FUNCTIONS WILL BE INCLUDED
if include_functions:
fig, axarr = plt.subplots(
4,
3,
figsize=figsize,
sharey="row",
gridspec_kw={
'height_ratios': [
6,
# Set the height of the functional subplot differently if a smaller number of functions are being plotted
6 if show_only_functions is None else 6 * (len(show_only_functions.split(",")) / plot_n_functions),
1,
1
],
'width_ratios': [3, 1, 3],
}
)
heatmap_ax = axarr[0, 0]
taxa_wald_ax = axarr[0, 1]
tree_ax = axarr[0, 2]
func_ax = axarr[1, 0]
func_wald_ax = axarr[1, 1]
cag_wald_ax = axarr[2, 0]
cag_size_ax = axarr[3, 0]
empty_subplots = [
axarr[1, 2],
axarr[2, 1],
axarr[2, 2],
axarr[3, 1],
axarr[3, 2],
]
else:
fig, axarr = plt.subplots(
3,
3,
figsize=figsize,
sharey="row",
gridspec_kw={
'height_ratios': [6, 1, 1],
'width_ratios': [3, 1, 3],
}
)
heatmap_ax = axarr[0, 0]
taxa_wald_ax = axarr[0, 1]
tree_ax = axarr[0, 2]
cag_wald_ax = axarr[1, 0]
cag_size_ax = axarr[2, 0]
empty_subplots = [
axarr[1, 1],
axarr[1, 2],
axarr[2, 1],
axarr[2, 2],
]
# Clear the unused subplots
for ax in empty_subplots:
ax.axis("off")
ax.grid(False)
# Add the lines and labels to the taxa plot
for org_name, r in plot_dat.iterrows():
# Label each organism
tree_ax.text(
r["x"],
r["y"] + 0.5,
tax.name(org_name.strip(" ")),
horizontalalignment="center",
verticalalignment="center",
rotation=5,
zorder=2,
)
if r["ancestor"] is not None:
# Add a line to the ancestor
if r["x"] - plot_dat.loc[r["ancestor"], "x"] == 1.:
tree_ax.plot(
[r["x"], plot_dat.loc[r["ancestor"], "x"]],
[r["y"], plot_dat.loc[r["ancestor"], "y"]],
linestyle="--",
alpha=0.5,
color="grey",
zorder=1,
)
else:
tree_ax.plot(
[r["x"], plot_dat.loc[r["ancestor"], "x"] -
1, plot_dat.loc[r["ancestor"], "x"]],
[r["y"], r["y"], plot_dat.loc[r["ancestor"], "y"]],
linestyle="--",
alpha=0.5,
color="grey",
zorder=1,
)
# Draw the points for the tree over the lines
sns.scatterplot(
data=plot_dat,
x="x",
y="y",
hue="Wald",
hue_norm=(-plot_dat["Wald"].abs().max(), plot_dat["Wald"].abs().max()),
palette="RdBu",
linewidth=1,
edgecolor="black",
zorder=2,
ax=tree_ax
)
tree_ax.axis("off")
tree_ax.grid(False)
tree_ax.legend(bbox_to_anchor=[1.1, 0.9])
# Now make a plot with the proportion of genes assigned to the top CAGs
cag_prop_df = pd.DataFrame({
cag_id: {
row_ix: d.loc[
d["name"].isin(row_taxa.apply(
tax.name).dropna().drop_duplicates().values),
"prop_exact"
].sum()
for row_ix, row_taxa in betta_taxa_df.iterrows()
}
for cag_id, d in cag_taxa_df.groupby("CAG")
})
# Reorder the DataFrame for plotting to match the taxonomic tree
cag_prop_df = cag_prop_df.reindex(
index=betta_taxa_df.index.values,
columns=cag_prop_df.columns.values[
leaves_list(linkage(
cag_prop_df.T,
method="ward",
metric="euclidean",
optimal_ordering=True,
))
]
)
# PLOT THE CAG TAXONOMIC HEATMAP
sns.heatmap(
data=cag_prop_df.rename(
columns=lambda cag_id: "CAG {}".format(cag_id)
),
cmap="Blues",
ax=heatmap_ax,
cbar=False,
xticklabels=1,
yticklabels=1
)
heatmap_ax.xaxis.set_ticks_position('top')
heatmap_ax.tick_params(
axis="x",
rotation=90,
)
# PLOT THE WALD BY TERMINAL NODE IN TAXA GRAPH
taxa_wald = betta.set_index("label").reindex(
index=cag_prop_df.index.values
)["wald"]
taxa_wald.plot(
kind="barh",
ax=taxa_wald_ax,
align="edge",
width=1,
)
taxa_wald_ax.xaxis.set_ticks_position('top')
taxa_wald_ax.set_title("Wald Statistic")
taxa_wald_ax.tick_params(
axis="x",
labelbottom=False,
bottom=False,
labeltop=True,
top=True
)
if include_functions:
# MAKE THE CAG FUNCTION HEATMAP
betta_func_df, betta_func_details = get_betta_functions(
hdf_fp,
parameter,
exclude_terms=exclude_terms,
exclude_prefixes=exclude_prefixes,
show_only_functions=show_only_functions,
cags_to_plot=cag_prop_df.columns.values,
plot_n_functions=plot_n_functions
)
# PLOT THE CAG FUNCTION WALD STATISTICS
betta_func_details["wald"].plot(
kind="barh",
width=1,
align="edge",
ax=func_wald_ax,
)
func_wald_ax.set_xlabel("Wald Statistic")
# Set the xlim to be the same for both of the Wald barplots for taxa and functions
wald_min = min(betta_func_details["wald"].min(), taxa_wald.min(), 0)
wald_max = max(betta_func_details["wald"].max(), taxa_wald.max(), 0)
wald_span = wald_max - wald_min
func_wald_ax.set_xlim(
wald_min - (wald_span * 0.05),
wald_max + (wald_span * 0.05)
)
taxa_wald_ax.set_xlim(
wald_min - (wald_span * 0.05),
wald_max + (wald_span * 0.05)
)
sns.heatmap(
data=betta_func_df,
cmap="Blues",
ax=func_ax,
cbar=False,
xticklabels=[],
yticklabels=1,
)
# Rotate the yticklabels
func_ax.set_yticklabels(func_ax.get_yticklabels(), rotation=0)
# Make a plot with the CAG size in the lowest subplot
cag_annot.reindex(
cag_prop_df.columns.values
)["size"].rename(
index=lambda cag_id: "CAG {}".format(cag_id)
).apply(
np.log10
).plot(
kind="bar",
ax=cag_size_ax,
width=1,
)
# Make a plot with the CAG wald statistic in the second lowest subplot
cag_annot.reindex(
cag_prop_df.columns.values
)["wald"].plot(
kind="bar",
ax=cag_wald_ax,
width=1,
)
# Customize the axis labels
cag_size_ax.set_ylabel(
"CAG Size \n(# of genes, log10)",
)
cag_wald_ax.set_xticks([])
cag_wald_ax.set_xlabel("")
cag_wald_ax.set_ylabel(
"Wald Statistic",
)
# # Set the limits of the horizontal CAG axis
# cag_wald_ax.set_xlim(-0.5, cag_annot.shape[0] - 0.5)
# cag_size_ax.set_xlim(-0.5, cag_annot.shape[0] - 0.5)
plt.tight_layout(w_pad=0.05, h_pad=0.1)
# Adjust the vertical position of the row labels for the taxonomic heatmap
heatmap_ax.set_yticks([
v + 0.5
for v in range(cag_prop_df.shape[0])
])
if pdf is not None:
pdf.savefig(bbox_inches="tight")
plt.close()
def plot_top_annotations(betta, pdf=None, exclude_terms=[], exclude_prefixes=[]):
for annotation, annotation_df in betta.groupby("annotation"):
plot_df = annotation_df.copy(
).set_index(
"label"
)
# Remove any blacklisted annotations
to_remove = []
for d in plot_df.index.values:
# Skip excluded terms
if len(exclude_terms) > 0 and d in exclude_terms:
to_remove.append(d)
continue
# Skip excluded prefixes
if any([d.startswith(n) for n in exclude_prefixes]):
to_remove.append(d)
continue
if len(to_remove) > 0:
plot_df = plot_df.drop(index=to_remove)
fig, ax = plt.subplots()
ax.axvline(x=0, linestyle="--", alpha=0.5)
y = 0
label_list = []
for label, r in plot_df.sort_values(
by="abs_wald",
ascending=False
).head(
20
).iterrows(
):
ax.plot(
[r["estimate"] - (r["std_error"] / 2), r["estimate"] + (r["std_error"] / 2)],
[y, y],
color="black"
)
ax.scatter([r["estimate"]], [y], color="black")
y += 1
label_list.append(label)
ax.set_yticks(list(range(len(label_list))))
ax.set_yticklabels(label_list)
ax.set_title(annotation)
plt.tight_layout()
if pdf is not None:
pdf.savefig(bbox_inches="tight")
plt.close()
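# Hedged usage sketch for plot_top_annotations (the column names are inferred
# from the code above; the values shown are illustrative, not real results):
#   import pandas as pd
#   betta = pd.DataFrame([{
#       "annotation": "eggNOG_desc", "label": "ABC transporter",
#       "estimate": 0.5, "std_error": 0.1, "abs_wald": 5.0,
#   }])
#   plot_top_annotations(betta)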
if __name__ == "__main__":
log_formatter = logging.Formatter(
"%(asctime)s %(levelname)-8s [Geneshot Plot Betta] %(message)s"
)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
# Write logs to STDOUT
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
parser = argparse.ArgumentParser(
description="""
Plot estimated coefficients aggregated by taxa and function using betta.
Note: compatible with output from geneshot v0.6.0 and higher.
Example Usage:
plot_betta_associations.py \
--hdf <HDF_FP> \
--parameter <PARAMETER> \
--out <PDF_FP>
Required:
--hdf: Path to results HDF5 file generated by geneshot
--parameter: Name of the parameter from the formula to be displayed
--out: Path to output PDF
Run with --help for a complete list of optional parameters.
"""
)
parser.add_argument(
"--hdf",
type=str,
required=True,
help="Path to results HDF5 file generated by geneshot"
)
parser.add_argument(
"--parameter",
type=str,
required=True,
help="Name of the parameter from the formula to be displayed"
)
parser.add_argument(
"--out",
type=str,
required=True,
help="Path to output PDF"
)
parser.add_argument(
"--cag-annotations",
type=str,
default=None,
help="If specified, write out a table of annotations for all displayed CAGs in CSV format"
)
parser.add_argument(
"--n-cags",
type=int,
default=50,
help="Number of CAGs to include"
)
parser.add_argument(
"--min-cag-size",
type=int,
default=10,
help="Only display CAGs containing at least this many genes"
)
parser.add_argument(
"--max-cag-size",
type=int,
default=None,
help="If specified, exclude CAGs made up of more than this | |
"""
Material.
Classes to create and manage music materials and lyrics.
"""
import abjad
from itertools import cycle
class Lyrics:
"""Lyrics."""
def __init__(self, target):
"""Initializer."""
self.lyrics = None
self.target = target
self.name = target + "_Lyrics"
def write_lyrics(self, lyrics):
"""Method to write lyrics attribute to a ``muda.Lyrics`` instance."""
self.lyrics = lyrics
class Material:
"""Material."""
def __init__(self, name):
"""Initializer."""
self.name = name
self.container = abjad.Container()
self.lyrics = None
self.container.name = name
def __call__(self) -> abjad.Container:
"""It returns ``self.container``."""
return self.container
def write(self, lilypond_string, name=None):
"""It creates container from lilypond string and append to a ``muda.Material()`` instance."""
self.container.append(abjad.Container(lilypond_string, name=name))
def alternating_materials(self, annotated_durations: list, makers: dict):
"""Create alternating materials according to a list of named durations."""
assert isinstance(
annotated_durations[0], list
), "Each duration set must be a list."
material_names = [dur[0].annotation for dur in annotated_durations]
material_names = list(dict.fromkeys(material_names))
for dur in annotated_durations:
for maker, value in makers.items():
if maker == dur[0].annotation:
if isinstance(value, str):
self.container.append(
abjad.Container(
makers[maker], name=maker, identifier="% " + maker
)
)
else:
selection = makers[maker](dur)
if isinstance(selection[0], abjad.Tuplet):
sel = abjad.select(selection).components(abjad.Container)
for container in sel:
container.name = maker
container.identifier = "% " + maker
self.container.append(selection)
else:
self.container.append(
abjad.Container(
selection, name=maker, identifier="% " + maker
)
)
containers = abjad.select(self.container).components(abjad.Container)
# write comments in lilypond code to identify materials
for i, name in enumerate(material_names):
j = 0
for container in containers:
if container.name and container.identifier:
if name in container.name:
container.name = container.name + "_" + str(j)
container.identifier = container.identifier + "_" + str(j)
if isinstance(container, abjad.Tuplet):
string = container.name
comment1 = abjad.LilyPondComment(string)
abjad.attach(comment1, container[0])
comment2 = abjad.LilyPondComment(string, format_slot="after")
abjad.attach(comment2, container[-1])
j += 1
def write_pitches(self, pitches):
"""Write pitches to notes in the Material instance."""
logical_ties = abjad.select(self.container).leaves().logical_ties(pitched=True)
for i, logical_tie in enumerate(logical_ties):
index = i % len(pitches)
pitch = pitches[index]
for note in logical_tie:
note.written_pitch = pitch
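    # Hedged usage sketch for write_pitches (the rhythm and pitches below are
    # illustrative assumptions, not taken from any particular score):
    #   mat = Material("sketch")
    #   mat.write(r"c'4 c'8 c'8 r4 c'4")
    #   mat.write_pitches(["e'", "g'", "bf'"])  # pitches cycle over pitched logical ties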
def write_pitches_by_name(self, annotated_pitches):
"""Write pitches to logical ties in named container."""
for material_name, pitches in annotated_pitches.items():
selectable = self.select_material(self.container, material_name)
selection = abjad.select(selectable).leaves().logical_ties(pitched=True)
for i, logical_tie in enumerate(selection):
index = i % len(pitches)
pitch = pitches[index]
for note in logical_tie:
note.written_pitch = pitch
def write_pitches_by_duration(
self,
annotated_pitches: dict,
annotated_durations: list,
randomize=0,
):
"""write pitches to notes according to annotated durations."""
assert isinstance(
annotated_durations[0], list
), "each duration set must be a list."
chords_test = False
for key, pitches in annotated_pitches.items():
for item in pitches:
if isinstance(item, list):
chords_test = True
if chords_test:
# convert notes to chords
conversion = abjad.select(self.container).leaves(pitched=True)
for note in conversion:
chord = abjad.Chord("c'", note.written_duration)
abjad.mutate.replace(note, chord)
# select by duration:
abjad_durations = [sum(dur) for dur in annotated_durations]
selector = (
abjad.select()
.leaves()
.partition_by_durations(
abjad_durations,
cyclic=False,
fill=abjad.Exact,
in_seconds=False,
overhang=True,
)
)
selections = selector(self.container)
for key in annotated_pitches:
pitches = cycle(annotated_pitches[key])
for selection, duration in zip(selections, annotated_durations):
logical_ties = (
abjad.select(selection).leaves().logical_ties(pitched=True)
)
for a, logical_tie in enumerate(logical_ties):
for item in duration:
if item.annotation == key:
pitch = next(pitches)
print(pitch)
for b, chord in enumerate(logical_tie):
if isinstance(chord, abjad.Chord):
chord.written_pitches = pitch
else:
chord.written_pitch = pitch
def show_selection_indices(self, select: abjad.select, material_name: str):
"""Illustrate selection with numbers."""
if isinstance(material_name, str):
material_name = [material_name]
if material_name is None:
selectables = [self.container]
else:
selectables = []
for mat_name in material_name:
selectable = self.select_material(
self.container, material_name=mat_name
)
selectables.append(selectable)
for selectable in selectables:
selection = select(selectable)
for i, leaf in enumerate(selection):
str_ = r"\tiny {\null { \raise #2 {%i}}}" % i
abjad.attach(
abjad.Markup(str_, direction=abjad.Up),
leaf,
)
def see_leaves_number(self, select="leaves", pitched=None):
"""Illustrate ``muda.Material.container`` with leaves number."""
if select == "leaves":
selection = abjad.select(self.container).leaves(
pitched=pitched, grace=False
)
for i, leaf in enumerate(selection):
str_ = r"\tiny {\null { \raise #2 {%i}}}" % i
abjad.attach(
abjad.Markup(str_, direction=abjad.Up),
leaf,
)
elif select == "logical_ties":
selection = (
abjad.select(self.container)
.leaves()
.logical_ties(pitched=pitched, grace=False)
)
for i, leaf in enumerate(selection):
str_ = r"\tiny {\null { \raise #2 {%i}}}" % i
abjad.attach(
abjad.Markup(str_, direction=abjad.Up),
leaf[0],
)
# abjad.show(self.container)
def see_materials_leaves_number(self, pitched=True):
"""Illustrate ``muda.Material.container`` with materials leaves number."""
selection1 = abjad.select(self.container).components(abjad.Container)
for container in selection1:
if container.name is not None and container.name is not self.name:
if isinstance(container[0], abjad.Container):
if container[0].name is not container.name:
selection2 = abjad.select(container).leaves(
pitched=pitched, grace=False
)
for i, leaf in enumerate(selection2):
abjad.attach(
abjad.Markup(str(i), direction=abjad.Up),
leaf,
)
if i == 0:
abjad.attach(
abjad.Markup(container.name, direction=abjad.Up),
leaf,
)
else:
selection2 = abjad.select(container).leaves(
pitched=pitched, grace=False
)
for i, leaf in enumerate(selection2):
abjad.attach(
abjad.Markup(str(i), direction=abjad.Up),
leaf,
)
if i == 0:
abjad.attach(
abjad.Markup(container.name, direction=abjad.Up),
leaf,
)
# abjad.show(self.container)
def dynamics(
self,
dynamics: dict,
material_name=None,
):
"""
:param dynamics: dict (key: str, value: abjad.select)
:param material_name: str
"""
if material_name is None:
selectable = self.container
else:
selectable = self.select_material(
self.container, material_name=material_name
)
for key, select in dynamics.items():
selection = select(selectable)
if abjad.Dynamic.is_dynamic_name(key):
if isinstance(selection, abjad.Leaf):
abjad.attach(abjad.Dynamic(key), selection)
else:
for _ in selection:
abjad.attach(abjad.Dynamic(key), _)
elif isinstance(selection[0], abjad.Selection):
for sel in selection:
abjad.hairpin(key, sel)
else:
abjad.hairpin(key, selection)
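    # Hedged usage sketch for dynamics (uses the deferred abjad.select() style
    # already used elsewhere in this module; the dynamic name is illustrative):
    #   mat.dynamics({"p": abjad.select().leaves(pitched=True)})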
def write_indicators(
self,
material_name=None,
dynamics=None,
articulations=None,
slur_up=None,
slur_down=None,
change_staffs_names=None,
pitched=True,
):
"""Write indicators to leaves."""
if material_name is not None:
selection1 = abjad.select(self.container).components(abjad.Container)
for container in selection1:
if container.name is not None and (
(isinstance(material_name, list) or material_name in container.name)
):
selection2 = abjad.select(container).leaves(pitched=pitched)
if dynamics:
for key in dynamics:
if dynamics[key] == "all":
for i, leaf in enumerate(selection2[0::2]):
a = i
b = i + 1
abjad.hairpin(key, selection2[a:b])
else:
for i in dynamics[key]:
if isinstance(i, tuple):
a, b = i
b = b + 1
abjad.hairpin(key, selection2[a:b])
else:
abjad.hairpin(key, selection2[i])
# attach slurs
if slur_up:
for n in slur_up:
a, b = n
b = b
abjad.attach(abjad.StartSlur(), selection2[a])
abjad.attach(abjad.StopSlur(), selection2[b])
if slur_down:
for n in slur_down:
a, b = n
abjad.attach(
abjad.StartSlur(direction=abjad.Down), selection2[a]
)
abjad.attach(abjad.StopSlur(), selection2[b])
if articulations:
for key in articulations:
for i in articulations[key]:
abjad.attach(abjad.Articulation(key), selection2[i])
else:
selection = abjad.select(self.container).leaves(pitched=pitched)
if dynamics:
for key in dynamics:
for i in dynamics[key]:
if isinstance(i, tuple):
a, b = i
b = b + 1
abjad.hairpin(key, selection[a:b])
else:
abjad.hairpin(key, selection[i])
# attach slurs
if slur_up:
for n in slur_up:
a, b = n
b = b
abjad.attach(abjad.StartSlur(), selection[a])
abjad.attach(abjad.StopSlur(), selection[b])
if slur_down:
for n in slur_down:
a, b = n
abjad.attach(abjad.StartSlur(direction=abjad.Down), selection[a])
abjad.attach(abjad.StopSlur(), selection[b])
if articulations:
for key in articulations:
for i in articulations[key]:
abjad.attach(abjad.Articulation(key), selection[i])
# # change staff
# rh_staff = score['Piano_Staff'][0]
# lh_staff = score['Piano_Staff'][1]
# voice_four = score['LH_Voice_Four']
# staff_change1 = abjad.StaffChange(lh_staff)
# staff_change2 = abjad.StaffChange(rh_staff)
# abjad.attach(staff_change2, voice_four[-5])
# abjad.attach(staff_change1, voice_four[-2])
def attach(
self,
argument,
select: abjad.select,
material_name=None,
):
"""Attach ``argument`` to leaves."""
if isinstance(argument, (str, list)):
argument = abjad.LilyPondLiteral(argument)
if isinstance(material_name, str):
material_name = [material_name]
if material_name is None:
selectables = [self.container]
else:
selectables = []
for mat_name in material_name:
selectable = self.select_material(
self.container, material_name=mat_name
)
selectables.append(selectable)
for selectable in selectables:
selection = select(selectable)
if isinstance(selection, abjad.Leaf):
abjad.attach(argument, selection)
else:
for _ in selection:
abjad.attach(argument, _)
def note_heads(self, argument, select: abjad.select or list, material_name):
"""Change leaves note heads."""
if isinstance(material_name, str): # it can be a list
material_name = [material_name]
if isinstance(select, abjad.Selection): # it can be a list
select = [select]
if material_name is None:
selectables = [self.container]
else:
selectables = []
for mat_name in material_name:
selectable = self.select_material(
self.container, material_name=mat_name
)
selectables.append(selectable)
for selectable in selectables:
for sel in select:
selection = sel(selectable)
if isinstance(selection, abjad.Leaf):
abjad.override(selection).NoteHead.style = argument
else:
for leaf in selection:
abjad.override(leaf).NoteHead.style = argument
#
# selection = abjad.select(self.container[:]).components(abjad.Container)
# for container in selection:
# if container.name is not None and material_name in container.name:
# if select is None:
# abjad.override(container).NoteHead.style = argument
# else:
# selection2 = select(container)
# if isinstance(selection2, abjad.Leaf):
# abjad.override(selection2).NoteHead.style = argument
# else:
# for leaf in selection2:
# abjad.override(leaf).NoteHead.style = argument
def retrograde(self, material_name):
"""Retrograde components in container."""
selection = abjad.select(self.container[:]).components(abjad.Container)
for container in selection:
if container.name is not None and material_name in container.name:
items = abjad.select(container).items
# print(container.components)
new_container = abjad.Container(name=container.name)
for item in reversed(items):
for comp in reversed(item.components):
if isinstance(comp, abjad.Tuplet):
new_tuplet = abjad.Tuplet(
multiplier=comp.multiplier, denominator=comp.denominator
)
for it in reversed(comp.components):
new_tuplet.append(it)
new_container.append(new_tuplet)
elif isinstance(comp, abjad.Container):
new_sub_container = abjad.Container()
for it | |
map-model CC for '%s': %.3f " %(scaled_map_id,cc),
file = self.log)
print ("\n",79*"=","\nDone with local and overall sharpening\n",79*"=",
file = self.log)
def set_map_id_lists(self,kw):
if kw.get('overall_sharpen_before_and_after_local'):
kw['sharpen_all_maps'] = True
if kw.get('map_id') is None:
kw['map_id'] = 'map_manager'
if kw.get('map_id_to_be_scaled_list') is None:
kw['map_id_to_be_scaled_list'] = [kw['map_id']]
if kw.get('sharpen_all_maps') and \
kw.get('map_id_1') and kw.get('map_id_2'): # half-map sharpening
kw['map_id_to_be_scaled_list'].append(kw['map_id_1'])
kw['map_id_to_be_scaled_list'].append(kw['map_id_2'])
if kw.get('map_id_scaled_list') is None:
kw['map_id_scaled_list'] = []
for id in kw['map_id_to_be_scaled_list']:
kw['map_id_scaled_list'].append("%s_scaled" %(id))
return kw
def external_sharpen(self,
map_id = 'map_manager',
map_id_external_map = 'external_map',
map_id_to_be_scaled_list = None,
map_id_scaled_list = None,
exclude_points_outside_density = None,
minimum_boxes_inside_density = None,
resolution = None,
d_min = None,
k_sol = None,
b_sol = None,
n_bins = None,
n_boxes = None,
core_box_size = None,
box_cushion = None,
smoothing_radius = None,
local_sharpen = None,
anisotropic_sharpen = None,
expected_ssqr_list = None,
expected_ssqr_list_rms = None,
tlso_group_info = None,
get_tls_from_u = None,
overall_sharpen_before_and_after_local = False,
get_scale_as_aniso_u = None,
use_dv_weighting = None,
n_direction_vectors = None,
run_analyze_anisotropy = True,
sharpen_all_maps = False,
nproc = None,
):
'''
Scale map_id with scale factors identified from map_id vs
map_id_external_map
Changes the working map_manager
resolution is nominal resolution of map
d_min is minimum resolution to use in calculation of Fourier coefficients
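    Minimal usage sketch (hedged; assumes this map_model_manager already
    contains maps under the default ids 'map_manager' and 'external_map',
    and that 'mmm' is such a manager):

      mmm.external_sharpen(resolution = 3)
      scaled_mm = mmm.get_map_manager_by_id('map_manager_scaled')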
'''
from libtbx import adopt_init_args
kw_obj = group_args()
adopt_init_args(kw_obj, locals())
kw = kw_obj() # save calling parameters in kw as dict
del kw['adopt_init_args'] # REQUIRED
del kw['kw_obj'] # REQUIRED
# Checks
assert self.get_map_manager_by_id(map_id)
assert self.get_map_manager_by_id(map_id_external_map)
# Allow sharpening globally before and after local sharpening
if local_sharpen and overall_sharpen_before_and_after_local:
print ("\nRunning external sharpening (global, local, global)\n",
file = self.log)
return self._sharpen_overall_local_overall(kw = kw,
method = self.external_sharpen)
kw = self.set_map_id_lists(kw)
print ("Running external map sharpening ", file = self.log)
kw['map_id_2'] = map_id_external_map
kw['is_external_based'] = True
kw['remove_overall_anisotropy'] = False # REQUIRED
del kw['map_id_external_map']
kw['model_map_ids_to_leave_as_is'] = [map_id_external_map] # do not remove aniso
self._sharpen_map(**kw)
def half_map_sharpen(self,
map_id = 'map_manager',
map_id_1 = 'map_manager_1',
map_id_2 = 'map_manager_2',
map_id_scaled_list = None,
map_id_to_be_scaled_list = None,
exclude_points_outside_density = None,
minimum_boxes_inside_density = None,
resolution = None,
d_min = None,
k_sol = None,
b_sol = None,
n_bins = None,
n_boxes = None,
core_box_size = None,
box_cushion = None,
smoothing_radius = None,
rmsd = None,
local_sharpen = None,
anisotropic_sharpen = None,
minimum_low_res_cc = None,
get_scale_as_aniso_u = None,
use_dv_weighting = None,
n_direction_vectors = None,
run_analyze_anisotropy = True,
spectral_scaling = True,
expected_rms_fc_list = None,
expected_ssqr_list = None,
expected_ssqr_list_rms = None,
tlso_group_info = None,
get_tls_from_u = None,
model_id_for_rms_fc = None,
replace_aniso_with_tls_equiv = None,
max_abs_b = None,
nproc = None,
optimize_b_eff = None,
equalize_power = None,
overall_sharpen_before_and_after_local = False,
get_tls_info_only = None,
coordinate_shift_to_apply_before_tlso = None,
sharpen_all_maps = False,
remove_overall_anisotropy = True,
):
'''
Scale map_id with scale factors identified from map_id_1 vs map_id_2
Changes the working map_manager unless map_id_scaled_list is set.
max_abs_b applies if get_scale_as_aniso_u and anisotropic_sharpen and
local_sharpen are set. It limits range of anisotropic B. Default is
100 at 4 A, proportional to resolution squared
resolution is nominal resolution of map
d_min is minimum resolution to use in calculation of Fourier coefficients
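    Minimal usage sketch (hedged; assumes the default half-map ids
    'map_manager_1' and 'map_manager_2' are present and that 'mmm' is this
    map_model_manager):

      mmm.half_map_sharpen(resolution = 3, local_sharpen = True)
      scaled_mm = mmm.get_map_manager_by_id('map_manager_scaled')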
'''
from libtbx import adopt_init_args
kw_obj = group_args()
adopt_init_args(kw_obj, locals())
kw = kw_obj() # save calling parameters in kw as dict
del kw['adopt_init_args'] # REQUIRED
del kw['kw_obj'] # REQUIRED
# Checks
assert self.get_map_manager_by_id(map_id)
# Set what maps are going to be sharpened and new names
kw = self.set_map_id_lists(kw)
# Allow sharpening globally before and after local sharpening
if local_sharpen and overall_sharpen_before_and_after_local:
print ("\nRunning half-map sharpening (global, local, global)\n",
file = self.log)
return self._sharpen_overall_local_overall(kw = kw,
method = self.half_map_sharpen)
print ("\nRunning half-map sharpening\n", file = self.log)
print("Scale factors will be identified using the "+
"maps '%s' and '%s' in map_model_manager '%s'" %(
kw['map_id_1'],kw['map_id_2'], self.name), file = self.log)
print("Maps to be scaled are '%s' in map_model_manager '%s'" %(
str(kw['map_id_to_be_scaled_list']),self.name),file = self.log)
print("Sharpened maps after half-map sharpening will be in "+
"'%s' in map_model_manager '%s'" %(
str(kw['map_id_scaled_list']),self.name),file = self.log)
if tlso_group_info: # convert to lists
convert_tlso_group_info_to_lists(tlso_group_info)
if kw['get_tls_from_u'] is None:
kw['get_tls_from_u'] = True
# Now get scaling from comparison of the two half-maps
# apply the scaling to map_id_to_be_scaled
if get_tls_info_only:
return self._sharpen_map(**kw)
else:
self._sharpen_map(**kw)
def model_sharpen(self,
map_id = 'map_manager',
model_id = 'model',
map_id_scaled_list = None,
map_id_to_be_scaled_list = None,
exclude_points_outside_density = True,
minimum_boxes_inside_density = True,
resolution = None,
d_min = None,
k_sol = None,
b_sol = None,
find_k_sol_b_sol = True,
d_min_for_k_sol_b_sol = 6.,
n_bins = None,
n_boxes = None,
core_box_size = None,
box_cushion = None,
smoothing_radius = None,
rmsd = None,
local_sharpen = None,
anisotropic_sharpen = None,
minimum_low_res_cc = 0.20,
get_scale_as_aniso_u = None,
use_dv_weighting = None,
n_direction_vectors = None,
run_analyze_anisotropy = True,
spectral_scaling = True,
expected_rms_fc_list = None,
expected_ssqr_list = None,
expected_ssqr_list_rms = None,
tlso_group_info = None,
get_tls_from_u = None,
find_tls_from_model = None,
model_id_for_rms_fc = None,
replace_aniso_with_tls_equiv = None,
max_abs_b = None,
nproc = None,
optimize_b_eff = None,
equalize_power = None,
map_id_model_map = 'model_map_for_scaling',
optimize_with_model = None,
overall_sharpen_before_and_after_local = False,
mask_around_model = True,
get_tls_info_only = None,
coordinate_shift_to_apply_before_tlso = None,
sharpen_all_maps = False,
remove_overall_anisotropy = True,
):
'''
Scale map_id with scale factors identified from map_id vs model
Changes the working map_manager unless map_id_scaled is set.
max_abs_b applies if get_scale_as_aniso_u and anisotropic_sharpen and
local_sharpen are set. It limits range of anisotropic B. Default is
100 at 4 A, proportional to resolution squared
resolution is nominal resolution of map
d_min is minimum resolution to use in calculation of Fourier coefficients
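    Minimal usage sketch (hedged; assumes the default ids 'map_manager' and
    'model' are present and that 'mmm' is this map_model_manager):

      mmm.model_sharpen(resolution = 3)
      scaled_mm = mmm.get_map_manager_by_id('map_manager_scaled')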
'''
from libtbx import adopt_init_args
kw_obj = group_args()
adopt_init_args(kw_obj, locals())
kw = kw_obj() # save calling parameters in kw as dict
del kw['adopt_init_args'] # REQUIRED
del kw['kw_obj'] # REQUIRED
# Checks
assert self.get_map_manager_by_id(map_id)
assert self.get_model_by_id(model_id)
# Set what maps are going to be sharpened and new names
kw = self.set_map_id_lists(kw)
print ("\nRunning model-based sharpening ", file = self.log)
if local_sharpen:
print("Sharpening will be local",file = self.log)
if anisotropic_sharpen:
if get_scale_as_aniso_u:
if replace_aniso_with_tls_equiv:
print("Sharpening will be anisotropic and converted to TLS",
file = self.log)
else:
print("Sharpening will be anisotropic and converted to aniso U",
file = self.log)
else:
print("Sharpening will be anisotropic",file = self.log)
print("Scale factors will be identified using the "+
"map '%s' and model '%s' in map_model_manager '%s'" %(
kw['map_id'], kw['model_id'], self.name), file = self.log)
print("Map to be scaled is '%s' in map_model_manager '%s'" %(
str(kw['map_id_to_be_scaled_list']),self.name),file = self.log)
print("Scaled map will be in '%s' in map_model_manager '%s'" %(
str(kw['map_id_scaled_list']),self.name),file = self.log)
if model_id_for_rms_fc is None:
kw['model_id_for_rms_fc'] = kw['model_id']
if tlso_group_info: # convert to lists
convert_tlso_group_info_to_lists(tlso_group_info)
if get_tls_from_u is None:
get_tls_from_u = True
elif find_tls_from_model:
# If we are going to use TLS groups from the model, check them here
if not self.get_model_by_id(model_id):
raise Sorry("Need model for find_tls_from_model")
if get_tls_from_u is None:
get_tls_from_u = True
tlso_group_info = get_tlso_group_info_from_model(
self.get_model_by_id(model_id),
nproc = nproc,
log = self.log)
kw['tlso_group_info'] = tlso_group_info
# Allow sharpening globally before and after local sharpening
if local_sharpen and overall_sharpen_before_and_after_local:
return self._sharpen_overall_local_overall(kw = kw,
method = self.model_sharpen)
del kw['find_k_sol_b_sol'] # REQUIRED
del kw['d_min_for_k_sol_b_sol'] # REQUIRED
del kw['mask_around_model'] # REQUIRED
del kw['model_id'] # REQUIRED
del kw['map_id_model_map'] # REQUIRED
del kw['optimize_with_model'] # REQUIRED
del kw['find_tls_from_model'] # REQUIRED
del kw['overall_sharpen_before_and_after_local'] # REQUIRED
# Make a copy of this map_model manager so we can modify it without
# changing the original
working_mmm = self.deep_copy()
working_mmm.set_name('working_mmm')
# Working resolution is resolution * d_min_ratio
if d_min is None:
d_min = working_mmm._get_d_min_from_resolution(resolution)
print ("High-resolution limit: "+
"%5.2f A based on nominal resolution of %5.2f A" %(
d_min, resolution if resolution else working_mmm.resolution()),
file = self.log)
map_id_to_be_scaled = kw['map_id_to_be_scaled_list'][0]
cc = self.map_model_cc(map_id=map_id_to_be_scaled, model_id=model_id)
print ("Map-model CC before sharpening: %.3f " %(cc), file = self.log)
map_coeffs = working_mmm.get_map_manager_by_id(
map_id_to_be_scaled).map_as_fourier_coefficients( d_min = d_min)
working_n_bins =working_mmm._set_n_bins(n_bins = n_bins,
d_min = d_min, map_coeffs = map_coeffs,
local_sharpen = local_sharpen)
f_array = get_map_coeffs_as_fp_phi(map_coeffs, n_bins = working_n_bins,
d_min = d_min).f_array
# Generate map from model using existing possibly anisotropic B
model=working_mmm.get_model_by_id(model_id)
if find_k_sol_b_sol and (k_sol is None) and (b_sol is None):
# Find k_sol and b_sol
local_mmm = working_mmm.extract_all_maps_around_model(
stay_inside_current_map = True)
local_mmm.mask_all_maps_around_atoms(
mask_atoms_atom_radius = 2.* d_min,
soft_mask =True)
d_min_for_k_sol_b_sol = max(d_min, d_min_for_k_sol_b_sol)
kb_info = local_mmm.find_k_sol_b_sol(local_mmm.get_model_by_id(model_id),
d_min = d_min_for_k_sol_b_sol,
model_map_id = map_id_model_map,
comparison_map_id = map_id)
| |
# if label can't apply to column but is defined raise error
if not applyc and self[label] == []:
for colname in self.colnames():
if self[label,colname] != []:
self.valid_check_error.append("Given metadata not allowed for a column: %s, %s, %s\n" %(label, colname, self[label,colname]))
#raise BADCTextFileMetadataInvalid("Given metadata not allowed for a column: %s, %s, %s" %(label, colname, self[label,colname]))
# values have wrong number of fields
if applyg:
for values in self[label]:
if maxo != -1 and len(values) > maxo:
self.valid_check_error.append("Max number of metadata fields (%s) exceeded for %s: %s\n" % (maxo, label, values))
#raise BADCTextFileMetadataInvalid("Max number of metadata fields (%s) exceeded for %s: %s" % (maxo, label, values))
if len(values) < mino:
self.valid_check_error.append("Min number of metadata fields (%s) not given for %s: %s\n" % (mino, label, values,))
#raise BADCTextFileMetadataInvalid("Min number of metadata fields (%s) not given for %s: %s" % (mino, label, values,))
if applyc:
for colname in self.colnames():
if label in self._metadata.varRecords[colname]:
values = self._metadata.varRecords[colname][label]
if maxo != -1 and len(values) > maxo:
self.valid_check_error.append("Max number of metadata fields (%s) exceeded for %s: %s\n" % (maxo, label, values,))
#raise BADCTextFileMetadataInvalid("Max number of metadata fields (%s) exceeded for %s: %s" % (maxo, label, values,))
if len(values) < mino:
self.valid_check_error.append("Min number of metadata fields (%s) not given for %s: %s\n" % (mino, label, values,))
#raise BADCTextFileMetadataInvalid("Min number of metadata fields (%s) not given for %s: %s" % (mino, label, values,))
#see if values are OK
if self.valid_check_error != []:
raise BADCTextFileMetadataInvalid(self.valid_check_error)
else:
for values in self[label]:
try:
check(values)
except:
raise BADCTextFileMetadataInvalid("Metadata field values invalid %s: %s [%s]\n" % (label, values,sys.exc_value))
for colname in self.colnames():
for values in self[label,colname]:
check(values)
def check_colrefs(self):
long_namesCnt = []
for long_names in self._metadata:
ref = long_names[2]
long_namesCnt.append(ref)
if len(long_namesCnt) == len(self.colnames()):
try:
for colName in long_namesCnt:
if not colName in self.colnames():
raise
except:
raise BADCTextFileMetadataInvalid('Column names %s not in column header list %s'% (colName,','.join(self.colnames())))
else:
raise BADCTextFileMetadataInvalid('Not all column headings given %s'% ','.join(self.colnames()))
def check_complete(self, level='basic'):
#self.check_colrefs()
self.check_valid()
self.basicCheckErrors = []
for label in BADCTextFile.MDinfo:
applyg, applyc, mino, maxo, mandb, mandc, check, meaning = BADCTextFile.MDinfo[label]
#[G,C,min,max,basic,complete]
# find level for check
if level=='basic':
mand = mandb
else:
mand = mandc
#if its not mandatory skip
if not mand:
continue
print(level, label)
print('doing this')
# if applies globally then there should be a global record or
# one at least one variable
if applyg:
if self[label] != []:
#found global value. next label
continue
for colname in self.colnames():
if self[label,colname] != []:
break
else:
self.basicCheckErrors.append("Basic global metadata not there: %s\n" % label)
#raise BADCTextFileMetadataIncomplete("Basic global metadata not there: %s" % label)
# if applies to column only then there should be a record for
# each variable
elif applyc and mand==2:
for colname in self.colnames():
try:
if self._metadata.varRecords[colname][label] == []:
raise
except:
self.basicCheckErrors.append('Basic column metadata not there: "%s" not there for %s\n' % (label, colname))
#raise BADCTextFileMetadataIncomplete('Basic column metadata not there: "%s" not there for %s' % (label, colname))
if self.basicCheckErrors != []:
raise BADCTextFileMetadataIncomplete(self.basicCheckErrors)
def colnames(self):
return tuple(self._data.colnames)
def nvar(self):
return self._data.nvar()
def __len__(self):
return len(self._data)
def __getitem__(self, i):
# -- ref change
if type(i) == int:
return self._data[i]
else:
return self._metadata[i]
def add_variable(self,colname,data=()):
# -- ref change
self._data.add_variable(colname, data)
def add_datarecord(self, datavalues):
self._data.add_data_row(datavalues)
def add_metadata(self, label, values, ref='G'):
self._metadata.add_record(label, values, ref)
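    # Hedged usage sketch (labels and values are illustrative; the constructor
    # arguments for BADCTextFile are defined elsewhere in this module, so 'f'
    # stands for an already-created instance):
    #   f.add_metadata("title", ("example file",))
    #   f.add_variable("temp", ("10.1", "10.4"))
    #   print(f.cvs())   # render the whole file as BADC-CSV text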
def __repr__(self):
return self.cvs()
def cdl(self):
# create a CDL file (to make NetCDF)
s = "// This CDL file was generated from a BADC text file file\n"
s = s + "netcdf foo { \n"
s = s + "dimensions:\n point = %s;\n\n" % len(self)
s = s + "variables: \n"
for colname in self.colnames():
print(colname)
try:
varname = "var%s" % int(colname.strip())
except:
varname = colname
print(varname)
vartype = self['type', colname][0][0]
s = s + " %s %s(point);\n" % (vartype, varname)
s = s + "\n"
s = s + self._metadata.cdl()
s = s + "\n"
s = s + "data:\n"
for i in range(self.nvar()):
varname = "var%s" % self._data.colnames[i]
values = string.join(self[i], ', ')
s =s + "%s = %s;\n" % (varname, values)
s = s + "}\n"
return s
def NASA_Ames(self):
# create a NASA-Ames file 1001 FFI
header = []
# find creator and institute
c = ''
inst = ''
for creator in self['creator']:
c = c + creator[0] + '; '
if len(creator) == 2:
inst = inst + creator[1] + '; '
if inst == '': inst = 'Unknown'
header.append(c[:-2])
header.append(inst[:-2])
# find source (DPT)
s = ''
for source in self['source']:
s = s + source[0] + '; '
header.append(s[:-2])
# find activity
a = ''
for activity in self['activity']:
a = a + activity[0] + '; '
header.append(a[:-2])
# disk 1 of 1
header.append("1 1")
# dates
date_valid = self['date_valid']
date_valid = min(date_valid)
date_valid = date_valid[0]
date_valid = date_valid.replace('-', ' ')
last_revised_date = self['last_revised_date']
last_revised_date = min(last_revised_date)
last_revised_date = last_revised_date[0]
last_revised_date = last_revised_date.replace('-', ' ')
header.append("%s %s" % (date_valid, last_revised_date))
# ??
header.append('0.0')
# coord variable
coord = self['coordinate_variables'][0][0]
coord = self['long_name',int(coord)][0]
coord = "%s (%s)" % (coord[0], coord[1])
header.append(coord)
# number of variables not coord variable
header.append("%s" % (self.nvar()-1))
#scale factors
sf_line = ''
for i in range(1,self.nvar()):
sf = self['scale_factor',i]
if len(sf)==0: sf = "1.0"
else: sf = sf[0][0]
sf_line = sf_line + "%s " % sf
header.append(sf_line)
#scale factors
max_line = ''
for i in range(1,self.nvar()):
vm = self['valid_max',i]
if len(vm)==0: vm = "1.0e99"
else: vm = vm[0][0]
vr = self['valid_range',i]
if len(vr)==0: vr = "1.0e99"
else: vr = vr[0][1]
vm = min(float(vm), float(vr))
max_line = max_line + "%s " % vm
header.append(max_line)
# variable names
for i in range(1,self.nvar()):
long_name = self['long_name',i][0]
long_name = "%s (%s)" % (long_name[0], long_name[1])
header.append(long_name)
# normal comments
header.append('1')
header.append('File created from BADC text file')
# special comments - all metadata to go in
s = StringIO()
cvswriter = csv.writer(s)
self._metadata.csv(cvswriter)
metadata = s.getvalue()
nlines = metadata.count('\n')
header.append("%s" % (nlines+2))
header.append("BADC-CSV style metadata:")
header.append(s.getvalue())
# make header
header="%s 1001\n%s" % (len(header)+nlines, string.join(header,'\n'))
# data space seperated
data = ''
for i in range(len(self)):
data = data + string.join(self._data.getrow(i)) + '\n'
return header+data
def cvs(self):
s = StringIO()
cvswriter = csv.writer(s, lineterminator='\n' )
self._metadata.csv(cvswriter)
self._data.csv(cvswriter)
return s.getvalue()
class BADCTextFileData:
# class to hold data in the files
# BADCTextFileData is an aggregation of variables
def __init__(self):
self.variables = []
self.colnames = []
def add_variable(self, name, values):
if len(self.variables) == 0 or len(values) == len(self.variables[0]):
self.variables.append(BADCTextFileVariable(values))
self.colnames.append(name)
else:
raise BADCTextFileError("Wrong length of data")
def add_data_row(self, values):
if self.nvar() == 0 and len(values) != 0:
for v in values:
self.variables.append(BADCTextFileVariable((v,)))
elif self.nvar() == len(values):
for i in range(len(values)):
self.variables[i].append(values[i])
else:
raise BADCTextFileError("Wrong length of data")
def __len__(self):
# number of data rows
if len(self.variables) == 0:
return 0
else:
return len(self.variables[0])
def nvar(self):
# number of variables
return len(self.variables)
def __getitem__(self, i):
if type(i) == int:
return self.variables[i].values
else:
col, row = i
return self.variables[col][row]
def getrow(self,i):
row = []
for j in range(self.nvar()):
row.append(self.variables[j][i])
return row
def csv(self, csvwriter):
csvwriter.writerow(('Data',))
csvwriter.writerow(self.colnames)
for i in range(len(self)):
csvwriter.writerow(self.getrow(i))
csvwriter.writerow(('End Data',))
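# Hedged usage sketch for BADCTextFileData (values are illustrative):
#   data = BADCTextFileData()
#   data.add_variable("time", ("0", "1", "2"))
#   data.add_variable("temp", ("10.1", "10.4", "10.2"))
#   data.getrow(0)   # -> ['0', '10.1']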
class BADCTextFileVariable:
# class to hold 1D data.
def __init__(self, values=[]):
self.set_values(values)
def __len__(self):
return len(self.values)
def __getitem__(self, i):
return self.values[i]
def append(self,v):
self.values.append(v)
def set_values(self, values):
self.values = list(values)
class BADCTextFileMetadata:
def __init__(self):
# records in label, value form. Where label is the metadata label e.g. title and value is a tuple
# e.g. ("my file",)
self.globalRecords = []
self.varRecords = {}
def __getitem__(self, requested_item):
# if the item is selected | |
[Required] The ID of the Google Cloud Platform project that the
job belongs to.
region: [Required] The Cloud Dataproc region in which to handle the
request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsListRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsListRequest object.
Enums:
JobStateMatcherValueValuesEnum: [Optional] Specifies enumerated categories
of jobs to list.
Fields:
clusterName: [Optional] If set, the returned jobs list includes only jobs
that were submitted to the named cluster.
jobStateMatcher: [Optional] Specifies enumerated categories of jobs to
list.
pageSize: [Optional] The number of results to return in each response.
pageToken: [Optional] The page token, returned by a previous call, to
request the next page of results.
projectId: [Required] The ID of the Google Cloud Platform project that the
job belongs to.
region: [Required] The Cloud Dataproc region in which to handle the
request.
"""
class JobStateMatcherValueValuesEnum(_messages.Enum):
"""[Optional] Specifies enumerated categories of jobs to list.
Values:
ALL: <no description>
ACTIVE: <no description>
NON_ACTIVE: <no description>
"""
ALL = 0
ACTIVE = 1
NON_ACTIVE = 2
clusterName = _messages.StringField(1)
jobStateMatcher = _messages.EnumField('JobStateMatcherValueValuesEnum', 2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
region = _messages.StringField(6, required=True)
class DataprocProjectsRegionsJobsSubmitRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsSubmitRequest object.
Fields:
projectId: [Required] The ID of the Google Cloud Platform project that the
job belongs to.
region: [Required] The Cloud Dataproc region in which to handle the
request.
submitJobRequest: A SubmitJobRequest resource to be passed as the request
body.
"""
projectId = _messages.StringField(1, required=True)
region = _messages.StringField(2, required=True)
submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)
class DataprocProjectsRegionsOperationsCancelRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsDeleteRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsListRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation collection.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class DiagnoseClusterOutputLocation(_messages.Message):
"""The location where output from diagnostic command can be found.
Fields:
outputUri: [Output-only] The Google Cloud Storage URI of the diagnostic
output. This will be a plain text file with summary of collected
diagnostics.
"""
outputUri = _messages.StringField(1)
class DiagnoseClusterRequest(_messages.Message):
"""A request to collect cluster diagnostic information."""
class DiagnoseClusterResults(_messages.Message):
"""The location of diagnostic output.
Fields:
outputUri: [Output-only] The Google Cloud Storage URI of the diagnostic
output. This is a plain text file with a summary of collected
diagnostics.
"""
outputUri = _messages.StringField(1)
class DiskConfig(_messages.Message):
"""Specifies the config of disk options for a group of VM instances.
Fields:
bootDiskSizeGb: [Optional] Size in GB of the boot disk (default is 500GB).
numLocalSsds: [Optional] Number of attached SSDs, from 0 to 4 (default is
0). If SSDs are not attached, the boot disk is used to store runtime
logs and HDFS data. If one or more SSDs are attached, this runtime bulk
data is spread across them, and the boot disk contains only basic config
and installed binaries.
"""
bootDiskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
numLocalSsds = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class GceClusterConfig(_messages.Message):
"""Common config settings for resources of Google Compute Engine cluster
instances, applicable to all instances in the cluster.
Messages:
MetadataValue: The Google Compute Engine metadata entries to add to all
instances.
Fields:
metadata: The Google Compute Engine metadata entries to add to all
instances.
networkUri: The Google Compute Engine network to be used for machine
communications. Cannot be specified with subnetwork_uri. If neither
network_uri nor subnetwork_uri is specified, the "default" network of
the project is used, if it exists. Cannot be a "Custom Subnet Network"
(see https://cloud.google.com/compute/docs/subnetworks for more
information). Example: `https://www.googleapis.com/compute/v1/projects/[
project_id]/regions/global/default`.
serviceAccountScopes: The URIs of service account scopes to be included in
Google Compute Engine instances. The following base set of scopes is
always included: *
https://www.googleapis.com/auth/cloud.useraccounts.readonly *
https://www.googleapis.com/auth/devstorage.read_write *
https://www.googleapis.com/auth/logging.write If no scopes are
specfied, the following defaults are also provided: *
https://www.googleapis.com/auth/bigquery *
https://www.googleapis.com/auth/bigtable.admin.table *
https://www.googleapis.com/auth/bigtable.data *
https://www.googleapis.com/auth/devstorage.full_control
subnetworkUri: The Google Compute Engine subnetwork to be used for machine
communications. Cannot be specified with network_uri. Example:
`https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-
east1/sub0`.
tags: The Google Compute Engine tags to add to all instances.
zoneUri: [Required] The zone where the Google Compute Engine cluster will
be located. Example: `https://www.googleapis.com/compute/v1/projects/[pr
oject_id]/zones/[zone]`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""The Google Compute Engine metadata entries to add to all instances.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
metadata = _messages.MessageField('MetadataValue', 1)
networkUri = _messages.StringField(2)
serviceAccountScopes = _messages.StringField(3, repeated=True)
subnetworkUri = _messages.StringField(4)
tags = _messages.StringField(5, repeated=True)
zoneUri = _messages.StringField(6)
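# Illustrative sketch, not part of the generated module: MetadataValue above
# encodes a string-to-string map as repeated AdditionalProperty entries, the
# usual apitools pattern behind @encoding.MapUnrecognizedFields. The key,
# value, project and zone below are placeholders.
def _example_gce_cluster_config():
    metadata = GceClusterConfig.MetadataValue(additionalProperties=[
        GceClusterConfig.MetadataValue.AdditionalProperty(
            key='startup-script-url', value='gs://my-bucket/startup.sh'),
    ])
    return GceClusterConfig(
        zoneUri='https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a',
        metadata=metadata,
        tags=['dataproc-node'],
    )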
class HadoopJob(_messages.Message):
"""A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.
Messages:
PropertiesValue: [Optional] A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
Fields:
archiveUris: [Optional] HCFS URIs of archives to be extracted in the
working directory of Hadoop drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, or .zip.
args: [Optional] The arguments to pass to the driver. Do not include
arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
properties, since a collision may occur that causes an incorrect job
submission.
fileUris: [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to
be copied to the working directory of Hadoop drivers and distributed
tasks. Useful for naively parallel tasks.
jarFileUris: [Optional] Jar file URIs to add to the CLASSPATHs of the
Hadoop driver and tasks.
loggingConfig: [Optional] The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file containing
the class must be in the default CLASSPATH or specified in
`jar_file_uris`.
mainJarFileUri: The HCFS URI of the jar file containing the main class.
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-
metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar'
'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
properties: [Optional] A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""[Optional] A mapping of property names to values, used to configure
Hadoop. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/hadoop/conf/*-site
and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
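# Illustrative sketch, not part of the generated module: a HadoopJob as
# described above names the main jar by HCFS URI, passes plain driver
# arguments through args, and puts anything that maps to a Hadoop property
# into PropertiesValue rather than args (to avoid the collision the docstring
# warns about). All URIs and property names below are placeholders.
def _example_hadoop_job():
    properties = HadoopJob.PropertiesValue(additionalProperties=[
        HadoopJob.PropertiesValue.AdditionalProperty(
            key='mapreduce.job.reduces', value='4'),
    ])
    return HadoopJob(
        mainJarFileUri='gs://my-bucket/analytics-binaries/extract-useful-metrics-mr.jar',
        args=['--input', 'gs://my-bucket/in', '--output', 'gs://my-bucket/out'],
        properties=properties,
    )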
class HiveJob(_messages.Message):
"""A Cloud Dataproc job for running Hive queries on YARN.
Messages:
PropertiesValue: [Optional] A mapping of property names and values, used
to configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
ScriptVariablesValue: [Optional] Mapping of query variable names to values
(equivalent to the Hive
# Source: GrowYourIC/data.py (repository MarineLasbleis/GrowYourIC)
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Module data.py
This module define the classes SeismicData() to handle seismic data set.
These datasets define the geographic repartition of raypath in the inner core.
functions:
read_from_file: to read a file with seismic data
classes:
SeismicData: base class
SeismicFromFile: data obtained from a file (real data)
PerfectSamplingEquator
PerfectSamplingEquatorRadial
RandomData: random data, well partitioned on the horizontal, and between 15 and 106km
PerfectSamplingSurface
"""
from __future__ import division
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt # for figures
import pandas as pd
# personal routines
from . import positions
# import geodynamic
from . import plot_data
# Read from files with pandas:
## example: pd.read_table(self.filename, sep='\s+', names=self.slices, skiprows=10)
## example: pd.read_table(self.filename, sep='\s+', header=None)[nb_slices]
class SeismicData(object):
""" Class for seismic data """
def __init__(self):
self.data_points = []
self.size = None
self.proxy = 0.
self.name = None
self.shortname = None
def __getitem__(self, key):
return self.data_points[key]
def extract_xyz(self, type_of_point="bottom_turning_point"):
"""Extract the cartesian coordinates of the points in the data set"""
x, y, z = np.empty([self.size, 1]), np.empty(
[self.size, 1]), np.empty([self.size, 1])
for i, ray in enumerate(self.data_points):
point = getattr(ray, type_of_point)
x[i] = point.x
y[i] = point.y
z[i] = point.z
return x, y, z
def extract_rtp(self, type_of_point="bottom_turning_point"):
"""Extract the radius, theta (latitute), phi (longitude) of the points"""
r, theta, phi = np.empty([self.size, 1]), np.empty(
[self.size, 1]), np.empty([self.size, 1])
for i, ray in enumerate(self.data_points):
point = getattr(ray, type_of_point)
r[i] = point.r
theta[i] = point.theta
phi[i] = point.phi
return r, theta, phi
def extract_zeta(self):
""" zeta values for all raypath (requires in and out points) """
zeta = np.empty([self.size, 1])
for i, ray in enumerate(self.data_points):
zeta[i] = ray.calc_zeta()
return zeta
def map_plot(self, geodyn_model=''):
""" plot data on a map."""
# user should plot on map in the main code.
m, fig = plot_data.setting_map()
colormap = plt.cm.get_cmap('RdYlBu')
_, theta, phi = self.extract_rtp("bottom_turning_point")
x, y = m(phi, theta)
proxy = np.array([self.proxy]).T.astype(float)
sc = m.scatter(x, y, c=proxy, zorder=10, cmap=colormap)
# TODO : make a function to plot great circles correctly!
# r1, theta1, phi1 = self.extract_in() #use extract_rtp()
#r2, theta2, phi2 = self.extract_out()
# for i, t in enumerate(theta1):
# z, w = m.gcpoints(phi1[i], theta1[i], phi2[i], theta2[i], 200)#
# m.plot(z, w, zorder=5, c="black")
# m.drawgreatcircle(phi1[i], theta1[i], phi2[i], theta2[i], zorder=5, c="black")
title = "Dataset: {},\n geodynamic model: {}".format(
self.name, geodyn_model)
plt.title(title)
plt.colorbar(sc)
def phi_plot(self, geodyn_model=''):
""" Plot proxy as function of longitude """
_fig, ax = plt.subplots()
_, _, phi = self.extract_rtp("bottom_turning_point")
ax.plot(phi, self.proxy, '.')
title = "Dataset: {},\n geodynamic model: {}".format(
self.name, geodyn_model)
plt.title(title)
plt.xlabel("longitude of bottom turning point")
plt.ylabel("proxy")
def distance_plot(self, geodyn_model='',
point=positions.SeismoPoint(1., 0., 0.)):
""" Plot proxy as function of the angular distance with point G """
fig, ax = plt.subplots()
_, theta, phi = self.extract_rtp("bottom_turning_point")
theta1, phi1 = point.theta, point.phi
distance = positions.angular_distance_to_point(
theta, phi, theta1, phi1)
ax.plot(distance, self.proxy, '.')
title = "Dataset: {},\n geodynamic model: {}".format(
self.name, geodyn_model)
plt.title(title)
plt.xlabel(
"Angular distance between turning point and ({} {})".format(theta1, phi1))
plt.ylabel("proxy")
class SeismicFromFile(SeismicData):
""" Seismic data set from a data file (csv) """
def __init__(self, filename="WD11.dat", RICB=1221.,
name="Data set from Waszek and Deuss 2011", shortname="WD11", N="all"):
SeismicData.__init__(self)
self.filename = filename
self.rICB = RICB
if N == "all":
self.limited_nber_points = [False, 0]
else:
self.limited_nber_points = [True, N]
self.isitknowndataset()
def isitknowndataset(self, verbose=True):
""" Check if the data set is already known. If not, explain how to add one.
Required variables to specify:
self.name and self.shortname : names to be printed on figures and filenames (text)
self.data_points : all the raypaths (numpy array)
self.size : total size of the data set (int, number of points)
"""
if self.filename[-8:] == "WD11.dat":
self.name = "Data set from Waszek and Deuss 2011"
self.shortname = "WD11"
self.WD11()
if verbose:
print("Waszek and Deuss 2011 successfully loaded. {} trajectories.".format(
self.size))
elif self.filename[-24:] == "DF_sample_ksi_sorted.dat":
self.name = "Data set from <NAME>"
self.shortname = "Steph."
self.Stephenson()
if verbose:
print(
"Data set successfully loaded. {} trajectories.".format(
self.size))
else:
print("There is an Error. You tried to load a data file of real distribution, but the file was not recognized.")
def WD11(self):
""" the data set is the Waszek and Deuss 2011 in the file WD11.dat """
self.slices = ["PKIKP-PKiKP travel time residual", "turn lat",
"turn lon", "turn depth", "in lat", "in lon", "out lat", "out lon"]
self.data = pd.read_table(
self.filename, sep=r'\s+', names=self.slices, skiprows=10)
if self.limited_nber_points[0] == True:
print(self.limited_nber_points[1])
self.data = self.data.iloc[:self.limited_nber_points[1]]
self.size = self.data.shape[0]
self.data_points = []
for _, row in self.data.iterrows():
ray = positions.Raypath()
ray.add_b_t_point(positions.SeismoPoint(
1. - row["turn depth"] / self.rICB, row["turn lat"], row["turn lon"]))
in_point = positions.SeismoPoint(1., row["in lat"], row["in lon"])
out_point = positions.SeismoPoint(
1., row["out lat"], row["out lon"])
ray.add_property({'in_point': in_point, 'out_point': out_point})
ray.residual = row["PKIKP-PKiKP travel time residual"]
self.data_points = np.append(self.data_points, ray)
def Stephenson(self): # the "turn depth" is actually the "turn radius" !
self.slices = ["turn lat", "turn lon", "turn depth", "in lat", "in lon",
"out lat", "out lon", "travel time residual relative to ak135"]
# [12,13,14,15,16,17,18,24]
nb_slices = [11, 12, 13, 14, 15, 16, 17, 24]
self.data = pd.read_table(
self.filename, sep=r'\s+', header=None)[nb_slices]
if self.limited_nber_points[0] == True:
print(self.limited_nber_points[1])
self.data = self.data.iloc[:self.limited_nber_points[1]]
self.data.columns = self.slices
self.size = self.data.shape[0]
for _, row in self.data.iterrows():
ray = positions.Raypath()
ray.add_b_t_point(positions.SeismoPoint(
row["turn depth"] / self.rICB, row["turn lat"], row["turn lon"]))
in_point = positions.SeismoPoint(1., row["in lat"], row["in lon"])
out_point = positions.SeismoPoint(
1., row["out lat"], row["out lon"])
ray.add_property({'in_point': in_point, 'out_point': out_point})
# careful here with the names of the column for residual!
ray.residual = row["travel time residual relative to ak135"]
self.data_points = np.append(self.data_points, ray)
def real_residual(self):
""" Extract the values of residuals from the data. """
value = []
for ray in self.data_points:
value = np.append(value, ray.residual)
return value
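# Illustrative usage sketch, not part of the original module, assuming the
# WD11.dat file shipped with the package is available in the working
# directory: load the real data set, then pull out bottom-turning-point
# coordinates and travel-time residuals.
def _example_load_wd11():
    data = SeismicFromFile("WD11.dat")
    r, theta, phi = data.extract_rtp("bottom_turning_point")
    residuals = data.real_residual()
    print(data.size, r.shape, residuals.shape)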
class PerfectSamplingEquator(SeismicData):
""" Perfect sampling on the equator cross section, on a cartesian grid. """
def __init__(self, N, rICB=1.):
SeismicData.__init__(self)
self.rICB = rICB
self.N = N
self.name = "Perfect sampling in the equatorial plane"
self.shortname = "equatorial"
for x in np.linspace(-self.rICB, self.rICB, N):
for y in np.linspace(-self.rICB, self.rICB, N):
ray = positions.Raypath()
ray.add_b_t_point(positions.CartesianPoint(x, y, 0.))
if ray.bottom_turning_point.r <= self.rICB:
self.data_points = np.append(self.data_points, ray)
self.size = len(self.data_points)
def plot_c_vec(self, modelgeodyn, proxy=1,
cm=plt.get_cmap('summer'), nameproxy=""):
""" Plot contourf of the proxy + streamlines of the flow in meridional cross section.
Args:
modelgeodyn: a geodyn.Model instance
proxy: the values to be plot are either defined as self.proxy,
given as proxy in the function, or set to 1 if not given.
"""
fig, ax = plt.subplots()
ax.set_aspect('equal')
x1 = np.linspace(-self.rICB, self.rICB, self.N)
y1 = np.linspace(-self.rICB, self.rICB, self.N)
X, Y = np.meshgrid(x1, y1)
Z = -1. * np.ones_like(X)
x, y, _ = self.extract_xyz("bottom_turning_point")
for it, pro in enumerate(proxy):
ix = [i for i, j in enumerate(x1) if j == x[it]]
iy = [i for i, j in enumerate(y1) if j == y[it]]
Z[ix, iy] = pro
mask_Z = Z == -1
Z = np.ma.array(Z, mask=mask_Z)
sc = ax.contourf(Y, X, Z, 10, cmap=cm)
#sc2 = ax.contour(Y, X, Z, 10, colors='w')
Vx, Vy = np.empty((self.N, self.N)), np.empty((self.N, self.N))
for ix, _ in enumerate(x1):
for iy, _ in enumerate(y1):
velocity = modelgeodyn.velocity(
modelgeodyn.tau_ic, [X[ix, iy], Y[ix, iy], 0.])
Vx[ix, iy] = velocity[0]
Vy[ix, iy] = velocity[1]
Vx = np.ma.array(Vx, mask=mask_Z)
Vy = np.ma.array(Vy, mask=mask_Z)
#ax.quiver(X, Y, Vx, Vy)
ax.streamplot(X, Y, Vx, Vy, color='black',
arrowstyle='->', density=0.5)
theta = np.linspace(0., 2 * np.pi, 1000)
ax.plot(np.sin(theta), np.cos(theta), 'k', lw=3)
ax.set_xlim([-1.1, 1.1])
ax.set_ylim([-1.1, 1.1])
cbar = plt.colorbar(sc)
cbar.set_label(nameproxy)
title = "Geodynamical model: {}".format(modelgeodyn.name)
plt.title(title)
plt.axis("off")
# plt.show()
def plot_c(self, modelgeodyn, proxy=1,
cm=plt.get_cmap('summer'), nameproxy=""):
""" Plot contourf of the proxy in meridional cross section -- no stream lines.
Args:
modelgeodyn: a geodyn.Model instance
proxy: the values to be plot are either defined as self.proxy,
given as proxy in the function, or set to 1 if not given.
"""
fig, ax = plt.subplots()
ax.set_aspect('equal')
x1 = np.linspace(-self.rICB, self.rICB, self.N)
y1 = np.linspace(-self.rICB, self.rICB, self.N)
X, Y = np.meshgrid(x1, y1)
Z = -1. * np.ones_like(X)
x, y, _ = self.extract_xyz("bottom_turning_point")
for it, pro in enumerate(proxy):
ix = [i for i, j in enumerate(x1) if j == x[it]]
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(20, 10))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_rect_center_mapunits', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignCenter)
def testDrawBackgroundPointCenterAlignFixedSizeMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(20, 10))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRenderPoint(format, 'background_point_center_mapunits', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignCenter)
def testDrawBackgroundRectangleRightAlignFixedSizeMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(20, 10))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_rect_right_mapunits', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignRight)
def testDrawBackgroundPointRightAlignFixedSizeMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(20, 10))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRenderPoint(format, 'background_point_right_mapunits', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignRight)
def testDrawBackgroundRectangleFixedSizeMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_rect_mm', QgsTextRenderer.Background)
def testDrawBackgroundRectangleFixedSizePixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(60, 80))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_rect_pixels', QgsTextRenderer.Background)
def testDrawBackgroundRectBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_rect_buffer_pixels', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundRectRightAlignBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_rect_right_buffer_pixels', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignRight,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundRectCenterAlignBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_rect_center_buffer_pixels', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignCenter,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundPointBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRenderPoint(format, 'background_point_buffer_pixels', QgsTextRenderer.Background,
point=QPointF(100, 100))
def testDrawBackgroundPointRightAlignBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRenderPoint(format, 'background_point_right_buffer_pixels', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignRight,
point=QPointF(100, 100))
def testDrawBackgroundPointCenterAlignBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 50))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRenderPoint(format, 'background_point_center_buffer_pixels', QgsTextRenderer.Background,
alignment=QgsTextRenderer.AlignCenter,
point=QPointF(100, 100))
def testDrawBackgroundRectBufferMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(4, 6))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_rect_buffer_mapunits', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundRectBufferMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(10, 16))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_rect_buffer_mm', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundEllipse(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeEllipse)
format.background().setSize(QSizeF(60, 80))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_ellipse_pixels', QgsTextRenderer.Background)
def testDrawBackgroundSvgFixedPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(60, 80))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_svg_fixed_pixels', QgsTextRenderer.Background)
def testDrawBackgroundSvgFixedMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(20, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_svg_fixed_mapunits', QgsTextRenderer.Background)
def testDrawBackgroundSvgFixedMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(30, 30))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_svg_fixed_mm', QgsTextRenderer.Background)
def testDrawBackgroundRotationSynced(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setRotation(45) # should be ignored
format.background().setRotationType(QgsTextBackgroundSettings.RotationSync)
assert self.checkRender(format, 'background_rotation_sync', QgsTextRenderer.Background, angle=20)
def testDrawBackgroundSvgBufferPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(30, 30))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'background_svg_buffer_pixels', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundSvgBufferMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(4, 4))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_svg_buffer_mapunits', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundSvgBufferMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
format.background().setSvgFile(svg)
format.background().setType(QgsTextBackgroundSettings.ShapeSVG)
format.background().setSize(QSizeF(10, 10))
format.background().setSizeType(QgsTextBackgroundSettings.SizeBuffer)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_svg_buffer_mm', QgsTextRenderer.Background,
rect=QRectF(100, 100, 100, 100))
def testDrawBackgroundRotationFixed(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setRotation(45)
format.background().setRotationType(QgsTextBackgroundSettings.RotationFixed)
assert self.checkRender(format, 'background_rotation_fixed', QgsTextRenderer.Background, angle=20)
def testDrawRotationOffset(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setRotation(45)
format.background().setRotationType(QgsTextBackgroundSettings.RotationOffset)
assert self.checkRender(format, 'background_rotation_offset', QgsTextRenderer.Background, angle=20)
def testDrawBackgroundOffsetMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setOffset(QPointF(30, 20))
format.background().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_offset_mm', QgsTextRenderer.Background)
def testDrawBackgroundOffsetMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setOffset(QPointF(10, 5))
format.background().setOffsetUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_offset_mapunits', QgsTextRenderer.Background)
def testDrawBackgroundRadiiMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setRadii(QSizeF(6, 4))
format.background().setRadiiUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_radii_mm', QgsTextRenderer.Background)
def testDrawBackgroundRadiiMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.background().setRadii(QSizeF(3, 2))
format.background().setRadiiUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'background_radii_mapunits', QgsTextRenderer.Background)
def testDrawBackgroundOpacity(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setOpacity(0.6)
assert self.checkRender(format, 'background_opacity', QgsTextRenderer.Background)
def testDrawBackgroundFillColor(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setFillColor(QColor(50, 100, 50))
assert self.checkRender(format, 'background_fillcolor', QgsTextRenderer.Background)
def testDrawBackgroundStroke(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setStrokeColor(QColor(50, 100, 50))
format.background().setStrokeWidth(3)
format.background().setStrokeWidthUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'background_outline', QgsTextRenderer.Background)
def testDrawBackgroundEffect(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.background().setEnabled(True)
format.background().setType(QgsTextBackgroundSettings.ShapeRectangle)
format.background().setSize(QSizeF(30, 20))
format.background().setSizeType(QgsTextBackgroundSettings.SizeFixed)
format.background().setPaintEffect(QgsBlurEffect.create({'blur_level': '10', 'enabled': '1'}))
assert self.checkRender(format, 'background_effect', QgsTextRenderer.Background, text=['test'])
def testDrawText(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRender(format, 'text_bold', QgsTextRenderer.Text, text=['test'])
def testDrawTextPoint(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRenderPoint(format, 'text_point_bold', QgsTextRenderer.Text, text=['test'])
def testDrawTextNamedStyle(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
# need to call getTestFont to make sure font style is installed and ready to go
temp_font = getTestFont('Bold Oblique') # NOQA
format.setFont(getTestFont())
format.setNamedStyle('Bold Oblique')
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRender(format, 'text_named_style', QgsTextRenderer.Text, text=['test'])
def testDrawTextColor(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(0, 255, 0))
assert self.checkRender(format, 'text_color', QgsTextRenderer.Text, text=['test'])
def testDrawTextOpacity(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setOpacity(0.7)
assert self.checkRender(format, 'text_opacity', QgsTextRenderer.Text, text=['test'])
def testDrawTextBlendMode(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(100, 100, 100))
format.setBlendMode(QPainter.CompositionMode_Difference)
assert self.checkRender(format, 'text_blend_mode', QgsTextRenderer.Text, text=['test'])
def testDrawTextAngle(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRender(format, 'text_angled', QgsTextRenderer.Text, angle=90 / 180 * 3.141, text=['test'])
def testDrawTextMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(5)
format.setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'text_mapunits', QgsTextRenderer.Text, text=['test'])
def testDrawTextPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(50)
format.setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'text_pixels', QgsTextRenderer.Text, text=['test'])
def testDrawMultiLineText(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(30)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRender(format, 'text_multiline', QgsTextRenderer.Text, text=['test', 'multi', 'line'])
def testDrawMultiLineTextPoint(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(30)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
assert self.checkRenderPoint(format, 'text_point_multiline', QgsTextRenderer.Text, text=['test', 'multi', 'line'])
def testDrawLineHeightText(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(30)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setLineHeight(1.5)
assert self.checkRender(format, 'text_line_height', QgsTextRenderer.Text, text=['test', 'multi', 'line'])
def testDrawBufferSizeMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'text_buffer_mm', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferDisabled(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(False)
assert self.checkRender(format, 'text_disabled_buffer', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferSizeMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'text_buffer_mapunits', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferSizePixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(10)
format.buffer().setSizeUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'text_buffer_pixels', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferColor(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.buffer().setColor(QColor(0, 255, 0))
assert self.checkRender(format, 'text_buffer_color', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferOpacity(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.buffer().setOpacity(0.5)
assert self.checkRender(format, 'text_buffer_opacity', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferFillInterior(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.buffer().setFillBufferInterior(True)
assert self.checkRender(format, 'text_buffer_interior', QgsTextRenderer.Buffer, text=['test'])
def testDrawBufferEffect(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setEnabled(True)
format.buffer().setSize(2)
format.buffer().setSizeUnit(QgsUnitTypes.RenderMillimeters)
format.buffer().setPaintEffect(QgsBlurEffect.create({'blur_level': '10', 'enabled': '1'}))
assert self.checkRender(format, 'text_buffer_effect', QgsTextRenderer.Buffer, text=['test'])
def testDrawShadow(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_enabled', QgsTextRenderer.Text, text=['test'])
def testDrawShadowOffsetAngle(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetAngle(0)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_offset_angle', QgsTextRenderer.Text, text=['test'])
def testDrawShadowOffsetMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(10)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'shadow_offset_mapunits', QgsTextRenderer.Text, text=['test'])
def testDrawShadowOffsetPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(10)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'shadow_offset_pixels', QgsTextRenderer.Text, text=['test'])
def testDrawShadowBlurRadiusMM(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
format.shadow().setBlurRadius(1)
format.shadow().setBlurRadiusUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_radius_mm', QgsTextRenderer.Text, text=['test'])
def testDrawShadowBlurRadiusMapUnits(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
format.shadow().setBlurRadius(3)
format.shadow().setBlurRadiusUnit(QgsUnitTypes.RenderMapUnits)
assert self.checkRender(format, 'shadow_radius_mapunits', QgsTextRenderer.Text, text=['test'])
def testDrawShadowBlurRadiusPixels(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(1.0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
format.shadow().setBlurRadius(3)
format.shadow().setBlurRadiusUnit(QgsUnitTypes.RenderPixels)
assert self.checkRender(format, 'shadow_radius_pixels', QgsTextRenderer.Text, text=['test'])
def testDrawShadowOpacity(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setOpacity(0.5)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_opacity', QgsTextRenderer.Text, text=['test'])
def testDrawShadowColor(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setColor(QColor(255, 255, 0))
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_color', QgsTextRenderer.Text, text=['test'])
def testDrawShadowScale(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setScale(50)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_scale_50', QgsTextRenderer.Text, text=['test'])
def testDrawShadowScaleUp(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.setColor(QColor(255, 255, 255))
format.shadow().setEnabled(True)
format.shadow().setShadowPlacement(QgsTextShadowSettings.ShadowText)
format.shadow().setScale(150)
format.shadow().setBlurRadius(0)
format.shadow().setOffsetDistance(5)
format.shadow().setOffsetUnit(QgsUnitTypes.RenderMillimeters)
assert self.checkRender(format, 'shadow_scale_150', QgsTextRenderer.Text, text=['test'])
def testDrawShadowBackgroundPlacement(self):
format = QgsTextFormat()
format.setFont(getTestFont('bold'))
format.setSize(60)
| |
row)
if idx == 0:
if not (x == 0 and y == 0):
logging.warning(
'The background component did not start at top left corner but at x={}, y={}!'.format(x, y))
continue
if area < ignore_pred_under:
continue
# dilate bbox
x_center = x + w // 2
y_center = y + h // 2
w = w * bbox_dilation_ratio + 2 * bbox_dilation_size
h = h * bbox_dilation_ratio + 2 * bbox_dilation_size
x = x_center - w // 2
y = y_center - h // 2
ymin, xmin, ymax, xmax = y, x, y + h, x + w
# convert to integers
ymin, xmin, ymax, xmax = [int(item) for item in (ymin, xmin, ymax, xmax)]
bbox_coord_list.append((ymin, xmin, ymax, xmax))
area_list.append(area)
if area_list:
# sort by area_list in descending order, the largest bbox is bbox_coord_list[0]
area_list, bbox_coord_list = list(zip(*sorted(zip(area_list, bbox_coord_list), reverse=True)))
return bbox_coord_list, area_list
def get_bbox_coord_for_largest_cc_in_binary_array(binary_array, **kwargs):
bbox = get_bbox_coord_list_from_binary_array(binary_array, **kwargs)[0][0]
return bbox
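# Illustrative sketch, not part of the original module: the two helpers above
# return (ymin, xmin, ymax, xmax) boxes for the connected components of a
# binary mask, sorted by area, and this wrapper keeps only the largest one.
# The call below assumes the (truncated) full signature of
# get_bbox_coord_list_from_binary_array provides defaults for the
# dilation/threshold keywords referenced in its body.
def _example_largest_cc_bbox():
    toy_mask = np.zeros((20, 20), dtype=np.uint8)
    toy_mask[5:15, 5:15] = 1  # one 10x10 foreground square
    bbox = get_bbox_coord_for_largest_cc_in_binary_array(toy_mask)
    print(bbox)  # (5, 5, 15, 15) if no dilation is applied by the defaults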
def get_largest_foreground_mask(image_array, background_value='auto'):
"""Find the largest foreground connected component
Connected component analysis with output = cv2.connectedComponentsWithStats():
Labels = output[1] is an array with the same shape as the input binary array, with each component
labeled with a different integer (BG is 0).
Args:
image_array: grayscale image array whose background has low intensity (it is thresholded internally)
Returns:
fg_mask_array: boolean numpy array. True for largest foreground connected component
"""
if background_value == 'auto':
# estimate the background from the 5th and 30th percentiles of the image
lower_clip = np.percentile(image_array, 5)
upper_clip = np.percentile(image_array, 30)
if np.abs(upper_clip - lower_clip) / np.max(image_array) < 0.02:
background_value = upper_clip
else:
logging.warning('difference 5th and 30th percentile is {}\nManually inspect this image'.format(
np.abs(upper_clip - lower_clip)))
background_value = lower_clip
else:
assert isinstance(background_value, int)
binary_array = image_array > background_value
output = cv2.connectedComponentsWithStats(binary_array.astype(np.uint8))
stats = output[2]
if len(stats) > 1:
# if there are at least two components returned
# find the idx of the largest fg component by area (excluding 0th row, i.e., the BG)
idx = np.argmax(stats[1:, -1]) + 1
fg_mask_array = (output[1] == idx)
else:
logging.debug('Only one component in the image. Check raw image!')
fg_mask_array = None
return fg_mask_array
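# Illustrative sketch, not part of the original module: a bright blob on a
# dark background is recovered as the largest foreground component. Passing
# an explicit integer background_value sidesteps the percentile-based 'auto'
# heuristic.
def _example_largest_foreground_mask():
    image = np.zeros((50, 50), dtype=np.uint8)
    image[10:30, 10:30] = 200  # bright square = foreground
    fg_mask = get_largest_foreground_mask(image, background_value=0)
    print(fg_mask.sum())  # 400 pixels, i.e. the 20x20 square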
def get_ar(bbox):
"""Get aspect ratio of bbox"""
ymin, xmin, ymax, xmax = bbox
width, height = xmax - xmin, ymax - ymin
ar = max(width, height) / min(width, height)
return ar
def large_ar_suppression(boxes, ar_threshold=2):
"""Filter out bbox with aspect ratio larger than ar_threshold"""
return [bbox for bbox in boxes if get_ar(bbox) <= ar_threshold]
def get_minmax_size(bbox):
"""Get aspect ratio of bbox"""
ymin, xmin, ymax, xmax = bbox
width, height = xmax - xmin, ymax - ymin
min_size = min(width, height)
max_size = max(width, height)
return min_size, max_size
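# Illustrative sketch, not part of the original module: large_ar_suppression
# keeps roughly square boxes and drops elongated ones. Boxes are
# (ymin, xmin, ymax, xmax).
def _example_ar_filtering():
    boxes = [
        (0, 0, 10, 12),  # aspect ratio 1.2 -> kept
        (0, 0, 10, 40),  # aspect ratio 4.0 -> dropped at the default threshold of 2
    ]
    print(large_ar_suppression(boxes))  # [(0, 0, 10, 12)]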
def non_max_suppression_fast(boxes, threshold=0.5, option='union', max_iterations=1):
""" NMS to combine bboxes
Adapted from https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Args:
boxes: in the order of (ymin, xmin, ymax, xmax)
threshold: overlap ratio above which boxes are merged
option: method to postprocess the bbox coordinates
'union': find the bbox for the union of the overlapping boxes
'original': find the original bbox, from right to left
Returns:
merged boxes, and for each merged box the indices of the input boxes it combines
"""
def concate_list(arrays, concateidx):
"""method to help track the resource of combined bounding boxes
Args:
arrays: list of list, represent the indices
concateidx: indices of list to be merged
Returns: merged flat list
"""
result = []
for idx in concateidx:
result.extend(arrays[idx])
return result
merged_boxes_sources = [[i] for i in list(range(len(boxes)))]
for i_iter in range(max_iterations):
num_bbox_before_nms = len(boxes)
# if there are no boxes, return an empty list
if num_bbox_before_nms == 0:
return [], []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
boxes = np.array(boxes).astype("float")
# grab the coordinates of the bounding boxes
# x1, y1 == xmin, ymin
# x2, y2 == xmax, ymax
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
merged_boxes = []
new_merged_boxes_sources = []
# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
# NB. the overlap ratio uses the area of each remaining box as the denominator
# <TODO> add union area calculation
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list
idxs_idx_to_delete = np.concatenate(([last],
np.where(overlap > threshold)[0]))
if option == 'union':
# return the bbox of the union
xx1 = np.min(x1[idxs[idxs_idx_to_delete]])
yy1 = np.min(y1[idxs[idxs_idx_to_delete]])
xx2 = np.max(x2[idxs[idxs_idx_to_delete]])
yy2 = np.max(y2[idxs[idxs_idx_to_delete]])
merged_boxes.append((yy1, xx1, yy2, xx2))
# merged_boxes_sources.append(idxs[idxs_idx_to_delete])
new_merged_boxes_sources.append(concate_list(merged_boxes_sources, idxs[idxs_idx_to_delete]))
elif option == 'original':
merged_boxes.append(boxes[i])
merged_boxes_sources.append(i)
else:
raise ValueError('Unsupported option {}'.format(option))
idxs = np.delete(idxs, idxs_idx_to_delete)
merged_boxes = np.array(merged_boxes).astype(int)
# the original bbox coord
boxes = merged_boxes
merged_boxes_sources = new_merged_boxes_sources
num_bbox_after_nms = len(boxes)
# no bbox has been merged in this iteration
if num_bbox_before_nms == num_bbox_after_nms:
logging.debug('Finish NMS at {} out of {} requested iterations'.format(i_iter + 1, max_iterations))
return boxes, merged_boxes_sources
return boxes, merged_boxes_sources
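# Illustrative sketch, not part of the original module: two strongly
# overlapping boxes are merged into their union, an isolated box is kept
# as-is, and merged_boxes_sources records which inputs went into each output.
def _example_nms_union():
    boxes = [
        (0, 0, 10, 10),
        (1, 1, 11, 11),    # overlaps the first box well above the threshold
        (30, 30, 40, 40),  # isolated box
    ]
    merged, sources = non_max_suppression_fast(boxes, threshold=0.5, option='union')
    # merged  -> array([[30, 30, 40, 40], [0, 0, 11, 11]]): the isolated box and
    #            the union of the overlapping pair (order follows the y2 sort).
    # sources -> [[2], [1, 0]]: indices of the input boxes behind each output box.
    print(merged, sources)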
def calculate_union_area(boxes):
""" calculate the union area of several bounding boxes
Args:
boxes: list of bounding boxes, each one in the order of (ymin, xmin, ymax, xmax)
Returns: union area
"""
# convert to np array if the input is a list
boxes = np.array(boxes)
width = max(boxes[:, 3])
height = max(boxes[:, 2])
canvas = np.zeros([width + 1, height + 1])
for i in range(len(boxes)):
canvas[boxes[i, 1]:boxes[i, 3] + 1, boxes[i, 0]:boxes[i, 2] + 1] = 1
return np.sum(canvas)
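# Illustrative sketch, not part of the original module: two 10x10 boxes with a
# 5x5 overlap have a union area of 100 + 100 - 25 = 175. calculate_union_area
# rasterises the boxes onto a canvas; calculate_union_area_v2 below should
# give the same answer with a scan-line sweep and no canvas allocation.
def _example_union_area():
    boxes = [(0, 0, 9, 9), (5, 5, 14, 14)]
    print(calculate_union_area(boxes))  # 175.0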
def _get_valid_length(line_scan):
"""Helper function for calculating valid length in one line_scan.
Used in calculate_union_area_v2
"""
sum_length = 0
acc = 0
last_x = 0
for current_x in sorted(line_scan):
if acc > 0:
sum_length += current_x - last_x
acc += line_scan[current_x]
last_x = current_x
return sum_length
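# Illustrative sketch, not part of the original module: _get_valid_length
# treats line_scan as a mapping from x coordinate to +1/-1 "interval opens /
# interval closes" events and returns the total covered length. Two
# overlapping intervals [0, 10) and [5, 15) cover a length of 15.
def _example_valid_length():
    line_scan = {0: 1, 10: -1, 5: 1, 15: -1}
    print(_get_valid_length(line_scan))  # 15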
def calculate_union_area_v2(boxes):
"""Calculate the union area of several bounding boxes
This algorithm is inspired by numerical integration.
Scan a line through the whole image. Calculate the 'valid length (height)'
of each scanning position, and the intervals (width) during which the
'valid length' stays the same.
Args:
boxes: list of bounding boxes, each one in the order of (ymin, xmin, ymax, xmax)
Returns: union area
"""
# convert to np array if the input is a list
boxes = np.array(boxes)
START = 1
END = -START
# key: y axis of the changing line
# value list of tuple(x axis,status(beginning/ending of a meeting) )
boundary = {}
for box in boxes:
y0, x0, y1, x1 = box
if y0 not in boundary:
boundary[y0] = []
if y1 + 1 not in boundary:
boundary[y1 + 1] = []
# starting and ending of a bounding box are 'changing lines'
# since in our case, area means number of pixels
# and [x0,x1],[y0,y1] are inclusive,
# so '+1' is needed for x1 and y1
# in line y0, a meeting starts at x0 and ends at x1
boundary[y0].append((x0, START))
boundary[y0].append((x1 + 1, END))
# in line y1 + 1, there will be no more meeting
# the effect needs to be negated
boundary[y1 + 1].append((x0, END))
boundary[y1 + 1].append((x1 + 1, START))
# valid length in each line is equivalent to
# 'meeting scheduling' interview problem.
# previous line's y value with a changing line scan
# first value does not matter
# as long as valid_length is set
FAILED = 2
IN_PROGRESS = 3
class TargetRender(proto.Message):
r"""Details of rendering for a single target.
Attributes:
rendering_build (str):
Output only. The resource name of the Cloud Build ``Build``
object that is used to render the manifest for this target.
Format is
``projects/{project}/locations/{location}/builds/{build}``.
rendering_state (google.cloud.deploy_v1.types.Release.TargetRender.TargetRenderState):
Output only. Current state of the render
operation for this Target.
"""
class TargetRenderState(proto.Enum):
r"""Valid states of the render operation."""
TARGET_RENDER_STATE_UNSPECIFIED = 0
SUCCEEDED = 1
FAILED = 2
IN_PROGRESS = 3
rendering_build = proto.Field(
proto.STRING,
number=1,
)
rendering_state = proto.Field(
proto.ENUM,
number=2,
enum='Release.TargetRender.TargetRenderState',
)
name = proto.Field(
proto.STRING,
number=1,
)
uid = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
annotations = proto.MapField(
proto.STRING,
proto.STRING,
number=4,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=5,
)
create_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
render_start_time = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
render_end_time = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
skaffold_config_uri = proto.Field(
proto.STRING,
number=17,
)
skaffold_config_path = proto.Field(
proto.STRING,
number=9,
)
build_artifacts = proto.RepeatedField(
proto.MESSAGE,
number=10,
message='BuildArtifact',
)
delivery_pipeline_snapshot = proto.Field(
proto.MESSAGE,
number=11,
message='DeliveryPipeline',
)
target_snapshots = proto.RepeatedField(
proto.MESSAGE,
number=12,
message='Target',
)
render_state = proto.Field(
proto.ENUM,
number=13,
enum=RenderState,
)
etag = proto.Field(
proto.STRING,
number=16,
)
skaffold_version = proto.Field(
proto.STRING,
number=19,
)
target_artifacts = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=20,
message='TargetArtifact',
)
target_renders = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=22,
message=TargetRender,
)
class BuildArtifact(proto.Message):
r"""Description of an a image to use during Skaffold rendering.
Attributes:
image (str):
Image name in Skaffold configuration.
tag (str):
Image tag to use. This will generally be the
full path to an image, such as "gcr.io/my-
project/busybox:1.2.3" or "gcr.io/my-
project/busybox@sha256:abc123".
"""
image = proto.Field(
proto.STRING,
number=3,
)
tag = proto.Field(
proto.STRING,
number=2,
)
class TargetArtifact(proto.Message):
r"""The artifacts produced by a target render operation.
Attributes:
artifact_uri (str):
Output only. URI of a directory containing
the artifacts. This contains deployment
configuration used by Skaffold during a rollout,
and all paths are relative to this location.
skaffold_config_path (str):
Output only. File path of the resolved
Skaffold configuration relative to the URI.
manifest_path (str):
Output only. File path of the rendered
manifest relative to the URI.
"""
artifact_uri = proto.Field(
proto.STRING,
number=4,
oneof='uri',
)
skaffold_config_path = proto.Field(
proto.STRING,
number=2,
)
manifest_path = proto.Field(
proto.STRING,
number=3,
)
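
# A small sketch of how the TargetArtifact paths above compose: the Skaffold
# config and manifest paths are relative to artifact_uri. The example values
# are placeholders, not real output.
def resolve_manifest_uri(artifact_uri, manifest_path):
    return '{}/{}'.format(artifact_uri.rstrip('/'), manifest_path)
# resolve_manifest_uri('gs://my-bucket/render/rel-001/target-prod', 'manifest.yaml')
# -> 'gs://my-bucket/render/rel-001/target-prod/manifest.yaml'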
class ListReleasesRequest(proto.Message):
r"""The request object for ``ListReleases``.
Attributes:
parent (str):
Required. The ``DeliveryPipeline`` which owns this
collection of ``Release`` objects.
page_size (int):
Optional. The maximum number of ``Release`` objects to
return. The service may return fewer than this value. If
unspecified, at most 50 ``Release`` objects will be
returned. The maximum value is 1000; values above 1000 will
be set to 1000.
page_token (str):
Optional. A page token, received from a previous
``ListReleases`` call. Provide this to retrieve the
subsequent page.
When paginating, all other provided parameters match the
call that provided the page token.
filter (str):
Optional. Filter builds to be returned. See
https://google.aip.dev/160 for more details.
order_by (str):
Optional. Field to sort by. See
https://google.aip.dev/132#ordering for more
details.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
filter = proto.Field(
proto.STRING,
number=4,
)
order_by = proto.Field(
proto.STRING,
number=5,
)
class ListReleasesResponse(proto.Message):
r"""The response object from ``ListReleases``.
Attributes:
releases (Sequence[google.cloud.deploy_v1.types.Release]):
The ``Release`` objects.
next_page_token (str):
A token, which can be sent as ``page_token`` to retrieve the
next page. If this field is omitted, there are no subsequent
pages.
unreachable (Sequence[str]):
Locations that could not be reached.
"""
@property
def raw_page(self):
return self
releases = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Release',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
unreachable = proto.RepeatedField(
proto.STRING,
number=3,
)
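
# A hedged usage sketch for the two messages above: listing releases through the
# generated client, which follows page_token/next_page_token under the hood.
# CloudDeployClient and list_releases come from the generated deploy_v1 package;
# the parent value is a placeholder.
def list_all_releases(parent):
    from google.cloud import deploy_v1
    client = deploy_v1.CloudDeployClient()
    request = deploy_v1.ListReleasesRequest(parent=parent, page_size=50)
    for release in client.list_releases(request=request):
        print(release.name, release.render_state)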
class GetReleaseRequest(proto.Message):
r"""The request object for ``GetRelease``.
Attributes:
name (str):
Required. Name of the ``Release``. Format must be
projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}/releases/{release_name}.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateReleaseRequest(proto.Message):
    r"""The request object for ``CreateRelease``.
Attributes:
parent (str):
Required. The parent collection in which the ``Release``
should be created. Format should be
projects/{project_id}/locations/{location_name}/deliveryPipelines/{pipeline_name}.
release_id (str):
Required. ID of the ``Release``.
release (google.cloud.deploy_v1.types.Release):
Required. The ``Release`` to create.
request_id (str):
Optional. A request ID to identify requests.
Specify a unique request ID so that if you must
retry your request, the server will know to
ignore the request if it has already been
completed. The server will guarantee that for at
least 60 minutes since the first request.
For example, consider a situation where you make
an initial request and the request times out. If
you make the request again with the same request
ID, the server can check if original operation
with the same request ID was received, and if
so, will ignore the second request. This
prevents clients from accidentally creating
duplicate commitments.
The request ID must be a valid UUID with the
exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
validate_only (bool):
Optional. If set to true, the request is
validated and the user is provided with an
expected result, but no actual change is made.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
release_id = proto.Field(
proto.STRING,
number=2,
)
release = proto.Field(
proto.MESSAGE,
number=3,
message='Release',
)
request_id = proto.Field(
proto.STRING,
number=4,
)
validate_only = proto.Field(
proto.BOOL,
number=5,
)
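
# A hedged sketch of using CreateReleaseRequest above: request_id makes retries
# of the same call idempotent (per the field description), and create_release
# returns a long-running operation. Client names come from the generated
# deploy_v1 package; parent/release_id/URI values are placeholders.
def create_release_idempotently(parent, release_id, skaffold_config_uri):
    import uuid
    from google.cloud import deploy_v1
    client = deploy_v1.CloudDeployClient()
    request = deploy_v1.CreateReleaseRequest(
        parent=parent,
        release_id=release_id,
        release=deploy_v1.Release(skaffold_config_uri=skaffold_config_uri),
        request_id=str(uuid.uuid4()),
    )
    operation = client.create_release(request=request)
    return operation.result()  # blocks until the release is created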
class Rollout(proto.Message):
r"""A ``Rollout`` resource in the Google Cloud Deploy API.
A ``Rollout`` contains information around a specific deployment to a
``Target``.
Attributes:
name (str):
Optional. Name of the ``Rollout``. Format is
projects/{project}/
locations/{location}/deliveryPipelines/{deliveryPipeline}/
releases/{release}/rollouts/[a-z][a-z0-9-]{0,62}.
uid (str):
Output only. Unique identifier of the ``Rollout``.
description (str):
Description of the ``Rollout`` for user purposes. Max length
is 255 characters.
annotations (Sequence[google.cloud.deploy_v1.types.Rollout.AnnotationsEntry]):
User annotations. These attributes can only
be set and used by the user, and not by Google
Cloud Deploy. See
https://google.aip.dev/128#annotations for more
details such as format and size limitations.
labels (Sequence[google.cloud.deploy_v1.types.Rollout.LabelsEntry]):
Labels are attributes that can be set and used by both the
user and by Google Cloud Deploy. Labels must meet the
following constraints: Each resource is limited to 64
labels. Keys must conform to the regexp:
``[a-zA-Z][a-zA-Z0-9_-]{0,62}``. Values must conform to the
regexp: ``[a-zA-Z0-9_-]{0,63}``. Both keys and values are
additionally constrained to be <= 128 bytes in size.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time at which the ``Rollout`` was created.
approve_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time at which the ``Rollout`` was approved.
enqueue_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time at which the ``Rollout`` was enqueued.
deploy_start_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time at which the ``Rollout`` started
deploying.
deploy_end_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time at which the ``Rollout`` finished
deploying.
target_id (str):
Required. The ID of Target to which this ``Rollout`` is
deploying.
approval_state (google.cloud.deploy_v1.types.Rollout.ApprovalState):
Output only. Approval state of the ``Rollout``.
state (google.cloud.deploy_v1.types.Rollout.State):
Output only. Current state of the ``Rollout``.
failure_reason (str):
Output only. Reason the build failed. Empty
if the build succeeded.
deploying_build (str):
Output only. The resource name of the Cloud Build ``Build``
object that is used to deploy the Rollout. Format is
``projects/{project}/locations/{location}/builds/{build}``.
etag (str):
This checksum is computed by the server based
on the value of other fields, and may be sent on
update and delete requests to ensure the client
has an up-to-date value before proceeding.
"""
class ApprovalState(proto.Enum):
r"""Valid approval states of a ``Rollout``."""
APPROVAL_STATE_UNSPECIFIED = 0
NEEDS_APPROVAL = 1
DOES_NOT_NEED_APPROVAL = 2
APPROVED = 3
REJECTED = 4
class State(proto.Enum):
r"""Valid states of a ``Rollout``."""
STATE_UNSPECIFIED = 0
SUCCEEDED = 1
FAILED = 2
IN_PROGRESS = 3
PENDING_APPROVAL = 4
APPROVAL_REJECTED = 5
PENDING = 6
PENDING_RELEASE = 7
name = proto.Field(
proto.STRING,
number=1,
)
uid = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
annotations = proto.MapField(
proto.STRING,
proto.STRING,
number=4,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=5,
)
create_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
approve_time = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
enqueue_time = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
deploy_start_time = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
deploy_end_time = proto.Field(
proto.MESSAGE,
number=10,
message=timestamp_pb2.Timestamp,
)
target_id = proto.Field(
proto.STRING,
number=18,
)
approval_state = proto.Field(
proto.ENUM,
number=12,
enum=ApprovalState,
)
state = proto.Field(
proto.ENUM,
number=13,
enum=State,
)
failure_reason = proto.Field(
proto.STRING,
number=14,
)
deploying_build = proto.Field(
proto.STRING,
number=17,
)
    etag =
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from collections import OrderedDict
from mock import patch, call, MagicMock, Mock
os.environ['JUJU_UNIT_NAME'] = 'cinder'
import cinder_utils as cinder_utils
from test_utils import CharmTestCase
TO_PATCH = [
# helpers.core.hookenv
'config',
'log',
'juju_log',
'relation_get',
'relation_set',
'local_unit',
# helpers.core.host
'lsb_release',
'mounts',
'umount',
'mkdir',
'service_restart',
# ceph utils
# storage_utils
'create_lvm_physical_volume',
'create_lvm_volume_group',
'deactivate_lvm_volume_group',
'is_lvm_physical_volume',
'list_lvm_volume_group',
'relation_ids',
'relation_set',
'remove_lvm_physical_volume',
'ensure_loopback_device',
'is_block_device',
'is_device_mounted',
'zap_disk',
'os_release',
'get_os_codename_install_source',
'configure_installation_source',
'is_elected_leader',
'templating',
'install_alternative',
'os_application_version_set',
# fetch
'apt_update',
'apt_upgrade',
'apt_install',
'apt_purge',
'apt_autoremove',
'filter_missing_packages',
'service_stop',
'service_start',
# cinder
'ceph_config_file',
'token_cache_pkgs',
'enable_memcache',
]
MOUNTS = [
['/mnt', '/dev/fakevbd']
]
DPKG_OPTIONS = [
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
FDISKDISPLAY = """
Disk /dev/fakevbd doesn't contain a valid partition table
Disk /dev/fakevbd: 21.5 GB, 21474836480 bytes
16 heads, 63 sectors/track, 41610 cylinders, total 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
"""
class TestCinderUtils(CharmTestCase):
def setUp(self):
super(TestCinderUtils, self).setUp(cinder_utils, TO_PATCH)
self.config.side_effect = self.test_config.get_all
self.apache24_conf_dir = '/etc/apache2/conf-available'
self.charm_ceph_conf = '/var/lib/charm/cinder/ceph.conf'
self.ceph_conf = '/etc/ceph/ceph.conf'
self.cinder_conf = '/etc/cinder/cinder.conf'
def svc_enabled(self, svc):
return svc in self.test_config.get('enabled-services')
def test_all_services_enabled(self):
'It determines all services are enabled based on config'
self.test_config.set('enabled-services', 'all')
enabled = []
for s in ['volume', 'api', 'scheduler']:
enabled.append(cinder_utils.service_enabled(s))
self.assertEqual(enabled, [True, True, True])
def test_service_enabled(self):
'It determines services are enabled based on config'
self.test_config.set('enabled-services', 'api,volume,scheduler')
self.assertTrue(cinder_utils.service_enabled('volume'))
def test_service_not_enabled(self):
'It determines services are not enabled based on config'
self.test_config.set('enabled-services', 'api,scheduler')
self.assertFalse(cinder_utils.service_enabled('volume'))
@patch.object(cinder_utils, 'get_subordinate_release_packages')
def test_determine_purge_packages(
self,
mock_get_subordinate_release_packages):
'Ensure no packages are identified for purge prior to rocky'
self.os_release.return_value = 'queens'
self.assertEqual(cinder_utils.determine_purge_packages(), [])
@patch.object(cinder_utils, 'get_subordinate_release_packages')
def test_determine_purge_packages_rocky(
self,
mock_get_subordinate_release_packages):
'Ensure python packages are identified for purge at rocky'
self.os_release.return_value = 'rocky'
self.assertEqual(cinder_utils.determine_purge_packages(),
sorted(set([p for p in cinder_utils.COMMON_PACKAGES
if p.startswith('python-')] +
['python-cinder', 'python-memcache'])))
@patch.object(cinder_utils, 'get_subordinate_release_packages')
@patch('cinder_utils.service_enabled')
def test_determine_packages_all(
self,
service_enabled,
mock_get_subordinate_release_packages):
'It determines all packages required when all services enabled'
service_enabled.return_value = True
self.os_release.return_value = 'icehouse'
pkgs = cinder_utils.determine_packages()
self.assertEqual(sorted(pkgs),
sorted(cinder_utils.COMMON_PACKAGES +
cinder_utils.VOLUME_PACKAGES +
cinder_utils.API_PACKAGES +
cinder_utils.SCHEDULER_PACKAGES))
@patch.object(cinder_utils, 'get_subordinate_release_packages')
@patch('cinder_utils.service_enabled')
def test_determine_packages_all_rocky(
self,
service_enabled,
mock_get_subordinate_release_packages):
'Check python3 packages are installed @ rocky'
service_enabled.return_value = True
self.os_release.return_value = 'rocky'
pkgs = cinder_utils.determine_packages()
self.assertEqual(
sorted(pkgs),
sorted([p for p in cinder_utils.COMMON_PACKAGES
if not p.startswith('python-')] +
cinder_utils.VOLUME_PACKAGES +
cinder_utils.API_PACKAGES +
cinder_utils.SCHEDULER_PACKAGES +
cinder_utils.PY3_PACKAGES +
cinder_utils.PY3_API_PACKAGES))
@patch.object(cinder_utils, 'get_subordinate_release_packages')
@patch('cinder_utils.service_enabled')
def test_determine_packages_subset(self, service_enabled,
mock_get_subordinate_release_packages):
'It determines packages required for a subset of enabled services'
service_enabled.side_effect = self.svc_enabled
self.test_config.set('openstack-origin', 'cloud:xenial-newton')
self.os_release.return_value = 'newton'
self.token_cache_pkgs.return_value = ['memcached']
self.test_config.set('enabled-services', 'api')
pkgs = cinder_utils.determine_packages()
common = cinder_utils.COMMON_PACKAGES
self.assertEqual(
sorted(pkgs),
sorted(common + cinder_utils.API_PACKAGES + ['memcached']))
self.test_config.set('enabled-services', 'volume')
pkgs = cinder_utils.determine_packages()
common = cinder_utils.COMMON_PACKAGES
self.assertEqual(
sorted(pkgs),
sorted(common + cinder_utils.VOLUME_PACKAGES + ['memcached']))
self.test_config.set('enabled-services', 'api,scheduler')
pkgs = cinder_utils.determine_packages()
common = cinder_utils.COMMON_PACKAGES
self.assertEqual(
sorted(pkgs),
sorted(common + cinder_utils.API_PACKAGES + ['memcached'] +
cinder_utils.SCHEDULER_PACKAGES))
@patch('cinder_utils.restart_map')
def test_services(self, restart_map):
restart_map.return_value = OrderedDict([
('test_conf1', ['svc1']),
('test_conf2', ['svc2', 'svc3', 'svc1']),
])
self.assertEqual(cinder_utils.services(), ['svc1', 'svc2', 'svc3'])
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_all_enabled(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.return_value = []
self.enable_memcache.return_value = True
ex_map = OrderedDict([
('/etc/cinder/cinder.conf', ['cinder-api', 'cinder-volume',
'cinder-scheduler', 'haproxy']),
('/etc/cinder/api-paste.ini', ['cinder-api']),
('/etc/haproxy/haproxy.cfg', ['haproxy']),
('/etc/memcached.conf', ['memcached']),
('/etc/apache2/sites-available/openstack_https_frontend.conf',
['apache2']),
])
for cfg in ex_map.keys():
self.assertEqual(cinder_utils.resource_map()[cfg]['services'],
ex_map[cfg])
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_no_api(self, path_exists,
service_enabled):
service_enabled.side_effect = self.svc_enabled
self.test_config.set('enabled-services', 'scheduler,volume')
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.return_value = []
ex_map = OrderedDict([
('/etc/cinder/cinder.conf', ['cinder-volume',
'cinder-scheduler']),
])
for cfg in ex_map.keys():
self.assertEqual(cinder_utils.resource_map()[cfg]['services'],
ex_map[cfg])
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_backup_backend(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': ['rid1'],
'ceph': []}[x]
self.assertTrue(
'cinder-backup' in
cinder_utils.resource_map()[self.cinder_conf]['services'])
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_no_backup(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': [],
'ceph': []}[x]
self.assertFalse(
'cinder-backup' in
cinder_utils.resource_map()[self.cinder_conf]['services'])
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_no_ceph_conf(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': [],
'ceph': []}[x]
self.assertFalse(self.charm_ceph_conf in
cinder_utils.resource_map().keys())
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_ceph_conf(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.return_value = True
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': [],
'ceph': ['rid1']}[x]
self.assertTrue(self.charm_ceph_conf in
cinder_utils.resource_map().keys())
self.mkdir.assert_has_calls(
[call('/etc/ceph'),
call('/var/lib/charm/cinder')]
)
self.install_alternative.assert_called_with(
'ceph.conf',
'/etc/ceph/ceph.conf',
self.charm_ceph_conf)
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_old_apache(self, path_exists,
service_enabled):
service_enabled.return_value = True
path_exists.side_effect = lambda x: x not in [self.apache24_conf_dir]
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': [],
'ceph': []}[x]
self.assertTrue(
'/etc/apache2/sites-available/openstack_https_frontend' in
cinder_utils.resource_map().keys())
@patch('cinder_utils.service_enabled')
@patch('os.path.exists')
def test_creates_resource_map_apache24(self, path_exists, service_enabled):
service_enabled.return_value = True
path_exists.side_effect = lambda x: x in [self.apache24_conf_dir]
self.os_release.return_value = 'havana'
self.ceph_config_file.return_value = self.charm_ceph_conf
self.relation_ids.side_effect = lambda x: {
'storage-backend': [],
'backup-backend': [],
'ceph': []}[x]
self.assertTrue(
'/etc/apache2/sites-available/openstack_https_frontend.conf' in
cinder_utils.resource_map().keys())
@patch('cinder_utils.service_enabled')
def test_filter_services_selective(self, service_enabled):
service_enabled.side_effect = self.svc_enabled
self.test_config.set('enabled-services', 'scheduler,volume')
self.assertEqual(
cinder_utils.filter_services(['cinder-api', 'cinder-volume',
'haproxy']),
['cinder-volume']
)
@patch('cinder_utils.service_enabled')
def test_filter_services_all(self, service_enabled):
service_enabled.return_value = True
self.test_config.set('enabled-services', 'scheduler,volume')
self.assertEqual(
cinder_utils.filter_services(['cinder-api', 'cinder-volume',
'haproxy']),
['cinder-api', 'cinder-volume', 'haproxy']
)
@patch('cinder_utils.resource_map')
def test_restart_map(self, resource_map):
resource_map.return_value = OrderedDict([
('/etc/testfile1.conf', {
'hook_contexts': ['dummyctxt1', 'dummyctxt2'],
'services': ['svc1'],
}),
('/etc/testfile2.conf', {
'hook_contexts': ['dummyctxt1', 'dummyctxt3'],
'services': [],
}),
])
ex_map = OrderedDict([
('/etc/testfile1.conf', ['svc1']),
])
self.assertEqual(cinder_utils.restart_map(), ex_map)
def test_clean_storage_unmount(self):
'It unmounts block device when cleaning storage'
self.is_lvm_physical_volume.return_value = False
self.zap_disk.return_value = True
self.mounts.return_value = MOUNTS
cinder_utils.clean_storage('/dev/fakevbd')
self.umount.called_with('/dev/fakevbd', True)
def test_clean_storage_lvm_wipe(self):
'It removes traces of LVM when cleaning storage'
self.mounts.return_value = []
self.is_lvm_physical_volume.return_value = True
cinder_utils.clean_storage('/dev/fakevbd')
self.remove_lvm_physical_volume.assert_called_with('/dev/fakevbd')
self.deactivate_lvm_volume_group.assert_called_with('/dev/fakevbd')
self.zap_disk.assert_called_with('/dev/fakevbd')
def test_clean_storage_zap_disk(self):
        'It zaps the disk when cleaning storage'
self.mounts.return_value = []
self.is_lvm_physical_volume.return_value = False
cinder_utils.clean_storage('/dev/fakevbd')
self.zap_disk.assert_called_with('/dev/fakevbd')
def test_parse_block_device(self):
self.assertTrue(cinder_utils._parse_block_device(None),
(None, 0))
self.assertTrue(cinder_utils._parse_block_device('fakevdc'),
('/dev/fakevdc', 0))
self.assertTrue(cinder_utils._parse_block_device('/dev/fakevdc'),
('/dev/fakevdc', 0))
self.assertTrue(cinder_utils._parse_block_device('/dev/fakevdc'),
('/dev/fakevdc', 0))
self.assertTrue(cinder_utils._parse_block_device('/mnt/loop0|10'),
('/mnt/loop0', 10))
self.assertTrue(cinder_utils._parse_block_device('/mnt/loop0'),
('/mnt/loop0', cinder_utils.DEFAULT_LOOPBACK_SIZE))
@patch('subprocess.check_output')
def test_has_partition_table(self, _check):
_check.return_value = FDISKDISPLAY.encode()
block_device = '/dev/fakevbd'
cinder_utils.has_partition_table(block_device)
_check.assert_called_with(['fdisk', '-l', '/dev/fakevbd'], stderr=-2)
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'ensure_lvm_volume_group_non_existent')
@patch.object(cinder_utils, 'clean_storage')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
@patch.object(cinder_utils, 'extend_lvm_volume_group')
@patch.object(cinder_utils, 'list_thin_logical_volume_pools')
def test_configure_lvm_storage(self, list_thin_pools,
extend_lvm, reduce_lvm, clean_storage,
ensure_non_existent):
devices = ['/dev/fakevbd', '/dev/fakevdc']
self.is_device_mounted.return_value = False
self.is_lvm_physical_volume.return_value = False
self.is_block_device.return_value = True
self.ensure_loopback_device.side_effect = lambda x, y: x
cinder_utils.configure_lvm_storage(devices, 'test', True, True)
clean_storage.assert_has_calls(
[call('/dev/fakevbd'),
call('/dev/fakevdc')]
)
self.create_lvm_physical_volume.assert_has_calls(
[call('/dev/fakevbd'),
call('/dev/fakevdc')]
)
self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
reduce_lvm.assert_called_with('test')
extend_lvm.assert_called_with('test', '/dev/fakevdc')
ensure_non_existent.assert_called_with('test')
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'has_partition_table')
@patch.object(cinder_utils, 'clean_storage')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
@patch.object(cinder_utils, 'extend_lvm_volume_group')
@patch.object(cinder_utils, 'list_thin_logical_volume_pools')
@patch.object(cinder_utils, 'extend_logical_volume_by_device')
def test_configure_lvm_storage_unused_dev(self, extend_lv_by_dev,
list_thin_pools,
extend_lvm, reduce_lvm,
clean_storage, has_part):
devices = ['/dev/fakevbd', '/dev/fakevdc']
self.is_device_mounted.return_value = False
self.is_lvm_physical_volume.return_value = False
self.is_block_device.return_value = True
has_part.return_value = False
self.ensure_loopback_device.side_effect = lambda x, y: x
list_thin_pools.return_value = ['vg/thinpool']
cinder_utils.configure_lvm_storage(devices, 'test', False, True)
clean_storage.assert_has_calls(
[call('/dev/fakevbd'),
call('/dev/fakevdc')]
)
self.create_lvm_physical_volume.assert_has_calls(
[call('/dev/fakevbd'),
call('/dev/fakevdc')]
)
self.create_lvm_volume_group.assert_called_with('test', '/dev/fakevbd')
reduce_lvm.assert_called_with('test')
extend_lvm.assert_called_with('test', '/dev/fakevdc')
extend_lv_by_dev.assert_called_once_with('vg/thinpool',
'/dev/fakevdc')
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'has_partition_table')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
def test_configure_lvm_storage_used_dev(self, reduce_lvm, has_part):
devices = ['/dev/fakevbd', '/dev/fakevdc']
self.is_lvm_physical_volume.return_value = False
has_part.return_value = True
cinder_utils.configure_lvm_storage(devices, 'test', False, True)
reduce_lvm.assert_called_with('test')
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'ensure_lvm_volume_group_non_existent')
@patch.object(cinder_utils, 'clean_storage')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
@patch.object(cinder_utils, 'extend_lvm_volume_group')
def test_configure_lvm_storage_loopback(self, extend_lvm, reduce_lvm,
clean_storage,
ensure_non_existent):
devices = ['/mnt/loop0|10']
self.ensure_loopback_device.return_value = '/dev/loop0'
self.is_device_mounted.return_value = False
self.is_lvm_physical_volume.return_value = False
self.is_block_device.return_value = False
cinder_utils.configure_lvm_storage(devices, 'test', True, True)
clean_storage.assert_called_with('/dev/loop0')
self.ensure_loopback_device.assert_called_with('/mnt/loop0', '10')
self.create_lvm_physical_volume.assert_called_with('/dev/loop0')
self.create_lvm_volume_group.assert_called_with('test', '/dev/loop0')
reduce_lvm.assert_called_with('test')
self.assertFalse(extend_lvm.called)
ensure_non_existent.assert_called_with('test')
@patch.object(cinder_utils, 'lvm_volume_group_exists')
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'clean_storage')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
@patch.object(cinder_utils, 'extend_lvm_volume_group')
@patch.object(cinder_utils, 'list_thin_logical_volume_pools')
def test_configure_lvm_storage_existing_vg(self, list_thin_pools,
extend_lvm, reduce_lvm,
clean_storage, lvm_exists):
def pv_lookup(device):
devices = {
'/dev/fakevbd': True,
'/dev/fakevdc': False
}
return devices[device]
def vg_lookup(device):
devices = {
'/dev/fakevbd': 'test',
'/dev/fakevdc': None
}
return devices[device]
devices = ['/dev/fakevbd', '/dev/fakevdc']
self.is_device_mounted.return_value = False
self.is_block_device.return_value = True
lvm_exists.return_value = False
self.is_lvm_physical_volume.side_effect = pv_lookup
self.list_lvm_volume_group.side_effect = vg_lookup
self.ensure_loopback_device.side_effect = lambda x, y: x
cinder_utils.configure_lvm_storage(devices, 'test', True, True)
clean_storage.assert_has_calls(
[call('/dev/fakevdc')]
)
self.create_lvm_physical_volume.assert_has_calls(
[call('/dev/fakevdc')]
)
reduce_lvm.assert_called_with('test')
extend_lvm.assert_called_with('test', '/dev/fakevdc')
self.assertFalse(self.create_lvm_volume_group.called)
@patch.object(cinder_utils, 'lvm_volume_group_exists')
@patch('cinder_utils.log_lvm_info', Mock())
@patch.object(cinder_utils, 'clean_storage')
@patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
@patch.object(cinder_utils, 'extend_lvm_volume_group')
@patch.object(cinder_utils, 'list_thin_logical_volume_pools')
def test_configure_lvm_storage_different_vg(self, list_thin_pools,
extend_lvm, reduce_lvm,
clean_storage, lvm_exists):
def pv_lookup(device):
devices = {
'/dev/fakevbd': True,
'/dev/fakevdc': True
server-side information.
This is not available for all types of repositories. The information
will be specific to that type of repository.
"""
name = 'info'
singleton = True
allowed_methods = ('GET',)
mimetype_item_resource_name = 'repository-info'
@webapi_check_local_site
@webapi_check_login_required
@webapi_response_errors(DOES_NOT_EXIST, REPO_NOT_IMPLEMENTED,
REPO_INFO_ERROR)
def get(self, request, *args, **kwargs):
"""Returns repository-specific information from a server."""
try:
repository = repository_resource.get_object(request, *args,
**kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
try:
tool = repository.get_scmtool()
return 200, {
self.item_result_key: tool.get_repository_info()
}
except NotImplementedError:
return REPO_NOT_IMPLEMENTED
except:
return REPO_INFO_ERROR
repository_info_resource = RepositoryInfoResource()
class RepositoryResource(WebAPIResource):
"""Provides information on a registered repository.
Review Board has a list of known repositories, which can be modified
through the site's administration interface. These repositories contain
the information needed for Review Board to access the files referenced
in diffs.
"""
model = Repository
name_plural = 'repositories'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the repository.',
},
'name': {
'type': str,
'description': 'The name of the repository.',
},
'path': {
'type': str,
'description': 'The main path to the repository, which is used '
'for communicating with the repository and '
'accessing files.',
},
'tool': {
'type': str,
'description': 'The name of the internal repository '
'communication class used to talk to the '
'repository. This is generally the type of the '
'repository.'
}
}
uri_object_key = 'repository_id'
item_child_resources = [repository_info_resource]
autogenerate_etags = True
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
@webapi_check_login_required
def get_queryset(self, request, local_site_name=None, *args, **kwargs):
local_site = _get_local_site(local_site_name)
return self.model.objects.accessible(request.user,
visible_only=True,
local_site=local_site)
def serialize_tool_field(self, obj):
return obj.tool.name
def has_access_permissions(self, request, repository, *args, **kwargs):
return repository.is_accessible_by(request.user)
def has_modify_permissions(self, request, repository, *args, **kwargs):
return repository.is_mutable_by(request.user)
def has_delete_permissions(self, request, repository, *args, **kwargs):
return repository.is_mutable_by(request.user)
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get_list(self, request, *args, **kwargs):
"""Retrieves the list of repositories on the server.
This will only list visible repositories. Any repository that the
administrator has hidden will be excluded from the list.
"""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Retrieves information on a particular repository.
This will only return basic information on the repository.
Authentication information, hosting details, and repository-specific
information are not provided.
"""
pass
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(BAD_HOST_KEY, INVALID_FORM_DATA, NOT_LOGGED_IN,
PERMISSION_DENIED, REPO_AUTHENTICATION_ERROR,
SERVER_CONFIG_ERROR, UNVERIFIED_HOST_CERT,
UNVERIFIED_HOST_KEY)
@webapi_request_fields(
required={
'name': {
'type': str,
'description': 'The human-readable name of the repository.',
},
'path': {
'type': str,
'description': 'The path to the repository.',
},
'tool': {
'type': str,
'description': 'The ID of the SCMTool to use.',
},
},
optional={
'bug_tracker': {
'type': str,
'description': 'The URL to a bug in the bug tracker for '
'this repository, with ``%s`` in place of the '
'bug ID.',
},
'encoding': {
'type': str,
'description': 'The encoding used for files in the '
'repository. This is an advanced setting '
'and should only be used if you absolutely '
'need it.',
},
'mirror_path': {
'type': str,
'description': 'An alternate path to the repository.',
},
'password': {
'type': str,
'description': 'The password used to access the repository.',
},
'public': {
'type': bool,
'description': 'Whether or not review requests on the '
'repository will be publicly accessible '
'by users on the site. The default is true.',
},
'raw_file_url': {
'type': str,
'description': "A URL mask used to check out a particular "
"file using HTTP. This is needed for "
"repository types that can't access files "
"natively. Use ``<revision>`` and "
"``<filename>`` in the URL in place of the "
"revision and filename parts of the path.",
},
'trust_host': {
'type': bool,
'description': 'Whether or not any unknown host key or '
'certificate should be accepted. The default '
'is false, in which case this will error out '
'if encountering an unknown host key or '
'certificate.',
},
'username': {
'type': str,
'description': 'The username used to access the repository.',
},
},
)
def create(self, request, name, path, tool, trust_host=False,
bug_tracker=None, encoding=None, mirror_path=None,
               password=None, public=None, raw_file_url=None, username=None,
local_site_name=None, *args, **kwargs):
"""Creates a repository.
This will create a new repository that can immediately be used for
review requests.
The ``tool`` is a registered SCMTool ID. This must be known beforehand,
and can be looked up in the Review Board administration UI.
Before saving the new repository, the repository will be checked for
access. On success, the repository will be created and this will
return :http:`201`.
In the event of an access problem (authentication problems,
bad/unknown SSH key, or unknown certificate), an error will be
returned and the repository information won't be updated. Pass
``trust_host=1`` to approve bad/unknown SSH keys or certificates.
"""
local_site = _get_local_site(local_site_name)
if not Repository.objects.can_create(request.user, local_site):
return _no_access_error(request.user)
try:
scmtool = Tool.objects.get(name=tool)
except Tool.DoesNotExist:
return INVALID_FORM_DATA, {
'fields': {
'tool': ['This is not a valid SCMTool'],
}
}
error_result = self._check_repository(scmtool.get_scmtool_class(),
path, username, password,
local_site, trust_host)
if error_result is not None:
return error_result
if public is None:
public = True
repository = Repository.objects.create(
name=name,
path=path,
mirror_path=mirror_path or '',
raw_file_url=raw_file_url or '',
username=username or '',
password=password or '',
tool=scmtool,
bug_tracker=bug_tracker or '',
encoding=encoding or '',
public=public,
local_site=local_site)
return 201, {
self.item_result_key: repository,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED,
INVALID_FORM_DATA, SERVER_CONFIG_ERROR,
BAD_HOST_KEY, UNVERIFIED_HOST_KEY,
UNVERIFIED_HOST_CERT, REPO_AUTHENTICATION_ERROR)
@webapi_request_fields(
optional={
'bug_tracker': {
'type': str,
'description': 'The URL to a bug in the bug tracker for '
'this repository, with ``%s`` in place of the '
'bug ID.',
},
'encoding': {
'type': str,
'description': 'The encoding used for files in the '
'repository. This is an advanced setting '
'and should only be used if you absolutely '
'need it.',
},
'mirror_path': {
'type': str,
'description': 'An alternate path to the repository.',
},
'name': {
'type': str,
'description': 'The human-readable name of the repository.',
},
'password': {
'type': str,
'description': 'The password used to access the repository.',
},
'path': {
'type': str,
'description': 'The path to the repository.',
},
'public': {
'type': bool,
'description': 'Whether or not review requests on the '
'repository will be publicly accessible '
'by users on the site. The default is true.',
},
'raw_file_url': {
'type': str,
'description': "A URL mask used to check out a particular "
"file using HTTP. This is needed for "
"repository types that can't access files "
"natively. Use ``<revision>`` and "
"``<filename>`` in the URL in place of the "
"revision and filename parts of the path.",
},
'trust_host': {
'type': bool,
'description': 'Whether or not any unknown host key or '
'certificate should be accepted. The default '
'is false, in which case this will error out '
'if encountering an unknown host key or '
'certificate.',
},
'username': {
'type': str,
'description': 'The username used to access the repository.',
},
'archive_name': {
'type': bool,
'description': "Whether or not the (non-user-visible) name of "
"the repository should be changed so that it "
"(probably) won't conflict with any future "
"repository names.",
},
},
)
def update(self, request, trust_host=False, *args, **kwargs):
"""Updates a repository.
This will update the information on a repository. If the path,
username, or password has changed, Review Board will try again to
verify access to the repository.
In the event of an access problem (authentication problems,
bad/unknown SSH key, or unknown certificate), an error will be
returned and the repository information won't be updated. Pass
``trust_host=1`` to approve bad/unknown SSH keys or certificates.
"""
try:
repository = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_modify_permissions(request, repository):
return _no_access_error(request.user)
for field in ('bug_tracker', 'encoding', 'mirror_path', 'name',
'password', 'path', 'public', 'raw_file_url',
'username'):
value = kwargs.get(field, None)
if value is not None:
setattr(repository, field, value)
# Only check the repository if the access information has changed.
if 'path' in kwargs or 'username' in kwargs or 'password' in kwargs:
error_result = self._check_repository(
repository.tool.get_scmtool_class(),
repository.path,
repository.username,
repository.password,
repository.local_site,
trust_host)
if error_result is not None:
return error_result
# If the API call is requesting that we archive the name, we'll give it
# a name which won't overlap with future user-named repositories. This
        # should usually
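
# A hedged client-side sketch of the repository resource described above: the
# /api/repositories/ path, the pre-authenticated requests.Session, and the
# 'repository' result key are assumptions about a typical Review Board
# deployment, not taken from this file.
def register_repository(server, session, name, path, tool):
    response = session.post(
        '%s/api/repositories/' % server.rstrip('/'),
        data={
            'name': name,
            'path': path,
            'tool': tool,      # registered SCMTool name, e.g. 'Git'
            'trust_host': 1,   # accept unknown SSH keys/certificates
        })
    response.raise_for_status()
    return response.json()['repository']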
# workflows/pipe-common/pipeline/api/api.py
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import fnmatch
import json
import os
import requests
import sys
import time
import urllib3
from region import CloudRegion
from datastorage import DataStorage
from datastorage import DataStorageWithShareMount
# Date format expected by Pipeline API
DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
# date format for filename generation
FILE_DATE_FORMAT = "%Y%m%d"
class Tool:
def __init__(self, image, cpu, ram, registry, registryId, toolGroupId):
self.image = image
"""
Task requirements to CPU resources. The CPU resource is measured in cpus.
        Fractional values are allowed. You can use the suffix m to mean milli.
        For example, 100m cpu is 100 millicpu, which is the same as 0.1 cpu.
        For example, '500m' means the task requires half of an available CPU.
"""
self.cpu = cpu
"""
Task requirements to RAM resources. The RAM resource is measured in bytes.
You can express RAM as a plain integer or a fixed-point integer with one of
these suffixes: E, P, T, G, M, K, Ei, Pi, Ti, Gi, Mi, Ki.
        For example, '6Gi' means the task requires 6 GiB of available RAM.
"""
self.ram = ram
self.registry = registry
self.labels = []
self.endpoints = []
self.registryId = registryId
self.toolGroupId = toolGroupId
self.description = ''
self.shortDescription = ''
self.defaultCommand = ''
self.tool_id = 0
self.disk = 0
self.instanceType = ''
def to_json(self):
fields = self.__dict__
if 'tool_id' in fields:
fields.pop('tool_id', None)
return json.dumps(fields, sort_keys=True, indent=4)
class DataStorageRule:
def __init__(self, file_mask, move_to_sts):
self.file_mask = file_mask
self.move_to_sts = move_to_sts
def match(self, path):
return fnmatch.fnmatch(path, self.file_mask)
@staticmethod
def match_any(rules, path):
for rule in rules:
if rule.move_to_sts and rule.match(path):
return True
return False
@staticmethod
def read_from_file(path):
if not os.path.exists(path):
return []
rules = []
with open(path, 'r') as rules_file:
data = rules_file.readline().strip()
if not data:
return []
try:
for rule in json.loads(data):
rules.append(DataStorageRule(rule['fileMask'], rule['moveToSts']))
except ValueError:
return rules
return rules
@staticmethod
def write_to_file(path, data):
with open(path, 'w') as rules_file:
rules_file.write(str(data))
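
# A small usage sketch for DataStorageRule above: the rules file holds a JSON
# list of {"fileMask": ..., "moveToSts": ...} entries (see read_from_file), and
# match_any tells whether a produced path matches any move-to-STS rule. The
# file path and tested path are placeholders.
def should_move_to_sts(rules_path, produced_path):
    rules = DataStorageRule.read_from_file(rules_path)
    return DataStorageRule.match_any(rules, produced_path)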
# enumeration with task statuses, supported by Pipeline API
class TaskStatus:
SUCCESS, FAILURE, RUNNING, STOPPED, PAUSED = range(5)
# enumeration with commit statuses, supported by Pipeline API
class CommmitStatus:
NOT_COMMITTED, COMMITTING, FAILURE, SUCCESS = range(4)
# Represents a log entry in format supported by Pipeline API
class LogEntry:
def __init__(self, run_id, status, text, task, instance):
self.runId = run_id
self.date = datetime.datetime.utcnow().strftime(DATE_FORMAT)
self.status = status
self.logText = text
self.taskName = task
self.instance = instance
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
# Represents a status entry in format supported by Pipeline API
class StatusEntry:
def __init__(self, status):
self.endDate = datetime.datetime.utcnow().strftime(DATE_FORMAT)
self.status = status
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class AclClass:
PIPELINE = 'PIPELINE'
FOLDER = 'FOLDER'
DATA_STORAGE = 'DATA_STORAGE'
DOCKER_REGISTR = 'DOCKER_REGISTR'
TOOL = 'TOOL'
TOOL_GROUP = 'TOOL_GROUP'
CONFIGURATION = 'CONFIGURATION'
METADATA_ENTITY = 'METADATA_ENTITY'
ATTACHMENT = 'ATTACHMENT'
# Represents a PipelineApi Configuration
class PipelineAPI:
"""Represents a PipelineApi Configuration"""
# Pipeline API endpoint for sending log entries
LOG_URL = 'run/{}/log'
# Pipeline API endpoint for sending status updates
STATUS_URL = 'run/{}/status'
COMMIT_STATUS_URL = 'run/{}/commitStatus'
    TOOL_URL = 'tool/load?image={image}&registry={registry}'
TOOL_VERSIONS_URL = 'tool/{tool_id}/tags'
ENABLE_TOOL_URL = 'tool/register'
UPDATE_TOOL_URL = 'tool/update'
RUN_URL = 'run'
GET_RUN_URL = '/run/{}'
GET_TASK_URL = '/run/{}/task?taskName={}'
FILTER_RUNS = 'run/filter'
DATA_STORAGE_URL = "/datastorage"
DATA_STORAGE_RULES_URL = "datastorage/rule/load"
REGISTRY_CERTIFICATES_URL = "dockerRegistry/loadCerts"
REGISTRY_LOAD_ALL_URL = "dockerRegistry/loadTree"
TOOL_GROUP_IN_REGISTRY_LOAD_ALL_URL = "/toolGroup/list?registry={}"
TOOL_GROUP_LOAD_URL = "/toolGroup?id={}"
SEARCH_RUNS_URL = "/run/search"
LOAD_PIPELINE_URL = "/pipeline/{}/load"
LOAD_ALL_PIPELINES_URL = "pipeline/loadAll"
FIND_PIPELINE_URL = "/pipeline/find?id={}"
CLONE_PIPELINE_URL = "/pipeline/{}/clone"
LOAD_WRITABLE_STORAGES = "/datastorage/mount"
LOAD_AVAILABLE_STORAGES = "/datastorage/available"
LOAD_AVAILABLE_STORAGES_WITH_MOUNTS = "/datastorage/availableWithMounts"
LOAD_METADATA = "/metadata/load"
LOAD_ENTITIES_DATA = "/metadataEntity/entities"
LOAD_DTS = "/dts"
LOAD_CONFIGURATION = '/configuration/%d'
GET_PREFERENCE = '/preferences/%s'
TOOL_VERSION_SETTINGS = '/tool/%d/settings'
ADD_PIPELINE_REPOSITORY_HOOK = '/pipeline/%s/addHook'
FOLDER_REGISTER = '/folder/register'
FOLDER_DELETE = '/folder/%d/delete'
PIPELINE_CREATE = '/pipeline/register'
PIPELINE_DELETE = '/pipeline/%d/delete'
ISSUE_URL = '/issues'
COMMENT_URL = '/comments'
NOTIFICATION_URL = '/notification'
REGION_URL = '/cloud/region'
# Pipeline API default header
RESPONSE_STATUS_OK = 'OK'
MAX_PAGE_SIZE = 400
def __init__(self, api_url, log_dir, attempts=3, timeout=5, connection_timeout=10):
urllib3.disable_warnings()
token = os.environ.get('API_TOKEN')
self.api_url = api_url
self.log_dir = log_dir
self.header = {'content-type': 'application/json',
'Authorization': 'Bearer {}'.format(token)}
self.attempts = attempts
self.timeout = timeout
self.connection_timeout = connection_timeout
def check_response(self, response):
if response.status_code != 200:
sys.stderr.write("API responded with status {}\n".format(str(response.status_code)))
return False
data = response.json()
if 'status' in data and data['status'] == self.RESPONSE_STATUS_OK:
return True
if 'message' in data:
sys.stderr.write("API returned error message: {}\n".format(data['message']))
return False
        sys.stderr.write("API responded with an unexpected message: {}\n".format(str(response)))
return False
def execute_request(self, url, method='get', data=None):
count = 0
while count < self.attempts:
count += 1
try:
if method == 'get':
response = requests.get(url, headers=self.header, verify=False, timeout=self.connection_timeout)
elif method == 'post':
response = requests.post(url, data=data, headers=self.header, verify=False,
timeout=self.connection_timeout)
elif method == 'delete':
response = requests.delete(url, headers=self.header, verify=False, timeout=self.connection_timeout)
elif method == 'put':
response = requests.put(url, data=data, headers=self.header, verify=False,
timeout=self.connection_timeout)
else:
raise RuntimeError('Unsupported request method: {}'.format(method))
if self.check_response(response):
result = response.json()
return result['payload'] if 'payload' in result else None
except Exception as e:
                sys.stderr.write('An error has occurred during request to API: {}\n'.format(str(e.message)))
time.sleep(self.timeout)
raise RuntimeError('Exceeded maximum retry count {} for API request'.format(self.attempts))
def load_tool(self, image, registry):
result = requests.get(str(self.api_url) + self.TOOL_URL.format(image=image, registry=registry),
headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load tool {}. API response: {}'.format(image, result.json()['message']))
payload = result.json()['payload']
tool = Tool(payload['image'], payload['cpu'], payload['ram'], payload['registry'], payload['registryId'],
payload['toolGroupId'])
if 'labels' in payload:
tool.labels = payload['labels']
if 'endpoints' in payload:
tool.endpoints = payload['endpoints']
if 'description' in payload:
tool.description = payload['description']
if 'shortDescription' in payload:
tool.shortDescription = payload['shortDescription']
if 'defaultCommand' in payload:
tool.defaultCommand = payload['defaultCommand']
if 'instanceType' in payload:
tool.instanceType = payload['instanceType']
if 'disk' in payload:
tool.disk = payload['disk']
if 'id' in payload:
tool.tool_id = payload['id']
return tool
def load_tool_versions(self, tool_id):
result = requests.get(str(self.api_url) + self.TOOL_VERSIONS_URL.format(tool_id=tool_id),
headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load tool versions {}. API response: {}'.format(tool_id, result.json()['message']))
return result.json()['payload']
def enable_tool(self, tool):
result = requests.post(str(self.api_url) + self.ENABLE_TOOL_URL, data=tool.to_json(),
headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to enable tool {}/{}. API response: {}'.format(tool.registry, tool.image, result.json()['message']))
def update_tool(self, tool):
result = requests.post(str(self.api_url) + self.UPDATE_TOOL_URL, data=tool.to_json(),
headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to update tool {}/{}. API response: {}'.format(tool.registry, tool.image, result.json()['message']))
def load_datastorage_rules(self, pipeline_id):
params = {"pipelineId": pipeline_id}
result = requests.get(str(self.api_url) + self.DATA_STORAGE_RULES_URL,
headers=self.header, params=params, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
return None
result_json = result.json()
if not 'payload' in result_json:
return None
payload = json.dumps(result_json['payload'])
if payload:
return payload
return None
def load_certificates(self):
result = requests.get(str(self.api_url) + self.REGISTRY_CERTIFICATES_URL, headers=self.header, verify=False)
result_json = result.json()
if hasattr(result_json, 'error') or result_json['status'] != self.RESPONSE_STATUS_OK:
return None
if not 'payload' in result_json:
return None
payload = json.dumps(result_json['payload'])
return json.loads(payload)
def load_run(self, run_id):
try:
result = self.execute_request(str(self.api_url) + self.GET_RUN_URL.format(run_id))
return {} if result is None else result
except Exception as e:
raise RuntimeError("Failed to load run.", "Error message: {}".format(str(e.message)))
def load_task(self, run_id, task_name, parameters=None):
url = self.GET_TASK_URL.format(run_id, task_name)
if parameters:
            url += "&parameters={}".format(parameters)
result = requests.get(str(self.api_url) + url, headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load task {}. API response: {}'.format(run_id, result.json()['message']))
if 'payload' in result.json():
return result.json()['payload']
else:
return None
def launch_pipeline(self, pipeline_id, pipeline_version, parameters,
cmd=None, docker=None, instance=None, disk=None, parent_node_id=None, parent_run_id=None):
request = {'pipelineId': pipeline_id, 'version': pipeline_version, 'params': parameters}
if parent_node_id:
request['parentNodeId'] = parent_node_id
if parent_run_id:
request['parentRunId'] = parent_run_id
if cmd:
request['cmdTemplate'] = cmd
if docker:
request['dockerImage'] = docker
if instance:
request['instanceType'] = instance
if disk:
request['hddSize'] = disk
result = requests.post(str(self.api_url) + self.RUN_URL,
data=json.dumps(request), headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError(result.json()['message'])
return result.json()['payload']
def launch_pod(self, parent_run, cmd, docker_image):
request = {'cmdTemplate': cmd, 'dockerImage': docker_image, 'useRunId': parent_run}
result = requests.post(str(self.api_url) + self.RUN_URL,
data=json.dumps(request), headers=self.header, verify=False)
if hasattr(result.json(), 'error') or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError(result.json()['message'])
return result.json()['payload']['podId']
def load_child_pipelines(self, | |
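
# A minimal usage sketch for the PipelineAPI client above. The URL, log dir and
# run id are placeholders; API_TOKEN must already be set in the environment,
# since the constructor reads it to build the Authorization header.
def print_run_status(run_id):
    api = PipelineAPI(api_url='https://cp.example.com/pipeline/restapi',
                      log_dir='/var/log/pipeline')
    run = api.load_run(run_id)
    print(run.get('status'))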
from matrx.objects.env_object import EnvObject
""" A number of standard, often used objects. """
class SquareBlock(EnvObject):
"""
    An example of a simple object with a set of attributes that are always the same. In this case it is not
    traversable and is visualized as a square. Otherwise it takes all default properties from an EnvObject and has
    no other custom properties.
Parameters
----------
location : tuple
Location of door.
name : string. Optional, default "Block"
Name of block, defaults to "Block"
**kwargs:
Additional properties that should be added to the object.
"""
def __init__(self, location, name="Block", visualize_colour="#4286f4", **kwargs):
# hardcoded props
kwargs['is_traversable'] = False
kwargs['visualize_shape'] = 0
super().__init__(name=name, location=location, class_callable=SquareBlock,
visualize_colour=visualize_colour, **kwargs)
class Door(EnvObject):
"""
    Door base object, can be used to define rooms. An example of an object that is an ordinary EnvObject but has
    methods on which two Actions depend: OpenDoorAction and CloseDoorAction. These methods alter the is_traversable
    property accordingly.
    It also has two colours which the door visualization changes into when it is opened or closed.
Parameters
----------
location : tuple
Location of door.
name : string. Optional, default "Door"
Name of object, defaults to "Door"
open_colour : string. Optional, default "#006400"
Colour when open
closed_colour : string. Optional, default "#640000"
Colour when closed
**kwargs:
Dict of additional properties that should be added to the object as well.
"""
def __init__(self, location, is_open, name="Door", open_colour="#006400", closed_colour="#640000",
**kwargs):
# Whether the door is by default open or closed is stored in the defaults.py and obtained like this;
self.is_open = is_open
        # We save the colours for open and close and assign the appropriate value based on the current state
self.open_colour = open_colour
self.closed_colour = closed_colour
current_color = self.closed_colour
if self.is_open:
current_color = self.open_colour
# If the door is open or closed also determines its is_traversable property
is_traversable = self.is_open
# hardcoded prop
kwargs['is_movable'] = False
super().__init__(location=location, name=name, is_traversable=is_traversable, visualize_colour=current_color,
is_open=self.is_open, class_callable=Door, customizable_properties=['is_open'], **kwargs)
def open_door(self):
""" Opens the door, changes the colour and sets the properties as such.
"""
# Set the attribute
self.is_open = True
# Set the appropriate property
self.change_property("is_open", self.is_open)
# Traversable depends on this as well
self.is_traversable = self.is_open
# Also change the colour
self.visualize_colour = self.open_colour
def close_door(self):
""" Closes the door, changes the colour and sets the properties as such.
"""
# Set the attribute
self.is_open = False
# Set the appropriate property
self.change_property("is_open", self.is_open)
# Traversable depends on this as well
self.is_traversable = self.is_open
# Also change the colour
self.visualize_colour = self.closed_colour
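
# A short usage sketch for Door above: opening/closing flips is_traversable and
# the visualised colour. The location is an arbitrary example value, and this
# assumes an EnvObject can be constructed outside a running GridWorld.
def _door_demo():
    door = Door(location=(3, 4), is_open=False)
    door.open_door()
    print(door.is_open, door.is_traversable, door.visualize_colour)   # True True #006400
    door.close_door()
    print(door.is_open, door.is_traversable, door.visualize_colour)   # False False #640000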
class Wall(EnvObject):
"""
A simple Wall object. Is not traversable, the colour can be set but has otherwise the default EnvObject property
values.
Parameters
----------
location : tuple
The location of the wall.
name : string. Optional, default "Wall"
The name, default "Wall".
visualize_colour: string. Optional, default "#000000" (black)
A Hex string indicating the colour of the wall.
kwargs: dict (optional)
A dictionary of keyword arguments that can be used to add additional properties
"""
def __init__(self, location, name="Wall", visualize_colour="#000000", **kwargs):
# a wall is immovable and impassable
kwargs['is_traversable'] = False
kwargs['is_movable'] = False
is_traversable = False # Walls are never traversable
super().__init__(name=name, location=location, visualize_colour=visualize_colour, class_callable=Wall,
**kwargs)
class AreaTile(EnvObject):
"""
A simple AreaTile object. Is always traversable, not movable, the colour can be set but has otherwise the
default EnvObject property values. Can be used to define different areas in the GridWorld.
Parameters
----------
location : tuple
The location of the area.
name : string. Optional, default "AreaTile"
The name, default "AreaTile".
visualize_colour : string. Optional, default is "#b7b7b7"
hex colour code for tile. default is grey.
visualize_opacity : float. Optional, default 0.8.
Opacity of the object. By default 0.8
visualize_depth : int. Optional, default=101
        depth of visualization. By default 101: just above agents and other objects. Higher means higher priority.
**kwargs : Optional.
Set of additional properties that should be added to the object as well.
"""
def __init__(self, location, name="AreaTile", visualize_colour="#8ca58c", visualize_depth=None,
visualize_opacity=1.0, **kwargs):
# a floor is always passable and immovable
kwargs['is_traversable'] = True
kwargs['is_movable'] = False
super().__init__(name=name, location=location, visualize_colour=visualize_colour, class_callable=AreaTile,
visualize_depth=visualize_depth, visualize_opacity=visualize_opacity,
**kwargs)
class SmokeTile(AreaTile):
"""
An object representing one tile of smoke. Is always traversable, not movable,
and square shaped. Can be transparent.
Parameters
----------
location : tuple
The location of the area.
name : String. Optional,default:"SmokeTile"
The name, default "SmokeTile".
visualize_colour : string. Optional, default is "#b7b7b7"
hex colour code for tile. default is grey.
visualize_opacity : float. Optional, default 0.8.
Opacity of the object. By default 0.8
visualize_depth : int. Optional, default=101
depth of visualization. By default 101: just above agent and other objects Higher means higher priority.
kwargs: dict (optional)
A dictionary of keyword arguments that can be used to add additional properties
"""
def __init__(self, location, name="SmokeTile", visualize_colour="#b7b7b7", visualize_opacity=0.8,
visualize_depth=101, **kwargs):
super().__init__(name=name, location=location, visualize_colour=visualize_colour,
visualize_opacity=visualize_opacity, visualize_depth=visualize_depth,
**kwargs)
class Battery(EnvObject):
"""
    A simple example of an object with an update method that is called each simulation step. It also has
    two default properties that are unique to this object: start_energy_level and energy_decay. These are added
    as properties by passing them as keyword arguments to the constructor of EnvObject. In addition, this constructor
    also creates a current_energy_level attribute, which is treated as a property by passing it to the EnvObject
    constructor as well. All other properties are obtained from defaults.py as defined for every EnvObject,
except for the size (which is set to be 0.25) and the colour (which is a shade of green turning to red based on
the current_energy_level).
    Its update method simply decays the current energy level by the given factor and adjusts the colour
    accordingly.
Parameters
----------
location : list
The location of the battery.
name: String (optional).
Defaults to 'Battery'.
start_energy_level: float (optional)
Defaults to 1.0
energy_decay: float (optional)
        Defaults to 0.01, meaning the energy decreases by 1% of its current value each simulation step.
"""
def __init__(self, location, name="Battery", start_energy_level=1.0, energy_decay=0.01):
self.start_energy_level = start_energy_level
self.current_energy_level = start_energy_level
self.energy_decay = energy_decay
super().__init__(name=name, location=location,
visualize_shape=0, # a battery is always square
visualize_size=0.25, # a battery is always 1/4th of a grid square of the visualization
customizable_properties=["current_energy_level"], # the current energy level can be changed
visualize_colour="#32b432",
energy_decay=self.energy_decay,
current_energy_level=self.current_energy_level,
class_callable=Battery)
def update(self, grid_world, state):
"""
        Updates the current energy level, changes the property accordingly, and also changes the visualization colour.
        Parameters
        ----------
        grid_world : GridWorld
            The grid world instance. Not used.
        state
            The state of the world. Not used.
        Returns
        -------
        dict
            The (updated) properties of this object.
"""
# Calculate the new energy level
self.current_energy_level = self.current_energy_level * (1 - self.energy_decay)
if self.current_energy_level < 0.0001:
self.current_energy_level = 0
# Updates the energy level property, if we do not do this the property will not reflect the actual value
self.change_property(property_name="current_energy_level", property_value=self.current_energy_level)
# Change the color (we shift from green to red)
hex_color = self.visualize_colour
if self.current_energy_level != 0: # if energy level is not zero.
new_red = int(50 + 130 * (1 - self.current_energy_level / self.start_energy_level)) # >red as less energy
new_green = int(50 + 130 * (self.current_energy_level / self.start_energy_level)) # >green, as more energy
hex_color = '#{:02x}{:02x}{:02x}'.format(new_red, new_green, 50)
self.visualize_colour = hex_color # we do not need to set this property, as it is not a custom property
# Return the properties themselves.
return self.properties
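# Illustrative sketch (not part of the original module): the decay and colour logic of
# Battery.update in isolation. After n steps the energy is start * (1 - decay) ** n, and the
# colour is re-mixed each step from green (#32b432) towards red as the energy drains; once the
# level reaches (near) zero the colour is simply left unchanged.
def _example_battery_decay(start_energy_level=1.0, energy_decay=0.01, n_steps=50):
    level = start_energy_level * (1 - energy_decay) ** n_steps
    if level < 0.0001:
        return 0, None  # the visualisation colour stays whatever it was before
    new_red = int(50 + 130 * (1 - level / start_energy_level))    # more red as energy drops
    new_green = int(50 + 130 * (level / start_energy_level))      # more green while energy is high
    return level, '#{:02x}{:02x}{:02x}'.format(new_red, new_green, 50)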
class CollectionTarget(EnvObject):
""" An invisible object that tells which objects needs collection.
This invisible object is linked to `CollectionDropOffTile` object(s) and is used by the `CollectionGoal` to
identify which objects should be collected and dropped off at the tiles. This object is just a regular object
but contains three additional properties:
- collection_objects: See parameter doc.
- collection_zone_name: See parameter doc.
- is_invisible: A boolean denoting that this object is invisible. This boolean has no effect in MATRX, except to
denote that this object is not an actual visible object.
    - is_drop_off_target: Denotes this object as containing the descriptions of the to be collected
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Appointment
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import List as ListType
from pydantic import Field
from . import backboneelement, domainresource, fhirtypes
class Appointment(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A booking of a healthcare event among patient(s), practitioner(s), related
person(s) and/or device(s) for a specific date/time. This may result in one
or more Encounter(s).
"""
resource_type = Field("Appointment", const=True)
appointmentType: fhirtypes.CodeableConceptType = Field(
None,
alias="appointmentType",
title=(
"The style of appointment or patient that has been booked in the slot "
"(not service type)"
),
description=None,
# if property is element of this resource.
element_property=True,
)
basedOn: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="basedOn",
title="The service request this appointment is allocated to assess",
description=(
"The service request this appointment is allocated to assess (e.g. "
"incoming referral or procedure request)."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ServiceRequest"],
)
cancelationReason: fhirtypes.CodeableConceptType = Field(
None,
alias="cancelationReason",
title="The coded reason for the appointment being cancelled",
description=(
"The coded reason for the appointment being cancelled. This is often "
"used in reporting/billing/futher processing to determine if further "
"actions are required, or specific fees apply."
),
# if property is element of this resource.
element_property=True,
)
comment: fhirtypes.String = Field(
None,
alias="comment",
title="Additional comments",
description="Additional comments about the appointment.",
# if property is element of this resource.
element_property=True,
)
comment__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_comment", title="Extension field for ``comment``."
)
created: fhirtypes.DateTime = Field(
None,
alias="created",
title="The date that this appointment was initially created",
description=(
"The date that this appointment was initially created. This could be "
"different to the meta.lastModified value on the initial entry, as this"
" could have been before the resource was created on the FHIR server, "
"and should remain unchanged over the lifespan of the appointment."
),
# if property is element of this resource.
element_property=True,
)
created__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_created", title="Extension field for ``created``."
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Shown on a subject line in a meeting request, or appointment list",
description=(
"The brief description of the appointment as would be shown on a "
"subject line in a meeting request, or appointment list. Detailed or "
"expanded information should be put in the comment field."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
end: fhirtypes.Instant = Field(
None,
alias="end",
title="When appointment is to conclude",
description="Date/Time that the appointment is to conclude.",
# if property is element of this resource.
element_property=True,
)
end__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_end", title="Extension field for ``end``."
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="External Ids for this item",
description=(
"This records identifiers associated with this appointment concern that"
" are defined by business processes and/or used to refer to it when a "
"direct URL reference to the resource itself is not appropriate (e.g. "
"in CDA documents, or in written / printed documentation)."
),
# if property is element of this resource.
element_property=True,
)
minutesDuration: fhirtypes.PositiveInt = Field(
None,
alias="minutesDuration",
title="Can be less than start/end (e.g. estimate)",
description=(
"Number of minutes that the appointment is to take. This can be less "
"than the duration between the start and end times. For example, where"
" the actual time of appointment is only an estimate or if a 30 minute "
"appointment is being requested, but any time would work. Also, if "
"there is, for example, a planned 15 minute break in the middle of a "
"long appointment, the duration may be 15 minutes less than the "
"difference between the start and end."
),
# if property is element of this resource.
element_property=True,
)
minutesDuration__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_minutesDuration", title="Extension field for ``minutesDuration``."
)
participant: ListType[fhirtypes.AppointmentParticipantType] = Field(
...,
alias="participant",
title="Participants involved in appointment",
description="List of participants involved in the appointment.",
# if property is element of this resource.
element_property=True,
)
patientInstruction: fhirtypes.String = Field(
None,
alias="patientInstruction",
title="Detailed information and instructions for the patient",
description=(
"While Appointment.comment contains information for internal use, "
"Appointment.patientInstructions is used to capture patient facing "
"information about the Appointment (e.g. please bring your referral or "
"fast from 8pm night before)."
),
# if property is element of this resource.
element_property=True,
)
patientInstruction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_patientInstruction",
title="Extension field for ``patientInstruction``.",
)
priority: fhirtypes.UnsignedInt = Field(
None,
alias="priority",
title="Used to make informed decisions if needing to re-prioritize",
description=(
"The priority of the appointment. Can be used to make informed "
"decisions if needing to re-prioritize appointments. (The iCal Standard"
" specifies 0 as undefined, 1 as highest, 9 as lowest priority)."
),
# if property is element of this resource.
element_property=True,
)
priority__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_priority", title="Extension field for ``priority``."
)
reasonCode: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonCode",
title="Coded reason this appointment is scheduled",
description=(
"The coded reason that this appointment is being scheduled. This is "
"more clinical than administrative."
),
# if property is element of this resource.
element_property=True,
)
reasonReference: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="reasonReference",
title="Reason the appointment is to take place (resource)",
description=(
"Reason the appointment has been scheduled to take place, as specified "
"using information from another resource. When the patient arrives and "
"the encounter begins it may be used as the admission diagnosis. The "
"indication will typically be a Condition (with other resources "
"referenced in the evidence.detail), or a Procedure."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Condition",
"Procedure",
"Observation",
"ImmunizationRecommendation",
],
)
requestedPeriod: ListType[fhirtypes.PeriodType] = Field(
None,
alias="requestedPeriod",
title=(
"Potential date/time interval(s) requested to allocate the appointment "
"within"
),
description=(
"A set of date ranges (potentially including times) that the "
"appointment is preferred to be scheduled within. The duration "
"(usually in minutes) could also be provided to indicate the length of "
"the appointment to fill and populate the start/end times for the "
"actual allocated time. However, in other situations the duration may "
"be calculated by the scheduling system."
),
# if property is element of this resource.
element_property=True,
)
serviceCategory: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="serviceCategory",
title=(
"A broad categorization of the service that is to be performed during "
"this appointment"
),
description=None,
# if property is element of this resource.
element_property=True,
)
serviceType: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="serviceType",
title="The specific service that is to be performed during this appointment",
description=None,
# if property is element of this resource.
element_property=True,
)
slot: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="slot",
title="The slots that this appointment is filling",
description=(
"The slots from the participants' schedules that will be filled by the "
"appointment."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Slot"],
)
specialty: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="specialty",
title=(
"The specialty of a practitioner that would be required to perform the "
"service requested in this appointment"
),
description=None,
# if property is element of this resource.
element_property=True,
)
start: fhirtypes.Instant = Field(
None,
alias="start",
title="When appointment is to take place",
description="Date/Time that the appointment is to take place.",
# if property is element of this resource.
element_property=True,
)
start__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_start", title="Extension field for ``start``."
)
status: fhirtypes.Code = Field(
...,
alias="status",
title=(
"proposed | pending | booked | arrived | fulfilled | cancelled | noshow"
" | entered-in-error | checked-in | waitlist"
),
description=(
"The overall status of the Appointment. Each of the participants has "
"their own participation status which indicates their involvement in | |
+ 17*mckin**16 - 30*mbkin**14*q_cut +
122*mbkin**12*mckin**2*q_cut + 1566*mbkin**10*mckin**4*q_cut +
3382*mbkin**8*mckin**6*q_cut + 3382*mbkin**6*mckin**8*q_cut +
1566*mbkin**4*mckin**10*q_cut + 122*mbkin**2*mckin**12*q_cut -
30*mckin**14*q_cut - 17*mbkin**12*q_cut**2 + 180*mbkin**10*mckin**2*q_cut**2 +
2125*mbkin**8*mckin**4*q_cut**2 + 3656*mbkin**6*mckin**6*q_cut**2 +
2125*mbkin**4*mckin**8*q_cut**2 + 180*mbkin**2*mckin**10*q_cut**2 -
17*mckin**12*q_cut**2 + 50*mbkin**10*q_cut**3 + 62*mbkin**8*mckin**2*q_cut**3 -
1104*mbkin**6*mckin**4*q_cut**3 - 1104*mbkin**4*mckin**6*q_cut**3 +
62*mbkin**2*mckin**8*q_cut**3 + 50*mckin**10*q_cut**3 - 15*mbkin**8*q_cut**4 +
22*mbkin**6*mckin**2*q_cut**4 + 34*mbkin**4*mckin**4*q_cut**4 +
22*mbkin**2*mckin**6*q_cut**4 - 15*mckin**8*q_cut**4 - 2*mbkin**6*q_cut**5 -
198*mbkin**4*mckin**2*q_cut**5 - 198*mbkin**2*mckin**4*q_cut**5 -
2*mckin**6*q_cut**5 + 5*mbkin**4*q_cut**6 + 60*mbkin**2*mckin**2*q_cut**6 +
5*mckin**4*q_cut**6 - 18*mbkin**2*q_cut**7 - 18*mckin**2*q_cut**7 + 10*q_cut**8)*
np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))/(mbkin**2 + mckin**2 - q_cut + mbkin**2*
np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4))))/mbkin**24 +
(10368*mckin**8*(mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)**2*(31*mbkin**10 - 153*mbkin**8*mckin**2 -
1138*mbkin**6*mckin**4 - 1138*mbkin**4*mckin**6 - 153*mbkin**2*mckin**8 +
31*mckin**10 - 29*mbkin**8*q_cut - 100*mbkin**6*mckin**2*q_cut -
162*mbkin**4*mckin**4*q_cut - 100*mbkin**2*mckin**6*q_cut - 29*mckin**8*q_cut -
29*mbkin**6*q_cut**2 - 125*mbkin**4*mckin**2*q_cut**2 - 125*mbkin**2*mckin**4*
q_cut**2 - 29*mckin**6*q_cut**2 + 26*mbkin**4*q_cut**3 + 82*mbkin**2*mckin**2*
q_cut**3 + 26*mckin**4*q_cut**3 - 4*mbkin**2*q_cut**4 - 4*mckin**2*q_cut**4 + 5*q_cut**5)*
np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))/(mbkin**2 + mckin**2 - q_cut + mbkin**2*
np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)))**2)/mbkin**22 -
(622080*mckin**12*(3*mbkin**4 + 8*mbkin**2*mckin**2 + 3*mckin**4)*
((mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)**(3/2)*
np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))/(mbkin**2 + mckin**2 - q_cut + mbkin**2*
np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)))**3)/mbkin**12)/
(540*((mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)**(3/2)*
((np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)*(mbkin**6 - 7*mbkin**4*mckin**2 -
7*mbkin**2*mckin**4 + mckin**6 - mbkin**4*q_cut - mckin**4*q_cut -
mbkin**2*q_cut**2 - mckin**2*q_cut**2 + q_cut**3))/mbkin**6 -
(12*mckin**4*np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j +
(mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4))/(mbkin**2 + mckin**2 - q_cut +
mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 -
2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/mbkin**4))))/mbkin**4)**
3) + (((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/
mbkin**4 + q_cut**2/mbkin**4)*(-72*mbkin**2*muG*
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)**2*(-((1 + mckin**2/mbkin**2)**2*
(-6 - (33*mckin**2)/mbkin**2 + (1163*mckin**4)/mbkin**4 -
(5343*mckin**6)/mbkin**6 + (6489*mckin**8)/mbkin**8 +
(22085*mckin**10)/mbkin**10 - (10023*mckin**12)/mbkin**12 +
(771*mckin**14)/mbkin**14 + (17*mckin**16)/mbkin**16)) +
((-10 - (115*mckin**2)/mbkin**2 + (1918*mckin**4)/mbkin**4 -
(8212*mckin**6)/mbkin**6 - (6930*mckin**8)/mbkin**8 -
(1330*mckin**10)/mbkin**10 - (11526*mckin**12)/mbkin**12 -
(5492*mckin**14)/mbkin**14 + (1428*mckin**16)/mbkin**16 +
(29*mckin**18)/mbkin**18)*q_cut)/mbkin**2 +
(2*(-8 + (5*mckin**2)/mbkin**2 + (578*mckin**4)/mbkin**4 -
(3580*mckin**6)/mbkin**6 - (14183*mckin**8)/mbkin**8 -
(13151*mckin**10)/mbkin**10 - (4092*mckin**12)/mbkin**12 +
(642*mckin**14)/mbkin**14 + (21*mckin**16)/mbkin**16)*q_cut**2)/
mbkin**4 + ((30 + (192*mckin**2)/mbkin**2 - (3912*mckin**4)/mbkin**4 +
(2716*mckin**6)/mbkin**6 + (10742*mckin**8)/mbkin**8 +
(2184*mckin**10)/mbkin**10 - (3172*mckin**12)/mbkin**12 -
(84*mckin**14)/mbkin**14)*q_cut**3)/mbkin**6 -
(2*(-10 + (102*mckin**2)/mbkin**2 - (253*mckin**4)/mbkin**4 +
(327*mckin**6)/mbkin**6 - (299*mckin**8)/mbkin**8 +
(69*mckin**10)/mbkin**10 + (20*mckin**12)/mbkin**12)*q_cut**4)/
mbkin**8 + (2*(-23 + (43*mckin**2)/mbkin**2 + (1111*mckin**4)/mbkin**
4 + (1490*mckin**6)/mbkin**6 + (1026*mckin**8)/mbkin**8 +
(51*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
(2*mckin**2*(23 - (218*mckin**2)/mbkin**2 - (158*mckin**4)/mbkin**4 +
(3*mckin**6)/mbkin**6)*q_cut**6)/mbkin**14 -
(2*(-13 + (52*mckin**2)/mbkin**2 + (186*mckin**4)/mbkin**4 +
(26*mckin**6)/mbkin**6)*q_cut**7)/mbkin**14 +
((-10 + (39*mckin**2)/mbkin**2 + (9*mckin**4)/mbkin**4)*q_cut**8)/
mbkin**16 + (5*mckin**2*q_cut**9)/mbkin**20) - 36*muG*mupi*
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)**2*(-((1 + mckin**2/mbkin**2)**2*
(-6 - (33*mckin**2)/mbkin**2 + (1163*mckin**4)/mbkin**4 -
(5343*mckin**6)/mbkin**6 + (6489*mckin**8)/mbkin**8 +
(22085*mckin**10)/mbkin**10 - (10023*mckin**12)/mbkin**12 +
(771*mckin**14)/mbkin**14 + (17*mckin**16)/mbkin**16)) +
((-10 - (115*mckin**2)/mbkin**2 + (1918*mckin**4)/mbkin**4 -
(8212*mckin**6)/mbkin**6 - (6930*mckin**8)/mbkin**8 -
(1330*mckin**10)/mbkin**10 - (11526*mckin**12)/mbkin**12 -
(5492*mckin**14)/mbkin**14 + (1428*mckin**16)/mbkin**16 +
(29*mckin**18)/mbkin**18)*q_cut)/mbkin**2 +
(2*(-8 + (5*mckin**2)/mbkin**2 + (578*mckin**4)/mbkin**4 -
(3580*mckin**6)/mbkin**6 - (14183*mckin**8)/mbkin**8 -
(13151*mckin**10)/mbkin**10 - (4092*mckin**12)/mbkin**12 +
(642*mckin**14)/mbkin**14 + (21*mckin**16)/mbkin**16)*q_cut**2)/
mbkin**4 + ((30 + (192*mckin**2)/mbkin**2 - (3912*mckin**4)/mbkin**4 +
(2716*mckin**6)/mbkin**6 + (10742*mckin**8)/mbkin**8 +
(2184*mckin**10)/mbkin**10 - (3172*mckin**12)/mbkin**12 -
(84*mckin**14)/mbkin**14)*q_cut**3)/mbkin**6 -
(2*(-10 + (102*mckin**2)/mbkin**2 - (253*mckin**4)/mbkin**4 +
(327*mckin**6)/mbkin**6 - (299*mckin**8)/mbkin**8 +
(69*mckin**10)/mbkin**10 + (20*mckin**12)/mbkin**12)*q_cut**4)/
mbkin**8 + (2*(-23 + (43*mckin**2)/mbkin**2 + (1111*mckin**4)/mbkin**
4 + (1490*mckin**6)/mbkin**6 + (1026*mckin**8)/mbkin**8 +
(51*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
(2*mckin**2*(23 - (218*mckin**2)/mbkin**2 - (158*mckin**4)/mbkin**4 +
(3*mckin**6)/mbkin**6)*q_cut**6)/mbkin**14 -
(2*(-13 + (52*mckin**2)/mbkin**2 + (186*mckin**4)/mbkin**4 +
(26*mckin**6)/mbkin**6)*q_cut**7)/mbkin**14 +
((-10 + (39*mckin**2)/mbkin**2 + (9*mckin**4)/mbkin**4)*q_cut**8)/
mbkin**16 + (5*mckin**2*q_cut**9)/mbkin**20) +
36*muG**2*((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/
mbkin**4 + q_cut**2/mbkin**4)**2*(-18 - (231*mckin**2)/mbkin**2 +
(1641*mckin**4)/mbkin**4 - (5542*mckin**6)/mbkin**6 +
(26*mckin**8)/mbkin**8 + (672*mckin**10)/mbkin**10 -
(58660*mckin**12)/mbkin**12 - (20842*mckin**14)/mbkin**14 +
(26856*mckin**16)/mbkin**16 - (4297*mckin**18)/mbkin**18 -
(85*mckin**20)/mbkin**20 + ((-18 - (239*mckin**2)/mbkin**2 -
(530*mckin**4)/mbkin**4 + (1020*mckin**6)/mbkin**6 -
(37266*mckin**8)/mbkin**8 + (34518*mckin**10)/mbkin**10 -
(10102*mckin**12)/mbkin**12 - (24964*mckin**14)/mbkin**14 +
(7196*mckin**16)/mbkin**16 + (145*mckin**18)/mbkin**18)*q_cut)/
mbkin**2 + (2*(16 - (67*mckin**2)/mbkin**2 - (4098*mckin**4)/mbkin**
4 + (6356*mckin**6)/mbkin**6 - (27619*mckin**8)/mbkin**8 -
(24327*mckin**10)/mbkin**10 - (7848*mckin**12)/mbkin**12 +
(3554*mckin**14)/mbkin**14 + (105*mckin**16)/mbkin**16)*q_cut**2)/
mbkin**4 - (2*(-51 - (400*mckin**2)/mbkin**2 + (6112*mckin**4)/mbkin**
4 + (474*mckin**6)/mbkin**6 - (16791*mckin**8)/mbkin**8 -
(5188*mckin**10)/mbkin**10 + (7926*mckin**12)/mbkin**12 +
(210*mckin**14)/mbkin**14)*q_cut**3)/mbkin**6 -
(2*(14 - (94*mckin**2)/mbkin**2 - (1493*mckin**4)/mbkin**4 +
(11407*mckin**6)/mbkin**6 + (4013*mckin**8)/mbkin**8 +
(681*mckin**10)/mbkin**10 + (100*mckin**12)/mbkin**12)*q_cut**4)/
mbkin**8 + (2*(-75 - (97*mckin**2)/mbkin**2 + (6039*mckin**4)/mbkin**
4 + (6826*mckin**6)/mbkin**6 + (5110*mckin**8)/mbkin**8 +
(255*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
(2*mckin**2*(55 - (814*mckin**2)/mbkin**2 - (694*mckin**4)/mbkin**4 +
(15*mckin**6)/mbkin**6)*q_cut**6)/mbkin**14 +
(130*(mbkin**6 - 4*mbkin**4*mckin**2 - 14*mbkin**2*mckin**4 -
2*mckin**6)*q_cut**7)/mbkin**20 + (5*(-10 + (39*mckin**2)/mbkin**2 +
(9*mckin**4)/mbkin**4)*q_cut**8)/mbkin**16 + (25*mckin**2*q_cut**9)/
mbkin**20) - 24*mbkin*(-((-1 + mckin**2/mbkin**2)**4*
(1 + mckin**2/mbkin**2)**2*(503 - (9464*mckin**2)/mbkin**2 +
(69322*mckin**4)/mbkin**4 - (179128*mckin**6)/mbkin**6 -
(217124*mckin**8)/mbkin**8 + (134968*mckin**10)/mbkin**10 -
(44170*mckin**12)/mbkin**12 + (3064*mckin**14)/mbkin**14 +
(109*mckin**16)/mbkin**16)) + ((-1 + mckin**2/mbkin**2)**2*
(2903 - (41547*mckin**2)/mbkin**2 + (196111*mckin**4)/mbkin**4 +
(84389*mckin**6)/mbkin**6 - (2226350*mckin**8)/mbkin**8 -
(4060522*mckin**10)/mbkin**10 - (2007262*mckin**12)/mbkin**12 +
(332278*mckin**14)/mbkin**14 + (144215*mckin**16)/mbkin**16 -
(185931*mckin**18)/mbkin**18 + (19663*mckin**20)/mbkin**20 +
(613*mckin**22)/mbkin**22)*q_cut)/mbkin**2 -
(2*(2870 - (34643*mckin**2)/mbkin**2 + (127322*mckin**4)/mbkin**4 +
(190963*mckin**6)/mbkin**6 - (1214040*mckin**8)/mbkin**8 -
(2984930*mckin**10)/mbkin**10 - (3081596*mckin**12)/mbkin**12 -
(1121550*mckin**14)/mbkin**14 + (431494*mckin**16)/mbkin**16 +
(81077*mckin**18)/mbkin**18 - (159838*mckin**20)/mbkin**20 +
(20891*mckin**22)/mbkin**22 + (540*mckin**24)/mbkin**24)*q_cut**2)/
mbkin**4 - (2*(-969 + (3860*mckin**2)/mbkin**2 + (20318*mckin**4)/
mbkin**4 - (143553*mckin**6)/mbkin**6 + (43662*mckin**8)/mbkin**8 +
(243236*mckin**10)/mbkin**10 - (166416*mckin**12)/mbkin**12 -
(159266*mckin**14)/mbkin**14 + (150619*mckin**16)/mbkin**16 +
(20136*mckin**18)/mbkin**18 - (11694*mckin**20)/mbkin**20 +
(67*mckin**22)/mbkin**22)*q_cut**3)/mbkin**6 +
((8985 - (76768*mckin**2)/mbkin**2 + (136327*mckin**4)/mbkin**4 +
(661168*mckin**6)/mbkin**6 + (524386*mckin**8)/mbkin**8 +
(437592*mckin**10)/mbkin**10 + (491062*mckin**12)/mbkin**12 -
(151632*mckin**14)/mbkin**14 - (284475*mckin**16)/mbkin**16 +
(57128*mckin**18)/mbkin**18 + (2563*mckin**20)/mbkin**20)*q_cut**4)/
mbkin**8 - ((12407 - (70063*mckin**2)/mbkin**2 + (19312*mckin**4)/
mbkin**4 + (726800*mckin**6)/mbkin**6 + (1038918*mckin**8)/mbkin**
8 + (304322*mckin**10)/mbkin**10 - (490544*mckin**12)/mbkin**12 -
(209264*mckin**14)/mbkin**14 + (102019*mckin**16)/mbkin**16 +
(1997*mckin**18)/mbkin**18)*q_cut**5)/mbkin**10 -
(4*(-354 + (2543*mckin**2)/mbkin**2 - (2380*mckin**4)/mbkin**4 -
(59099*mckin**6)/mbkin**6 - (24326*mckin**8)/mbkin**8 +
(44915*mckin**10)/mbkin**10 + (6624*mckin**12)/mbkin**12 -
(8999*mckin**14)/mbkin**14 + (564*mckin**16)/mbkin**16)*q_cut**6)/
mbkin**12 + (4*(2103 - (3452*mckin**2)/mbkin**2 - (14684*mckin**4)/
mbkin**4 - (17263*mckin**6)/mbkin**6 - (5615*mckin**8)/mbkin**8 +
(7734*mckin**10)/mbkin**10 + (11390*mckin**12)/mbkin**12 +
(1035*mckin**14)/mbkin**14)*q_cut**7)/mbkin**14 +
((-5829 + (9694*mckin**2)/mbkin**2 + (45611*mckin**4)/mbkin**4 +
(25144*mckin**6)/mbkin**6 - (44863*mckin**8)/mbkin**8 -
(44134*mckin**10)/mbkin**10 - (1047*mckin**12)/mbkin**12)*q_cut**8)/
mbkin**16 - ((343 + (6467*mckin**2)/mbkin**2 + (16652*mckin**4)/mbkin**
4 - (4820*mckin**6)/mbkin**6 - (5767*mckin**8)/mbkin**8 +
(2021*mckin**10)/mbkin**10)*q_cut**9)/mbkin**18 +
(2*(830 + (1771*mckin**2)/mbkin**2 + (3242*mckin**4)/mbkin**4 +
(3405*mckin**6)/mbkin**6 + (928*mckin**8)/mbkin**8)*q_cut**10)/
mbkin**20 - (2*(247 + (420*mckin**2)/mbkin**2 + (1194*mckin**4)/mbkin**
4 + (291*mckin**6)/mbkin**6)*q_cut**11)/mbkin**22 +
((3 + (92*mckin**2)/mbkin**2 + (65*mckin**4)/mbkin**4)*q_cut**12)/
mbkin**24 - ((9 + (19*mckin**2)/mbkin**2)*q_cut**13)/mbkin**26 +
(8*q_cut**14)/mbkin**28)*rhoD + ((mbkin**6 - 7*mbkin**4*mckin**2 -
7*mbkin**2*mckin**4 + mckin**6 - mbkin**4*q_cut - mckin**4*q_cut -
mbkin**2*q_cut**2 - mckin**2*q_cut**2 + q_cut**3)*
(-16*(-((-1 + mckin**2/mbkin**2)**4*(-2158 + (14281*mckin**2)/
mbkin**2 + (16728*mckin**4)/mbkin**4 - (3957*mckin**6)/
mbkin**6 - (23262*mckin**8)/mbkin**8 - (4629*mckin**10)/
mbkin**10 + (14812*mckin**12)/mbkin**12 + (425*mckin**14)/
mbkin**14)) + (4*(-1 + mckin**2/mbkin**2)**2*(-2507 +
(11037*mckin**2)/mbkin**2 + (27788*mckin**4)/mbkin**4 +
(14892*mckin**6)/mbkin**6 - (16983*mckin**8)/mbkin**8 -
(31701*mckin**10)/mbkin**10 + (3740*mckin**12)/mbkin**12 +
(17724*mckin**14)/mbkin**14 + (490*mckin**16)/mbkin**16)*q_cut)/
mbkin**2 - ((-15790 + (48321*mckin**2)/mbkin**2 +
(127224*mckin**4)/mbkin**4 + (95584*mckin**6)/mbkin**6 +
(114300*mckin**8)/mbkin**8 - (90522*mckin**10)/mbkin**10 -
(246488*mckin**12)/mbkin**12 + (43344*mckin**14)/mbkin**14 +
(117234*mckin**16)/mbkin**16 + (2633*mckin**18)/mbkin**18)*q_cut**2)/
mbkin**4 + ((-4746 + (2616*mckin**2)/mbkin**2 + (37008*mckin**4)/
mbkin**4 - (24212*mckin**6)/mbkin**6 - (142016*mckin**8)/
mbkin**8 + (4768*mckin**10)/mbkin**10 + (173592*mckin**12)/
mbkin**12 + (47004*mckin**14)/mbkin**14 - (894*mckin**16)/
mbkin**16)*q_cut**3)/mbkin**6 + (4*(-3324 - (3576*mckin**2)/
mbkin**2 | |
: 1), (3 : 3 : 1), (3 : 10 : 1), (4 : 1 : 1), (4 : 12 : 1), (6 : 2 : 1), (6 : 11 : 1), (7 : 1 : 1), (7 : 12 : 1), (8 : 4 : 1), (8 : 9 : 1), (9 : 4 : 1), (9 : 9 : 1), (12 : 5 : 1), (12 : 8 : 1)]
sage: set(C._points_fast_sqrt()) == set(C._points_cache_sqrt())
True
"""
# For givaro finite fields, taking square roots is very fast
# so no need to cache as in prime case
K = self.base_ring()
f, h = self.hyperelliptic_polynomials()
one = K(1)
# start with the points at infinity
P = self.defining_polynomial()
if not P(K(0), K(1), K(0)):
# (0:1:0) is a point on the curve
points = [self.point([K(0), K(1), K(0)], check=True)]
else:
points=[]
if P.degree() > 2:
# P(1, y, 0) = r*y + s
s = P(K(1), K(0), K(0))
r = P(K(1), K(1), K(0)) - s
if r: # r not zero
points.append(self.point([K(1), -s/r, K(0)], check=True))
# the case r = 0 need not be considered
elif K.characteristic() == 2: # deg(P) = 2 and char(K) = 2
# quadratic equation doesn't work in characteristic 2 so use brute
# force
points += [self.point([K(1), y, K(0)], check=True) for y in K \
if not P(K(1), y, K(0))]
else: # deg(P) = 2 and char(K) not 2
# P(1, y, 0) = y^2 + r*y + s
s = -f[2]
r = h[1]
d = r**2/4 - s
if not d: # d = 0
points.append(self.point([K(1), -r/2, K(0)], check=True))
elif d.is_square():
sqrtd = d.sqrt()
points.append(self.point([K(1), -r/2+sqrtd, K(0)], check=True))
points.append(self.point([K(1), -r/2-sqrtd, K(0)], check=True))
if K.characteristic() == 2:
# quadratic equation doesn't work in characteristic 2
if h.is_zero():
for x in K:
points.append(self.point([x, f(x).sqrt(), one], check=True))
else:
a_sqrts = { } # Artin-Schreier 2-roots
for x in K:
a_sqrts[x**2 + x] = x # char 2 => x^2 - x == x^2 + x
for x in K:
b = h(x)
c = f(x)
if b:
try:
r = a_sqrts[c / b**2]
points.append(self.point([x, r*b, one], check=True))
points.append(self.point([x, r*b+b, one], check=True))
except KeyError:
# y^2 + by + c irreducible, so yields no points
pass
else: # b == 0
points.append(self.point([x, c.sqrt(), one], check=True))
elif h.is_zero():
# special case to save work if we are of the form y^2 = f(x)
for x in K:
y2 = f(x)
if not y2: # y = 0
points.append(self.point([x, y2, one], check=True))
elif y2.is_square():
y = y2.sqrt()
points.append(self.point([x, y, one], check=True))
points.append(self.point([x, -y, one], check=True))
else:
b = -h/2
D = b*b + f
for x in K:
Dval = D(x)
if not Dval: # D(x) = 0
points.append(self.point([x, b(x), one], check=True))
elif Dval.is_square():
sqrtD = Dval.sqrt()
v = b(x)
points.append(self.point([x, v+sqrtD, one], check=True))
points.append(self.point([x, v-sqrtD, one], check=True))
return points
def _points_cache_sqrt(self, brute_force=False):
"""
List points by enumerating over x and solving the resulting
quadratic for y.
Caches all square roots ahead of time by squaring every element of
the field. Elements must have an __index__ method.
EXAMPLES::
sage: x = polygen(GF(7))
sage: C = HyperellipticCurve(x^3 + x^2 - 1)
sage: C._points_cache_sqrt()
[(0 : 1 : 0), (1 : 6 : 1), (1 : 1 : 1), (2 : 5 : 1), (2 : 2 : 1), (3 : 0 : 1), (4 : 4 : 1), (4 : 3 : 1), (5 : 4 : 1), (5 : 3 : 1)]
sage: set(C._points_cache_sqrt()) == set(C._points_cache_sqrt(brute_force=True))
True
"""
K = self.base_ring()
if K.characteristic() != 2:
# cache the squares (faster than O(p) sqrts)
square_roots = [None] * len(K)
for x in K:
square_roots[x*x] = x
f, h = self.hyperelliptic_polynomials()
one = K(1)
# start with the points at infinity
P = self.defining_polynomial()
if not P(K(0), K(1), K(0)):
# (0:1:0) is a point on the curve
points = [self.point([K(0), K(1), K(0)], check=True)]
else:
points=[]
if P.degree() > 2:
# P(1, y, 0) = r*y + s
s = P(K(1), K(0), K(0))
r = P(K(1), K(1), K(0)) - s
if r: # r not zero
points.append(self.point([K(1), -s/r, K(0)], check=True))
# the case r = 0 need not be considered
elif K.characteristic() == 2: # deg(P) = 2 and char(K) = 2
# quadratic equation doesn't work in characteristic 2 so use brute
# force
points += [self.point([K(1), y, K(0)], check=True) for y in K \
if not P(K(1), y, K(0))]
else: # deg(P) = 2 and char(K) not 2
# P(1, y, 0) = y^2 + r*y + s
s = -f[2]
r = h[1]
d = r**2/4 - s
sqrtd = square_roots[d]
if not d: # d = 0
points.append(self.point([K(1), -r/2, K(0)], check=True))
elif sqrtd is not None:
points.append(self.point([K(1), -r/2+sqrtd, K(0)], check=True))
points.append(self.point([K(1), -r/2-sqrtd, K(0)], check=True))
if K.characteristic() == 2 or brute_force:
# quadratic equation doesn't work in characteristic 2
# but there are only 4 affine points, so just test them
f = self.defining_polynomial()
points += [self.point([x, y, one], check=True) for x in K for y in K if not f(x, y, one)]
elif h.is_zero():
# special case to save work if we are of the form y^2 = f(x)
for x in K:
y2 = f(x)
y = square_roots[y2]
if not y2: # y = 0
points.append(self.point([x, y2, one], check=True))
elif y is not None:
points.append(self.point([x, y, one], check=True))
points.append(self.point([x, -y, one], check=True))
else:
b = -h/2
D = b*b + f # this is really disc/4
for x in K:
Dval = D(x)
sqrtD = square_roots[Dval]
if not Dval: # D(x) = 0
points.append(self.point([x, b(x), one], check=True))
elif sqrtD is not None:
v = b(x)
points.append(self.point([x, v+sqrtD, one], check=True))
points.append(self.point([x, v-sqrtD, one], check=True))
return points
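    # Illustrative sketch (not part of the original class): the square-root caching idea used in
    # ``_points_cache_sqrt`` above, written for a plain prime p with Python integers instead of
    # finite-field elements. Squaring every residue once turns square-root extraction into an
    # O(1) table lookup (None marks quadratic non-residues).
    @staticmethod
    def _sqrt_cache_sketch(p):
        table = [None] * p
        for x in range(p):
            table[(x * x) % p] = x
        return table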
def points(self):
r"""
All the points on this hyperelliptic curve.
EXAMPLES::
sage: x = polygen(GF(7))
sage: C = HyperellipticCurve(x^7 - x^2 - 1)
sage: C.points()
[(0 : 1 : 0), (2 : 5 : 1), (2 : 2 : 1), (3 : 0 : 1), (4 : 6 : 1), (4 : 1 : 1), (5 : 0 : 1), (6 : 5 : 1), (6 : 2 : 1)]
::
sage: x = polygen(GF(121, 'a'))
sage: C = HyperellipticCurve(x^5 + x - 1, x^2 + 2)
sage: len(C.points())
122
Conics are allowed (the issue reported at :trac:`11800`
has been resolved)::
sage: R.<x> = GF(7)[]
sage: H = HyperellipticCurve(3*x^2 + 5*x + 1)
sage: H.points()
[(0 : 6 : 1), (0 : 1 : 1), (1 : 4 : 1), (1 : 3 : 1), (2 : 4 : 1), (2 : 3 : 1), (3 : 6 : 1), (3 : 1 : 1)]
The method currently lists points on the plane projective model, that
is the closure in $\mathbb{P}^2$ of the curve defined by $y^2+hy=f$.
This means that one point $(0:1:0)$ at infinity is returned if the
degree of the curve is at least 4 and $\deg(f)>\deg(h)+1$. This point
is a singular point of the plane model. Later implementations may
consider a smooth model instead since that would be a more relevant
object. Then, for a curve whose only singularity is at $(0:1:0)$, the
point at infinity would be replaced by a number of rational points of
the smooth model. We illustrate this with an example of a genus 2
hyperelliptic curve::
sage: R.<x>=GF(11)[]
sage: H = HyperellipticCurve(x*(x+1)*(x+2)*(x+3)*(x+4)*(x+5))
sage: H.points()
[(0 : 1 : 0), (0 : 0 : 1), (1 : 7 : 1), (1 : 4 : 1), (5 : 7 : 1), (5 : 4 : 1), (6 : 0 : 1), (7 : 0 : 1), (8 : 0 : 1), (9 : 0 : 1), (10 : 0 : 1)]
The plane model of the genus 2 hyperelliptic curve in the above example
        is the
#Misc
import time, os, sys, pdb, argparse
from glob import glob
from fnmatch import fnmatch
#Base
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy.io as sio
import scipy.stats as st
import multiprocessing
#Save
import json
import scipy.io as sio
import h5py
import io_dict_to_hdf5 as ioh5
#Plot
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.gridspec as gridspec
import matplotlib.lines as mlines
from matplotlib.backends.backend_pdf import PdfPages
#Model
import ssm
from sklearn.model_selection import StratifiedKFold
#User
import util
import plotting as usrplt
behav_dict = {-1:'ambiguous', 0:'rest',1:'running'}
#Directories
RootDataDir = './data'
ResultsDir = './results'
##===== ============================ =====##
##===== Parse Command Line Arguments =====##
parser = argparse.ArgumentParser(description='HMM for jumping data')
parser.add_argument('--save',type=bool, default=1,
help='Save Results?')
##===== Data Options =====##
parser.add_argument('--mID',type=str, default='all_mice',
help='mouse to fit model to')
##===== Model Type =====##
parser.add_argument('--model_type', type=str, default='ARHMM',
help='ARHMM or SLDS')
parser.add_argument('--transitions', type=str, default='recurrent',
help='standard or recurrent or sticky or inputdriven')
parser.add_argument('--observations', type=str, default='autoregressive',
help='autoregressive or robust_autoregressive or diagonal_ar or diagonal_robust_ar')
parser.add_argument('--inputdriven', type=bool, default=0,
help='HMM transitions dependent on some input in addition to previous HMM state')
##===== Model Parameters =====##
parser.add_argument('--kappa', type=float, default=1e5,
help='sticky arhmm kappa')
parser.add_argument('--AR_lags', type=str, default=1,
help='Autoregressive lags')
parser.add_argument('--MAP_threshold', type=float, default=0.75,
help='MAP threshold')
##===== Run Options =====##
parser.add_argument('--Kmin', type=int, default=4,
help='minimum number of HMM states')
parser.add_argument('--Kmax', type=int, default=24,
help='maximum number of HMM states')
parser.add_argument('--kXval', type=int, default=5,
help='number of kfold')
parser.add_argument('--EM_tolerance', type=float, default=1e-6,
help='SSM EM algorithm tolerance')
parser.add_argument('--EM_iters', type=int, default=200,
help='EM Iterations')
parser.add_argument('--max_processes', type=int, default=15,
help='max # of parallel processes to run')
args = parser.parse_args()
def set_arhmm_hyperparams(opt, K):
    # Dimensionality of the external input to the HMM transitions (0 means no input)
    M = 0
    #Autoregressive keyword arguments
    ar_kwargs = dict(
        # l2_penalty_A= args_dic['l2_penalty_A'],
        # l2_penalty_b= args_dic['l2_penalty_b'],
        # l2_penalty_V= args_dic['l2_penalty_V'],
        lags = opt['AR_lags']
    )
    #HMM Transition parameters
    trans_kwargs = {}
    if opt['transitions'] == 'sticky':
        # alpha= args_dic['alpha'],
        trans_kwargs['kappa'] = opt['kappa']
    return M, ar_kwargs, trans_kwargs
def get_state_sequence(hmm, data_test, opt, inputs=None):
"""
Compute the local MAP state (arg-max of marginal state probabilities at each time step)
and overall state usages.
    If the marginal probability of the MAP state is below opt['MAP_threshold'], the returned
    mask is False at those time steps so they can be treated as unreliable.
Also output average state usages and the marginal state probabilities
"""
T = 0; ll_heldout = 0
state_usage = np.zeros(hmm.K)
trMAPs = []
trPosteriors = []
trMasks = []
#Loop over data to obtain MAP sequence for each trial
for index, data in enumerate(data_test):
#Get state probabilities and log-likelihood
        if opt['transitions'] == 'inputdriven':
            inputdata = inputs[index]
            Ez, _, ll = hmm.expected_states(data, input=inputdata)
else:
Ez, _, ll = hmm.expected_states(data)
#Update number of data points, state usage, and llood of data
T += Ez.shape[0]
state_usage += Ez.sum(axis=0)
ll_heldout += ll
#maximum a posteriori probability estimate of states
map_seq = np.argmax(Ez,axis=1)
max_prob = Ez[np.r_[0:Ez.shape[0]],map_seq]
#Save sequences
trMAPs.append(map_seq)
trPosteriors.append(Ez)
trMasks.append(max_prob > opt['MAP_threshold'])
#Normalize
state_usage /= T
#Get parameters from ARHMM object
param_dict = util.params_to_dict(hmm.params, opt)
return trMAPs, trPosteriors, trMasks, state_usage, ll_heldout, param_dict
def fit_ssm_get_llhood(data_list, K, opt, train_inds=None, test_inds=None, i_fold=-1):
#Go!
startTime = time.time()
nTrials = len(data_list)
#Separate the data into a training and test set based on the indices given
if train_inds is not None and test_inds is not None:
data_train = [data_list[ii] for ii in train_inds]
data_test = [data_list[ii] for ii in test_inds]
else:
#fit model on all data
data_train = data_list
data_test = data_list
#adding 10 so i_fold == -1 case doesn't give error
np.random.seed(10+i_fold)
#Autoregressive keyword arguments
ar_kwargs = dict(
# l2_penalty_A= args_dic['l2_penalty_A'],
# l2_penalty_b= args_dic['l2_penalty_b'],
# l2_penalty_V= args_dic['l2_penalty_V'],
lags = opt['AR_lags']
)
    #HMM Transition parameters
    trans_kwargs = {}
    if opt['transitions'] == 'sticky':
        # alpha= args_dic['alpha'],
        trans_kwargs['kappa'] = opt['kappa']
#Not implemented yet
if opt['transitions'] == 'inputdriven':
#Separate inputs from the data_list into training and test sets
raise Exception('TODO: Separate inputs from the data_list into training and test sets')
else:
inputs_train = None
inputs_test = None
M = 0
#Initialize Hidden Markov Model with
arhmm = ssm.HMM(K, opt['dObs'], M=M,
observations=opt['observations'], observation_kwargs=ar_kwargs,
transitions=opt['transitions'], transition_kwargs=trans_kwargs)
##===== Fit on training data =====##
model_convergence = arhmm.fit(data_train, inputs=inputs_train, method="em", num_iters=opt['EM_iters'], tolerance=opt['EM_tolerance'])
#Get MAP sequences for heldout data (or all of the data if this isn't part of the xval)
trMAPs, trPosteriors, trMasks, state_usage, ll_heldout2, params_dict = get_state_sequence(arhmm, data_test, opt)
#Calculate loglikehood of the test and training data
ll_heldout = arhmm.log_likelihood(data_test)
ll_training = arhmm.log_likelihood(data_train)
#Sort based on state-usage
# trMAPs, trPosteriors, state_usage, state_perm = util.sort_states_by_usage(state_usage, trMAPs, trPosteriors)
##===== Calculate Log-likelihood =====##
#Count total number of time steps in data
tTest = sum(map(len, data_test))
ll_heldout_perstep = ll_heldout/tTest
#For Training
tTrain = sum(map(len, data_train))
ll_training_perstep = ll_training/tTrain
llhood_tuple = (ll_heldout,ll_heldout_perstep,ll_training,ll_training_perstep)
##===== Save & Plot =====##
#Create subdirectory under base directory for kfold
SaveDir, fname_sffx = util.make_sub_dir(K, opt, i_fold)
#Stop time
    RunTime = time.time() - startTime
## Save log-likelihood per kfold fit, as well as fit model parameters
ioh5.save(os.path.join(SaveDir, 'fit_parameters-{}.h5'.format(fname_sffx)),
{'ll_heldout':llhood_tuple[0], 'll_heldout_perstep':llhood_tuple[1],'tTest': tTest,
'll_training':llhood_tuple[2], 'll_training_perstep':llhood_tuple[3],'tTrain': tTrain,
'state_usage':state_usage, 'arhmm_params' : params_dict,'hyperparams': opt,
'model_convergence': model_convergence, 'RunTime': RunTime})
##===== Save and plot for full fit =====##
if i_fold == -1:
## Save state sequences for full fit
ioh5.save(os.path.join(SaveDir, 'MAP_seqs-{}.h5'.format(fname_sffx)),
{'trMAPs':trMAPs, 'trPosteriors':trPosteriors,'trMasks':trMasks,
'arhmm_params' : params_dict,'state_usage':state_usage,
'hyperparams' : opt})
## Calculate & plot state duration and state usage
state_duration_list, state_startend_list, state_usage = util.get_state_durations(trMAPs, trMasks, K)
usrplt.plot_state_durations2(state_duration_list,state_usage, K,
SAVEFIG=True,PlotDir=SaveDir,fname='state-durations_{}.pdf'.format(fname_sffx))
#Plot dynamics of latent states
usrplt.plot_dynamics_2d(arhmm,SAVEFIG=True,PlotDir=SaveDir,fname='AR-streamplots_{}.pdf'.format(fname_sffx))
## Plot the actual AR matrices, with their corresponding fixed point
usrplt.plot_AR_matrices(arhmm,SAVEFIG=True,PlotDir=SaveDir,fname='AR-matrices_{}.pdf'.format(fname_sffx))
## Plot example trajectories of actual trajectories for each state
usrplt.plot_example_trajectories(state_duration_list,state_startend_list,data_list, arhmm,
SAVEFIG=True,PlotDir=SaveDir,fname='state-trajectories_data_{}.pdf'.format(fname_sffx))
## Plot example trajectories simulated from the model for each state
usrplt.plot_example_trajectories(state_duration_list,state_startend_list,data_list, arhmm, simulated=True,
SAVEFIG=True,PlotDir=SaveDir,fname='state-trajectories_simulated_{}.pdf'.format(fname_sffx))
return ll_training_perstep, ll_heldout_perstep, K
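# Illustrative sketch (not part of the original script): the core ssm calls used above on a tiny
# synthetic dataset, so the fit / expected_states pattern is visible without the k-fold and
# plotting machinery. The sizes (K, D, number and length of trials) are made-up example values.
def _example_arhmm_fit_sketch():
    import numpy as np
    import ssm
    K, D = 3, 2                                                  # 3 hidden states, 2 observed dims
    data_list = [np.random.randn(100, D) for _ in range(5)]      # 5 fake trials of 100 steps each
    arhmm = ssm.HMM(K, D, M=0,
                    observations="autoregressive", observation_kwargs=dict(lags=1),
                    transitions="recurrent")
    lls = arhmm.fit(data_list, method="em", num_iters=50, tolerance=1e-4)
    # Posterior state probabilities and the local MAP sequence for one trial
    Ez, _, ll = arhmm.expected_states(data_list[0])
    map_seq = np.argmax(Ez, axis=1)
    return lls, map_seq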
##===== ===== =====##
##===== Start =====##
if __name__ == "__main__":
#GO!
startTime = time.time()
#Convert arguments into dictionary; opt <-> options
opt = args.__dict__
#Create base folder for saved results
SaveDirRoot = util.make_base_dir(opt['model_type'],opt['mID'])
#Save script options in JSON file
opt['SaveDirRoot'] = SaveDirRoot
if opt['save']:
with open(os.path.join(SaveDirRoot, 'ARHMM_hyperparameters.json'), 'w') as jsfile:
json.dump(opt, jsfile, indent=4)
##====== ============ ======##
##====== Read in Data ======##
data_df = pd.read_hdf('./data/jumping_data_102220.h5')
nTrials = len(data_df)
#DLC tracking confidence threshold at which to mask out data
confidence_threshold = 0.8
#Loop over trials and reformat data for ARHMM
data_list = []; mask_list = []
for iTrial in range(nTrials):
#Get coordinates of Take-Off platform
xc = np.nanmean(data_df.loc[iTrial]['Side TakeFL x'])
yc = np.nanmean(data_df.loc[iTrial]['Side TakeFL y'])
xy_list = []; ll_list = []
for ii, ptstr in enumerate(['Nose','LEye','LEar']):
x = data_df.loc[iTrial]['Side {} x'.format(ptstr)]
y = data_df.loc[iTrial]['Side {} y'.format(ptstr)]
llhood = data_df.loc[iTrial]['Side {} likelihood'.format(ptstr)]
#Coordinates relative to take-off platform
xy_list.append((x-xc,y-yc))
#Create mask for points that have a confidence lower than the given threshold
mask = llhood > confidence_threshold
ll_list.append((mask,mask))
tmp = np.vstack(xy_list).T; data_list.append(tmp[2:-2,:])
tmp = np.vstack(ll_list).T; mask_list.append(tmp[2:-2,:])
#Get number of time points and components per experiment
nT, dObs = data_list[0].shape
nComponents = dObs; opt['dObs'] = dObs
##===== ==================== =====##
##===== Perform X-validation =====##
k_fold = StratifiedKFold(n_splits=opt['kXval'])
#Stratify data per mice and per condition for kfolds
include = ['{}_D{}'.format(i,j) for i,j in zip(list(data_df['subject']),list(data_df['distance']))]
# Creates parallel processes
pool = multiprocessing.Pool(processes=opt['max_processes'])
#Preallocate matrix for cross-validation llhood values
Ks = np.arange(opt['Kmin'],opt['Kmax']+1,2)
ll_heldout = np.zeros((len(Ks),opt['kXval']+1))
ll_training = np.zeros((len(Ks),opt['kXval']+1))
model_fullfit = []
process_outputs = []
#Loop over number of HMM states
for index, K in enumerate(np.arange(opt['Kmin'],opt['Kmax']+1,2)):
print('{} states'.format(K))
#Fit the model to all of the data, and then for each kfold of x-validation
model_fullfit.append(pool.apply_async(fit_ssm_get_llhood, args=(data_list,K,opt)))
# ll_training_perstep, ll_heldout_perstep, K = fit_ssm_get_llhood(data_list,K,opt)
#Loop over kfolds
kfold_outputs = []
for iK, (train_indices, test_indices) in enumerate(k_fold.split(data_list,include)):
kfold_outputs.append(pool.apply_async(fit_ssm_get_llhood, args= \
(data_list, K, opt, train_indices, test_indices, iK)))
process_outputs.append(kfold_outputs)
##===== =========== =====##
##===== Get results =====##
#Extract log_likelihood results from parallel kfold processing
for index, results in enumerate(process_outputs):
ll_training[index,:-1] = np.array([iFold.get()[0] for iFold in results])
ll_heldout[index,:-1] = np.array([iFold.get()[1] for iFold in results])
Ks[index] = results[0].get()[2]
#For full fit
Ks_ff = Ks.copy()
for index, results in enumerate(model_fullfit):
ll_training[index,-1] = results.get()[0]
ll_heldout[index,-1] = results.get()[1]
        Ks_ff[index] = results.get()[2]
#Close Parallel pool
pool.close()
# pdb.set_trace()
#Total Run Time
    RunTime = time.time() - startTime
opt.update(RunTime = RunTime)
hrs=int(RunTime//3600); mins=int((RunTime%3600)//60); secs=int(RunTime - hrs*3600 - mins*60)
print('\tTotal run time = {:02d}:{:02d}:{:02d} for {} K\'s\n'.format(hrs,mins,secs,opt['Kmax']+1-opt['Kmin']))
# Save summary data of all x-validation results
    usrplt.plot_xval_lls_vs_K(ll_training, ll_heldout,
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by ERDOS team.
import math
import os
import random
import numpy as np
from numpy import linalg as LA
from converter import Converter
from city_track import CityTrack
import bezier
def angle_between(v1, v2):
return np.arccos(np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2))
def sldist(t1, t2):
return math.sqrt((t1[0] - t2[0]) * (t1[0] - t2[0]) +
(t1[1] - t2[1]) * (t1[1] - t2[1]))
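# Illustrative sketch (not part of the original module): what the two helpers above compute.
# angle_between returns the angle in radians between two direction vectors, and sldist the
# straight-line (Euclidean) distance between two 2D points; the values below are made-up.
def _example_geometry_helpers():
    import numpy as np
    heading = np.array([1.0, 0.0])
    to_goal = np.array([1.0, 1.0])
    angle = angle_between(heading, to_goal)   # ~0.785 rad (45 degrees)
    dist = sldist((0.0, 0.0), (3.0, 4.0))     # 5.0
    return angle, dist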
class Waypointer(object):
def __init__(self, city_name):
# Open the necessary files
dir_path = os.path.dirname(__file__)
self.city_file = os.path.join(dir_path, city_name + '.txt')
self.city_name = city_name
        # Define the specific parameters for the waypointer: where the middle of the road is,
        # how open the curves being made are, etc.
self.lane_shift_distance = 13 # The amount of shifting from the center the car should go
self.extra_spacing_rights = -3
self.extra_spacing_lefts = 7 # This is wrong, since it is expressed in world units
self.way_key_points_predicted = 7
self.number_of_waypoints = 30
self._converter = Converter(self.city_file, 0.1643, 50.0)
self._city_track = CityTrack(self.city_name)
self._map = self._city_track.get_map()
# Define here some specific configuration to produce waypoints
self.last_trajectory = []
self.previous_map = [0, 0]
# The internal state variable
self.last_trajectory = []
self._route = []
self.previous_map = [0, 0]
self._previous_source = None
self.last_map_points = None
self.points = None
def reset(self):
self.last_trajectory = []
self._route = []
self.previous_map = [0, 0]
self._previous_source = None
self.last_map_points = None
self.points = None
def _search_around_square(self, map_point, map_central_2d):
"""
Function to search the map point in the central line.
Args:
map_point: the used map point
map_central_2d: the 2d map containing the central lines in red
Returns:
projected point in the central line
"""
x = int(map_point[0])
y = int(map_point[1])
square_crop = map_central_2d[(y - 30):(y + 30), (x - 30):(x + 30)]
small_distance = 10000
closest_point = [
15 - square_crop.shape[1] / 2, 15 - square_crop.shape[0] / 2
]
for t in np.transpose(np.nonzero(square_crop)):
distance = sldist(
t, [square_crop.shape[1] / 2, square_crop.shape[0] / 2])
if distance < small_distance:
small_distance = distance
closest_point = [
t[0] - square_crop.shape[1] / 2,
t[1] - square_crop.shape[0] / 2
]
return np.array([x + closest_point[0], y + closest_point[1]])
def _shift_points(self, distance_to_center, lane_points,
inflection_position):
"""
        Function to take the route points in the middle of the road and shift them to the
        center of the lane.
Args:
distance_to_center: The distance you want to shift
lane_points: the lane points used
inflection_position: A corner case, when there is a turn.
Returns:
"""
shifted_lane_vec = []
for i in range(len(lane_points[:-1])):
lane_point = lane_points[i]
unit_vec = self._get_unit(lane_points[i + 1], lane_points[i])
shifted_lane = [
lane_point[0] + unit_vec[0] * distance_to_center[i],
lane_point[1] + unit_vec[1] * distance_to_center[i]
]
if i == inflection_position:
unit_vec = self._get_unit(lane_points[i], lane_points[i - 1])
shifted_lane_vec.append([
lane_point[0] + unit_vec[0] * distance_to_center[i],
lane_point[1] + unit_vec[1] * distance_to_center[i]
])
shifted_lane_vec.append(shifted_lane)
last_lane_point = lane_points[-1]
shifted_lane = [
last_lane_point[0] + unit_vec[0] * distance_to_center[-1],
last_lane_point[1] + unit_vec[1] * distance_to_center[-1]
]
shifted_lane_vec.append(shifted_lane)
return shifted_lane_vec
    # Given a list of points, find the 3 curve points that this list corresponds to
def _find_curve_points(self, points):
"""
Function to find points when there is a curve.
Args:
points: the search space
Returns:
the points when there is a curve.
"""
curve_points = None
first_time = True
prev_unit_vec = None
for i in range(len(points) - 1):
unit_vec = self._get_unit(points[i + 1], points[i])
unit_vec = [round(unit_vec[0]), round(unit_vec[1])]
if not first_time:
if unit_vec != prev_unit_vec:
curve_points = [points[i + 1], points[i], points[i - 1]]
return curve_points, [i + 1, i, i - 1], np.cross(
unit_vec, prev_unit_vec)
first_time = False
prev_unit_vec = unit_vec
return curve_points, None, None
def _get_unit(self, last_pos, first_pos):
"""
Get a unity vector from two points point
"""
vector_dir = ((last_pos - first_pos) / LA.norm(last_pos - first_pos))
vector_s_dir = [0, 0]
vector_s_dir[0] = -vector_dir[1]
vector_s_dir[1] = vector_dir[0]
return vector_s_dir
def generate_final_trajectory(self, coarse_trajectory):
"""
Smooth the waypoints trajectory using a bezier curve.
Args:
coarse_trajectory:
Returns:
"""
total_course_trajectory_distance = 0
previous_point = coarse_trajectory[0]
for i in range(1, len(coarse_trajectory)):
total_course_trajectory_distance += sldist(coarse_trajectory[i],
previous_point)
points = bezier.bezier_curve(
coarse_trajectory,
max(1, int(total_course_trajectory_distance / 10.0)))
world_points = []
points = np.transpose(points)
points_list = []
for point in points:
world_points.append(self._converter.convert_to_world(point))
points_list.append(point.tolist())
return world_points, points_list
def get_free_node_direction_target(self, pos, pos_ori, source):
"""
Get free positions to drive in the direction of the target point
"""
free_nodes = self._map.get_adjacent_free_nodes(pos)
added_walls = set()
heading_start = np.array([pos_ori[0], pos_ori[1]])
for adj in free_nodes:
start_to_goal = np.array([adj[0] - pos[0], adj[1] - pos[1]])
angle = angle_between(heading_start, start_to_goal)
if angle < 2 and adj != source:
added_walls.add((adj[0], adj[1]))
return added_walls
def graph_to_waypoints(self, next_route):
"""
        Convert the graph to raw waypoints, with the same size as the route.
Basically just project the route to the map and shift to the center of the lane.
Args:
next_route: the graph points (nodes) that are going to be converted.
Returns:
the list of waypoints
"""
# Take the map with the central lines
lane_points = []
for point in next_route:
map_point = self._converter.convert_to_pixel(
[int(point[0]), int(point[1])])
lane_points.append(
self._search_around_square(map_point,
self._map.map_image_center))
# THE CURVE POINTS
_, points_indexes, curve_direction = self._find_curve_points(
lane_points)
# If it is a intersection we divide this in two parts
lan_shift_distance_vec = [self.lane_shift_distance] * len(lane_points)
if points_indexes is not None:
for i in points_indexes:
if curve_direction > 0:
lan_shift_distance_vec[i] += (self.extra_spacing_lefts * 1)
else:
lan_shift_distance_vec[i] += (
self.extra_spacing_rights * -1)
shifted_lane_vec = self._shift_points(
lan_shift_distance_vec, lane_points, points_indexes[1])
else:
shifted_lane_vec = self._shift_points(lan_shift_distance_vec,
lane_points, None)
return shifted_lane_vec
def add_extra_points(self, node_target, target_ori, node_source):
"""
        Hacky: Add extra points after the target; the route needs to contain at least 10 points.
"""
direction = node_target
direction_ori = target_ori
while len(self._route) < 10: # ADD EXTRA POINTS AFTER
try:
free_nodes = list(
self.get_free_node_direction_target(
direction, direction_ori, node_source))
direction_ori = self._get_unit(
np.array(direction), np.array(free_nodes[0]))
aux = -direction_ori[1]
direction_ori[1] = direction_ori[0]
direction_ori[0] = aux
direction = free_nodes[0]
except IndexError:
                # Repeat a route point; that is not a problem.
direction = [
round(self._route[-1][0] + direction_ori[0]),
round(self._route[-1][1] + direction_ori[1])
]
self._route.append(direction)
def convert_list_of_nodes_to_pixel(self, route):
map_points = []
for point in route:
map_point = self._converter.convert_to_pixel(
[int(point[0]), int(point[1])])
map_points.append(map_point)
return map_points
def get_next_waypoints(self, source, source_ori, target, target_ori):
"""
Get the next waypoints, from a list of generated waypoints.
Args:
source: source position
source_ori: source orientation
target: the desired end position
target_ori: the desired target orientation
Returns:
"""
# Project the source and target on the node space.
track_source = self._city_track.project_node(source)
track_target = self._city_track.project_node(target)
# Test if it is already at the goal
if track_source == track_target:
self.reset()
return self.last_trajectory, self.last_map_points, self.convert_list_of_nodes_to_pixel(
self._route)
# This is to avoid computing a new route when we are already inside the route.
# Compute the distance to the closest intersection.
distance_node = self._city_track.closest_curve_position(track_source)
# Potential problem: if the car goes too fast, there can be problems with the turns.
# I will keep this for a while.
if distance_node > 2 and self._previous_source != track_source:
self._route = self._city_track.compute_route(
track_source, source_ori, track_target, target_ori)
# If needed, we add points after the objective; this is very hacky.
self.add_extra_points(track_target, target_ori, track_source)
self.points = self.graph_to_waypoints(
self._route[1:(1 + self.way_key_points_predicted)])
self.last_trajectory, self.last_map_points = self.generate_final_trajectory(
[np.array(self._converter.convert_to_pixel(source))] +
self.points)
# Store the previous position, to avoid recomputation
self._previous_source = track_source
return self.last_trajectory, self.last_map_points, self.points
else:
if sldist(self.previous_map,
self._converter.convert_to_pixel(source)) > 1.0:
# That is because no route was ever computed. This is a problem we should solve.
if not self._route:
self._route = self._city_track.compute_route(
track_source, source_ori, track_target, target_ori)
self.add_extra_points(track_target, target_ori,
track_source)
self.points = self.graph_to_waypoints(
self._route[1:(1 + self.way_key_points_predicted)])
self.last_trajectory, self.last_map_points = self.generate_final_trajectory(
[np.array(self._converter.convert_to_pixel(source))] +
self.points)
# We have to find the current node position
self.previous_map = self._converter.convert_to_pixel(source)
# Remove waypoints that are too close to the source or off to one side of it
for point in self.last_map_points:
point_vec = self._get_unit(
np.array(self._converter.convert_to_pixel(source)),
point)
cross_product = np.cross(source_ori[0:2], point_vec)
if (cross_product > 0.0 and sldist(
point, self._converter.convert_to_pixel(source)) <
50) or sldist(
point, self._converter.convert_to_pixel(
source)) < 15.0:
self.last_trajectory.remove(
self._converter.convert_to_world(point)
) # = [self.make_world_map(point)] + self.last_trajc
self.last_map_points.remove(point)
# Store the previous
<filename>sphinxcontrib/autophpdoc/__init__.py
"""
sphinxcontrib.autophpdoc
~~~~~~~~~~~~~~~~~~~~~~~~
Automatically insert docstrings for PHP functions, classes or whole modules into the doctree.
- use PHPDOC to build a structure.xml file of your whole project.
phpdoc -d src -t doc_src/phpdoc --template="xml"
- add to your conf.py
.. code::
extensions = [
...
'sphinxcontrib.phpdomain',
'sphinxcontrib.autophpdoc',
]
autophpdoc_structure_xml = 'doc_src/phpdoc/structure.xml'
autophpdoc_members = True
autophpdoc_title = True
- in your documentation:
.. php:automodule:: ^modules/main.php ^modules/.*.php
:copyright: Copyright 2019 by <NAME> <<EMAIL>>
:license: BSD, see LICENSE for details.
"""
import os
import re
from typing import Any, Callable, Dict, Iterator, List, Sequence, Set, Tuple, Union # noqa
import docutils
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
import sphinx
from sphinx.util.docutils import SphinxDirective, switch_source_input
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.errors import SphinxWarning, SphinxError, ExtensionError
from lxml import etree
import pbr.version
if False:
# For type annotations
from sphinx.application import Sphinx # noqa
__version__ = pbr.version.VersionInfo ('autophpdoc').version_string ()
NAME = 'autophpdoc'
logger = sphinx.util.logging.getLogger (__name__)
RE_AUTOSTRIP = re.compile (r'^php:auto') # strip directive name to obtain objtype
RE_TRIM = re.compile (r'(</?p>)')
RE_BRACES = re.compile (r'(\s*\(.*\))')
RE_WS = re.compile (r'(\s+)')
NS = {
're' : 'http://exslt.org/regular-expressions'
}
def trim (text):
""" Normalize spaces and remove other useless stuff PHPDoc put in. """
text = RE_TRIM.sub ('', text)
return RE_WS.sub (' ', text.strip ())
def strip_braces (text):
""" Strip the braces from function signatures. """
return RE_BRACES.sub ('', text)
def bs (link):
""" Replace \\ with \\\\ because RST wants it that way. """
# phpdomain does not grok leading backslashes
link = link.lstrip ('\\')
return link.replace ('\\', '\\\\')
def setup (app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_directive_to_domain ('php', 'automodule', AutoDirective)
app.add_directive_to_domain ('php', 'autoclass', AutoDirective)
app.add_directive_to_domain ('php', 'autofunction', AutoDirective)
app.add_config_value (NAME + '_structure_xml', '', False)
app.add_config_value (NAME + '_members', False, False)
app.add_config_value (NAME + '_title', False, False)
return {
'version' : __version__,
'parallel_read_safe' : True,
}
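# Illustrative directive usage enabled by the registrations above (the file
# regex is a placeholder, not taken from a real project):
#
#     .. php:automodule:: ^src/.*\.php
#        :members:
#        :title: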
def members_option (arg: Any) -> Union[bool, List[str]]:
"""Used to convert the :members: option to auto directives."""
if arg is None or arg is True:
return True
if arg is False:
return False
return [x.strip () for x in arg.split (',')]
def bool_option(arg: Any) -> bool:
"""Used to convert flag options to auto directives. (Instead of
directives.flag(), which returns None).
"""
return True
class AutoPHPDocError (SphinxError):
""" The autophpdoc exception. """
category = NAME + ' error'
seen_namespaces = set () # the first time seen gets in the index, the others must have :noindex:
class Subject (object):
""" A thing to document. """
def __init__ (self, node, indent, directive):
self.node = node
self.indent = indent
self.directive = directive
self.options = self.directive.options
def xpath (self, query):
""" Perform an xpath search starting at this node. """
return self.node.xpath (query, namespaces = NS)
def xpath_str (self, query, default = None):
""" Perform an xpath search returning a string starting at this node. """
el = self.node.xpath (query, namespaces = NS, smart_strings = False)
if not el:
return default
try:
return etree.tostring (el[0], encoding = 'unicode', method = 'text').strip ()
except TypeError:
return str (el[0]).strip ()
def splitlines (self, text):
return [(' ' * self.indent) + s for s in text.splitlines ()]
def append (self, text, content):
sourceline = self.get_lineno ()
if isinstance (text, str):
text = self.splitlines (text)
for lineno, line in enumerate (text):
content.append (
line,
'%s:%d:<%s>' % (self.get_filename (), sourceline + lineno, NAME)
)
def append_ns (self, content):
ns = self.get_namespace ()
self.append (".. php:namespace:: %s" % ns, content)
if ns in seen_namespaces:
self.append (" :noindex:", content)
else:
seen_namespaces.add (ns)
self.nl (content)
def append_desc (self, content):
self.append (self.get_description (), content)
self.nl (content)
self.append (self.get_long_description (), content)
self.nl (content)
for node in self.xpath ("docblock/tag[@name='see']"):
PHPSee (node, self.indent, self.directive).run (content)
self.nl (content)
def nl (self, content):
content.append ('', '')
def underscore (self, text, char, content):
self.append (text, content)
self.append (char * len (text), content)
self.nl (content)
def get_filename (self):
return self.xpath_str ('ancestor-or-self::file/@path', 'filename unknown')
def get_lineno (self):
# N.B. phpdoc doesn't get the line nos. of the subtags right
# not much we can do
if 'line' in self.node.attrib:
return int (self.node.get ('line'))
return int (self.xpath_str ('docblock/@line', '0'))
def get_description (self):
return self.xpath_str ('docblock/description', '')
def get_long_description (self):
return self.xpath_str ('docblock/long-description', '')
def get_name (self):
return self.xpath_str ('name', '')
def get_value (self):
return self.xpath_str ('value', '')
def get_full_name (self):
return self.xpath_str ('full_name', '')
def get_type (self):
return self.xpath_str ('docblock/tag[@name="var"]/@type', '')
def get_namespace (self):
return self.xpath_str ("@namespace", '')
def get_package (self):
return self.xpath_str ("tag[@name='package']", '')
def xref (self, link):
if link:
what = 'ref'
if link in self.directive.classes:
what = 'php:class'
elif link in self.directive.functions:
what = 'php:func'
elif link in self.directive.methods:
what = 'php:meth'
elif link in self.directive.properties:
what = 'php:attr'
link = bs (link)
return ":%s:`%s`" % (what, link) if what != 'ref' else link
return ''
class PHPArgument (Subject):
def run (self, content):
name = trim (self.node.get ('variable'))
type_ = trim (self.node.get ('type'))
desc = trim (self.node.get ('description'))
self.append (":param %s %s: %s" % (bs (type_), name, desc), content)
class PHPReturn (Subject):
def run (self, content):
type_ = trim (self.node.get ('type'))
desc = trim (self.node.get ('description'))
if desc:
self.append (":returns: %s" % desc, content)
if type_:
self.append (":rtype: %s" % self.xref (type_), content)
class PHPThrows (Subject):
def run (self, content):
type_ = trim (self.node.get ('type'))
desc = trim (self.node.get ('description') or '')
self.append (":raises %s: %s" % (self.xref (type_), desc), content)
class PHPSee (Subject):
def run (self, content):
desc = trim (self.node.get ('description'))
link = self.node.get ('link')
if link.startswith ('http'):
self.append ("See: %s %s" % (link, desc), content)
else:
self.append ("See: %s %s" % (self.xref (link), desc), content)
self.nl (content)
class PHPVariable (Subject):
def run (self, content):
type_ = self.get_type ()
if type_:
self.append ("(%s)" % self.xref (type_), content)
self.append_desc (content)
class PHPConstant (PHPVariable):
def run (self, content):
self.append_ns (content)
self.append (".. php:const:: %s" % self.get_name (), content)
self.nl (content)
self.indent += 3
self.append (self.get_value (), content)
self.nl (content)
super ().run (content)
class PHPProperty (PHPVariable):
def run (self, content):
self.append (".. php:attr:: %s" % self.get_name (), content)
self.nl (content)
self.indent += 3
super ().run (content)
class PHPCallable (Subject):
def get_signature (self):
args = self.xpath ('argument/name/text ()')
return "%s (%s)" % (self.get_name (), ', '.join (args))
def run (self, content):
self.indent += 3
self.append_desc (content)
for node in self.xpath ("docblock/tag[@name='param']"):
PHPArgument (node, self.indent, self.directive).run (content)
for node in self.xpath ("docblock/tag[@name='return']"):
PHPReturn (node, self.indent, self.directive).run (content)
for node in self.xpath ("docblock/tag[@name='throws']"):
PHPThrows (node, self.indent, self.directive).run (content)
self.nl (content)
class PHPFunction (PHPCallable):
def run (self, content):
self.append_ns (content)
self.append (".. php:function:: %s" % self.get_signature (), content)
self.nl (content)
super ().run (content)
class PHPMethod (PHPCallable):
def run (self, content):
self.append (".. php:method:: %s" % self.get_signature (), content)
self.nl (content)
super ().run (content)
class PHPClass (Subject):
def run (self, content):
self.append_ns (content)
self.append (".. php:class:: %s" % self.get_name (), content)
self.nl (content)
self.indent += 3
self.append_desc (content)
for node in self.xpath ("property"):
PHPProperty (node, self.indent, self.directive).run (content)
for node in self.xpath ("method"):
PHPMethod (node, self.indent, self.directive).run (content)
self.nl (content)
class PHPModule (Subject):
def get_name (self):
return self.xpath_str ('@path', '')
def run (self, content):
filename = self.get_name ()
module = os.path.splitext (filename)[0].replace ('/', '.')
self.append (".. module:: %s" % module, content)
self.nl (content)
if self.directive.get_opt ('title'):
self.underscore (self.get_name (), '-', content)
self.nl (content)
self.append_desc (content)
if self.directive.get_opt ('members') is True:
for node in self.xpath ("constant"):
PHPConstant (node, self.indent, self.directive).run (content)
for node in self.xpath ("function"):
PHPFunction (node, self.indent, self.directive).run (content)
for node in self.xpath ("class"):
PHPClass (node, self.indent, self.directive).run (content)
self.nl (content)
class AutoDirective (SphinxDirective):
"""Directive to document a whole PHP file. """
# file path regex (should match a file/@path as found inside structure.xml)
required_arguments = 1
# more file path regexes
optional_arguments = 999
has_content = False
option_spec = {
'structure_xml' : directives.unchanged, # path of structure.xml file, overrides config
'members' : members_option, # which members to include (default: all)
'title' : bool_option, # should we output a section title
}
def get_opt (self, name, required = False):
opt = self.options.get (name) or getattr (self.env.config, "%s_%s" % (NAME, name))
if required and not opt:
raise AutoPHPDocError (
':%s: option required in directive (or set %s_%s in conf.py).'
# -*- coding: utf-8 -*-
"""
Namecheap SSL Certificate Management
.. versionadded:: 2017.7.0
Prerequisites
-------------
This module uses the ``requests`` Python module to communicate with the Namecheap
API.
Configuration
-------------
The Namecheap username, API key and URL should be set in the minion configuration
file, or in the Pillar data.
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: <KEY>
namecheap.client_ip: 192.168.127.12
#Real url
namecheap.url: https://api.namecheap.com/xml.response
#Sandbox url
#namecheap.url: https://api.sandbox.namecheap.com/xml.response
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.stringutils
# Import 3rd-party libs
from salt.ext import six
try:
import salt.utils.namecheap
CAN_USE_NAMECHEAP = True
except ImportError:
CAN_USE_NAMECHEAP = False
log = logging.getLogger(__name__)
def __virtual__():
"""
Check that the requests and xml libraries are installed.
"""
if CAN_USE_NAMECHEAP:
return "namecheap_ssl"
return False
def reissue(
csr_file,
certificate_id,
web_server_type,
approver_email=None,
http_dc_validation=False,
**kwargs
):
"""
Reissues a purchased SSL certificate. Returns a dictionary of result
values.
csr_file
Path to Certificate Signing Request file
certificate_id
Unique ID of the SSL certificate you wish to activate
web_server_type
The type of certificate format to return. Possible values include:
- apache2
- apacheapachessl
- apacheopenssl
- apacheraven
- apachessl
- apachessleay
- c2net
- cobaltseries
- cpanel
- domino
- dominogo4625
- dominogo4626
- ensim
- hsphere
- ibmhttp
- iis
- iis4
- iis5
- iplanet
- ipswitch
- netscape
- other
- plesk
- tomcat
- weblogic
- website
- webstar
- zeusv3
approver_email
The email ID which is on the approver email list.
.. note::
``http_dc_validation`` must be set to ``False`` if this option is
used.
http_dc_validation : False
Whether or not to activate using HTTP-based validation.
.. note::
For other parameters which may be required, see here__.
.. __: https://www.namecheap.com/support/api/methods/ssl/reissue.aspx
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_ssl.reissue my-csr-file my-cert-id apachessl
"""
return __get_certificates(
"namecheap.ssl.reissue",
"SSLReissueResult",
csr_file,
certificate_id,
web_server_type,
approver_email,
http_dc_validation,
kwargs,
)
def activate(
csr_file,
certificate_id,
web_server_type,
approver_email=None,
http_dc_validation=False,
**kwargs
):
"""
Activates a newly-purchased SSL certificate. Returns a dictionary of result
values.
csr_file
Path to Certificate Signing Request file
certificate_id
Unique ID of the SSL certificate you wish to activate
web_server_type
The type of certificate format to return. Possible values include:
- apache2
- apacheapachessl
- apacheopenssl
- apacheraven
- apachessl
- apachessleay
- c2net
- cobaltseries
- cpanel
- domino
- dominogo4625
- dominogo4626
- ensim
- hsphere
- ibmhttp
- iis
- iis4
- iis5
- iplanet
- ipswitch
- netscape
- other
- plesk
- tomcat
- weblogic
- website
- webstar
- zeusv3
approver_email
The email ID which is on the approver email list.
.. note::
``http_dc_validation`` must be set to ``False`` if this option is
used.
http_dc_validation : False
Whether or not to activate using HTTP-based validation.
.. note::
For other parameters which may be required, see here__.
.. __: https://www.namecheap.com/support/api/methods/ssl/activate.aspx
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_ssl.activate my-csr-file my-cert-id apachessl
"""
return __get_certificates(
"namecheap.ssl.activate",
"SSLActivateResult",
csr_file,
certificate_id,
web_server_type,
approver_email,
http_dc_validation,
kwargs,
)
def __get_certificates(
command,
result_tag_name,
csr_file,
certificate_id,
web_server_type,
approver_email,
http_dc_validation,
kwargs,
):
web_server_types = (
"apacheopenssl",
"apachessl",
"apacheraven",
"apachessleay",
"c2net",
"ibmhttp",
"iplanet",
"domino",
"dominogo4625",
"dominogo4626",
"netscape",
"zeusv3",
"apache2",
"apacheapachessl",
"cobaltseries",
"cpanel",
"ensim",
"hsphere",
"ipswitch",
"plesk",
"tomcat",
"weblogic",
"website",
"webstar",
"iis",
"other",
"iis4",
"iis5",
)
if web_server_type not in web_server_types:
log.error("Invalid option for web_server_type=%s", web_server_type)
raise Exception("Invalid option for web_server_type=" + web_server_type)
if approver_email is not None and http_dc_validation:
log.error("approver_email and http_dc_validation cannot both have values")
raise Exception("approver_email and http_dc_validation cannot both have values")
if approver_email is None and not http_dc_validation:
log.error("approver_email or http_dc_validation must have a value")
raise Exception("approver_email or http_dc_validation must have a value")
opts = salt.utils.namecheap.get_opts(command)
with salt.utils.files.fopen(csr_file, "rb") as csr_handle:
opts["csr"] = salt.utils.stringutils.to_unicode(csr_handle.read())
opts["CertificateID"] = certificate_id
opts["WebServerType"] = web_server_type
if approver_email is not None:
opts["ApproverEmail"] = approver_email
if http_dc_validation:
opts["HTTPDCValidation"] = "True"
for key, value in six.iteritems(kwargs):
opts[key] = value
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
sslresult = response_xml.getElementsByTagName(result_tag_name)[0]
result = salt.utils.namecheap.atts_to_dict(sslresult)
if http_dc_validation:
validation_tag = sslresult.getElementsByTagName("HttpDCValidation")
if validation_tag is not None and len(validation_tag) > 0:
validation_tag = validation_tag[0]
if validation_tag.getAttribute("ValueAvailable").lower() == "true":
validation_dict = {
"filename": validation_tag.getElementsByTagName("FileName")[0]
.childNodes[0]
.data,
"filecontent": validation_tag.getElementsByTagName("FileContent")[0]
.childNodes[0]
.data,
}
result["httpdcvalidation"] = validation_dict
return result
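# Illustrative shape of the value returned above when http_dc_validation is used
# (the attribute names copied from the result tag depend on the Namecheap API;
# the values shown here are placeholders):
#
#     {
#         ...attributes of the SSLActivateResult / SSLReissueResult tag...,
#         'httpdcvalidation': {
#             'filename': '<validation file name>',
#             'filecontent': '<validation file content>',
#         },
#     }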
def renew(years, certificate_id, certificate_type, promotion_code=None):
"""
Renews an SSL certificate if it is ACTIVE and expires within 30 days. Returns
the following information:
- The certificate ID
- The order ID
- The transaction ID
- The amount charged for the order
years : 1
Number of years to register
certificate_id
Unique ID of the SSL certificate you wish to renew
certificate_type
Type of SSL Certificate. Possible values include:
- EV Multi Domain SSL
- EV SSL
- EV SSL SGC
- EssentialSSL
- EssentialSSL Wildcard
- InstantSSL
- InstantSSL Pro
- Multi Domain SSL
- PositiveSSL
- PositiveSSL Multi Domain
- PositiveSSL Wildcard
- PremiumSSL
- PremiumSSL Wildcard
- QuickSSL Premium
- RapidSSL
- RapidSSL Wildcard
- SGC Supercert
- SSL Web Server
- SSL Webserver EV
- SSL123
- Secure Site
- Secure Site Pro
- Secure Site Pro with EV
- Secure Site with EV
- True BusinessID
- True BusinessID Multi Domain
- True BusinessID Wildcard
- True BusinessID with EV
- True BusinessID with EV Multi Domain
- Unified Communications
promotion_code
An optional promo code to use when renewing the certificate
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_ssl.renew 1 my-cert-id RapidSSL
"""
valid_certs = (
"QuickSSL Premium",
"RapidSSL",
"RapidSSL Wildcard",
"PremiumSSL",
"InstantSSL",
"PositiveSSL",
"PositiveSSL Wildcard",
"True BusinessID with EV",
"True BusinessID",
"True BusinessID Wildcard",
"True BusinessID Multi Domain",
"True BusinessID with EV Multi Domain",
"Secure Site",
"Secure Site Pro",
"Secure Site with EV",
"Secure Site Pro with EV",
"EssentialSSL",
"EssentialSSL Wildcard",
"InstantSSL Pro",
"PremiumSSL Wildcard",
"EV SSL",
"EV SSL SGC",
"SSL123",
"SSL Web Server",
"SGC Supercert",
"SSL Webserver EV",
"EV Multi Domain SSL",
"Multi Domain SSL",
"PositiveSSL Multi Domain",
"Unified Communications",
)
if certificate_type not in valid_certs:
log.error("Invalid option for certificate_type=%s", certificate_type)
raise Exception("Invalid option for certificate_type=" + certificate_type)
if years < 1 or years > 5:
log.error("Invalid option for years=%s", six.text_type(years))
raise Exception("Invalid option for years=" + six.text_type(years))
opts = salt.utils.namecheap.get_opts("namecheap.ssl.renew")
opts["Years"] = six.text_type(years)
opts["CertificateID"] = six.text_type(certificate_id)
opts["SSLType"] = certificate_type
if promotion_code is not None:
opts["PromotionCode"] = promotion_code
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
sslrenewresult = response_xml.getElementsByTagName("SSLRenewResult")[0]
return salt.utils.namecheap.atts_to_dict(sslrenewresult)
def create(years, certificate_type, promotion_code=None, sans_to_add=None):
"""
Creates a new SSL certificate. Returns the following information:
- Whether or not the SSL order was successful
- The certificate ID
- The order ID
- The transaction ID
- The amount charged for the order
- The date on which the certificate was created
- The date on which the certificate will expire
- The type of SSL certificate
- The number of years for which the certificate was purchased
- The current status of the SSL certificate
years : 1
Number of years to register
certificate_type
Type of SSL Certificate. Possible values include:
- EV Multi Domain SSL
- EV SSL
- EV SSL SGC
- EssentialSSL
- EssentialSSL Wildcard
- InstantSSL
- InstantSSL Pro
- Multi Domain SSL
- PositiveSSL
- PositiveSSL Multi Domain
- PositiveSSL Wildcard
- PremiumSSL
- PremiumSSL Wildcard
- QuickSSL Premium
- RapidSSL
- RapidSSL Wildcard
- SGC Supercert
- SSL Web Server
- SSL Webserver EV
- SSL123
- Secure Site
- Secure Site Pro
- Secure Site Pro with EV
- Secure Site with EV
- True BusinessID
- True BusinessID Multi Domain
- True BusinessID Wildcard
- True BusinessID with EV
- True BusinessID with EV Multi Domain
- Unified Communications
promotion_code
An optional promo code to use when creating the certificate
sans_to_add : 0
This parameter defines the number of add-on domains to be purchased in
addition to the default number of domains included with a multi-domain
certificate. Each certificate that supports SANs has the default number
of domains included. You may check the default number of domains
included and the maximum number of domains that can be added to it in
the table
# Domato - main generator script
# -------------------------------
#
# Written and maintained by <NAME> <<EMAIL>>
#
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import random
import sys
from domato.grammar import Grammar
class Domato:
def __init__(self):
self._N_MAIN_LINES = 10 # originally 1000
self._N_EVENTHANDLER_LINES = 10 # originally 1000
self._N_ADDITIONAL_HTMLVARS = 1 # originally 5
# A map from tag name to corresponding type for HTML tags
self._HTML_TYPES = {
'a': 'HTMLAnchorElement',
'abbr': 'HTMLUnknownElement',
'acronym': 'HTMLUnknownElement',
'address': 'HTMLUnknownElement',
'applet': 'HTMLUnknownElement',
'area': 'HTMLAreaElement',
'article': 'HTMLUnknownElement',
'aside': 'HTMLUnknownElement',
'audio': 'HTMLAudioElement',
'b': 'HTMLUnknownElement',
'base': 'HTMLBaseElement',
'basefont': 'HTMLUnknownElement',
'bdi': 'HTMLUnknownElement',
'bdo': 'HTMLUnknownElement',
'bgsound': 'HTMLUnknownElement',
'big': 'HTMLUnknownElement',
'blockquote': 'HTMLUnknownElement',
'br': 'HTMLBRElement',
'button': 'HTMLButtonElement',
'canvas': 'HTMLCanvasElement',
'caption': 'HTMLTableCaptionElement',
'center': 'HTMLUnknownElement',
'cite': 'HTMLUnknownElement',
'code': 'HTMLUnknownElement',
'col': 'HTMLTableColElement',
'colgroup': 'HTMLUnknownElement',
'command': 'HTMLUnknownElement',
'content': 'HTMLContentElement',
'data': 'HTMLDataElement',
'datalist': 'HTMLDataListElement',
'dd': 'HTMLUnknownElement',
'del': 'HTMLModElement',
'details': 'HTMLDetailsElement',
'dfn': 'HTMLUnknownElement',
'dialog': 'HTMLDialogElement',
'dir': 'HTMLDirectoryElement',
'div': 'HTMLDivElement',
'dl': 'HTMLDListElement',
'dt': 'HTMLUnknownElement',
'em': 'HTMLUnknownElement',
'embed': 'HTMLEmbedElement',
'fieldset': 'HTMLFieldSetElement',
'figcaption': 'HTMLUnknownElement',
'figure': 'HTMLUnknownElement',
'font': 'HTMLFontElement',
'footer': 'HTMLUnknownElement',
'form': 'HTMLFormElement',
'frame': 'HTMLFrameElement',
'frameset': 'HTMLFrameSetElement',
'h1': 'HTMLHeadingElement',
'h2': 'HTMLHeadingElement',
'h3': 'HTMLHeadingElement',
'h4': 'HTMLHeadingElement',
'h5': 'HTMLHeadingElement',
'h6': 'HTMLHeadingElement',
'header': 'HTMLUnknownElement',
'hgroup': 'HTMLUnknownElement',
'hr': 'HTMLHRElement',
'i': 'HTMLUnknownElement',
'iframe': 'HTMLIFrameElement',
'image': 'HTMLImageElement',
'img': 'HTMLImageElement',
'input': 'HTMLInputElement',
'ins': 'HTMLModElement',
'isindex': 'HTMLUnknownElement',
'kbd': 'HTMLUnknownElement',
'keygen': 'HTMLKeygenElement',
'label': 'HTMLLabelElement',
'layer': 'HTMLUnknownElement',
'legend': 'HTMLLegendElement',
'li': 'HTMLLIElement',
'link': 'HTMLLinkElement',
'listing': 'HTMLUnknownElement',
'main': 'HTMLUnknownElement',
'map': 'HTMLMapElement',
'mark': 'HTMLUnknownElement',
'marquee': 'HTMLMarqueeElement',
'menu': 'HTMLMenuElement',
'menuitem': 'HTMLMenuItemElement',
'meta': 'HTMLMetaElement',
'meter': 'HTMLMeterElement',
'nav': 'HTMLUnknownElement',
'nobr': 'HTMLUnknownElement',
'noembed': 'HTMLUnknownElement',
'noframes': 'HTMLUnknownElement',
'nolayer': 'HTMLUnknownElement',
'noscript': 'HTMLUnknownElement',
'object': 'HTMLObjectElement',
'ol': 'HTMLOListElement',
'optgroup': 'HTMLOptGroupElement',
'option': 'HTMLOptionElement',
'output': 'HTMLOutputElement',
'p': 'HTMLParagraphElement',
'param': 'HTMLParamElement',
'picture': 'HTMLPictureElement',
'plaintext': 'HTMLUnknownElement',
'pre': 'HTMLPreElement',
'progress': 'HTMLProgressElement',
'q': 'HTMLQuoteElement',
'rp': 'HTMLUnknownElement',
'rt': 'HTMLUnknownElement',
'ruby': 'HTMLUnknownElement',
's': 'HTMLUnknownElement',
'samp': 'HTMLUnknownElement',
'section': 'HTMLUnknownElement',
'select': 'HTMLSelectElement',
'shadow': 'HTMLShadowElement',
'small': 'HTMLUnknownElement',
'source': 'HTMLSourceElement',
'span': 'HTMLSpanElement',
'strike': 'HTMLUnknownElement',
'strong': 'HTMLUnknownElement',
'style': 'HTMLStyleElement',
'sub': 'HTMLUnknownElement',
'summary': 'HTMLUnknownElement',
'sup': 'HTMLUnknownElement',
'table': 'HTMLTableElement',
'tbody': 'HTMLTableSectionElement',
'td': 'HTMLUnknownElement',
'template': 'HTMLTemplateElement',
'textarea': 'HTMLTextAreaElement',
'tfoot': 'HTMLTableSectionElement',
'th': 'HTMLTableCellElement',
'thead': 'HTMLTableSectionElement',
'time': 'HTMLTimeElement',
'title': 'HTMLTitleElement',
'tr': 'HTMLTableRowElement',
'track': 'HTMLTrackElement',
'tt': 'HTMLUnknownElement',
'u': 'HTMLUnknownElement',
'ul': 'HTMLUListElement',
'var': 'HTMLUnknownElement',
'video': 'HTMLVideoElement',
'wbr': 'HTMLUnknownElement',
'xmp': 'HTMLUnknownElement'
}
# A map from tag name to corresponding type for SVG tags
self._SVG_TYPES = {
'a': 'SVGAElement',
'altGlyph': 'SVGElement',
'altGlyphDef': 'SVGElement',
'altGlyphItem': 'SVGElement',
'animate': 'SVGAnimateElement',
'animateColor': 'SVGElement',
'animateMotion': 'SVGAnimateMotionElement',
'animateTransform': 'SVGAnimateTransformElement',
'circle': 'SVGCircleElement',
'clipPath': 'SVGClipPathElement',
'color-profile': 'SVGElement',
'cursor': 'SVGCursorElement',
'defs': 'SVGDefsElement',
'desc': 'SVGDescElement',
'discard': 'SVGDiscardElement',
'ellipse': 'SVGEllipseElement',
'feBlend': 'SVGFEBlendElement',
'feColorMatrix': 'SVGFEColorMatrixElement',
'feComponentTransfer': 'SVGFEComponentTransferElement',
'feComposite': 'SVGFECompositeElement',
'feConvolveMatrix': 'SVGFEConvolveMatrixElement',
'feDiffuseLighting': 'SVGFEDiffuseLightingElement',
'feDisplacementMap': 'SVGFEDisplacementMapElement',
'feDistantLight': 'SVGFEDistantLightElement',
'feDropShadow': 'SVGFEDropShadowElement',
'feFlood': 'SVGFEFloodElement',
'feFuncA': 'SVGFEFuncAElement',
'feFuncB': 'SVGFEFuncBElement',
'feFuncG': 'SVGFEFuncGElement',
'feFuncR': 'SVGFEFuncRElement',
'feGaussianBlur': 'SVGFEGaussianBlurElement',
'feImage': 'SVGFEImageElement',
'feMerge': 'SVGFEMergeElement',
'feMergeNode': 'SVGFEMergeNodeElement',
'feMorphology': 'SVGFEMorphologyElement',
'feOffset': 'SVGFEOffsetElement',
'fePointLight': 'SVGFEPointLightElement',
'feSpecularLighting': 'SVGFESpecularLightingElement',
'feSpotLight': 'SVGFESpotLightElement',
'feTile': 'SVGFETileElement',
'feTurbulence': 'SVGFETurbulenceElement',
'filter': 'SVGFilterElement',
'font': 'SVGElement',
'font-face': 'SVGElement',
'font-face-format': 'SVGElement',
'font-face-name': 'SVGElement',
'font-face-src': 'SVGElement',
'font-face-uri': 'SVGElement',
'foreignObject': 'SVGForeignObjectElement',
'g': 'SVGGElement',
'glyph': 'SVGElement',
'glyphRef': 'SVGElement',
'hatch': 'SVGElement',
'hatchpath': 'SVGElement',
'hkern': 'SVGElement',
'image': 'SVGImageElement',
'line': 'SVGLineElement',
'linearGradient': 'SVGLinearGradientElement',
'marker': 'SVGMarkerElement',
'mask': 'SVGMaskElement',
'mesh': 'SVGElement',
'meshgradient': 'SVGElement',
'meshpatch': 'SVGElement',
'meshrow': 'SVGElement',
'metadata': 'SVGMetadataElement',
'missing-glyph': 'SVGElement',
'mpath': 'SVGMPathElement',
'path': 'SVGPathElement',
'pattern': 'SVGPatternElement',
'polygon': 'SVGPolygonElement',
'polyline': 'SVGPolylineElement',
'radialGradient': 'SVGRadialGradientElement',
'rect': 'SVGRectElement',
'set': 'SVGSetElement',
'svg': 'SVGSVGElement',
'solidcolor': 'SVGElement',
'stop': 'SVGStopElement',
'switch': 'SVGSwitchElement',
'symbol': 'SVGSymbolElement',
'text': 'SVGTextElement',
'textPath': 'SVGTextPathElement',
'title': 'SVGTitleElement',
'tref': 'SVGElement',
'tspan': 'SVGTSpanElement',
'unknown': 'SVGElement',
'use': 'SVGUseElement',
'view': 'SVGViewElement',
'vkern': 'SVGElement'
}
def generate_html_elements(self, ctx, n):
for i in range(n):
tag = random.choice(list(self._HTML_TYPES))
tagtype = self._HTML_TYPES[tag]
ctx['htmlvarctr'] += 1
varname = 'htmlvar%05d' % ctx['htmlvarctr']
ctx['htmlvars'].append({'name': varname, 'type': tagtype})
ctx['htmlvargen'] += '/* newvar{' + varname + ':' + tagtype + '} */ var ' + varname + ' = document.createElement(\"' + tag + '\"); //' + tagtype + '\n'
def add_html_ids(self, matchobj, ctx):
tagname = matchobj.group(0)[1:-1]
if tagname in self._HTML_TYPES:
ctx['htmlvarctr'] += 1
varname = 'htmlvar%05d' % ctx['htmlvarctr']
ctx['htmlvars'].append({'name': varname, 'type': self._HTML_TYPES[tagname]})
ctx['htmlvargen'] += '/* newvar{' + varname + ':' + self._HTML_TYPES[tagname] + '} */ var ' + varname + ' = document.getElementById(\"' + varname + '\"); //' + self._HTML_TYPES[tagname] + '\n'
return matchobj.group(0) + 'id=\"' + varname + '\" '
elif tagname in self._SVG_TYPES:
ctx['svgvarctr'] += 1
varname = 'svgvar%05d' % ctx['svgvarctr']
ctx['htmlvars'].append({'name': varname, 'type': self._SVG_TYPES[tagname]})
ctx['htmlvargen'] += '/* newvar{' + varname + ':' + self._SVG_TYPES[tagname] + '} */ var ' + varname + ' = document.getElementById(\"' + varname + '\"); //' + self._SVG_TYPES[tagname] + '\n'
return matchobj.group(0) + 'id=\"' + varname + '\" '
else:
return matchobj.group(0)
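# Illustrative example (hypothetical values, not from the original code): when the
# regex in generate_new_sample matches '<div ', the HTML branch above returns
#     '<div id="htmlvar00001" '
# records {'name': 'htmlvar00001', 'type': 'HTMLDivElement'} in ctx['htmlvars'],
# and appends a corresponding
#     /* newvar{htmlvar00001:HTMLDivElement} */ var htmlvar00001 = document.getElementById("htmlvar00001");
# line to ctx['htmlvargen'].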
def generate_function_body(self, jsgrammar, htmlctx, num_lines):
js = ''
js += 'var fuzzervars = {};\n\n'
js += "SetVariable(fuzzervars, window, 'Window');\nSetVariable(fuzzervars, document, 'Document');\nSetVariable(fuzzervars, document.body.firstChild, 'Element');\n\n"
js += '//beginjs\n'
js += htmlctx['htmlvargen']
js += jsgrammar._generate_code(num_lines, htmlctx['htmlvars'])
js += '\n//endjs\n'
js += 'var fuzzervars = {};\nfreememory()\n'
return js
def check_grammar(self, grammar):
"""Checks if grammar has errors and if so outputs them.
Args:
grammar: The grammar to check.
"""
for rule in grammar._all_rules:
for part in rule['parts']:
if part['type'] == 'text':
continue
tagname = part['tagname']
# print tagname
if tagname not in grammar._creators:
print('No creators for type ' + tagname)
def generate_new_sample(self, template, htmlgrammar, cssgrammar, jsgrammar):
"""Parses grammar rules from string.
Args:
template: A template string.
htmlgrammar: Grammar for generating HTML code.
cssgrammar: Grammar for generating CSS code.
jsgrammar: Grammar for generating JS code.
Returns:
A string containing sample data.
"""
result = template
css = cssgrammar.generate_symbol('rules')
html = htmlgrammar.generate_symbol('bodyelements')
htmlctx = {
'htmlvars': [],
'htmlvarctr': 0,
'svgvarctr': 0,
'htmlvargen': ''
}
html = re.sub(
r'<[a-zA-Z0-9_-]+ ',
lambda match: self.add_html_ids(match, htmlctx),
html
)
self.generate_html_elements(htmlctx, self._N_ADDITIONAL_HTMLVARS)
result = result.replace('<cssfuzzer>', css)
result = result.replace('<htmlfuzzer>', html)
handlers = False
while '<jsfuzzer>' in result:
numlines = self._N_MAIN_LINES
if handlers:
numlines = self._N_EVENTHANDLER_LINES
else:
handlers = True
result = result.replace(
'<jsfuzzer>',
self.generate_function_body(jsgrammar, htmlctx, numlines),
1
)
return result
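# Minimal template sketch (an assumption about template.html, not taken from the
# repository): the substitutions above expect the markers <cssfuzzer>,
# <htmlfuzzer> and <jsfuzzer> to appear in the template, e.g.
#
#     <html><head><style><cssfuzzer></style><script><jsfuzzer></script></head>
#     <body><htmlfuzzer></body></html>
#
# The first <jsfuzzer> occurrence receives _N_MAIN_LINES generated lines; every
# further occurrence receives _N_EVENTHANDLER_LINES lines.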
def generate_samples(self, grammar_dir, outfiles): # original code
"""Generates a set of samples and writes them to the output files.
Args:
grammar_dir: directory to load grammar files from.
outfiles: A list of output filenames.
"""
f = open(os.path.join(grammar_dir, 'template.html')) # load the template data
template = f.read()
f.close()
htmlgrammar = Grammar()
err = htmlgrammar.parse_from_file(os.path.join(grammar_dir, 'html.txt'))
# CheckGrammar(htmlgrammar)
if err > 0:
print('There were errors parsing grammar')
return
cssgrammar = Grammar()
err = cssgrammar.parse_from_file(os.path.join(grammar_dir, 'css.txt'))
# CheckGrammar(cssgrammar)
if err > 0:
print('There were errors parsing grammar')
return
jsgrammar = Grammar()
err = jsgrammar.parse_from_file(os.path.join(grammar_dir, 'js.txt'))
# CheckGrammar(jsgrammar)
if err > 0:
print('There were errors parsing grammar')
return
# JS and HTML grammar need access to CSS grammar.
# Add it as import
htmlgrammar.add_import('cssgrammar', cssgrammar)
jsgrammar.add_import('cssgrammar', cssgrammar)
for outfile in outfiles:
result = self.generate_new_sample(template, htmlgrammar, cssgrammar,
jsgrammar)
if result is not None:
print('Writing a sample to ' + outfile)
try:
f = open(outfile, 'w')
f.write(result)
f.close()
except IOError:
print('Error writing to output')
def generate_samples2(self, grammar_dir, template_data): # custom code
"""Generates a set of samples and writes them to the output files.
Args:
grammar_dir: directory to load grammar files from.
template_data: template data
"""
'''
f = open(os.path.join(grammar_dir, 'template.html')) # load the template data
template = f.read()
f.close()
'''
template = template_data
htmlgrammar = Grammar()
err = htmlgrammar.parse_from_file(os.path.join(grammar_dir, 'html.txt'))
# CheckGrammar(htmlgrammar)
if err > 0:
print('There were errors parsing grammar')
return
cssgrammar = Grammar()
err = cssgrammar.parse_from_file(os.path.join(grammar_dir, 'css.txt'))
# CheckGrammar(cssgrammar)
if err > 0:
print('There were errors parsing grammar')
return
jsgrammar = Grammar()
err = jsgrammar.parse_from_file(os.path.join(grammar_dir, 'js.txt'))
# CheckGrammar(jsgrammar)
if err > 0:
print('There were errors parsing grammar')
return
import unittest
from datetime import datetime, timedelta
from django.utils import timezone
from textwrap import dedent
import pytz
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
AnswerPaper, Answer, Course, IntegerTestCase, FloatTestCase,\
StringTestCase, McqTestCase, ArrangeTestCase
def setUpModule():
# Create user profile
# Create User 1
user = User.objects.create_user(username='demo_user_100',
password='<PASSWORD>',
email='<EMAIL>')
Profile.objects.create(user=user, roll_number=1,
institute='IIT', department='Aerospace',
position='Student')
# Create User 2
user2 = User.objects.create_user(
username='demo_user_101', password='<PASSWORD>',
email='<EMAIL>')
Profile.objects.create(user=user2, roll_number=2,
institute='IIT', department='Aerospace',
position='Student')
# Create a course
Course.objects.create(name="Python Course 100",
enrollment="Enroll Request", creator=user)
quiz = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True, attempts_allowed=1,
time_between_attempts=0, pass_criteria=0,
description='demo quiz 100',
instructions="Demo Instructions",
creator=user
)
QuestionPaper.objects.create(quiz=quiz, total_marks=1.0)
def tearDownModule():
User.objects.filter(username__in=["demo_user_100", "demo_user_101"])\
.delete()
class IntegerQuestionTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
# Creating Course
self.course = Course.objects.get(name="Python Course 100")
# Creating Quiz
self.quiz = Quiz.objects.get(description="demo quiz 100")
# Creating Question paper
self.question_paper = QuestionPaper.objects.get(quiz=self.quiz)
# Creating User
self.user = User.objects.get(username='demo_user_100')
# Creating Question
self.question1 = Question.objects.create(summary='int1', points=1,
type='code', user=self.user)
self.question1.language = 'python'
self.question1.type = "integer"
self.question1.test_case_type = 'integertestcase'
self.question1.description = 'sum of 12+13?'
self.question1.save()
# Creating answerpaper
self.answerpaper = AnswerPaper.objects.create(
user=self.user, user_ip='172.16.31.10', start_time=timezone.now(),
question_paper=self.question_paper, course=self.course,
end_time=timezone.now()+timedelta(minutes=5), attempt_number=1
)
self.answerpaper.questions.add(self.question1)
self.answerpaper.save()
# For question
self.integer_based_testcase = IntegerTestCase(question=self.question1,
correct=25,
type='integertestcase',
)
self.integer_based_testcase.save()
@classmethod
def tearDownClass(self):
self.question1.delete()
self.answerpaper.delete()
def test_validate_regrade_integer_correct_answer(self):
# Given
integer_answer = 25
self.answer = Answer(question=self.question1,
answer=integer_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
self.answerpaper.save()
# When
json_data = None
result = self.answerpaper.validate_answer(integer_answer,
self.question1,
json_data,
)
# Then
self.assertTrue(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=self.answer.id)
regrade_answer.answer = 200
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertEqual(self.answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_regrade_integer_incorrect_answer(self):
# Given
integer_answer = 26
self.answer = Answer(question=self.question1,
answer=integer_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(integer_answer,
self.question1, json_data
)
# Then
self.assertFalse(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=self.answer.id)
regrade_answer.answer = 25
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertEqual(self.answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 1)
self.assertTrue(self.answer.correct)
class StringQuestionTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
# Creating Course
self.course = Course.objects.get(name="Python Course 100")
# Creating Quiz
self.quiz = Quiz.objects.get(description="demo quiz 100")
# Creating Question paper
self.question_paper = QuestionPaper.objects.get(quiz=self.quiz)
# Creating User
self.user = User.objects.get(username='demo_user_100')
# Creating Question
self.question1 = Question.objects.create(summary='str1', points=1,
type='code', user=self.user)
self.question1.language = 'python'
self.question1.type = "string"
self.question1.test_case_type = 'stringtestcase'
self.question1.description = 'Write Hello, EARTH!'
self.question1.save()
self.question2 = Question.objects.create(summary='str2', points=1,
type='code', user=self.user)
self.question2.language = 'python'
self.question2.type = "string"
self.question2.test_case_type = 'stringtestcase'
self.question2.description = 'Write Hello, EARTH!'
self.question2.save()
# Creating answerpaper
self.answerpaper = AnswerPaper.objects.create(
user=self.user, user_ip='172.16.31.10', start_time=timezone.now(),
question_paper=self.question_paper, course=self.course,
end_time=timezone.now()+timedelta(minutes=5), attempt_number=1
)
self.answerpaper.questions.add(*[self.question1, self.question2])
self.answerpaper.save()
# For question
self.lower_string_testcase = StringTestCase(question=self.question1,
correct="Hello, EARTH!",
string_check="lower",
type='stringtestcase',
)
self.lower_string_testcase.save()
self.exact_string_testcase = StringTestCase(question=self.question2,
correct="Hello, EARTH!",
string_check="exact",
type='stringtestcase',
)
self.exact_string_testcase.save()
@classmethod
def tearDownClass(self):
self.question1.delete()
self.question2.delete()
self.answerpaper.delete()
def test_validate_regrade_case_insensitive_string_correct_answer(self):
# Given
string_answer = "hello, earth!"
answer = Answer(question=self.question1, answer=string_answer)
answer.save()
self.answerpaper.answers.add(answer)
# When
json_data = None
result = self.answerpaper.validate_answer(string_answer,
self.question1, json_data
)
# Then
self.assertTrue(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=answer.id)
regrade_answer.answer = "hello, mars!"
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
answer = self.answerpaper.answers.filter(
question=self.question1).last()
self.assertEqual(answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(answer.marks, 0)
self.assertFalse(answer.correct)
def test_validate_regrade_case_insensitive_string_incorrect_answer(self):
# Given
string_answer = "hello, mars!"
answer = Answer(question=self.question1, answer=string_answer)
answer.save()
self.answerpaper.answers.add(answer)
# When
json_data = None
result = self.answerpaper.validate_answer(string_answer,
self.question1, json_data
)
# Then
self.assertFalse(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=answer.id)
regrade_answer.answer = "hello, earth!"
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
answer = self.answerpaper.answers.filter(
question=self.question1).last()
self.assertEqual(answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(answer.marks, 1)
self.assertTrue(answer.correct)
def test_validate_regrade_case_sensitive_string_correct_answer(self):
# Given
string_answer = "Hello, EARTH!"
answer = Answer(question=self.question2, answer=string_answer)
answer.save()
self.answerpaper.answers.add(answer)
# When
json_data = None
result = self.answerpaper.validate_answer(string_answer,
self.question2, json_data
)
# Then
self.assertTrue(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=answer.id)
regrade_answer.answer = "hello, earth!"
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question2.id)
# Then
answer = self.answerpaper.answers.filter(
question=self.question2).last()
self.assertEqual(answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(answer.marks, 0)
self.assertFalse(answer.correct)
def test_case_sensitive_string_incorrect_answer(self):
# Given
string_answer = "hello, earth!"
answer = Answer(question=self.question2, answer=string_answer)
answer.save()
self.answerpaper.answers.add(answer)
# When
json_data = None
result = self.answerpaper.validate_answer(string_answer,
self.question2, json_data
)
# Then
self.assertFalse(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=answer.id)
regrade_answer.answer = "Hello, EARTH!"
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question2.id)
# Then
answer = self.answerpaper.answers.filter(
question=self.question2).last()
self.assertEqual(answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(answer.marks, 1)
self.assertTrue(answer.correct)
class FloatQuestionTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
# Creating Course
self.course = Course.objects.get(name="Python Course 100")
# Creating Quiz
self.quiz = Quiz.objects.get(description="demo quiz 100")
# Creating Question paper
self.question_paper = QuestionPaper.objects.get(quiz=self.quiz)
# Creating User
self.user = User.objects.get(username='demo_user_100')
# Creating Question
self.question1 = Question.objects.create(summary='flt1', points=1,
type='code', user=self.user)
self.question1.language = 'python'
self.question1.type = "float"
self.question1.test_case_type = 'floattestcase'
self.question1.save()
# Creating answerpaper
self.answerpaper = AnswerPaper.objects.create(
user=self.user, user_ip='172.16.31.10', start_time=timezone.now(),
question_paper=self.question_paper, course=self.course,
end_time=timezone.now()+timedelta(minutes=5), attempt_number=1,
)
self.answerpaper.questions.add(self.question1)
self.answerpaper.save()
# For question
self.float_based_testcase = FloatTestCase(question=self.question1,
correct=100,
error_margin=0.1,
type='floattestcase',
)
self.float_based_testcase.save()
@classmethod
def tearDownClass(self):
self.question1.delete()
self.answerpaper.delete()
def test_validate_regrade_float_correct_answer(self):
# Given
float_answer = 99.9
self.answer = Answer(question=self.question1,
answer=float_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(float_answer,
self.question1,
json_data,
)
# Then
self.assertTrue(result['success'])
# Regrade with wrong answer
# Given
regrade_answer = Answer.objects.get(id=self.answer.id)
regrade_answer.answer = 0.0
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertEqual(self.answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_float_incorrect_answer(self):
# Given
float_answer = 99.8
self.answer = Answer(question=self.question1,
answer=float_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(float_answer,
self.question1, json_data
)
# Then
self.assertFalse(result['success'])
# Regrade
# Given
regrade_answer = Answer.objects.get(id=self.answer.id)
regrade_answer.answer = 99.9
regrade_answer.save()
# When
details = self.answerpaper.regrade(self.question1.id)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertEqual(self.answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 1)
self.assertTrue(self.answer.correct)
class MCQQuestionTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
# Creating User
self.user = User.objects.get(username='demo_user_100')
self.user2 = User.objects.get(username='demo_user_101')
self.user_ip = '127.0.0.1'
# Creating Course
self.course = Course.objects.get(name="Python Course 100")
# Creating Quiz
self.quiz = Quiz.objects.get(description="demo quiz 100")
# Creating Question paper
self.question_paper = QuestionPaper.objects.get(quiz=self.quiz)
self.question_paper.shuffle_testcases = True
self.question_paper.save()
# Creating Question
self.question1 = Question.objects.create(summary='mcq1', points=1,
type='code', user=self.user,
)
self.question1.language = 'python'
self.question1.type = "mcq"
self.question1.test_case_type = 'Mcqtestcase'
self.question1.description = 'Which option is Correct?'
self.question1.save()
# For questions
self.mcq_based_testcase_1 = McqTestCase(question=self.question1,
options="Correct",
correct=True,
type='mcqtestcase',
)
self.mcq_based_testcase_1.save()
self.mcq_based_testcase_2 = McqTestCase(question=self.question1,
options="Incorrect",
correct=False,
type='mcqtestcase',
)
self.mcq_based_testcase_2.save()
self.mcq_based_testcase_3 = McqTestCase(question=self.question1,
options="Incorrect",
correct=False,
type='mcqtestcase',
)
self.mcq_based_testcase_3.save()
self.mcq_based_testcase_4 = McqTestCase(question=self.question1,
options="Incorrect",
correct=False,
type='mcqtestcase',
)
self.mcq_based_testcase_4.save()
self.question_paper.fixed_questions.add(self.question1)
self.answerpaper = self.question_paper.make_answerpaper(
user=self.user, ip=self.user_ip,
attempt_num=1,
course_id=self.course.id
)
# Answerpaper for user 2
self.answerpaper2 = self.question_paper.make_answerpaper(
user=self.user2, ip=self.user_ip,
attempt_num=1,
course_id=self.course.id
)
@classmethod
def tearDownClass(self):
self.question1.delete()
self.answerpaper.delete()
self.answerpaper2.delete()
def test_shuffle_test_cases(self):
# Given
# When
user_testcase = self.question1.get_ordered_test_cases(
self.answerpaper
)
order1 = [tc.id for tc in user_testcase]
user2_testcase = self.question1.get_ordered_test_cases(
self.answerpaper2
)
order2 = [tc.id for tc in user2_testcase]
self.question_paper.shuffle_testcases = False
self.question_paper.save()
answerpaper3 = self.question_paper.make_answerpaper(
user=self.user2, ip=self.user_ip,
attempt_num=self.answerpaper.attempt_number+1,
course_id=self.course.id
)
not_ordered_testcase = self.question1.get_ordered_test_cases(
answerpaper3
)
get_test_cases = self.question1.get_test_cases()
# Then
self.assertNotEqual(order1, order2)
self.assertEqual(get_test_cases, not_ordered_testcase)
class ArrangeQuestionTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
# Creating Course
self.course = Course.objects.get(name="Python Course 100")
# Creating Quiz
self.quiz = Quiz.objects.get(description="demo quiz 100")
# Creating Question paper
self.question_paper = QuestionPaper.objects.get(quiz=self.quiz,
total_marks=1.0)
# Creating User
self.user = User.objects.get(username='demo_user_100')
# Creating Question
self.question1 = Question.objects.create(summary='arrange1',
points=1.0,
user=self.user
)
self.question1.language = 'python'
self.question1.type = "arrange"
self.question1.description = "Arrange alphabets in ascending order"
self.question1.test_case_type = 'arrangetestcase'
self.question1.save()
# Creating answerpaper
self.answerpaper = AnswerPaper.objects.create(
user=self.user, user_ip='172.16.31.10', course=self.course,
start_time=timezone.now(), question_paper=self.question_paper,
end_time=timezone.now()+timedelta(minutes=5), attempt_number=1
)
self.answerpaper.questions.add(self.question1)
self.answerpaper.save()
# For question
self.arrange_testcase_1 = ArrangeTestCase(question=self.question1,
options="A",
type='arrangetestcase',
)
self.arrange_testcase_1.save()
self.testcase_1_id = self.arrange_testcase_1.id
self.arrange_testcase_2 = ArrangeTestCase(question=self.question1,
options="B",
type='arrangetestcase',
)
self.arrange_testcase_2.save()
self.testcase_2_id = self.arrange_testcase_2.id
self.arrange_testcase_3 = ArrangeTestCase(question=self.question1,
options="C",
type='arrangetestcase',
)
self.arrange_testcase_3.save()
self.testcase_3_id = self.arrange_testcase_3.id
@classmethod
def tearDownClass(self):
self.question1.delete()
self.answerpaper.delete()
def test_validate_regrade_arrange_correct_answer(self):
# Given
arrange_answer = [self.testcase_1_id,
self.testcase_2_id,
self.testcase_3_id,
]
self.answer = Answer(question=self.question1,
answer=arrange_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(arrange_answer,
self.question1,
json_data,
)
# Then
self.assertTrue(result['success'])
# Regrade with wrong answer
# Given
regrade_answer = Answer.objects.get(id=self.answer.id)
# Try regrade with wrong data structure
# When
regrade_answer.answer = 1
regrade_answer.save()
details = self.answerpaper.regrade(self.question1.id)
err_msg = dedent("""\
User: {0}; Quiz: {1}; Question: {2}.
{3} answer not a list.""".format(
self.user.username,
self.quiz.description,
self.question1.summary,
self.question1.type
))
self.assertFalse(details[0])
self.assertEqual(details[1], err_msg)
# Try regrade with incorrect answer
# When
regrade_answer.answer = [self.testcase_1_id,
self.testcase_3_id,
self.testcase_2_id,
]
regrade_answer.save()
# Then
details = self.answerpaper.regrade(self.question1.id)
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertEqual(self.answer, regrade_answer)
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_regrade_arrange_incorrect_answer(self):
# Given
arrange_answer = [self.testcase_1_id,
self.testcase_3_id,
self.testcase_2_id,
]
"""Tests for graph_sage."""
import enum
import math
import os
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_gnn as tfgnn
from tensorflow_gnn.models.graph_sage import layers as graph_sage
_FEATURE_NAME = "f"
def _get_test_graph():
graph = tfgnn.GraphTensor.from_pieces(
context=tfgnn.Context.from_fields(
features={_FEATURE_NAME: tf.constant([0., 0.])}),
node_sets={
"topic":
tfgnn.NodeSet.from_fields(
features={_FEATURE_NAME: tf.constant([[1.] * 30, [0.] * 30])},
sizes=tf.constant([1, 1])),
"paper":
tfgnn.NodeSet.from_fields(
features={
_FEATURE_NAME: tf.constant([[1., 2., 3.], [2., 1., 3.]])
},
sizes=tf.constant([1, 1])),
"author":
tfgnn.NodeSet.from_fields(
features={
_FEATURE_NAME: tf.constant([[1., 0.], [0., 2.]] * 2)
},
sizes=tf.constant([2, 2])),
"institution":
tfgnn.NodeSet.from_fields(
features={
_FEATURE_NAME:
tf.constant([[1., 2., 3., 0.], [2., 1., 3., 0.]])
},
sizes=tf.constant([1, 1])),
},
edge_sets={
"written":
tfgnn.EdgeSet.from_fields(
features={},
sizes=tf.constant([2, 1]),
adjacency=tfgnn.Adjacency.from_indices(
("paper", tf.constant([0, 0, 1])),
("author", tf.constant([1, 0, 3])),
)),
"correlates":
tfgnn.EdgeSet.from_fields(
features={},
sizes=tf.constant([1, 0]),
adjacency=tfgnn.Adjacency.from_indices(
("topic", tf.constant([0])),
("topic", tf.constant([1])),
)),
"affiliated_with":
tfgnn.EdgeSet.from_fields(
features={},
sizes=tf.constant([2, 2]),
adjacency=tfgnn.Adjacency.from_indices(
("institution", tf.constant([0, 0, 1, 1])),
("author", tf.constant([0, 1, 2, 3])),
)),
},
)
return graph
class ReloadModel(int, enum.Enum):
"""Controls how to reload a model for further testing after saving."""
SKIP = 0
SAVED_MODEL = 1
KERAS = 2
class GraphsageTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("MaxPooling", "max"),
("MaxNoInfPooling", "max_no_inf"),
("MeanPooling", "mean"))
def testPooling(self, reduce_type):
graph = _get_test_graph()
out_units = 1
conv = graph_sage.GraphSAGEPoolingConv(
receiver_tag=tfgnn.TARGET,
sender_node_feature=_FEATURE_NAME,
units=out_units,
hidden_units=out_units,
reduce_type=reduce_type)
_ = conv(graph, edge_set_name="written") # Build weights.
weights = {v.name: v for v in conv.trainable_weights}
self.assertLen(weights, 3)
source_node_dims = 3
weights["graph_sage_pooling_conv/dense/kernel:0"].assign([[1.]] *
source_node_dims)
weights["graph_sage_pooling_conv/dense/bias:0"].assign([0.])
weights["graph_sage_pooling_conv/dense_1/kernel:0"].assign([[1.]] *
out_units)
actual = conv(graph, edge_set_name="written")
expected_output_dict = {
"max":
tf.constant([
[6.],
[6.],
[tf.float32.min], # No neighbors.
[6.]
]),
"max_no_inf":
tf.constant([
[6.],
[6.],
[0.], # No neighbors.
[6.]
]),
"mean":
tf.constant([
[6.],
[6.],
[0.], # No neighbors.
[6.]
])
}
self.assertAllEqual(expected_output_dict[reduce_type], actual)
def testMeanAggregation(self):
graph = _get_test_graph()
out_units = 1
conv = graph_sage.GraphSAGEAggregatorConv(
receiver_tag=tfgnn.TARGET,
sender_node_feature=_FEATURE_NAME,
units=out_units)
_ = conv(graph, edge_set_name="written") # Build weights.
weights = {v.name: v for v in conv.trainable_weights}
self.assertLen(weights, 1)
source_node_dims = 3
weights["graph_sage_aggregator_conv/dense/kernel:0"].assign(
[[1.]] * source_node_dims)
actual = conv(graph, edge_set_name="written")
expected_output = tf.constant([
[6.],
[6.],
[0.], # No neighbors.
[6.]
])
self.assertAllEqual(expected_output, actual)
@parameterized.named_parameters(
("NoDropoutMeanAggKeras", 0.0, ReloadModel.KERAS),
("NoDropoutMeanAggSavedModel", 0.0, ReloadModel.SAVED_MODEL),
("DropoutMeanAggKeras", 0.9, ReloadModel.KERAS),
("DropoutMeanAggSavedModel", 0.9, ReloadModel.SAVED_MODEL))
def testDropoutFullModel(self, dropout_rate, reload_model):
tf.random.set_seed(0)
graph = _get_test_graph()
out_units = 30
layer = graph_sage.GraphSAGEGraphUpdate(
node_set_names={"topic"},
l2_normalize=False,
receiver_tag=tfgnn.TARGET,
reduce_type="mean",
use_pooling=False,
dropout_rate=dropout_rate,
units=out_units,
feature_name=_FEATURE_NAME)
_ = layer(graph)
weights = {v.name: v for v in layer.trainable_weights}
self.assertLen(weights, 3)
node_dims = 30
weights[
"graph_sage/node_set_update/graph_sage_aggregator_conv/dense/kernel:0"].assign(
tf.eye(node_dims))
weights[
"graph_sage/node_set_update/graph_sage_next_state/dense_1/kernel:0"].assign(
tf.eye(node_dims))
bias_shape = out_units
weights["graph_sage/node_set_update/graph_sage_next_state/bias:0"].assign(
[0.] * bias_shape)
inputs = tf.keras.layers.Input(type_spec=graph.spec)
outputs = layer(inputs)
model = tf.keras.Model(inputs, outputs)
if reload_model:
export_dir = os.path.join(self.get_temp_dir(), "dropout-model")
model.save(export_dir, include_optimizer=False)
if reload_model == ReloadModel.KERAS:
model = tf.keras.models.load_model(export_dir)
else:
model = tf.saved_model.load(export_dir)
# Without dropout the actual value is all 1s for both topic node vectors.
# One of the nodes doesn't have any incoming edges, so dropout is verified
# on the self node vector; the other node has one edge whose sender vector
# consists of 0s, so dropout is verified on the edge inputs only.
# Applying dropout value 0.9, max entry after scaling the vector inputs is:
# 1*1/(1-0.9) = 10.
def min_max(vector):
return [tf.reduce_min(vector), tf.reduce_max(vector)]
def get_topic_vectors(**kwargs):
out_gt = model(graph, **kwargs)
out_nodes = out_gt.node_sets["topic"][_FEATURE_NAME]
return out_nodes
self.assertAllEqual(
get_topic_vectors(training=False), [[1.] * node_dims, [1.] * node_dims])
if dropout_rate != 0.0:
topic_node_vectors = get_topic_vectors(training=True)
self.assertAllClose(min_max(topic_node_vectors[0]), [0., 10.])
self.assertAllClose(min_max(topic_node_vectors[1]), [0., 10.])
@parameterized.named_parameters(
("E2ENormalizeNoConcatPooling", True, "sum", True, ReloadModel.SKIP),
("E2ENormalizeNoConcatAgg", True, "sum", False, ReloadModel.SKIP),
("E2ENormalizeConcatPooling", True, "concat", True, ReloadModel.SKIP),
("E2ENormalizeConcatAgg", True, "concat", False, ReloadModel.SKIP),
("E2ENoNormalizeConcatPooling", False, "concat", True, ReloadModel.SKIP),
("E2ENoNormalizeConcatAgg", False, "concat", False, ReloadModel.SKIP),
("E2ENoNormalizeNoConcatPooling", False, "sum", True, ReloadModel.SKIP),
("E2ENoNormalizeNoConcatAgg", False, "sum", False, ReloadModel.SKIP),
("E2ELoadKerasPooling", True, "concat", True, ReloadModel.KERAS),
("E2ELoadSavedModelPooling", True, "concat", True,
ReloadModel.SAVED_MODEL))
def testFullModel(self, normalize, combine_type, use_pooling, reload_model):
graph = _get_test_graph()
out_units = 1
layer = graph_sage.GraphSAGEGraphUpdate(
node_set_names={"author"},
receiver_tag=tfgnn.TARGET,
reduce_type="mean",
use_pooling=use_pooling,
units=out_units,
hidden_units=out_units if use_pooling else None,
l2_normalize=normalize,
combine_type=combine_type,
feature_name=_FEATURE_NAME)
_ = layer(graph)
weights = {v.name: v for v in layer.trainable_weights}
if use_pooling:
self.assertLen(weights, 8)
else:
self.assertLen(weights, 4)
paper_node_dims = 3
institution_node_dims = 4
target_node_dims = 2
if use_pooling:
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense/kernel:0"].assign(
[[1.]] * paper_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense/bias:0"].assign(
[0.])
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_1/kernel:0"].assign(
[[1.]] * out_units)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_2/kernel:0"].assign(
[[1.]] * institution_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_2/bias:0"].assign(
[0.])
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_3/kernel:0"].assign(
[[1.]] * out_units)
weights[
"graph_sage/node_set_update/graph_sage_next_state/dense_4/kernel:0"].assign(
[[1.]] * target_node_dims)
else:
weights[
"graph_sage/node_set_update/graph_sage_aggregator_conv/dense/kernel:0"].assign(
[[1.]] * paper_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_aggregator_conv/dense_1/kernel:0"].assign(
[[1.]] * institution_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_next_state/dense_2/kernel:0"].assign(
[[1.]] * target_node_dims)
num_edge_type = 2
bias_shape = out_units if combine_type == "sum" else out_units * (
num_edge_type + 1)
weights["graph_sage/node_set_update/graph_sage_next_state/bias:0"].assign(
[0.] * bias_shape)
inputs = tf.keras.layers.Input(type_spec=graph.spec)
outputs = layer(inputs)
model = tf.keras.Model(inputs, outputs)
if reload_model:
export_dir = os.path.join(self.get_temp_dir(), "gsage-model")
model.save(export_dir, include_optimizer=False)
if reload_model == ReloadModel.KERAS:
model = tf.keras.models.load_model(export_dir)
else:
model = tf.saved_model.load(export_dir)
actual_graph = model(graph)
actual = actual_graph.node_sets["author"][_FEATURE_NAME]
# Maps normalize -> combine_type to the expected output states.
expected_outputs = {
True: {
"concat":
tf.constant([[
1. / math.sqrt(1**2 + 6**2 * 2),
6. / math.sqrt(1**2 + 6**2 * 2),
6. / math.sqrt(1**2 + 6**2 * 2)
],
[
2. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2)
],
[
1. / math.sqrt(1**2 + 0**2 + 6**2),
6. / math.sqrt(1**2 + 0**2 + 6**2),
0. / math.sqrt(1**2 + 0**2 + 6**2)
],
[
2. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2)
]]),
"sum":
tf.constant([[1.], [1.], [1.], [1.]])
},
False: {
"concat":
tf.constant([[1., 6., 6.], [2., 6., 6.], [1., 6., 0.],
[2., 6., 6.]]),
"sum":
tf.constant([[13.], [14.], [7.], [14.]])
}
}
self.assertAllClose(actual, expected_outputs[normalize][combine_type])
@parameterized.named_parameters(
("E2ELoadKerasMeanPool", "mean", True, ReloadModel.KERAS),
("E2ELoadKerasMeanAgg", "mean", False, ReloadModel.KERAS),
("E2ELoadKerasMaxPool", "max", True, ReloadModel.KERAS),
("E2ELoadKerasMaxAgg", "max", False, ReloadModel.KERAS),
("E2ELoadKerasMaxNoInfPool", "max_no_inf", True, ReloadModel.KERAS),
("E2ELoadKerasMaxNoInfAgg", "max_no_inf", False, ReloadModel.KERAS),
("E2ELoadSavedModelMaxPool", "max", True, ReloadModel.SAVED_MODEL),
("E2ELoadSavedModelMaxAgg", "max", False, ReloadModel.SAVED_MODEL),
("E2ELoadSavedModelMaxNoInfPool", "max_no_inf", True,
ReloadModel.SAVED_MODEL), ("E2ELoadSavedModelMaxNoInfAgg", "max_no_inf",
False, ReloadModel.SAVED_MODEL),
("E2ELoadSavedModelMeanPool", "mean", True, ReloadModel.SAVED_MODEL),
("E2ELoadSavedModelMeanAgg", "mean", False, ReloadModel.SAVED_MODEL))
def testModelLoad(self, reduce_operation, use_pooling, reload_model):
graph = _get_test_graph()
out_units = 1
layer = graph_sage.GraphSAGEGraphUpdate(
node_set_names={"author", "paper"},
receiver_tag=tfgnn.TARGET,
reduce_type=reduce_operation,
combine_type="concat",
use_pooling=use_pooling,
units=out_units,
hidden_units=out_units if use_pooling else None,
feature_name=_FEATURE_NAME)
_ = layer(graph)
weights = {v.name: v for v in layer.trainable_weights}
if use_pooling:
self.assertLen(weights, 8)
else:
self.assertLen(weights, 4)
paper_node_dims = 3
institution_node_dims = 4
target_node_dims = 2
if use_pooling:
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense/kernel:0"].assign(
[[1.]] * paper_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense/bias:0"].assign(
[0.])
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_1/kernel:0"].assign(
[[1.]] * out_units)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_2/kernel:0"].assign(
[[1.]] * institution_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_2/bias:0"].assign(
[0.])
weights[
"graph_sage/node_set_update/graph_sage_pooling_conv/dense_3/kernel:0"].assign(
[[1.]] * out_units)
weights[
"graph_sage/node_set_update/graph_sage_next_state/dense_4/kernel:0"].assign(
[[1.]] * target_node_dims)
else:
weights[
"graph_sage/node_set_update/graph_sage_aggregator_conv/dense/kernel:0"].assign(
[[1.]] * paper_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_aggregator_conv/dense_1/kernel:0"].assign(
[[1.]] * institution_node_dims)
weights[
"graph_sage/node_set_update/graph_sage_next_state/dense_2/kernel:0"].assign(
[[1.]] * target_node_dims)
num_edge_type = 2
bias_shape = out_units * (num_edge_type + 1)
weights["graph_sage/node_set_update/graph_sage_next_state/bias:0"].assign(
[0.] * bias_shape)
inputs = tf.keras.layers.Input(type_spec=graph.spec)
outputs = layer(inputs)
model = tf.keras.Model(inputs, outputs)
if reload_model:
export_dir = os.path.join(self.get_temp_dir(), "gsage-model")
model.save(export_dir, include_optimizer=False)
if reload_model == ReloadModel.KERAS:
model = tf.keras.models.load_model(export_dir)
else:
model = tf.saved_model.load(export_dir)
actual_graph = model(graph)
actual = actual_graph.node_sets["author"][_FEATURE_NAME]
expected = tf.constant([[
1. / math.sqrt(1**2 + 6**2 * 2), 6. / math.sqrt(1**2 + 6**2 * 2),
6. / math.sqrt(1**2 + 6**2 * 2)
],
[
2. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2)
],
[
1. / math.sqrt(1**2 + 0**2 + 6**2),
6. / math.sqrt(1**2 + 0**2 + 6**2),
0. / math.sqrt(1**2 + 0**2 + 6**2)
],
[
2. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2),
6. / math.sqrt(2**2 + 6**2 * 2)
]])
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
("E2ELoadKerasGCNConv", ReloadModel.KERAS),
("E2ELoadSavedModelGCNConv", ReloadModel.SAVED_MODEL))
def testGCNConvolutionModelLoad(self, reload_model):
graph = _get_test_graph()
message_units = 1
conv = graph_sage.GCNGraphSAGENodeSetUpdate(
edge_set_names=["written", "affiliated_with"],
receiver_tag=tfgnn.TARGET,
self_node_feature=_FEATURE_NAME,
sender_node_feature=_FEATURE_NAME,
units=message_units,
use_bias=True)
layer = tfgnn.keras.layers.GraphUpdate(node_sets={"author": conv})
_ = layer(graph) # Build weights.
weights = {v.name: v for v in layer.trainable_weights}
self.assertLen(weights, 4)
paper_feature_dim = 3
institution_feature_dim = 4
author_feature_dim = 2
weights["graph_update/graph_sage_gcn_update/dense/kernel:0"].assign(
[[1.0]] * paper_feature_dim)
weights["graph_update/graph_sage_gcn_update/dense_1/kernel:0"].assign(
[[1.0]] * institution_feature_dim)
weights["graph_update/graph_sage_gcn_update/dense_2/kernel:0"].assign(
[[1.0]] * author_feature_dim)
weights["bias:0"].assign([0.] * message_units)
inputs = tf.keras.layers.Input(type_spec=graph.spec)
outputs = layer(inputs)
model = tf.keras.Model(inputs, outputs)
if reload_model:
export_dir = os.path.join(self.get_temp_dir(), "gsage-model")
model.save(export_dir, include_optimizer=False)
if reload_model == ReloadModel.KERAS:
model = tf.keras.models.load_model(export_dir)
else:
model = tf.saved_model.load(export_dir)
actual_graph = model(graph)
actual = actual_graph.node_sets["author"]
expected_output = tf.constant([[4.3333335], [4.6666665], [3.5],
[4.6666665]])
self.assertAllEqual(expected_output, actual[_FEATURE_NAME])
@parameterized.named_parameters(("WithSelfLoop", True), ("NoSelfLoop", False))
def testGCNConvolutionSharedWeights(self, add_self_loop):
graph = _get_test_graph()
message_units = 1
conv = graph_sage.GCNGraphSAGENodeSetUpdate(
edge_set_names=["correlates"],
receiver_tag=tfgnn.TARGET,
self_node_feature=_FEATURE_NAME,
sender_node_feature=_FEATURE_NAME,
units=message_units,
use_bias=True,
share_weights=True,
add_self_loop=add_self_loop)
# Repository: santiagofdezg/textclassifier
import gensim
from gensim.models.doc2vec import TaggedDocument
from classifierlib.datasets import Reuters
# from tqdm import tqdm
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
import time
from joblib import dump, load
stop_words = stopwords.words("english")
def tokenize(document):
# Convert a document into a list of lowercase tokens, ignoring tokens
# that are too short
words = gensim.utils.simple_preprocess(document, min_len=3)
# Remove the stop words and stem the final words
stemmer = SnowballStemmer('english')
words = [stemmer.stem(word) for word in words if word not in stop_words]
# Remove no-alphabetic characters
f = re.compile('[a-zA-Z]+')
filtered_words = list(filter(lambda word: f.match(word), words))
return filtered_words
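# Illustrative example of what tokenize() produces (the exact tokens depend on
# the NLTK stop word list and the Snowball stemmer):
#   tokenize("The cats are running quickly")
#   -> ['cat', 'run', 'quick']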
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=100)) for doc in tagged_docs])
# return targets, regressors
# print('## VERSION: DBOW_100epochs_1000vsize')
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=1000, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=100)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_100epochs_1000vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_100epochs_1000vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_100epochs_1000vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_100epochs_800vsize')
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=800, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=100)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_100epochs_800vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_100epochs_800vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_100epochs_800vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_100epochs_600vsize')
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=600, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=100)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_100epochs_600vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_100epochs_600vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_100epochs_600vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_70epochs_1000vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=70)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=1000, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=70)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_70epochs_1000vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_70epochs_1000vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_70epochs_1000vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_50epochs_600vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=50)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=600, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=50)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_50epochs_600vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_50epochs_600vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_50epochs_600vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_40epochs_600vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=40)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=600, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=40)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_40epochs_600vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_40epochs_600vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_40epochs_600vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_30epochs_600vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=30)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=600, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=30)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_30epochs_600vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_30epochs_600vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_30epochs_600vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_25epochs_600vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=25)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=600, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=25)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_25epochs_600vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
# Y_train_new, X_train_new = feature_vector(model, train_tagged)
# Y_test_new, X_test_new = feature_vector(model, test_tagged)
# end = time.time()
# print(end - start)
#
# print('# Saving feature vectors')
# with open('./Y_train-X_train--feature_vector--DBOW_25epochs_600vsize', 'wb') as f:
# dump((Y_train_new, X_train_new), f)
#
# with open('./Y_test-X_test--feature_vector--DBOW_25epochs_600vsize', 'wb') as f:
# dump((Y_test_new, X_test_new), f)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# print('## VERSION: DBOW_30epochs_700vsize')
#
# def feature_vector(model, tagged_docs):
# targets, regressors = zip(*[(doc.tags, model.infer_vector(doc.words, steps=30)) for doc in tagged_docs])
# return targets, regressors
#
# print('# Loading dataset')
# X_train, X_test, Y_train, Y_test = Reuters.get_random_split()
#
# train = list(zip(X_train,Y_train))
# test = list(zip(X_test,Y_test))
#
# print('# Transform to TaggedDocument')
# train_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in train]
# test_tagged = [TaggedDocument(words=tokenize(doc[0]), tags=doc[1]) for doc in test]
#
# model = gensim.models.doc2vec.Doc2Vec(dm=0, vector_size=700, min_count=1, workers=3)
#
# model.build_vocab(train_tagged)
#
# print('# Training')
# start = time.time()
# # Raise a warning that this may take a long time and that there is no verbose output
# model.train(train_tagged, total_examples=len(train_tagged), epochs=30)
# end = time.time()
# print(end - start)
#
# print('# Saving model')
# model.save('./doc2vec_DBOW_30epochs_700vsize')
#
# print('# Obtaining feature vectors of dataset')
# start = time.time()
"""
This environment provides an interface to software management features
through the C8y REST API.
With these we can emulate a user doing operations in the C8y UI.
They are rather slow as they use the complete chain from end to end.
WARNING: Handle with care!!!
The C8YDEVICEID setting determines on which device these tests install and remove packages.
These tests are disabled by default as they install and uninstall packages.
It is better to run them in a VM or a container.
To run the tests:
pysys.py run 'sm-apt*' -XmyPlatform='smcontainer'
To run the tests with another tenant url:
pysys.py run 'sm-apt*' -XmyPlatform='smcontainer' -Xtenant_url='thin-edge-io.eu-latest.cumulocity.com'
TODO: Avoid hardcoded ids
TODO: Get software package ids from c8y
TODO: Add management for package creation and removal for c8y
-> Maybe as separate python module to access c8y
To override the hardcoded software id database you can use C8YSWREPO (format: JSON):
export C8YSWREPO='{
"asciijump": "5475278",
"robotfindskitten": "5473003",
"squirrel3": "5474871",
"rolldice": "5445239",
"moon-buggy": "5439204",
"apple": "5495053",
"banana": "5494888",
"cherry": "5495382",
"watermelon": "5494510" }'
To remove the override:
unset C8YSWREPO
"""
from environment_c8y import EnvironmentC8y
import base64
import time
import json
import platform
import requests
import subprocess
import sys
import pysys
from pysys.basetest import BaseTest
sys.path.append("./environments")
def is_timezone_aware(stamp):
"""determine if object is timezone aware or naive
See also: https://docs.python.org/3/library/datetime.html?highlight=tzinfo#determining-if-an-object-is-aware-or-naive
"""
return stamp.tzinfo is not None and stamp.tzinfo.utcoffset(stamp) is not None
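# Illustrative usage (standard-library behaviour; not used directly by the
# tests below):
#   from datetime import datetime, timezone
#   is_timezone_aware(datetime.now())              # False: naive timestamp
#   is_timezone_aware(datetime.now(timezone.utc))  # True: aware timestamp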
class SoftwareManagement(EnvironmentC8y):
"""Base class for software management tests"""
# Static class member that can be overridden by a command line argument
# E.g.:
# pysys.py run 'sm-apt*' -XmyPlatform='smcontainer'
myPlatform = None
# Static class member that can be overridden by a command line argument
# E.g.:
# pysys.py run 'sm-fake*' -Xfakeplugin='fakeplugin'
# Use it only when you have set up the dummy_plugin to install fruits
fakeplugin = None
# Static class member that can be overridden by a command line argument
# E.g.:
# pysys.py run 'sm-docker*' -Xdockerplugin='dockerplugin'
# Use it only when you have set up the docker_plugin
dockerplugin = None
tenant_url = "thin-edge-io.eu-latest.cumulocity.com"
def setup(self):
"""Setup Environment"""
if self.myPlatform != "smcontainer":
self.skipTest(
"Testing the apt plugin is not supported on this platform."+\
"Use parameter -XmyPlatform='smcontainer' to enable it")
# Database with package IDs taken from the thin-edge.io tenant
# TODO make this somehow not hard-coded
self.pkg_id_db = {
# apt
"asciijump": "5475369",
"robotfindskitten": "5474869",
"squirrel3": "5475279",
"rolldice": "5152439",
"moon-buggy": "5439204",
# fake plugin
"apple": "5495053",
"banana": "5494888",
"cherry": "5495382",
"watermelon": "5494510",
# # docker plugin
"registry": "8018911",
"hello-world": "8021526",
"docker/getting-started": "8021973", # warning not available for arm
"alpine" : "7991792",
}
if self.project.c8yswrepo:
self.pkg_id_db = json.loads(self.project.c8yswrepo)
self.log.info("Using sw id database: %s"% self.pkg_id_db)
super().setup()
self.addCleanupFunction(self.mysmcleanup)
tenant = self.project.tenant
user = self.project.c8yusername
password = <PASSWORD>
# TODO are we doing something wrong while requesting?
self.timeout_req = 80 # seconds, got timeout with 60s
# Place to save the id of the operation that we started.
# This is suitable for one operation and not for multiple ones running
# at the same time.
self.operation_id = None
auth = bytes(f"{tenant}/{user}:{password}", "utf-8")
self.header = {
b"Authorization": b"Basic " + base64.b64encode(auth),
b"content-type": b"application/json",
b"Accept": b"application/json",
}
# Make sure we have no last operations pending or executing
self.wait_until_end()
def trigger_action(self, package_name, package_id, version, url, action):
"""Trigger a installation or de-installation of a package.
package_id is the id that is automatically assigned by C8y.
TODO Improve repository ID management to avoid hardcoded IDs
"""
self.trigger_action_json(
[
{
"name": package_name,
"id": package_id,
"version": version,
"url": url,
"action": action,
}
]
)
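# Hedged usage sketch: the version string below is a placeholder, and the
# package id is only valid for the hardcoded database defined in setup().
#   self.trigger_action(
#       package_name="rolldice",
#       package_id=self.pkg_id_db["rolldice"],
#       version="1.0",
#       url="",
#       action="install",
#   )
#   self.wait_until_succcess()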
def trigger_action_json(self, json_content):
"""Take an actions description that is then forwarded to c8y.
So far, no checks are done on the json_content.
TODO Improve repository ID management to avoid hardcoded IDs
"""
url = f"https://{self.tenant_url}/devicecontrol/operations"
payload = {
"deviceId": self.project.deviceid,
"description": f"Apply software changes, triggered from PySys: {json_content}",
"c8y_SoftwareUpdate": json_content,
}
req = requests.post(
url, json=payload, headers=self.header, timeout=self.timeout_req
)
jresponse = json.loads(req.text)
self.log.info("Response status: %s", req.status_code)
self.log.info("Response to action: %s", json.dumps(jresponse, indent=4))
self.operation = jresponse
self.operation_id = jresponse.get("id")
if not self.operation_id:
raise SystemError("field id is missing in response")
self.log.info("Started operation: %s", self.operation)
req.raise_for_status()
def is_status_fail(self):
"""Check if the current status is a fail"""
if self.operation_id:
return self.check_status_of_operation("FAILED")
return self.check_status_of_last_operation("FAILED")
def is_status_success(self):
"""Check if the current status is a success"""
if self.operation_id:
return self.check_status_of_operation("SUCCESSFUL")
return self.check_status_of_last_operation("SUCCESSFUL")
def get_status_of_last_operation(self):
"""Returns the status of the last operation:
"FAILED" or "SUCCESSFUL".
When there is no last operation listed in C8y, return "NOOPFOUND".
Warning: installation failures have been observed at the beginning of
the list regardless of whether we revert the sort order or not.
"""
params = {
"deviceId": self.project.deviceid,
"pageSize": 1,
# To get the latest records first
"revert": "true",
# By using the date we make sure that the request comes
# sorted, otherwise the revert does not seem to have an
# effect. The lower boundary seems to be ok so we just
# use the beginning of the epoch same as the c8y ui.
"dateFrom": "1970-01-01T00:00:00.000Z",
}
url = f"https://{self.tenant_url}/devicecontrol/operations"
req = requests.get(
url, params=params, headers=self.header, timeout=self.timeout_req
)
req.raise_for_status()
self.log.debug("Final URL of the request: %s", req.url)
jresponse = json.loads(req.text)
if not jresponse["operations"]:
# This can happen e.g. after a weekend when C8y deleted the operations
self.log.error("No operations found, assuming it passed")
return "NOOPFOUND"
# Get the last operation, when we set "revert": "true" we can read it
# from the beginning of the list
operations = jresponse.get("operations")
if not operations or len(operations) != 1:
raise SystemError("field operations is missing in response or to long")
operation = operations[0]
# Observed states: PENDING, SUCCESSFUL, EXECUTING, FAILED
self.log.info("State of current operation: %s", operation.get("status"))
# In this case we just dump everything to see what is going on
if operation.get("status") in ["FAILED", "PENDING"]:
self.log.debug("Final URL of the request: %s", req.url)
self.log.debug(
"State of current operation: %s", json.dumps(operation, indent=4)
)
if not operation.get("status"):
raise SystemError("No valid field status in response")
return operation.get("status")
def check_status_of_last_operation(self, status):
"""Check if the last operation is equal to status.
If none was found, return true
"""
current_status = self.get_status_of_last_operation()
if current_status == "NOOPFOUND":
return True
return current_status == status
def get_status_of_operation(self):
"""Get the last operation"""
if not self.operation_id:
raise SystemError("No valid operation ID available")
url = f"https://{self.tenant_url}/devicecontrol/operations/{self.operation_id}"
req = requests.get(url, headers=self.header, timeout=self.timeout_req)
req.raise_for_status()
operation = json.loads(req.text)
# Observed states: PENDING, SUCCESSFUL, EXECUTING, FAILED
self.log.info(
"State of operation %s : %s", self.operation_id, operation["status"]
)
if not operation.get("status"):
raise SystemError("No valid field status in response")
return operation.get("status")
def check_status_of_operation(self, status):
"""Check if the last operation is successfull"""
current_status = self.get_status_of_operation()
self.log.info("Expected status: %s, got status %s" %
(status, current_status))
return current_status == status
def wait_until_succcess(self):
"""Wait until c8y reports a success"""
self.wait_until_status("SUCCESSFUL")
def wait_until_fail(self):
"""Wait until c8y reports a fail"""
self.wait_until_status("FAILED")
def wait_until_end(self):
"""Wait until c8y reports a fail"""
self.wait_until_status("FAILED", "SUCCESSFUL")
def wait_until_status(self, status, status2=False):
"""Wait until c8y reports status or status2."""
poll_period = 2 # seconds
# Heuristic about how long to wait for an operation
if platform.machine() == "x86_64":
wait_time = int(90 / poll_period)
else:
wait_time = int(120 / poll_period)  # 120s, e.g. on the RPi
timeout = 0
# wait for some time to let c8y process a request until we can poll for it
time.sleep(poll_period)
while True:
if self.operation_id:
current_status = self.get_status_of_operation()
if current_status == status or current_status == status2:
# Invalidate the old operation
self.operation_id = None
break
elif current_status == "FAILED":
self.log.error("Stopping as the operation has failed")
raise SystemError("The operation has failed")
else:
current_status = self.get_status_of_last_operation()
if current_status == status or current_status == status2 or current_status == "NOOPFOUND":
# Invalidate the old operation
self.operation_id = None
break
time.sleep(poll_period)
timeout += 1
if timeout > wait_time:
raise SystemError(
"Timeout while waiting for status %s or %s" % (
status, status2)
)
def check_is_installed(self, package_name, version=None):
"""Check if a package is installed"""
url = f"https://{self.tenant_url}/inventory/managedObjects/{self.project.deviceid}"
req = requests.get(url, headers=self.header, timeout=self.timeout_req)
req.raise_for_status()
jresponse = json.loads(req.text)
ret = False
package_list = jresponse.get("c8y_SoftwareList")
for package in package_list:
if package.get("name") == package_name:
self.log.info("Package %s is installed", package_name)
# self.log.info(package)
if version:
if package.get("version") == version:
ret = True
break
raise SystemError("Wrong version | |
self.__default_allow_privilege_escalation = default_allow_privilege_escalation
self.__allow_privilege_escalation = (
allow_privilege_escalation
if allow_privilege_escalation is not None
else True
)
self.__allowed_host_paths = (
allowed_host_paths if allowed_host_paths is not None else []
)
self.__allowed_flex_volumes = (
allowed_flex_volumes if allowed_flex_volumes is not None else []
)
self.__allowed_csi_drivers = (
allowed_csi_drivers if allowed_csi_drivers is not None else []
)
self.__allowed_unsafe_sysctls = (
allowed_unsafe_sysctls if allowed_unsafe_sysctls is not None else []
)
self.__forbidden_sysctls = (
forbidden_sysctls if forbidden_sysctls is not None else []
)
self.__allowed_proc_mount_types = (
allowed_proc_mount_types if allowed_proc_mount_types is not None else []
)
self.__runtime_class = runtime_class
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
privileged = self.privileged()
check_type("privileged", privileged, Optional[bool])
if privileged: # omit empty
v["privileged"] = privileged
default_add_capabilities = self.default_add_capabilities()
check_type(
"default_add_capabilities",
default_add_capabilities,
Optional[List[k8sv1.Capability]],
)
if default_add_capabilities: # omit empty
v["defaultAddCapabilities"] = default_add_capabilities
required_drop_capabilities = self.required_drop_capabilities()
check_type(
"required_drop_capabilities",
required_drop_capabilities,
Optional[List[k8sv1.Capability]],
)
if required_drop_capabilities: # omit empty
v["requiredDropCapabilities"] = required_drop_capabilities
allowed_capabilities = self.allowed_capabilities()
check_type(
"allowed_capabilities",
allowed_capabilities,
Optional[List[k8sv1.Capability]],
)
if allowed_capabilities: # omit empty
v["allowedCapabilities"] = allowed_capabilities
volumes = self.volumes()
check_type("volumes", volumes, Optional[List[FSType]])
if volumes: # omit empty
v["volumes"] = volumes
host_network = self.host_network()
check_type("host_network", host_network, Optional[bool])
if host_network: # omit empty
v["hostNetwork"] = host_network
host_ports = self.host_ports()
check_type("host_ports", host_ports, Optional[List["HostPortRange"]])
if host_ports: # omit empty
v["hostPorts"] = host_ports
host_pid = self.host_pid()
check_type("host_pid", host_pid, Optional[bool])
if host_pid: # omit empty
v["hostPID"] = host_pid
host_ipc = self.host_ipc()
check_type("host_ipc", host_ipc, Optional[bool])
if host_ipc: # omit empty
v["hostIPC"] = host_ipc
se_linux = self.se_linux()
check_type("se_linux", se_linux, "SELinuxStrategyOptions")
v["seLinux"] = se_linux
run_as_user = self.run_as_user()
check_type("run_as_user", run_as_user, "RunAsUserStrategyOptions")
v["runAsUser"] = run_as_user
run_as_group = self.run_as_group()
check_type("run_as_group", run_as_group, Optional["RunAsGroupStrategyOptions"])
if run_as_group is not None: # omit empty
v["runAsGroup"] = run_as_group
supplemental_groups = self.supplemental_groups()
check_type(
"supplemental_groups",
supplemental_groups,
"SupplementalGroupsStrategyOptions",
)
v["supplementalGroups"] = supplemental_groups
fs_group = self.fs_group()
check_type("fs_group", fs_group, "FSGroupStrategyOptions")
v["fsGroup"] = fs_group
read_only_root_filesystem = self.read_only_root_filesystem()
check_type(
"read_only_root_filesystem", read_only_root_filesystem, Optional[bool]
)
if read_only_root_filesystem: # omit empty
v["readOnlyRootFilesystem"] = read_only_root_filesystem
default_allow_privilege_escalation = self.default_allow_privilege_escalation()
check_type(
"default_allow_privilege_escalation",
default_allow_privilege_escalation,
Optional[bool],
)
if default_allow_privilege_escalation is not None: # omit empty
v["defaultAllowPrivilegeEscalation"] = default_allow_privilege_escalation
allow_privilege_escalation = self.allow_privilege_escalation()
check_type(
"allow_privilege_escalation", allow_privilege_escalation, Optional[bool]
)
if allow_privilege_escalation is not None: # omit empty
v["allowPrivilegeEscalation"] = allow_privilege_escalation
allowed_host_paths = self.allowed_host_paths()
check_type(
"allowed_host_paths", allowed_host_paths, Optional[List["AllowedHostPath"]]
)
if allowed_host_paths: # omit empty
v["allowedHostPaths"] = allowed_host_paths
allowed_flex_volumes = self.allowed_flex_volumes()
check_type(
"allowed_flex_volumes",
allowed_flex_volumes,
Optional[List["AllowedFlexVolume"]],
)
if allowed_flex_volumes: # omit empty
v["allowedFlexVolumes"] = allowed_flex_volumes
allowed_csi_drivers = self.allowed_csi_drivers()
check_type(
"allowed_csi_drivers",
allowed_csi_drivers,
Optional[List["AllowedCSIDriver"]],
)
if allowed_csi_drivers: # omit empty
v["allowedCSIDrivers"] = allowed_csi_drivers
allowed_unsafe_sysctls = self.allowed_unsafe_sysctls()
check_type(
"allowed_unsafe_sysctls", allowed_unsafe_sysctls, Optional[List[str]]
)
if allowed_unsafe_sysctls: # omit empty
v["allowedUnsafeSysctls"] = allowed_unsafe_sysctls
forbidden_sysctls = self.forbidden_sysctls()
check_type("forbidden_sysctls", forbidden_sysctls, Optional[List[str]])
if forbidden_sysctls: # omit empty
v["forbiddenSysctls"] = forbidden_sysctls
allowed_proc_mount_types = self.allowed_proc_mount_types()
check_type(
"allowed_proc_mount_types",
allowed_proc_mount_types,
Optional[List[k8sv1.ProcMountType]],
)
if allowed_proc_mount_types: # omit empty
v["allowedProcMountTypes"] = allowed_proc_mount_types
runtime_class = self.runtime_class()
check_type(
"runtime_class", runtime_class, Optional["RuntimeClassStrategyOptions"]
)
if runtime_class is not None: # omit empty
v["runtimeClass"] = runtime_class
return v
def privileged(self) -> Optional[bool]:
"""
privileged determines if a pod can request to be run as privileged.
"""
return self.__privileged
def default_add_capabilities(self) -> Optional[List[k8sv1.Capability]]:
"""
defaultAddCapabilities is the default set of capabilities that will be added to the container
unless the pod spec specifically drops the capability. You may not list a capability in both
defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
allowed, and need not be included in the allowedCapabilities list.
"""
return self.__default_add_capabilities
def required_drop_capabilities(self) -> Optional[List[k8sv1.Capability]]:
"""
requiredDropCapabilities are the capabilities that will be dropped from the container. These
are required to be dropped and cannot be added.
"""
return self.__required_drop_capabilities
def allowed_capabilities(self) -> Optional[List[k8sv1.Capability]]:
"""
allowedCapabilities is a list of capabilities that can be requested to add to the container.
Capabilities in this field may be added at the pod author's discretion.
You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
"""
return self.__allowed_capabilities
def volumes(self) -> Optional[List[FSType]]:
"""
volumes is a white list of allowed volume plugins. Empty indicates that
no volumes may be used. To allow all volumes you may use '*'.
"""
return self.__volumes
def host_network(self) -> Optional[bool]:
"""
hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
"""
return self.__host_network
def host_ports(self) -> Optional[List["HostPortRange"]]:
"""
hostPorts determines which host port ranges are allowed to be exposed.
"""
return self.__host_ports
def host_pid(self) -> Optional[bool]:
"""
hostPID determines if the policy allows the use of HostPID in the pod spec.
"""
return self.__host_pid
def host_ipc(self) -> Optional[bool]:
"""
hostIPC determines if the policy allows the use of HostIPC in the pod spec.
"""
return self.__host_ipc
def se_linux(self) -> "SELinuxStrategyOptions":
"""
seLinux is the strategy that will dictate the allowable labels that may be set.
"""
return self.__se_linux
def run_as_user(self) -> "RunAsUserStrategyOptions":
"""
runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
"""
return self.__run_as_user
def run_as_group(self) -> Optional["RunAsGroupStrategyOptions"]:
"""
RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
RunAsGroup feature gate to be enabled.
"""
return self.__run_as_group
def supplemental_groups(self) -> "SupplementalGroupsStrategyOptions":
"""
supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
"""
return self.__supplemental_groups
def fs_group(self) -> "FSGroupStrategyOptions":
"""
fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
"""
return self.__fs_group
def read_only_root_filesystem(self) -> Optional[bool]:
"""
readOnlyRootFilesystem when set to true will force containers to run with a read only root file
system. If the container specifically requests to run with a non-read only root file system
the PSP should deny the pod.
If set to false the container may run with a read only root file system if it wishes but it
will not be forced to.
"""
return self.__read_only_root_filesystem
def default_allow_privilege_escalation(self) -> Optional[bool]:
"""
defaultAllowPrivilegeEscalation controls the default setting for whether a
process can gain more privileges than its parent process.
"""
return self.__default_allow_privilege_escalation
def allow_privilege_escalation(self) -> Optional[bool]:
"""
allowPrivilegeEscalation determines if a pod can request to allow
privilege escalation. If unspecified, defaults to true.
"""
return self.__allow_privilege_escalation
def allowed_host_paths(self) -> Optional[List["AllowedHostPath"]]:
"""
allowedHostPaths is a white list of allowed host paths. Empty indicates
that all host paths may be used.
"""
return self.__allowed_host_paths
def allowed_flex_volumes(self) -> Optional[List["AllowedFlexVolume"]]:
"""
allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
is allowed in the "volumes" field.
"""
return self.__allowed_flex_volumes
def allowed_csi_drivers(self) -> Optional[List["AllowedCSIDriver"]]:
"""
AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.
"""
return self.__allowed_csi_drivers
def allowed_unsafe_sysctls(self) -> Optional[List[str]]:
"""
allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
Each entry is either a plain sysctl name or ends in "*" in which case it is considered
as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
Examples:
e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
"""
return self.__allowed_unsafe_sysctls
def forbidden_sysctls(self) -> Optional[List[str]]:
"""
forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
Each entry is either a plain sysctl name or ends in "*" in which case it is considered
as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
Examples:
e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
"""
return self.__forbidden_sysctls
def allowed_proc_mount_types(self) -> Optional[List[k8sv1.ProcMountType]]:
"""
AllowedProcMountTypes is a whitelist of allowed ProcMountTypes.
Empty or nil indicates that only the DefaultProcMountType may be used.
This requires the ProcMountType feature flag to be enabled.
"""
return self.__allowed_proc_mount_types
# Repository: UnoSD/pulumi
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from datetime import datetime
from typing import (
Callable,
Mapping,
Any,
List,
Optional
)
from ._stack_settings import StackSettings
from ._project_settings import ProjectSettings
from ._config import ConfigMap, ConfigValue
PulumiFn = Callable[[], None]
class StackSummary:
"""A summary of the status of a given stack."""
name: str
current: bool
update_in_progress: bool
last_update: Optional[datetime]
resource_count: Optional[int]
url: Optional[str]
def __init__(self,
name: str,
current: bool,
update_in_progress: bool = False,
last_update: Optional[datetime] = None,
resource_count: Optional[int] = None,
url: Optional[str] = None) -> None:
self.name = name
self.current = current
self.update_in_progress = update_in_progress
self.last_update = last_update
self.resource_count = resource_count
self.url = url
class WhoAmIResult:
"""The currently logged-in Pulumi identity."""
user: str
def __init__(self, user: str):
self.user = user
class PluginInfo:
name: str
kind: str
size: int
last_used_time: datetime
install_time: Optional[datetime]
version: Optional[str]
def __init__(self,
name: str,
kind: str,
size: int,
last_used_time: datetime,
install_time: Optional[datetime] = None,
version: Optional[str] = None) -> None:
self.name = name
self.kind = kind
self.size = size
self.install_time = install_time
self.last_used_time = last_used_time
self.version = version
class Deployment:
version: Optional[int]
deployment: Optional[Mapping[str, Any]]
def __init__(self, version: Optional[int] = None, deployment: Optional[Mapping[str, Any]] = None) -> None:
self.version = version
self.deployment = deployment
def __repr__(self):
return f"Deployment(version={self.version!r}, deployment={self.deployment!r})"
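# Illustrative example (the deployment mapping is a placeholder; in practice it
# typically comes from the output of `pulumi stack export`):
#   snapshot = Deployment(version=3, deployment={"resources": []})
#   print(snapshot)  # Deployment(version=3, deployment={'resources': []})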
class Workspace(ABC):
"""
Workspace is the execution context containing a single Pulumi project, a program, and multiple stacks.
Workspaces are used to manage the execution environment, providing various utilities such as plugin
installation, environment configuration ($PULUMI_HOME), and creation, deletion, and listing of Stacks.
"""
work_dir: str
"""
The working directory to run Pulumi CLI commands
"""
pulumi_home: Optional[str]
"""
The directory override for CLI metadata if set.
This customizes the location of $PULUMI_HOME where metadata is stored and plugins are installed.
"""
secrets_provider: Optional[str]
"""
The secrets provider to use for encryption and decryption of stack secrets.
See: https://www.pulumi.com/docs/intro/concepts/config/#available-encryption-providers
"""
program: Optional[PulumiFn]
"""
The inline program `PulumiFn` to be used for Preview/Update operations if any.
If none is specified, the stack will refer to ProjectSettings for this information.
"""
env_vars: Mapping[str, str] = {}
"""
Environment values scoped to the current workspace. These will be supplied to every Pulumi command.
"""
@abstractmethod
def project_settings(self) -> ProjectSettings:
"""
Returns the settings object for the current project if any.
:returns: ProjectSettings
"""
pass
@abstractmethod
def save_project_settings(self, settings: ProjectSettings) -> None:
"""
Overwrites the settings object in the current project.
There can only be a single project per workspace. Fails if the new project name does not match the old one.
:param settings: The project settings to save.
"""
pass
@abstractmethod
def stack_settings(self, stack_name: str) -> StackSettings:
"""
Returns the settings object for the stack matching the specified stack name if any.
:param stack_name: The name of the stack.
:return: StackSettings
"""
pass
@abstractmethod
def save_stack_settings(self, stack_name: str, settings: StackSettings) -> None:
"""
Overwrites the settings object for the stack matching the specified stack name.
:param stack_name: The name of the stack.
:param settings: The stack settings to save.
"""
pass
@abstractmethod
def serialize_args_for_op(self, stack_name: str) -> List[str]:
"""
A hook to provide additional args to CLI commands before they are executed.
Provided with stack name, returns a list of args to append to an invoked command ["--config=...", ]
LocalWorkspace does not utilize this extensibility point.
:param stack_name: The name of the stack.
"""
pass
@abstractmethod
def post_command_callback(self, stack_name: str) -> None:
"""
A hook executed after every command. Called with the stack name.
An extensibility point to perform workspace cleanup (CLI operations may create/modify a Pulumi.stack.yaml)
LocalWorkspace does not utilize this extensibility point.
:param stack_name: The name of the stack.
"""
pass
@abstractmethod
def get_config(self, stack_name: str, key: str) -> ConfigValue:
"""
Returns the value associated with the specified stack name and key,
scoped to the Workspace.
:param stack_name: The name of the stack.
:param key: The key for the config item to get.
:returns: ConfigValue
"""
pass
@abstractmethod
def get_all_config(self, stack_name: str) -> ConfigMap:
"""
Returns the config map for the specified stack name, scoped to the current Workspace.
:param stack_name: The name of the stack.
:returns: ConfigMap
"""
pass
@abstractmethod
def set_config(self, stack_name: str, key: str, value: ConfigValue) -> None:
"""
Sets the specified key-value pair on the provided stack name.
:param stack_name: The name of the stack.
:param key: The config key to add.
:param value: The config value to add.
"""
pass
@abstractmethod
def set_all_config(self, stack_name: str, config: ConfigMap) -> None:
"""
Sets all values in the provided config map for the specified stack name.
:param stack_name: The name of the stack.
:param config: A mapping of key to ConfigValue to set to config.
"""
pass
@abstractmethod
def remove_config(self, stack_name: str, key: str) -> None:
"""
Removes the specified key-value pair on the provided stack name.
:param stack_name: The name of the stack.
:param key: The key to remove from config.
"""
pass
@abstractmethod
def remove_all_config(self, stack_name: str, keys: List[str]) -> None:
"""
Removes all values in the provided key list for the specified stack name.
:param stack_name: The name of the stack.
:param keys: The keys to remove from config.
"""
pass
@abstractmethod
def refresh_config(self, stack_name: str) -> None:
"""
Gets and sets the config map used with the last update for the stack matching the given stack name.
:param stack_name: The name of the stack.
"""
pass
@abstractmethod
def who_am_i(self) -> WhoAmIResult:
"""
Returns the currently authenticated user.
:returns: WhoAmIResult
"""
pass
@abstractmethod
def stack(self) -> Optional[StackSummary]:
"""
Returns a summary of the currently selected stack, if any.
:returns: Optional[StackSummary]
"""
pass
@abstractmethod
def create_stack(self, stack_name: str) -> None:
"""
Creates and sets a new stack with the stack name, failing if one already exists.
:param str stack_name: The name of the stack to create
:returns: None
:raises CommandError: Raised if a stack with the same name exists.
"""
pass
@abstractmethod
def select_stack(self, stack_name: str) -> None:
"""
Selects and sets an existing stack matching the given stack name, failing if none exists.
:param stack_name: The name of the stack to select
:returns: None
:raises CommandError: Raised if no matching stack exists.
"""
pass
@abstractmethod
def remove_stack(self, stack_name: str) -> None:
"""
Deletes the stack and all associated configuration and history.
:param stack_name: The name of the stack to remove
"""
pass
@abstractmethod
def list_stacks(self) -> List[StackSummary]:
"""
Returns all Stacks created under the current Project.
This queries the underlying backend and may return stacks that are not present in the Workspace
(as Pulumi.<stack>.yaml files).
:returns: List[StackSummary]
"""
pass
@abstractmethod
def install_plugin(self, name: str, version: str, kind: str = "resource") -> None:
"""
Installs a plugin in the Workspace, for example to use cloud providers like AWS or GCP.
:param name: The name of the plugin to install.
:param version: The version to install.
:param kind: The kind of plugin.
"""
pass
@abstractmethod
def remove_plugin(self,
name: Optional[str] = None,
version_range: Optional[str] = None,
kind: str = "resource") -> None:
"""
Removes a plugin from the Workspace matching the specified name and version.
:param name: The name of the plugin to remove.
:param version_range: The version range to remove.
:param kind: The kind of plugin.
"""
pass
@abstractmethod
def list_plugins(self) -> List[PluginInfo]:
"""
Returns a list of all plugins installed in the Workspace.
:returns: List[PluginInfo]
"""
pass
@abstractmethod
def export_stack(self, stack_name: str) -> Deployment:
"""
Exports the deployment state of the stack matching the given name.
This can be combined with ImportStack to edit a stack's state.
"""
This contains wrapper functions that simplify plotting raster
and vector data for publication-ready figures.
The documentation of the examples can be found here:
https://lsdtopotools.github.io/LSDTopoTools_ChiMudd2014/
<NAME> and <NAME>, January 2018
Released under GPL3
"""
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib
from matplotlib import rcParams
"""
IMPORTANT: You must call this function from a lower-level directory
where both LSDPlottingTools and LSDMapFigure are in the python path!
That is, it will not work if you call it from outside the directory structure.
"""
import LSDPlottingTools as LSDP
import LSDPlottingTools.LSDMap_PointTools as LSDMap_PD
from LSDMapFigure.PlottingRaster import MapFigure
import LSDMapFigure.PlottingHelpers as PlotHelp
import LSDPlottingTools.LSDMap_ChiPlotting as LSDCP
#import LSDPlottingTools.adjust_text
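# Example call (illustrative only; the directory, prefix, and csv filename are
# placeholders rather than files shipped with this package):
#
# PrintChiChannels("/path/to/data/", "my_DEM", "my_DEM_MChiSegmented.csv",
#                  cmap="viridis", cbar_loc="right",
#                  plotting_column="source_key", fig_format="png", dpi=250)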
def PrintChiChannels(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "jet", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, out_fname_prefix = ""):
"""
This function prints a channel map over a hillshade.
Args:
DataDirectory (str): the data directory with the m/n csv files
fname_prefix (str): The prefix for the m/n csv files
ChannelFileName (str): The name of the channel file (a csv) without path but with extension
add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
cmap (str or colourmap): The colourmap to use for the plot
cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom. The colourbar will show the elevation.
If you want only a hillshade set to none and the cmap to "gray"
size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
fig_format (str): An image format. png, pdf, eps, svg all valid
dpi (int): The dots per inch of the figure
plotting_column (str): the name of the column to plot
discrete_colours (bool): if true use a discrete colourmap
NColours (int): the number of colours to cycle through when making the colourmap
out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
Returns:
Shaded relief plot with the channels coloured by a plotting column designated by the plotting_column keyword. Uses a colourbar to show each basin
Author: SMM
"""
# specify the figure size and format
# set figure sizes based on format
if size_format == "geomorphology":
fig_size_inches = 6.25
elif size_format == "big":
fig_size_inches = 16
else:
fig_size_inches = 4.92126
ax_style = "Normal"
# Get the filenames you want
BackgroundRasterName = fname_prefix+"_hs.bil"
DrapeRasterName = fname_prefix+".bil"
chi_csv_fname = DataDirectory+ChannelFileName
thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)
# clear the plot
plt.clf()
# set up the base image and the map
MF = MapFigure(BackgroundRasterName, DataDirectory, coord_type="UTM_km",colourbar_location = "None")
MF.add_drape_image(DrapeRasterName,DataDirectory,colourmap = "gray", alpha = 0.6)
MF.add_point_data(thisPointData,column_for_plotting = plotting_column,this_colourmap = cmap,
scale_points = True,column_for_scaling = "drainage_area",
scaled_data_in_log = True,
max_point_size = 5, min_point_size = 1,discrete_colours = discrete_colours, NColours = NColours)
# Save the image
if len(out_fname_prefix) == 0:
ImageName = DataDirectory+fname_prefix+"_chi_channels."+fig_format
else:
ImageName = DataDirectory+out_fname_prefix+"_chi_channels."+fig_format
MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
def PrintChiChannelsAndBasins(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "jet", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, colour_log = True, colorbarlabel = "Colourbar", Basin_remove_list = [], Basin_rename_dict = {} , value_dict = {}, out_fname_prefix = "", show_basins = True, min_channel_point_size = 0.5, max_channel_point_size = 2):
"""
This function prints a channel map over a hillshade.
Args:
DataDirectory (str): the data directory with the m/n csv files
fname_prefix (str): The prefix for the m/n csv files
add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
cmap (str or colourmap): The colourmap to use for the plot
cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom. The colourbar will show the elevation.
If you want only a hillshade set to none and the cmap to "gray"
size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
fig_format (str): An image format. png, pdf, eps, svg all valid
dpi (int): The dots per inch of the figure
plotting_column (str): the name of the column to plot
discrete_colours (bool): if true use a discrete colourmap
NColours (int): the number of colours to cycle through when making the colourmap
colour_log (bool): If true the colours are in log scale
Basin_remove_list (list): A list containing either key or junction indices of basins you want to remove from plotting
Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
show_basins (bool): If true, plot the basins
min_channel_point_size (float): The minimum size of a channel point in points
max_channel_point_size (float): The maximum size of a channel point in points
Returns:
Shaded relief plot with the basins coloured by basin ID. Includes channels. These can be plotted by various metrics denoted by the plotting_column parameter.
Author: SMM
"""
# specify the figure size and format
# set figure sizes based on format
if size_format == "geomorphology":
fig_size_inches = 6.25
elif size_format == "big":
fig_size_inches = 16
else:
fig_size_inches = 4.92126
ax_style = "Normal"
# get the basin IDs to make a discrete colourmap for each ID
BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
basin_keys = list(BasinInfoDF['basin_key'])
basin_keys = [int(x) for x in basin_keys]
basin_junctions = list(BasinInfoDF['outlet_junction'])
basin_junctions = [float(x) for x in basin_junctions]
print ('Basin keys are: ')
print (basin_keys)
# going to make the basin plots - need to have bil extensions.
print("I'm going to make the basin plots. Your topographic data must be in ENVI bil format or I'll break!!")
# get the rasters
raster_ext = '.bil'
#BackgroundRasterName = fname_prefix+raster_ext
HillshadeName = fname_prefix+'_hs'+raster_ext
BasinsName = fname_prefix+'_AllBasins'+raster_ext
print (BasinsName)
Basins = LSDP.GetBasinOutlines(DataDirectory, BasinsName)
chi_csv_fname = DataDirectory+ChannelFileName
thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)
#thisPointData.ThinDataSelection("basin_key",[10])
thisPointData.selectValue("basin_key",value = Basin_remove_list, operator = "!=")
#print("The new point data is:")
#print(thisPointData.GetLongitude())
# clear the plot
plt.clf()
# set up the base image and the map
print("I am showing the basins without text labels.")
MF = MapFigure(HillshadeName, DataDirectory,coord_type="UTM_km", colourbar_location="None")
# This adds the basins
if show_basins:
MF.add_basin_plot(BasinsName,fname_prefix,DataDirectory, mask_list = Basin_remove_list, rename_dict = Basin_rename_dict, value_dict = value_dict, label_basins = add_basin_labels, show_colourbar = False,
colourmap = "gray")
if discrete_colours:
print("I am printing discrete colours.")
MF.add_point_data(thisPointData,column_for_plotting = plotting_column,
scale_points = True,column_for_scaling = "drainage_area", show_colourbar = True, colourbar_location = cbar_loc,
colorbarlabel = colorbarlabel, this_colourmap = cmap,
scaled_data_in_log = True,
max_point_size = max_channel_point_size, min_point_size = min_channel_point_size,zorder=10, colour_log = colour_log, discrete_colours = discrete_colours, NColours = NColours)
# Save the image
if len(out_fname_prefix) == 0:
ImageName = DataDirectory+fname_prefix+"_chi_channels_and_basins."+fig_format
else:
ImageName = DataDirectory+out_fname_prefix+"_chi_channels_and_basins."+fig_format
MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
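# Example call for the basin variant (illustrative only; placeholder data as
# above, "m_chi" is assumed to be a column in the csv, and basins 2 and 5 are
# dropped from the plot):
#
# PrintChiChannelsAndBasins("/path/to/data/", "my_DEM", "my_DEM_MChiSegmented.csv",
#                           plotting_column="m_chi", colour_log=True,
#                           Basin_remove_list=[2, 5], cbar_loc="bottom")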
def PrintChiCoordChannelsAndBasins(DataDirectory,fname_prefix, ChannelFileName, add_basin_labels = True, cmap = "cubehelix", cbar_loc = "right", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10, colour_log = True, colorbarlabel = "Colourbar", Basin_remove_list = [], Basin_rename_dict = {} , value_dict = {}, plot_chi_raster = False, out_fname_prefix = "", show_basins = True, min_channel_point_size = 0.5, max_channel_point_size = 2):
"""
This function prints a channel map over a hillshade.
Args:
DataDirectory (str): the data directory with the m/n csv files
fname_prefix (str): The prefix for the m/n csv files
add_basin_labels (bool): If true, label the basins with text. Otherwise use a colourbar.
cmap (str or colourmap): The colourmap to use for the plot
cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom. The colourbar will show the elevation.
If you want only a hillshade set to none and the cmap to "gray"
size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
fig_format (str): An image format. png, pdf, eps, svg all valid
dpi (int): The dots per inch of the figure
#!/usr/bin/env python3
import math
import re
# change anything you want here
precision = 131
file_name_operations_h = r'operations.h'
# don't touch anything here
bits_per_word = 32
hex_digits_per_word = bits_per_word // 4
min_bignum_number_of_words = math.ceil(precision / bits_per_word)
max_bignum_number_of_words = math.ceil((2 * precision) / bits_per_word)
min_bit_length = min_bignum_number_of_words * bits_per_word
max_bit_length = max_bignum_number_of_words * bits_per_word
min_hex_length = min_bignum_number_of_words * hex_digits_per_word
max_hex_length = max_bignum_number_of_words * hex_digits_per_word
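# For reference, with the default precision of 131 bits the values above work
# out to:
#   min_bignum_number_of_words = 5, max_bignum_number_of_words = 9
#   min_bit_length = 160, max_bit_length = 288
#   min_hex_length = 40, max_hex_length = 72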
# The number of words needed to hold "precision" bits MUST be the same as the
# number of words needed to hold "precision + 1" bits. This is needed, because
# the addition of two n-bit numbers can give a (n + 1)-bit number, and our
# algorithms go by the principle that this (n + 1)-bit number is representable
# on the same number of bits as the n-bit number.
assert min_bignum_number_of_words == math.ceil((precision + 1) / bits_per_word)
# ATTENTION: all "_generic()" functions do NOT create macros. They just paste
# the assembly code that does the wanted operation for the specified operand
# precisions. All "_generic()" function also have curly brackets around them to
# avoid any scoping conflicts with the callers variables.
################################################################################
########################## GENERAL PURPOSE FUNCTIONS ###########################
################################################################################
def number_of_words_needed_for_precision(precision):
return math.ceil(precision / bits_per_word)
def add_res_precision(op1_precision, op2_precision):
res_precision = max(op1_precision, op2_precision) + 1
return res_precision
def mul_res_precision(op1_precision, op2_precision):
res_precision = op1_precision + op2_precision
# res_precision = op1_precision + op2_precision does not hold if one of the
# operands has precision 1. In that case, you need to reduce the precision
# of the result by 1 bit.
if (op1_precision == 1) or (op2_precision == 1):
res_precision -= 1
return res_precision
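# Minimal sanity checks of the helpers above (illustrative asserts only; they
# restate the rules documented in the comments and run when the script loads).
assert add_res_precision(precision, precision) == precision + 1
assert mul_res_precision(precision, precision) == 2 * precision
assert mul_res_precision(1, precision) == precision  # a 1-bit operand adds no bit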
################################################################################
################################## DOCUMENTATION ###############################
################################################################################
def bignum_macro():
doc = """////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// BIGNUM /////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// A bignum is represented as the following 2 data structures depending on its
// size:
// uint32_t[MIN_BIGNUM_NUMBER_OF_WORDS]
// uint32_t[MAX_BIGNUM_NUMBER_OF_WORDS]
// In the code of this project, there will be no "bignum" type. It will only be
// referred to as a uint32_t*. This is needed, because having direct access to
// the inner representation of a bignum will be useful for efficient operations
// such as matrix transpositions, ...
// The code of this project will not have a bignum's size as a parameter to
// functions. This value is accessible through the macros of this header file.
// A bignum is represented in "little endian" format: the most significant bits
// come in bignum[MAX_BIGNUM_NUMBER_OF_WORDS - 1] and the least significant bits
// come in bignum[0].
// A bignum's radix is 2^BITS_PER_WORD (words are 32 bits on our architecture).
// Assume you have an array of bignums "c", then the data would be conceptually
// represented as:
// c[0][0] c[0][1] ... c[0][H-1]
// c[1][0] c[1][1] ... c[1][H-1]
// . . . .
// . . . .
// . . . .
// c[N-1][0] c[N-1][1] ... c[N-1][H-1]
// with N = NUMBER_OF_BIGNUMS
// H = MIN_BIGNUM_NUMBER_OF_WORDS or MAX_BIGNUM_NUMBER_OF_WORDS
// A bignum is written "horizontally". The data on one "line" of a bignum
// consists of the MIN_BIGNUM_NUMBER_OF_WORDS or MAX_BIGNUM_NUMBER_OF_WORDS
// elements of the bignum.
// For memory alignment issues, an array of bignums will not be represented as a
// 2D array like uint32_t[N][H], but rather as a flattened 1D array like
// uint32_t[N * H]. Index manipulation will be needed to access the array like a
// 2D array.
// Assuming the human readable 2D standard array of bignums representation
// above, the following macro returns the index of the "j"th element of the
// "i"th bignum from a 1D array of size N * H (N and H defined as below).
// 0 <= i < N = NUMBER_OF_BIGNUMS
// 0 <= j < H = MIN_BIGNUM_NUMBER_OF_WORDS or MAX_BIGNUM_NUMBER_OF_WORDS
#define IDX(i, j, is_long_number) (((i) * ((is_long_number) ? (MAX_BIGNUM_NUMBER_OF_WORDS) : (MIN_BIGNUM_NUMBER_OF_WORDS))) + (j))
"""
doc_list = doc.split('\n')
for i in range(len(doc_list)):
doc_list[i] = doc_list[i].strip()
return doc_list
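# Worked example of the IDX macro emitted above (using the word counts derived
# for precision = 131, i.e. MIN = 5 and MAX = 9 words): element j = 3 of
# bignum i = 2 lives at
#   IDX(2, 3, false) = 2 * 5 + 3 = 13   (short bignum array)
#   IDX(2, 3, true)  = 2 * 9 + 3 = 21   (long bignum array)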
def coalesced_bignum_macro():
doc = """////////////////////////////////////////////////////////////////////////////////
////////////////////////////// COALESCED_BIGNUM ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// For efficient access to operands in gpu global memory, data needs to be
// accessed in a coalesced way. This is easily achieved by transposing an array
// of bignums to have the following representation:
// Assume you have an array of bignums "c", then the data in a coalesced array
// of bignums "c" would be:
// c[0][0] c[1][0] ... c[N-1][0]
// c[0][1] c[1][1] ... c[N-1][1]
// . . . .
// . . . .
// . . . .
// c[0][H-1] c[1][H-1] ... c[N-1][H-1]
// with N = NUMBER_OF_BIGNUMS
// H = MIN_BIGNUM_NUMBER_OF_WORDS or MAX_BIGNUM_NUMBER_OF_WORDS
// A bignum is written "vertically" instead of "horizontally" with this
// representation. Each column represents one bignum. The data on one "line" of
// a coalesced bignum is a mix of the j'th element of N different bignums.
// As for normal bignums, a coalesced array of bignums will be represented as a
// flattened 1D array like uint32_t[N * H], and index manipulation would be
// needed to access the array like a 2D array.
// Assuming the human readable 2D coalesced bignum array representation above,
// the following macro returns the index of the "i"th element of the "j"th
// bignum from a 1D array of size N * H (N and H defined as below).
// 0 <= i < H = MIN_BIGNUM_NUMBER_OF_WORDS or MAX_BIGNUM_NUMBER_OF_WORDS
// 0 <= j < N = NUMBER_OF_BIGNUMS
#define COAL_IDX(i, j) (((i) * (NUMBER_OF_BIGNUMS)) + (j))"""
doc_list = doc.split('\n')
for i in range(len(doc_list)):
doc_list[i] = doc_list[i].strip()
return doc_list
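# Worked example of the COAL_IDX macro emitted above (assuming, for
# illustration, NUMBER_OF_BIGNUMS = 4): word i = 3 of bignum j = 2 lives at
#   COAL_IDX(3, 2) = 3 * 4 + 2 = 14
# which is the same logical word that IDX(2, 3, ...) addresses in the
# non-coalesced layout.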
def add_doc():
doc = """
// Example of the schoolbook addition algorithm we will use if bignums were
// represented on 5 words:
//
// A[4]---A[3]---A[2]---A[1]---A[0]
// + B[4]---B[3]---B[2]---B[1]---B[0]
// ------------------------------------
// | A[4] | A[3] | A[2] | A[1] | A[0] |
// | + | + | + | + | + |
// | B[4] | B[3] | B[2] | B[1] | B[0] |
// | + | + | + | + | |
// | c_in | c_in | c_in | c_in | |
// ------------------------------------
// | C[4] | C[3] | C[2] | C[1] | C[0] |"""
doc_list = doc.split('\n')
for i in range(len(doc_list)):
doc_list[i] = doc_list[i].strip()
return doc_list
def sub_doc():
doc = """
// Example of the schoolbook subtraction algorithm we will use if bignums were
// represented on 5 words:
//
// A[4]---A[3]---A[2]---A[1]---A[0]
// - B[4]---B[3]---B[2]---B[1]---B[0]
// ------------------------------------
// | A[4] | A[3] | A[2] | A[1] | A[0] |
// | - | - | - | - | - |
// | B[4] | B[3] | B[2] | B[1] | B[0] |
// | - | - | - | - | |
// | b_in | b_in | b_in | b_in | |
// ------------------------------------
// | C[4] | C[3] | C[2] | C[1] | C[0] |"""
doc_list = doc.split('\n')
for i in range(len(doc_list)):
doc_list[i] = doc_list[i].strip()
return doc_list
def mul_doc():
doc = """
// Example of the schoolbook multiplication algorithm we will use if bignums
// were represented on 5 words:
//
// A[4]---A[3]---A[2]---A[1]---A[0]
// * B[4]---B[3]---B[2]---B[1]---B[0]
// -----------------------------------------------------------------------
// | | | | | | | | | B[0] * A[0] |
// | | | | | | | | B[0] * A[1] | |
// | | | | | | | B[0] * A[2] | | |
// | | | | | | B[0] * A[3] | | | |
// | | | | | B[0] * A[4] | | | | |
// | | | | | | | | B[1] * A[0] | |
// | | | | | | | B[1] * A[1] | | |
// | | | | | | B[1] * A[2] | | | |
// | | | | | B[1] * A[3] | | | | |
// | | | | B[1] * A[4] | | | | | |
// | | | | | | | B[2] * A[0] | | |
// | | | | | | B[2] * A[1] | | | |
// | | | | | B[2] * A[2] | | | | |
// | | | | B[2] * A[3] | | | | | |
// | | | B[2] * A[4] | | | | | | |
// | | | | | | B[3] * A[0] | | | |
// | | | | | B[3] * A[1] | | | | |
// | | | | B[3] * A[2] | | | | | |
// | | | B[3] * A[3] | | | | | | |
// | | B[3] * | |
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import platform
import shutil
import sys
import unittest
import IECore
import Gaffer
import GafferImage
class ImageWriterTest( unittest.TestCase ) :
__rgbFilePath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgb.100x100" )
__defaultFormatFile = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/defaultNegativeDisplayWindow.exr" )
__testDir = "/tmp/testImageWriter/"
__testFilePath = __testDir + "test"
__writeModes = [ ("scanline", 0), ("tile", 1) ]
# Test that we can select which channels to write.
def testChannelMask( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.__rgbFilePath+".exr" )
for name, mode in self.__writeModes :
testFile = self.__testFile( name, "RB", "exr" )
self.failIf( os.path.exists( testFile ) )
w = GafferImage.ImageWriter()
w["in"].setInput( r["out"] )
w["fileName"].setValue( testFile )
w["channels"].setValue( IECore.StringVectorData( ["R","B"] ) )
with Gaffer.Context() :
w.execute()
writerOutput = GafferImage.ImageReader()
writerOutput["fileName"].setValue( testFile )
channelNames = writerOutput["out"]["channelNames"].getValue()
self.failUnless( "R" in channelNames )
self.failUnless( not "G" in channelNames )
self.failUnless( "B" in channelNames )
self.failUnless( not "A" in channelNames )
def testAcceptsInput( self ) :
w = GafferImage.ImageWriter()
p = GafferImage.ImagePlug( direction = Gaffer.Plug.Direction.Out )
self.failIf( w['requirements']['requirement0'].acceptsInput( p ) )
self.failUnless( w["in"].acceptsInput( p ) )
def testTiffWrite( self ) :
self.__testExtension( "tif" )
@unittest.expectedFailure
def testJpgWrite( self ) :
self.__testExtension( "jpg", metadataToIgnore = [ "DocumentName", "HostComputer" ] )
@unittest.expectedFailure
def testTgaWrite( self ) :
self.__testExtension( "tga", metadataToIgnore = [ "compression", "HostComputer", "Software" ] )
def testExrWrite( self ) :
self.__testExtension( "exr" )
def testDefaultFormatWrite( self ) :
s = Gaffer.ScriptNode()
w = GafferImage.ImageWriter()
g = GafferImage.Grade()
s.addChild( g )
s.addChild( w )
testFile = self.__testFilePath + "testBlack.exr"
self.failIf( os.path.exists( testFile ) )
GafferImage.Format.setDefaultFormat( s, GafferImage.Format( IECore.Box2i( IECore.V2i( -7, -2 ), IECore.V2i( 22, 24 ) ), 1. ) )
w["in"].setInput( g["out"] )
w["fileName"].setValue( testFile )
w["channels"].setValue( IECore.StringVectorData( g["out"]["channelNames"].getValue() ) )
# Try to execute. In older versions of the ImageWriter this would throw an exception.
with s.context() :
w.execute()
self.failUnless( os.path.exists( testFile ) )
# Check the output.
expectedFile = self.__defaultFormatFile
expectedOutput = IECore.Reader.create( expectedFile ).read()
expectedOutput.blindData().clear()
writerOutput = IECore.Reader.create( testFile ).read()
writerOutput.blindData().clear()
self.assertEqual( writerOutput, expectedOutput )
# Write an RGBA image that has a data window to various supported formats and in both scanline and tile modes.
def __testExtension( self, ext, metadataToIgnore = [] ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.__rgbFilePath+".exr" )
w = GafferImage.ImageWriter()
for name, mode in self.__writeModes :
# Skip this test if the extension cannot write in tile mode.
if ( w["writeMode"].getFlags() & Gaffer.Plug.Flags.ReadOnly ) == True and name == "tile":
continue
testFile = self.__testFile( name, "RGBA", ext )
expectedFile = self.__rgbFilePath+"."+ext
self.failIf( os.path.exists( testFile ) )
# Setup the writer.
w["in"].setInput( r["out"] )
w["fileName"].setValue( testFile )
w["channels"].setValue( IECore.StringVectorData( r["out"]["channelNames"].getValue() ) )
if ( w["writeMode"].getFlags() & Gaffer.Plug.Flags.ReadOnly ) == False :
w["writeMode"].setValue( mode )
# Execute
with Gaffer.Context() :
w.execute()
self.failUnless( os.path.exists( testFile ) )
# Check the output.
expectedOutput = GafferImage.ImageReader()
expectedOutput["fileName"].setValue( expectedFile )
writerOutput = GafferImage.ImageReader()
writerOutput["fileName"].setValue( testFile )
expectedMetadata = expectedOutput["out"]["metadata"].getValue()
writerMetadata = writerOutput["out"]["metadata"].getValue()
# they were written at different times so
# we can't expect those values to match
if "DateTime" in writerMetadata :
expectedMetadata["DateTime"] = writerMetadata["DateTime"]
# the writer adds several standard attributes that aren't in the original file
expectedMetadata["Software"] = IECore.StringData( "Gaffer " + Gaffer.About.versionString() )
expectedMetadata["HostComputer"] = IECore.StringData( platform.node() )
expectedMetadata["Artist"] = IECore.StringData( os.getlogin() )
expectedMetadata["DocumentName"] = IECore.StringData( "untitled" )
# some formats support IPTC standards, and some of the standard metadata
# is translated automatically by OpenImageIO.
for key in writerMetadata.keys() :
if key.startswith( "IPTC:" ) :
expectedMetadata["IPTC:OriginatingProgram"] = expectedMetadata["Software"]
expectedMetadata["IPTC:Creator"] = expectedMetadata["Artist"]
break
# some input files don't contain all the metadata that the ImageWriter
# will create, and some output files don't support all the metadata
# that the ImageWriter attempt to create.
for name in metadataToIgnore :
if name in writerMetadata :
del writerMetadata[name]
if name in expectedMetadata :
del expectedMetadata[name]
self.assertEqual( expectedMetadata, writerMetadata )
op = IECore.ImageDiffOp()
res = op(
imageA = expectedOutput["out"].image(),
imageB = writerOutput["out"].image()
)
self.assertFalse( res.value )
def testOffsetDisplayWindowWrite( self ) :
s = Gaffer.ScriptNode()
c = GafferImage.Constant()
s.addChild( c )
with s.context() :
format = GafferImage.Format( IECore.Box2i( IECore.V2i( -20, -15 ), IECore.V2i( 29, 14 ) ), 1. )
GafferImage.Format.setDefaultFormat( s, format )
self.assertEqual( c["out"]["format"].getValue(), format )
testFile = self.__testFile( "offsetDisplayWindow", "RGBA", "exr" )
w = GafferImage.ImageWriter()
w["in"].setInput( c["out"] )
w["fileName"].setValue( testFile )
# Execute
with Gaffer.Context() :
w.execute()
self.failUnless( os.path.exists( testFile ) )
i = IECore.Reader.create( testFile ).read()
i.blindData().clear()
self.assertEqual( i.displayWindow, format.getDisplayWindow() )
def testHash( self ) :
c = Gaffer.Context()
c.setFrame( 1 )
c2 = Gaffer.Context()
c2.setFrame( 2 )
writer = GafferImage.ImageWriter()
# empty file produces no effect
self.assertEqual( writer["fileName"].getValue(), "" )
self.assertEqual( writer.hash( c ), IECore.MurmurHash() )
# no input image produces no effect
writer["fileName"].setValue( "/tmp/test.exr" )
self.assertEqual( writer.hash( c ), IECore.MurmurHash() )
# now there's a file and an image, so we get some output
constant = GafferImage.Constant()
writer["in"].setInput( constant["out"] )
self.assertNotEqual( writer.hash( c ), IECore.MurmurHash() )
# output doesn't vary by time yet
self.assertEqual( writer.hash( c ), writer.hash( c2 ) )
# now it does vary
writer["fileName"].setValue( "/tmp/test.#.exr" )
self.assertNotEqual( writer.hash( c ), writer.hash( c2 ) )
# also varies by input image
current = writer.hash( c )
constant['format'].setValue( GafferImage.Format( IECore.Box2i( IECore.V2i( -5 ), IECore.V2i( 5 ) ), 1. ) )
self.assertNotEqual( writer.hash( c ), current )
# other plugs matter too
current = writer.hash( c )
writer["writeMode"].setValue( 1 ) # tile mode
self.assertNotEqual( writer.hash( c ), current )
current = writer.hash( c )
writer["channels"].setValue( IECore.StringVectorData( [ "R" ] ) )
self.assertNotEqual( writer.hash( c ), current )
def testPassThrough( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferImage.Constant()
s["w"] = GafferImage.ImageWriter()
s["w"]["in"].setInput( s["c"]["out"] )
with s.context() :
ci = s["c"]["out"].image()
wi = s["w"]["out"].image()
self.assertEqual( ci, wi )
def testPassThroughSerialisation( self ) :
s = Gaffer.ScriptNode()
s["w"] = GafferImage.ImageWriter()
ss = s.serialise()
self.assertFalse( "out" in ss )
def testMetadataDocumentName( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.__rgbFilePath+".exr" )
w = GafferImage.ImageWriter()
testFile = self.__testFile( "metadataTest", "RGBA", "exr" )
self.failIf( os.path.exists( testFile ) )
w["in"].setInput( r["out"] )
w["fileName"].setValue( testFile )
with Gaffer.Context() :
w.execute()
self.failUnless( os.path.exists( testFile ) )
result = GafferImage.ImageReader()
result["fileName"].setValue( testFile )
self.assertEqual( result["out"]["metadata"].getValue()["DocumentName"].value, "untitled" )
# add the writer to a script
s = Gaffer.ScriptNode()
s.addChild( w )
with Gaffer.Context() :
w.execute()
result["refreshCount"].setValue( result["refreshCount"].getValue() + 1 )
self.assertEqual( result["out"]["metadata"].getValue()["DocumentName"].value, "untitled" )
# actually set the script's file name
s["fileName"].setValue( "/my/gaffer/script.gfr" )
with Gaffer.Context() :
w.execute()
result["refreshCount"].setValue( result["refreshCount"].getValue() + 1 )
self.assertEqual( result["out"]["metadata"].getValue()["DocumentName"].value, "/my/gaffer/script.gfr" )
def __testMetadataDoesNotAffectPixels( self, ext ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.__rgbFilePath+"."+ext )
m = GafferImage.ImageMetadata()
m["in"].setInput( r["out"] )
# let's tell a few lies
m["metadata"].addMember( "PixelAspectRatio", IECore.FloatData( 2 ) )
m["metadata"].addMember( "oiio:ColorSpace", IECore.StringData( "Rec709" ) )
m["metadata"].addMember( "oiio:BitsPerSample", IECore.IntData( 8 ) )
m["metadata"].addMember( "oiio:UnassociatedAlpha", IECore.IntData( 1 ) )
m["metadata"].addMember( "oiio:Gamma", IECore.FloatData( 0.25 ) )
testFile = self.__testFile( "metadataHasNoAffect", "RGBA", ext )
self.failIf( os.path.exists( testFile ) )
w = GafferImage.ImageWriter()
w["in"].setInput( m["out"] )
w["fileName"].setValue( testFile )
w["channels"].setValue( IECore.StringVectorData( m["out"]["channelNames"].getValue() ) )
testFile2 = self.__testFile( "noNewMetadata", "RGBA", ext )
self.failIf( os.path.exists( testFile2 ) )
w2 = GafferImage.ImageWriter()
w2["in"].setInput( r["out"] )
w2["fileName"].setValue( testFile2 )
w2["channels"].setValue( IECore.StringVectorData( r["out"]["channelNames"].getValue() ) )
inMetadata = w["in"]["metadata"].getValue()
self.assertEqual( inMetadata["PixelAspectRatio"], IECore.FloatData( 2 ) )
self.assertEqual( inMetadata["oiio:ColorSpace"], IECore.StringData( "Rec709" ) )
self.assertEqual( inMetadata["oiio:BitsPerSample"], IECore.IntData( 8 ) )
self.assertEqual( inMetadata["oiio:UnassociatedAlpha"], IECore.IntData( 1 ) )
self.assertEqual( inMetadata["oiio:Gamma"], IECore.FloatData( 0.25 ) )
with Gaffer.Context() :
w.execute()
w2.execute()
self.failUnless( os.path.exists( testFile ) )
self.failUnless( os.path.exists( testFile2 ) )
after = GafferImage.ImageReader()
after["fileName"].setValue( testFile )
before = GafferImage.ImageReader()
before["fileName"].setValue( testFile2 )
inImage = w["in"].image()
afterImage = after["out"].image()
beforeImage = before["out"].image()
inImage.blindData().clear()
afterImage.blindData().clear()
beforeImage.blindData().clear()
self.assertEqual( afterImage, inImage )
self.assertEqual( afterImage, beforeImage )
self.assertEqual( after["out"]["format"].getValue(), r["out"]["format"].getValue() )
self.assertEqual( after["out"]["format"].getValue(), before["out"]["format"].getValue() )
self.assertEqual( after["out"]["dataWindow"].getValue(), r["out"]["dataWindow"].getValue() )
self.assertEqual( after["out"]["dataWindow"].getValue(), before["out"]["dataWindow"].getValue() )
afterMetadata = after["out"]["metadata"].getValue()
beforeMetadata = before["out"]["metadata"].getValue()
expectedMetadata = r["out"]["metadata"].getValue()
# they were written at different times so we can't expect those values to match
beforeMetadata["DateTime"] = afterMetadata["DateTime"]
expectedMetadata["DateTime"] = afterMetadata["DateTime"]
# the writer adds several standard attributes that aren't in the original file
expectedMetadata["Software"] = IECore.StringData( "Gaffer " + Gaffer.About.versionString() )
expectedMetadata["HostComputer"] = IECore.StringData( platform.node() )
expectedMetadata["Artist"] = IECore.StringData( os.getlogin() )
expectedMetadata["DocumentName"] = IECore.StringData( "untitled" )
# some formats support IPTC standards, and some of the standard metadata
# is translated automatically by OpenImageIO.
for key in afterMetadata.keys() :
if key.startswith( "IPTC:" ) :
expectedMetadata["IPTC:OriginatingProgram"] = expectedMetadata["Software"]
expectedMetadata["IPTC:Creator"] = expectedMetadata["Artist"]
break
self.assertEqual( afterMetadata, expectedMetadata )
self.assertEqual( afterMetadata, beforeMetadata )
def testExrMetadata( self ) :
self.__testMetadataDoesNotAffectPixels( "exr" )
def testTiffMetadata( self ) :
self.__testMetadataDoesNotAffectPixels( "tif" )
# Lectures/EOM_Workshop/unit_tests.py
## Unit tests for rigid_body functions
import rigid_body as rb
import numpy as np
import aerosonde_uav as uav
def within_tol(a,b,tol):
if (np.abs(a-b)<tol).all():
return True
else:
return False
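# Quick illustration of within_tol (values chosen here purely for demonstration):
#   within_tol(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-10]), 1e-8) -> True
#   within_tol(1.0, 1.1, 1e-8) -> False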
## Init checks array
checks = [True]
## Specify tol on checks
tol = 1e-8
## Unit tests for rb.get_euler_angles_from_rot
checks.append(within_tol(rb.get_euler_angles_from_rot(np.eye(3)),\
np.array([0,0,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[0,-1,0],\
[1,0,0],\
[0,0,1]])),np.array([np.pi/2,0,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[-1,0,0],\
[0,-1,0],\
[0,0,1]])),np.array([np.pi,0,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[0,1,0],\
[-1,0,0],\
[0,0,1]])),np.array([-np.pi/2,0,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[0,0,1],\
[0,1,0],\
[-1,0,0]])),np.array([0,np.pi/2,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[0,0,-1],\
[0,1,0],\
[1,0,0]])),np.array([0,-np.pi/2,0]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[1,0,0],\
[0,0,-1],\
[0,1,0]])),np.array([0,0,np.pi/2]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[1,0,0],\
[0,-1,0],\
[0,0,-1]])),np.array([0,0,np.pi]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[1,0,0],\
[0,0,1],\
[0,-1,0]])),np.array([0,0,-np.pi/2]),tol))
checks.append(within_tol(rb.get_euler_angles_from_rot(\
np.matrix([[0.903592586644424,0.150380970349009,0.401130902721456],\
[0.175640606326494,0.723994214832615,-0.667070276881055],\
[-0.390731128489274,0.673214631930854,0.627782800484135]])),\
np.array([11*np.pi/180,23*np.pi/180,47*np.pi/180]),tol))
## Unit tests for rb.skew
array0 = np.array([1,0,0])
checks.append(within_tol(rb.skew(np.array(array0)),\
np.matrix([[0,-array0[2],array0[1]],\
[array0[2],0,-array0[0]],\
[-array0[1],array0[0],0]]),tol))
array0 = np.array([0,1,0])
checks.append(within_tol(rb.skew(np.array(array0)),\
np.matrix([[0,-array0[2],array0[1]],\
[array0[2],0,-array0[0]],\
[-array0[1],array0[0],0]]),tol))
array0 = np.array([0,0,1])
checks.append(within_tol(rb.skew(np.array(array0)),\
np.matrix([[0,-array0[2],array0[1]],\
[array0[2],0,-array0[0]],\
[-array0[1],array0[0],0]]),tol))
array0 = np.array([1,2,3])
checks.append(within_tol(rb.skew(np.array(array0)),\
np.matrix([[0,-array0[2],array0[1]],\
[array0[2],0,-array0[0]],\
[-array0[1],array0[0],0]]),tol))
## Unit tests for rb.rot_from_quat
checks.append(within_tol(rb.rot_from_quat(np.array([1,0,0,0])),np.eye(3),tol))
checks.append(within_tol(rb.rot_from_quat(np.array([0,1,0,0])),\
np.matrix([[1,0,0],[0,-1,0],[0,0,-1]]),tol))
checks.append(within_tol(rb.rot_from_quat(np.array([0,1,0,0])),\
np.matrix([[1,0,0],[0,-1,0],[0,0,-1]]),tol))
checks.append(within_tol(rb.rot_from_quat(np.array([0,0,1,0])),\
np.matrix([[-1,0,0],[0,1,0],[0,0,-1]]),tol))
q = [0.1,0.2,0.3,0.4]
q = q/np.linalg.norm(q)
checks.append(within_tol(rb.rot_from_quat(q),\
np.matrix([[-0.66666667, 0.13333333, 0.73333333],\
[0.66666667, -0.33333333, 0.66666667],\
[0.33333333, 0.93333333, 0.13333333]]), tol))
## Unit tests for rb.quat_prod
checks.append(within_tol(rb.quat_prod(np.array([[1],[0],[0],[0]]),\
np.array([[1],[0],[0],[0]])),\
np.array([[1],[0],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[1],[0],[0]]),\
np.array([[0],[1],[0],[0]])),\
np.array([[-1],[0],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[0],[1],[0]]),\
np.array([[0],[0],[1],[0]])),\
np.array([[-1],[0],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[0],[0],[1]]),\
np.array([[0],[0],[0],[1]])),\
np.array([[-1],[0],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[1],[0],[0],[0]]),\
np.array([[0],[1],[0],[0]])),\
np.array([[0],[1],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[1],[0],[0],[0]]),\
np.array([[0],[0],[1],[0]])),\
np.array([[0],[0],[1],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[1],[0],[0],[0]]),\
np.array([[0],[0],[0],[1]])),\
np.array([[0],[0],[0],[1]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[1],[0],[0]]),\
np.array([[1],[0],[0],[0]])),\
np.array([[0],[1],[0],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[0],[1],[0]]),\
np.array([[1],[0],[0],[0]])),\
np.array([[0],[0],[1],[0]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[0],[0],[1]]),\
np.array([[1],[0],[0],[0]])),\
np.array([[0],[0],[0],[1]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[1],[0],[0]]),\
np.array([[0],[0],[1],[0]])),\
np.array([[0],[0],[0],[-1]]),tol))
checks.append(within_tol(rb.quat_prod(np.array([[0],[1],[0],[0]]),\
np.array([[0],[0],[0],[1]])),\
np.array([[0],[0],[1],[0]]),tol))
## Unit tests for rb.quat_from_ypr
checks.append(within_tol(rb.quat_from_ypr(0, 0, 0),\
np.array([[1],[0],[0],[0]]),tol))
checks.append(within_tol(rb.quat_from_ypr(np.pi/2, 0, 0),\
np.array([[0.707106781186548],[0],[0],[0.707106781186547]]),tol))
checks.append(within_tol(rb.quat_from_ypr(-np.pi/2, 0, 0),\
np.array([[0.707106781186548],[0],[0],[-0.707106781186547]]),tol))
checks.append(within_tol(rb.quat_from_ypr(0, np.pi/2, 0),\
np.array([[0.707106781186548],[0],[0.707106781186547],[0]]),tol))
checks.append(within_tol(rb.quat_from_ypr(0, -np.pi/2, 0),\
np.array([[0.707106781186548],[0],[-0.707106781186547],[0]]),tol))
checks.append(within_tol(rb.quat_from_ypr(0, 0, np.pi/2 ),\
np.array([[0.707106781186548],[0.707106781186547],[0],[0]]),tol))
checks.append(within_tol(rb.quat_from_ypr(0, 0, -np.pi/2 ),\
np.array([[0.707106781186548],[-0.707106781186547],[0],[0]]),tol))
checks.append(within_tol(rb.quat_from_ypr(np.pi/180*47,np.pi/180*15,np.pi/180*6),\
np.array([[0.910692391306739],[-0.004391258543109],\
[0.140226691736355],[0.388531285984923]]),tol))
## Unit tests for rb.RigidBody init
m = 1
J = np.eye(3)
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
checks.append(within_tol(m,drone.m,tol))
checks.append(within_tol(J,drone.J,tol))
checks.append(within_tol(x0,drone.x,tol))
m = uav.m
J = uav.J
x0 = np.array([1.0,2.0,3.0, 4.0,5.0,6.0, 0.0,1/np.sqrt(2),1/np.sqrt(2),0.0, 7.0,8.0,9.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
checks.append(within_tol(m,drone.m,tol))
checks.append(within_tol(J,drone.J,tol))
checks.append(within_tol(x0,drone.x,tol))
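# Note on the state and input layout used in the remaining tests (inferred from
# the expected results below; rigid_body.py is the authoritative definition):
#   x  = [pn, pe, pd,  u, v, w,  e0, e1, e2, e3,  p, q, r]
#        (inertial position, body-frame velocity, attitude quaternion, body rates)
#   u0 = [fx, fy, fz,  mx, my, mz]
#        (body-frame forces followed by body-frame moments; the moment names are
#         chosen here for illustration)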
## Unit tests for rb.RigidBody.f, change in inertial position
# No velocities and no accels
t0 = 0
m = 1
J = np.eye(3)
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# Same but shows time invariance
t0 = 10
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# pn = 1, 0 euler angles, no velocities, forces, or moments
t0 = 0
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([1.0,0.0,0.0, 0.0,0.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# pe = 1, 0 euler angles, no velocities, forces, or moments
x0 = np.array([0.0,1.0,0.0, 0.0,0.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# pd = 1, 0 euler angles, no velocities, forces, or moments
x0 = np.array([0.0,0.0,1.0, 0.0,0.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 1.0,0.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[3],0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# v = 1, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 0.0,1.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,x0[4],0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# w = 1, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,1.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 5, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 5.0,0.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[3],0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# v = 10, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 0.0,10.0,0.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,x0[4],0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# w = 15, 0 euler angles, no forces or moments
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,15.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 yaw 90 deg, no forces or moments
yaw0 = np.deg2rad(90)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([-x0[4],x0[3],x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 yaw 180 deg, no forces or moments
yaw0 = np.deg2rad(180)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([-x0[3],-x0[4],x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 yaw -90 deg, no forces or moments
yaw0 = np.deg2rad(-90)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[4],-x0[3],x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 pitch 90 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(90)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[5],x0[4],-x0[3], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 pitch 180 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(180)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([-x0[3],x0[4],-x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 pitch -90 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(-90)
roll0 = np.deg2rad(0)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([-x0[5],x0[4],x0[3], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 roll 90 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(90)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[3],-x0[5],x0[4], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 roll 180 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(180)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[3],-x0[4],-x0[5], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
# u = 1,v = 2,w = 3 roll -90 deg, no forces or moments
yaw0 = np.deg2rad(0)
pitch0 = np.deg2rad(0)
roll0 = np.deg2rad(-90)
quat0 = rb.quat_from_ypr(yaw0,pitch0,roll0)
x0 = np.array([0.0,0.0,0.0, 1.0,2.0,3.0, quat0[0],quat0[1],quat0[2],quat0[3], 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([x0[3],x0[5],-x0[4], 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
## Unit tests for rb.RigidBody.f, change in velocities
# No velocities and no forces
t0 = 0
m = 5
J = np.eye(3)
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fx = m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([m,0.0,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 1.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fx = 3m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([3*m,0.0,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 3.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fx = -5m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([-5*m,0.0,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, -5.0,0.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fy = m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,m,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fy = 3m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,3*m,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,3.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fy = -5m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,-5*m,0.0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,-5.0,0.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fz = m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,m, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,1.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fz = 3m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,3*m, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,3.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
#fz = -5m, no velocities
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,-5*m, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,0.0, 0.0,0.0,-5.0, 0.0,0.0,0.0,0.0, 0.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
checks.append(within_tol(drone.f(t0,drone.x,u0),expected_result,tol))
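# The remaining cases exercise the Coriolis term of the body-frame translational
# dynamics, v_dot = F/m - omega x v (a sketch of the reasoning, not tied to rb's
# internals). For example, with [u,v,w] = [0,1,0] and [p,q,r] = [0,0,1]:
_omega = np.array([0.0, 0.0, 1.0])
_vel = np.array([0.0, 1.0, 0.0])
assert np.allclose(-np.cross(_omega, _vel), [1.0, 0.0, 0.0])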
#No force applied, [u,v,w] = [0,1,0], [p,q,r] = [0,0,1]
x0 = np.array([0.0,0.0,0.0, 0.0,1.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,1.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([1.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#No force applied, [u,v,w] = [0,0,1], [p,q,r] = [0,1,0]
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,1.0, 1.0,0.0,0.0,0.0, 0.0,1.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([-1.0,0.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#No force applied, [u,v,w] = [0,0,1], [p,q,r] = [1,0,0]
x0 = np.array([0.0,0.0,0.0, 0.0,0.0,1.0, 1.0,0.0,0.0,0.0, 1.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,1.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#No force applied, [u,v,w] = [1,0,0], [p,q,r] = [0,0,1]
x0 = np.array([0.0,0.0,0.0, 1.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,0.0,1.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,-1.0,0.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#No force applied, [u,v,w] = [1,0,0], [p,q,r] = [0,1,0]
x0 = np.array([0.0,0.0,0.0, 1.0,0.0,0.0, 1.0,0.0,0.0,0.0, 0.0,1.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,1.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#No force applied, [u,v,w] = [0,1,0], [p,q,r] = [1,0,0]
x0 = np.array([0.0,0.0,0.0, 0.0,1.0,0.0, 1.0,0.0,0.0,0.0, 1.0,0.0,0.0])
x0 = x0.reshape(x0.shape[0],1)
u0 = np.array([0.0,0.0,0, 0.0,0.0,0.0])
u0 = u0.reshape(u0.shape[0],1)
drone = rb.RigidBody(m,J,x0)
expected_result = np.array([0.0,0.0,-1.0])
expected_result = expected_result.reshape(expected_result.shape[0],1)
result = drone.f(t0,drone.x,u0)[3:6]
checks.append(within_tol(result,expected_result,tol))
#!/usr/bin/env python
from big_ol_pile_of_manim_imports import *
from old_projects.triangle_of_power.triangle import TOP, OPERATION_COLORS
# To watch one of these scenes, run the following:
# python extract_scene.py file_name <SceneName> -p
#
# Use the flag -l for a faster rendering at a lower
# quality, use -s to skip to the end and just show the final frame
class Circletoquare(Scene):
def construct(self):
circle = Circle()
self.play(ShowCreation(circle))
self.wait()
class CountingScene(Scene):
CONFIG = {
"base" : 10,
"power_colors" : [YELLOW, MAROON_B, RED, GREEN, BLUE, PURPLE_D],
"counting_dot_starting_position" : (FRAME_X_RADIUS-1)*RIGHT + (FRAME_Y_RADIUS-1)*UP,
"count_dot_starting_radius" : 0.5,
"dot_configuration_height" : 2,
"ones_configuration_location" : UP+2*RIGHT,
"num_scale_factor" : 2,
"num_start_location" : 2*DOWN,
}
def setup(self):
self.dots = VGroup()
self.number = 0
self.number_mob = VGroup(TexMobject(str(self.number)))
self.number_mob.scale(self.num_scale_factor)
self.number_mob.shift(self.num_start_location)
self.digit_width = self.number_mob.get_width()
self.initialize_configurations()
self.arrows = VGroup()
self.add(self.number_mob)
def get_template_configuration(self):
#This should probably be replaced for non-base-10 counting scenes
down_right = (0.5)*RIGHT + (np.sqrt(3)/2)*DOWN
result = []
for down_right_steps in range(5):
for left_steps in range(down_right_steps):
result.append(
down_right_steps*down_right + left_steps*LEFT
)
return reversed(result[:self.base])
def get_dot_template(self):
#This should be replaced for non-base-10 counting scenes
down_right = (0.5)*RIGHT + (np.sqrt(3)/2)*DOWN
dots = VGroup(*[
Dot(
point,
radius = 0.25,
fill_opacity = 0,
stroke_width = 2,
stroke_color = WHITE,
)
for point in self.get_template_configuration()
])
dots[-1].set_stroke(width = 0)
dots.set_height(self.dot_configuration_height)
return dots
def initialize_configurations(self):
self.dot_templates = []
self.dot_template_iterators = []
self.curr_configurations = []
def add_configuration(self):
new_template = self.get_dot_template()
new_template.move_to(self.ones_configuration_location)
left_vect = (new_template.get_width()+LARGE_BUFF)*LEFT
new_template.shift(
left_vect*len(self.dot_templates)
)
self.dot_templates.append(new_template)
self.dot_template_iterators.append(
it.cycle(new_template)
)
self.curr_configurations.append(VGroup())
def count(self, max_val, run_time_per_anim = 1):
for x in range(max_val):
self.increment(run_time_per_anim)
def increment(self, run_time_per_anim = 1, added_anims = [], total_run_time = None):
run_all_at_once = (total_run_time is not None)
if run_all_at_once:
num_rollovers = self.get_num_rollovers()
run_time_per_anim = float(total_run_time)/(num_rollovers+1)
moving_dot = Dot(
self.counting_dot_starting_position,
radius = self.count_dot_starting_radius,
color = self.power_colors[0],
)
moving_dot.generate_target()
moving_dot.set_fill(opacity = 0)
continue_rolling_over = True
place = 0
self.number += 1
added_anims = list(added_anims) #Silly python objects...
added_anims += self.get_new_configuration_animations()
while continue_rolling_over:
moving_dot.target.replace(
next(self.dot_template_iterators[place])
)
if run_all_at_once:
denom = float(num_rollovers+1)
start_t = place/denom
def get_modified_rate_func(anim):
return lambda t : anim.original_rate_func(
start_t + t/denom
)
for anim in added_anims:
if not hasattr(anim, "original_rate_func"):
anim.original_rate_func = anim.rate_func
anim.rate_func = get_modified_rate_func(anim)
self.play(
MoveToTarget(moving_dot),
*added_anims,
run_time = run_time_per_anim
)
self.curr_configurations[place].add(moving_dot)
if not run_all_at_once:
added_anims = []
if len(self.curr_configurations[place].split()) == self.base:
full_configuration = self.curr_configurations[place]
self.curr_configurations[place] = VGroup()
place += 1
center = full_configuration.get_center_of_mass()
radius = 0.6*max(
full_configuration.get_width(),
full_configuration.get_height(),
)
circle = Circle(
radius = radius,
stroke_width = 0,
fill_color = self.power_colors[place],
fill_opacity = 0.5,
)
circle.move_to(center)
moving_dot = VGroup(circle, full_configuration)
moving_dot.generate_target()
moving_dot[0].set_fill(opacity = 0)
else:
continue_rolling_over = False
self.play(*self.get_digit_increment_animations())
def get_new_configuration_animations(self):
if self.is_perfect_power():
self.add_configuration()
return [FadeIn(self.dot_templates[-1])]
else:
return []
def get_digit_increment_animations(self):
result = []
new_number_mob = self.get_number_mob(self.number)
new_number_mob.move_to(self.number_mob, RIGHT)
if self.is_perfect_power():
place = len(new_number_mob.split())-1
arrow = Arrow(
new_number_mob[place].get_top(),
self.dot_templates[place].get_bottom(),
color = self.power_colors[place]
)
self.arrows.add(arrow)
result.append(ShowCreation(arrow))
result.append(Transform(
self.number_mob, new_number_mob,
submobject_mode = "lagged_start"
))
return result
def get_number_mob(self, num):
result = VGroup()
place = 0
while num > 0:
digit = TexMobject(str(num % self.base))
if place >= len(self.power_colors):
self.power_colors += self.power_colors
digit.set_color(self.power_colors[place])
digit.scale(self.num_scale_factor)
digit.move_to(result, RIGHT)
digit.shift(place*(self.digit_width+SMALL_BUFF)*LEFT)
result.add(digit)
num //= self.base
place += 1
return result
def is_perfect_power(self):
number = self.number
while number > 1:
if number%self.base != 0:
return False
number //= self.base
return True
def get_num_rollovers(self):
next_number = self.number + 1
result = 0
while next_number%self.base == 0:
result += 1
next_number //= self.base
return result
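# Worked example of the rollover logic above (kept as a comment, since a Scene
# is awkward to instantiate outside manim's runner): in base 10, incrementing
# 99 to 100 rolls over twice -- 100 % 10 == 0 (first rollover, 100 -> 10),
# 10 % 10 == 0 (second rollover, 10 -> 1), and 1 % 10 != 0 stops the loop.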
class BinaryCountingScene(CountingScene):
CONFIG = {
"base" : 2,
"dot_configuration_height" : 1,
"ones_configuration_location" : UP+5*RIGHT
}
def get_template_configuration(self):
return [ORIGIN, UP]
class CountInDecimal(CountingScene):
def construct(self):
for x in range(11):
self.increment()
for x in range(85):
self.increment(0.25)
for x in range(20):
self.increment()
class CountInTernary(CountingScene):
CONFIG = {
"base" : 3,
"dot_configuration_height" : 1,
"ones_configuration_location" : UP+4*RIGHT
}
def construct(self):
self.count(27)
# def get_template_configuration(self):
# return [ORIGIN, UP]
class CountTo27InTernary(CountInTernary):
def construct(self):
for x in range(27):
self.increment()
self.wait()
class CountInBinaryTo256(BinaryCountingScene):
def construct(self):
self.count(256, 0.25)
class TowersOfHanoiScene(Scene):
CONFIG = {
"disk_start_and_end_colors" : [BLUE_E, BLUE_A],
"num_disks" : 5,
"peg_width" : 0.25,
"peg_height" : 2.5,
"peg_spacing" : 4,
"include_peg_labels" : True,
"middle_peg_bottom" : 0.5*DOWN,
"disk_height" : 0.4,
"disk_min_width" : 1,
"disk_max_width" : 3,
"default_disk_run_time_off_peg" : 1,
"default_disk_run_time_on_peg" : 2,
}
def setup(self):
self.add_pegs()
self.add_disks()
def add_pegs(self):
peg = Rectangle(
height = self.peg_height,
width = self.peg_width,
stroke_width = 0,
fill_color = GREY_BROWN,
fill_opacity = 1,
)
peg.move_to(self.middle_peg_bottom, DOWN)
self.pegs = VGroup(*[
peg.copy().shift(vect)
for vect in (self.peg_spacing*LEFT, ORIGIN, self.peg_spacing*RIGHT)
])
self.add(self.pegs)
if self.include_peg_labels:
self.peg_labels = VGroup(*[
TexMobject(char).next_to(peg, DOWN)
for char, peg in zip("ABC", self.pegs)
])
self.add(self.peg_labels)
def add_disks(self):
self.disks = VGroup(*[
Rectangle(
height = self.disk_height,
width = width,
fill_color = color,
fill_opacity = 0.8,
stroke_width = 0,
)
for width, color in zip(
np.linspace(
self.disk_min_width,
self.disk_max_width,
self.num_disks
),
color_gradient(
self.disk_start_and_end_colors,
self.num_disks
)
)
])
for number, disk in enumerate(self.disks):
label = TexMobject(str(number))
label.set_color(BLACK)
label.set_height(self.disk_height/2)
label.move_to(disk)
disk.add(label)
disk.label = label
self.reset_disks(run_time = 0)
self.add(self.disks)
def reset_disks(self, **kwargs):
self.disks.generate_target()
self.disks.target.arrange_submobjects(DOWN, buff = 0)
self.disks.target.move_to(self.pegs[0], DOWN)
self.play(
MoveToTarget(self.disks),
**kwargs
)
self.disk_tracker = [
set(range(self.num_disks)),
set([]),
set([])
]
def disk_index_to_peg_index(self, disk_index):
for index, disk_set in enumerate(self.disk_tracker):
if disk_index in disk_set:
return index
raise Exception("Somehow this disk wasn't accounted for...")
def min_disk_index_on_peg(self, peg_index):
disk_index_set = self.disk_tracker[peg_index]
if disk_index_set:
return min(self.disk_tracker[peg_index])
else:
return self.num_disks
def bottom_point_for_next_disk(self, peg_index):
min_disk_index = self.min_disk_index_on_peg(peg_index)
if min_disk_index >= self.num_disks:
return self.pegs[peg_index].get_bottom()
else:
return self.disks[min_disk_index].get_top()
def get_next_disk_0_peg(self):
curr_peg_index = self.disk_index_to_peg_index(0)
return (curr_peg_index+1)%3
def get_available_peg(self, disk_index):
if disk_index == 0:
return self.get_next_disk_0_peg()
for index in range(len(list(self.pegs))):
if self.min_disk_index_on_peg(index) > disk_index:
return index
raise Exception("Tower's of Honoi rule broken: No available disks")
def set_disk_config(self, peg_indices):
assert(len(peg_indices) == self.num_disks)
self.disk_tracker = [set([]) for x in range(3)]
for n, peg_index in enumerate(peg_indices):
disk_index = self.num_disks - n - 1
disk = self.disks[disk_index]
peg = self.pegs[peg_index]
disk.move_to(peg.get_bottom(), DOWN)
n_disks_here = len(self.disk_tracker[peg_index])
disk.shift(disk.get_height()*n_disks_here*UP)
self.disk_tracker[peg_index].add(disk_index)
def move_disk(self, disk_index, **kwargs):
next_peg_index = self.get_available_peg(disk_index)
self.move_disk_to_peg(disk_index, next_peg_index, **kwargs)
def move_subtower_to_peg(self, num_disks, next_peg_index, **kwargs):
disk_indices = list(range(num_disks))
peg_indices = list(map(self.disk_index_to_peg_index, disk_indices))
if len(set(peg_indices)) != 1:
warnings.warn("These disks don't make up a tower right now")
self.move_disks_to_peg(disk_indices, next_peg_index, **kwargs)
def move_disk_to_peg(self, disk_index, next_peg_index, **kwargs):
self.move_disks_to_peg([disk_index], next_peg_index, **kwargs)
def move_disks_to_peg(self, disk_indices, next_peg_index, run_time = None, stay_on_peg = True, added_anims = []):
if run_time is None:
if stay_on_peg is True:
run_time = self.default_disk_run_time_on_peg
else:
run_time = self.default_disk_run_time_off_peg
disks = VGroup(*[self.disks[index] for index in disk_indices])
max_disk_index = max(disk_indices)
next_peg = self.pegs[next_peg_index]
curr_peg_index = self.disk_index_to_peg_index(max_disk_index)
curr_peg = self.pegs[curr_peg_index]
if self.min_disk_index_on_peg(curr_peg_index) != max_disk_index:
warnings.warn("Tower's of Hanoi rule broken: disk has crap on top of it")
target_bottom_point = self.bottom_point_for_next_disk(next_peg_index)
path_arc = np.sign(curr_peg_index-next_peg_index)*np.pi/3
if stay_on_peg:
self.play(
Succession(
ApplyMethod(disks.next_to, curr_peg, UP, 0),
ApplyMethod(disks.next_to, next_peg, UP, 0, path_arc = path_arc),
ApplyMethod(disks.move_to, target_bottom_point, DOWN),
),
*added_anims,
run_time = run_time,
rate_func = lambda t : smooth(t, 2)
)
else:
self.play(
ApplyMethod(disks.move_to, target_bottom_point, DOWN),
*added_anims,
path_arc = path_arc*2,
run_time = run_time,
rate_func = lambda t : smooth(t, 2)
)
for disk_index in disk_indices:
self.disk_tracker[curr_peg_index].remove(disk_index)
self.disk_tracker[next_peg_index].add(disk_index)
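# A minimal illustrative sketch (not part of the original file): a scene that
# animates the classic recursive Towers of Hanoi solution using the
# move_disk_to_peg primitive defined above; the class name and CONFIG values
# are assumptions.
class SolveHanoiExample(TowersOfHanoiScene):
    CONFIG = {
        "num_disks": 4,
    }
    def construct(self):
        # Move every disk from peg 0 to peg 2, using peg 1 as the spare.
        self.solve_subtower(self.num_disks, start=0, end=2, spare=1)
        self.wait()
    def solve_subtower(self, n, start, end, spare):
        if n == 0:
            return
        # Move the n-1 smaller disks out of the way onto the spare peg,
        self.solve_subtower(n - 1, start, spare, end)
        # move the largest of the n disks (index n-1) to the target peg,
        self.move_disk_to_peg(n - 1, end, stay_on_peg=False)
        # then stack the smaller disks back on top of it.
        self.solve_subtower(n - 1, spare, end, start)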
class UpdatersExample(Scene):
def construct(self):
decimal = DecimalNumber(
0,
show_ellipsis=True,
num_decimal_places=3,
include_sign=True,
)
square = Square().to_edge(UP)
decimal.add_updater(lambda d: d.next_to(square, RIGHT))
decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))
self.add(square, decimal)
self.play(
square.to_edge, DOWN,
rate_func=there_and_back,
run_time=5,
)
self.wait()
class OpeningManimExample(Scene):
def construct(self):
title = TextMobject("This is some \\LaTeX")
basel = TexMobject(
"\\sum_{n=1}^\\infty "
"\\frac{1}{n^2} = \\frac{\\pi^2}{6}"
)
VGroup(title, basel).arrange_submobjects(DOWN)
self.play(
Write(title),
FadeInFrom(basel, UP),
)
self.wait()
transform_title = TextMobject("That was a transform")
transform_title.to_corner(UP + LEFT)
self.play(
Transform(title, transform_title),
LaggedStart(FadeOutAndShiftDown, basel),
)
self.wait()
grid = NumberPlane()
grid_title = TextMobject("This is a grid")
grid_title.scale(1.5)
grid_title.move_to(transform_title)
self.add(grid, grid_title) # Make sure title is on top of grid
self.play(
FadeOut(title),
FadeInFromDown(grid_title),
Write(grid),
)
self.wait()
grid_transform_title = TextMobject(
"That was a non-linear function \\\\"
"applied to the grid"
)
grid_transform_title.move_to(grid_title, UL)
grid.prepare_for_nonlinear_transform()
self.play(
grid.apply_function,
lambda p: p + np.array([
np.sin(p[1]),
np.sin(p[0]),
0,
]),
run_time=3,
)
self.wait()
self.play(
Transform(grid_title, grid_transform_title)
)
self.wait()
class Circletotingting(Scene):
"""docstring for CircletotiScene"""
def construct(self):
circle = Circle()
randy = Randolph().to_corner()
buddy = randy.get_bubble()
buddy.content_scale_factor = 0.8
buddy.add_content(TOP(2,3,4).scale(0.8))
self.add(randy)
self.play(ShowCreation(circle))
self.wait()
self.play(FadeIn(buddy))
self.wait()
class Dirthtoway(Scene):
def construct(self):
name = Square()
self.play(ShowCreation(name))
self.wait()
class SquareToCircle(Scene):
def construct(self):
circle = Circle()
square = Square()
square.flip(RIGHT)
square.rotate(-3 * TAU / 8)
circle.set_fill(PINK, opacity=0.5)
self.play(ShowCreation(square))
self.play(Transform(square, circle))
self.play(FadeOut(square))
class Hellomanim(Scene):
"""docstring for Hellomanim"""
def construct(self):
circle = Circle()
self.play(ShowCreation(circle))
self.wait()
class DontLearnFromSymbols(Scene):
def construct(self):
randy = Randolph().to_corner()
bubble = randy.get_bubble()
bubble.content_scale_factor = 0.6
bubble.add_content(TOP(2, 3, 8).scale(0.7))
equation = VMobject(
TOP(2, "x"),
TexMobject("\\times"),
TOP(2, "y"),
TexMobject("="),
TOP(2, "x+y")
)
equation.arrange_submobjects()
import numpy as np
import nibabel as nib
import SimpleITK as sitk
from matplotlib import patches
import scipy.ndimage.filters as fi
from matplotlib import pyplot as plt
from dipy.align.reslice import reslice
plt.rcParams['image.cmap'] = 'gray'
# -------------------------
# Nifti Image Preprocessing
# -------------------------
def load_nib(fpath):
"""
Load nifti image
:param fpath: path of nifti file
"""
im = nib.load(fpath)
return im
def resample_nib(im, new_spacing=(1, 1, 1), order=0):
"""
Resample nifti voxel array and corresponding affine
:param im: nifti image
:param new_spacing: new voxel size
:param order: order of interpolation for resampling/reslicing, 0 nearest interpolation, 1 trilinear etc.
:return new_im: resampled nifti image
"""
header = im.header
vox_zooms = header.get_zooms()
vox_arr = im.get_fdata()
vox_affine = im.affine
# resample using DIPY.ALIGN
if isinstance(new_spacing, int) or isinstance(new_spacing, float):
    # a scalar spacing means isotropic voxels
    new_spacing = (new_spacing, new_spacing, new_spacing)
new_vox_arr, new_vox_affine = reslice(vox_arr, vox_affine, vox_zooms, new_spacing, order=order)
# create resampled image
new_im = nib.Nifti1Image(new_vox_arr, new_vox_affine, header)
return new_im
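# Hedged usage sketch (the file name and spacing are placeholders, not taken
# from this project):
#
#     im = load_nib('ct_spine.nii.gz')
#     im_iso = resample_nib(im, new_spacing=(1, 1, 1), order=1)  # trilinear reslice to 1 mm
#     arr = transpose_compatible(im_iso.get_fdata(), 'asl_to_np')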
def transpose_compatible(arr, direction):
"""
Transpose array to a compatible direction
:param arr: numpy array
:param direction: 'asl_to_np' or 'np_to_asl' only
:return arr: transposed array
"""
if direction == 'asl_to_np':
    arr = arr.transpose([1, 0, 2])[:, :, ::-1]
elif direction == 'np_to_asl':
    arr = arr[:, :, ::-1].transpose([1, 0, 2])
else:
    raise ValueError('Direction can only be ASL to Anjany\'s numpy indexing or the other way around!')
return arr
# --------------------------
# Preprocessing Segmentation
# --------------------------
def get_vert_lims(loc, off, h, w, d):
"""
Get vertebra and padding limits for segmentation training
:param loc: vertebra centroid coordinates
:param off: offset
:param h: original image height
:param w: original image width
:param d: original image depth
:return:
vert_lims: vertebra patch in original full spine image coordinates (for cropping)
vert_pads: padding to add on 3 dimensions to center the vertebrae in the patch
"""
# height
if loc[0] + off[0, 0] < 0:
h_min = 0
h_lo_pad = 0 - (loc[0] + off[0, 0])
else:
h_min = loc[0] + off[0, 0]
h_lo_pad = 0
if loc[0] + off[0, 1] > h:
h_max = h
h_hi_pad = (loc[0] + off[0, 1]) - h
else:
h_max = loc[0] + off[0, 1]
h_hi_pad = 0
# width
if loc[1] + off[1, 0] < 0:
w_min = 0
w_lo_pad = 0 - (loc[1] + off[1, 0])
else:
w_min = loc[1] + off[1, 0]
w_lo_pad = 0
if loc[1] + off[1, 1] > w:
w_max = w
w_hi_pad = (loc[1] + off[1, 1]) - w
else:
w_max = loc[1] + off[1, 1]
w_hi_pad = 0
# depth
if loc[2] + off[2, 0] < 0:
d_min = 0
d_lo_pad = 0 - (loc[2] + off[2, 0])
else:
d_min = loc[2] + off[2, 0]
d_lo_pad = 0
if loc[2] + off[2, 1] > d:
d_max = d
d_hi_pad = (loc[2] + off[2, 1]) - d
else:
d_max = loc[2] + off[2, 1]
d_hi_pad = 0
vert_lims = [h_min, h_max, w_min, w_max, d_min, d_max]
vert_pads = [h_lo_pad, h_hi_pad, w_lo_pad, w_hi_pad, d_lo_pad, d_hi_pad]
return vert_lims, vert_pads
def rescale(x, min_val, max_val):
    """Linearly rescale the values of x to the range [min_val, max_val]."""
    return (max_val - min_val) * (x - np.min(x)) / float(np.max(x) - np.min(x)) + min_val
def gen_gaussian_im(shape, mean, variance):
"""
Generate a 3D Gaussian kernel array for a single vertebra centroid
:param shape: full spine image shape 1 mm
:param mean: gaussian mean
:param variance: gaussian variance
:return:
"""
# create nxn zeros
gauss = np.zeros(shape)
# set element at the middle to one, a dirac delta
gauss[mean[0], mean[1], mean[2]] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return rescale(fi.gaussian_filter(gauss, variance), 0, 1)
def get_channelwise_gaussian(centroids_list, verts_in_im, im_shape):
"""
Generate a 3D Gaussian kernel array for all vertebrae
:param centroids_list: centroid coordinates
:param verts_in_im: vertebrae to patch
:param im_shape: full spine image shape 1 mm
:return:
"""
num_verts = centroids_list.shape[0]
cent_mask = np.repeat(np.expand_dims(np.zeros(im_shape, dtype='float32'), axis=-1), num_verts, axis=-1)
for vert_idx in verts_in_im:
if vert_idx <= num_verts:
cent_loc = centroids_list[vert_idx - 1].astype(int)
gauss = gen_gaussian_im(im_shape, cent_loc, variance=2)
gauss = (gauss - np.amin(gauss)) / np.amax(gauss - np.amin(gauss))
cent_mask[:, :, :, vert_idx - 1] = gauss
return cent_mask
def get_gaussian_heatmap(im_shape, cent_loc):
"""
Generate a 3D Gaussian heatmap for a single vertebrae
:param im_shape: full spine image shape 1 mm
:param cent_loc: vertebra centroid coordinates
:return: cent_mask: heatmap mask
"""
cent_mask = np.zeros(im_shape, dtype='float32')
gauss = gen_gaussian_im(im_shape, cent_loc, variance=2)
gauss = (gauss - np.amin(gauss)) / np.amax(gauss - np.amin(gauss))
cent_mask[:, :, :] = gauss
return cent_mask
def get_seg_patch(im, loc, off):
"""
Generate a vertebra patch for segmentation training
:param im: original full spine image 1 mm
:param loc: centroid coordinates 1 mm
:param off: offset defining the patch extent around the centroid
:return: vert_im, vert_gauss, lims, pads
"""
h, w, d = im.shape
# get patch limits and padding
lims, pads = get_vert_lims(loc, off, h, w, d)
gauss_hm = get_gaussian_heatmap(im.shape, loc)
# crop
vert_im = im[lims[0]:lims[1], lims[2]:lims[3], lims[4]:lims[5]]
vert_gauss = gauss_hm[lims[0]:lims[1], lims[2]:lims[3], lims[4]:lims[5]]
# pad
vert_im = np.pad(vert_im, pad_width=((pads[0], pads[1]), (pads[2], pads[3]), (pads[4], pads[5])), mode='constant')
vert_gauss = np.pad(vert_gauss, pad_width=((pads[0], pads[1]), (pads[2], pads[3]), (pads[4], pads[5])),
mode='constant')
return vert_im, vert_gauss, lims, pads
def crop_seg_patch(msk, pads):
"""
Crop the patch to original size
:param msk: patch mask
:param pads: pads to crop
:return:
"""
h,w,d = msk.shape
[h_lo_pad, h_hi_pad, w_lo_pad, w_hi_pad, d_lo_pad, d_hi_pad] = pads
msk_crop = msk[h_lo_pad:h-h_hi_pad, w_lo_pad:w-w_hi_pad, d_lo_pad:d-d_hi_pad]
return msk_crop
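# Minimal sketch of the patch round trip (synthetic shapes and offsets, purely
# illustrative):
#
#     im = np.random.rand(220, 180, 160)                   # full-spine volume, 1 mm
#     loc = np.array([40, 90, 80])                         # vertebra centroid
#     off = np.array([[-40, 40], [-40, 40], [-40, 40]])    # patch extent around it
#     vert_im, vert_gauss, lims, pads = get_seg_patch(im, loc, off)
#     pred = np.ones_like(vert_im)                         # stand-in for a predicted mask
#     pred_cropped = crop_seg_patch(pred, pads)            # undo the centering padding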
# ----------------------------
# Postprocessing Localization
# ----------------------------
def clean_hm_prediction(msk, threshold):
"""
Apply largest 3d connected component for localization
:param msk: 3d spine localization heatmap
:param threshold: intensity (probability) threshold
:return msk_corrected: post-processed mask
"""
msk[msk < threshold] = 0
msk_binary = np.copy(msk)
msk_binary[msk_binary > threshold] = 1
msk_im = sitk.GetImageFromArray(msk_binary.astype('uint8'))
msk_im.SetSpacing([5, 5, 5])
# connected component filter
connected = sitk.ConnectedComponentImageFilter()
connected.FullyConnectedOn()
cc = connected.Execute(msk_im)
# find largest component
no_of_cc = connected.GetObjectCount()
cc_sizes = np.zeros((1, no_of_cc))
cc_arr = sitk.GetArrayFromImage(cc)
for i in range(1, no_of_cc + 1):
cc_sizes[0, i - 1] = np.count_nonzero(cc_arr == i)
cc_seq = np.argsort(cc_sizes)
largest_comp = cc_seq[0, -1] + 1
# remove every other 'component' other than largest component
cc_arr[cc_arr != largest_comp] = False
cc_arr[cc_arr == largest_comp] = True
# return the 'mask' corresponding to the largest connected component
msk_corrected = np.zeros_like(msk)
msk_corrected[cc_arr != 0] = msk[cc_arr != 0]
return msk_corrected
def msk_2_box(msk, threshold):
"""
Compute the 3d bounding box coordinates from the localization heatmap
:param msk: 3d spine localization heatmap
:param threshold: intensity (probability) threshold
:return: 3d bounding box coordinates
"""
msk_temp = np.copy(msk)
msk_temp[msk < threshold] = 0
nzs = np.nonzero(msk_temp)
if len(nzs[0]) > 0:
h_min = np.amin(nzs[0])
w_min = np.amin(nzs[1])
d_min = np.amin(nzs[2])
h_max = np.amax(nzs[0])
w_max = np.amax(nzs[1])
d_max = np.amax(nzs[2])
return [h_min, h_max, w_min, w_max, d_min, d_max]
else:
h, w, d = msk_temp.shape
return [0, h, 0, w, 0, d]
def add_tolerance(box, im_shape, tols):
"""
Add distance tolerance to the dimensions of the bounding box
:param box: 3d bounding box
:param im_shape: image shape where the bounding box is applied
:param tols: tolerances
:return: new 3d bounding box coordinates
"""
h, w, d = im_shape
[h_min, h_max, w_min, w_max, d_min, d_max] = box
h_min = h_min - tols[0]
h_max = h_max + tols[1]
w_min = w_min - tols[2]
w_max = w_max + tols[3]
d_min = d_min - tols[4]
d_max = d_max + tols[5]
if h_min < 0:
h_min = 0
if h_max > h:
h_max = h
if w_min < 0:
w_min = 0
if w_max > w:
w_max = w
if d_min < 0:
d_min = 0
if d_max > d:
d_max = d
return h_min, h_max, w_min, w_max, d_min, d_max
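# Hedged sketch of the localization post-processing chain (threshold and
# tolerance values are illustrative):
#
#     hm = clean_hm_prediction(hm, threshold=0.3)          # keep largest connected component
#     box = msk_2_box(hm, threshold=0.3)                   # tight bounding box around the spine
#     box = add_tolerance(box, hm.shape, tols=(2, 2, 2, 2, 2, 2))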
def adjust_box(box, im, image=True):
"""
Adjust bounding box shape
:param box: 3d bounding box
:param im: image or mask where the bounding box is applied
:param image: True if image, False if centroid mask
:return: new 3d image or centroid mask, cropped to the adjusted box
"""
# first bounding box
[h_min, h_max, w_min, w_max, d_min, d_max] = box
# based on first box decide tolerance
depth = d_max - d_min
width = w_max - w_min
max_dim = max(depth, width)
# tolerance
tol_h = (50, 50)
tol_d = (0, 0)
# add tolerance on sagittal view depending on bounding box
if max_dim <= 25:
tol_w = (25, 35)
elif max_dim <= 45:
tol_w = (10, 15)
else:
import direct.gui
import direct.showbase
import pandac.PandaModules
import framework.eventmarkers.eventmarkers
import math
class BasicStimuli:
"""
A class that provides convenience functions for displaying psychological-type stimuli.
This includes text, rectangles, crosshairs, images, sounds, and video.
These functions are automatically available to any LatentModule.
"""
class destroy_helper:
"""Small helper class to destroy multiple objects using a destroy() call."""
def __init__(self,objs):
self.objs = objs
def destroy(self):
for o in self.objs:
o.destroy()
def __init__(self):
self.set_engine(base=base,direct=direct,pandac=pandac.PandaModules)
self.audio3d = None # 3d audio manager, if needed
self.implicit_markers = False # whether to generate implicit markers
# in write(), movie(), etc.
self._to_destroy = []
def marker(self,markercode):
"""
Emit a marker. The markercode can be a string or a number.
Side note: strings will not work if a legacy marker sending protocol is enabled (such as DataRiver or the parallel port).
"""
framework.eventmarkers.eventmarkers.send_marker(markercode)
def write(self,
text, # the text to display
duration=1.0, # duration for which the text will be displayed
# if this is a string, the stimulus will be displayed until the corresponding event is generated
# if this is a list of [number,string], the stimulus will at least be displayed for <number> seconds, but needs to be confirmed with the respective event
# if this is 0, the write will be non-blocking and you have to .destroy() the return value of this function manually
block=True, # whether to wait for the duration until the function returns
# optional parameters:
pos=(0,0), # x/y position of the text on the screen
roll=0, # roll angle of the text
scale=0.07, # size of the text; either a single float (e.g. 0.07) or a 2-tuple of floats for non-uniform scaling
fg=None, # the (r,g,b,a) color of the text; usually each is a float between 0 and 1
bg=None, # the (r,g,b,a) color of the text background; if a is zero, no background will be created
shadow=None, # the (r,g,b,a) color of the text's shadow
shadowOffset=(0.04,0.04), # offset of the drop shadow from the text
frame=None, # the (r,g,b,a) color of the text's frame, drawn around the background (if desired)
align='center', # either 'left', 'center', or 'right'
wordwrap=None, # optionally the width to wordwrap the text at
drawOrder=None, # optional drawing order
font='arial.ttf', # optionally the font of the text (see loader.loadFont)
parent=None, # parent rendering context or Panda3d NodePath
sort=0 # sorting order of the text
):
"""Write a piece of text on the screen and keep it there for a particular duration."""
if align == 'left':
align = self._engine.pandac.TextNode.ALeft
elif align == 'right':
align = self._engine.pandac.TextNode.ARight
else:
align = self._engine.pandac.TextNode.ACenter
if duration == 0:
block = False
if type(font) == str:
font = self._engine.base.loader.loadFont(font)
obj = self._engine.direct.gui.OnscreenText.OnscreenText(text=text,pos=(pos[0],pos[1]-scale/4),roll=roll,scale=scale,fg=fg,bg=bg,shadow=shadow,shadowOffset=shadowOffset,frame=frame,align=align,wordwrap=wordwrap,drawOrder=drawOrder,font=font,parent=parent,sort=sort)
self._to_destroy.append(obj)
if self.implicit_markers:
self.marker(254)
if block:
if type(duration) == list or type(duration) == tuple:
self.sleep(duration[0])
self.waitfor(duration[1])
elif type(duration) == str:
self.waitfor(duration)
else:
self.sleep(duration)
self._destroy_object(obj,255)
else:
if duration > 0:
self._engine.base.taskMgr.doMethodLater(duration, self._destroy_object, 'ConvenienceFunctions, remove_text',extraArgs=[obj,255])
return obj
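# Hedged usage sketch (called from within a module that mixes in BasicStimuli;
# the values are illustrative):
#   self.write('Fixate the cross', duration=1.5, pos=(0, 0.4), scale=0.1, fg=(1, 1, 1, 1))
#   txt = self.write('Press the button', duration=0, block=False)  # caller manages lifetime
#   ...
#   txt.destroy()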
def crosshair(self,
duration=1.0, # duration for which this object will be displayed
# if this is a string, the stimulus will be displayed until the corresponding event is generated
# if this is a list of [number,string], the stimulus will at least be displayed for <number> seconds, but needs to be confirmed with the respective event
# if this is 0, the call will be non-blocking and you have to .destroy() the return value of this function manually
block=True, # whether this function should only return once the duration is over
# additional parameters
pos=(0,0), # position of the crosshair
size=0.25, # size of the crosshair
width=0.01, # thickness of the rectangles
color=(0,0,0,1), # color of the crosshair
parent=None # the renderer to use for displaying the object
):
"""Draw a crosshair."""
obj1 = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=(pos[0],0,pos[1]),scale=(size,1,width),color=color,parent=parent)
self._to_destroy.append(obj1)
obj1.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
obj2 = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=(pos[0],0,pos[1]),scale=(width,1,size),color=color,parent=parent)
self._to_destroy.append(obj2)
obj2.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
if self.implicit_markers:
self.marker(252)
if block:
if type(duration) == list or type(duration) == tuple:
self.sleep(duration[0])
self.waitfor(duration[1])
elif type(duration) == str:
self.waitfor(duration)
else:
self.sleep(duration)
self._destroy_object([obj1,obj2],253)
else:
if duration > 0:
self._engine.base.taskMgr.doMethodLater(duration, self._destroy_object, 'ConvenienceFunctions, remove_crosshair',extraArgs=[[obj1,obj2],253])
return self.destroy_helper([obj1,obj2])
def rectangle(self,
rect=None, # the bounds of the rectangle (left,right,top,bottom)
duration=1.0, # duration for which this object will be displayed
# if this is a string, the stimulus will be displayed until the corresponding event is generated
# if this is a list of [number,string], the stimulus will at least be displayed for <number> seconds, but needs to be confirmed with the respective event
# if this is 0, the call will be non-blocking and you have to .destroy() the return value of this function manually
block=True, # whether this function should only return once the duration is over
# additional parameters
color=(1,1,1,1), # the (r,g,b,a) color of the rectangle
parent=None, # the renderer to use for displaying the object
depth=0, # screen depth of the rectangle
):
"""Draw a single-colored rectangle."""
if duration == 0:
block = False
l=rect[0];r=rect[1];t=rect[2];b=rect[3]
obj = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=((l+r)/2,depth,(b+t)/2),scale=((r-l)/2,1,(b-t)/2),color=color,parent=parent)
self._to_destroy.append(obj)
obj.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
if self.implicit_markers:
self.marker(250)
if block:
if type(duration) == list or type(duration) == tuple:
self.sleep(duration[0])
self.waitfor(duration[1])
elif type(duration) == str:
self.waitfor(duration)
else:
self.sleep(duration)
self._destroy_object(obj,251)
else:
if duration > 0:
self._engine.base.taskMgr.doMethodLater(duration, self._destroy_object, 'ConvenienceFunctions, remove_rect',extraArgs=[obj,251])
return obj
def frame(self,
rect=None, # the inner bounds of the frame (left,right,top,bottom)
thickness=(0.01,0.01),# thickness of the frame (left/right, top/bottom)
duration=1.0, # duration for which this object will be displayed
# if this is a string, the stimulus will be displayed until the corresponding event is generated
# if this is a list of [number,string], the stimulus will at least be displayed for <number> seconds, but needs to be confirmed with the respective event
# if this is 0, the call will be non-blocking and you have to .destroy() the return value of this function manually
block=True, # whether this function should only return once the duration is over
# additional parameters
color=(1,1,1,1), # the (r,g,b,a) color of the rectangle
parent=None, # the renderer to use for displaying the object
):
"""Display a frame on the screen and keep it there for a particular duration."""
l=rect[0];r=rect[1];t=rect[2];b=rect[3]
w=thickness[0];h=thickness[1]
L = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=(l-w/2,0,(b+t)/2),scale=(w/2,1,w+(b-t)/2),color=color,parent=parent)
L.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
self._to_destroy.append(L)
R = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=(r+w/2,0,(b+t)/2),scale=(w/2,1,w+(b-t)/2),color=color,parent=parent)
R.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
self._to_destroy.append(R)
T = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=((l+r)/2,0,t-h/2),scale=(h+(r-l)/2,1,h/2),color=color,parent=parent)
T.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
self._to_destroy.append(T)
B = self._engine.direct.gui.OnscreenImage.OnscreenImage(image='blank.tga',pos=((l+r)/2,0,b+h/2),scale=(h+(r-l)/2,1,h/2),color=color,parent=parent)
B.setTransparency(self._engine.pandac.TransparencyAttrib.MAlpha)
self._to_destroy.append(B)
if self.implicit_markers:
self.marker(242)
if block:
if type(duration) == list or type(duration) == tuple:
self.sleep(duration[0])
self.waitfor(duration[1])
elif type(duration) == str:
self.waitfor(duration)
else:
self.sleep(duration)
self._destroy_object([L,R,T,B],243)
else:
if duration > 0:
self._engine.base.taskMgr.doMethodLater(duration,self._destroy_object, 'ConvenienceFunctions, remove_frame',extraArgs=[[L,R,T,B],243])
return self.destroy_helper([L,R,T,B])
def picture(self,
image, # the image to display (may be a file name, preferably a relative path)
duration=1.0, # duration for which this object will be displayed
# if this is a string, the stimulus will be displayed until the corresponding event is generated
# if this is a list of [number,string], the stimulus will at least be displayed for <number> seconds, but needs to be confirmed with the respective event
# if this is 0, the call will be non-blocking and you have to .destroy() the return value of this function manually
block=True, # whether to wait for the duration until the function returns
# optional parameters:
pos=None, # the (x,z) or (x,y,z) position of the image on the screen; this may be a 3-tuple of floats; y should be zero
hpr=None, # the (heading,pitch,roll) angles of the image; if this is a single number, it will be taken as the roll angle
scale=None, # the size of the image; this may be a single float, a 3-tuple of floats, or a vector; y should be 1, if a 3-tuple is given
color=None, # the (r,g,b,a) coloring of the image
parent=None, # parent rendering context or Panda3d NodePath
):
"""Display a picture on the screen and keep it there for a particular duration."""
if pos is not None and type(pos) not in (int,float) and len(pos) == 2:
pos = (pos[0],0,pos[1])
if scale is not None and type(scale)
else:
# <--------->
# <--------->
overlap_len = len(pre_sbjct[overlap_start:])
overlap_end_pos = pre_block_end
# Update current end
current_end = next_block_end
# Use the entire pre sequence and add the last part of the next sequence
final_sbjct += next_sbjct[overlap_len:]
final_qry += next_qry[overlap_len:]
# Find query overlap sequences
pre_qry_overlap = pre_qry[overlap_start : (overlap_start + overlap_len)] # can work for both types of overlap
next_qry_overlap = next_qry[:overlap_len]
sbjct_overlap = next_sbjct[:overlap_len]
# If an alternative query overlap exists, save it
if pre_qry_overlap != next_qry_overlap:
print("OVERLAP WARNING:")
print(pre_qry_overlap, "\n", next_qry_overlap)
# Save alternative overlaps
alternative_overlaps += [(next_block_start, overlap_end_pos, sbjct_overlap, next_qry_overlap)]
elif next_block_start > current_end:
# <------->
# <------->
gap_size = next_block_start - current_end - 1
final_qry += "N"*gap_size
if silent_N_flag:
final_sbjct += "N"*gap_size
else:
ref_seq = get_gene_seqs(specie_path, gene)
final_sbjct += ref_seq[pre_block_end:pre_block_end+gap_size]
current_end = next_block_end
final_sbjct += next_sbjct
final_qry += next_qry
# Calculate coverage
no_call = final_qry.upper().count("N")
coverage = (current_end - all_start +1 - no_call) / float(sbjct_len)
# Calculate identity
equal = 0
not_equal = 0
for i in range(len(final_qry)):
if final_qry[i].upper() != "N":
if final_qry[i].upper() == final_sbjct[i].upper():
equal += 1
else:
not_equal += 1
identity = equal/float(equal + not_equal)
return final_sbjct, final_qry, all_start, current_end, alternative_overlaps, coverage, identity
def find_mismatches(gene, sbjct_start, sbjct_seq, qry_seq, alternative_overlaps = []):
"""
This function finds mis matches between two sequences. Depending on the
sequence type, either find_codon_mismatches or find_nucleotid_mismatches
is called; if the sequence contains both a promoter and a coding region,
both functions are called. The function can also call itself if
alternative overlaps are given. All found mis matches are returned.
"""
# Initiate the mis_matches list that will store all found mis matches
mis_matches = []
# Find mis matches in RNA genes
if gene in RNA_gene_list:
mis_matches += find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq)
else:
# Check if the gene sequence is with a promoter
regex = r"promoter_size_(\d+)(?:bp)"
promtr_gene_objt = re.search(regex, gene)
# Check for promoter sequences
if promtr_gene_objt:
# Get promoter length
promtr_len = int(promtr_gene_objt.group(1))
# Extract promoter sequence, while considering gaps
# --------agt-->----
# ---->?
if sbjct_start <= promtr_len:
#Find position in sbjct sequence where promoter ends
promtr_end = 0
nuc_count = sbjct_start - 1
for i in range(len(sbjct_seq)):
promtr_end += 1
if sbjct_seq[i] != "-":
nuc_count += 1
if nuc_count == promtr_len:
break
# Check if only a part of the promoter is found
#--------agt-->----
# ----
promtr_sbjct_start = -1
if nuc_count < promtr_len:
promtr_sbjct_start = nuc_count - promtr_len
# Get promoter part of subject and query
sbjct_promtr_seq = sbjct_seq[:promtr_end]
qry_promtr_seq = qry_seq[:promtr_end]
# For promoter part find nucleotide mis matches
mis_matches += find_nucleotid_mismatches(promtr_sbjct_start, sbjct_promtr_seq, qry_promtr_seq, promoter = True)
# Check if gene is also found
#--------agt-->----
# -----------
if (sbjct_start + len(sbjct_seq.replace("-", ""))) > promtr_len:
sbjct_gene_seq = sbjct_seq[promtr_end:]
qry_gene_seq = qry_seq[promtr_end:]
sbjct_gene_start = 1
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_gene_seq, qry_gene_seq)
# No promoter, only gene is found
#--------agt-->----
# -----
else:
sbjct_gene_start = sbjct_start - promtr_len
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_seq, qry_seq)
else:
# Find mismatches in gene
mis_matches += find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq)
# Find mismatches in alternative overlaps if any
for overlap in alternative_overlaps:
mis_matches += find_mismatches(gene, overlap[0], overlap[2], overlap[3])
return mis_matches
def find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq, promoter = False):
"""
This function takes two aligned sequences (subject and query) and the
position on the subject where the alignment starts. The sequences are
compared one nucleotide at a time. If mis matches are found they are
saved. If a gap is found, the function find_nuc_indel is called to find
the entire indel, which is also saved into the list mis_matches. If
promoter sequences are given as arguments, these are reversed and the
absolute value of the sequence position is used, but when mutations
are saved, the negative position and the reverse sequences are saved
in mis_matches.
"""
# Initiate the mis_matches list that will store all found mis matches
mis_matches = []
sbjct_start = abs(sbjct_start)
seq_pos = sbjct_start
# Set variables depending on promoter status
factor = 1
mut_prefix = "r."
if promoter == True:
factor = (-1)
mut_prefix = "n."
# Reverse promoter sequences
sbjct_seq = sbjct_seq[::-1]
qry_seq = qry_seq[::-1]
# Go through sequences one nucleotide at a time
shift = 0
for index in range(sbjct_start - 1, len(sbjct_seq)):
mut_name = mut_prefix
mut = ""
# Shift index according to gaps
i = index + shift
# If the end of the sequence is reached, stop
if i == len(sbjct_seq):
break
sbjct_nuc = sbjct_seq[i]
qry_nuc = qry_seq[i]
# Check for mis matches
if sbjct_nuc.upper() != qry_nuc.upper():
# check for insertions and deletions
if sbjct_nuc == "-" or qry_nuc == "-":
if sbjct_nuc == "-":
mut = "ins"
indel_start_pos = (seq_pos -1) *factor
indel_end_pos = seq_pos * factor
indel = find_nuc_indel(sbjct_seq[i:], qry_seq[i:])
else:
mut = "del"
indel_start_pos = seq_pos * factor
indel = find_nuc_indel(qry_seq[i:], sbjct_seq[i:])
indel_end_pos = (seq_pos + len(indel) - 1) * factor
seq_pos += len(indel) - 1
# Shift the index to the end of the indel
shift += len(indel) - 1
# Write mutation name, depending on the sequence
if len(indel) == 1 and mut == "del":
mut_name += str(indel_start_pos) + mut + indel
else:
if promoter == True:
# Reverse the sequence and the start and end positions
indel = indel[::-1]
temp = indel_start_pos
indel_start_pos = indel_end_pos
indel_end_pos = temp
mut_name += str(indel_start_pos) + "_" +str(indel_end_pos) + mut + indel
mis_matches += [[mut, seq_pos * factor, seq_pos * factor, indel, mut_name, mut, indel]]
# Check for substitutions mutations
else:
mut = "sub"
mut_name += str(seq_pos * factor) + sbjct_nuc + ">" + qry_nuc
mis_matches += [[mut, seq_pos * factor, seq_pos * factor, qry_nuc, mut_name, sbjct_nuc, qry_nuc]]
# Increment sequence position
if mut != "ins":
seq_pos += 1
return mis_matches
def find_nuc_indel(gapped_seq, indel_seq):
"""
This function finds the entire indel missing from a gapped sequence
compared to the indel sequence. It is assumed that the sequences start
with the first position of the gap.
"""
ref_indel = indel_seq[0]
for j in range(1,len(gapped_seq)):
if gapped_seq[j] == "-":
ref_indel += indel_seq[j]
else:
break
return ref_indel
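# Worked example (sketch): for a 3-base gap, the returned indel is the three
# bases opposite the gap.
#     find_nuc_indel("---T", "CGTT")  ->  "CGT"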
def aa(codon):
"""
This function converts a codon to an amino acid. If the codon is not
valid, "?" is returned; otherwise the corresponding amino acid is returned.
"""
codon = codon.upper()
aa = {"ATT": "I", "ATC": "I", "ATA": "I",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L", "TTA": "L", "TTG": "L",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TTT": "F", "TTC": "F",
"ATG": "M",
"TGT": "C", "TGC": "C",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"TAT": "Y", "TAC": "Y",
"TGG": "W",
"CAA": "Q", "CAG": "Q",
"AAT": "N", "AAC": "N",
"CAT": "H", "CAC": "H",
"GAA": "E", "GAG": "E",
"GAT": "D", "GAC": "D",
"AAA": "K", "AAG": "K",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TAA": "*", "TAG": "*", "TGA": "*"}
# Translate valid codon
try:
amino_a = aa[codon]
except KeyError:
amino_a = "?"
return amino_a
def get_codon(seq, codon_no, start_offset):
"""
This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
"""
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon
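# Worked example (sketch): codon number 2 of "ATGAAACCC" with no start offset
# is "AAA", which translates to lysine.
#     get_codon("ATGAAACCC", 2, 0)  ->  "AAA"
#     aa("AAA")                     ->  "K"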
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
"""
This function is used to name an insertion mutation based on the HGVS
recommendation.
"""
start_codon_no =
#from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import errno
import json
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
import sys
sys.path.append("/home/liyi/video-pred/video_prediction")
from video_prediction import datasets, models, metrics
from video_prediction.policies.servo_policy import ServoPolicy
def compute_expectation_np(pix_distrib):
assert pix_distrib.shape[-1] == 1
pix_distrib = pix_distrib / np.sum(pix_distrib, axis=(-3, -2), keepdims=True)
height, width = pix_distrib.shape[-3:-1]
xv, yv = np.meshgrid(np.arange(width), np.arange(height))
return np.stack([np.sum(yv[:, :, None] * pix_distrib, axis=(-3, -2, -1)),
np.sum(xv[:, :, None] * pix_distrib, axis=(-3, -2, -1))], axis=-1)
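# Worked example (sketch): a delta distribution at row 3, column 5 has its
# expected pixel location exactly there.
#     d = np.zeros((8, 8, 1)); d[3, 5, 0] = 1.0
#     compute_expectation_np(d)  ->  array([3., 5.])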
def as_heatmap(image, normalize=True):
import matplotlib.pyplot as plt
image = np.squeeze(image, axis=-1)
if normalize:
image = image / np.max(image, axis=(-2, -1), keepdims=True)
cmap = plt.get_cmap('viridis')
heatmap = cmap(image)[..., :3]
return heatmap
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def resize_and_draw_circle(image, size, center, radius, dpi=128.0, **kwargs):
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import io
height, width = size
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1])
ax.imshow(image, interpolation='none')
circle = Circle(center[::-1], radius=radius, **kwargs)
ax.add_patch(circle)
ax.axis("off")
fig.canvas.draw()
trans = ax.figure.dpi_scale_trans.inverted()
bbox = ax.bbox.transformed(trans)
buff = io.BytesIO()
plt.savefig(buff, format="png", dpi=ax.figure.dpi, bbox_inches=bbox)
buff.seek(0)
image = plt.imread(buff)[..., :3]
plt.close(fig)
return image
def save_image_sequence(prefix_fname, images, overlaid_images=None, centers=None,
radius=5, alpha=0.8, time_start_ind=0):
import cv2
head, tail = os.path.split(prefix_fname)
if head and not os.path.exists(head):
os.makedirs(head)
if images.shape[-1] == 1:
images = as_heatmap(images)
if overlaid_images is not None:
assert images.shape[-1] == 3
assert overlaid_images.shape[-1] == 1
gray_images = rgb2gray(images)
overlaid_images = as_heatmap(overlaid_images)
images = (1 - alpha) * gray_images[..., None] + alpha * overlaid_images
for t, image in enumerate(images):
image_fname = '%s_%02d.png' % (prefix_fname, time_start_ind + t)
if centers is not None:
scale = np.max(np.array([256, 256]) / np.array(image.shape[:2]))
image = resize_and_draw_circle(image, np.array(image.shape[:2]) * scale, centers[t], radius,
edgecolor='r', fill=False, linestyle='--', linewidth=2)
image = (image * 255.0).astype(np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(image_fname, image)
def save_image_sequences(prefix_fname, images, overlaid_images=None, centers=None,
radius=5, alpha=0.8, sample_start_ind=0, time_start_ind=0):
head, tail = os.path.split(prefix_fname)
if head and not os.path.exists(head):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(images)
if centers is None:
centers = [None] * len(images)
for i, (images_, overlaid_images_, centers_) in enumerate(zip(images, overlaid_images, centers)):
images_fname = '%s_%05d' % (prefix_fname, sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_,
radius=radius, alpha=alpha, time_start_ind=time_start_ind)
def save_metrics(prefix_fname, metrics, sample_start_ind=0):
head, tail = os.path.split(prefix_fname)
if head and not os.path.exists(head):
os.makedirs(head)
assert metrics.ndim == 2
file_mode = 'w' if sample_start_ind == 0 else 'a'
with open('%s.csv' % prefix_fname, file_mode, newline='') as csvfile:
writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
if sample_start_ind == 0:
writer.writerow(map(str, ['sample_ind'] + list(range(metrics.shape[1])) + ['mean']))
for i, metrics_row in enumerate(metrics):
writer.writerow(map(str, [sample_start_ind + i] + list(metrics_row) + [np.mean(metrics_row)]))
def load_metrics(prefix_fname):
with open('%s.csv' % prefix_fname, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
rows = list(reader)
# skip header (first row), indices (first column), and means (last column)
metrics = np.array(rows)[1:, 1:-1].astype(np.float32)
return metrics
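# Sketch of the round trip (the path is a placeholder): save_metrics writes a
# header row plus, per sample, the index, the per-step values and their mean;
# load_metrics drops the header, index and mean again, recovering the array.
#     m = np.random.rand(4, 10).astype(np.float32)
#     save_metrics('/tmp/psnr_demo', m)
#     np.allclose(load_metrics('/tmp/psnr_demo'), m)  ->  True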
def merge_hparams(hparams0, hparams1):
hparams0 = hparams0 or []
hparams1 = hparams1 or []
if not isinstance(hparams0, (list, tuple)):
hparams0 = [hparams0]
if not isinstance(hparams1, (list, tuple)):
hparams1 = [hparams1]
hparams = list(hparams0) + list(hparams1)
# simplify into the content if possible
if len(hparams) == 1:
hparams, = hparams
return hparams
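# Worked examples (sketch): merging keeps both sets of overrides, and a single
# result is unwrapped back to its lone element.
#     merge_hparams('context_frames=2', ['sequence_length=12'])  ->  ['context_frames=2', 'sequence_length=12']
#     merge_hparams(None, 'context_frames=2')                    ->  'context_frames=2'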
def save_prediction_eval_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False, subtasks=None):
context_frames = model_hparams.context_frames
context_images = results['images'][:, :context_frames]
images = results['eval_images']
metric_names = ['psnr', 'ssim', 'ssim_scikit', 'ssim_finn', 'ssim_mcnet', 'vgg_csim']
metric_fns = [metrics.peak_signal_to_noise_ratio_np,
metrics.structural_similarity_np,
metrics.structural_similarity_scikit_np,
metrics.structural_similarity_finn_np,
metrics.structural_similarity_mcnet_np,
None]
subtasks = subtasks or ['max']
for metric_name, metric_fn in zip(metric_names, metric_fns):
for subtask in subtasks:
subtask_dir = task_dir + '_%s_%s' % (metric_name, subtask)
gen_images = results.get('eval_gen_images_%s/%s' % (metric_name, subtask), results.get('eval_gen_images'))
if metric_fn is not None: # recompute using numpy implementation
metric = metric_fn(images, gen_images, keep_axis=(0, 1))
else:
metric = results['eval_%s/%s' % (metric_name, subtask)]
save_metrics(os.path.join(subtask_dir, 'metrics', metric_name),
metric, sample_start_ind=sample_start_ind)
if only_metrics:
continue
save_image_sequences(os.path.join(subtask_dir, 'inputs', 'context_image'),
context_images, sample_start_ind=sample_start_ind)
save_image_sequences(os.path.join(subtask_dir, 'outputs', 'gen_image'),
gen_images, sample_start_ind=sample_start_ind)
def save_prediction_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):
context_frames = model_hparams.context_frames
sequence_length = model_hparams.sequence_length
context_images, images = np.split(results['images'], [context_frames], axis=1)
gen_images = results['gen_images'][:, context_frames - sequence_length:]
psnr = metrics.peak_signal_to_noise_ratio_np(images, gen_images, keep_axis=(0, 1))
mse = metrics.mean_squared_error_np(images, gen_images, keep_axis=(0, 1))
ssim = metrics.structural_similarity_np(images, gen_images, keep_axis=(0, 1))
save_metrics(os.path.join(task_dir, 'metrics', 'psnr'),
psnr, sample_start_ind=sample_start_ind)
save_metrics(os.path.join(task_dir, 'metrics', 'mse'),
mse, sample_start_ind=sample_start_ind)
save_metrics(os.path.join(task_dir, 'metrics', 'ssim'),
ssim, sample_start_ind=sample_start_ind)
if only_metrics:
return
save_image_sequences(os.path.join(task_dir, 'inputs', 'context_image'),
context_images, sample_start_ind=sample_start_ind)
save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image'),
gen_images, sample_start_ind=sample_start_ind)
def save_motion_results(task_dir, results, model_hparams, draw_center=False,
sample_start_ind=0, only_metrics=False):
context_frames = model_hparams.context_frames
sequence_length = model_hparams.sequence_length
pix_distribs = results['pix_distribs'][:, context_frames:]
gen_pix_distribs = results['gen_pix_distribs'][:, context_frames - sequence_length:]
pix_dist = metrics.expected_pixel_distance_np(pix_distribs, gen_pix_distribs, keep_axis=(0, 1))
save_metrics(os.path.join(task_dir, 'metrics', 'pix_dist'),
pix_dist, sample_start_ind=sample_start_ind)
if only_metrics:
return
context_images, images = np.split(results['images'], [context_frames], axis=1)
gen_images = results['gen_images'][:, context_frames - sequence_length:]
initial_pix_distrib = results['pix_distribs'][:, 0:1]
num_motions = pix_distribs.shape[-1]
for i in range(num_motions):
output_name_posfix = '%d' % i if num_motions > 1 else ''
centers = compute_expectation_np(initial_pix_distrib[..., i:i + 1]) if draw_center else None
save_image_sequences(os.path.join(task_dir, 'inputs', 'pix_distrib%s' % output_name_posfix),
context_images[:, 0:1], initial_pix_distrib[..., i:i + 1], centers, sample_start_ind=sample_start_ind)
centers = compute_expectation_np(gen_pix_distribs[..., i:i + 1]) if draw_center else None
save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_pix_distrib%s' % output_name_posfix),
gen_images, gen_pix_distribs[..., i:i + 1], centers, sample_start_ind=sample_start_ind)
def save_servo_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):
context_frames = model_hparams.context_frames
sequence_length = model_hparams.sequence_length
context_images, images = np.split(results['images'], [context_frames], axis=1)
gen_images = results['gen_images'][:, context_frames - sequence_length:]
goal_image = results['goal_image']
# TODO: should exclude "context" actions assuming that they are passed in to the network
actions = results['actions']
gen_actions = results['gen_actions']
goal_image_mse = metrics.mean_squared_error_np(goal_image, gen_images[:, -1], keep_axis=0)
action_mse = metrics.mean_squared_error_np(actions, gen_actions, keep_axis=(0, 1))
save_metrics(os.path.join(task_dir, 'metrics', 'goal_image_mse'),
goal_image_mse[:, None], sample_start_ind=sample_start_ind)
save_metrics(os.path.join(task_dir, 'metrics', 'action_mse'),
action_mse, sample_start_ind=sample_start_ind)
if only_metrics:
return
save_image_sequences(os.path.join(task_dir, 'inputs', 'context_image'),
context_images, sample_start_ind=sample_start_ind)
save_image_sequences(os.path.join(task_dir, 'inputs', 'goal_image'),
goal_image[:, None], sample_start_ind=sample_start_ind)
save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image'),
gen_images, sample_start_ind=sample_start_ind)
gen_image_goal_diffs = np.abs(gen_images - goal_image[:, None])
save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image_goal_diff'),
gen_image_goal_diffs, sample_start_ind=sample_start_ind)
def main():
"""
results_dir
├── output_dir # condition / method
│ ├── prediction # task
│ │ ├── inputs
│ │ │ ├── context_image_00000_00.png # indexed by sample index and time step
│ │ │ └── ...
│ │ ├── outputs
│ │ │ ├── gen_image_00000_00.png # predicted images (only the ones in the loss)
│ │ │ └── ...
│ │ └── metrics
│ │ ├── psnr.csv
│ │ ├── mse.csv
│ │ └── ssim.csv
│ ├── prediction_eval_vgg_csim_max # task: best sample in terms of VGG cosine similarity
│ │ ├── inputs
│ │ │ ├── context_image_00000_00.png # indexed by sample index and time step
│ │ │ └── ...
│ │ ├── outputs
│ │ │ ├── gen_image_00000_00.png # predicted images (only the ones in the loss)
│ │ │ └── ...
│ │ └── metrics
│ │ └── vgg_csim.csv
│ ├── servo
│ │ ├── inputs
│ │ │ ├── context_image_00000_00.png
│ │ │ ├── ...
│ │ │ ├── goal_image_00000_00.png # only one goal image per sample
│ │ │ └── ...
│ │ ├── outputs
│ │ │ ├── gen_image_00000_00.png
│ │ │ ├── ...
│ │ │ ├── gen_image_goal_diff_00000_00.png
│ │ │ └── ...
│ │ └── metrics
│ │ ├── action_mse.csv
│ │ └── goal_image_mse.csv
│ ├── motion
│ │ ├── inputs
│ │ │ ├── pix_distrib_00000_00.png
│ │ │ └── ...
│ │ ├── outputs
│ │ │ ├── gen_pix_distrib_00000_00.png
│ │ │ ├── ...
│ │ │ ├── gen_pix_distrib_overlaid_00000_00.png
│ │ │ └── ...
│ │ └── metrics
│ │ └── pix_dist.csv
│ └── ...
└── ...
"""
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str, required=True, help="either a directory containing subdirectories "
"train, val, test, etc, or a directory containing "
"the tfrecords")
parser.add_argument("--results_dir", type=str, default='results', help="ignored if output_dir is specified")
parser.add_argument("--output_dir", help="output directory where results are saved. default is results_dir/model_fname, "
"where model_fname is the directory name of checkpoint")
parser.add_argument("--checkpoint", help="directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)")
parser.add_argument("--mode", type=str, choices=['val', 'test'], default='val', help='mode for dataset, val or test.')
parser.add_argument("--dataset", type=str, help="dataset class name")
parser.add_argument("--dataset_hparams", type=str, help="a string of comma separated list of dataset hyperparameters")
parser.add_argument("--model", type=str, help="model class name")
parser.add_argument("--model_hparams", type=str, help="a string of comma separated list of model hyperparameters")
parser.add_argument("--batch_size", type=int, default=8, help="number of samples in batch")
parser.add_argument("--num_samples", type=int, help="number of samples in total (all of them by default)")
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--tasks", type=str, nargs='+', help='tasks to evaluate (e.g. prediction, prediction_eval, servo, motion)')
parser.add_argument("--eval_substasks", type=str, nargs='+', default=['max', 'min'], | |
# coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The IBM Watson™ Tone Analyzer service uses linguistic analysis to detect emotional
and language tones in written text. The service can analyze tone at both the document and
sentence levels. You can use the service to understand how your written communications are
perceived and then to improve the tone of your communications. Businesses can use the
service to learn the tone of their customers' communications and to respond to each
customer appropriately, or to understand and improve their customer conversations.
**Note:** Request logging is disabled for the Tone Analyzer service. Regardless of whether
you set the `X-Watson-Learning-Opt-Out` request header, the service does not log or retain
data from requests and responses.
"""
from __future__ import absolute_import
import json
from .watson_service import WatsonService
from .utils import deprecated
##############################################################################
# Service
##############################################################################
@deprecated("watson-developer-cloud moved to ibm-watson. To get updates, use the new package.")
class ToneAnalyzerV3(WatsonService):
"""The Tone Analyzer V3 service."""
default_url = 'https://gateway.watsonplatform.net/tone-analyzer/api'
def __init__(
self,
version,
url=default_url,
username=None,
            password=None,
iam_apikey=None,
iam_access_token=None,
iam_url=None,
):
"""
Construct a new client for the Tone Analyzer service.
:param str version: The API version date to use with the service, in
"YYYY-MM-DD" format. Whenever the API is changed in a backwards
incompatible way, a new minor version of the API is released.
The service uses the API version for the date you specify, or
the most recent version before that date. Note that you should
not programmatically specify the current date at runtime, in
case the API has been updated since your application's release.
Instead, specify a version date that is compatible with your
application, and don't change it until your application is
ready for a later version.
:param str url: The base url to use when contacting the service (e.g.
"https://gateway.watsonplatform.net/tone-analyzer/api").
The base url may differ between Bluemix regions.
:param str username: The username used to authenticate with the service.
Username and password credentials are only required to run your
application locally or outside of Bluemix. When running on
Bluemix, the credentials will be automatically loaded from the
`VCAP_SERVICES` environment variable.
:param str password: The password used to authenticate with the service.
Username and password credentials are only required to run your
application locally or outside of Bluemix. When running on
Bluemix, the credentials will be automatically loaded from the
`VCAP_SERVICES` environment variable.
:param str iam_apikey: An API key that can be used to request IAM tokens. If
this API key is provided, the SDK will manage the token and handle the
refreshing.
:param str iam_access_token: An IAM access token is fully managed by the application.
Responsibility falls on the application to refresh the token, either before
it expires or reactively upon receiving a 401 from the service as any requests
made with an expired token will fail.
:param str iam_url: An optional URL for the IAM service API. Defaults to
'https://iam.bluemix.net/identity/token'.
"""
WatsonService.__init__(
self,
vcap_services_name='tone_analyzer',
url=url,
username=username,
password=password,
iam_apikey=iam_apikey,
iam_access_token=iam_access_token,
iam_url=iam_url,
use_vcap_services=True,
display_name='Tone Analyzer')
self.version = version
#########################
# Methods
#########################
def tone(self,
tone_input,
content_type=None,
sentences=None,
tones=None,
content_language=None,
accept_language=None,
**kwargs):
"""
Analyze general tone.
Use the general purpose endpoint to analyze the tone of your input content. The
service analyzes the content for emotional and language tones. The method always
analyzes the tone of the full document; by default, it also analyzes the tone of
each individual sentence of the content.
You can submit no more than 128 KB of total input content and no more than 1000
individual sentences in JSON, plain text, or HTML format. The service analyzes the
first 1000 sentences for document-level analysis and only the first 100 sentences
for sentence-level analysis.
Per the JSON specification, the default character encoding for JSON content is
effectively always UTF-8; per the HTTP specification, the default encoding for
plain text and HTML is ISO-8859-1 (effectively, the ASCII character set). When
specifying a content type of plain text or HTML, include the `charset` parameter
to indicate the character encoding of the input text; for example: `Content-Type:
text/plain;charset=utf-8`. For `text/html`, the service removes HTML tags and
analyzes only the textual content.
**See also:** [Using the general-purpose
endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint).
:param ToneInput tone_input: JSON, plain text, or HTML input that contains the
content to be analyzed. For JSON input, provide an object of type `ToneInput`.
:param str content_type: The type of the input. A character encoding can be
specified by including a `charset` parameter. For example,
'text/plain;charset=utf-8'.
:param bool sentences: Indicates whether the service is to return an analysis of
each individual sentence in addition to its analysis of the full document. If
`true` (the default), the service returns results for each sentence.
:param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to
accept the parameter for backward-compatibility, but the parameter no longer
affects the response.
**`2016-05-19`:** A comma-separated list of tones for which the service is to
return its analysis of the input; the indicated tones apply both to the full
document and to individual sentences of the document. You can specify one or more
of the valid values. Omit the parameter to request results for all three tones.
:param str content_language: The language of the input text for the request:
English or French. Regional variants are treated as their parent language; for
example, `en-US` is interpreted as `en`. The input content must match the
specified language. Do not submit content that contains both languages. You can
use different languages for **Content-Language** and **Accept-Language**.
* **`2017-09-21`:** Accepts `en` or `fr`.
* **`2016-05-19`:** Accepts only `en`.
:param str accept_language: The desired language of the response. For
two-character arguments, regional variants are treated as their parent language;
for example, `en-US` is interpreted as `en`. You can use different languages for
**Content-Language** and **Accept-Language**.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if tone_input is None:
raise ValueError('tone_input must be provided')
if isinstance(tone_input, ToneInput):
tone_input = self._convert_model(tone_input, ToneInput)
headers = {
'Content-Type': content_type,
'Content-Language': content_language,
'Accept-Language': accept_language
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers[
'X-IBMCloud-SDK-Analytics'] = 'service_name=tone_analyzer;service_version=V3;operation_id=tone'
params = {
'version': self.version,
'sentences': sentences,
'tones': self._convert_list(tones)
}
if content_type == 'application/json' and isinstance(tone_input, dict):
data = json.dumps(tone_input)
else:
data = tone_input
url = '/v3/tone'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response
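    # Minimal usage sketch for this method (illustrative only: the version date,
    # API key and input text are placeholders, and get_result() is assumed to be
    # available on the returned DetailedResponse):
    #   service = ToneAnalyzerV3(version='2017-09-21', iam_apikey='<api key>')
    #   response = service.tone({'text': 'I am very happy today!'},
    #                           content_type='application/json')
    #   analysis = response.get_result()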
def tone_chat(self,
utterances,
content_language=None,
accept_language=None,
**kwargs):
"""
Analyze customer engagement tone.
Use the customer engagement endpoint to analyze the tone of customer service and
customer support conversations. For each utterance of a conversation, the method
reports the most prevalent subset of the following seven tones: sad, frustrated,
satisfied, excited, polite, impolite, and sympathetic.
If you submit more than 50 utterances, the service returns a warning for the
overall content and analyzes only the first 50 utterances. If you submit a single
utterance that contains more than 500 characters, the service returns an error for
that utterance and does not analyze the utterance. The request fails if all
utterances have more than 500 characters. Per the JSON specification, the default
character encoding for JSON content is effectively always UTF-8.
**See also:** [Using the customer-engagement
endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone-chat.html#using-the-customer-engagement-endpoint).
:param list[Utterance] utterances: An array of `Utterance` objects that provides
the input content that the service is to analyze.
:param str content_language: The language of the input text for the request:
English or French. Regional variants are treated as their parent language; for
        example, `en-US` is interpreted as `en`.
        """
from montepython.likelihood_class import Likelihood
import io_mp
import scipy.integrate
from scipy import interpolate as itp
import os
import numpy as np
import math
# Adapted from <NAME>
class euclid_lensing(Likelihood):
def __init__(self, path, data, command_line):
Likelihood.__init__(self, path, data, command_line)
# Force the cosmological module to store Pk for redshifts up to
# max(self.z)
self.need_cosmo_arguments(data, {'output': 'mPk'})
self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
# Force the cosmological module to store Pk for k up to an arbitrary
# number
self.need_cosmo_arguments(data, {'P_k_max_1/Mpc': self.k_max})
# Define array of l values, and initialize them
# It is a logspace
self.l = np.exp(self.dlnl*np.arange(self.nlmax))
########################################################
# Find distribution of dn_dz (not normalized) in each bin
########################################################
# Assuming each bin contains the same number of galaxies, we find the
# bin limits in z space
# Compute the total number of galaxies until zmax (no normalization
# yet), that is the integral of the galaxy distribution function from 0
# to self.zmax
n_tot, error = scipy.integrate.quad(
self.galaxy_distribution, 0, self.zmax)
assert error <= 1e-7, (
"The integration of the galaxy distribution is not as "
"precise as expected.")
# For each bin, compute the limit in z space
# Create the array that will contain the z boundaries for each bin. The
# first value is already correctly set to 0.
self.z_bin_edge = np.zeros(self.nbin+1, 'float64')
for Bin in xrange(self.nbin-1):
bin_count = 0.
z = self.z_bin_edge[Bin]
while (bin_count <= n_tot/self.nbin):
gd_1 = self.galaxy_distribution(z)
gd_2 = self.galaxy_distribution(z+self.dz)
bin_count += 0.5*(gd_1+gd_2)*self.dz
z += self.dz
self.z_bin_edge[Bin+1] = z
self.z_bin_edge[self.nbin] = self.zmax
# Fill array of discrete z values
self.z = np.linspace(0, self.zmax, num=self.nzmax)
# Fill distribution for each bin (convolving with photo_z distribution)
self.eta_z = np.zeros((self.nzmax, self.nbin), 'float64')
gal = self.galaxy_distribution(self.z, True)
for Bin in xrange(self.nbin):
low = self.z_bin_edge[Bin]
hig = self.z_bin_edge[Bin+1]
for nz in xrange(self.nzmax):
z = self.z[nz]
integrand = gal*self.photo_z_distribution(z, self.z, True)
integrand = np.array([
elem if low <= self.z[index] <= hig else 0
for index, elem in enumerate(integrand)])
self.eta_z[nz, Bin] = scipy.integrate.trapz(
integrand,
self.z)
# integrate eta(z) over z (in view of normalizing it to one)
self.eta_norm = np.zeros(self.nbin, 'float64')
for Bin in xrange(self.nbin):
self.eta_norm[Bin] = np.sum(0.5*(
self.eta_z[1:, Bin]+self.eta_z[:-1, Bin])*(
self.z[1:]-self.z[:-1]))
################
# Noise spectrum
################
# Number of galaxies per steradian
self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2
# Number of galaxies per steradian per bin
self.noise = self.noise/self.nbin
# Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
self.noise = self.rms_shear**2/self.noise
###########
# Read data
###########
# If the file exists, initialize the fiducial values
# It has been stored flat, so we use the reshape function to put it in
# the right shape.
self.Cl_fid = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
self.fid_values_exist = False
fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
if os.path.exists(fid_file_path):
self.fid_values_exist = True
flat_Cl = np.loadtxt(fid_file_path)
self.Cl_fid = flat_Cl.reshape((self.nlmax, self.nbin, self.nbin))
return
def galaxy_distribution(self, z, array=False):
"""
Galaxy distribution returns the function D(z) from the notes
        If the array flag is set to True, z is then interpreted as an array,
and not as a single value.
"""
zmean = 0.9
z0 = zmean/1.412
if not array:
galaxy_dist = z**2*math.exp(-(z/z0)**(1.5))
else:
return z**2*np.exp(-(z/z0)**(1.5))
return galaxy_dist
def photo_z_distribution(self, z, zph, array=True):
"""
Photo z distribution
        If the array flag is set to True, z is then interpreted as an array,
and not as a single value.
"""
# Standard error on dz/(1+z)
sigma_ph = 0.05
# Note: you must normalize it yourself to one if you want to get nice
# plots of the galaxy distribution function in each bin (otherwise, the
        # spectra will remain correct, but each D_i(x) will look strangely
        # normalized when compared to the original D(z))
if not array:
photo_z_dist = math.exp(-0.5*(
(z-zph)/sigma_ph/(1.+z))**2)/sigma_ph/(1.+z)/math.sqrt(
2.*math.pi)
else:
photo_z_dist = np.exp(-0.5*(
(z-zph)/sigma_ph/(1.+z))**2)/sigma_ph/(1.+z)/math.sqrt(
2.*math.pi)
return photo_z_dist
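    # photo_z_distribution implements a Gaussian photometric-redshift kernel,
    #   p(z | zph) = exp(-0.5*((z - zph)/(sigma_ph*(1+z)))**2) / (sigma_ph*(1+z)*sqrt(2*pi)),
    # with a scatter sigma_ph = 0.05 that grows with (1+z); __init__ convolves it
    # with the galaxy distribution to build the per-bin eta_z used in loglkl.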
def loglkl(self, cosmo, data):
# One wants to obtain here the relation between z and r, this is done
# by asking the cosmological module with the function z_of_r
self.r = np.zeros(self.nzmax, 'float64')
self.dzdr = np.zeros(self.nzmax, 'float64')
self.r, self.dzdr = cosmo.z_of_r(self.z)
# Compute now the selection function eta(r) = eta(z) dz/dr normalized
# to one. The np.newaxis helps to broadcast the one-dimensional array
# dzdr to the proper shape. Note that eta_norm is also broadcasted as
# an array of the same shape as eta_z
self.eta_r = self.eta_z*(self.dzdr[:, np.newaxis]/self.eta_norm)
# Compute function g_i(r), that depends on r and the bin
# g_i(r) = 2r(1+z(r)) int_0^+\infty drs eta_r(rs) (rs-r)/rs
# TODO is the integration from 0 or r ?
g = np.zeros((self.nzmax, self.nbin), 'float64')
for Bin in xrange(self.nbin):
for nr in xrange(1, self.nzmax-1):
fun = self.eta_r[nr:, Bin]*(self.r[nr:]-self.r[nr])/self.r[nr:]
g[nr, Bin] = np.sum(0.5*(
fun[1:]+fun[:-1])*(self.r[nr+1:]-self.r[nr:-1]))
g[nr, Bin] *= 2.*self.r[nr]*(1.+self.z[nr])
# Get power spectrum P(k=l/r,z(r)) from cosmological module
pk = np.zeros((self.nlmax, self.nzmax), 'float64')
for index_l in xrange(self.nlmax):
for index_z in xrange(1, self.nzmax):
if (self.l[index_l]/self.r[index_z] > self.k_max):
                    raise io_mp.LikelihoodError(
                        "you should increase euclid_lensing.k_max up to at "
                        "least %g" % (self.l[index_l]/self.r[index_z]))
pk[index_l, index_z] = cosmo.pk(
self.l[index_l]/self.r[index_z], self.z[index_z])
# Recover the non_linear scale computed by halofit. If no scale was
# affected, set the scale to one, and make sure that the nuisance
# parameter epsilon is set to zero
k_sigma = np.zeros(self.nzmax, 'float64')
if (cosmo.nonlinear_method == 0):
k_sigma[:] = 1.e6
else:
k_sigma = cosmo.nonlinear_scale(self.z, self.nzmax)
# Define the alpha function, that will characterize the theoretical
# uncertainty. Chosen to be 0.001 at low k, raise between 0.1 and 0.2
# to self.theoretical_error
alpha = np.zeros((self.nlmax, self.nzmax), 'float64')
# self.theoretical_error = 0.1
if self.theoretical_error != 0:
for index_l in range(self.nlmax):
k = self.l[index_l]/self.r[1:]
alpha[index_l, 1:] = np.log(1.+k[:]/k_sigma[1:])/(
1.+np.log(1.+k[:]/k_sigma[1:]))*self.theoretical_error
# recover the e_th_nu part of the error function
e_th_nu = self.coefficient_f_nu*cosmo.Omega_nu/cosmo.Omega_m()
# Compute the Error E_th_nu function
if 'epsilon' in self.use_nuisance:
E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64')
for index_l in range(1, self.nlmax):
E_th_nu[index_l, :] = np.log(
1.+self.l[index_l]/k_sigma[:]*self.r[:]) / (
1.+np.log(1.+self.l[index_l]/k_sigma[:]*self.r[:]))*e_th_nu
# Add the error function, with the nuisance parameter, to P_nl_th, if
# the nuisance parameter exists
        if 'epsilon' in self.use_nuisance:
            for index_l in range(self.nlmax):
                epsilon = data.mcmc_parameters['epsilon']['current']*(
                    data.mcmc_parameters['epsilon']['scale'])
                pk[index_l, :] *= (1.+epsilon*E_th_nu[index_l, :])
# Start loop over l for computation of C_l^shear
Cl_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
Cl = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
# Start loop over l for computation of E_l
if self.theoretical_error != 0:
El_integrand = np.zeros((self.nzmax, self.nbin, self.nbin),
'float64')
El = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
for nl in xrange(self.nlmax):
# find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(self.nbin):
Cl_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*g[1:, Bin2]/(
self.r[1:]**2)*pk[nl, 1:]
if self.theoretical_error != 0:
El_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*(
g[1:, Bin2])/(
self.r[1:]**2)*pk[nl, 1:]*alpha[nl, 1:]
# Integrate over r to get C_l^shear_ij = P_ij(l)
# C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r)
# g_j(r) /r**2) P(k=l/r,z(r))
# It it then multiplied by 9/16*Omega_m**2 to be in units of Mpc**4
# and then by (h/2997.9)**4 to be dimensionless
for Bin1 in xrange(self.nbin):
for Bin2 in xrange(self.nbin):
Cl[nl, Bin1, Bin2] = np.sum(0.5*(
Cl_integrand[1:, Bin1, Bin2] +
Cl_integrand[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
Cl[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2
Cl[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4
if self.theoretical_error != 0:
El[nl, Bin1, Bin2] = np.sum(0.5*(
El_integrand[1:, Bin1, Bin2] +
El_integrand[:-1, Bin1, Bin2])*(
self.r[1:]-self.r[:-1]))
El[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2
El[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4
if Bin1 == Bin2:
Cl[nl, Bin1, Bin2] += self.noise
# Write fiducial model spectra if needed (exit in that case)
if self.fid_values_exist is False:
# Store the values now, and exit.
fid_file_path = os.path.join(
self.data_directory, self.fiducial_file)
with open(fid_file_path, 'w') as fid_file:
fid_file.write('# Fiducial parameters')
for key, value in data.mcmc_parameters.iteritems():
fid_file.write(
', %s = %.5g' % (key, value['current']*value['scale']))
fid_file.write('\n')
for nl in range(self.nlmax):
for Bin1 in range(self.nbin):
for Bin2 in range(self.nbin):
fid_file.write("%.8g\n" % Cl[nl, Bin1, Bin2])
print '\n\n /|\ Writing fiducial model in {0}'.format(
fid_file_path)
print '/_o_\ for {0} likelihood'.format(self.name)
return 1j
# Now that the fiducial model is stored, we add the El to both Cl and
# Cl_fid (we create a new array, otherwise we would modify the
# self.Cl_fid from one step to the other)
# Spline Cl[nl,Bin1,Bin2] along l
        spline_Cl
import pandas as pd
import numpy as np
import scipy.signal as signal
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels
import statsmodels.api as sm
from matplotlib.lines import Line2D
from RhythmCount import helpers as hlp
from RhythmCount import plot
import math
colors = ['blue', 'green', 'orange', 'red', 'purple', 'olive', 'tomato', 'yellow', 'pink', 'turquoise', 'lightgreen']
count_models = ['poisson', 'zero_poisson', 'gen_poisson', 'nb', 'zero_nb']
n_components = [1, 2, 3, 4]
def clean_data(df):
df = df.dropna(subset=['X', 'Y'])
x = int(df['X'].unique().max() + 1)
for hour in range(0, x, 1):
df_hour = df.loc[df.X == hour].copy()
# cleaning outliers
df_hour = df_hour.loc[df_hour.Y >= df_hour.Y.quantile(0.15)].copy()
df_hour = df_hour.loc[df_hour.Y <= df_hour.Y.quantile(0.85)].copy()
df.loc[df['X'] == hour, ['Y']] = df_hour['Y']
df = df.dropna(subset=['X', 'Y'])
return df
def fit_to_models(df, count_models=count_models, n_components=n_components, maxiter=5000, maxfun=5000, disp=0,
method='nm', plot_models=True, period=24, save_file_to='models.pdf'):
df_results = pd.DataFrame()
if plot_models:
rows, cols = hlp.get_factors(len(count_models))
fig = plt.figure(figsize=(8 * cols, 8 * rows))
i = 0
for count_model in count_models:
c = 0
for n_component in n_components:
_, df_result, _ = fit_to_model(df, n_component, count_model, period, maxiter, maxfun, method, disp)
# plot
if plot_models:
ax = plt.subplot(rows, cols, i+1)
title = hlp.get_model_name(count_model)
if c == 0:
plot.subplot_model(df['X'], df['Y'], df_result['X_test'], df_result['Y_test'], ax, color=colors[c],
title=title, fit_label='N=' + str(n_component))
else:
plot.subplot_model(df['X'], df['Y'], df_result['X_test'], df_result['Y_test'], ax, color=colors[c],
title=title, fit_label='N=' + str(n_component), plot_measurements=False)
c = c + 1
df_results = df_results.append(df_result, ignore_index=True)
i = i + 1
# show plots
if plot_models:
ax_list = fig.axes
for ax in ax_list:
ax.legend(loc='upper left', fontsize='medium')
fig.tight_layout()
plt.show()
# save
try:
hlp.make_results_dir()
fig.savefig(r'results\/' + save_file_to)
except:
print("Can not save plot.")
return df_results
def cosinor_generate_independents(X, n_components, period=24):
X_test = np.linspace(0, 100, 1000)
for i in range(n_components):
k = i + 1
A = np.sin((X / (period / k)) * np.pi * 2)
B = np.cos((X / (period / k)) * np.pi * 2)
A_test = np.sin((X_test / (period / k)) * np.pi * 2)
B_test = np.cos((X_test / (period / k)) * np.pi * 2)
if i == 0:
X_fit = np.column_stack((A, B))
X_fit_test = np.column_stack((A_test, B_test))
else:
X_fit = np.column_stack((X_fit, A, B))
X_fit_test = np.column_stack((X_fit_test, A_test, B_test))
X_fit_eval_params = X_fit_test
return X_fit, X_test, X_fit_test, X_fit_eval_params
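# The matrices built above encode the cosinor model: for each component k the
# pair sin(2*pi*k*X/period), cos(2*pi*k*X/period) is added as a regressor, so the
# count models fitted below see a mean of the form exp(b0 + sum_k a_k*sin(.) +
# b_k*cos(.)). A quick sketch of how it is called (illustrative only; the hourly
# counts are made up):
#   df = pd.DataFrame({'X': np.tile(np.arange(24), 10),
#                      'Y': np.random.poisson(5, 240)})
#   X_fit, X_test, X_fit_test, _ = cosinor_generate_independents(df['X'], 2)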
def fit_to_model(df, n_components, count_model, period, maxiter, maxfun, method, disp):
X_fit, X_test, X_fit_test, X_fit_eval_params = cosinor_generate_independents(df['X'], n_components=n_components, period=period)
Y = df['Y'].to_numpy()
X_fit = sm.add_constant(X_fit, has_constant='add')
X_fit_test = sm.add_constant(X_fit_test, has_constant='add')
X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')
if count_model == 'poisson':
model = statsmodels.discrete.discrete_model.Poisson(Y, X_fit)
results = model.fit(maxiter=maxiter, maxfun=maxfun, method=method, disp=disp)
elif count_model == 'gen_poisson':
model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit, p=1)
results = model.fit(maxiter=maxiter, maxfun=maxfun, method=method, disp=disp)
elif count_model == 'zero_poisson':
model = statsmodels.discrete.count_model.ZeroInflatedPoisson(endog=Y, exog=X_fit, exog_infl=X_fit)
results = model.fit(maxiter=maxiter, maxfun=maxfun, skip_hessian=True, method=method, disp=disp)
elif count_model == 'zero_nb':
model = statsmodels.discrete.count_model.ZeroInflatedNegativeBinomialP(endog=Y, exog=X_fit, exog_infl=X_fit,
p=1)
results = model.fit(maxiter=maxiter, maxfun=maxfun, skip_hessian=True, method=method, disp=disp)
elif count_model == 'nb':
model = statsmodels.discrete.discrete_model.NegativeBinomialP(Y, X_fit, p=1)
results = model.fit(maxiter=maxiter, maxfun=maxfun, method=method, disp=disp)
else:
raise Exception("Invalid model type.")
if count_model == 'zero_nb' or count_model == "zero_poisson":
Y_test = results.predict(X_fit_test, exog_infl=X_fit_test)
Y_eval_params = results.predict(X_fit_eval_params, exog_infl=X_fit_eval_params)
Y_fit = results.predict(X_fit, exog_infl=X_fit)
else:
Y_test = results.predict(X_fit_test)
Y_eval_params = results.predict(X_fit_eval_params)
Y_fit = results.predict(X_fit)
rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params)
df_result = calculate_statistics(Y, Y_fit, n_components, results, model, count_model, rhythm_params)
df_result.update({'X_test': X_test})
df_result.update({'Y_test': Y_test})
return results, df_result, X_fit_test
def calculate_confidence_intervals(df, n_components, count_model, repetitions=20, maxiter=5000, maxfun=5000, method='nm',
period=24):
sample_size = round(df.shape[0] - df.shape[0] / 3)
for i in range(0, repetitions):
sample = df.sample(sample_size)
results, _, _ = fit_to_model(sample, n_components, count_model, period, maxiter, maxfun, method, 0)
if i == 0:
save = pd.DataFrame({str(i): results.params})
else:
save[str(i)] = results.params
columns = save.shape[0]
mean = save.mean(axis=1)
std = save.std(axis=1)
save = pd.DataFrame({"mean": mean, "std": std})
save['CI1'] = save['mean'] - 1.96 * save['std']
save['CI2'] = save['mean'] + 1.96 * save['std']
CIs = pd.DataFrame({0: [], 1: []})
for i in range(columns):
CIs = CIs.append({0: save['CI1'].iloc[i], 1: save['CI2'].iloc[i]}, ignore_index=True)
return CIs
def evaluate_rhythm_params(X, Y, period=24):
X = X[:period * 10]
Y = Y[:period * 10]
m = min(Y)
M = max(Y)
A = M - m
MESOR = m + A / 2
AMPLITUDE = A / 2
locs, heights = signal.find_peaks(Y, height=M * 0.75)
heights = heights['peak_heights']
x = np.take(X, locs)
result = {'amplitude': round(AMPLITUDE, 2), 'mesor': round(MESOR, 2), 'locs': np.around(x, decimals=2),
'heights': np.around(heights, decimals=2)}
return result
def calculate_statistics(Y, Y_fit, n_components, results, model, count_model, rhythm_param):
# RSS
RSS = sum((Y - Y_fit) ** 2)
# p
p = results.llr_pvalue
# AIC
aic = results.aic
# BIC
bic = results.bic
# llf for each observation
logs = model.loglikeobs(results.params)
return {'count_model': count_model, 'n_components': n_components,
'amplitude': rhythm_param['amplitude'],
'mesor': rhythm_param['mesor'], 'peaks': rhythm_param['locs'], 'heights': rhythm_param['heights'], 'llr_pvalue': p,
'RSS': RSS, 'AIC': aic, 'BIC': bic,
'log_likelihood': results.llf, 'logs': logs, 'mean(est)': Y_fit.mean(), 'Y(est)': Y_fit}
def get_best_n_components(df_results, test, count_model=None):
if count_model:
df_results = df_results[df_results['count_model'] == count_model].copy()
df_results = df_results.sort_values(by='n_components')
i = 0
for index, new_row in df_results.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
if best_row['n_components'] == new_row['n_components']: # non-nested
if test == 'AIC':
best_row = AIC_test(best_row, new_row)
elif test == 'BIC':
best_row = BIC_test(best_row, new_row)
elif test == 'Vuong':
best_row = vuong_test(best_row, new_row)
else: # nested
best_row = f_test(best_row, new_row)
return best_row
def get_best_count_model(df_results, test, n_components=None):
if n_components:
df_results = df_results[df_results['n_components'] == n_components].copy()
df_results = df_results.sort_values(by='count_model')
i = 0
for index, new_row in df_results.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
if test == 'AIC':
best_row = AIC_test(best_row, new_row)
elif test == 'BIC':
best_row = BIC_test(best_row, new_row)
elif test == 'Vuong':
best_row = vuong_test(best_row, new_row)
elif test == 'F':
best_row = f_test(best_row, new_row)
else:
raise Exception("Invalid criterium option.")
return best_row
def vuong_test(first_row, second_row):
n_points = len(first_row['logs'])
DF1 = first_row.n_components * 2 + 1
DF2 = second_row.n_components * 2 + 1
DoF = DF2 - DF1
LR = second_row['log_likelihood'] - first_row['log_likelihood'] - (DoF / 2) * math.log(n_points, 10)
var = (1 / n_points) * sum((second_row['logs'] - first_row['logs']) ** 2)
Z = LR / math.sqrt(n_points * var)
v = 1 - stats.norm.cdf(Z, DoF, DF1)
if v < 0.05:
return second_row
return first_row
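# vuong_test above compares two non-nested fits through the difference of their
# log-likelihoods, corrected for the difference in parameter count and scaled by
# the variance of the per-observation log-likelihood contributions; the second
# model is preferred when the resulting one-sided p-value falls below 0.05.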
def AIC_test(first_row, second_row):
if second_row['AIC'] < first_row['AIC']:
return second_row
return first_row
def BIC_test(first_row, second_row):
if second_row['BIC'] < first_row['BIC']:
return second_row
return first_row
def f_test(first_row, second_row):
n_points = len(first_row['logs'])
RSS1 = first_row.RSS
RSS2 = second_row.RSS
DF1 = n_points - (first_row.n_components * 2 + 1)
DF2 = n_points - (second_row.n_components * 2 + 1)
if DF2 < DF1:
F = ((RSS1 - RSS2) / (DF1 - DF2)) / (RSS2 / DF2)
f = 1 - stats.f.cdf(F, DF1 - DF2, DF2)
else:
F = ((RSS2 - RSS1) / (DF2 - DF1)) / (RSS1 / DF1)
f = 1 - stats.f.cdf(F, DF2 - DF1, DF1)
if f < 0.05:
return second_row
return first_row
def calculate_confidence_intervals_parameters(df, n_components, count_model, all_peaks, repetitions=20, maxiter=5000,
maxfun=5000, method='nm', period=24, precision_rate=2):
sample_size = round(df.shape[0] - df.shape[0] / 3)
for i in range(0, repetitions):
sample = df.sample(sample_size)
_, df_result, _ = fit_to_model(sample, n_components, count_model, period, maxiter, maxfun, method, 0)
if i == 0:
amplitude = np.array(df_result['amplitude'])
mesor = np.array(df_result['mesor'])
peaks = np.empty((repetitions, period))
peaks[:] = np.nan
peaks = hlp.add_to_table(peaks, df_result['peaks'], i)
heights = np.empty((repetitions, period))
heights[:] = np.nan
heights = hlp.add_to_table(heights, df_result['heights'], i)
else:
amplitude = np.append(amplitude, df_result['amplitude'])
mesor = np.append(mesor, df_result['mesor'])
peaks = hlp.add_to_table(peaks, df_result['peaks'], i)
heights = hlp.add_to_table(heights, df_result['heights'], i)
mean_amplitude = amplitude.mean()
std_amplitude = amplitude.std()
mean_mesor = mesor.mean()
std_mesor = mesor.std()
mean_std_peaks, mean_std_heights = hlp.calculate_mean_std(peaks, heights, all_peaks, precision_rate)
amplitude = np.array([mean_amplitude - 1.96 * std_amplitude, mean_amplitude + 1.96 * std_amplitude])
mesor = np.array([mean_mesor - 1.96 * std_mesor, mean_mesor + 1.96 * std_mesor])
if (len(mean_std_peaks) == 0):
peaks = []
heights = []
elif isinstance(mean_std_peaks[0], np.ndarray):
peaks = np.array([mean_std_peaks[:, 0] - 1.96 * mean_std_peaks[:, 1],
mean_std_peaks[:, 0] + 1.96 * mean_std_peaks[:, 1]])
heights = np.array([mean_std_heights[:, 0] - 1.96 * mean_std_heights[:, 1],
mean_std_heights[:, 0] + 1.96 * mean_std_heights[:, 1]])
else:
peaks = np.array([mean_std_peaks[0] - 1.96 * mean_std_peaks[1],
mean_std_peaks[0] + 1.96 * mean_std_peaks[1]])
heights = np.array([mean_std_heights[0] - 1.96 * mean_std_heights[1],
mean_std_heights[0] + 1.96 * mean_std_heights[1]])
peaks = np.transpose(peaks)
heights = np.transpose(heights)
return {'amplitude_CIs': np.around(amplitude, decimals=2), 'mesor_CIs': np.around(mesor, decimals=2),
'peaks_CIs': np.around(peaks, decimals=2), 'heights_CIs': np.around(heights, decimals=2)}
def compare_by_component(df, component, n_components, count_models, ax_indices, ax_titles, rows=1, cols=1, labels=None,
eval_order=True, maxiter=5000, maxfun=5000, method='nm', period=24, precision_rate=2,
repetitions=20, test='Vuong', save_file_to='comparison.pdf'):
    df_results
__all__ = ['POW']
from .base import BinaryHead, heads_precedence, Head, Expr, Pair, ArithmeticHead
from ..core import init_module
init_module.import_heads()
init_module.import_lowlevel_operations()
init_module.import_numbers()
@init_module
def _init(module):
from ..arithmetic.numbers import try_power
module.try_power = try_power
class PowHead(ArithmeticHead):
""" PowHead represents exponentiation operation, data is a 2-tuple
of base and exponent expressions. Both can be number instances or
algebra instances.
"""
op_mth = '__pow__'
op_rmth = '__rpow__'
def is_data_ok(self, cls, data):
if type(data) is tuple and len(data)==2:
base, exp = data
if isinstance(base, cls):
if isinstance(exp, numbertypes):
return
if isinstance(exp, cls):
if exp.head is NUMBER:
if isinstance(exp.data, numbertypes):
return 'data[1] must be lowlevel number or non-numeric but got %s' % (type(exp.data))
else:
return
else:
return 'data[1] must be %s instance but got %s' % ((cls, numbertypes), type(exp))
else:
return 'data[0] must be %s instance but got %s' % (cls, type(exp))
else:
return 'data must be 2-tuple'
return
def __repr__(self): return 'POW'
def new(self, cls, (base, exp), evaluate=True):
if exp==1:
return base
if exp==0 or base==1:
return cls(NUMBER, 1)
if not evaluate:
return cls(self, (base, exp))
if type(exp) is cls:
h, d = exp.pair
if h is NUMBER:
exp = d
if base.head is NUMBER and isinstance(exp, numbertypes):
b = base.data
if isinstance(b, numbertypes):
r, base_exp_list = try_power(b, exp)
if not base_exp_list:
return cls(NUMBER, r)
if len(base_exp_list)==1:
b, e = base_exp_list[0]
rest = cls(POW, (cls(NUMBER, b), e))
else:
d = {}
for b, e in base_exp_list:
d[cls(NUMBER, b)] = e
rest = cls(BASE_EXP_DICT, d)
if r==1:
return rest
return cls(TERM_COEFF, (rest, r))
return cls(self, (base, exp))
def reevaluate(self, cls, (base, exp)):
return base ** exp
def data_to_str_and_precedence(self, cls, (base, exp)):
pow_p = heads_precedence.POW
div_p = heads_precedence.DIV
if isinstance(base, Expr):
b, b_p = base.head.data_to_str_and_precedence(cls, base.data)
elif isinstance(base, numbertypes):
b, b_p = NUMBER.data_to_str_and_precedence(cls, base)
else:
b, b_p = SYMBOL.data_to_str_and_precedence(cls, base)
if isinstance(exp, Expr):
h, d = exp.pair
if h is NUMBER and isinstance(d, numbertypes):
exp = d
if isinstance(exp, numbertypes):
if exp==0:
return '1', heads_precedence.NUMBER
if exp==1:
return b, b_p
if exp < 0:
if exp==-1:
s1 = '('+b+')' if b_p <= pow_p else b
return '1/' + s1, div_p
e, e_p = NUMBER.data_to_str_and_precedence(cls, -exp)
s1 = '('+b+')' if b_p < pow_p else b
s2 = '('+e+')' if e_p < pow_p else e
return '1/' + s1 + '**' + s2, div_p
e, e_p = NUMBER.data_to_str_and_precedence(cls, exp)
else:
if isinstance(exp, Expr):
e, e_p = exp.head.data_to_str_and_precedence(cls, exp.data)
else:
e, e_p = str(exp), 0.0
s1 = '('+b+')' if b_p <= pow_p else b
s2 = '('+e+')' if e_p < pow_p else e
return s1 + '**' + s2, pow_p
def to_ADD(self, Algebra, (base, exp), expr):
return Algebra(ADD, [expr])
def to_TERM_COEFF_DICT(self, cls, data, expr):
return expr
def to_EXP_COEFF_DICT(self, cls, (base, exp), expr, variables=None):
if isinstance(exp, Expr):
if exp.head is NUMBER:
exp = exp.data
elif exp.head is TERM_COEFF:
                t, c = exp.data
return self.to_EXP_COEFF_DICT(cls, (base**t, c), expr, variables)
if isinstance(exp, inttypes):
return base.head.to_EXP_COEFF_DICT(cls, base.data, base, variables) ** exp
if isinstance(exp, rationaltypes):
numer, denom = exp
if numer!=1:
return self.to_EXP_COEFF_DICT(cls, (base**(exp/numer), numer), expr, variables)
raise NotImplementedError(`base, exp`)
def non_commutative_mul(self, cls, lhs, rhs):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return term_coeff_new(cls, (lhs, rdata))
if rhead is SYMBOL or rhead is POW:
return MUL.combine(cls, [lhs, rhs])
if rhead is TERM_COEFF:
term, coeff = rdata
return (lhs * term) * coeff
if rhead is DIFF:
base, exp = lhs.data
if base==rhs:
return pow_new(cls, (base, exp + 1))
return MUL.combine(cls, [lhs, rhs])
if rhead is MUL:
return MUL.combine(cls, [lhs] + rdata)
raise NotImplementedError(`self, cls, lhs.pair, rhs.pair`)
def commutative_mul_number(self, cls, lhs, rhs):
return term_coeff_new(cls, (lhs, rhs))
non_commutative_mul_number = commutative_mul_number
non_commutative_rmul_number = commutative_mul_number
def commutative_mul(self, cls, lhs, rhs):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return term_coeff_new(cls, (lhs, rdata))
if rhead is SYMBOL or rhead is ADD or rhead is TERM_COEFF_DICT or rhead is APPLY or rhead is DIFF or rhead is FDIFF:
lbase, lexp = lhs.data
if lbase == rhs:
return pow_new(cls, (lbase, lexp + 1))
return cls(BASE_EXP_DICT, {rhs:1, lbase:lexp})
if rhead is POW:
lbase, lexp = lhs.data
rbase, rexp = rdata
if lbase==rbase:
return POW.new(cls, (lbase, lexp + rexp))
return cls(BASE_EXP_DICT, {lbase:lexp, rbase:rexp})
if rhead is BASE_EXP_DICT:
base, exp = lhs.data
data = rhs.data.copy()
dict_add_item(cls, data, base, exp)
return base_exp_dict_new(cls, data)
if rhead is TERM_COEFF:
term, coeff = rdata
return (lhs * term) * coeff
raise NotImplementedError(`self, cls, lhs.pair, rhs.pair`)
inplace_commutative_mul = commutative_mul
def commutative_div_number(self, cls, lhs, rhs):
r = number_div(cls, 1, rhs)
if rhs==0:
return r * lhs
return term_coeff_new(cls, (lhs, r))
def commutative_rdiv_number(self, cls, lhs, rhs):
base, exp = lhs.data
return term_coeff_new(cls, (pow_new(cls, (base, -exp)), rhs))
def commutative_div(self, cls, lhs, rhs):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return self.commutative_div_number(cls, lhs, rdata)
base, exp = lhs.data
if rhead is POW:
rbase, rexp = rdata
if base==rbase:
return pow_new(cls, (base, exp-rexp))
return base_exp_dict_new(cls, {base:exp, rbase: -rexp})
if rhead is BASE_EXP_DICT:
data = {base:exp}
for b, e in rdata.iteritems():
base_exp_dict_add_item(cls, data, b, -e)
return base_exp_dict_new(cls, data)
if rhead is TERM_COEFF:
term, coeff = rhs.term_coeff()
return (lhs / term) / coeff
if base==rhs:
return pow_new(cls, (base, exp-1))
return base_exp_dict_new(cls, {base:exp, rhs:-1})
def base_exp(self, cls, expr):
base, exp = expr.data
return base, exp
def pow(self, cls, base, exp):
if exp==0:
return cls(NUMBER, 1)
if exp==1:
return base
if isinstance(exp, Expr) and exp.head is NUMBER:
exp = exp.data
if isinstance(exp, inttypes):
b, e = base.data
base, exp = b, e*exp
return POW.new(cls, (base, exp))
pow_number = pow
def walk(self, func, cls, data, target):
base, exp = data
base1 = base.head.walk(func, cls, base.data, base)
if isinstance(exp, Expr):
exp1 = exp.head.walk(func, cls, exp.data, exp)
else:
exp1 = NUMBER.walk(func, cls, exp, exp)
if base1 is base and exp1 is exp:
return func(cls, self, data, target)
else:
r = base1 ** exp1
return func(cls, r.head, r.data, r)
def scan(self, proc, cls, data, target):
base, exp = data
base.head.scan(proc, cls, base.data, target)
if isinstance(exp, Expr):
exp.head.scan(proc, cls, exp.data, target)
else:
NUMBER.scan(proc, cls, exp, target)
proc(cls, self, data, target)
def expand(self, cls, expr):
base, exp = expr.data
if isinstance(exp, Expr):
exp = exp.expand()
h, d = exp.pair
if h is NUMBER and isinstance(d, int):
exp = d
if isinstance(base, Expr):
base = base.expand()
if isinstance(exp, int):
return base.head.expand_intpow(cls, base, exp)
return cls(POW, (base, exp))
def diff(self, cls, data, expr, symbol, order, cache={}):
        # XXXX needs implementation
key = (expr, symbol, order)
result = cache.get(key)
if result is not None:
return result
base, exp = data
texp = type(exp)
if symbol not in base.symbols_data:
# constant ** exp
if texp is cls:
if exp.head is SYMBOL:
if exp.data==symbol:
result = expr * cls.Log(base)**order
cache[key] = result
return result
else:
return cls(NUMBER, 0)
if symbol not in exp.symbols_data:
return cls(NUMBER, 0)
key1 = (expr, symbol, 1)
result = cache.get(key1)
if result is None:
de = exp.head.diff(cls, exp.data, exp, symbol, 1, cache=cache)
if symbol not in de.symbols_data:
result = expr * de**order * cls.Log(base)**order
cache[key] = result
return result
result = expr * cls.Log(base) * de
cache[key1] = result
if order>1:
result = result.head.diff(cls, result.data, result, symbol, order-1, cache=cache)
cache[key] = result
return result
else:
return cls(NUMBER, 0)
elif not (texp is cls and symbol in exp.symbols_data):
if exp is cls and exp.head is NUMBER:
exp = exp.data
# variable ** constant
# f(x)**n -> n*f**(n-1)*f' ->
db = base.head.diff(cls, base.data, base, symbol, 1, cache=cache)
if db.head is NUMBER:
if texp is int and order>exp and exp>0:
return cls(NUMBER, 0)
p = db.data ** order
e = exp
for i in xrange(order):
p *= e
e -= 1
result = p * base ** e
cache[key] = result
return result
key1 = (expr, symbol, 1)
result = cache.get(key1)
if result is None:
base, exp = data
db = base.head.diff(cls, base.data, base, symbol, 1, cache=cache)
if isinstance(exp, Expr):
de = exp.head.diff(cls, exp.data, exp, symbol, 1, cache=cache)
if de==0:
result = base ** (exp-1) * db * exp
else:
                    result = expr
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.point_source import PointSource # noqa: F401,E501
from logicmonitor_sdk.models.widget import Widget # noqa: F401,E501
class GoogleMapWidget(Widget):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_updated_by': 'str',
'user_permission': 'str',
'dashboard_id': 'int',
'name': 'str',
'description': 'str',
'last_updated_on': 'int',
'theme': 'str',
'interval': 'int',
'id': 'int',
'type': 'str',
'timescale': 'str',
'ack_checked': 'bool',
'display_error_alert': 'bool',
'display_warn_alert': 'bool',
'display_critical_alert': 'bool',
'sdt_checked': 'bool',
'map_points': 'list[PointSource]'
}
attribute_map = {
'last_updated_by': 'lastUpdatedBy',
'user_permission': 'userPermission',
'dashboard_id': 'dashboardId',
'name': 'name',
'description': 'description',
'last_updated_on': 'lastUpdatedOn',
'theme': 'theme',
'interval': 'interval',
'id': 'id',
'type': 'type',
'timescale': 'timescale',
'ack_checked': 'ackChecked',
'display_error_alert': 'displayErrorAlert',
'display_warn_alert': 'displayWarnAlert',
'display_critical_alert': 'displayCriticalAlert',
'sdt_checked': 'sdtChecked',
'map_points': 'mapPoints'
}
def __init__(self, last_updated_by=None, user_permission=None, dashboard_id=None, name=None, description=None, last_updated_on=None, theme=None, interval=None, id=None, type=None, timescale=None, ack_checked=None, display_error_alert=None, display_warn_alert=None, display_critical_alert=None, sdt_checked=None, map_points=None): # noqa: E501
"""GoogleMapWidget - a model defined in Swagger""" # noqa: E501
self._last_updated_by = None
self._user_permission = None
self._dashboard_id = None
self._name = None
self._description = None
self._last_updated_on = None
self._theme = None
self._interval = None
self._id = None
self._type = None
self._timescale = None
self._ack_checked = None
self._display_error_alert = None
self._display_warn_alert = None
self._display_critical_alert = None
self._sdt_checked = None
self._map_points = None
self.discriminator = None
if last_updated_by is not None:
self.last_updated_by = last_updated_by
if user_permission is not None:
self.user_permission = user_permission
self.dashboard_id = dashboard_id
self.name = name
if description is not None:
self.description = description
if last_updated_on is not None:
self.last_updated_on = last_updated_on
if theme is not None:
self.theme = theme
if interval is not None:
self.interval = interval
if id is not None:
self.id = id
self.type = type
if timescale is not None:
self.timescale = timescale
if ack_checked is not None:
self.ack_checked = ack_checked
if display_error_alert is not None:
self.display_error_alert = display_error_alert
if display_warn_alert is not None:
self.display_warn_alert = display_warn_alert
if display_critical_alert is not None:
self.display_critical_alert = display_critical_alert
if sdt_checked is not None:
self.sdt_checked = sdt_checked
self.map_points = map_points
@property
def last_updated_by(self):
"""Gets the last_updated_by of this GoogleMapWidget. # noqa: E501
The user that last updated the widget # noqa: E501
:return: The last_updated_by of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, last_updated_by):
"""Sets the last_updated_by of this GoogleMapWidget.
The user that last updated the widget # noqa: E501
:param last_updated_by: The last_updated_by of this GoogleMapWidget. # noqa: E501
:type: str
"""
self._last_updated_by = last_updated_by
@property
def user_permission(self):
"""Gets the user_permission of this GoogleMapWidget. # noqa: E501
The permission level of the user who last modified the widget # noqa: E501
:return: The user_permission of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._user_permission
@user_permission.setter
def user_permission(self, user_permission):
"""Sets the user_permission of this GoogleMapWidget.
The permission level of the user who last modified the widget # noqa: E501
:param user_permission: The user_permission of this GoogleMapWidget. # noqa: E501
:type: str
"""
self._user_permission = user_permission
@property
def dashboard_id(self):
"""Gets the dashboard_id of this GoogleMapWidget. # noqa: E501
The id of the dashboard the widget belongs to # noqa: E501
:return: The dashboard_id of this GoogleMapWidget. # noqa: E501
:rtype: int
"""
return self._dashboard_id
@dashboard_id.setter
def dashboard_id(self, dashboard_id):
"""Sets the dashboard_id of this GoogleMapWidget.
The id of the dashboard the widget belongs to # noqa: E501
:param dashboard_id: The dashboard_id of this GoogleMapWidget. # noqa: E501
:type: int
"""
if dashboard_id is None:
raise ValueError("Invalid value for `dashboard_id`, must not be `None`") # noqa: E501
self._dashboard_id = dashboard_id
@property
def name(self):
"""Gets the name of this GoogleMapWidget. # noqa: E501
The name of the widget # noqa: E501
:return: The name of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GoogleMapWidget.
The name of the widget # noqa: E501
:param name: The name of this GoogleMapWidget. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this GoogleMapWidget. # noqa: E501
The description of the widget # noqa: E501
:return: The description of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this GoogleMapWidget.
The description of the widget # noqa: E501
:param description: The description of this GoogleMapWidget. # noqa: E501
:type: str
"""
self._description = description
@property
def last_updated_on(self):
"""Gets the last_updated_on of this GoogleMapWidget. # noqa: E501
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:return: The last_updated_on of this GoogleMapWidget. # noqa: E501
:rtype: int
"""
return self._last_updated_on
@last_updated_on.setter
def last_updated_on(self, last_updated_on):
"""Sets the last_updated_on of this GoogleMapWidget.
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:param last_updated_on: The last_updated_on of this GoogleMapWidget. # noqa: E501
:type: int
"""
self._last_updated_on = last_updated_on
@property
def theme(self):
"""Gets the theme of this GoogleMapWidget. # noqa: E501
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:return: The theme of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._theme
@theme.setter
def theme(self, theme):
"""Sets the theme of this GoogleMapWidget.
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:param theme: The theme of this GoogleMapWidget. # noqa: E501
:type: str
"""
self._theme = theme
@property
def interval(self):
"""Gets the interval of this GoogleMapWidget. # noqa: E501
The refresh interval of the widget, in minutes # noqa: E501
:return: The interval of this GoogleMapWidget. # noqa: E501
:rtype: int
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this GoogleMapWidget.
The refresh interval of the widget, in minutes # noqa: E501
:param interval: The interval of this GoogleMapWidget. # noqa: E501
:type: int
"""
self._interval = interval
@property
def id(self):
"""Gets the id of this GoogleMapWidget. # noqa: E501
The Id of the widget # noqa: E501
:return: The id of this GoogleMapWidget. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this GoogleMapWidget.
The Id of the widget # noqa: E501
:param id: The id of this GoogleMapWidget. # noqa: E501
:type: int
"""
self._id = id
@property
def type(self):
"""Gets the type of this GoogleMapWidget. # noqa: E501
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:return: The type of this GoogleMapWidget. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this GoogleMapWidget.
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:param type: The type of this GoogleMapWidget. # noqa: E501
:type: str
"""
if type is None:
| |
#!/usr/bin/env python
#encoding=utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
# for close tab.
from selenium.common.exceptions import NoSuchWindowException
# for alert
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import NoAlertPresentException
# for alert 2
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# for ["pageLoadStrategy"] = "eager"
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# for wait #1
import time
import os
import sys
import platform
import json
import random
import re
from datetime import datetime
# for error output
import logging
logging.basicConfig()
logger = logging.getLogger('logger')
# for check reg_info
import requests
import warnings
from urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore',InsecureRequestWarning)
# How to run: python chrome_tixcraft.py or python3 chrome_tixcraft.py
# Note 1: this is not written very well; many parts could be modularized.
# Note 2:
CONST_APP_VERSION = u"MaxBot (2021.03.22)"
CONST_FROM_TOP_TO_BOTTOM = u"from top to bottom"
CONST_FROM_BOTTOM_TO_TOP = u"from bottom to top"
CONST_RANDOM = u"random"
CONST_SELECT_ORDER_DEFAULT = CONST_FROM_TOP_TO_BOTTOM
CONST_SELECT_OPTIONS_DEFAULT = (CONST_FROM_TOP_TO_BOTTOM, CONST_FROM_BOTTOM_TO_TOP, CONST_RANDOM)
CONST_SELECT_OPTIONS_ARRAY = [CONST_FROM_TOP_TO_BOTTOM, CONST_FROM_BOTTOM_TO_TOP, CONST_RANDOM]
CONT_STRING_1_SEATS_REMAINING = [u'1 seat(s) remaining',u'剩餘 1',u'1 席残り']
# initial webdriver
# Description: initialize the webdriver
driver = None
# Read the parameter values from the settings file
basis = ""
if hasattr(sys, 'frozen'):
basis = sys.executable
else:
basis = sys.argv[0]
app_root = os.path.dirname(basis)
config_filepath = os.path.join(app_root, 'settings.json')
config_dict = None
if os.path.isfile(config_filepath):
with open(config_filepath) as json_data:
config_dict = json.load(json_data)
homepage = None
browser = None
ticket_number = None
facebook_account = None
auto_press_next_step_button = False
auto_fill_ticket_number = False
auto_fill_ticket_price = None
date_auto_select_enable = False
date_auto_select_mode = None
date_keyword = None
area_auto_select_enable = False
area_auto_select_mode = None
area_keyword = None
area_keyword_1 = None
area_keyword_2 = None
pass_1_seat_remaining_enable = False # default not checked.
kktix_area_auto_select_mode = None
kktix_area_keyword = None
kktix_answer_dictionary = None
kktix_answer_dictionary_list = None
auto_guess_options = False
if not config_dict is None:
# read config.
if 'homepage' in config_dict:
homepage = config_dict["homepage"]
if 'browser' in config_dict:
browser = config_dict["browser"]
# default ticket number
# Note: the number of tickets to select automatically
#ticket_number = "2"
ticket_number = ""
if 'ticket_number' in config_dict:
ticket_number = str(config_dict["ticket_number"])
facebook_account = ""
if 'facebook_account' in config_dict:
facebook_account = str(config_dict["facebook_account"])
# for ["kktix"]
if 'kktix' in config_dict:
auto_press_next_step_button = config_dict["kktix"]["auto_press_next_step_button"]
auto_fill_ticket_number = config_dict["kktix"]["auto_fill_ticket_number"]
if 'area_mode' in config_dict["kktix"]:
kktix_area_auto_select_mode = config_dict["kktix"]["area_mode"]
kktix_area_auto_select_mode = kktix_area_auto_select_mode.strip()
if not kktix_area_auto_select_mode in CONST_SELECT_OPTIONS_ARRAY:
kktix_area_auto_select_mode = CONST_SELECT_ORDER_DEFAULT
if 'area_keyword' in config_dict["kktix"]:
kktix_area_keyword = config_dict["kktix"]["area_keyword"]
if kktix_area_keyword is None:
kktix_area_keyword = ""
kktix_area_keyword = kktix_area_keyword.strip()
# disable password brute force attack
if 'answer_dictionary' in config_dict["kktix"]:
kktix_answer_dictionary = config_dict["kktix"]["answer_dictionary"]
if kktix_answer_dictionary is None:
kktix_answer_dictionary = ""
kktix_answer_dictionary = kktix_answer_dictionary.strip()
if len(kktix_answer_dictionary) > 0:
kktix_answer_dictionary_list = kktix_answer_dictionary.split(',')
if 'auto_guess_options' in config_dict["kktix"]:
auto_guess_options = config_dict["kktix"]["auto_guess_options"]
# for ["tixcraft"]
if 'tixcraft' in config_dict:
date_auto_select_enable = False
date_auto_select_mode = None
if 'date_auto_select' in config_dict["tixcraft"]:
date_auto_select_enable = config_dict["tixcraft"]["date_auto_select"]["enable"]
date_auto_select_mode = config_dict["tixcraft"]["date_auto_select"]["mode"]
if not date_auto_select_mode in CONST_SELECT_OPTIONS_ARRAY:
date_auto_select_mode = CONST_SELECT_ORDER_DEFAULT
if 'date_keyword' in config_dict["tixcraft"]["date_auto_select"]:
date_keyword = config_dict["tixcraft"]["date_auto_select"]["date_keyword"]
date_keyword = date_keyword.strip()
area_auto_select_enable = False
area_auto_select_mode = None
if 'area_auto_select' in config_dict["tixcraft"]:
area_auto_select_enable = config_dict["tixcraft"]["area_auto_select"]["enable"]
area_auto_select_mode = config_dict["tixcraft"]["area_auto_select"]["mode"]
if not area_auto_select_mode in CONST_SELECT_OPTIONS_ARRAY:
area_auto_select_mode = CONST_SELECT_ORDER_DEFAULT
if 'area_keyword_1' in config_dict["tixcraft"]["area_auto_select"]:
area_keyword_1 = config_dict["tixcraft"]["area_auto_select"]["area_keyword_1"]
area_keyword_1 = area_keyword_1.strip()
if 'area_keyword_2' in config_dict["tixcraft"]["area_auto_select"]:
area_keyword_2 = config_dict["tixcraft"]["area_auto_select"]["area_keyword_2"]
area_keyword_2 = area_keyword_2.strip()
pass_1_seat_remaining_enable = False
if 'pass_1_seat_remaining' in config_dict["tixcraft"]:
pass_1_seat_remaining_enable = config_dict["tixcraft"]["pass_1_seat_remaining"]
# output config:
print("version", CONST_APP_VERSION)
print("homepage", homepage)
print("browser", browser)
print("ticket_number", ticket_number)
print("facebook_account", facebook_account)
# for kktix
print("==[kktix]==")
print("auto_press_next_step_button", auto_press_next_step_button)
print("auto_fill_ticket_number", auto_fill_ticket_number)
print("kktix_area_keyword", kktix_area_keyword)
print("kktix_answer_dictionary", kktix_answer_dictionary)
print("auto_guess_options", auto_guess_options)
# for tixcraft
print("==[tixcraft]==")
print("date_auto_select_enable", date_auto_select_enable)
print("date_auto_select_mode", date_auto_select_mode)
print("date_keyword", date_keyword)
print("area_auto_select_enable", area_auto_select_enable)
print("area_auto_select_mode", area_auto_select_mode)
print("area_keyword_1", area_keyword_1)
print("area_keyword_2", area_keyword_2)
print("pass_1_seat_remaining", pass_1_seat_remaining_enable)
# entry point
# Note: automatically open the first page
if homepage is None:
homepage = ""
if len(homepage) == 0:
homepage = "https://tixcraft.com/activity/"
Root_Dir = ""
if browser == "chrome":
DEFAULT_ARGS = [
'--disable-audio-output',
'--disable-background-networking',
'--disable-background-timer-throttling',
'--disable-breakpad',
'--disable-browser-side-navigation',
'--disable-checker-imaging',
'--disable-client-side-phishing-detection',
'--disable-default-apps',
'--disable-demo-mode',
'--disable-dev-shm-usage',
#'--disable-extensions',
'--disable-features=site-per-process',
'--disable-hang-monitor',
'--disable-in-process-stack-traces',
'--disable-javascript-harmony-shipping',
'--disable-logging',
'--disable-notifications',
'--disable-popup-blocking',
'--disable-prompt-on-repost',
'--disable-perfetto',
'--disable-permissions-api',
'--disable-plugins',
'--disable-presentation-api',
'--disable-reading-from-canvas',
'--disable-renderer-accessibility',
'--disable-renderer-backgrounding',
'--disable-shader-name-hashing',
'--disable-smooth-scrolling',
'--disable-speech-api',
'--disable-speech-synthesis-api',
'--disable-sync',
'--disable-translate',
'--ignore-certificate-errors',
'--metrics-recording-only',
'--no-first-run',
'--no-experiments',
'--safebrowsing-disable-auto-update',
#'--enable-automation',
'--password-store=<PASSWORD>',
'--use-mock-keychain',
'--lang=zh-TW',
'--stable-release-mode',
'--use-mobile-user-agent',
'--webview-disable-safebrowsing-support',
#'--no-sandbox',
#'--incognito',
]
chrome_options = webdriver.ChromeOptions()
# for navigator.webdriver
chrome_options.add_experimental_option("excludeSwitches", ['enable-automation'])
chrome_options.add_experimental_option('useAutomationExtension', False)
chrome_options.add_experimental_option("prefs", {"profile.password_manager_enabled": False, "credentials_enable_service": False,'profile.default_content_setting_values':{'notifications':2}})
if 'kktix.c' in homepage:
#chrome_options.add_argument('blink-settings=imagesEnabled=false')
pass
# default os is linux/mac
chromedriver_path = Root_Dir + "webdriver/chromedriver"
if platform.system() == "Windows":
chromedriver_path = Root_Dir + "webdriver/chromedriver.exe"
if not 'kktix.c' in homepage:
extension_path = Root_Dir + "webdriver/AdBlock.crx"
extension_file_exist = os.path.isfile(extension_path)
if extension_file_exist:
chrome_options.add_extension(extension_path)
else:
print("extention not exist:", extension_path)
extension_path = Root_Dir + "webdriver/BlockYourselfFromAnalytics.crx"
extension_file_exist = os.path.isfile(extension_path)
if extension_file_exist:
chrome_options.add_extension(extension_path)
else:
print("extention not exist:", extension_path)
#caps = DesiredCapabilities().CHROME
caps = chrome_options.to_capabilities()
#caps["pageLoadStrategy"] = u"normal" # complete
caps["pageLoadStrategy"] = u"eager" # interactive
#caps["pageLoadStrategy"] = u"none"
#caps["unhandledPromptBehavior"] = u"dismiss and notify" # default
caps["unhandledPromptBehavior"] = u"ignore"
#caps["unhandledPromptBehavior"] = u"dismiss"
#print("caps:", caps)
# method 1:
#driver = webdriver.Chrome(executable_path=chromedriver_path, options=chrome_options, desired_capabilities=caps)
#driver = webdriver.Chrome(executable_path=chromedriver_path, options=chrome_options)
# method 2:
#driver = webdriver.Remote(command_executor='http://127.0.0.1:9515', desired_capabilities=caps)
#driver = webdriver.Remote(command_executor='http://127.0.0.1:9515', options=chrome_options)
# method 3:
driver = webdriver.Chrome(desired_capabilities=caps, executable_path=chromedriver_path)
if browser == "firefox":
# default os is linux/mac
chromedriver_path = Root_Dir + "webdriver/geckodriver"
if platform.system() == "Windows":
chromedriver_path = Root_Dir + "webdriver/geckodriver.exe"
driver = webdriver.Firefox(executable_path=chromedriver_path)
time.sleep(1.0)
try:
window_handles_count = len(driver.window_handles)
if window_handles_count > 1:
driver.switch_to.window(driver.window_handles[1])
driver.close()
driver.switch_to.window(driver.window_handles[0])
except Exception as excSwithFail:
pass
driver.get(homepage)
else:
print("Config error!")
# common functions.
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
# convert web string to reg pattern
def convert_string_to_pattern(my_str, dynamic_length=True):
my_hint_anwser_length = len(my_str)
my_formated = ""
if my_hint_anwser_length > 0:
my_anwser_symbols = u"()[]<>{}-"
for idx in range(my_hint_anwser_length):
char = my_str[idx:idx+1]
if char in my_anwser_symbols:
my_formated += (u'\\' + char)
continue
pattern = re.compile(u"[A-Z]")
match_result = pattern.match(char)
#print("match_result A:", match_result)
if not match_result is None:
my_formated += u"[A-Z]"
pattern = re.compile(u"[a-z]")
match_result = pattern.match(char)
#print("match_result a:", match_result)
if not match_result is None:
my_formated += u"[a-z]"
pattern = re.compile(u"[\d]")
match_result = pattern.match(char)
#print("match_result d:", match_result)
if not match_result is None:
my_formated += u"[\d]"
# for dynamic length
if dynamic_length:
for i in range(10):
my_formated = my_formated.replace(u"[A-Z][A-Z]",u"[A-Z]")
my_formated = my_formated.replace(u"[a-z][a-z]",u"[a-z]")
my_formated = my_formated.replace(u"[\d][\d]",u"[\d]")
my_formated = my_formated.replace(u"[A-Z]",u"[A-Z]+")
my_formated = my_formated.replace(u"[a-z]",u"[a-z]+")
my_formated = my_formated.replace(u"[\d]",u"[\d]+")
return my_formated
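# Behaviour sketch (illustrative; the hint strings below are hypothetical):
#   convert_string_to_pattern(u"A1b2")
#       returns the pattern  [A-Z]+[\d]+[a-z]+[\d]+   (each class gets a "+" suffix,
#       and repeated classes are collapsed when dynamic_length is True)
#   convert_string_to_pattern(u"(A12)", dynamic_length=False)
#       returns the pattern  \([A-Z][\d][\d]\)        (one character class per character)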
def get_answer_list_by_question(captcha_text_div_text):
return_list = None
my_answer_delimitor = ""
#if u"?" in captcha_text_div_text or u"?" in captcha_text_div_text:
if True:
tmp_text = captcha_text_div_text
tmp_text = tmp_text.replace(u' ',u' ')
tmp_text = tmp_text.replace(u':',u':')
# for hint
tmp_text = tmp_text.replace(u'*',u'*')
# replace ex.
tmp_text = tmp_text.replace(u'例如',u'範例')
tmp_text = tmp_text.replace(u'如:',u'範例:')
tmp_text = tmp_text.replace(u'舉例',u'範例')
if not u'範例' in tmp_text:
tmp_text = tmp_text.replace(u'例',u'範例')
# important: 例 and ex may occur at the same time.
tmp_text = tmp_text.replace(u'ex:',u'範例:')
tmp_text = tmp_text.replace(u'Ex:',u'範例:')
#tmp_text = tmp_text.replace(u'[',u'(')
#tmp_text = tmp_text.replace(u']',u')')
tmp_text = tmp_text.replace(u'?',u'?')
tmp_text = tmp_text.replace(u'(',u'(')
tmp_text = tmp_text.replace(u')',u')')
# Is it necessary to convert '。' as well? Not sure.
tmp_text = tmp_text.replace(u'。',u' ')
my_question = ""
my_options = ""
my_hint = ""
my_hint_anwser = ""
my_anwser_formated = ""
if u"?" in tmp_text:
question_index = tmp_text.find(u"?")
my_question = tmp_text[:question_index+1]
if u"。" in tmp_text:
question_index = tmp_text.find(u"。")
my_question = tmp_text[:question_index+1]
if len(my_question) == 0:
my_question = tmp_text
#print(u"my_question:", my_question)
# get hint from quota.
hint_list = None
# Note: hint_list is not the options list
# try rule1:
if u'(' in tmp_text and u')' in tmp_text and u'範例' in tmp_text:
#import re
#print("text:" , re.findall('\([\w]+\)', tmp_text))
hint_list = re.findall(u'\(.*?\)', tmp_text)
#print("hint_list:", hint_list)
# try rule2:
if hint_list is None:
if u'【' in tmp_text and u'】' in tmp_text and u'範例' in tmp_text:
#import re
#print("text:" , re.findall('\([\w]+\)', tmp_text))
hint_list = re.findall(u'【.*?】', tmp_text)
# try rule3:
if not hint_list is None:
for hint in hint_list:
if u'範例' in hint:
my_hint = hint
if my_hint[:1] == u'【':
my_hint = my_hint[1:]
if my_hint[-1:] == u'】':
my_hint = my_hint[:-1]
break;
else:
# get hint from rule 3: with '(' & ')', but ex: is outside
if u'半形' in hint:
hint_index = tmp_text.find(hint)
ex_index = tmp_text.find(u"範例")
if ex_index > 0:
ex_end_index = tmp_text.find(u" ",ex_index)
if ex_end_index < 0:
ex_end_index = tmp_text.find(u"(",ex_index)
if ex_end_index < 0:
ex_end_index = tmp_text.find(u"(",ex_index)
if ex_end_index < 0:
ex_end_index = tmp_text.find(u".",ex_index)
if ex_end_index < 0:
ex_end_index = tmp_text.find(u"。",ex_index)
if ex_end_index >=0:
my_hint = tmp_text[hint_index:ex_end_index+1]
# try rule4:
# get hint from rule 4: without '(' & ')', use "*" instead
if len(my_hint) == 0:
target_symbol = u"*"
if target_symbol in tmp_text :
star_index = tmp_text.find(target_symbol)
space_index = tmp_text.find(u" ", star_index + len(target_symbol))
my_hint = tmp_text[star_index: space_index]
# check whether the next block needs to be merged
if len(my_hint) > 0:
target_symbol = my_hint + u" "
if target_symbol in tmp_text :
star_index = tmp_text.find(target_symbol)
next_block_index = star_index + len(target_symbol)
space_index = tmp_text.find(u" ", next_block_index)
next_block = tmp_text[next_block_index: space_index]
if u'範例' in next_block:
#!/usr/bin/env python
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import json
import os
import re
from google.appengine.api import users
from google.appengine.ext import ndb
DEFAULT_TWEET_DB_NAME = 'tweet_db'
DEFAULT_AUTHOR_DB_NAME = 'author_db'
# Example data: 'Wed Dec 10 21:00:24 2014'
DATE_PARSE_FMT_STR = '%a %b %d %H:%M:%S %Y'
# On App Engine prod this will be set correctly, but in a unittest environment
# the version will not be set when this is executed.
APP_VERSION = os.environ.get('CURRENT_VERSION_ID', '-1')
# We want operations on an individual user to be consistent.
def user_key(user_id, user_table_name=DEFAULT_AUTHOR_DB_NAME):
return ndb.Key('User', '%s_%s' % (user_table_name, user_id))
# We want operations on an individual tweet to be consistent.
def tweet_key(tweet_id, tweet_table_name=DEFAULT_TWEET_DB_NAME):
return ndb.Key('Tweet', '%s_%s' % (tweet_table_name, tweet_id))
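# Illustrative example (not in the original module): both helpers namespace the id
# with the table name, e.g. user_key('42') == ndb.Key('User', 'author_db_42') and
# tweet_key('42') == ndb.Key('Tweet', 'tweet_db_42').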
def ParseTweetDateString(date_str, tweet_id='', user_id=''):
"""Parses a date string from a tweet, returning 'utcnow' on failure.
Args:
date_str: The date string to be parsed.
tweet_id: The id of the tweet this date is being parsed from.
user_id: The id of the user this date is being parsed from.
"""
if not date_str:
date_type, id_value = CalculateDateType(tweet_id, user_id)
logging.warning('Empty creation date in %s id %s', date_type, id_value)
return datetime.datetime.utcnow()
try:
# Convert to UTC time by manually parsing the timedelta because it is not
# supported on all platforms.
td = ParseUtcTimeDelta(date_str[-10:-5])
return datetime.datetime.strptime('%s %s' % (date_str[:-11], date_str[-4:]),
DATE_PARSE_FMT_STR) + td
except ValueError:
logging.warning('Failed to parse date "%s" from tweet id %s, user id %s',
date_str, tweet_id, user_id)
return datetime.datetime.utcnow()
def ParseUtcTimeDelta(td_str):
"""Manually parse the UTC timedelta from the string (not supported some places).
Args:
td_str: Timedelta string of the form specified for the '%z' format
specifier in strftime.
Returns:
A timedelta object.
"""
# The most common case - let's make this easy.
if td_str == '+0000':
return datetime.timedelta(0, 0, 0)
if td_str[0] not in ['-', '+'] or len(td_str) != 5:
logging.warning('Bad UTC offset: %s', td_str)
return datetime.timedelta(0, 0, 0)
try:
int(td_str[1:5])
except ValueError:
logging.warning('Bad UTC offset: %s', td_str)
return datetime.timedelta(0, 0, 0)
seconds = int(td_str[1:3])*3600 + int(td_str[3:])*60
if td_str[0] == '-':
return datetime.timedelta(0, seconds, 0)
return datetime.timedelta(-1, (24 * 3600) - seconds, 0)
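# Worked example (illustrative only): '+0800' means local time is 8 hours ahead
# of UTC, so the returned delta is -8h, and adding it to the parsed local time
# yields UTC:
#   ParseUtcTimeDelta('+0800') == datetime.timedelta(-1, 57600)   # i.e. -8:00:00
#   ParseUtcTimeDelta('-0500') == datetime.timedelta(0, 18000)    # i.e. +5:00:00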
def WriteTweetDateString(dt):
return '%s +0000 %s' % (dt.strftime('%a %b %d %H:%M:%S'), dt.strftime('%Y'))
def CalculateDateType(tweet_id, user_id):
id_value = tweet_id or user_id
if tweet_id:
return ('tweet', id_value)
return ('user', id_value)
def ParseIntegersFromTweet(entities, tweet_text):
"""Parses integers that don't occur in other entities.
Args:
entities: Entities object of other entities in the tweet
tweet_text: The text of the tweet.
Returns:
A possibly empty list of IntegerEntity objects
"""
ies = []
if not tweet_text:
return []
for item in re.finditer(r'\$?[\d:.,]+', tweet_text):
# Don't worry about big numbers - they're not scores
if len(item.group(0)) > 3:
continue
# If we didn't match any numbers, move on
if not re.findall(r'\d+', item.group(0)):
continue
# Don't worry about money amounts
if '$' in item.group(0)[0]:
continue
# Decimal numbers are not scores, but don't worry about a trailing period
if '.' in item.group(0)[:-1]:
continue
# Neither are numbers with commas, but don't worry about a trailing comma
if ',' in item.group(0)[:-1]:
continue
# Neither are times such as "3:15"
if ':' in item.group(0):
continue
# Don't worry about numbers in other entities
if entities.IsIndexInEntities(item.start(0), item.end(0)):
continue
ie = IntegerEntity()
number_text = item.group(0)
end_offset = 0
if number_text[-1] in ['.', ',']:
number_text = number_text[:-1]
end_offset = -1
ie.num = long(number_text)
ie.start_idx = int(item.start(0))
ie.end_idx = int(item.end(0) + end_offset)
ies.append(ie)
return ies
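# Illustrative behaviour (not part of the original code): for the tweet text
# "Tigers 24, Lions 17" with an empty Entities object, two IntegerEntity objects
# are returned (num 24 and num 17). Dollar amounts ("$20"), times ("3:15"),
# decimals ("1.5") and long numbers ("1000") are all skipped.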
def ParseGeoData(json_obj):
"""Return an ndb.GeoPt object from the 'geo' twitter json entry."""
if not json_obj:
return None
if json_obj.get('type', '') != 'Point':
return None
pt_data = json_obj.get('coordinates', [])
if not pt_data or len(pt_data) < 2:
return None
return ndb.GeoPt(pt_data[0], pt_data[1])
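# Example (illustrative): ParseGeoData({'type': 'Point', 'coordinates': [-41.3, 174.8]})
# returns ndb.GeoPt(-41.3, 174.8); a missing object or a non-Point type yields None.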
def ParsePlaceId(json_obj):
"""Parse the place id from the 'place' twitter tag if it exists."""
if not json_obj:
return None
return json_obj.get('id', '')
class UserMentionEntity(ndb.Model):
"""Information about the mention of a user in a tweet."""
user_id = ndb.StringProperty('id', required=True)
# ID property as a 64-bit signed int. This will eventually replace user_id as
# the main property.
user_id_64 = ndb.IntegerProperty('id_64')
# The character positions in the tweet where this entity started and ended.
start_idx = ndb.IntegerProperty('si')
end_idx = ndb.IntegerProperty('ei')
@classmethod
def fromJson(cls, json_obj):
"""Builds a UserMentionEntity object from a json object."""
ume = UserMentionEntity()
ume.user_id = json_obj.get('id_str', '')
ume.user_id_64 = json_obj.get('id', 0)
indices = json_obj.get('indices', [])
if len(indices) < 2:
return ume
ume.start_idx = indices[0]
ume.end_idx = indices[1]
return ume
class UrlMentionEntity(ndb.Model):
"""Information about a URL in a tweet."""
# URL as shown in tweet text.
url = ndb.StringProperty('u', indexed=False)
# Display URL for user.
display_url = ndb.StringProperty('du', indexed=False)
# Fully resolved URL.
expanded_url = ndb.StringProperty('eu', indexed=False)
# The character positions in the tweet where this entity started and ended.
start_idx = ndb.IntegerProperty('si')
end_idx = ndb.IntegerProperty('ei')
@classmethod
def fromJson(cls, json_obj):
"""Builds a UrlMentionEntity object from a json object."""
ume = UrlMentionEntity()
ume.display_url = json_obj.get('display_url')
ume.url = json_obj.get('url')
ume.expanded_url = json_obj.get('expanded_url')
indices = json_obj.get('indices', [])
if len(indices) < 2:
return ume
ume.start_idx = indices[0]
ume.end_idx = indices[1]
return ume
class HashTagEntity(ndb.Model):
"""Information about a hashtag in the tweet."""
text = ndb.StringProperty()
# The character positions in the tweet where this entity started and ended.
start_idx = ndb.IntegerProperty('si')
end_idx = ndb.IntegerProperty('ei')
@classmethod
def fromJson(cls, json_obj):
"""Builds a HashTagEntity object from a json object."""
hte = HashTagEntity()
hte.text = json_obj.get('text', '')
indices = json_obj.get('indices', [])
if len(indices) < 2:
return hte
hte.start_idx = indices[0]
hte.end_idx = indices[1]
return hte
class MediaEntity(ndb.Model):
"""Information about media (eg, a link to a photo) in the tweet."""
# We don't save most of the info, we're mostly just interested in the indices
url_https = ndb.StringProperty(indexed=False)
id_str = ndb.StringProperty(indexed=False)
# The character positions in the tweet where this entity started and ended.
start_idx = ndb.IntegerProperty('si')
end_idx = ndb.IntegerProperty('ei')
@classmethod
def fromJson(cls, json_obj):
"""Builds a MediaEntity object from a json object."""
ment = MediaEntity()
ment.url_https = json_obj.get('media_url_https', '')
ment.id_str = json_obj.get('id_str', '')
indices = json_obj.get('indices', [])
if len(indices) < 2:
return ment
ment.start_idx = indices[0]
ment.end_idx = indices[1]
return ment
class IntegerEntity(ndb.Model):
"""Information about an integer in the tweet.
Note: this is *not* returned from the Twitter API, but it's important enough
for Score minion that we parse it out for each tweet.
"""
num = ndb.IntegerProperty()
# The character positions in the tweet where this entity started and ended.
start_idx = ndb.IntegerProperty('si')
end_idx = ndb.IntegerProperty('ei')
class Entities(ndb.Model):
"""Item from the 'entities' tag in the API.
More info: https://dev.twitter.com/overview/api/entities-in-twitter-objects
and https://dev.twitter.com/overview/api/entities
"""
hashtags = ndb.StructuredProperty(HashTagEntity, 'h', repeated=True)
user_mentions = ndb.StructuredProperty(UserMentionEntity, 'usm', repeated=True)
url_mentions = ndb.StructuredProperty(UrlMentionEntity, 'urm', repeated=True)
media = ndb.StructuredProperty(MediaEntity, 'me', repeated=True)
integers = ndb.StructuredProperty(IntegerEntity, 'n', repeated=True)
def IsIndexInEntities(self, start_idx, end_idx):
"""Returns True if this interval overlaps with another non-Integer entity interval."""
for hashtag in self.hashtags:
if start_idx >= hashtag.start_idx and start_idx < hashtag.end_idx:
return True
for um in self.user_mentions:
if start_idx >= um.start_idx and start_idx < um.end_idx:
return True
for um in self.url_mentions:
if start_idx >= um.start_idx and start_idx < um.end_idx:
return True
for ment in self.media:
if start_idx >= ment.start_idx and start_idx < ment.end_idx:
return True
# Don't worry about integers because this is called by the integer-parsing code.
return False
# Major field this class is ignoring: media
@classmethod
def fromJson(cls, json_obj, tweet_text=''):
"""Builds a Entities object from a json object.
Args:
json_obj: The JSON object representing the Entities.
tweet_text: The text of the tweet, if parsing of IntegerEntity objects
is desired.
Returns:
An Entities object.
"""
entities = Entities()
for hashtag in json_obj.get('hashtags', []):
parsed_ht = HashTagEntity.fromJson(hashtag)
if parsed_ht:
entities.hashtags.append(parsed_ht)
for user_mention in json_obj.get('user_mentions', []):
parsed_um = UserMentionEntity.fromJson(user_mention)
if parsed_um:
entities.user_mentions.append(parsed_um)
for url_mention in json_obj.get('urls', []):
parsed_um = UrlMentionEntity.fromJson(url_mention)
if parsed_um:
entities.url_mentions.append(parsed_um)
for media_entity in json_obj.get('media', []):
parsed_ment = MediaEntity.fromJson(media_entity)
most recent invitation recipient."""
try:
return (self.orginvitation_set.select(
OrgInvitation.invitee).where(OrgInvitation.invitee_id == self.tech_contact_id)
.order_by(OrgInvitation.created_at.desc()).first().invitee)
except Exception:
return None
@property
def invitation_sent_at(self):
"""Get the timestamp of the most recent invitation sent to the technical contact."""
try:
return (self.orginvitation_set.select(
fn.MAX(OrgInvitation.created_at).alias("last_sent_at")).where(
OrgInvitation.invitee_id == self.tech_contact_id).first().last_sent_at)
except Exception:
return None
@property
def invitation_confirmed_at(self):
"""Get the timestamp when the invitation link was opened."""
try:
return (self.orginvitation_set.select(
fn.MAX(OrgInvitation.confirmed_at).alias("last_confirmed_at")).where(
OrgInvitation.invitee_id == self.tech_contact_id).where(
OrgInvitation.confirmed_at.is_null(False)).first().last_confirmed_at)
except Exception:
return None
@property
def users(self):
"""Get organisation's user query."""
return User.select().join(
UserOrg, on=(UserOrg.user_id == User.id)).where(UserOrg.org == self)
@property
def admins(self):
"""Get organisation's adminstrator query."""
return self.users.where(UserOrg.is_admin)
def __repr__(self):
return self.name or self.tuakiri_name
def save(self, *args, **kwargs):
"""Handle data consitency validation and saving."""
if self.is_dirty():
if self.name is None:
self.name = self.tuakiri_name
if self.field_is_updated("tech_contact") and self.tech_contact:
if not self.tech_contact.has_role(Role.TECHNICAL):
self.tech_contact.roles |= Role.TECHNICAL
self.tech_contact.save()
app.logger.info(f"Added TECHNICAL role to user {self.tech_contact}")
super().save(*args, **kwargs)
class OrgInfo(BaseModel):
"""Preloaded organisation data."""
name = CharField(max_length=100, unique=True, verbose_name="Organisation")
tuakiri_name = CharField(max_length=100, unique=True, null=True, verbose_name="TUAKIRI Name")
title = CharField(null=True, verbose_name="Contact person title")
first_name = CharField(null=True, verbose_name="Contact person's first name")
last_name = CharField(null=True, verbose_name="Contact person's last name")
role = CharField(null=True, verbose_name="Contact person's role")
email = CharField(null=True, verbose_name="Contact person's email")
phone = CharField(null=True, verbose_name="Contact person's phone")
is_public = BooleanField(
null=True, default=False, verbose_name="Permission to post contact information to WEB")
country = CharField(null=True, verbose_name="Country Code", default=DEFAULT_COUNTRY)
city = CharField(null=True, verbose_name="City of home campus")
disambiguated_id = CharField(
null=True, verbose_name="common:disambiguated-organization-identifier")
disambiguation_source = CharField(null=True, verbose_name="common:disambiguation-source")
def __repr__(self):
return self.name or self.disambiguated_id or super().__repr__()
class Meta: # noqa: D101,D106
db_table = "org_info"
table_alias = "oi"
@classmethod
def load_from_csv(cls, source):
"""Load data from CSV file or a string."""
if isinstance(source, str):
if '\n' in source:
source = StringIO(source)
else:
source = open(source)
reader = csv.reader(source)
header = next(reader)
assert len(header) >= 3, \
"Wrong number of fields. Expected at least 3 fields " \
"(name, disambiguated organisation ID, and disambiguation source). " \
"Read header: %s" % header
header_rexs = [
re.compile(ex, re.I)
for ex in ("organisation|name", "title", r"first\s*(name)?", r"last\s*(name)?", "role",
"email", "phone", "public|permission to post to web", r"country\s*(code)?",
"city", "(common:)?disambiguated.*identifier",
"(common:)?disambiguation.*source", r"tuakiri\s*(name)?")
]
def index(rex):
"""Return first header column index matching the given regex."""
for i, column in enumerate(header):
if rex.match(column):
return i
else:
return None
idxs = [index(rex) for rex in header_rexs]
def val(row, i, default=None):
if idxs[i] is None:
return default
else:
v = row[idxs[i]].strip()
return None if v == '' else v
for row in reader:
# skip empty lines:
if row is None or (len(row) == 1 and row[0].strip() == ''):
continue
name = val(row, 0)
oi, _ = cls.get_or_create(name=name)
oi.title = val(row, 1)
oi.first_name = val(row, 2)
oi.last_name = val(row, 3)
oi.role = val(row, 4)
oi.email = val(row, 5)
oi.phone = val(row, 6)
oi.is_public = val(row, 7) and val(row, 7).upper() == "YES"
oi.country = val(row, 8) or DEFAULT_COUNTRY
oi.city = val(row, 9)
oi.disambiguated_id = val(row, 10)
oi.disambiguation_source = val(row, 11)
oi.tuakiri_name = val(row, 12)
oi.save()
return reader.line_num - 1
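# Minimal usage sketch (illustrative; assumes the peewee database is initialised
# and the column names below are hypothetical, chosen to match header_rexs above):
#
#   OrgInfo.load_from_csv(
#       "Organisation,Title,First name,Last name,Role,Email,Phone,"
#       "Permission to post to web,Country code,City,"
#       "Disambiguated organization identifier,Disambiguation source,TUAKIRI name\n"
#       "University of Example,Dr,Jane,Doe,CTO,jane@example.edu,,YES,NZ,Wellington,"
#       "123456,RINGGOLD,University of Example\n")
#
# Column order can vary because each header cell is matched against the regular
# expressions in header_rexs; the call returns the number of data rows loaded.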
class User(BaseModel, UserMixin, AuditMixin):
"""
ORCiD Hub user.
It's a generic user including researchers, organisation administrators, hub administrators, etc.
"""
name = CharField(max_length=64, null=True)
first_name = CharField(null=True, verbose_name="First Name")
last_name = CharField(null=True, verbose_name="Last Name")
email = CharField(max_length=120, unique=True, null=True)
eppn = CharField(max_length=120, unique=True, null=True)
# ORCiD:
orcid = OrcidIdField(null=True, verbose_name="ORCID iD", help_text="User's ORCID iD")
confirmed = BooleanField(default=False)
# Role bit-map:
roles = SmallIntegerField(default=0)
is_locked = BooleanField(default=False)
webhook_enabled = BooleanField(default=False, null=True)
orcid_updated_at = DateTimeField(null=True, default=None)
# TODO: many-to-many
# NB! deprecated!
# TODO: we still need to remember the organisation that last authenticated the user
organisation = ForeignKeyField(
Organisation, related_name="members", on_delete="CASCADE", null=True)
created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
def __repr__(self):
if self.name and (self.eppn or self.email):
return "%s (%s)" % (self.name, self.email or self.eppn)
return self.name or self.email or self.orcid or super().__repr__()
@property
def organisations(self):
"""Get all linked to the user organisation query."""
return (Organisation.select(
Organisation, (Organisation.tech_contact_id == self.id).alias("is_tech_contact"),
((UserOrg.is_admin.is_null(False)) & (UserOrg.is_admin)).alias("is_admin")).join(
UserOrg, on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id)))
.naive())
@property
def linked_accounts(self):
"""Get all linked accounts - accounts sharing the same ORCID ID."""
return [u for u in User.select().where(User.orcid == self.orcid)] if self.orcid else [self]
@property
def available_organisations(self):
"""Get all not yet linked to the user organisation query."""
return (Organisation.select(Organisation).where(UserOrg.id.is_null()).join(
UserOrg,
JOIN.LEFT_OUTER,
on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id))))
@property
def admin_for(self):
"""Get organisations the user is admin for (query)."""
return self.organisations.where(UserOrg.is_admin)
@property
def is_active(self):
"""Get 'is_active' based on confirmed for Flask-Login.
TODO: confirmed - user that email is cunfimed either by IdP or by confirmation email
ins't the same as "is active".
"""
return self.confirmed
def has_role(self, role):
"""Return `True` if the user identifies with the specified role.
:param role: A role name, `Role` instance, or integer value.
"""
if isinstance(role, Role):
return bool(role & Role(self.roles))
elif isinstance(role, str):
try:
return bool(Role[role.upper()] & Role(self.roles))
except Exception:
return False
elif type(role) is int:
return bool(role & self.roles)
else:
return False
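# Usage sketch (illustrative, assuming Role is the flag enum used elsewhere in the app):
#   user.has_role(Role.ADMIN)     -> True when the ADMIN bit is set in user.roles
#   user.has_role("admin")        -> the same check via a case-insensitive name lookup
#   user.has_role(Role.ADMIN | Role.TECHNICAL) -> True if either bit is set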
@property
def is_superuser(self):
"""Test if the user is a HUB admin."""
return bool(self.roles & Role.SUPERUSER)
@is_superuser.setter
def is_superuser(self, value): # noqa: D401
"""Sets user as a HUB admin."""
if value:
self.roles |= Role.SUPERUSER
else:
self.roles &= ~Role.SUPERUSER
@property
def is_admin(self):
"""Test if the user belongs to the organisation admin."""
return bool(self.roles & Role.ADMIN)
def avatar(self, size=40, default="identicon"):
"""Return Gravatar service user avatar URL."""
# TODO: default gravatar image
# default = "https://www.example.com/default.jpg"
gravatar_url = "https://www.gravatar.com/avatar/" + md5(
self.email.lower().encode()).hexdigest() + "?"
gravatar_url += urlencode({'d': default, 's': str(size)})
return gravatar_url
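# Example (illustrative): for "jane@example.com" with the defaults this yields
# "https://www.gravatar.com/avatar/<md5 of the lower-cased email>?d=identicon&s=40".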
@property
def gravatar_profile_url(self):
"""Return Gravatar service user profile URL."""
return "https://www.gravatar.com/" + md5(self.email.lower().encode()).hexdigest()
@property
def affiliations(self):
"""Return affiliations with the current organisation."""
try:
user_org = UserOrg.get(user=self, org=self.organisation)
return Affiliation(user_org.affiliations)
except UserOrg.DoesNotExist:
return Affiliation.NONE
def is_tech_contact_of(self, org=None):
"""Indicats if the user is the technical contact of the organisation."""
if org is None:
org = self.organisation
return org and org.tech_contact and org.tech_contact_id == self.id
def is_admin_of(self, org=None):
"""Indicats if the user is the technical contact of the organisation."""
if org is None:
org = self.organisation
return org and UserOrg.select().where(UserOrg.user == self, UserOrg.org == org, UserOrg.is_admin).exists()
@property
def uuid(self):
"""Generate UUID for the user basee on the the primary email."""
return uuid.uuid5(uuid.NAMESPACE_URL, "mailto:" + (self.email or self.eppn))
DeferredUser.set_model(User)
class OrgInvitation(BaseModel, AuditMixin):
"""Organisation invitation to on-board the Hub."""
invitee = ForeignKeyField(
User, on_delete="CASCADE", null=True, related_name="received_org_invitations")
inviter = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="sent_org_invitations")
org = ForeignKeyField(Organisation, on_delete="SET NULL", verbose_name="Organisation")
email = TextField(help_text="The email address the invitation was sent to.")
token = TextField(unique=True)
confirmed_at = DateTimeField(null=True)
@property
def sent_at(self):
"""Get the time the invitation was sent."""
return self.created_at
class Meta: # noqa: D101,D106
db_table = "org_invitation"
class UserOrg(BaseModel, AuditMixin):
"""Linking object for many-to-many relationship."""
user = ForeignKeyField(User, on_delete="CASCADE", index=True)
org = ForeignKeyField(
Organisation, on_delete="CASCADE", index=True, verbose_name="Organisation")
is_admin = BooleanField(
null=True, default=False, help_text="User is an administrator for the organisation")
# Affiliation bit-map:
affiliations = SmallIntegerField(default=0, null=True, verbose_name="EDU Person Affiliations")
created_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="created_user_orgs")
updated_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="updated_user_orgs")
# TODO: the access token should be either here or in a separate list
# access_token = CharField(max_length=120, unique=True, null=True)
def save(self, *args, **kwargs):
"""Enforce foriegn key contraints and consolidate user roles with the linked organisations.
Enforce foriegn key contraints and consolidate user roles with the linked organisations
before saving data.
"""
if self.is_dirty():
if self.field_is_updated("org"):
self.org # just enforce re-querying
user = self.user
if self.is_admin != user.is_admin:
if self.is_admin or UserOrg.select().where((UserOrg.user_id == self.user_id) & (
UserOrg.org_id != self.org_id) & UserOrg.is_admin).exists(): # noqa: E125
user.roles |= Role.ADMIN
app.logger.info(f"Added ADMIN role to user {user}")
else:
user.roles &= ~Role.ADMIN
app.logger.info(f"Revoked ADMIN role from user {user}")
user.save()
return super().save(*args, **kwargs)
class Meta: # noqa: D101,D106
db_table = "user_org"
table_alias = "uo"
indexes = ((("user", "org"), True), )
class OrcidToken(BaseModel, AuditMixin):
"""For Keeping Orcid token in the table."""
user = ForeignKeyField(User, null=True, index=True) # TODO: add validation for 3-legged authorization tokens
org = ForeignKeyField(Organisation, index=True, verbose_name="Organisation")
scope = TextField(null=True, db_column="scope") # TODO: implement property
access_token = CharField(max_length=36, unique=True, null=True)
issue_time = DateTimeField(default=datetime.utcnow)
refresh_token = CharField(max_length=36, unique=True, null=True)
type: dict
sample: null
sku:
description:
- SKU of the application gateway resource.
returned: always
type: dict
sample: null
contains:
name:
description:
- Name of an application gateway SKU.
returned: always
type: str
sample: null
tier:
description:
- Tier of an application gateway.
returned: always
type: str
sample: null
capacity:
description:
- Capacity (instance count) of an application gateway.
returned: always
type: number
sample: null
ssl_policy:
description:
- SSL policy of the application gateway resource.
returned: always
type: dict
sample: null
contains:
disabled_ssl_protocols:
description:
- Ssl protocols to be disabled on application gateway.
returned: always
type: str
sample: null
policy_type:
description:
- Type of Ssl Policy.
returned: always
type: str
sample: null
policy_name:
description:
- Name of Ssl predefined policy.
returned: always
type: str
sample: null
cipher_suites:
description:
- >-
Ssl cipher suites to be enabled in the specified order to
application gateway.
returned: always
type: str
sample: null
min_protocol_version:
description:
- >-
Minimum version of Ssl protocol to be supported on application
gateway.
returned: always
type: str
sample: null
operational_state:
description:
- Operational state of the application gateway resource.
returned: always
type: str
sample: null
gateway_ipconfigurations:
description:
- >-
Subnets of the application gateway resource. For default limits,
see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway IP configuration.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the IP configuration that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
authentication_certificates:
description:
- >-
Authentication certificates of the application gateway resource.
For default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway authentication
certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the authentication certificate that is unique within
an Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
trusted_root_certificates:
description:
- >-
Trusted Root certificates of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway trusted root
certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the trusted root certificate that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
ssl_certificates:
description:
- >-
SSL certificates of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway SSL certificate.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the SSL certificate that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
frontend_ipconfigurations:
description:
- >-
Frontend IP addresses of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- >-
Properties of the application gateway frontend IP
configuration.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the frontend IP configuration that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
frontend_ports:
description:
- >-
Frontend ports of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway frontend port.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the frontend port that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
probes:
description:
- Probes of the application gateway resource.
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway probe.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the probe that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
backend_address_pools:
description:
- >-
Backend address pool of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway backend address pool.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the backend address pool that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
backend_http_settings_collection:
description:
- >-
Backend http settings of the application gateway resource. For
default limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway backend HTTP settings.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the backend http settings that is unique within an
Application Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
http_listeners:
description:
- >-
Http listeners of the application gateway resource. For default
limits, see [Application Gateway
limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the application gateway HTTP listener.
returned: always
type: dict
sample: null
name:
description:
- >-
Name of the HTTP listener that is unique within an Application
Gateway.
returned: always
type: str
sample: null
etag:
description:
- >-
A unique read-only string that changes whenever the resource
is updated.
returned: always
type: str
sample: null
type:
description:
- Type of the resource.
returned: always
type: str
sample: null
url_path_maps:
description:
- >-
URL path map of the application gateway resource.
# Repository: libremente/service-app, file: backend/ozon/api_base.py
# Copyright INRIM (https://www.inrim.eu)
# See LICENSE file for full licensing details.
from .appinit import *
import ujson
from .core.ServiceMain import ServiceMain
from json import JSONDecodeError
# TODO component base move to frontend
# Actions
@app.post("/action/{name}/{rec_name}", tags=["Actions"])
async def post_action_name_ref(
request: Request,
name: str,
rec_name: str,
parent: Optional[str] = "",
iframe: Optional[str] = "",
container_act: Optional[str] = "",
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
return await service.service_handle_action(
action_name=name, data=data, rec_name=rec_name, parent=parent,
iframe=iframe, execute=True, container_act=container_act)
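# Example call (illustrative; the action and record names below are hypothetical):
#
#   POST /action/save_document/rec_0001?parent=&iframe=
#   {"title": "My document", "status": "draft"}
#
# The JSON body is forwarded to ServiceMain.service_handle_action together with
# the action name and record name taken from the URL path.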
@app.post("/action/{name}", tags=["Actions"])
async def post_action_name(
request: Request,
name: str,
parent: Optional[str] = "",
iframe: Optional[str] = "",
container_act: Optional[str] = "",
apitoken: str = Header(None)
):
rec_name = ""
service = ServiceMain.new(request=request)
dataj = await request.json()
# logger.info(data)
if isinstance(dataj, str):
data = ujson.loads(dataj)
else:
data = dataj.copy()
return await service.service_handle_action(
action_name=name, data=data, rec_name=rec_name, parent=parent,
iframe=iframe, execute=True, container_act=container_act)
# only for Action Builder (maybe)
@app.get("/action/{name}", tags=["Actions"])
async def get_action_name(
request: Request,
name: str,
parent: Optional[str] = "",
iframe: Optional[str] = "",
container_act: Optional[str] = "",
apitoken: str = Header(None)
):
rec_name = ""
session = request.scope['ozon'].session
service = ServiceMain.new(request=request)
res = await service.service_handle_action(
action_name=name, rec_name=rec_name, parent=parent, iframe=iframe, container_act=container_act)
res['breadcrumb'] = session.app.get('breadcrumb', {})
return res
@app.get("/action/{name}/{rec_name}", tags=["Actions"])
async def get_action_ref(
request: Request,
name: str,
rec_name: str,
parent: Optional[str] = "",
iframe: Optional[str] = "",
container_act: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
service = ServiceMain.new(request=request)
res = await service.service_handle_action(
action_name=name, rec_name=rec_name, parent=parent, iframe=iframe, container_act=container_act)
res['breadcrumb'] = session.app['breadcrumb']
return res
@app.delete("/action/{name}/{rec_name}", tags=["Actions"])
async def delete_action_name_ref(
request: Request,
name: str,
rec_name: str,
parent: Optional[str] = "",
iframe: Optional[str] = "",
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
session = request.scope['ozon'].session
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
res = await service.service_handle_action(
action_name=name, data=data, rec_name=rec_name, parent=parent, iframe=iframe, execute=True)
res['breadcrumb'] = session.app['breadcrumb']
return res
# Component Remote Data and Resources
@app.get("/get_remote_data_select", tags=["Component Remote Data and Resources"])
async def get_remote_data_select(
request: Request,
url: str,
header_key: str,
header_value_key: str,
path_value: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# # session.app['save_session'] = False
service = ServiceMain.new(request=request)
res = await service.get_remote_data_select(url, path_value, header_key, header_value_key)
return res
@app.get("/resource/schema/select", tags=["Component Remote Data and Resources"])
async def get_schema_resource_select(
request: Request,
otype: str,
select: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
add_key = ["_id", "rec_name"]
return await service.service_get_schemas_by_type(
schema_type=otype, fields=select.split(","), additional_key=add_key)
@app.get("/resource/data/{model_name}", tags=["Component Remote Data and Resources"])
async def get_data_resources(
request: Request,
model_name: str,
fields: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
field_list = []
if fields:
field_list = fields.split(",")
service = ServiceMain.new(request=request)
props = request.query_params.__dict__['_dict'].copy()
query = props.get("domain", "{}")
domain = service.qe.check_parse_json(query)
if not isinstance(domain, dict):
return {
"status": "error",
"message": f'Errore Nella codifica del json {domain} {type(domain)}verifica double quote ',
"model": model
}
return await service.service_get_data_for_model(
model_name, query=domain, fields=field_list)
@app.get("/db_view/{model_name}", tags=["Component Remote Data and Resources"])
async def get_db_view(
request: Request,
model_name: str,
fields: Optional[str] = "",
apitoken: str = Header(None)
):
# session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
props = request.query_params.__dict__['_dict'].copy()
query = props.get("domain", "{}")
domain = service.qe.check_parse_json(query)
if not isinstance(domain, dict):
return {
"status": "error",
"message": f'Errore Nella codifica del json {domain} {type(domain)}verifica double quote ',
"model": model
}
return await service.service_get_data_view(
model_name, query=domain)
# Structural Data
@app.get("/layout", tags=["Structural Data"])
async def default_layout(
request: Request,
name: Optional[str] = "",
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
return await service.service_get_layout(name)
@app.get("/dashboard", tags=["Structural Data"])
async def dashboard(
request: Request,
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
return await service.service_get_dashboard()
@app.get("/dashboard/{menu_group}", tags=["Structural Data"])
async def dashboard(
request: Request,
menu_group: str,
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
return await service.service_get_dashboard(parent=menu_group)
@app.get("/form/schema/select", tags=["Structural Data"])
async def get_schema_select(
request: Request,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
fields = ['rec_name', 'title']
return await service.service_get_schemas_by_type(
schema_type="form", fields=fields)
@app.get("/form/schema/{parent}", tags=["Structural Data"])
async def get_schema_parent(
request: Request,
parent: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
return await service.service_get_schemas_by_parent_and_type(
parent, schema_type="form", fields=[])
@app.get("/schema/{model_name}", tags=["Structural Data"])
async def get_schema_model(
request: Request,
model_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
return await service.service_get_schema(model_name)
@app.get("/schema_model/{model_name}", tags=["Structural Data"])
async def get_schema_model_for_model_name(
request: Request,
model_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
return await service.service_get_schema_model(model_name)
@app.get("/record/{model_name}/{rec_name}", tags=["Structural Data"])
async def get_record_rec_name(
request: Request,
model_name: str,
rec_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
return await service.service_get_record(model_name, rec_name)
@app.get("/record/{model_name}", tags=["Core"])
async def get_record(
request: Request,
model_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
return await service.service_get_record(model_name, "")
@app.get("/models/distinct", tags=["Core"])
async def get_distinct_model(
request: Request,
model: Optional[str] = "component",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
props = request.query_params.__dict__['_dict'].copy()
query = props.get("domain", "{}")
domain = service.qe.check_parse_json(query)
if not isinstance(domain, dict):
err = {
"status": "error",
"message": f'Errore Nella codifica del json {domain} {type(domain)}verifica double quote ',
"model": model
}
logger.error(err)
return err
res = await service.service_distinct_rec_name_by_model(
model_name=model, domain=domain, props=props)
return res
@app.post("/count/{model_name}", tags=["Core"])
async def count_model(
request: Request,
model_name: str,
apitoken: str = Header(None)
):
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
res = await service.count(model_name=model_name, query_data=data)
return res
@app.post("/model/analysis/count", tags=["Core"])
async def analysis_count_model(
request: Request,
model: Optional[str] = "component",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
res = await service.service_freq_for_field_model(
model_name=model, field=data['field'], field_query=data.get('field_query', {}),
min_occurence=data.get('min_occurence', 2), add_fields=data.get('add_fields', ""),
sort=data.get('sort', -1)
)
return res
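# Example payload (illustrative; the field names depend on the deployed models):
#
#   POST /model/analysis/count?model=component
#   {"field": "type", "field_query": {}, "min_occurence": 2, "add_fields": ""}
#
# returns the frequency of each distinct value of "type" that occurs at least twice.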
@app.get("/clean/records", tags=["Core"])
async def clean_records(
request: Request,
apitoken: str = Header(None)
):
"""
Remove all records in all collections whose 'deleted' timestamp is greater than now.
"""
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
if session.is_admin:
res = await service.clean_all_to_delete_action()
else:
res = {"status": "err"}
return res
@app.post("/data/table/{action_name}", tags=["Table Data"])
async def post_table_data(
request: Request,
action_name: str,
parent: Optional[str] = "",
container_act: Optional[str] = "",
apitoken: str = Header(None)
):
rec_name = ""
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
res = await service.service_handle_action(
action_name=action_name, data=data, rec_name=rec_name, parent=parent,
iframe=False, execute=True, container_act=container_act)
return res
@app.post("/reorder/data/table", tags=["Table Data reorder"])
async def post_table_data_reorder(
request: Request,
apitoken: str = Header(None)
):
rec_name = ""
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
return await service.service_reorder_record(data)
@app.post("/data/search/{model}", tags=["Search Engine"])
async def post_table_search(
request: Request,
model: str,
parent: Optional[str] = "",
apitoken: str = Header(None)
):
rec_name = ""
session = request.scope['ozon'].session
# service = ServiceMain.new(request=request)
data = await request.json()
session.app.get('queries')[model] = ujson.dumps(data, escape_forward_slashes=False, ensure_ascii=False)
return {"link": "#"} # reload page
@app.post("/export_data/{model}", tags=["Component Remote Data and Model for export file"])
async def get_export_data(
request: Request,
model: str,
parent: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
dataj = await request.json()
if isinstance(dataj, dict):
data = dataj.copy()
elif isinstance(dataj, str):
data = ujson.loads(dataj)
res = await service.export_data(model, data, parent_name=parent)
return res
@app.post("/attachment/trash/{model}/{rec_name}", tags=["Attachments"])
async def attachment_to_trash(
request: Request,
model: str,
rec_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
data = await request.json()
# data = {}
# if isinstance(dataj, dict):
# data = dataj.copy()
# elif isinstance(dataj, str):
# data = ujson.loads(dataj)
logger.info(data)
res = await service.attachment_to_trash(model, rec_name, data.copy())
return res
@app.get("/mail_template/{model}", tags=["Mail"])
async def get_mail_template(
request: Request,
model: str,
parent: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
res = await service.get_mail_template(model, template_name="")
return res
@app.get("/mail_template/{model}/{template_name}", tags=["Mail"])
async def get_mail_template_with_name(
request: Request,
model: str,
template_name: Optional[str] = "",
parent: Optional[str] = "",
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service = ServiceMain.new(request=request)
res = await service.get_mail_template(model, template_name=template_name)
return res
@app.get("/mail_server/{server_name}", tags=["Mail"])
async def get_mail_server(
request: Request,
server_name: str,
apitoken: str = Header(None)
):
session = request.scope['ozon'].session
# session.app['save_session'] = False
service | |
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.10.zip',
'remote_bytes': 1726068853,
'remote_md5': '2ad595819ffa1d56d2de4c7ed43205a6',
'filename': filename_base + '.audio.10.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.11.zip',
'remote_bytes': 1744480272,
'remote_md5': '0ad29f7040a4e6a22cfd639b3a6738e5',
'filename': filename_base + '.audio.11.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.12.zip',
'remote_bytes': 1738707195,
'remote_md5': 'e5f4400c6b9697295fab4cf507155a2f',
'filename': filename_base + '.audio.12.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.13.zip',
'remote_bytes': 1835797785,
'remote_md5': '8855ab9f9896422746ab4c5d89d8da2f',
'filename': filename_base + '.audio.13.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.14.zip',
'remote_bytes': 1846390881,
'remote_md5': '092ad744452cd3e7de78f988a3d13020',
'filename': filename_base + '.audio.14.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.15.zip',
'remote_bytes': 1869032508,
'remote_md5': '4b5eb85f6592aebf846088d9df76b420',
'filename': filename_base + '.audio.15.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.16.zip',
'remote_bytes': 436971777,
'remote_md5': '2e0a89723e58a3836be019e6996ae460',
'filename': filename_base + '.audio.16.zip'
},
]
kwargs['audio_paths'] = [
'audio'
]
super(TAUUrbanAcousticScenes_2020_Mobile_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
if not item.source_label:
item.source_label = os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[-1]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
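def _example_prepare_development_set(data_path='datasets'):
    # Hedged usage sketch: assumes the packages listed above have already been
    # downloaded and extracted under data_path so the evaluation setup CSV files
    # exist; prepare() then builds meta.csv and loads it. The data_path value is
    # a placeholder.
    db = TAUUrbanAcousticScenes_2020_Mobile_DevelopmentSet(
        data_path=data_path,
        included_content_types=['meta']  # skip the large audio packages for a dry run
    )
    return db.prepare()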
class TAUUrbanAcousticScenes_2020_Mobile_EvaluationSet(AcousticSceneDataset):
"""TAU Urban Acoustic Scenes 2020 Mobile Evaluation dataset
This dataset is used in DCASE2020 - Task 1, Acoustic scene classification / Subtask A / Evaluation
"""
def __init__(self,
storage_name='TAU-urban-acoustic-scenes-2020-mobile-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TAU-urban-acoustic-scenes-2020-mobile-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': '<NAME>, <NAME>, and <NAME>',
'title': 'TAU Urban Acoustic Scenes 2020 Mobile, evaluation dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural/Synthetic',
'audio_recording_device_model': 'Zoom F8',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['reference_data_present'] = False
kwargs['crossvalidation_folds'] = 1
kwargs['evaluation_setup_file_extension'] = 'csv'
kwargs['meta_filename'] = 'meta.csv'
kwargs['check_meta'] = False
filename_base = 'TAU-urban-acoustic-scenes-2020-mobile-evaluation'
source_url = 'https://zenodo.org/record/3685828/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 8030,
'remote_md5': '2f1ac2991111c6ee1d51bec6e27bd825',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 28811,
'remote_md5': 'b8d9bb50faa282be170b81dc57e2b8b3',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1668607729,
'remote_md5': '632841f6b1ef9ed962ea61f879967411',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1674217327,
'remote_md5': '711fb0469f9b66669a300ebd1de24e9b',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1672649006,
'remote_md5': '575e517b826a5faf020be22ce766adf8',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1665432028,
'remote_md5': '5919fcbe217964756892a9661323c020',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1680713648,
'remote_md5': 'c733767217f16c746f50796c65ca1dd6',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1670741441,
'remote_md5': 'f39feb24910ffc97413e9c94b418f7ab',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1673394812,
'remote_md5': '90bad61f14163146702d430cf8241932',
'filename': filename_base + '.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.8.zip',
'remote_bytes': 1433443122,
'remote_md5': '4db5255382a5e5cab2d463c0d836b888',
'filename': filename_base + '.audio.8.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TAUUrbanAcousticScenes_2020_Mobile_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists() and self.reference_data_present:
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
def load_crossvalidation_data(self):
"""Load cross-validation into the container.
Returns
-------
self
"""
# Reset cross validation data and insert 'all_data'
if self.meta_container:
# Meta data is available
self.crossvalidation_data = DictContainer({
'train': {
'all_data': self.meta_container
},
'test': {
'all_data': self.meta_container
},
'evaluate': {
'all_data': self.meta_container
},
})
else:
# No meta data available, load data from evaluation setup files (if they exists).
self.crossvalidation_data = DictContainer({
'train': {
'all_data': MetaDataContainer()
},
'test': {
'all_data': MetaDataContainer()
},
'evaluate': {
'all_data': MetaDataContainer()
},
})
test_filename = self.evaluation_setup_filename(setup_part='test', fold=1)
evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate', fold=1)
if os.path.isfile(test_filename):
# Testing data exists, load and process it
self.crossvalidation_data['test']['all_data'] = MetaDataContainer(filename=test_filename).load()
# Process items
for item in self.crossvalidation_data['test']['all_data']:
self.process_meta_item(item=item)
if os.path.isfile(evaluate_filename):
# Evaluation data exists, load and process it
self.crossvalidation_data['evaluate']['all_data'] = MetaDataContainer(filename=evaluate_filename).load()
# Process items
for item in self.crossvalidation_data['evaluate']['all_data']:
self.process_meta_item(item=item)
for crossvalidation_set in list(self.crossvalidation_data.keys()):
for item in self.crossvalidation_data[crossvalidation_set]['all_data']:
self.process_meta_item(item=item)
return self
def scene_labels(self):
"""List of unique scene labels in the meta data.
Returns
-------
list
List of scene labels in alphabetical order.
"""
return ['airport',
'bus',
'metro',
'metro_station',
'park',
'public_square',
'shopping_mall',
'street_pedestrian',
'street_traffic',
'tram']
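def _example_evaluation_set_overview(data_path='datasets'):
    # Hedged usage sketch for the evaluation set defined above: scene_labels() is
    # fixed, while load_crossvalidation_data() needs the evaluation setup files on
    # disk; data_path is a placeholder.
    db = TAUUrbanAcousticScenes_2020_Mobile_EvaluationSet(data_path=data_path)
    print(db.scene_labels())        # ten scene classes in alphabetical order
    db.load_crossvalidation_data()  # fills the train/test/evaluate 'all_data' containers
    return db.crossvalidation_data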
class TAUUrbanAcousticScenes_2020_3Class_DevelopmentSet(AcousticSceneDataset):
"""TAU Urban Acoustic Scenes 2020 3Class Development dataset
This dataset is used in DCASE2020 - Task 1, Low-Complexity Acoustic Scene Classification / Subtask B / Development
"""
def __init__(self,
storage_name='TAU-urban-acoustic-scenes-2020-3class-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TAU-urban-acoustic-scenes-2020-3class-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': '<NAME>, <NAME>, and <NAME>',
'title': 'TAU Urban Acoustic Scenes 2020 3Class, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'audio_recording_device_model': 'Various',
'microphone_model': 'Various',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['evaluation_setup_file_extension'] = 'csv'
kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TAU-urban-acoustic-scenes-2020-3class-development'
source_url = 'https://zenodo.org/record/3670185/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 12026,
'remote_md5': '1f50091832fef59ef79f7b7fcfc91525',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 154856,
'remote_md5': '68de6dc1a81f8ef9c3a7851acda67786',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1657560336,
'remote_md5': 'dab8b3564c1927eb8fc5906f61917ef9',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1654366875,
'remote_md5': '82995465514560a3dff486ffc1b77cab',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1817911349,
'remote_md5': 'fda4f39dae354d6eea8662c4f8228b70',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1818799452,
'remote_md5': '6795666e7e872114a0bd8b7dea333761',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1803128209,
'remote_md5': '0920299dd8600c3fec421af79588535b',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1777403835,
'remote_md5': '65fab659046ef15c8ae3e15025737551',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + | |
' '),
('testspacelist', '\n')):
for lst in lists:
argv = ('./program', '--%s=%s' % (name, sep.join(lst)))
argv = FLAGS(argv)
self.assertEquals(getattr(FLAGS, name), lst)
# Test help text
flagsHelp = str(FLAGS)
assert flagsHelp.find("repeat") != -1, "cannot find flag in help"
assert flagsHelp.find(repeatHelp) != -1, "cannot find help string in help"
# Test flag specified twice
argv = ('./program', '--repeat=4', '--repeat=2', '--debug', '--nodebug')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 2)
self.assertEqual(FLAGS.get('debug', None), 0)
# Test MultiFlag with single default value
gflags.DEFINE_multistring('s_str', 'sing1',
'string option that can occur multiple times',
short_name='s')
self.assertEqual(FLAGS.get('s_str', None), [ 'sing1', ])
# Test MultiFlag with list of default values
multi_string_defs = [ 'def1', 'def2', ]
gflags.DEFINE_multistring('m_str', multi_string_defs,
'string option that can occur multiple times',
short_name='m')
self.assertEqual(FLAGS.get('m_str', None), multi_string_defs)
# Test flag specified multiple times with a MultiFlag
argv = ('./program', '--m_str=str1', '-m', 'str2')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('m_str', None), [ 'str1', 'str2', ])
# Test single-letter flags; should support both single and double dash
argv = ('./program', '-q', '-x8')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 1)
self.assertEqual(FLAGS.get('x', None), 8)
argv = ('./program', '--q', '--x', '9', '--noqu')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 1)
self.assertEqual(FLAGS.get('x', None), 9)
# --noqu should match '--noquack' since it's a unique prefix
self.assertEqual(FLAGS.get('quack', None), 0)
argv = ('./program', '--noq', '--x=10', '--qu')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 0)
self.assertEqual(FLAGS.get('x', None), 10)
self.assertEqual(FLAGS.get('quack', None), 1)
####################################
# Test flag serialization code:
oldtestlist = FLAGS.testlist
oldtestspacelist = FLAGS.testspacelist
argv = ('./program',
FLAGS['test0'].Serialize(),
FLAGS['test1'].Serialize(),
FLAGS['testnone'].Serialize(),
FLAGS['s_str'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS['test0'].Serialize(), '--notest0')
self.assertEqual(FLAGS['test1'].Serialize(), '--test1')
self.assertEqual(FLAGS['testnone'].Serialize(), '')
self.assertEqual(FLAGS['s_str'].Serialize(), '--s_str=sing1')
testlist1 = ['aa', 'bb']
testspacelist1 = ['aa', 'bb', 'cc']
FLAGS.testlist = list(testlist1)
FLAGS.testspacelist = list(testspacelist1)
argv = ('./program',
FLAGS['testlist'].Serialize(),
FLAGS['testspacelist'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS.testlist, testlist1)
self.assertEqual(FLAGS.testspacelist, testspacelist1)
testlist1 = ['aa some spaces', 'bb']
testspacelist1 = ['aa', 'bb,some,commas,', 'cc']
FLAGS.testlist = list(testlist1)
FLAGS.testspacelist = list(testspacelist1)
argv = ('./program',
FLAGS['testlist'].Serialize(),
FLAGS['testspacelist'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS.testlist, testlist1)
self.assertEqual(FLAGS.testspacelist, testspacelist1)
FLAGS.testlist = oldtestlist
FLAGS.testspacelist = oldtestspacelist
####################################
# Test flag-update:
def ArgsString():
flagnames = FLAGS.RegisteredFlags()
flagnames.sort()
nonbool_flags = ['--%s %s' % (name, FLAGS.get(name, None))
for name in flagnames
if not isinstance(FLAGS[name], gflags.BooleanFlag)]
truebool_flags = ['--%s' % (name)
for name in flagnames
if isinstance(FLAGS[name], gflags.BooleanFlag) and
FLAGS.get(name, None)]
falsebool_flags = ['--no%s' % (name)
for name in flagnames
if isinstance(FLAGS[name], gflags.BooleanFlag) and
not FLAGS.get(name, None)]
return ' '.join(nonbool_flags + truebool_flags + falsebool_flags)
argv = ('./program', '--repeat=3', '--name=giants', '--nodebug')
FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 3)
self.assertEqual(FLAGS.get('name', None), 'giants')
self.assertEqual(FLAGS.get('debug', None), 0)
self.assertEqual(ArgsString(),
"--kwery None "
"--l 9223372032559808512 "
"--letters ['a', 'b', 'c'] "
"--m ['str1', 'str2'] --m_str ['str1', 'str2'] "
"--name giants "
"--numbers [1, 2, 3] "
"--repeat 3 "
"--s ['sing1'] --s_str ['sing1'] "
""
""
"--testget4 None --testlist [] "
"--testspacelist [] --x 10 "
"--noexec --quack "
"--test1 "
"--testget1 --tmod_baz_x "
"--no? --nodebug --nohelp --nohelpshort --nohelpxml --noq "
""
"--notest0 --notestget2 --notestget3 --notestnone")
argv = ('./program', '--debug', '--m_str=upd1', '-s', 'upd2')
FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 3)
self.assertEqual(FLAGS.get('name', None), 'giants')
self.assertEqual(FLAGS.get('debug', None), 1)
# items appended to existing non-default value lists for --m/--m_str
# new value overwrites default value (not appended to it) for --s/--s_str
self.assertEqual(ArgsString(),
"--kwery None "
"--l 9223372032559808512 "
"--letters ['a', 'b', 'c'] "
"--m ['str1', 'str2', 'upd1'] "
"--m_str ['str1', 'str2', 'upd1'] "
"--name giants "
"--numbers [1, 2, 3] "
"--repeat 3 "
"--s ['upd2'] --s_str ['upd2'] "
""
""
"--testget4 None --testlist [] "
"--testspacelist [] --x 10 "
"--debug --noexec --quack "
"--test1 "
"--testget1 --tmod_baz_x "
"--no? --nohelp --nohelpshort --nohelpxml --noq "
""
"--notest0 --notestget2 --notestget3 --notestnone")
####################################
# Test all kind of error conditions.
# Duplicate flag detection
try:
gflags.DEFINE_boolean("run", 0, "runhelp", short_name='q')
raise AssertionError("duplicate flag detection failed")
except gflags.DuplicateFlag:
pass
# Duplicate short flag detection
try:
gflags.DEFINE_boolean("zoom1", 0, "runhelp z1", short_name='z')
gflags.DEFINE_boolean("zoom2", 0, "runhelp z2", short_name='z')
raise AssertionError("duplicate short flag detection failed")
except gflags.DuplicateFlag, e:
self.assertTrue("The flag 'z' is defined twice. " in e.args[0])
self.assertTrue("First from" in e.args[0])
self.assertTrue(", Second from" in e.args[0])
# Duplicate mixed flag detection
try:
gflags.DEFINE_boolean("short1", 0, "runhelp s1", short_name='s')
gflags.DEFINE_boolean("s", 0, "runhelp s2")
raise AssertionError("duplicate mixed flag detection failed")
except gflags.DuplicateFlag, e:
self.assertTrue("The flag 's' is defined twice. " in e.args[0])
self.assertTrue("First from" in e.args[0])
self.assertTrue(", Second from" in e.args[0])
# Check that duplicate flag detection detects definition sites
# correctly.
flagnames = ["repeated"]
original_flags = gflags.FlagValues()
gflags.DEFINE_boolean(flagnames[0], False, "Flag about to be repeated.",
flag_values=original_flags)
duplicate_flags = module_foo.DuplicateFlags(flagnames)
try:
original_flags.AppendFlagValues(duplicate_flags)
except gflags.DuplicateFlagError, e:
self.assertTrue("flags_unittest" in str(e))
self.assertTrue("module_foo" in str(e))
# Make sure allow_override works
try:
gflags.DEFINE_boolean("dup1", 0, "runhelp d11", short_name='u',
allow_override=0)
flag = FLAGS.FlagDict()['dup1']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup1", 1, "runhelp d12", short_name='u',
allow_override=1)
flag = FLAGS.FlagDict()['dup1']
self.assertEqual(flag.default, 1)
except gflags.DuplicateFlag:
raise AssertionError("allow_override did not permit a flag duplication")
# Make sure allow_override works
try:
gflags.DEFINE_boolean("dup2", 0, "runhelp d21", short_name='u',
allow_override=1)
flag = FLAGS.FlagDict()['dup2']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup2", 1, "runhelp d22", short_name='u',
allow_override=0)
flag = FLAGS.FlagDict()['dup2']
self.assertEqual(flag.default, 1)
except gflags.DuplicateFlag:
raise AssertionError("allow_override did not permit a flag duplication")
# Make sure allow_override doesn't work with None default
try:
gflags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u3',
allow_override=0)
flag = FLAGS.FlagDict()['dup3']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup3", None, "runhelp d32", short_name='u3',
allow_override=1)
raise AssertionError('Cannot override a flag with a default of None')
except gflags.DuplicateFlagCannotPropagateNoneToSwig:
pass
# Make sure that when we override, the help string gets updated correctly
gflags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u',
allow_override=1)
gflags.DEFINE_boolean("dup3", 1, "runhelp d32", short_name='u',
allow_override=1)
self.assert_(str(FLAGS).find('runhelp d31') == -1)
self.assert_(str(FLAGS).find('runhelp d32') != -1)
# Make sure AppendFlagValues works
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("new1", 0, "runhelp n1", flag_values=new_flags)
gflags.DEFINE_boolean("new2", 0, "runhelp n2", flag_values=new_flags)
self.assertEqual(len(new_flags.FlagDict()), 2)
old_len = len(FLAGS.FlagDict())
FLAGS.AppendFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict())-old_len, 2)
self.assertEqual("new1" in FLAGS.FlagDict(), True)
self.assertEqual("new2" in FLAGS.FlagDict(), True)
# Then test that removing those flags works
FLAGS.RemoveFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict()), old_len)
self.assertFalse("new1" in FLAGS.FlagDict())
self.assertFalse("new2" in FLAGS.FlagDict())
# Make sure AppendFlagValues works with flags with shortnames.
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("new3", 0, "runhelp n3", flag_values=new_flags)
gflags.DEFINE_boolean("new4", 0, "runhelp n4", flag_values=new_flags,
short_name="n4")
self.assertEqual(len(new_flags.FlagDict()), 3)
old_len = len(FLAGS.FlagDict())
FLAGS.AppendFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict())-old_len, 3)
self.assertTrue("new3" in FLAGS.FlagDict())
self.assertTrue("new4" in FLAGS.FlagDict())
self.assertTrue("n4" in FLAGS.FlagDict())
self.assertEqual(FLAGS.FlagDict()['n4'], FLAGS.FlagDict()['new4'])
# Then test removing them
FLAGS.RemoveFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict()), old_len)
self.assertFalse("new3" in FLAGS.FlagDict())
self.assertFalse("new4" in FLAGS.FlagDict())
self.assertFalse("n4" in FLAGS.FlagDict())
# Make sure AppendFlagValues fails on duplicates
gflags.DEFINE_boolean("dup4", 0, "runhelp d41")
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("dup4", 0, "runhelp d42", flag_values=new_flags)
try:
FLAGS.AppendFlagValues(new_flags)
raise AssertionError("ignore_copy was not set but caused no exception")
except gflags.DuplicateFlag:
pass
# Integer out of bounds
try:
argv = ('./program', '--repeat=-4')
FLAGS(argv)
raise AssertionError('integer bounds exception not raised:'
+ str(FLAGS.repeat))
except gflags.IllegalFlagValue:
pass
# Non-integer
try:
argv = ('./program', '--repeat=2.5')
FLAGS(argv)
raise AssertionError("malformed integer value exception not raised")
except gflags.IllegalFlagValue:
pass
# Missing required argument
try:
argv = ('./program', '--name')
FLAGS(argv)
raise AssertionError("Flag argument required exception not raised")
except gflags.FlagsError:
pass
# Non-boolean arguments for boolean
try:
argv = ('./program', '--debug=goofup')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
try:
argv = ('./program', '--debug=42')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
# Non-numeric argument for integer flag --repeat
try:
argv = ('./program', '--repeat', 'Bob', 'extra')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
# Test ModuleHelp().
helpstr = FLAGS.ModuleHelp(module_baz)
expected_help = "\n" + module_baz.__name__ + ":" + """
--[no]tmod_baz_x: Boolean flag.
(default: 'true')"""
self.assertMultiLineEqual(expected_help, helpstr)
# Test MainModuleHelp(). This must be part of test_flags because
# it depends on dup1/2/3/etc being introduced first.
helpstr = FLAGS.MainModuleHelp()
expected_help = "\n" + sys.argv[0] + ':' + """
--[no]debug: debughelp
(default: 'false')
-u,--[no]dup1: runhelp d12
(default: 'true')
-u,--[no]dup2: runhelp d22
(default: 'true')
-u,--[no]dup3: runhelp d32
(default: 'true')
--[no]dup4: runhelp d41
(default: 'false')
--kwery: <who|what|why|where|when>: ?
--l: how long to be
(default: '9223372032559808512')
(an integer)
--letters: a list of letters
(default: 'a,b,c')
(a comma separated list)
-m,--m_str: string option that can occur multiple times;
repeat this option to specify a list of values
(default: "['def1', 'def2']")
--name: namehelp
(default: 'Bob')
--[no]noexec: boolean flag with no as prefix
(default: 'true')
--numbers: a list of numbers
(default: '1,2,3')
(a comma separated list)
--[no]q: quiet mode
(default: 'true')
--[no]quack: superstring of 'q'
(default: 'false')
-r,--repeat: how | |
rule1323,
rule1324,
rule1325,
rule1326,
rule1327,
rule1328,
rule1329,
rule1330,
rule1331,
rule1332,
rule1333,
rule1334,
rule1335,
rule1336,
rule1337,
rule1338,
rule1339,
rule1340,
rule1341,
rule1342,
rule1343,
rule1344,
rule1345,
rule1346,
rule1347,
rule1348,
rule1349,
rule1350,
rule1351,
rule1352,
rule1353,
rule1354,
rule1355,
rule1356,
rule1357,
rule1358,
rule1359,
rule1360,
rule1361,
rule1362,
rule1363,
rule1364,
rule1365,
rule1366,
rule1367,
rule1368,
rule1369,
rule1370,
rule1371,
rule1372,
rule1373,
rule1374,
rule1375,
rule1376,
rule1377,
rule1378,
rule1379,
rule1380,
rule1381,
rule1382,
rule1383,
rule1384,
rule1385,
rule1386,
rule1387,
rule1388,
rule1389,
rule1390,
rule1391,
rule1392,
rule1393,
rule1394,
rule1395,
rule1396,
rule1397,
rule1398,
rule1399,
rule1400,
rule1401,
rule1402,
rule1403,
rule1404,
rule1405,
rule1406,
rule1407,
rule1408,
rule1409,
rule1410,
rule1411,
rule1412,
rule1413,
rule1414,
rule1415,
rule1416,
rule1417,
rule1418,
rule1419,
rule1420,
rule1421,
rule1422,
rule1423,
rule1424,
rule1425,
rule1426,
rule1427,
rule1428,
rule1429,
rule1430,
rule1431,
rule1432,
rule1433,
rule1434,
rule1435,
rule1436,
rule1437,
rule1438,
rule1439,
rule1440,
rule1441,
rule1442,
rule1443,
rule1444,
rule1445,
rule1446,
rule1447,
rule1448,
rule1449,
rule1450,
rule1451,
rule1452,
rule1453,
rule1454,
rule1455,
rule1456,
rule1457,
rule1458,
rule1459,
rule1460,
rule1461,
rule1462,
rule1463,
rule1464,
rule1465,
rule1466,
rule1467,
rule1468,
rule1469,
rule1470,
rule1471,
rule1472,
rule1473,
rule1474,
rule1475,
]
def replacement1079(a, b, c, n, n2, p, x):
return Int(x ** (S(2) * n * p) * (a * x ** (-S(2) * n) + b * x ** (-n) + c) ** p, x)
def With1080(a, b, c, n, n2, p, x):
k = Denominator(n)
return Dist(
k,
Subst(
Int(
x ** (k + S(-1))
* (a + b * x ** (k * n) + c * x ** (S(2) * k * n)) ** p,
x,
),
x,
x ** (S(1) / k),
),
x,
)
def replacement1081(a, b, c, n, n2, p, x):
return Simp(
x
* (S(2) * a + b * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** p
/ (S(2) * a),
x,
)
def replacement1082(a, b, c, n, n2, p, x):
return -Simp(
x
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(1))
/ (a * (S(2) * p + S(1))),
x,
) + Simp(
x
* (S(2) * a + b * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** p
/ (S(2) * a * (n + S(1))),
x,
)
def replacement1083(a, b, c, n, n2, p, x):
return Dist(
sqrt(a + b * x ** n + c * x ** (S(2) * n)) / (b + S(2) * c * x ** n),
Int(
(b + S(2) * c * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(-1) / 2),
x,
),
x,
)
def replacement1084(a, b, c, n, n2, p, x):
return Dist(
(S(4) * c) ** (-IntPart(p))
* (b + S(2) * c * x ** n) ** (-S(2) * FracPart(p))
* (a + b * x ** n + c * x ** (S(2) * n)) ** FracPart(p),
Int((b + S(2) * c * x ** n) ** (S(2) * p), x),
x,
)
def replacement1085(a, b, c, n, n2, x):
return Simp(x * sqrt(a + b * x ** n + c * x ** (S(2) * n)) / (n + S(1)), x) + Simp(
b
* n
* x
* sqrt(a + b * x ** n + c * x ** (S(2) * n))
/ ((b + S(2) * c * x ** n) * (n + S(1))),
x,
)
def replacement1086(a, b, c, n, n2, p, x):
return -Subst(
Int((a + b * x ** (-n) + c * x ** (-S(2) * n)) ** p / x ** S(2), x), x, S(1) / x
)
def replacement1087(a, b, c, n, n2, p, x):
return (
Dist(
S(2)
* a
* n ** S(2)
* p
* (S(2) * p + S(-1))
/ ((S(2) * n * p + S(1)) * (n * (S(2) * p + S(-1)) + S(1))),
Int((a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(-1)), x),
x,
)
+ Simp(
x * (a + b * x ** n + c * x ** (S(2) * n)) ** p / (S(2) * n * p + S(1)), x
)
+ Simp(
n
* p
* x
* (S(2) * a + b * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(-1))
/ ((S(2) * n * p + S(1)) * (n * (S(2) * p + S(-1)) + S(1))),
x,
)
)
def replacement1088(a, b, c, n, n2, p, x):
return (
Dist(
(S(2) * n * (p + S(1)) + S(1))
* (n * (S(2) * p + S(1)) + S(1))
/ (S(2) * a * n ** S(2) * (p + S(1)) * (S(2) * p + S(1))),
Int((a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(1)), x),
x,
)
- Simp(
x
* (S(2) * a + b * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** p
/ (S(2) * a * n * (S(2) * p + S(1))),
x,
)
- Simp(
x
* (n * (S(2) * p + S(1)) + S(1))
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(1))
/ (S(2) * a * n ** S(2) * (p + S(1)) * (S(2) * p + S(1))),
x,
)
)
def replacement1089(a, b, c, n, n2, p, x):
return Dist(
c ** (-IntPart(p))
* (b / S(2) + c * x ** n) ** (-S(2) * FracPart(p))
* (a + b * x ** n + c * x ** (S(2) * n)) ** FracPart(p),
Int((b / S(2) + c * x ** n) ** (S(2) * p), x),
x,
)
def replacement1090(a, b, c, n, n2, p, x):
return -Subst(
Int((a + b * x ** (-n) + c * x ** (-S(2) * n)) ** p / x ** S(2), x), x, S(1) / x
)
def replacement1091(a, b, c, n, n2, p, x):
return Int(ExpandIntegrand((a + b * x ** n + c * x ** (S(2) * n)) ** p, x), x)
def replacement1092(a, b, c, n, n2, p, x):
return Dist(
n * p / (S(2) * n * p + S(1)),
Int(
(S(2) * a + b * x ** n)
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(-1)),
x,
),
x,
) + Simp(x * (a + b * x ** n + c * x ** (S(2) * n)) ** p / (S(2) * n * p + S(1)), x)
def replacement1093(a, b, c, n, n2, p, x):
return Dist(
S(1) / (a * n * (p + S(1)) * (-S(4) * a * c + b ** S(2))),
Int(
(a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(1))
* (
-S(2) * a * c
+ b ** S(2)
+ b * c * x ** n * (n * (S(2) * p + S(3)) + S(1))
+ n * (p + S(1)) * (-S(4) * a * c + b ** S(2))
),
x,
),
x,
) - Simp(
x
* (a + b * x ** n + c * x ** (S(2) * n)) ** (p + S(1))
* (-S(2) * a * c + b ** S(2) + b * c * x ** n)
/ (a * n * (p + S(1)) * (-S(4) * | |
(self.vpp_seq_number, 0),
"BFD sequence number not one of "
"(%s, 0)" % self.vpp_seq_number)
self.vpp_seq_number = recvd_seq_num
# last 20 bytes represent the hash - so replace them with the key,
# pad the result with zeros and hash the result
hash_material = bfd.original[:-20] + self.sha1_key.key + \
b"\0" * (20 - len(self.sha1_key.key))
expected_hash = hashlib.sha1(hash_material).hexdigest()
self.test.assert_equal(binascii.hexlify(bfd.auth_key_hash),
expected_hash, "Auth key hash")
def verify_bfd(self, packet):
""" Verify correctness of BFD layer. """
bfd = packet[BFD]
self.test.assert_equal(bfd.version, 1, "BFD version")
self.test.assert_equal(bfd.your_discriminator,
self.my_discriminator,
"BFD - your discriminator")
if self.sha1_key:
self.verify_sha1_auth(packet)
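# Standalone sketch of the keyed SHA-1 check done in verify_sha1_auth() above: the
# last 20 bytes of the BFD frame carry the digest, and the expected value is the
# SHA-1 of the frame with those bytes replaced by the zero-padded shared key.
# frame_bytes and key are caller-supplied placeholders.
import hashlib

def expected_bfd_sha1(frame_bytes, key):
    hash_material = frame_bytes[:-20] + key + b"\0" * (20 - len(key))
    return hashlib.sha1(hash_material).hexdigest()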
def bfd_session_up(test):
""" Bring BFD session up """
test.logger.info("BFD: Waiting for slow hello")
p = wait_for_bfd_packet(test, 2)
old_offset = None
if hasattr(test, 'vpp_clock_offset'):
old_offset = test.vpp_clock_offset
test.vpp_clock_offset = time.time() - p.time
test.logger.debug("BFD: Calculated vpp clock offset: %s",
test.vpp_clock_offset)
if old_offset:
test.assertAlmostEqual(
old_offset, test.vpp_clock_offset, delta=0.5,
msg="vpp clock offset not stable (new: %s, old: %s)" %
(test.vpp_clock_offset, old_offset))
test.logger.info("BFD: Sending Init")
test.test_session.update(my_discriminator=randint(0, 40000000),
your_discriminator=p[BFD].my_discriminator,
state=BFDState.init)
if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
BFDAuthType.meticulous_keyed_sha1:
test.test_session.inc_seq_num()
test.test_session.send_packet()
test.logger.info("BFD: Waiting for event")
e = test.vapi.wait_for_event(1, "bfd_udp_session_details")
verify_event(test, e, expected_state=BFDState.up)
test.logger.info("BFD: Session is Up")
test.test_session.update(state=BFDState.up)
if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
BFDAuthType.meticulous_keyed_sha1:
test.test_session.inc_seq_num()
test.test_session.send_packet()
test.assert_equal(test.vpp_session.state, BFDState.up, BFDState)
def bfd_session_down(test):
""" Bring BFD session down """
test.assert_equal(test.vpp_session.state, BFDState.up, BFDState)
test.test_session.update(state=BFDState.down)
if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
BFDAuthType.meticulous_keyed_sha1:
test.test_session.inc_seq_num()
test.test_session.send_packet()
test.logger.info("BFD: Waiting for event")
e = test.vapi.wait_for_event(1, "bfd_udp_session_details")
verify_event(test, e, expected_state=BFDState.down)
test.logger.info("BFD: Session is Down")
test.assert_equal(test.vpp_session.state, BFDState.down, BFDState)
def verify_bfd_session_config(test, session, state=None):
dump = session.get_bfd_udp_session_dump_entry()
test.assertIsNotNone(dump)
# since dump is not none, we have verified that sw_if_index and addresses
# are valid (in get_bfd_udp_session_dump_entry)
if state:
test.assert_equal(dump.state, state, "session state")
test.assert_equal(dump.required_min_rx, session.required_min_rx,
"required min rx interval")
test.assert_equal(dump.desired_min_tx, session.desired_min_tx,
"desired min tx interval")
test.assert_equal(dump.detect_mult, session.detect_mult,
"detect multiplier")
if session.sha1_key is None:
test.assert_equal(dump.is_authenticated, 0, "is_authenticated flag")
else:
test.assert_equal(dump.is_authenticated, 1, "is_authenticated flag")
test.assert_equal(dump.bfd_key_id, session.bfd_key_id,
"bfd key id")
test.assert_equal(dump.conf_key_id,
session.sha1_key.conf_key_id,
"config key id")
def verify_ip(test, packet):
""" Verify correctness of IP layer. """
if test.vpp_session.af == AF_INET6:
ip = packet[IPv6]
local_ip = test.pg0.local_ip6
remote_ip = test.pg0.remote_ip6
test.assert_equal(ip.hlim, 255, "IPv6 hop limit")
else:
ip = packet[IP]
local_ip = test.pg0.local_ip4
remote_ip = test.pg0.remote_ip4
test.assert_equal(ip.ttl, 255, "IPv4 TTL")
test.assert_equal(ip.src, local_ip, "IP source address")
test.assert_equal(ip.dst, remote_ip, "IP destination address")
def verify_udp(test, packet):
""" Verify correctness of UDP layer. """
udp = packet[UDP]
test.assert_equal(udp.dport, BFD.udp_dport, "UDP destination port")
test.assert_in_range(udp.sport, BFD.udp_sport_min, BFD.udp_sport_max,
"UDP source port")
def verify_event(test, event, expected_state):
""" Verify correctness of event values. """
e = event
test.logger.debug("BFD: Event: %s" % moves.reprlib.repr(e))
test.assert_equal(e.sw_if_index,
test.vpp_session.interface.sw_if_index,
"BFD interface index")
is_ipv6 = 0
if test.vpp_session.af == AF_INET6:
is_ipv6 = 1
test.assert_equal(e.is_ipv6, is_ipv6, "is_ipv6")
if test.vpp_session.af == AF_INET:
test.assert_equal(e.local_addr[:4], test.vpp_session.local_addr_n,
"Local IPv4 address")
test.assert_equal(e.peer_addr[:4], test.vpp_session.peer_addr_n,
"Peer IPv4 address")
else:
test.assert_equal(e.local_addr, test.vpp_session.local_addr_n,
"Local IPv6 address")
test.assert_equal(e.peer_addr, test.vpp_session.peer_addr_n,
"Peer IPv6 address")
test.assert_equal(e.state, expected_state, BFDState)
def wait_for_bfd_packet(test, timeout=1, pcap_time_min=None):
""" wait for BFD packet and verify its correctness
:param timeout: how long to wait
:param pcap_time_min: ignore packets with pcap timestamp lower than this
:returns: tuple (packet, time spent waiting for packet)
"""
test.logger.info("BFD: Waiting for BFD packet")
deadline = time.time() + timeout
counter = 0
while True:
counter += 1
# sanity check
test.assert_in_range(counter, 0, 100, "number of packets ignored")
time_left = deadline - time.time()
if time_left < 0:
raise CaptureTimeoutError("Packet did not arrive within timeout")
p = test.pg0.wait_for_packet(timeout=time_left)
test.logger.debug(ppp("BFD: Got packet:", p))
if pcap_time_min is not None and p.time < pcap_time_min:
test.logger.debug(ppp("BFD: ignoring packet (pcap time %s < "
"pcap time min %s):" %
(p.time, pcap_time_min), p))
else:
break
bfd = p[BFD]
if bfd is None:
raise Exception(ppp("Unexpected or invalid BFD packet:", p))
if bfd.payload:
raise Exception(ppp("Unexpected payload in BFD packet:", bfd))
verify_ip(test, p)
verify_udp(test, p)
test.test_session.verify_bfd(p)
return p
@unittest.skipUnless(running_extended_tests, "part of extended tests")
class BFD4TestCase(VppTestCase):
"""Bidirectional Forwarding Detection (BFD)"""
pg0 = None
vpp_clock_offset = None
vpp_session = None
test_session = None
@classmethod
def setUpClass(cls):
super(BFD4TestCase, cls).setUpClass()
cls.vapi.cli("set log class bfd level debug")
try:
cls.create_pg_interfaces([0])
cls.create_loopback_interfaces(1)
cls.loopback0 = cls.lo_interfaces[0]
cls.loopback0.config_ip4()
cls.loopback0.admin_up()
cls.pg0.config_ip4()
cls.pg0.configure_ipv4_neighbors()
cls.pg0.admin_up()
cls.pg0.resolve_arp()
except Exception:
super(BFD4TestCase, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(BFD4TestCase, cls).tearDownClass()
def setUp(self):
super(BFD4TestCase, self).setUp()
self.factory = AuthKeyFactory()
self.vapi.want_bfd_events()
self.pg0.enable_capture()
try:
self.vpp_session = VppBFDUDPSession(self, self.pg0,
self.pg0.remote_ip4)
self.vpp_session.add_vpp_config()
self.vpp_session.admin_up()
self.test_session = BFDTestSession(self, self.pg0, AF_INET)
except:
self.vapi.want_bfd_events(enable_disable=0)
raise
def tearDown(self):
if not self.vpp_dead:
self.vapi.want_bfd_events(enable_disable=0)
self.vapi.collect_events() # clear the event queue
super(BFD4TestCase, self).tearDown()
def test_session_up(self):
""" bring BFD session up """
bfd_session_up(self)
def test_session_up_by_ip(self):
""" bring BFD session up - first frame looked up by address pair """
self.logger.info("BFD: Sending Slow control frame")
self.test_session.update(my_discriminator=randint(0, 40000000))
self.test_session.send_packet()
self.pg0.enable_capture()
p = self.pg0.wait_for_packet(1)
self.assert_equal(p[BFD].your_discriminator,
self.test_session.my_discriminator,
"BFD - your discriminator")
self.assert_equal(p[BFD].state, BFDState.init, BFDState)
self.test_session.update(your_discriminator=p[BFD].my_discriminator,
state=BFDState.up)
self.logger.info("BFD: Waiting for event")
e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
verify_event(self, e, expected_state=BFDState.init)
self.logger.info("BFD: Sending Up")
self.test_session.send_packet()
self.logger.info("BFD: Waiting for event")
e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
verify_event(self, e, expected_state=BFDState.up)
self.logger.info("BFD: Session is Up")
self.test_session.update(state=BFDState.up)
self.test_session.send_packet()
self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
def test_session_down(self):
""" bring BFD session down """
bfd_session_up(self)
bfd_session_down(self)
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_hold_up(self):
""" hold BFD session up """
bfd_session_up(self)
for dummy in range(self.test_session.detect_mult * 2):
wait_for_bfd_packet(self)
self.test_session.send_packet()
self.assert_equal(len(self.vapi.collect_events()), 0,
"number of bfd events")
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_slow_timer(self):
""" verify slow periodic control frames while session down """
packet_count = 3
self.logger.info("BFD: Waiting for %d BFD packets", packet_count)
prev_packet = wait_for_bfd_packet(self, 2)
for dummy in range(packet_count):
next_packet = wait_for_bfd_packet(self, 2)
time_diff = next_packet.time - prev_packet.time
# spec says the range should be <0.75, 1>, allow extra 0.05 margin
# to work around timing issues
self.assert_in_range(
time_diff, 0.70, 1.05, "time between slow packets")
prev_packet = next_packet
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_zero_remote_min_rx(self):
""" no packets when zero remote required min rx interval """
bfd_session_up(self)
self.test_session.update(required_min_rx=0)
self.test_session.send_packet()
for dummy in range(self.test_session.detect_mult):
self.sleep(self.vpp_session.required_min_rx / USEC_IN_SEC,
"sleep before transmitting bfd packet")
self.test_session.send_packet()
try:
p = wait_for_bfd_packet(self, timeout=0)
self.logger.error(ppp("Received unexpected packet:", p))
except CaptureTimeoutError:
pass
self.assert_equal(
len(self.vapi.collect_events()), 0, "number of bfd events")
self.test_session.update(required_min_rx=300000)
for dummy in range(3):
self.test_session.send_packet()
wait_for_bfd_packet(
self, timeout=self.test_session.required_min_rx / USEC_IN_SEC)
self.assert_equal(
len(self.vapi.collect_events()), 0, "number of bfd events")
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_conn_down(self):
""" verify session goes down after inactivity """
bfd_session_up(self)
detection_time = self.test_session.detect_mult *\
self.vpp_session.required_min_rx / USEC_IN_SEC
self.sleep(detection_time, "waiting for BFD session time-out")
e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
verify_event(self, e, expected_state=BFDState.down)
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_large_required_min_rx(self):
""" large remote required min rx interval """
bfd_session_up(self)
p = wait_for_bfd_packet(self)
interval = 3000000
self.test_session.update(required_min_rx=interval)
self.test_session.send_packet()
time_mark = time.time()
count = 0
# busy wait here, trying to collect a packet or event, vpp is not
# allowed to send packets and the session will timeout first - so the
# Up->Down event must arrive before any packets do
while time.time() < time_mark + interval / USEC_IN_SEC:
try:
p = wait_for_bfd_packet(self, timeout=0)
# if vpp managed to send a packet before we did the session
# update, then that's fine, ignore it
if p.time < time_mark - self.vpp_clock_offset:
continue
self.logger.error(ppp("Received unexpected packet:", p))
count += 1
except CaptureTimeoutError:
pass
events = self.vapi.collect_events()
if len(events) > 0:
verify_event(self, events[0], BFDState.down)
break
self.assert_equal(count, 0, "number of packets received")
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_immediate_remote_min_rx_reduction(self):
""" immediately honor remote required min rx reduction """
self.vpp_session.remove_vpp_config()
self.vpp_session = VppBFDUDPSession(
self, self.pg0, self.pg0.remote_ip4, desired_min_tx=10000)
self.pg0.enable_capture()
self.vpp_session.add_vpp_config()
self.test_session.update(desired_min_tx=1000000,
required_min_rx=1000000)
bfd_session_up(self)
reference_packet = wait_for_bfd_packet(self)
time_mark = time.time()
interval = 300000
self.test_session.update(required_min_rx=interval)
self.test_session.send_packet()
extra_time = time.time() - time_mark
p = wait_for_bfd_packet(self)
# first packet is allowed to be late by time we spent doing the update
# calculated in extra_time
self.assert_in_range(p.time - reference_packet.time,
.95 * 0.75 * interval / USEC_IN_SEC,
1.05 * interval / USEC_IN_SEC + extra_time,
"time between BFD packets")
reference_packet = p
for dummy in range(3):
p = wait_for_bfd_packet(self)
diff = p.time - reference_packet.time
self.assert_in_range(diff, .95 * .75 * interval / USEC_IN_SEC,
1.05 * interval / USEC_IN_SEC,
"time between BFD packets")
reference_packet = p
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_modify_req_min_rx_double(self):
""" modify session - double required min rx """
bfd_session_up(self)
p = wait_for_bfd_packet(self)
self.test_session.update(desired_min_tx=10000,
required_min_rx=10000)
self.test_session.send_packet()
# double required min rx
self.vpp_session.modify_parameters(
required_min_rx=2 * self.vpp_session.required_min_rx)
p = wait_for_bfd_packet(
self, pcap_time_min=time.time() - self.vpp_clock_offset)
# poll bit needs to be set
self.assertIn("P", p.sprintf("%BFD.flags%"),
"Poll bit not | |
order to make room for new data, without going over the specified memory limit."
lazyfree_lazy_expire_desc = "Redis deletes objects independently of a user call because of expire. when a key with an associated time to live (see the EXPIRE command) must be deleted from memory."
lazyfree_lazy_server_del_desc = "Redis deletes objects independently of a user call Because of a side effect of a command that stores data on a key that may already exist."
slave_lazy_flush_desc = "Redis deletes objects independently of a user call During replication, when a slave performs a full resynchronization with its master, the content of the whole database is removed in order to load the RDB file just transfered."
LAZY_FREEING_CONFIG = OrderedDict(
{
"lazyfree-lazy-eviction": {
"desc": lazyfree_lazy_eviction_desc,
"type": "select",
"can_edit": True,
},
"lazyfree-lazy-expire": {
"desc": lazyfree_lazy_expire_desc,
"type": "select",
"can_edit": True,
},
"lazyfree-lazy-server-del": {
"desc": lazyfree_lazy_server_del_desc,
"type": "select",
"can_edit": True,
},
"slave-lazy-flush": {
"desc": slave_lazy_flush_desc,
"type": "select",
"can_edit": True,
},
}
)
auto_aof_rewrite_percentage_desc = "Automatic rewrite of the append only file. Redis is able to automatically rewrite the log file implicitly calling BGREWRITEAOF when the AOF log size grows by the specified percentage."
auto_aof_rewrite_min_size_desc = "You need to specify a minimal size for the AOF file to be rewritten, this is useful to avoid rewriting the AOF file even if the percentage increase is reached but it is still pretty small."
no_appendfsync_on_rewrite_desc = "In some Linux configurations Redis may block too long on the fsync() call. "
aof_load_truncated_desc = "An AOF file may be found to be truncated at the end during the Redis startup process, when the AOF data gets loaded back into memory."
aof_use_rdb_preamble_desc = "When rewriting the AOF file, Redis is able to use an RDB preamble in the AOF file for faster rewrites and recoveries."
appendfsync_desc = "The fsync() call tells the Operating System to actually write data on disk instead of waiting for more data in the output buffer. "
appendonly_desc = "By default Redis asynchronously dumps the dataset on disk. This mode is good enough in many applications, but an issue with the Redis process or a power outage may result into a few minutes of writes lost."
APPEND_ONLY_MODE_CONFIG = OrderedDict(
{
"auto-aof-rewrite-percentage": {
"desc": auto_aof_rewrite_percentage_desc,
"type": "number",
"can_edit": True,
},
"auto-aof-rewrite-min-size": {
"desc": auto_aof_rewrite_min_size_desc,
"type": "number",
"can_edit": True,
},
"no-appendfsync-on-rewrite": {
"desc": no_appendfsync_on_rewrite_desc,
"type": "select",
"can_edit": True,
},
"aof-load-truncated": {
"desc": aof_load_truncated_desc,
"type": "select",
"can_edit": True,
},
"aof-use-rdb-preamble": {
"desc": aof_use_rdb_preamble_desc,
"type": "select",
"can_edit": True,
},
"appendfsync": {"desc": appendfsync_desc, "type": "select", "can_edit": True},
"appendonly": {"desc": appendonly_desc, "type": "select", "can_edit": True},
}
)
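# Hedged sketch of how these config dictionaries can be consumed: every entry maps a
# Redis directive to a description, a widget type and an editability flag, so a
# settings page can be rendered generically. render_config_fields() is illustrative,
# not part of the real UI code; try it with APPEND_ONLY_MODE_CONFIG defined above.
def render_config_fields(config):
    for directive, meta in config.items():
        yield {
            "name": directive,
            "label": meta["desc"],
            "widget": meta["type"],        # 'number', 'select', 'text' or 'checklist'
            "readonly": not meta["can_edit"],
        }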
lua_time_limit_desc = "Max execution time of a Lua script in milliseconds."
LUA_SCRIPTING_CONFIG = OrderedDict(
{
"lua-time-limit": {
"desc": lua_time_limit_desc,
"type": "number",
"can_edit": True,
}
}
)
cluster_node_timeout_desc = "Cluster node timeout is the amount of milliseconds a node must be unreachable for it to be considered in failure state."
cluster_migration_barrier_desc = "Slaves migrate to orphaned masters only if there are still at least a given number of other working slaves for their old master. This number is the migration barrier."
cluster_slave_validity_factor_desc = "A large slave-validity-factor may allow slaves with too old data to failover a master, while a too small value may prevent the cluster from being able to elect a slave at all."
cluster_require_full_coverage_desc = "By default Redis Cluster nodes stop accepting queries if they detect there is at least an hash slot uncovered (no available node is serving it)."
REDIS_CLUSTER_CONFIG = OrderedDict(
{
"cluster-node-timeout": {
"desc": cluster_node_timeout_desc,
"type": "number",
"can_edit": True,
},
"cluster-migration-barrier": {
"desc": cluster_migration_barrier_desc,
"type": "number",
"can_edit": True,
},
"cluster-slave-validity-factor": {
"desc": cluster_slave_validity_factor_desc,
"type": "number",
"can_edit": True,
},
"cluster-require-full-coverage": {
"desc": cluster_require_full_coverage_desc,
"type": "select",
"can_edit": True,
},
}
)
cluster_docker_nat_desc = "In certain deployments, Redis Cluster nodes address discovery fails, because addresses are NAT-ted or because ports are forwarded."
CLUSTER_DOCKER_NAT_CONFIG = OrderedDict(
{
"cluster-announce-ip": {
"desc": cluster_docker_nat_desc,
"type": "text",
"can_edit": True,
},
"cluster-announce-port": {
"desc": cluster_docker_nat_desc,
"type": "number",
"can_edit": True,
},
"cluster-announce-bus-port ": {
"desc": cluster_docker_nat_desc,
"type": "number",
"can_edit": True,
},
}
)
slowlog_log_slower_than_desc = "This tells Redis what is the execution time, in microseconds, to exceed in order for the command to get logged."
slowlog_max_len_desc = "This is the length of the slow log.There is no limit to this length. Just be aware that it will consume memory."
SLOWLOG_CONFIG = OrderedDict(
{
"slowlog-log-slower-than": {
"desc": slowlog_log_slower_than_desc,
"type": "number",
"can_edit": True,
},
"slowlog-max-len": {
"desc": slowlog_max_len_desc,
"type": "number",
"can_edit": True,
},
}
)
latency_monitor_threshold_desc = "The Redis latency monitoring subsystem samples different operations at runtime in order to collect data related to possible sources of latency of a Redis instance."
LATENCY_MONITOR_CONFIG = OrderedDict(
{
"latency-monitor-threshold": {
"desc": latency_monitor_threshold_desc,
"type": "number",
"can_edit": True,
}
}
)
notify_keyspace_events_desc = "The 'notify-keyspace-events' takes as argument a string that is composed of zero or multiple characters."
EVENT_NOTIFICATION_CONFIG = OrderedDict(
{
"notify-keyspace-events": {
"desc": notify_keyspace_events_desc,
"type": "checklist",
"can_edit": True,
}
}
)
active_defrag_threshold_lower_desc = "Minimum percentage of fragmentation to start active defrag. "
active_defrag_threshold_upper_desc = "Maximum percentage of fragmentation at which we use maximum effort. "
active_defrag_ignore_bytes_desc = "Minimum amount of fragmentation waste to start active defrag. "
active_defrag_cycle_min_desc = "Minimal effort for defrag in CPU percentage."
active_defrag_cycle_max_desc = "Maximal effort for defrag in CPU percentage"
activedefrag_desc = "Active defragmentation allows a Redis server to compact the spaces left between small allocations and deallocations of data in memory, thus allowing to reclaim back memory."
DEFRAGMENTATION_CONFIG = OrderedDict(
{
"active-defrag-threshold-lower": {
"desc": active_defrag_threshold_lower_desc,
"type": "number",
"can_edit": True,
},
"active-defrag-threshold-upper": {
"desc": active_defrag_threshold_upper_desc,
"type": "number",
"can_edit": True,
},
"active-defrag-ignore-bytes": {
"desc": active_defrag_ignore_bytes_desc,
"type": "number",
"can_edit": True,
},
"active-defrag-cycle-min": {
"desc": active_defrag_cycle_min_desc,
"type": "number",
"can_edit": True,
},
"active-defrag-cycle-max": {
"desc": active_defrag_cycle_max_desc,
"type": "number",
"can_edit": True,
},
"activedefrag": {"desc": activedefrag_desc, "type": "select", "can_edit": True},
}
)
client_query_buffer_limit_desc = "Client query buffers accumulate new commands."
lfu_log_factor_desc = "The LFU counter is just 8 bits per key, so its maximum value is 255; Redis therefore uses a probabilistic increment with logarithmic behavior."
lfu_decay_time_desc = "The counter decay time is the time, in minutes, that must elapse in order for the key counter to be divided by two (or decremented, if it has a value less than or equal to 10)."
hash_max_ziplist_entries_desc = "Hashes are encoded using a memory efficient data structure when they have a small number of entries, and the biggest entry does not exceed a given threshold. These thresholds can be configured using this directive."
hash_max_ziplist_value_desc = "Hashes are encoded using a memory efficient data structure when they have a small number of entries, and the biggest entry does not exceed a given threshold. These thresholds can be configured using this directive."
list_max_ziplist_size_desc = "Lists are also encoded in a special way to save a lot of space. The number of entries allowed per internal list node can be specified as a fixed maximum size or a maximum number of elements."
list_compress_depth_desc = "Lists may also be compressed. Compress depth is the number of quicklist ziplist nodes from each side of the list to exclude from compression. The head and tail of the list are always uncompressed for fast push/pop operations."
set_max_intset_entries_desc = "Sets have a special encoding in just one case: when a set is composed of just strings that happen to be integers in radix 10 in the range of 64 bit signed integers."
zset_max_ziplist_entries_desc = "Sorted sets are also specially encoded in order to save a lot of space. This encoding is only used when the length and elements of a sorted set are below the specified limits."
zset_max_ziplist_value_desc = "Sorted sets are also specially encoded in order to save a lot of space. This encoding is only used when the length and elements of a sorted set are below the specified limits."
hll_sparse_max_bytes_desc = "HyperLogLog sparse representation bytes limit. The limit includes the 16 bytes header."
hz_desc = "Redis calls an internal function to perform many background tasks, like closing connections of clients in timeout, purging expired keys that are never requested, and so forth. "
activerehashing_desc = "Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in order to help rehashing the main Redis hash table."
aof_rewrite_incremental_fsync_desc = "When | |
"""Create entanglement state
Args:
- qc (QuantumCircuit): init circuit
Returns:
- QuantumCircuit: the added circuit
"""
for i in range(0, qc.num_qubits):
if i == qc.num_qubits - 1:
qc.cnot(qc.num_qubits - 1, 0)
else:
qc.cnot(i, i + 1)
return qc
def u_cluster_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Create a complicated u gate multi-qubit
Args:
- qc (QuantumCircuit): init circuit
- thetas (Numpy array): parameters
Returns:
- QuantumCircuit: the added circuit
"""
qc = entangle_nqubit(qc)
qc = u_nqubit_ansatz(qc, thetas[0:qc.num_qubits * 3])
return qc
def u_cluster_nlayer_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray, num_layers):
"""Create a complicated u gate multi-qubit
Args:
- qc (QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
Returns:
- QuantumCircuit: the added circuit
"""
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
params_per_layer = int(len(thetas) / num_layers)
for i in range(0, num_layers):
qc = entangle_nqubit(qc)
# apply the single-layer ansatz to this slice of parameters (mirrors u_cluster_nqubit above)
qc = u_nqubit_ansatz(qc, thetas[i * params_per_layer:(i + 1) * params_per_layer])
return qc
def create_rx_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Add a R_X layer
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits):
qc.rx(thetas[i], i)
return qc
def create_rz_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Add a R_Z layer
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits):
qc.rz(thetas[i], i)
return qc
def create_cry_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Create control Control-RY state
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i, i + 1)
for i in range(1, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i, i + 1)
qc.cry(thetas[qc.num_qubits - 1], qc.num_qubits - 1, 0)
return qc
def create_cry_nqubit_inverse(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Create control Control-RY state but swap control and target qubit
Args:
- qc (qiskit.QuantumCircuit): Init circuit
- thetas (np.ndarray): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i + 1, i)
for i in range(1, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i + 1, i)
qc.cry(thetas[qc.num_qubits - 1], 0, qc.num_qubits - 1)
return qc
def create_linear_ansatz(qc: qiskit.QuantumCircuit,
thetas: np.ndarray,
num_layers: int = 1):
"""Create linear ansatz. The number of param is num_layers * n * 5
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- n_layers (int): numpy of layers
Returns:
- qiskit.QuantumCircuit
"""
n = qc.num_qubits
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if len(thetas) != num_layers * n * 5:
raise Exception(
'Number of parameters must equal num_layers * num_qubits * 5')
for i in range(0, num_layers):
phis = thetas[i * n * 5:(i + 1) * n * 5]
qc = create_rx_nqubit(qc, phis[:n])
qc = create_cry_nqubit_inverse(qc, phis[n:n * 2])
qc = create_rz_nqubit(qc, phis[n * 2:n * 3])
qc = create_cry_nqubit(qc, phis[n * 3:n * 4])
qc = create_rz_nqubit(qc, phis[n * 4:n * 5])
return qc
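# Illustrative usage sketch (not part of the original module): the linear ansatz
# above expects exactly num_layers * num_qubits * 5 parameters; the qubit/layer
# counts and the random initialisation here are arbitrary assumptions.
def _example_linear_ansatz(num_qubits: int = 3, num_layers: int = 2):
    thetas = np.random.uniform(0, 2 * np.pi, num_layers * num_qubits * 5)
    qc = qiskit.QuantumCircuit(num_qubits)
    return create_linear_ansatz(qc, thetas, num_layers=num_layers)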
def create_haarchecker_linear(qc: qiskit.QuantumCircuit, thetas: np.ndarray,
num_layers: int, encoder):
"""Create circuit includes haar and linear
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- num_layers (int): num_layers for linear
- encoder: encoder for haar
Returns:
- qiskit.QuantumCircuit
"""
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if not isinstance(encoder, qtm.encoding.Encoding):
encoder = encoder['encoder']
qc1 = qiskit.QuantumCircuit(encoder.quantum_data)
qc1 = create_linear_ansatz(qc1, thetas, num_layers=num_layers)
qc1 = qc1.combine(qc.inverse())
qc1.add_register(qiskit.ClassicalRegister(encoder.num_qubits))
return qc1
def create_haarchecker_graph(qc: qiskit.QuantumCircuit, thetas: np.ndarray, encoder):
"""Create circuit includes haar and linear
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- num_layers (int): num_layers for linear
- encoder: encoder for haar
Returns:
- qiskit.QuantumCircuit
"""
if not isinstance(encoder, qtm.encoding.Encoding):
encoder = encoder['encoder']
qc1 = qiskit.QuantumCircuit(encoder.quantum_data)
qc1 = create_graph_ansatz(qc1, thetas)
qc1 = qc1.combine(qc.inverse())
qc1.add_register(qiskit.ClassicalRegister(encoder.num_qubits))
return qc1
def create_haarchecker_binho(qc: qiskit.QuantumCircuit, thetas: np.ndarray,
num_layers: int, encoder):
"""Create circuit includes haar and linear
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- num_layers (int): num_layers for linear
- encoder: encoder for haar
Returns:
- qiskit.QuantumCircuit
"""
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if not isinstance(encoder, qtm.encoding.Encoding):
encoder = encoder['encoder']
qc1 = qiskit.QuantumCircuit(encoder.quantum_data)
qc1 = create_binho_ansatz(qc1, thetas, num_layers=num_layers)
qc1 = qc1.combine(qc.inverse())
qc1.add_register(qiskit.ClassicalRegister(encoder.num_qubits))
return qc1
def create_haarchecker_alternating_layered(qc: qiskit.QuantumCircuit,
thetas: np.ndarray, num_layers: int,
encoder):
"""Create circuit includes Alternating layered and linear
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- num_layers (int): num_layers for linear
- encoder: encoder for haar
Returns:
- qiskit.QuantumCircuit
"""
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if not isinstance(encoder, qtm.encoding.Encoding):
encoder = encoder['encoder']
qc1 = qiskit.QuantumCircuit(encoder.quantum_data)
qc1 = create_alternating_layered_ansatz(qc1, thetas, num_layers=num_layers)
qc1 = qc1.combine(qc.inverse())
qc1.add_register(qiskit.ClassicalRegister(encoder.num_qubits))
return qc1
###########################
######## Binho State ######
###########################
def create_wy_ansatz(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Create WY state
Args:
- qc (qiskit.QuantumCircuit): Init circuit
- thetas (Numpy array): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i + 1, i)
for i in range(1, qc.num_qubits - 1, 2):
qc.cry(thetas[i], i + 1, i)
qc.cry(thetas[qc.num_qubits - 1], 0, qc.num_qubits - 1)
return qc
def create_binho_ansatz(qc: qiskit.QuantumCircuit, thetas: np.ndarray, num_layers: int = 1):
"""Create linear ansatz
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- n_layers (Int): numpy of layers
Returns:
- qiskit.QuantumCircuit
"""
n = qc.num_qubits
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if len(thetas) != num_layers * n * 5:
raise Exception(
'Number of parameters must equal num_layers * num_qubits * 5')
for i in range(0, num_layers):
phis = thetas[i * n * 5:(i + 1) * n * 5]
qc = create_rx_nqubit(qc, phis[:n])
qc = create_wy_ansatz(qc, phis[n:n * 2])
qc = create_rz_nqubit(qc, phis[n * 2:n * 3])
qc = create_wy_ansatz(qc, phis[n * 3:n * 4])
qc = create_rz_nqubit(qc, phis[n * 4:n * 5])
return qc
###########################
###### Layered State ######
###########################
def create_ry_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray, shift=0):
"""Add a R_Y layer
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- shift (int): start index
Returns:
- qiskit.QuantumCircuit
"""
if qc.num_qubits - shift < len(thetas):
raise Exception(
'Number of parameters must not exceed num_qubits - shift')
for i in range(0, len(thetas)):
qc.ry(thetas[i], i + shift)
return qc
def create_swap_nqubit(qc: qiskit.QuantumCircuit, shift=0):
"""Add a SWAP layer
Args:
- qc (qiskit.QuantumCircuit): init circuit
- shift (Int): start index
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0 + shift, qc.num_qubits - 1, 2):
qc.swap(i, i + 1)
return qc
def create_alternating_layered_ansatz(qc: qiskit.QuantumCircuit,
thetas: np.ndarray,
num_layers: int = 1):
"""Create Alternating layered ansatz
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- num_layers (int): number of layers
Returns:
- qiskit.QuantumCircuit
"""
n = qc.num_qubits
if not isinstance(num_layers, int):
num_layers = num_layers['num_layers']
if len(thetas) != num_layers * (n * 5 - 4):
raise Exception(
'Number of parameters must equal num_layers * (num_qubits * 5 - 4)')
for i in range(0, num_layers):
phis = thetas[i * (n * 5 - 4):(i + 1) * (n * 5 - 4)]
qc.barrier()
qc = create_ry_nqubit(qc, phis[:n])
qc = create_swap_nqubit(qc)
qc = create_ry_nqubit(qc, phis[n:n * 2 - 1])
qc = create_swap_nqubit(qc, shift=1)
qc = create_ry_nqubit(qc, phis[n * 2 - 1:n * 3 - 2], shift=1)
qc = create_swap_nqubit(qc)
qc = create_ry_nqubit(qc, phis[n * 3 - 2:n * 4 - 3])
qc = create_swap_nqubit(qc, shift=1)
qc = create_ry_nqubit(qc, phis[n * 4 - 3:n * 5 - 4], shift=1)
return qc
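# Illustrative usage sketch (not part of the original module): the alternating
# layered ansatz above expects num_layers * (num_qubits * 5 - 4) parameters;
# the qubit/layer counts and random initialisation here are assumptions.
def _example_alternating_layered_ansatz(num_qubits: int = 4, num_layers: int = 2):
    thetas = np.random.uniform(0, 2 * np.pi, num_layers * (num_qubits * 5 - 4))
    qc = qiskit.QuantumCircuit(num_qubits)
    return create_alternating_layered_ansatz(qc, thetas, num_layers=num_layers)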
###########################
#### Tomography circuit ###
###########################
def create_Wchain(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
"""Create W_chain ansatz
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits - 1):
qc.cry(thetas[i], i, i + 1)
qc.cry(thetas[-1], qc.num_qubits - 1, 0)
return qc
def create_WchainCNOT(qc: qiskit.QuantumCircuit):
"""Create W_chain ansatz but replacing CRY gate by CNOT gate
Args:
- qc (qiskit.QuantumCircuit): init circuit
Returns:
- qiskit.QuantumCircuit
"""
for i in range(0, qc.num_qubits - 1):
qc.cnot(i, i + 1)
qc.cnot(qc.num_qubits - 1, 0)
return qc
def create_Walternating(qc: qiskit.QuantumCircuit, thetas: np.ndarray, index_layer):
"""Create W_alternating ansatz
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
- index_layer (int)
Returns:
- qiskit.QuantumCircuit
"""
t = 0
if index_layer % 2 == 0:
# Even
for i in range(1, qc.num_qubits - 1, 2):
qc.cry(thetas[t], i, i + 1)
t += 1
qc.cry(thetas[-1], 0, qc.num_qubits - 1)
else:
# Odd (assumed to mirror the even-layer branch, starting from qubit 0; the original was cut off here)
for i in range(0, qc.num_qubits - 1, 2):
qc.cry(thetas[t], i, i + 1)
t += 1
return qc
def poly_lcm(f, g, *symbols):
"""Compute least common multiple of two polynomials.
Given two univariate polynomials, the LCM is computed via the
formula f*g = gcd(f, g)*lcm(f, g). In the multivariate case it is
the unique generator of the intersection of the ideals generated
by f and g. This is done by computing a Groebner basis, with
respect to a lexicographic ordering, of t*f and (1 - t)*g, where
t is an unrelated symbol, and keeping only those basis elements
that do not contain t.
For more information on the implemented algorithm refer to:
[1] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
Algorithms, Springer, Second Edition, 1997, pp. 187
"""
if not isinstance(f, Poly):
f = Poly(f, *symbols)
elif symbols:
raise SymbolsError("Redundant symbols were given")
f, g = f.unify_with(g)
symbols, flags = f.symbols, f.flags
if f.is_monomial and g.is_monomial:
monom = monomial_lcm(f.LM, g.LM)
fc, gc = f.LC, g.LC
if fc.is_Rational and gc.is_Rational:
coeff = Integer(ilcm(fc.p, gc.p))
else:
coeff = S.One
return Poly((coeff, monom), *symbols, **flags)
fc, f = f.as_primitive()
gc, g = g.as_primitive()
lcm = ilcm(int(fc), int(gc))
if f.is_multivariate:
t = Symbol('t', dummy=True)
lex = { 'order' : 'lex' }
f_monoms = [ (1,) + monom for monom in f.monoms ]
F = Poly((f.coeffs, f_monoms), t, *symbols, **lex)
g_monoms = [ (0,) + monom for monom in g.monoms ] + \
[ (1,) + monom for monom in g.monoms ]
g_coeffs = list(g.coeffs) + [ -coeff for coeff in g.coeffs ]
G = Poly(dict(zip(g_monoms, g_coeffs)), t, *symbols, **lex)
def independent(h):
return all(not monom[0] for monom in h.monoms)
H = [ h for h in poly_groebner((F, G)) if independent(h) ]
if lcm != 1:
h_coeffs = [ coeff*lcm for coeff in H[0].coeffs ]
else:
h_coeffs = H[0].coeffs
h_monoms = [ monom[1:] for monom in H[0].monoms ]
return Poly(dict(zip(h_monoms, h_coeffs)), *symbols, **flags)
else:
h = poly_div(f * g, poly_gcd(f, g))[0]
if lcm != 1:
return h.mul_term(lcm / h.LC)
else:
return h.as_monic()
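# Illustrative sketch (assumed symbols from sympy.abc; not part of the original
# module): in the univariate case the LCM follows from f*g = gcd(f, g)*lcm(f, g).
def _example_poly_lcm():
    from sympy.abc import x
    # lcm(x**2 - 1, x - 1) is x**2 - 1, returned as a monic Poly in x
    return poly_lcm(x**2 - 1, x - 1, x)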
def poly_gcd(f, g, *symbols):
"""Compute greatest common divisor of two polynomials.
Given two univariate polynomials, subresultants are used
to compute the GCD. In multivariate case Groebner basis
approach is used together with f*g = gcd(f, g)*lcm(f, g)
well known formula.
For more information on the implemented algorithm refer to:
[1] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
Algorithms, Springer, Second Edition, 1997, pp. 187
"""
if not isinstance(f, Poly):
f = Poly(f, *symbols)
elif symbols:
raise SymbolsError("Redundant symbols were given")
f, g = f.unify_with(g)
symbols, flags = f.symbols, f.flags
if f.is_zero and g.is_zero:
return f
if f.is_constant:
if f.is_zero:
cont, g = g.as_primitive()
return g.mul_term(cont / g.LC)
if f.is_one:
return f
if g.is_constant:
if g.is_zero:
cont, f = f.as_primitive()
return f.mul_term(cont / f.LC)
if g.is_one:
return g
if f.is_monomial and g.is_monomial:
monom = monomial_gcd(f.LM, g.LM)
fc, gc = f.LC, g.LC
if fc.is_Rational and gc.is_Rational:
coeff = Integer(igcd(fc.p, gc.p))
else:
coeff = S.One
return Poly((coeff, monom), *symbols, **flags)
cf, f = f.as_primitive()
cg, g = g.as_primitive()
gcd = igcd(int(cf), int(cg))
if f.is_multivariate:
h = poly_div(f*g, poly_lcm(f, g))[0]
else:
h = poly_subresultants(f, g, res=False)[-1]
if gcd != 1:
return h.mul_term(gcd / h.LC)
else:
return h.as_monic()
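# Illustrative sketch (not part of the original module): for univariate input the
# GCD is taken from the subresultant PRS and returned as a monic polynomial.
def _example_poly_gcd():
    from sympy.abc import x
    # gcd(x**2 - 1, (x - 1)**2) is x - 1
    return poly_gcd(x**2 - 1, (x - 1)**2, x)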
def poly_gcdex(f, g, *symbols):
"""Extended Euclidean algorithm.
Given univariate polynomials f and g over an Euclidean domain,
computes polynomials s, t and h, such that h = gcd(f, g) and
s*f + t*g = h.
For more information on the implemented algorithm refer to:
[1] M. Bronstein, Symbolic Integration I: Transcendental
Functions, Second Edition, Springer-Verlag, 2005
"""
s, h = poly_half_gcdex(f, g, *symbols)
return s, poly_div(h - s*f, g)[0], h
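# Illustrative sketch (not part of the original module): the returned s, t and h
# satisfy s*f + t*g = h with h = gcd(f, g).
def _example_poly_gcdex():
    from sympy.abc import x
    # For f = x**4 - 1 and g = x**3 - 1 this yields s = 1, t = -x, h = x - 1,
    # since (x**4 - 1) - x*(x**3 - 1) = x - 1.
    return poly_gcdex(x**4 - 1, x**3 - 1, x)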
def poly_half_gcdex(f, g, *symbols):
"""Half extended Euclidean algorithm.
Efficiently computes gcd(f, g) and one of the coefficients
in extended Euclidean algorithm. Formally, given univariate
polynomials f and g over an Euclidean domain, computes s
and h, such that h = gcd(f, g) and s*f = h (mod g).
For more information on the implemented algorithm refer to:
[1] M. Bronstein, Symbolic Integration I: Transcendental
Functions, Second Edition, Springer-Verlag, 2005
"""
if not isinstance(f, Poly):
f = Poly(f, *symbols)
elif symbols:
raise SymbolsError("Redundant symbols were given")
f, g = f.unify_with(g)
if f.is_multivariate:
raise MultivariatePolyError(f)
symbols, flags = f.symbols, f.flags
a = Poly(S.One, *symbols, **flags)
b = Poly((), *symbols, **flags)
while not g.is_zero:
q, r = poly_div(f, g)
f, g = g, r
c = a - q*b
a, b = b, c
return a.div_term(f.LC), f.as_monic()
def poly_resultant(f, g, *symbols):
"""Computes resultant of two univariate polynomials.
Resultants are a classical algebraic tool for determining whether
a system of n polynomials in n-1 variables has a common root,
without explicitly solving for the roots.
They are efficiently represented as determinants of Bezout
matrices whose entries are computed using O(n**2) additions
and multiplications where n = max(deg(f), deg(g)).
>>> from sympy.polys.algorithms import poly_resultant
>>> from sympy.abc import x, y
Polynomials x**2-1 and (x-1)**2 have a common root:
>>> poly_resultant(x**2-1, (x-1)**2, x)
0
For more information on the implemented algorithm refer to:
[1] E.-W. Chionh, Fast Computation of the Bezout and Dixon
Resultant Matrices, Journal of Symbolic Computation, ACM,
Volume 33, Issue 1, January 2002, Pages 13-29
"""
if not isinstance(f, Poly):
f = Poly(f, *symbols)
elif symbols:
raise SymbolsError("Redundant symbols were given")
f, g = f.unify_with(g)
if f.is_multivariate:
raise MultivariatePolyError(f)
n, m = f.degree, g.degree
N = max(n, m)
if n < m:
p = f.as_uv_dict()
q = g.as_uv_dict()
else:
q = f.as_uv_dict()
p = g.as_uv_dict()
import sympy.matrices
B = sympy.matrices.zeros(N)
for i in xrange(N):
for j in xrange(i, N):
if i in p and j+1 in q:
B[i, j] += p[i] * q[j+1]
if j+1 in p and i in q:
B[i, j] -= p[j+1] * q[i]
for i in xrange(1, N-1):
for j in xrange(i, N-1):
B[i, j] += B[i-1, j+1]
for i in xrange(N):
for j in xrange(i+1, N):
B[j, i] = B[i, j]
det = B.det()
if not det:
return det
else:
if n >= m:
det /= f.LC**(n-m)
else:
det /= g.LC**(m-n)
sign = (-1)**(n*(n-1)//2)
if det.is_Atom:
return sign * det
else:
return sign * Poly.cancel(det)
def poly_subresultants(f, g, *symbols, **flags):
"""Computes subresultant PRS of two univariate polynomials.
Polynomial remainder sequence (PRS) is a fundamental tool in
computer algebra as it gives as a sub-product the polynomial
greatest common divisor (GCD), provided that the coefficient
domain is a unique factorization domain.
There are several methods for computing PRS, e.g.: Euclidean
PRS, where the most famous algorithm is used, primitive PRS
and, finally, subresultants which are implemented here.
The Euclidean approach is reasonably efficient but suffers
severely from coefficient growth. The primitive algorithm
avoids this but requires a lot of coefficient computations.
Subresultants solve both problems, so the method is efficient and
has moderate coefficient growth. The current implementation
uses pseudo-divisions which is well suited for coefficients
in integral domains or number fields.
Formally, given univariate polynomials f and g over an UFD,
then a sequence (R_0, R_1, ..., R_k, 0, ...) is a polynomial
remainder sequence where R_0 = f, R_1 = g, R_k != 0 and R_k
is similar to gcd(f, g).
The result is returned as tuple (res, R) where R is the PRS
sequence and res is the resultant of the input polynomials.
If only polynomial remainder sequence is important, then by
setting res=False in keyword arguments expensive computation
of the resultant can be avoided (only PRS is returned).
For more information on the implemented algorithm refer to:
[1] M. Bronstein, Symbolic Integration I: Transcendental
Functions, Second Edition, Springer-Verlag, 2005
[2] M. Kerber, Division-Free computation of subresultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
if not isinstance(f, Poly):
f = Poly(f, *symbols)
elif symbols:
raise SymbolsError("Redundant symbols were given")
f, g = f.unify_with(g)
if f.is_multivariate:
raise MultivariatePolyError(f)
else:
symbols = f.symbols
n, m = f.degree, g.degree
if n < m:
f, g = g, f
n, m = m, n
R = [f, g]
d = n - m
b = S(-1)**(d + 1)
c = S(-1)
B, D = [b], [d]
h = poly_prem(f, g)
h = h.mul_term(b)
while not h.is_zero:
k = h.degree
R.append(h)
lc = g.LC
C = (-lc)**d / c**(d-1)
c = Poly.cancel(C)
b = -lc * c**(m-k)
f, g, m, d = g, h, k, m-k
B.append(b)
D.append(d)
h = poly_prem(f, g)
h = h.div_term(b)
if not flags.get('res', True):
return R
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
from Qt import QtWidgets
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "<layoutName>:index" controls ordering of plugs within the layout
# - "<layoutName>:section" places the plug in a named section of the layout
# - "<layoutName>:divider" specifies whether or not a plug should be followed by a divider
# - "<layoutName>:activator" the name of an activator to control editability
# - "<layoutName>:visibilityActivator" the name of an activator to control visibility
# - "<layoutName>:accessory" groups as an accessory to the previous widget
# - "<layoutName>:width" gives a specific width to the plug's widget
#
# Per-parent metadata support :
#
# - <layoutName>:section:sectionName:summary" dynamic metadata entry returning a
# string to be used as a summary for the section.
# - <layoutName>:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
# - "<layoutName>:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "<layoutName>:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "<layoutName>:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "<layoutName>:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "<layoutName>:customWidget:Name:*" as for the standard per-plug "<layoutName>:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
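#
# An illustrative sketch (the node type and plug names below are assumptions,
# not part of this module) of how such metadata might be registered :
#
#    Gaffer.Metadata.registerValue( MyNode, "myPlug", "layout:section", "Settings.Advanced" )
#    Gaffer.Metadata.registerValue( MyNode, "myPlug", "layout:index", 0 )
#    Gaffer.Metadata.registerValue( MyNode, "myPlug", "layout:divider", True )
#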
class PlugLayout( GafferUI.Widget ) :
# We use this when we can't find a ScriptNode to provide the context.
__fallbackContext = Gaffer.Context()
def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", embedded = False, **kw ) :
assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )
# embedded indicates that the PlugLayout is embedded in another layout
# which affects how the widget is built
self.__embedded = embedded
self.__layout = _TabLayout( orientation, embedded = embedded ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation )
GafferUI.Widget.__init__( self, self.__layout, **kw )
self.__parent = parent
self.__readOnly = False
self.__layoutName = layoutName
# not to be confused with __rootSection, which holds an actual _Section object
self.__rootSectionName = rootSection
# we need to connect to the childAdded/childRemoved signals on
# the parent so we can update the ui when plugs are added and removed.
parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
# since our layout is driven by metadata, we must respond dynamically
# to changes in that metadata.
Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False )
# and since our activations are driven by plug values, we must respond
# when the plugs are dirtied.
self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ), scoped = False )
# frequently events that trigger a ui update come in batches, so we
# perform the update lazily using a LazyMethod. the dirty variables
# keep track of the work we'll need to do in the update.
self.__layoutDirty = True
self.__activationsDirty = True
self.__summariesDirty = True
# mapping from layout item to widget, where the key is either a plug or
# the name of a custom widget (as returned by layoutOrder()).
self.__widgets = {}
self.__rootSection = _Section( self.__parent )
# set up an appropriate default context in which to view the plugs.
scriptNode = self.__node() if isinstance( self.__node(), Gaffer.ScriptNode ) else self.__node().scriptNode()
self.setContext( scriptNode.context() if scriptNode is not None else self.__fallbackContext )
# schedule our first update, which will take place when we become
# visible for the first time.
self.__updateLazily()
def getReadOnly( self ) :
return self.__readOnly
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
self.__readOnly = readOnly
for widget in self.__widgets.values() :
self.__applyReadOnly( widget, self.__readOnly )
def getContext( self ) :
return self.__context
def setContext( self, context ) :
self.__context = context
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
for widget in self.__widgets.values() :
self.__applyContext( widget, context )
## Returns a PlugValueWidget representing the specified child plug.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def plugValueWidget( self, childPlug, lazy=True ) :
if not lazy :
self.__updateLazily.flush( self )
w = self.__widgets.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
## Returns the custom widget registered with the specified name.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def customWidget( self, name, lazy=True ) :
if not lazy :
self.__updateLazily.flush( self )
return self.__widgets.get( name )
## Returns the list of section names that will be used when laying
# out the plugs of the specified parent. The sections are returned
# in the order in which they will be created.
@classmethod
def layoutSections( cls, parent, includeCustomWidgets = False, layoutName = "layout" ) :
d = collections.OrderedDict()
for item in cls.layoutOrder( parent, includeCustomWidgets, layoutName = layoutName ) :
sectionPath = cls.__staticSectionPath( item, parent, layoutName )
sectionName = ".".join( sectionPath )
d[sectionName] = 1
return d.keys()
## Returns the child plugs of the parent in the order in which they
# will be laid out, based on "<layoutName>:index" Metadata entries. If
# includeCustomWidgets is True, then the positions of custom widgets
# are represented by the appearance of the names of the widgets as
# strings within the list. If a section name is specified, then the
# result will be filtered to include only items in that section.
@classmethod
def layoutOrder( cls, parent, includeCustomWidgets = False, section = None, layoutName = "layout", rootSection = "" ) :
items = parent.children( Gaffer.Plug )
items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]
if includeCustomWidgets :
for name in Gaffer.Metadata.registeredValues( parent ) :
m = re.match( layoutName + ":customWidget:(.+):widgetType", name )
if m and cls.__metadataValue( parent, name ) :
items.append( m.group( 1 ) )
itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
for itemAndIndex in itemsAndIndices :
index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent, layoutName )
if index is not None :
index = index if index >= 0 else sys.maxint + index
itemAndIndex[0] = index
itemsAndIndices.sort( key = lambda x : x[0] )
if section is not None :
sectionPath = section.split( "." ) if section else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName ) == sectionPath ]  # assumed completion of the truncated filter
return [ x[1] for x in itemsAndIndices ]
html = html + " <tr>" + lineSeparator  # assumed start of a line truncated in the source
html = html + " <td class=\"left\">" + j2cCFResourceProperties[property] + "</td>" + lineSeparator
html = html + " <td>" + value + "</td>" + lineSeparator
html = html + " </tr>" + lineSeparator
# List the Connection Pool properties
html = html + " <tr>" + lineSeparator
html = html + " <td class=\"properties\" colspan=2><b>Connection Pool</b></td>" + lineSeparator
html = html + " </tr>" + lineSeparator
connPoolId = AdminConfig.showAttribute(j2cConnId, 'connectionPool')
for property,desc in connPoolProperties.items():
value = AdminConfig.showAttribute(connPoolId, property)
if value:
html = html + " <tr>" + lineSeparator
html = html + " <td class=\"left\">" + desc + "</td>" + lineSeparator
html = html + " <td>" + value + "</td>" + lineSeparator
html = html + " </tr>" + lineSeparator
# Close out the table
html = html + "</table>" + lineSeparator
return html
# Function - Build the J2C Queue XML
def buildJ2CQueueXML(level, queues):
# Generate the title and table tags
xml = indent(level) + "<j2cqueues>" + lineSeparator
# Iterate through each J2C Queue for this scope and build table entries
for queue in queues:
xml = xml + indent(level + 1) + "<j2cqueue name=\"" + queue.split("(")[0].replace('\"', '') + "\">" + lineSeparator
# List the J2C Queue properties
for property,desc in j2cQueueProperties.items():
value = AdminConfig.showAttribute(queue, property)
if value and desc != "Queue Name":
xml = xml + indent(level + 2) + "<property description=\"" + desc + "\">" + value + "</property>" + lineSeparator
# List the J2C Queue Resource properties
resProps = AdminConfig.showAttribute(queue, 'properties')[1:-1].split()
for propId in resProps:
value = AdminConfig.showAttribute(propId, "value")
property = AdminConfig.showAttribute(propId, "name")
if j2cQueueResourceProperties.get(property,"") and value:
xml = xml + indent(level + 2) + "<property description=\"" + j2cQueueResourceProperties[property] + "\">" + value + "</property>" + lineSeparator
xml = xml + indent(level + 1) + "</j2cqueue>" + lineSeparator
# Close out the table
xml = xml + indent(level) + "</j2cqueues>" + lineSeparator
return xml
# Function - Build the J2C Queue Section
def buildJ2CQueueSection(scopes, j2cQueues):
# Generate the title and table tags
html = "<h4><a id=\"j2cqueue\"></a>J2C Queues</h4>" + lineSeparator
html = html + "<table>" + lineSeparator
# Check for J2C Queues
if not j2cQueues:
html = html + " <tr><th>None" + lineSeparator
html = html + " <div class=\"top\">" + lineSeparator
html = html + " <a href=\"#top\">Top</a>" + lineSeparator
html = html + " </div>" + lineSeparator
html = html + " </th>" + lineSeparator
html = html + " </tr>" + lineSeparator
html = html + " <tr><td> </td></tr>" + lineSeparator
else:
# Iterate through the scope and build the associated table entries if
# J2C Queues are found for the given scope
for scope in scopes:
type = scope.split(":")[0]
name = scope.split(":")[1]
if type == "cell":
title = "Cell: " + name
elif type == "cluster":
title = "Cluster: " + name
cluster_name = name
elif type == "cluster_member":
title = "Cluster: " + cluster_name + " | Cluster Member: " + name
name = name + ":" + cluster_name
elif type == "node":
node = name
title = "Node: " + name
elif type == "app_server":
title = "Node: " + node + " | Application Server: " + name.split(",")[0]
name = name.split(",")[0] + ":" + node
else:
continue
# Find J2C Queues for this scope and build table entries if found
id, queues = findConfigIds(type, name, j2cQueues)
if not id:
continue
show = "y"
# Iterate through each J2C Queue for this scope and build table entries
for queue in queues:
if show:
html = html + " <tr><th class=\"scope\" colspan=2><a id=\"" + id + "\">" + title + "</a></th></tr>" + lineSeparator
show = None
# Build the table header
html = html + " <tr>" + lineSeparator
html = html + " <th class=\"name\" colspan=2>" + queue.split("(")[0].replace('\"', '') + lineSeparator
html = html + " <div class=\"top\">" + lineSeparator
html = html + " <a href=\"#top\">Top</a>" + lineSeparator
html = html + " </div>" + lineSeparator
html = html + " </th>" + lineSeparator
html = html + " </tr>" + lineSeparator
# List the J2C Queue properties
for property,desc in j2cQueueProperties.items():
value = AdminConfig.showAttribute(queue, property)
if value:
html = html + " <tr>" + lineSeparator
html = html + " <td class=\"left\">" + desc + "</td>" + lineSeparator
html = html + " <td>" + value + "</td>" + lineSeparator
html = html + " </tr>" + lineSeparator
# List the J2C Queue Resource properties
resProps = AdminConfig.showAttribute(queue, 'properties')[1:-1].split()
for propId in resProps:
value = AdminConfig.showAttribute(propId, "value")
property = AdminConfig.showAttribute(propId, "name")
if j2cQueueResourceProperties.get(property,"") and value:
html = html + " <tr>" + lineSeparator
html = html + " <td class=\"left\">" + j2cQueueResourceProperties[property] + "</td>" + lineSeparator
html = html + " <td>" + value + "</td>" + lineSeparator
html = html + " </tr>" + lineSeparator
# Close out the table
html = html + "</table>" + lineSeparator
return html
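# Illustrative sketch (assumptions: a wsadmin/Jython session where AdminConfig is
# available, and that the J2C queues are listed as J2CAdminObject config objects;
# neither the function name nor the lookup is part of the original script).
def exampleBuildJ2CQueueReports(scopes):
    j2cQueues = AdminConfig.list('J2CAdminObject').splitlines()
    xml = buildJ2CQueueXML(1, j2cQueues)
    html = buildJ2CQueueSection(scopes, j2cQueues)
    return xml, html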
# Function - Build the Activation Specification XML
def buildActSpecXML(level, actSpecs):
# Generate the title and table tags
xml = indent(level) + "<actspecs>" + lineSeparator
# Iterate through each Activation Spec for this scope and build table entries
for actSpec in actSpecs:
xml = xml + indent(level + 1) + "<actspec name=\"" + actSpec.split("(")[0].replace('\"', '') + "\">" + lineSeparator
# List the Activation Spec properties
for property,desc in actSpecProperties.items():
value = AdminConfig.showAttribute(actSpec, property)
if value:
xml = xml + indent(level + 2) + "<property description=\"" + desc + "\">" + value + "</property>" + lineSeparator
# List the Activation Spec resource properties
resProps = AdminConfig.showAttribute(actSpec, 'resourceProperties')[1:-1].split()
for propId in resProps:
value = AdminConfig.showAttribute(propId, "value")
property = AdminConfig.showAttribute(propId, "name")
if actSpecResourceProperties.get(property,"") and value:
xml = xml + indent(level + 2) + "<property description=\"" + actSpecResourceProperties[property] + "\">" + value + "</property>" + lineSeparator
elif property == "arbitraryProperties":
props = value.split(",")
for prop in props:
propName = prop.split("=")[0]
propValue = prop.split("=")[1][1:-1]
if propName == "sslType" and propValue == "SPECIFIC":
propName = "SSL Configuration"
propValue = props[props.index(prop) + 1].split("=")[1][1:-1]
else:
propName = ""
if propName:
xml = xml + indent(level + 2) + "<property description=\"" + propName + "\">" + propValue + "</property>" + lineSeparator
xml = xml + indent(level + 1) + "</actspec>" + lineSeparator
# Close out the table
xml = xml + indent(level) + "</actspecs>" + lineSeparator
return xml
# Function - Build the Activation Specification Section
def buildActSpecSection(scopes, actSpecs):
# Generate the title and table tags
html = "<h4><a id=\"actspecs\"></a>Activation Specifications</h4>" + lineSeparator
html = html + "<table>" + lineSeparator
# Check for Activation Specs
if not actSpecs:
html = html + " <tr><th>None" + lineSeparator
html = html + " <div class=\"top\">" + lineSeparator
html = html + " <a href=\"#top\">Top</a>" + lineSeparator
html = html + " </div>" + lineSeparator
html = html + " </th>" + lineSeparator
html = html + " </tr>" + lineSeparator
html = html + " <tr><td> </td></tr>" + lineSeparator
else:
# Iterate through the scope and build the associated table entries if
# Activation Specifications are found for the given scope
for scope in scopes:
type = scope.split(":")[0]
name = scope.split(":")[1]
if type == "cell":
title = "Cell: " + name
elif type == "cluster":
title = "Cluster: " + name
cluster_name = name
elif type == "cluster_member":
title = "Cluster: " + cluster_name + " | Cluster Member: " + name
name = name + ":" + cluster_name
elif type == "node":
node = name
title = "Node: " + name
elif type == "app_server":
title = "Node: " + node + " | Application Server: " + name.split(",")[0]
name = name.split(",")[0] + ":" + node
else:
continue
# Find Activation Specs for this scope and build table entries if found
id, actSpecIds = findConfigIds(type, name, actSpecs)
if not id:
continue
show = "y"
# Iterate through each Activation Spec for this scope and build table entries