| text (string, 4 to 1.02M chars) | meta (dict) |
|---|---|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.entry,
name='business-entry'
),
url(
regex=r'^log/$',
view=views.entry_log,
name='business-entry-log'
),
url(
regex=r'^overview/$',
view=views.overview,
name='business-overview',
),
]
| {
"content_hash": "c24aa99542aea1fdc15e1fc677360c9a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 33,
"avg_line_length": 18.095238095238095,
"alnum_prop": 0.5157894736842106,
"repo_name": "pterk/django-tcb",
"id": "e40c703a7a570857e2842a5957fd6a9d31727426",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "business/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "860"
},
{
"name": "HTML",
"bytes": "10807"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "45389"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
} |
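The row above is a Django URLconf that gives each route a name. A minimal sketch of how those names are typically reversed, assuming the URLconf is include()-d under a hypothetical '/business/' prefix (the import path of `reverse` depends on the Django version in use):

```python
# Illustrative only; the '/business/' prefix is an assumption, not part of the row above.
from django.core.urlresolvers import reverse  # django.urls.reverse in Django >= 1.10

reverse('business-entry')      # -> '/business/'
reverse('business-entry-log')  # -> '/business/log/'
reverse('business-overview')   # -> '/business/overview/'
```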
PYTHON_VERSION_COMPATIBILITY = "PY3"
DEPS = [
'builder_name_schema',
'depot_tools/bot_update',
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
| {
"content_hash": "f46d0deeaa7dc974be129754ee4bab72",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 36,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.6819787985865724,
"repo_name": "aosp-mirror/platform_external_skia",
"id": "f19553a605b88d6b402f9dddaf8b476a22160f7b",
"size": "446",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "infra/bots/recipe_modules/vars/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12716940"
},
{
"name": "Batchfile",
"bytes": "904"
},
{
"name": "C",
"bytes": "620774"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "27394853"
},
{
"name": "GLSL",
"bytes": "67013"
},
{
"name": "Go",
"bytes": "80137"
},
{
"name": "HTML",
"bytes": "1002516"
},
{
"name": "Java",
"bytes": "32794"
},
{
"name": "JavaScript",
"bytes": "51666"
},
{
"name": "Lex",
"bytes": "4372"
},
{
"name": "Lua",
"bytes": "70974"
},
{
"name": "Makefile",
"bytes": "2295"
},
{
"name": "Objective-C",
"bytes": "35223"
},
{
"name": "Objective-C++",
"bytes": "34410"
},
{
"name": "PHP",
"bytes": "120845"
},
{
"name": "Python",
"bytes": "1002226"
},
{
"name": "Shell",
"bytes": "49974"
}
],
"symlink_target": ""
} |
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install-2.7')()
)
| {
"content_hash": "9027bd893e0f881f10c3ca29a81df69e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 86,
"avg_line_length": 28,
"alnum_prop": 0.6116071428571429,
"repo_name": "t-rodynenko/simplequiz",
"id": "5424967687b50ef0155b998ea84b658251a763d7",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Scripts/easy_install-2.7-script.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.db import models
class Orderable(models.Model):
"""
Add an extra field and default ordering column for an inline orderable model
"""
order = models.IntegerField(default=0)
class Meta:
abstract = True
ordering = ('order',)
| {
"content_hash": "299b5710490cc1bf605a7116fa67c779",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 22.46153846153846,
"alnum_prop": 0.6061643835616438,
"repo_name": "marcofucci/django-inline-orderable",
"id": "df80f55925016bfddb5f808e923edecfa58d425d",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inline_orderable/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3842"
},
{
"name": "Python",
"bytes": "1481"
}
],
"symlink_target": ""
} |
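The row above defines an abstract `Orderable` base model. A minimal sketch of a concrete model built on it; `GalleryImage` is a made-up name, not part of django-inline-orderable:

```python
# Hypothetical concrete model; only Orderable comes from the package above.
from django.db import models
from inline_orderable.models import Orderable

class GalleryImage(Orderable):
    title = models.CharField(max_length=100)

# GalleryImage inherits the integer `order` field and Meta.ordering = ('order',),
# so GalleryImage.objects.all() returns rows sorted by their stored position.
```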
"""
generate_graphs.py
------------------
Generate small synthetic graphs whose complete CLD will be computed.
"""
import cycles
import numpy as np
import networkx as nx
from numpy import arange
def is_valid(graph):
"""Return whether the graph is valid to run experiments on."""
rank = cycles.fundamental_group_rank(graph)
# return nx.density(graph) < 0.3 and nx.is_connected(graph) and rank < 50
return nx.is_connected(graph) and rank < 50
def save_graph(graph, filename):
"""Save the graph to the given path.
filename should be the name of the target file, without the format
extension.
"""
component = max(nx.connected_component_subgraphs(graph), key=len)
matrix = nx.adjacency_matrix(component).A
np.savetxt(filename + '.txt', matrix, fmt='%1.1f')
def generate_erdos_renyi():
"""Generate small synthetic ER graphs."""
for num_nodes in range(10, 31, 5):
for prob in arange(0.05, 0.4, 0.05):
for i in range(20):
graph = nx.erdos_renyi_graph(num_nodes, prob)
if is_valid(graph):
rank = cycles.fundamental_group_rank(graph)
name = 'data/ER_N={}_p={}_R={}_i={}'.format(num_nodes, int(prob * 1000), rank, i)
save_graph(graph, name)
def generate_barabasi_albert():
"""Generate small synthetic BA graphs."""
for num_nodes in range(10, 31, 5):
for edges_per_step in range(2, 6):
for i in range(20):
graph = nx.barabasi_albert_graph(num_nodes, edges_per_step)
if is_valid(graph):
rank = cycles.fundamental_group_rank(graph)
name = 'data/BA_N={}_m={}_R={}_i={}'.format(num_nodes, edges_per_step, rank, i)
save_graph(graph, name)
def generate_watts_strogatz():
"""Generate small synthetic WS graphs."""
for num_nodes in range(10, 31, 5):
for degree in [2, 4]:
for prob in arange(0.05, 0.4, 0.05):
for i in range(20):
graph = nx.watts_strogatz_graph(num_nodes, degree, prob)
if is_valid(graph):
rank = cycles.fundamental_group_rank(graph)
name = 'data/WS_N={}_d={}_p={}_R={}_i={}'.format(num_nodes, degree, int(prob * 1000), rank, i)
save_graph(graph, name)
def generate_other():
"""Generate other small graphs."""
graph = nx.florentine_families_graph()
if is_valid(graph):
rank = cycles.fundamental_group_rank(graph)
filename = 'data/{}_N={}_R={}'.format('florentine', len(graph), rank)
save_graph(graph, filename)
graph = nx.karate_club_graph()
if is_valid(graph):
rank = cycles.fundamental_group_rank(graph)
filename = 'data/{}_N={}_R={}'.format('karate', len(graph), rank)
save_graph(graph, filename)
def main():
"""Generate small graphs of different kinds."""
generate_erdos_renyi()
generate_barabasi_albert()
generate_watts_strogatz()
generate_other()
if __name__ == '__main__':
main()
| {
"content_hash": "84727440c3bba6ab8fef91cfb69a85a1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 118,
"avg_line_length": 33.02105263157895,
"alnum_prop": 0.5773031558814153,
"repo_name": "leotrs/graph_homotopy",
"id": "b191af64d44ef32bc54ee859144141e877534ae8",
"size": "3137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_graphs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "9561"
},
{
"name": "Python",
"bytes": "33231"
}
],
"symlink_target": ""
} |
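The generator above saves each graph as a plain-text adjacency matrix via `np.savetxt`. A small sketch of reading one back; the file name is made up and `nx.from_numpy_array` is the NetworkX 2.x spelling (older releases expose `nx.from_numpy_matrix`):

```python
# Illustrative only: round-trip one of the saved adjacency matrices.
import numpy as np
import networkx as nx

matrix = np.loadtxt('data/ER_N=10_p=100_R=3_i=0.txt')
graph = nx.from_numpy_array(matrix)
print(graph.number_of_nodes(), graph.number_of_edges())
```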
from distutils.core import setup
setup(
name='rq_test1',
packages=['rq_test1'],
version='0.3.0',
description='Simple statistical functions implemented in readable Python.',
author='Sherif Soliman',
author_email='sherif@ssoliman.com',
copyright='Copyright (c) 2016 Sherif Soliman',
url='https://github.com/rquirozr/Test-Package2',
# download_url='https://github.com/sheriferson/simplestatistics/tarball/0.3.0',
keywords=['statistics', 'math'],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Mathematics',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Operating System :: MacOS',
'Operating System :: Unix',
'Topic :: Education',
'Topic :: Utilities'
]
)
| {
"content_hash": "568ecb5a7ce5f9f1ed3661a21f1cec35",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.6108910891089109,
"repo_name": "rquirozr/Test-Package2",
"id": "9cc7bd9d339432a3e2e9ed3e11c2e4efb6bae1a1",
"size": "2920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31887"
}
],
"symlink_target": ""
} |
"""Provide the device automations for Vacuum."""
from typing import Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DOMAIN,
CONF_TYPE,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from . import DOMAIN, STATE_DOCKED, STATE_CLEANING, STATE_RETURNING
CONDITION_TYPES = {"is_cleaning", "is_docked"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for Vacuum devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_cleaning",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_docked",
}
)
return conditions
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_docked":
test_states = [STATE_DOCKED]
else:
test_states = [STATE_CLEANING, STATE_RETURNING]
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
state = hass.states.get(config[ATTR_ENTITY_ID])
return state is not None and state.state in test_states
return test_is_state
| {
"content_hash": "3d8db337ab42c3c9c7736145281e4ae3",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 85,
"avg_line_length": 31.734177215189874,
"alnum_prop": 0.6402074192261668,
"repo_name": "joopert/home-assistant",
"id": "6a41fe0490e13e79fa78f65ecd8988dd1792c9c6",
"size": "2507",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vacuum/device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
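The module above builds and validates vacuum device conditions. A sketch of the condition shape it works with; the device and entity IDs are invented:

```python
# Illustrative only: a config dict of the shape CONDITION_SCHEMA accepts.
example = {
    "condition": "device",
    "domain": "vacuum",
    "device_id": "1234567890abcdef",
    "entity_id": "vacuum.downstairs",
    "type": "is_docked",
}
# CONDITION_SCHEMA(example) validates this shape, and
# async_condition_from_config(example, False) returns a checker that is True
# while the entity's state is STATE_DOCKED.
```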
import spotify
import threading
import json
import os
import nltk.metrics.agreement
import api_keys
# Get secret keys
KEYS = api_keys.get_keys()
logged_in_event = threading.Event()
def pretty_print(obj):
print json.dumps(obj, sort_keys=True, indent=4, separators=(',',': '))
def connection_state_listener(session):
if session.connection.state is spotify.ConnectionState.LOGGED_IN:
logged_in_event.set()
# Specify configuration
config = spotify.Config()
config.user_agent = 'My awesome Spotify client'
config.tracefile = b'/tmp/libspotify-trace.log'
print "Opening session with user {}...".format(KEYS["SPOTIFY_USERNAME"])
# Open session and loop
session = spotify.Session(config)
loop = spotify.EventLoop(session)
loop.start()
session.on(
spotify.SessionEvent.CONNECTION_STATE_UPDATED,
connection_state_listener)
session.login(KEYS["SPOTIFY_USERNAME"],KEYS["SPOTIFY_PASSWORD"])
logged_in_event.wait()
print "Logged in and waiting..."
# Return the greatest common suffix in a list of strings
def greatest_common_suffix(list_of_strings):
reversed_strings = [' '.join(s.split()[::-1]) for s in list_of_strings]
reversed_gcs = os.path.commonprefix(reversed_strings)
gcs = ' '.join(reversed_gcs.split()[::-1])
return gcs
def score(target, item):
target = target.lower()
item = item.lower()
return nltk.metrics.edit_distance(target, item)*1.0 / len(target)
def match(target, candidate_list, distance_only=False):
""" Given a target string and a list of candidate strings, return the best
matching candidate.
"""
distances = []
for item in candidate_list:
dist = score(target, item)
distances.append(dist)
if distance_only:
return min(distances)
# Get index of minimum distance
return distances.index(min(distances))
def search_score(target_tracks, matching_tracks):
""" Given a list of track names to be matched, and a list of matching
tracks, returns a score that approximates the confidence that the match
is valid.
The score is based on the average of the edit distance between each target
track and its best match, offset by the difference in the length of each
list.
"""
distances = []
for target in target_tracks:
dist = match(target, matching_tracks, distance_only=True)
distances.append(dist)
return (sum(distances) / len(distances)) + abs(len(target_tracks)-
len(matching_tracks))/len(distances)
def search_for_album(show):
query = show["name"]
search = session.search(query)
# Execute search query
search = search.load()
album_results = search.albums
print '\nSearching for "{}"'.format(query)
# If we find no results, report error
if len(album_results) == 0:
raise StandardError("Error: no search results found.")
scores = []
for album in album_results:
album.load()
# Obtain track list
browser = album.browse().load()
tracks = browser.tracks
# Get lists of candidate album's track names and
# the actual track names
track_names = [clean_track_name(track.name, album, browser) for track in tracks]
target_names = [song["name"] for song in show["songs"]]
# Obtain a similarity score between the two lists
score = search_score(target_names, track_names)
# Save the score
scores.append(score)
# If none of the results have an acceptable score, report
# an error
if min(scores) > .3:
raise StandardError("Error: no results above threshold")
return album_results[scores.index(min(scores))]
def ascii(s):
return s.encode('ascii', 'ignore')
def add_spotify_song_data(song, spotify_track):
song["spotify_popularity"] = spotify_track.popularity
song["spotify_duration"] = spotify_track.duration / 1000
song["spotify_track"] = str(spotify_track.link)
song["spotify_track_name"] = spotify_track.name
song["spotify_match_score"] = match_score
artists= [str(artist.link) for artist in spotify_track.artists]
artist_names = [ascii(artist.name) for artist in spotify_track.artists]
song["spotify_artists"] = artists
song["spotify_artist_names"] = artist_names
song["spotify_track_index"] = spotify_track.index
def add_spotify_album_data(album, spotify_album):
# Save the cover art file found on Spotify
cover_art_file = '../data/cover_art/'+str(spotify_album.link)+'.jpg'
open(cover_art_file,'w+').write(spotify_album.cover().load().data)
# Record album-specific data
show["show_on_spotify"] = True
show["spotify_album"] = str(spotify_album.link)
show["spotify_album_year"] = spotify_album.year
show["spotify_album_artist"] = ascii(spotify_album.artist.name)
show["spotify_cover_art"] = cover_art_file
def clean_track_name(track_name, album, browser):
browser = album.browse().load()
tracks = browser.tracks
track_names = [track.name for track in tracks]
gcs = greatest_common_suffix(track_names)
track_name = ascii(track_name).lower()
album_name = ascii(album.name).lower().replace(' the musical','')
# Remove greatest common suffix if large enough
if len(gcs) > 3:
track_name = track_name.replace(gcs.lower(), '')
# Remove "(From "[show_name]")" from track name if present
track_name = track_name.replace('(from "{}")'.format(album_name),'')
# Remove "- Musical "[show_name]"" from track name if present
track_name = track_name.replace(' - musical "{}"'.format(album_name),'')
# Remove " - feat.*" if present
track_name = track_name.split(" - feat. ")[0]
return track_name
with open('../data/shows_combined.json.matched', 'r') as f:
data = json.load(f)
for show in data:
show_name = show["name"]
# Try to search Spotify for the album. If no suitable matches are found,
# note that the album was not found on Spotify and move on.
try:
album = search_for_album(show)
except StandardError as e:
show["show_on_spotify"] = False
print e
continue
# Load the album, get the track list, and produce a list of track names
# on the Spotify album
album.load()
browser = album.browse().load()
tracks = browser.tracks
track_names = [clean_track_name(track.name, album, browser) for track in tracks]
show["spotify_song_count"] = len(track_names)
add_spotify_album_data(show, album)
# Keep track of any songs that we find on spotify that we didn't have
# saved before
new_songs = []
# For each song in the show, find a match from the track list.
for song in show["songs"]:
track_index = match(song["name"], track_names)
matching_track = tracks[track_index]
matching_track_name = clean_track_name(matching_track.name, album, browser)
song_name = ascii(song["name"])
match_score = score(song_name,matching_track_name)
print '\t"{}", "{}": {}'.format(
song_name, matching_track_name, match_score)
if match_score < .7:
song["song_on_allmusicals"] = True
song["song_on_spotify"] = True
add_spotify_song_data(song, matching_track)
else:
new_song = {}
song["song_on_spotify"] = False
song["song_on_allmusicals"] = True
new_song["song_on_spotify"] = True
new_song["song_on_allmusicals"] = False
add_spotify_song_data(new_song, matching_track)
collected = [s["spotify_track"] for s in new_songs]
if new_song["spotify_track"] not in collected:
new_songs.append(new_song)
collected = [s["spotify_track"] for s in show["songs"]
if "spotify_track" in s]
new_songs = [s for s in new_songs if s["spotify_track"] not in collected]
show["songs"].extend(new_songs)
with open('../data/shows_w_spotify.json', 'w') as outfile:
json.dump(data, outfile)
| {
"content_hash": "70dbea1ead9603d63be789458c04a54c",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 82,
"avg_line_length": 28.91796875,
"alnum_prop": 0.7080913143320275,
"repo_name": "willwest/broadwaydb",
"id": "059f4e705407fd88b54870e000d121a2011c7e90",
"size": "7403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawl/crawl_spotify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9056"
},
{
"name": "HTML",
"bytes": "5834"
},
{
"name": "JavaScript",
"bytes": "536"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "33568"
},
{
"name": "R",
"bytes": "5854"
}
],
"symlink_target": ""
} |
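The crawler above matches show songs to Spotify tracks with a normalized edit distance (`score`/`match`). A toy illustration of that metric, with made-up track names, showing why `clean_track_name` strips suffixes like " - feat. " before matching:

```python
# Toy illustration of the normalized edit-distance score; not part of the crawler.
from nltk.metrics import edit_distance

def normalized_score(target, item):
    target, item = target.lower(), item.lower()
    return edit_distance(target, item) * 1.0 / len(target)

print(normalized_score("Defying Gravity", "Defying Gravity"))                       # 0.0 (exact)
print(normalized_score("Defying Gravity", "Defying Gravity - feat. Idina Menzel"))  # ~1.4 (poor)
```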
import sys
from mauto import gui
from mauto.api import library
def show():
gui.show()
def select_repo():
gui.select_repo()
def list_macros():
return library.macros.keys()
def new_macro(*arg, **kwds):
return library.new_macro(*arg, **kwds)
def get_macro(name):
return library.get(name)
def remove_macro(name):
if library.get(name):
library.remove_macro(name)
def save_macro(name):
return library.save_macro(name)
def get_filepath(name):
return library.get_filepath(name)
def __main__():
app = gui.QtGui.QApplication(sys.argv)
w = gui.Layout()
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
__main__()
| {
"content_hash": "f416cb853539322e5df49dace06bf2fd",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 42,
"avg_line_length": 14.416666666666666,
"alnum_prop": 0.6257225433526011,
"repo_name": "csaez/mauto",
"id": "9233f7c2437fb7455f008dd6631a612f6d896ac5",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mauto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52303"
}
],
"symlink_target": ""
} |
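The `mauto` package above is a thin facade over its gui and library modules. A hypothetical usage sketch, assuming it runs inside a Maya session where PythonQt is available:

```python
# Illustrative only; assumes mauto and its GUI dependencies are importable.
import mauto

mauto.show()                # open the macro recorder GUI
print(mauto.list_macros())  # names of the macros currently in the library
```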
import abc
import collections
import os
import re
import shutil
import time
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from neutron._i18n import _, _LI, _LW, _LE
from neutron.agent.common import utils as agent_common_utils
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'
DNSMASQ_SERVICE_NAME = 'dnsmasq'
class DictModel(dict):
"""Convert dict into an object that provides attribute access to values."""
def __init__(self, *args, **kwargs):
"""Convert dict values to DictModel values."""
super(DictModel, self).__init__(*args, **kwargs)
def needs_upgrade(item):
"""Check if `item` is a dict and needs to be changed to DictModel.
"""
return isinstance(item, dict) and not isinstance(item, DictModel)
def upgrade(item):
"""Upgrade item if it needs to be upgraded."""
if needs_upgrade(item):
return DictModel(item)
else:
return item
for key, value in six.iteritems(self):
if isinstance(value, (list, tuple)):
# Keep the same type but convert dicts to DictModels
self[key] = type(value)(
(upgrade(item) for item in value)
)
elif needs_upgrade(value):
# Change dict instance values to DictModel instance values
self[key] = DictModel(value)
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
def __str__(self):
pairs = ['%s=%s' % (k, v) for k, v in self.items()]
return ', '.join(sorted(pairs))
class NetModel(DictModel):
def __init__(self, d):
super(NetModel, self).__init__(d)
self._ns_name = "%s%s" % (NS_PREFIX, self.id)
@property
def namespace(self):
return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
def __init__(self, conf, network, process_monitor,
version=None, plugin=None):
self.conf = conf
self.network = network
self.process_monitor = process_monitor
self.device_manager = DeviceManager(self.conf, plugin)
self.version = version
@abc.abstractmethod
def enable(self):
"""Enables DHCP for this network."""
@abc.abstractmethod
def disable(self, retain_port=False):
"""Disable dhcp for this network."""
def restart(self):
"""Restart the dhcp service for the network."""
self.disable(retain_port=True)
self.enable()
@abc.abstractproperty
def active(self):
"""Boolean representing the running state of the DHCP server."""
@abc.abstractmethod
def reload_allocations(self):
"""Force the DHCP server to reload the assignment database."""
@classmethod
def existing_dhcp_networks(cls, conf):
"""Return a list of existing networks ids that we have configs for."""
raise NotImplementedError()
@classmethod
def check_version(cls):
"""Execute version checks on DHCP server."""
raise NotImplementedError()
@classmethod
def get_isolated_subnets(cls, network):
"""Returns a dict indicating whether or not a subnet is isolated"""
raise NotImplementedError()
@classmethod
def should_enable_metadata(cls, conf, network):
"""True if the metadata-proxy should be enabled for the network."""
raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class DhcpLocalProcess(DhcpBase):
PORTS = []
def __init__(self, conf, network, process_monitor, version=None,
plugin=None):
super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
version, plugin)
self.confs_dir = self.get_confs_dir(conf)
self.network_conf_dir = os.path.join(self.confs_dir, network.id)
common_utils.ensure_dir(self.network_conf_dir)
@staticmethod
def get_confs_dir(conf):
return os.path.abspath(os.path.normpath(conf.dhcp_confs))
def get_conf_file_name(self, kind):
"""Returns the file name for a given kind of config file."""
return os.path.join(self.network_conf_dir, kind)
def _remove_config_files(self):
shutil.rmtree(self.network_conf_dir, ignore_errors=True)
def _enable_dhcp(self):
"""check if there is a subnet within the network with dhcp enabled."""
for subnet in self.network.subnets:
if subnet.enable_dhcp:
return True
return False
def enable(self):
"""Enables DHCP for this network by spawning a local process."""
if self.active:
self.restart()
elif self._enable_dhcp():
common_utils.ensure_dir(self.network_conf_dir)
interface_name = self.device_manager.setup(self.network)
self.interface_name = interface_name
self.spawn_process()
def _get_process_manager(self, cmd_callback=None):
return external_process.ProcessManager(
conf=self.conf,
uuid=self.network.id,
namespace=self.network.namespace,
default_cmd_callback=cmd_callback,
pid_file=self.get_conf_file_name('pid'),
run_as_root=True)
def disable(self, retain_port=False):
"""Disable DHCP for this network by killing the local process."""
self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
self._get_process_manager().disable()
if not retain_port:
self._destroy_namespace_and_port()
self._remove_config_files()
def _destroy_namespace_and_port(self):
try:
self.device_manager.destroy(self.network, self.interface_name)
except RuntimeError:
LOG.warning(_LW('Failed trying to delete interface: %s'),
self.interface_name)
ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
LOG.warning(_LW('Failed trying to delete namespace: %s'),
self.network.namespace)
def _get_value_from_conf_file(self, kind, converter=None):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
msg = _('Unable to convert value in %s')
except IOError:
msg = _('Unable to access %s')
LOG.debug(msg, file_name)
return None
@property
def interface_name(self):
return self._get_value_from_conf_file('interface')
@interface_name.setter
def interface_name(self, value):
interface_file_path = self.get_conf_file_name('interface')
common_utils.replace_file(interface_file_path, value)
@property
def active(self):
return self._get_process_manager().active
@abc.abstractmethod
def spawn_process(self):
pass
class Dnsmasq(DhcpLocalProcess):
# The ports that need to be opened when security policies are active
# on the Neutron port used for DHCP. These are provided as a convenience
# for users of this class.
PORTS = {constants.IP_VERSION_4:
[(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
constants.IP_VERSION_6:
[(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
}
_TAG_PREFIX = 'tag%d'
_ID = 'id:'
@classmethod
def check_version(cls):
pass
@classmethod
def existing_dhcp_networks(cls, conf):
"""Return a list of existing networks ids that we have configs for."""
confs_dir = cls.get_confs_dir(conf)
try:
return [
c for c in os.listdir(confs_dir)
if uuidutils.is_uuid_like(c)
]
except OSError:
return []
def _build_cmdline_callback(self, pid_file):
# We ignore local resolv.conf if dns servers are specified
# or if local resolution is explicitly disabled.
_no_resolv = (
'--no-resolv' if self.conf.dnsmasq_dns_servers or
not self.conf.dnsmasq_local_resolv else '')
cmd = [
'dnsmasq',
'--no-hosts',
_no_resolv,
'--strict-order',
'--except-interface=lo',
'--pid-file=%s' % pid_file,
'--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
'--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
'--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
'--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
'--dhcp-match=set:ipxe,175',
]
if self.device_manager.driver.bridged:
cmd += [
'--bind-interfaces',
'--interface=%s' % self.interface_name,
]
else:
cmd += [
'--bind-dynamic',
'--interface=%s' % self.interface_name,
'--interface=tap*',
'--bridge-interface=%s,tap*' % self.interface_name,
]
possible_leases = 0
for i, subnet in enumerate(self.network.subnets):
mode = None
# if a subnet is specified to have dhcp disabled
if not subnet.enable_dhcp:
continue
if subnet.ip_version == 4:
mode = 'static'
else:
# Note(scollins) If the IPv6 attributes are not set, set it as
# static to preserve previous behavior
addr_mode = getattr(subnet, 'ipv6_address_mode', None)
ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
if (addr_mode in [n_const.DHCPV6_STATEFUL,
n_const.DHCPV6_STATELESS] or
not addr_mode and not ra_mode):
mode = 'static'
cidr = netaddr.IPNetwork(subnet.cidr)
if self.conf.dhcp_lease_duration == -1:
lease = 'infinite'
else:
lease = '%ss' % self.conf.dhcp_lease_duration
# mode is optional and is not set - skip it
if mode:
if subnet.ip_version == 4:
cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode, lease))
else:
cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode,
cidr.prefixlen, lease))
possible_leases += cidr.size
if cfg.CONF.advertise_mtu:
mtu = getattr(self.network, 'mtu', 0)
# Do not advertise unknown mtu
if mtu > 0:
cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
# Cap the limit because creating lots of subnets can inflate
# this possible lease cap.
cmd.append('--dhcp-lease-max=%d' %
min(possible_leases, self.conf.dnsmasq_lease_max))
cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
if self.conf.dnsmasq_dns_servers:
cmd.extend(
'--server=%s' % server
for server in self.conf.dnsmasq_dns_servers)
if self.conf.dhcp_domain:
cmd.append('--domain=%s' % self.conf.dhcp_domain)
if self.conf.dhcp_broadcast_reply:
cmd.append('--dhcp-broadcast')
if self.conf.dnsmasq_base_log_dir:
log_dir = os.path.join(
self.conf.dnsmasq_base_log_dir,
self.network.id)
try:
if not os.path.exists(log_dir):
os.makedirs(log_dir)
except OSError:
LOG.error(_LE('Error while creating dnsmasq log dir: %s'),
log_dir)
else:
log_filename = os.path.join(log_dir, 'dhcp_dns_log')
cmd.append('--log-queries')
cmd.append('--log-dhcp')
cmd.append('--log-facility=%s' % log_filename)
return cmd
def spawn_process(self):
"""Spawn the process, if it's not spawned already."""
# we only need to generate the lease file the first time dnsmasq starts
# rather than on every reload since dnsmasq will keep the file current
self._output_init_lease_file()
self._spawn_or_reload_process(reload_with_HUP=False)
def _spawn_or_reload_process(self, reload_with_HUP):
"""Spawns or reloads a Dnsmasq process for the network.
When reload_with_HUP is True, dnsmasq receives a HUP signal,
or it's reloaded if the process is not running.
"""
self._output_config_files()
pm = self._get_process_manager(
cmd_callback=self._build_cmdline_callback)
pm.enable(reload_cfg=reload_with_HUP)
self.process_monitor.register(uuid=self.network.id,
service_name=DNSMASQ_SERVICE_NAME,
monitored_process=pm)
def _release_lease(self, mac_address, ip, client_id):
"""Release a DHCP lease."""
if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
# Note(SridharG) dhcp_release is only supported for IPv4
# addresses. For more details, please refer to man page.
return
cmd = ['dhcp_release', self.interface_name, ip, mac_address]
if client_id:
cmd.append(client_id)
ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
ip_wrapper.netns.execute(cmd, run_as_root=True)
def _output_config_files(self):
self._output_hosts_file()
self._output_addn_hosts_file()
self._output_opts_file()
def reload_allocations(self):
"""Rebuild the dnsmasq config and signal the dnsmasq to reload."""
# If all subnets turn off dhcp, kill the process.
if not self._enable_dhcp():
self.disable()
LOG.debug('Killing dnsmasq for network since all subnets have '
'turned off DHCP: %s', self.network.id)
return
self._release_unused_leases()
self._spawn_or_reload_process(reload_with_HUP=True)
LOG.debug('Reloading allocations for network: %s', self.network.id)
self.device_manager.update(self.network, self.interface_name)
def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
"""Sort fixed_ips so that stateless IPv6 subnets appear first.
For example, if a port with v6 extra_dhcp_opts is on a network with
IPv4 and IPv6 stateless subnets, the dhcp host file will have the
below 2 entries for the same MAC,
fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for IPv4 dhcp)
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for stateless IPv6 for v6 options)
dnsmasq internal details for processing host file entries
1) dnsmasq reads the host file from EOF.
2) So it first picks up stateless IPv6 entry,
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
3) But dnsmasq doesn't have sufficient checks to skip this entry and
pick next entry, to process dhcp IPv4 request.
4) So dnsmasq uses this entry to process dhcp IPv4 request.
5) As there is no ip in this entry, dnsmasq logs "no address available"
and fails to send DHCPOFFER message.
As we rely on internal details of dnsmasq to understand and fix the
issue, Ihar sent a mail to dnsmasq-discuss mailing list
http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
009650.html
So if we reverse the order of writing entries in host file,
so that entry for stateless IPv6 comes first,
then dnsmasq can correctly fetch the IPv4 address.
"""
return sorted(
fixed_ips,
key=lambda fip: ((fip.subnet_id in v6_nets) and (
v6_nets[fip.subnet_id].ipv6_address_mode == (
n_const.DHCPV6_STATELESS))),
reverse=True)
def _iter_hosts(self):
"""Iterate over hosts.
For each host on the network we yield a tuple containing:
(
port, # a DictModel instance representing the port.
alloc, # a DictModel instance of the allocated ip and subnet.
# if alloc is None, it means there is no need to allocate
# an IPv6 address because of stateless DHCPv6 network.
host_name, # Host name.
name, # Canonical hostname in the format 'hostname[.domain]'.
no_dhcp, # A flag indicating that the address doesn't need a DHCP
# IP address.
no_opts, # A flag indication that options shouldn't be written
)
"""
v6_nets = dict((subnet.id, subnet) for subnet in
self.network.subnets if subnet.ip_version == 6)
for port in self.network.ports:
fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
v6_nets)
# Confirm whether Neutron server supports dns_name attribute in the
# ports API
dns_assignment = getattr(port, 'dns_assignment', None)
if dns_assignment:
dns_ip_map = {d.ip_address: d for d in dns_assignment}
for alloc in fixed_ips:
no_dhcp = False
no_opts = False
if alloc.subnet_id in v6_nets:
addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
no_dhcp = addr_mode in (n_const.IPV6_SLAAC,
n_const.DHCPV6_STATELESS)
# we don't setup anything for SLAAC. It doesn't make sense
# to provide options for a client that won't use DHCP
no_opts = addr_mode == n_const.IPV6_SLAAC
# If dns_name attribute is supported by ports API, return the
# dns_assignment generated by the Neutron server. Otherwise,
# generate hostname and fqdn locally (previous behaviour)
if dns_assignment:
hostname = dns_ip_map[alloc.ip_address].hostname
fqdn = dns_ip_map[alloc.ip_address].fqdn
else:
hostname = 'host-%s' % alloc.ip_address.replace(
'.', '-').replace(':', '-')
fqdn = hostname
if self.conf.dhcp_domain:
fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)
def _get_port_extra_dhcp_opts(self, port):
return getattr(port, edo_ext.EXTRADHCPOPTS, False)
def _output_init_lease_file(self):
"""Write a fake lease file to bootstrap dnsmasq.
The generated file is passed to the --dhcp-leasefile option of dnsmasq.
This is used as a bootstrapping mechanism to avoid NAKing active leases
when a dhcp server is scheduled to another agent. Using a leasefile
will also prevent dnsmasq from NAKing or ignoring renewals after a
restart.
Format is as follows:
epoch-timestamp mac_addr ip_addr hostname client-ID
"""
filename = self.get_conf_file_name('leases')
buf = six.StringIO()
LOG.debug('Building initial lease file: %s', filename)
# we make up a lease time for the database entry
if self.conf.dhcp_lease_duration == -1:
# Even with an infinite lease, a client may choose to renew a
# previous lease on reboot or interface bounce so we should have
# an entry for it.
# Dnsmasq timestamp format for an infinite lease is 0.
timestamp = 0
else:
timestamp = int(time.time()) + self.conf.dhcp_lease_duration
dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
if s.enable_dhcp]
for host_tuple in self._iter_hosts():
port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
# don't write ip address which belongs to a dhcp disabled subnet
# or an IPv6 SLAAC/stateless subnet
if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
continue
ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
# all that matters is the mac address and IP. the hostname and
# client ID will be overwritten on the next renewal.
buf.write('%s %s %s * *\n' %
(timestamp, port.mac_address, ip_address))
contents = buf.getvalue()
common_utils.replace_file(filename, contents)
LOG.debug('Done building initial lease file %s with contents:\n%s',
filename, contents)
return filename
@staticmethod
def _format_address_for_dnsmasq(address):
# (dzyu) Check if it is legal ipv6 address, if so, need wrap
# it with '[]' to let dnsmasq to distinguish MAC address from
# IPv6 address.
if netaddr.valid_ipv6(address):
return '[%s]' % address
return address
def _output_hosts_file(self):
"""Writes a dnsmasq compatible dhcp hosts file.
The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
and lists the hosts on the network which should receive a dhcp lease.
Each line in this file is in the form::
'mac_address,FQDN,ip_address'
IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
this file if it did not give a lease to a host listed in it (e.g.:
multiple dnsmasq instances on the same network if this network is on
multiple network nodes). This file is only defining hosts which
should receive a dhcp lease, the hosts resolution in itself is
defined by the `_output_addn_hosts_file` method.
"""
buf = six.StringIO()
filename = self.get_conf_file_name('host')
LOG.debug('Building host file: %s', filename)
dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
if s.enable_dhcp]
# NOTE(ihrachyshka): the loop should not log anything inside it, to
# avoid potential performance drop when lots of hosts are dumped
for host_tuple in self._iter_hosts():
port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
if no_dhcp:
if not no_opts and self._get_port_extra_dhcp_opts(port):
buf.write('%s,%s%s\n' %
(port.mac_address, 'set:', port.id))
continue
# don't write ip address which belongs to a dhcp disabled subnet.
if alloc.subnet_id not in dhcp_enabled_subnet_ids:
continue
ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
if self._get_port_extra_dhcp_opts(port):
client_id = self._get_client_id(port)
if client_id and len(port.extra_dhcp_opts) > 1:
buf.write('%s,%s%s,%s,%s,%s%s\n' %
(port.mac_address, self._ID, client_id, name,
ip_address, 'set:', port.id))
elif client_id and len(port.extra_dhcp_opts) == 1:
buf.write('%s,%s%s,%s,%s\n' %
(port.mac_address, self._ID, client_id, name,
ip_address))
else:
buf.write('%s,%s,%s,%s%s\n' %
(port.mac_address, name, ip_address,
'set:', port.id))
else:
buf.write('%s,%s,%s\n' %
(port.mac_address, name, ip_address))
common_utils.replace_file(filename, buf.getvalue())
LOG.debug('Done building host file %s', filename)
return filename
def _get_client_id(self, port):
if self._get_port_extra_dhcp_opts(port):
for opt in port.extra_dhcp_opts:
if opt.opt_name == edo_ext.CLIENT_ID:
return opt.opt_value
def _read_hosts_file_leases(self, filename):
leases = set()
try:
with open(filename) as f:
for l in f.readlines():
host = l.strip().split(',')
mac = host[0]
client_id = None
if host[1].startswith('set:'):
continue
if host[1].startswith(self._ID):
ip = host[3].strip('[]')
client_id = host[1][len(self._ID):]
else:
ip = host[2].strip('[]')
leases.add((ip, mac, client_id))
except (OSError, IOError):
LOG.debug('Error while reading hosts file %s', filename)
return leases
def _release_unused_leases(self):
filename = self.get_conf_file_name('host')
old_leases = self._read_hosts_file_leases(filename)
new_leases = set()
dhcp_port_exists = False
dhcp_port_on_this_host = self.device_manager.get_device_id(
self.network)
for port in self.network.ports:
client_id = self._get_client_id(port)
for alloc in port.fixed_ips:
new_leases.add((alloc.ip_address, port.mac_address, client_id))
if port.device_id == dhcp_port_on_this_host:
dhcp_port_exists = True
for ip, mac, client_id in old_leases - new_leases:
self._release_lease(mac, ip, client_id)
if not dhcp_port_exists:
self.device_manager.driver.unplug(
self.interface_name, namespace=self.network.namespace)
def _output_addn_hosts_file(self):
"""Writes a dnsmasq compatible additional hosts file.
The generated file is sent to the --addn-hosts option of dnsmasq,
and lists the hosts on the network which should be resolved even if
the dnsmasq instance did not give a lease to the host (see the
`_output_hosts_file` method).
Each line in this file is in the same form as a standard /etc/hosts
file.
"""
buf = six.StringIO()
for host_tuple in self._iter_hosts():
port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
# It is compulsory to write the `fqdn` before the `hostname` in
# order to obtain it in PTR responses.
if alloc:
buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
addn_hosts = self.get_conf_file_name('addn_hosts')
common_utils.replace_file(addn_hosts, buf.getvalue())
return addn_hosts
def _output_opts_file(self):
"""Write a dnsmasq compatible options file."""
options, subnet_index_map = self._generate_opts_per_subnet()
options += self._generate_opts_per_port(subnet_index_map)
name = self.get_conf_file_name('opts')
common_utils.replace_file(name, '\n'.join(options))
return name
def _generate_opts_per_subnet(self):
options = []
subnet_index_map = {}
if self.conf.enable_isolated_metadata or self.conf.force_metadata:
subnet_to_interface_ip = self._make_subnet_interface_ip_map()
isolated_subnets = self.get_isolated_subnets(self.network)
for i, subnet in enumerate(self.network.subnets):
addr_mode = getattr(subnet, 'ipv6_address_mode', None)
if (not subnet.enable_dhcp or
(subnet.ip_version == 6 and
addr_mode == n_const.IPV6_SLAAC)):
continue
if subnet.dns_nameservers:
options.append(
self._format_option(
subnet.ip_version, i, 'dns-server',
','.join(
Dnsmasq._convert_to_literal_addrs(
subnet.ip_version, subnet.dns_nameservers))))
else:
# use the dnsmasq ip as nameservers only if there is no
# dns-server submitted by the server
subnet_index_map[subnet.id] = i
if self.conf.dhcp_domain and subnet.ip_version == 6:
options.append('tag:tag%s,option6:domain-search,%s' %
(i, ''.join(self.conf.dhcp_domain)))
gateway = subnet.gateway_ip
host_routes = []
for hr in subnet.host_routes:
if hr.destination == constants.IPv4_ANY:
if not gateway:
gateway = hr.nexthop
else:
host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
# Add host routes for isolated network segments
if (self.conf.force_metadata or
(isolated_subnets[subnet.id] and
self.conf.enable_isolated_metadata and
subnet.ip_version == 4)):
subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
host_routes.append(
'%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
)
elif not isolated_subnets[subnet.id] and gateway:
host_routes.append(
'%s/32,%s' % (METADATA_DEFAULT_IP, gateway)
)
if subnet.ip_version == 4:
host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
self.network.subnets
if (s.ip_version == 4 and
s.cidr != subnet.cidr)])
if host_routes:
if gateway:
host_routes.append("%s,%s" % (constants.IPv4_ANY,
gateway))
options.append(
self._format_option(subnet.ip_version, i,
'classless-static-route',
','.join(host_routes)))
options.append(
self._format_option(subnet.ip_version, i,
WIN2k3_STATIC_DNS,
','.join(host_routes)))
if gateway:
options.append(self._format_option(subnet.ip_version,
i, 'router',
gateway))
else:
options.append(self._format_option(subnet.ip_version,
i, 'router'))
return options, subnet_index_map
def _generate_opts_per_port(self, subnet_index_map):
options = []
dhcp_ips = collections.defaultdict(list)
for port in self.network.ports:
if self._get_port_extra_dhcp_opts(port):
port_ip_versions = set(
[netaddr.IPAddress(ip.ip_address).version
for ip in port.fixed_ips])
for opt in port.extra_dhcp_opts:
if opt.opt_name == edo_ext.CLIENT_ID:
continue
opt_ip_version = opt.ip_version
if opt_ip_version in port_ip_versions:
options.append(
self._format_option(opt_ip_version, port.id,
opt.opt_name, opt.opt_value))
else:
LOG.info(_LI("Cannot apply dhcp option %(opt)s "
"because it's ip_version %(version)d "
"is not in port's address IP versions"),
{'opt': opt.opt_name,
'version': opt_ip_version})
# provides all dnsmasq ip as dns-server if there is more than
# one dnsmasq for a subnet and there is no dns-server submitted
# by the server
if port.device_owner == constants.DEVICE_OWNER_DHCP:
for ip in port.fixed_ips:
i = subnet_index_map.get(ip.subnet_id)
if i is None:
continue
dhcp_ips[i].append(ip.ip_address)
for i, ips in dhcp_ips.items():
for ip_version in (4, 6):
vx_ips = [ip for ip in ips
if netaddr.IPAddress(ip).version == ip_version]
if len(vx_ips) > 1:
options.append(
self._format_option(
ip_version, i, 'dns-server',
','.join(
Dnsmasq._convert_to_literal_addrs(ip_version,
vx_ips))))
return options
def _make_subnet_interface_ip_map(self):
ip_dev = ip_lib.IPDevice(self.interface_name,
namespace=self.network.namespace)
subnet_lookup = dict(
(netaddr.IPNetwork(subnet.cidr), subnet.id)
for subnet in self.network.subnets
)
retval = {}
for addr in ip_dev.addr.list():
ip_net = netaddr.IPNetwork(addr['cidr'])
if ip_net in subnet_lookup:
retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
return retval
def _format_option(self, ip_version, tag, option, *args):
"""Format DHCP option by option name or code."""
option = str(option)
pattern = "(tag:(.*),)?(.*)$"
matches = re.match(pattern, option)
extra_tag = matches.groups()[0]
option = matches.groups()[2]
if isinstance(tag, int):
tag = self._TAG_PREFIX % tag
if not option.isdigit():
if ip_version == 4:
option = 'option:%s' % option
else:
option = 'option6:%s' % option
if extra_tag:
tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
else:
tags = ('tag:' + tag, '%s' % option)
return ','.join(tags + args)
@staticmethod
def _convert_to_literal_addrs(ip_version, ips):
if ip_version == 4:
return ips
return ['[' + ip + ']' for ip in ips]
@classmethod
def get_isolated_subnets(cls, network):
"""Returns a dict indicating whether or not a subnet is isolated
A subnet is considered non-isolated if there is a port connected to
the subnet, and the port's ip address matches that of the subnet's
gateway. The port must be owned by a neutron router.
"""
isolated_subnets = collections.defaultdict(lambda: True)
subnets = dict((subnet.id, subnet) for subnet in network.subnets)
for port in network.ports:
if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
continue
for alloc in port.fixed_ips:
if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
isolated_subnets[alloc.subnet_id] = False
return isolated_subnets
@classmethod
def should_enable_metadata(cls, conf, network):
"""Determine whether the metadata proxy is needed for a network
This method returns True for truly isolated networks (ie: not attached
to a router) when enable_isolated_metadata is True, or for all the
networks when the force_metadata flags is True.
This method also returns True when enable_metadata_network is True,
and the network passed as a parameter has a subnet in the link-local
CIDR, thus characterizing it as a "metadata" network. The metadata
network is used by solutions which do not leverage the l3 agent for
providing access to the metadata service via logical routers built
with 3rd party backends.
"""
if conf.force_metadata:
return True
if conf.enable_metadata_network and conf.enable_isolated_metadata:
# check if the network has a metadata subnet
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
if any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets):
return True
if not conf.enable_isolated_metadata:
return False
isolated_subnets = cls.get_isolated_subnets(network)
return any(isolated_subnets[subnet.id] for subnet in network.subnets)
class DeviceManager(object):
def __init__(self, conf, plugin):
self.conf = conf
self.plugin = plugin
self.driver = agent_common_utils.load_interface_driver(conf)
def get_interface_name(self, network, port):
"""Return interface(device) name for use by the DHCP process."""
return self.driver.get_device_name(port)
def get_device_id(self, network):
"""Return a unique DHCP device ID for this host on the network."""
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
return common_utils.get_dhcp_agent_device_id(network.id,
self.conf.host)
def _set_default_route(self, network, device_name):
"""Sets the default gateway for this dhcp namespace.
This method is idempotent and will only adjust the route if adjusting
it would change it from what it already is. This makes it safe to call
and avoids unnecessary perturbation of the system.
"""
device = ip_lib.IPDevice(device_name, namespace=network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway.get('gateway')
for subnet in network.subnets:
skip_subnet = (
subnet.ip_version != 4
or not subnet.enable_dhcp
or subnet.gateway_ip is None)
if skip_subnet:
continue
if gateway != subnet.gateway_ip:
LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
'%(ip)s',
{'n': network.id, 'ip': subnet.gateway_ip})
# Check for and remove the on-link route for the old
# gateway being replaced, if it is outside the subnet
is_old_gateway_not_in_subnet = (gateway and
not ipam_utils.check_subnet_ip(
subnet.cidr, gateway))
if is_old_gateway_not_in_subnet:
v4_onlink = device.route.list_onlink_routes(
constants.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(
constants.IP_VERSION_6)
existing_onlink_routes = set(
r['cidr'] for r in v4_onlink + v6_onlink)
if gateway in existing_onlink_routes:
device.route.delete_route(gateway, scope='link')
is_new_gateway_not_in_subnet = (subnet.gateway_ip and
not ipam_utils.check_subnet_ip(
subnet.cidr,
subnet.gateway_ip))
if is_new_gateway_not_in_subnet:
device.route.add_route(subnet.gateway_ip, scope='link')
device.route.add_gateway(subnet.gateway_ip)
return
# No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here.
if gateway is not None:
LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
device.route.delete_gateway(gateway)
def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
"""Set up the existing DHCP port, if there is one."""
# To avoid pylint thinking that port might be undefined after
# the following loop...
port = None
# Look for an existing DHCP port for this network.
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
# If using gateway IPs on this port, we can skip the
# following code, whose purpose is just to review and
# update the Neutron-allocated IP addresses for the
# port.
if self.driver.use_gateway_ips:
return port
# Otherwise break out, as we now have the DHCP port
# whose subnets and addresses we need to review.
break
else:
return None
# Compare what the subnets should be against what is already
# on the port.
dhcp_enabled_subnet_ids = set(dhcp_subnets)
port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
# If those differ, we need to call update.
if dhcp_enabled_subnet_ids != port_subnet_ids:
# Collect the subnets and fixed IPs that the port already
# has, for subnets that are still in the DHCP-enabled set.
wanted_fixed_ips = []
for fixed_ip in port.fixed_ips:
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
wanted_fixed_ips.append(
{'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
# Add subnet IDs for new DHCP-enabled subnets.
wanted_fixed_ips.extend(
dict(subnet_id=s)
for s in dhcp_enabled_subnet_ids - port_subnet_ids)
# Update the port to have the calculated subnets and fixed
# IPs. The Neutron server will allocate a fresh IP for
# each subnet that doesn't already have one.
port = self.plugin.update_dhcp_port(
port.id,
{'port': {'network_id': network.id,
'fixed_ips': wanted_fixed_ips}})
if not port:
raise exceptions.Conflict()
return port
def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
"""Setup the reserved DHCP port, if there is one."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == n_const.DEVICE_ID_RESERVED_DHCP_PORT:
try:
port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
except oslo_messaging.RemoteError as e:
if e.exc_type == n_exc.DhcpPortInUse:
LOG.info(_LI("Skipping DHCP port %s as it is "
"already in use"), port.id)
continue
raise
if port:
return port
def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
"""Create and set up new DHCP port for the specified network."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Creating new one.',
{'device_id': device_id, 'network_id': network.id})
# Make a list of the subnets that need a unique IP address for
# this DHCP port.
if self.driver.use_gateway_ips:
unique_ip_subnets = []
else:
unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=unique_ip_subnets)
return self.plugin.create_dhcp_port({'port': port_dict})
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
# The ID that the DHCP port will have (or already has).
device_id = self.get_device_id(network)
# Get the set of DHCP-enabled subnets on this network.
dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
if subnet.enable_dhcp}
# There are 3 cases: either the DHCP port already exists (but
# might need to be updated for a changed set of subnets); or
# some other code has already prepared a 'reserved' DHCP port,
# and we just need to adopt that; or we need to create a new
# DHCP port. Try each of those in turn until we have a DHCP
# port.
for setup_method in (self._setup_existing_dhcp_port,
self._setup_reserved_dhcp_port,
self._setup_new_dhcp_port):
dhcp_port = setup_method(network, device_id, dhcp_subnets)
if dhcp_port:
break
else:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=dhcp_subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item
for item in fixed_ips]
dhcp_port.fixed_ips = ips
return dhcp_port
def _update_dhcp_port(self, network, port):
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
def _cleanup_stale_devices(self, network, dhcp_port):
LOG.debug("Cleaning stale devices for network %s", network.id)
dev_name = self.driver.get_device_name(dhcp_port)
ns_ip = ip_lib.IPWrapper(namespace=network.namespace)
for d in ns_ip.get_devices(exclude_loopback=True):
# delete all devices except current active DHCP port device
if d.name != dev_name:
LOG.debug("Found stale device %s, deleting", d.name)
self.driver.unplug(d.name, namespace=network.namespace)
def setup(self, network):
"""Create and initialize a device for network's DHCP on this host."""
port = self.setup_dhcp_port(network)
self._update_dhcp_port(network, port)
interface_name = self.get_interface_name(network, port)
if ip_lib.ensure_device_is_ready(interface_name,
namespace=network.namespace):
LOG.debug('Reusing existing device: %s.', interface_name)
else:
try:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
namespace=network.namespace,
mtu=network.get('mtu'))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to plug DHCP port for '
'network %s. Releasing port.'),
network.id)
self.plugin.release_dhcp_port(network.id, port.device_id)
self.fill_dhcp_udp_checksums(namespace=network.namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
if not ipv6_utils.is_auto_address_subnet(subnet):
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if self.driver.use_gateway_ips:
# For each DHCP-enabled subnet, add that subnet's gateway
# IP address to the Linux device for the DHCP port.
for subnet in network.subnets:
if not subnet.enable_dhcp:
continue
gateway = subnet.gateway_ip
if gateway:
net = netaddr.IPNetwork(subnet.cidr)
ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
if self.conf.enable_isolated_metadata:
ip_cidrs.append(METADATA_DEFAULT_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)
self._set_default_route(network, interface_name)
try:
self._cleanup_stale_devices(network, port)
except Exception:
            # catch everything as we don't want to fail because of the
            # cleanup step
LOG.error(_LE("Exception during stale dhcp device cleanup"))
return interface_name
def update(self, network, device_name):
"""Update device settings for the network's DHCP on this host."""
self._set_default_route(network, device_name)
def destroy(self, network, device_name):
"""Destroy the device used for the network's DHCP on this host."""
if device_name:
self.driver.unplug(device_name, namespace=network.namespace)
else:
LOG.debug('No interface exists for network %s', network.id)
self.plugin.release_dhcp_port(network.id,
self.get_device_id(network))
def fill_dhcp_udp_checksums(self, namespace):
"""Ensure DHCP reply packets always have correct UDP checksums."""
iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False,
namespace=namespace)
ipv4_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill'
% constants.DHCP_RESPONSE_PORT)
iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule)
iptables_mgr.apply()
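    # Illustrative note (not part of the original module): assuming
    # constants.DHCP_RESPONSE_PORT is the DHCP client port (68), the rule
    # added above is equivalent to running, inside the network namespace:
    #
    #     iptables -t mangle -A POSTROUTING -p udp -m udp --dport 68 \
    #         -j CHECKSUM --checksum-fill
    #
    # so DHCP replies leave the agent with a valid UDP checksum.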
| {
"content_hash": "48cffafd2d1ef9374e0c7443bbb8d5ba",
"timestamp": "",
"source": "github",
"line_count": 1286,
"max_line_length": 79,
"avg_line_length": 41.052099533437016,
"alnum_prop": 0.5495614948951566,
"repo_name": "bigswitch/neutron",
"id": "ee855dc9acca2ce3d560d74e8550cd4278ff212b",
"size": "53429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/dhcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8468247"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
} |
from PythonQt import QtCore, QtGui
from director import lcmUtils
from director.simpletimer import SimpleTimer
from director.timercallback import TimerCallback
import subprocess
import os
import sys
class LCMLoggerWidget(object):
def __init__(self, statusBar=None):
self.manager = lcmUtils.LCMLoggerManager()
self.statusBar = statusBar
self.lastActiveLogFile = None
self.numProcesses = 0
self.numLogFiles = 0
self.userTag = ''
self.button = QtGui.QPushButton('')
self.button.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.button.connect('customContextMenuRequested(const QPoint&)', self.showContextMenu)
self.button.connect('clicked()', self.onClick)
self.timer = TimerCallback(targetFps=0.25)
self.timer.callback = self.updateState
self.timer.start()
def updateState(self):
t = SimpleTimer()
self.manager.updateExistingLoggerProcesses()
activeLogFiles = self.manager.getActiveLogFilenames()
self.numProcesses = len(self.manager.getActiveLoggerPids())
self.numLogFiles = len(activeLogFiles)
if self.numLogFiles == 1:
self.lastActiveLogFile = activeLogFiles[0]
if self.numProcesses == 0:
self.button.text = 'start logger'
elif self.numProcesses == 1:
self.button.text = 'stop logger'
elif self.numProcesses > 1:
self.button.text = 'stop all loggers'
statusDescription = 'active' if self.numProcesses else 'last'
logFileDescription = self.lastActiveLogFile or '<unknown>'
self.button.setToolTip('%s log file: %s' % (statusDescription, logFileDescription))
def onClick(self):
if self.numProcesses == 0:
self.manager.startNewLogger(tag=self.userTag)
self.updateState()
self.showStatusMessage('start logging: ' + self.lastActiveLogFile)
else:
self.manager.killAllLoggingProcesses()
self.showStatusMessage('stopped logging')
self.updateState()
def showStatusMessage(self, msg, timeout=2000):
if self.statusBar:
self.statusBar.showMessage(msg, timeout)
def showContextMenu(self, clickPosition):
globalPos = self.button.mapToGlobal(clickPosition)
menu = QtGui.QMenu()
action = menu.addAction('Stop logger')
action.enabled = (self.numProcesses > 0)
action = menu.addAction('Stop and delete log file')
action.enabled = (self.numProcesses > 0 and self.lastActiveLogFile)
action = menu.addAction('Set logger tag')
action.enabled = (self.numProcesses == 0)
action = menu.addAction('Copy log filename')
action.enabled = (self.lastActiveLogFile is not None)
action = menu.addAction('Review log')
action.enabled = (self.lastActiveLogFile is not None)
selectedAction = menu.exec_(globalPos)
if selectedAction is None:
return
if selectedAction.text == 'Copy log filename':
clipboard = QtGui.QApplication.instance().clipboard()
clipboard.setText(self.lastActiveLogFile)
self.showStatusMessage('copy to clipboard: ' + self.lastActiveLogFile)
elif selectedAction.text == 'Stop logger':
self.manager.killAllLoggingProcesses()
self.showStatusMessage('stopped logger')
self.updateState()
elif selectedAction.text == 'Stop and delete log file':
logFileToRemove = self.lastActiveLogFile
self.manager.killAllLoggingProcesses()
self.updateState()
os.remove(logFileToRemove)
self.showStatusMessage('deleted: ' + logFileToRemove)
elif selectedAction.text == 'Set logger tag':
inputDialog = QtGui.QInputDialog()
inputDialog.setInputMode(inputDialog.TextInput)
inputDialog.setLabelText('Log file tag:')
inputDialog.setWindowTitle('Enter tag')
inputDialog.setTextValue(self.userTag)
result = inputDialog.exec_()
if result:
tag = inputDialog.textValue()
self.userTag = tag
self.showStatusMessage('Set lcm logger tag: ' + self.userTag)
elif selectedAction.text == 'Review log':
newEnv = dict(os.environ)
newEnv['LCM_DEFAULT_URL'] = newEnv['LCM_REVIEW_DEFAULT_URL']
devnull = open(os.devnull, 'w')
# Pass entire command line invocation of director to subprocess including cfg and json paths
subprocess.Popen(sys.argv, stdout=devnull, stderr=devnull, env=newEnv)
subprocess.Popen(['lcm-logplayer-gui', self.lastActiveLogFile], stdout=devnull, stderr=devnull, env=newEnv)
subprocess.Popen(['bot-procman-sheriff', '-o'], stdout=devnull, stderr=devnull, env=newEnv)
| {
"content_hash": "408f95b4b06ac6a10c445888df1a57d4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 119,
"avg_line_length": 37.80152671755725,
"alnum_prop": 0.6453957996768982,
"repo_name": "patmarion/director",
"id": "838101ffdf62d920116635dde6730232dcdc090e",
"size": "4952",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/director/lcmloggerwidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "121912"
},
{
"name": "C++",
"bytes": "565385"
},
{
"name": "CMake",
"bytes": "82478"
},
{
"name": "Dockerfile",
"bytes": "2510"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "161948"
},
{
"name": "Makefile",
"bytes": "5014"
},
{
"name": "Python",
"bytes": "2282093"
},
{
"name": "Shell",
"bytes": "14291"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Set, Tuple, Optional, Text
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
from django.http import HttpResponse
import django.contrib.auth
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib.actions import do_create_user
from zerver.models import UserProfile, Realm, get_user_profile_by_id, \
get_user_profile_by_email, remote_user_to_email, email_to_username, \
get_realm, get_realm_by_email_domain
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
from social_core.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
from social_core.exceptions import AuthFailed, SocialAuthBaseException
from django.contrib.auth import authenticate
from zerver.lib.users import check_full_name
from zerver.lib.request import JsonableError
from zerver.lib.utils import check_subdomain, get_subdomain
from social_django.models import DjangoStorage
from social_django.strategy import DjangoStrategy
def pad_method_dict(method_dict):
# type: (Dict[Text, bool]) -> Dict[Text, bool]
"""Pads an authentication methods dict to contain all auth backends
supported by the software, regardless of whether they are
configured on this server"""
for key in AUTH_BACKEND_NAME_MAP:
if key not in method_dict:
method_dict[key] = False
return method_dict
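# Illustrative usage sketch (not part of the original module); the enabled
# backend below is a hypothetical example.
def _example_pad_method_dict():
    # type: () -> Dict[Text, bool]
    partial = {u'Email': True}  # e.g. a realm with only email auth enabled
    padded = pad_method_dict(partial)
    # padded now has an entry for every key in AUTH_BACKEND_NAME_MAP, with
    # False for anything not explicitly enabled (e.g. padded[u'GitHub']).
    return padded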
def auth_enabled_helper(backends_to_check, realm):
# type: (List[Text], Optional[Realm]) -> bool
if realm is not None:
enabled_method_dict = realm.authentication_methods_dict()
pad_method_dict(enabled_method_dict)
else:
enabled_method_dict = dict((method, True) for method in Realm.AUTHENTICATION_FLAGS)
pad_method_dict(enabled_method_dict)
for supported_backend in django.contrib.auth.get_backends():
for backend_name in backends_to_check:
backend = AUTH_BACKEND_NAME_MAP[backend_name]
if enabled_method_dict[backend_name] and isinstance(supported_backend, backend):
return True
return False
def ldap_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'LDAP'], realm)
def email_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Email'], realm)
def password_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return ldap_auth_enabled(realm) or email_auth_enabled(realm)
def dev_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Dev'], realm)
def google_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'Google'], realm)
def github_auth_enabled(realm=None):
# type: (Optional[Realm]) -> bool
return auth_enabled_helper([u'GitHub'], realm)
def any_oauth_backend_enabled(realm=None):
# type: (Optional[Realm]) -> bool
"""Used by the login page process to determine whether to show the
'OR' for login with Google"""
return auth_enabled_helper([u'GitHub', u'Google'], realm)
def common_get_active_user_by_email(email, return_data=None):
# type: (Text, Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return None
if not user_profile.is_active:
if return_data is not None:
return_data['inactive_user'] = True
return None
if user_profile.realm.deactivated:
if return_data is not None:
return_data['inactive_realm'] = True
return None
return user_profile
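# Illustrative usage sketch (not part of the original module): return_data is
# an "out parameter" that explains why a lookup failed. The email below is
# hypothetical.
def _example_active_user_lookup(email=u'hamlet@example.com'):
    # type: (Text) -> Tuple[Optional[UserProfile], Dict[str, Any]]
    return_data = {}  # type: Dict[str, Any]
    user_profile = common_get_active_user_by_email(email, return_data=return_data)
    # On failure, return_data may now contain 'inactive_user' or
    # 'inactive_realm'; a missing account leaves it empty.
    return user_profile, return_data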
class ZulipAuthMixin(object):
def get_user(self, user_profile_id):
# type: (int) -> Optional[UserProfile]
""" Get a UserProfile object from the user_profile_id. """
try:
return get_user_profile_by_id(user_profile_id)
except UserProfile.DoesNotExist:
return None
class SocialAuthMixin(ZulipAuthMixin):
auth_backend_name = None # type: Text
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
raise NotImplementedError
def authenticate(self,
realm_subdomain='', # type: Optional[Text]
storage=None, # type: Optional[DjangoStorage]
strategy=None, # type: Optional[DjangoStrategy]
user=None, # type: Optional[Dict[str, Any]]
return_data=None, # type: Optional[Dict[str, Any]]
response=None, # type: Optional[Dict[str, Any]]
backend=None # type: Optional[GithubOAuth2]
):
# type: (...) -> Optional[UserProfile]
"""
Django decides which `authenticate` to call by inspecting the
        arguments, so it's best to define `authenticate` with explicit,
        well-defined keyword arguments.
Keeping this function separate so that it can easily be
overridden.
"""
if user is None:
user = {}
if return_data is None:
return_data = {}
if response is None:
response = {}
return self._common_authenticate(self,
realm_subdomain=realm_subdomain,
storage=storage,
strategy=strategy,
user=user,
return_data=return_data,
response=response,
backend=backend)
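    # Illustrative note (not part of the original module): Django's
    # authenticate() tries each configured backend in turn and skips any
    # backend whose authenticate() does not accept the keyword arguments it
    # was given, which is why the signature above is spelled out instead of
    # using **kwargs. A hypothetical call that would be routed here:
    #
    #     authenticate(backend=backend, strategy=strategy, storage=storage,
    #                  response=oauth_response, user=None,
    #                  realm_subdomain=u'acme', return_data={})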
def _common_authenticate(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[UserProfile]
return_data = kwargs.get('return_data', {})
email_address = self.get_email_address(*args, **kwargs)
if not email_address:
return_data['invalid_email'] = True
return None
try:
user_profile = get_user_profile_by_email(email_address)
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(kwargs.get("realm_subdomain"),
user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not auth_enabled_helper([self.auth_backend_name], user_profile.realm):
return_data["auth_backend_disabled"] = True
return None
return user_profile
def process_do_auth(self, user_profile, *args, **kwargs):
# type: (UserProfile, *Any, **Any) -> Optional[HttpResponse]
# These functions need to be imported here to avoid cyclic
# dependency.
from zerver.views.auth import (login_or_register_remote_user,
redirect_to_subdomain_login_url)
from zerver.views.registration import redirect_and_log_into_subdomain
return_data = kwargs.get('return_data', {})
inactive_user = return_data.get('inactive_user')
inactive_realm = return_data.get('inactive_realm')
invalid_subdomain = return_data.get('invalid_subdomain')
invalid_email = return_data.get('invalid_email')
if inactive_user or inactive_realm:
            # We can't send the user to the registration workflow with
            # these errors, so just return and redirect to the login page.
return None
if invalid_email:
# In case of invalid email, we will end up on registration page.
# This seems better than redirecting to login page.
logging.warning(
"{} got invalid email argument.".format(self.auth_backend_name)
)
strategy = self.strategy # type: ignore # This comes from Python Social Auth.
request = strategy.request
email_address = self.get_email_address(*args, **kwargs)
full_name = self.get_full_name(*args, **kwargs)
is_signup = strategy.session_get('is_signup') == '1'
subdomain = strategy.session_get('subdomain')
if not subdomain:
return login_or_register_remote_user(request, email_address,
user_profile, full_name,
invalid_subdomain=bool(invalid_subdomain),
is_signup=is_signup)
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist:
return redirect_to_subdomain_login_url()
return redirect_and_log_into_subdomain(realm, full_name, email_address,
is_signup=is_signup)
def auth_complete(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[HttpResponse]
"""
Returning `None` from this function will redirect the browser
to the login page.
"""
try:
            # Call the auth_complete method of BaseOAuth2 in Python Social Auth
return super(SocialAuthMixin, self).auth_complete(*args, **kwargs) # type: ignore
except AuthFailed:
return None
except SocialAuthBaseException as e:
logging.exception(e)
return None
class ZulipDummyBackend(ZulipAuthMixin):
"""
Used when we want to log you in but we don't know which backend to use.
"""
def authenticate(self, username=None, realm_subdomain=None, use_dummy_backend=False,
return_data=None):
# type: (Optional[Text], Optional[Text], bool, Optional[Dict[str, Any]]) -> Optional[UserProfile]
assert username is not None
if use_dummy_backend:
user_profile = common_get_active_user_by_email(username)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
return user_profile
return None
class EmailAuthBackend(ZulipAuthMixin):
"""
Email Authentication Backend
Allows a user to sign in using an email/password pair rather than
a username/password pair.
"""
def authenticate(self, username=None, password=None, realm_subdomain=None, return_data=None):
# type: (Optional[Text], Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
""" Authenticate a user based on email address as the user name. """
if username is None or password is None:
# Return immediately. Otherwise we will look for a SQL row with
# NULL username. While that's probably harmless, it's needless
# exposure.
return None
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not password_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['password_auth_disabled'] = True
return None
if not email_auth_enabled(user_profile.realm):
if return_data is not None:
return_data['email_auth_disabled'] = True
return None
if user_profile.check_password(password):
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
return user_profile
return None
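# Illustrative usage sketch (not part of the original module): EmailAuthBackend
# is normally reached through Django's authenticate(); the credentials below
# are hypothetical.
def _example_email_login(email, password, subdomain=u''):
    # type: (Text, str, Text) -> Tuple[Optional[UserProfile], Dict[str, Any]]
    return_data = {}  # type: Dict[str, Any]
    user_profile = authenticate(username=email, password=password,
                                realm_subdomain=subdomain,
                                return_data=return_data)
    # On failure, return_data may carry flags such as 'password_auth_disabled',
    # 'email_auth_disabled' or 'invalid_subdomain'.
    return user_profile, return_data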
class GoogleMobileOauth2Backend(ZulipAuthMixin):
"""
Google Apps authentication for mobile devices
Allows a user to sign in using a Google-issued OAuth2 token.
Ref:
https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
"""
def authenticate(self, google_oauth2_token=None, realm_subdomain=None, return_data=None):
# type: (Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
if return_data is None:
return_data = {}
try:
token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
except AppIdentityError:
return None
if token_payload["email_verified"] in (True, "true"):
try:
user_profile = get_user_profile_by_email(token_payload["email"])
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
if not user_profile.is_active:
return_data["inactive_user"] = True
return None
if user_profile.realm.deactivated:
return_data["inactive_realm"] = True
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return_data["invalid_subdomain"] = True
return None
if not google_auth_enabled(realm=user_profile.realm):
return_data["google_auth_disabled"] = True
return None
return user_profile
else:
return_data["valid_attestation"] = False
return None
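# Illustrative usage sketch (not part of the original module): mobile clients
# send a Google-issued ID token, which this backend verifies and maps to a
# UserProfile. The token value below is hypothetical.
def _example_google_mobile_login(id_token=u'<google-id-token>', subdomain=u''):
    # type: (Text, Text) -> Tuple[Optional[UserProfile], Dict[str, Any]]
    return_data = {}  # type: Dict[str, Any]
    user_profile = authenticate(google_oauth2_token=id_token,
                                realm_subdomain=subdomain,
                                return_data=return_data)
    # return_data['valid_attestation'] is False when Google reports the
    # token's email address as unverified.
    return user_profile, return_data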
class ZulipRemoteUserBackend(RemoteUserBackend):
create_unknown_user = False
def authenticate(self, remote_user, realm_subdomain=None):
# type: (str, Optional[Text]) -> Optional[UserProfile]
if not remote_user:
return None
email = remote_user_to_email(remote_user)
user_profile = common_get_active_user_by_email(email)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
if not auth_enabled_helper([u"RemoteUser"], user_profile.realm):
return None
return user_profile
class ZulipLDAPException(Exception):
pass
class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
# Don't use Django LDAP's permissions functions
def has_perm(self, user, perm, obj=None):
# type: (UserProfile, Any, Any) -> bool
# Using Any type is safe because we are not doing anything with
# the arguments.
return False
def has_module_perms(self, user, app_label):
# type: (UserProfile, str) -> bool
return False
def get_all_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def get_group_permissions(self, user, obj=None):
# type: (UserProfile, Any) -> Set
# Using Any type is safe because we are not doing anything with
# the arguments.
return set()
def django_to_ldap_username(self, username):
# type: (Text) -> Text
if settings.LDAP_APPEND_DOMAIN:
if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN):
raise ZulipLDAPException("Username does not match LDAP domain.")
return email_to_username(username)
return username
def ldap_to_django_username(self, username):
# type: (str) -> str
if settings.LDAP_APPEND_DOMAIN:
return "@".join((username, settings.LDAP_APPEND_DOMAIN))
return username
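# Illustrative sketch (not part of the original module): with a hypothetical
# LDAP_APPEND_DOMAIN = 'example.com', the two converters above are inverses:
#
#     django_to_ldap_username('hamlet@example.com')  -> 'hamlet'
#     ldap_to_django_username('hamlet')              -> 'hamlet@example.com'
#
# Without LDAP_APPEND_DOMAIN both return their argument unchanged, and a
# mismatched domain makes django_to_ldap_username raise ZulipLDAPException.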
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None, return_data=None):
# type: (Text, str, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
try:
if settings.REALMS_HAVE_SUBDOMAINS:
self._realm = get_realm(realm_subdomain)
else:
self._realm = get_realm_by_email_domain(username)
username = self.django_to_ldap_username(username)
user_profile = ZulipLDAPAuthBackendBase.authenticate(self, username, password)
if user_profile is None:
return None
if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
return None
return user_profile
except Realm.DoesNotExist:
return None
except ZulipLDAPException:
return None
def get_or_create_user(self, username, ldap_user):
# type: (str, _LDAPUser) -> Tuple[UserProfile, bool]
try:
user_profile = get_user_profile_by_email(username)
if not user_profile.is_active or user_profile.realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
if not ldap_auth_enabled(user_profile.realm):
raise ZulipLDAPException("LDAP Authentication is not enabled")
return user_profile, False
except UserProfile.DoesNotExist:
# No need to check for an inactive user since they don't exist yet
if self._realm.deactivated:
raise ZulipLDAPException("Realm has been deactivated")
full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"]
short_name = full_name = ldap_user.attrs[full_name_attr][0]
try:
full_name = check_full_name(full_name)
except JsonableError as e:
raise ZulipLDAPException(e.error)
if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"]
short_name = ldap_user.attrs[short_name_attr][0]
user_profile = do_create_user(username, None, self._realm, full_name, short_name)
return user_profile, True
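# Illustrative configuration sketch (not part of the original module):
# get_or_create_user() reads the name fields through a mapping such as the
# hypothetical example below; the exact attribute names depend on the
# deployment's LDAP schema.
#
#     AUTH_LDAP_USER_ATTR_MAP = {
#         'full_name': 'cn',
#         'short_name': 'uid',   # optional
#     }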
# Just like ZulipLDAPAuthBackend, but doesn't let you log in.
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
def authenticate(self, username, password, realm_subdomain=None):
# type: (Text, str, Optional[Text]) -> None
return None
class DevAuthBackend(ZulipAuthMixin):
# Allow logging in as any user without a password.
# This is used for convenience when developing Zulip.
def authenticate(self, username, realm_subdomain=None, return_data=None):
# type: (Text, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
user_profile = common_get_active_user_by_email(username, return_data=return_data)
if user_profile is None:
return None
if not dev_auth_enabled(user_profile.realm):
return None
return user_profile
class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2):
auth_backend_name = u"GitHub"
def get_email_address(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Text]
try:
return kwargs['response']['email']
except KeyError:
return None
def get_full_name(self, *args, **kwargs):
# type: (*Any, **Any) -> Text
# In case of any error return an empty string. Name is used by
# the registration page to pre-populate the name field. However,
# if it is not supplied, our registration process will make sure
# that the user enters a valid name.
try:
name = kwargs['response']['name']
except KeyError:
name = ''
if name is None:
return ''
return name
def do_auth(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[HttpResponse]
"""
This function is called once the OAuth2 workflow is complete. We
override this function to:
        1. Inject `return_data` and `realm_subdomain` kwargs. These will
           be used by the `authenticate()` function to make the decision.
2. Call the proper `do_auth` function depending on whether
we are doing individual, team or organization based GitHub
authentication.
The actual decision on authentication is done in
SocialAuthMixin._common_authenticate().
"""
kwargs['return_data'] = {}
request = self.strategy.request
kwargs['realm_subdomain'] = get_subdomain(request)
user_profile = None
team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID
org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME
if (team_id is None and org_name is None):
try:
user_profile = GithubOAuth2.do_auth(self, *args, **kwargs)
except AuthFailed:
logging.info("User authentication failed.")
user_profile = None
elif (team_id):
backend = GithubTeamOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User is not member of GitHub team.")
user_profile = None
elif (org_name):
backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri)
try:
user_profile = backend.do_auth(*args, **kwargs)
except AuthFailed:
logging.info("User is not member of GitHub organization.")
user_profile = None
return self.process_do_auth(user_profile, *args, **kwargs)
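# Illustrative configuration sketch (not part of the original module): which
# GitHub flow do_auth() delegates to is driven purely by settings; the values
# below are hypothetical.
#
#     SOCIAL_AUTH_GITHUB_TEAM_ID = None, SOCIAL_AUTH_GITHUB_ORG_NAME = None
#         -> plain GithubOAuth2 (any GitHub account may authenticate)
#     SOCIAL_AUTH_GITHUB_TEAM_ID = '123456'
#         -> GithubTeamOAuth2 (requires membership in that team)
#     SOCIAL_AUTH_GITHUB_ORG_NAME = 'acme'
#         -> GithubOrganizationOAuth2 (requires membership in that org)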
AUTH_BACKEND_NAME_MAP = {
u'Dev': DevAuthBackend,
u'Email': EmailAuthBackend,
u'GitHub': GitHubAuthBackend,
u'Google': GoogleMobileOauth2Backend,
u'LDAP': ZulipLDAPAuthBackend,
u'RemoteUser': ZulipRemoteUserBackend,
} # type: Dict[Text, Any]
| {
"content_hash": "d21354776353532ca60d82ede648cad6",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 114,
"avg_line_length": 39.99102333931777,
"alnum_prop": 0.6127946127946128,
"repo_name": "ryanbackman/zulip",
"id": "ac52a7f2ee4df7b6b7e9b3a3b84e79f465b6fdae",
"size": "22275",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zproject/backends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "392722"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "590505"
},
{
"name": "JavaScript",
"bytes": "1783783"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87372"
},
{
"name": "Python",
"bytes": "3908421"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "38065"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
import matplotlib.pyplot as plt
import pandas as pd
def initialize_matplotlib():
inches_per_pt = 1.0 / 72.27
fig_width = 240 * inches_per_pt # width in inches
fig_height = 160 * inches_per_pt #.4 * fig_width
rc('axes', labelsize=6)
rc('axes', titlesize=6)
rc('axes', unicode_minus=False)
rc('axes', grid=False)
rc('figure', figsize=(fig_width, fig_height))
rc('grid', linestyle=':')
rc('font', family='serif')
rc('legend', fontsize=5)
rc('lines', linewidth=.7)
rc('ps', usedistiller='xpdf')
rc('text', usetex=True)
rc('xtick', labelsize=6)
rc('ytick', labelsize=6)
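# Illustrative note (not part of the original script): 240 pt / 72.27 pt-per-inch
# works out to a figure of roughly 3.32 x 2.21 inches, i.e. sized to fit a
# (presumed) 240 pt LaTeX column width.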
initialize_matplotlib()
df = pd.read_excel('results_for_figure1.xlsx', sheetname='Figure3')
styles = {
'TribeFlow-Dyn':'D',
'TribeFlow':'o',
#'FPMC':
#'PRLME':
}
colors = {
'LFM-1k':'g',
'LFM-G':'m',
'Bkite':'y',
'FourSQ':'b',
'Yoo':'r'
}
for method in styles:
for dset in colors:
idx = (df['Name'] == method) & (df['Dataset'] == dset)
x_ax = df[idx]['Runtime_s']
y_ax = df[idx]['MRR']
horizontalalignment = 'left'
verticalalignment = 'bottom'
if colors[dset] == 'g':
verticalalignment = 'top'
for x, y in zip(x_ax, y_ax):
plt.text(x, y, \
method + '\n' + \
dset, fontsize=7, \
verticalalignment=verticalalignment, \
horizontalalignment=horizontalalignment)
ps = colors[dset] + styles[method]
plt.semilogx(x_ax, y_ax, ps, alpha=.5, markersize=5)
ax = plt.gca()
ax.tick_params(direction='out', pad=0.3)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.ylim((0, 0.16))
plt.xlim((1e2, 1e6))
plt.minorticks_off()
plt.ylabel('MRR', labelpad=0)
plt.xlabel('Training Time (s)', labelpad=0)
plt.tight_layout(pad=0.2)
plt.savefig('figure3.pdf')
| {
"content_hash": "96edde13485a09be9d943cf5fd6cc6fe",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 67,
"avg_line_length": 26.457831325301203,
"alnum_prop": 0.5655737704918032,
"repo_name": "flaviovdf/tribeflow",
"id": "f86056c51beecacdac10dd2ecb37a3c7a2ee74f7",
"size": "2214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/paper-data/plot_figure3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "16016"
},
{
"name": "Jupyter Notebook",
"bytes": "58814"
},
{
"name": "Makefile",
"bytes": "337"
},
{
"name": "Python",
"bytes": "158324"
},
{
"name": "Shell",
"bytes": "3233"
}
],
"symlink_target": ""
} |
import time
import fixtures
import mock
import nova
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import policy_fixture
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import guest as libvirt_guest
class TestSerialConsoleLiveMigrate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestSerialConsoleLiveMigrate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# Replace libvirt with fakelibvirt
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
nova.tests.unit.fake_network.set_stub_network_methods(self)
self.flags(compute_driver='libvirt.LibvirtDriver')
self.flags(enabled=True, group="serial_console")
self.flags(enabled=False, group="vnc")
self.flags(enabled=False, group="spice")
self.flags(use_usb_tablet=False, group="libvirt")
self.flags(host="test_compute1")
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
self.compute = self.start_service('compute', host='test_compute1')
self.consoleauth = self.start_service('consoleauth')
self.useFixture(cast_as_call.CastAsCall(self))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector')
@mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
@mock.patch.object(fakelibvirt.Domain, 'migrateToURI2')
@mock.patch('nova.virt.libvirt.host.Host.get_connection')
@mock.patch('nova.virt.disk.api.get_disk_size', return_value=1024)
@mock.patch('os.path.getsize', return_value=1024)
@mock.patch('nova.conductor.tasks.live_migrate.LiveMigrationTask.'
'_check_destination_is_not_source', return_value=False)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_serial_console_live_migrate(self, mock_create_image,
mock_conductor_source_check,
mock_path_get_size,
mock_get_disk_size,
mock_host_get_connection,
mock_migrate_to_uri,
mock_get_job_info,
mock_get_volume_connector):
"""Regression test for bug #1595962.
If the graphical consoles VNC and SPICE are disabled, the
live-migration of an instance will result in an ERROR state.
VNC and SPICE are usually disabled on IBM z systems platforms
where graphical consoles are not available. The serial console
is then enabled and VNC + SPICE are disabled.
The error will be raised at
https://github.com/openstack/nova/blob/
4f33047d07f5a11b208c344fe206aba01cd8e6fe/
nova/virt/libvirt/driver.py#L5842-L5852
"""
mock_get_job_info.return_value = libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
hv_version=2001000)
mock_host_get_connection.return_value = fake_connection
server_attr = dict(name='server1',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server = self.api.post_server({'server': server_attr})
server_id = server['id']
self.wait_till_active_or_timeout(server_id)
post = {"os-migrateLive": {
"block_migration": False,
"disk_over_commit": False,
"host": "test_compute1"
}}
try:
# This should succeed
self.admin_api.post_server_action(server_id, post)
self.wait_till_active_or_timeout(server_id)
except Exception as ex:
self.fail(ex.response.content)
def wait_till_active_or_timeout(self, server_id):
timeout = 0.0
server = self.api.get_server(server_id)
while server['status'] != "ACTIVE" and timeout < 10.0:
time.sleep(.1)
timeout += .1
server = self.api.get_server(server_id)
if server['status'] != "ACTIVE":
self.fail("The server is not active after the timeout.")
| {
"content_hash": "09e3b430e052cd881faa98c34c80c139",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 74,
"avg_line_length": 43.378787878787875,
"alnum_prop": 0.6105483758295495,
"repo_name": "jianghuaw/nova",
"id": "df0fb6af7a3e81a3a5165a6df9ec3f92450808e6",
"size": "6301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/regressions/test_bug_1595962.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
import sys
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import ET
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.common.types import InvalidCredsError
from libcloud.backup.base import BackupTargetJob
from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData
from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import BackupFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionData_v2_3_Tests(unittest.TestCase):
def setUp(self):
DimensionData.connectionCls.active_api_version = '2.3'
DimensionData.connectionCls.conn_class = DimensionDataMockHttp
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
def test_invalid_creds(self):
DimensionDataMockHttp.type = 'UNAUTHORIZED'
with self.assertRaises(InvalidCredsError):
self.driver.list_targets()
def test_list_targets(self):
targets = self.driver.list_targets()
self.assertEqual(len(targets), 2)
self.assertEqual(targets[0].id, '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10')
self.assertEqual(targets[0].address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise')
def test_create_target(self):
target = self.driver.create_target(
'name',
'e75ead52-692f-4314-8725-c8a4f4d13a87',
extra={'servicePlan': 'Enterprise'})
self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(target.extra['servicePlan'], 'Enterprise')
def test_create_target_DEFAULT(self):
DimensionDataMockHttp.type = 'DEFAULT'
target = self.driver.create_target(
'name',
'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
def test_create_target_EXISTS(self):
DimensionDataMockHttp.type = 'EXISTS'
with self.assertRaises(DimensionDataAPIException) as context:
self.driver.create_target(
'name',
'e75ead52-692f-4314-8725-c8a4f4d13a87',
extra={'servicePlan': 'Enterprise'})
self.assertEqual(context.exception.code, 'ERROR')
self.assertEqual(context.exception.msg, 'Cloud backup for this server is already enabled or being enabled (state: NORMAL).')
def test_update_target(self):
target = self.driver.list_targets()[0]
extra = {'servicePlan': 'Essentials'}
new_target = self.driver.update_target(target, extra=extra)
self.assertEqual(new_target.extra['servicePlan'], 'Essentials')
def test_update_target_DEFAULT(self):
DimensionDataMockHttp.type = 'DEFAULT'
target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
self.driver.update_target(target)
def test_update_target_STR(self):
target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
extra = {'servicePlan': 'Essentials'}
new_target = self.driver.update_target(target, extra=extra)
self.assertEqual(new_target.extra['servicePlan'], 'Essentials')
def test_delete_target(self):
target = self.driver.list_targets()[0]
self.assertTrue(self.driver.delete_target(target))
def test_ex_add_client_to_target(self):
target = self.driver.list_targets()[0]
client = self.driver.ex_list_available_client_types(target)[0]
storage_policy = self.driver.ex_list_available_storage_policies(target)[0]
schedule_policy = self.driver.ex_list_available_schedule_policies(target)[0]
self.assertTrue(
self.driver.ex_add_client_to_target(target, client, storage_policy,
schedule_policy, 'ON_FAILURE', 'nobody@example.com')
)
def test_ex_add_client_to_target_STR(self):
self.assertTrue(
self.driver.ex_add_client_to_target('e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux', '14 Day Storage Policy',
'12AM - 6AM', 'ON_FAILURE', 'nobody@example.com')
)
def test_ex_get_backup_details_for_target(self):
target = self.driver.list_targets()[0]
response = self.driver.ex_get_backup_details_for_target(target)
self.assertEqual(response.service_plan, 'Enterprise')
client = response.clients[0]
self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
self.assertEqual(client.type.type, 'FA.Linux')
self.assertEqual(client.running_job.progress, 5)
self.assertTrue(isinstance(client.running_job, BackupTargetJob))
self.assertEqual(len(client.alert.notify_list), 2)
self.assertTrue(isinstance(client.alert.notify_list, list))
def test_ex_get_backup_details_for_target_NOBACKUP(self):
target = self.driver.list_targets()[0].address
DimensionDataMockHttp.type = 'NOBACKUP'
response = self.driver.ex_get_backup_details_for_target(target)
self.assertTrue(response is None)
def test_ex_cancel_target_job(self):
target = self.driver.list_targets()[0]
response = self.driver.ex_get_backup_details_for_target(target)
client = response.clients[0]
self.assertTrue(isinstance(client.running_job, BackupTargetJob))
success = client.running_job.cancel()
self.assertTrue(success)
def test_ex_cancel_target_job_with_extras(self):
success = self.driver.cancel_target_job(
None,
ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
)
self.assertTrue(success)
def test_ex_cancel_target_job_FAIL(self):
DimensionDataMockHttp.type = 'FAIL'
with self.assertRaises(DimensionDataAPIException) as context:
self.driver.cancel_target_job(
None,
ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
)
self.assertEqual(context.exception.code, 'ERROR')
"""Test a backup info for a target that does not have a client"""
def test_ex_get_backup_details_for_target_NO_CLIENT(self):
DimensionDataMockHttp.type = 'NOCLIENT'
response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(response.service_plan, 'Essentials')
self.assertEqual(len(response.clients), 0)
"""Test a backup details that has a client, but no alerting or running jobs"""
def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self):
DimensionDataMockHttp.type = 'NOJOB'
response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314_8725-c8a4f4d13a87')
self.assertEqual(response.service_plan, 'Enterprise')
self.assertTrue(isinstance(response.clients, list))
self.assertEqual(len(response.clients), 1)
client = response.clients[0]
self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
self.assertEqual(client.type.type, 'FA.Linux')
self.assertIsNone(client.running_job)
self.assertIsNone(client.alert)
"""Test getting backup info for a server that doesn't exist"""
def test_ex_get_backup_details_for_target_DISABLED(self):
DimensionDataMockHttp.type = 'DISABLED'
with self.assertRaises(DimensionDataAPIException) as context:
self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(context.exception.code, 'ERROR')
self.assertEqual(context.exception.msg, 'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been provisioned for backup')
def test_ex_list_available_client_types(self):
target = self.driver.list_targets()[0]
answer = self.driver.ex_list_available_client_types(target)
self.assertEqual(len(answer), 1)
self.assertEqual(answer[0].type, 'FA.Linux')
self.assertEqual(answer[0].is_file_system, True)
self.assertEqual(answer[0].description, 'Linux File system')
def test_ex_list_available_storage_policies(self):
target = self.driver.list_targets()[0]
answer = self.driver.ex_list_available_storage_policies(target)
self.assertEqual(len(answer), 1)
self.assertEqual(answer[0].name,
'30 Day Storage Policy + Secondary Copy')
self.assertEqual(answer[0].retention_period, 30)
self.assertEqual(answer[0].secondary_location, 'Primary')
def test_ex_list_available_schedule_policies(self):
target = self.driver.list_targets()[0]
answer = self.driver.ex_list_available_schedule_policies(target)
self.assertEqual(len(answer), 1)
self.assertEqual(answer[0].name, '12AM - 6AM')
self.assertEqual(answer[0].description, 'Daily backup will start between 12AM - 6AM')
def test_ex_remove_client_from_target(self):
target = self.driver.list_targets()[0]
client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
self.assertTrue(self.driver.ex_remove_client_from_target(target, client))
def test_ex_remove_client_from_target_STR(self):
self.assertTrue(
self.driver.ex_remove_client_from_target(
'e75ead52-692f-4314-8725-c8a4f4d13a87',
'30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
)
)
def test_ex_remove_client_from_target_FAIL(self):
DimensionDataMockHttp.type = 'FAIL'
with self.assertRaises(DimensionDataAPIException) as context:
self.driver.ex_remove_client_from_target(
'e75ead52-692f-4314-8725-c8a4f4d13a87',
'30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
)
self.assertEqual(context.exception.code, 'ERROR')
self.assertTrue('Backup Client is currently performing another operation' in context.exception.msg)
def test_priv_target_to_target_address(self):
target = self.driver.list_targets()[0]
self.assertEqual(
self.driver._target_to_target_address(target),
'e75ead52-692f-4314-8725-c8a4f4d13a87'
)
def test_priv_target_to_target_address_STR(self):
self.assertEqual(
self.driver._target_to_target_address('e75ead52-692f-4314-8725-c8a4f4d13a87'),
'e75ead52-692f-4314-8725-c8a4f4d13a87'
)
def test_priv_target_to_target_address_TYPEERROR(self):
with self.assertRaises(TypeError):
self.driver._target_to_target_address([1, 2, 3])
def test_priv_client_to_client_id(self):
client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
self.assertEqual(
self.driver._client_to_client_id(client),
'30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
)
def test_priv_client_to_client_id_STR(self):
self.assertEqual(
self.driver._client_to_client_id('30b1ff76-c76d-4d7c-b39d-3b72be0384c8'),
'30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
)
def test_priv_client_to_client_id_TYPEERROR(self):
with self.assertRaises(TypeError):
self.driver._client_to_client_id([1, 2, 3])
class InvalidRequestError(Exception):
def __init__(self, tag):
super(InvalidRequestError, self).__init__("Invalid Request - %s" % tag)
class DimensionDataMockHttp(MockHttp):
fixtures = BackupFileFixtures('dimensiondata')
def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
def _oec_0_9_myaccount(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_EXISTS(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_FAIL(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
body = self.fixtures.load(
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers):
body = self.fixtures.load(
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers):
body = self.fixtures.load(
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers):
body = self.fixtures.load(
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers):
body = self.fixtures.load(
'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
body = self.fixtures.load(
'server_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers):
body = self.fixtures.load(
'_backup_client_type.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy(
self, method, url, body, headers):
body = self.fixtures.load(
'_backup_client_storagePolicy.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy(
self, method, url, body, headers):
body = self.fixtures.load(
'_backup_client_schedulePolicy.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client(
self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load(
'_backup_client_SUCCESS_PUT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
raise ValueError("Unknown Method {0}".format(method))
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT(
self, method, url, body, headers):
        # only GETs are implemented
# If we get any other method something has gone wrong
assert(method == 'GET')
body = self.fixtures.load(
'_backup_INFO_NOCLIENT.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED(
self, method, url, body, headers):
        # only GETs are implemented
# If we get any other method something has gone wrong
assert(method == 'GET')
body = self.fixtures.load(
'_backup_INFO_DISABLED.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB(
self, method, url, body, headers):
        # only GETs are implemented
# If we get any other method something has gone wrong
assert(method == 'GET')
body = self.fixtures.load(
'_backup_INFO_NOJOB.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT(
self, method, url, body, headers):
if method != 'POST':
raise InvalidRequestError('Only POST is accepted for this test')
request = ET.fromstring(body)
service_plan = request.get('servicePlan')
if service_plan != DEFAULT_BACKUP_PLAN:
raise InvalidRequestError('The default plan %s should have been passed in. Not %s' % (DEFAULT_BACKUP_PLAN, service_plan))
body = self.fixtures.load(
'_backup_ENABLE.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup(
self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load(
'_backup_ENABLE.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'GET':
if url.endswith('disable'):
body = self.fixtures.load(
'_backup_DISABLE.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load(
'_backup_INFO.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
raise ValueError("Unknown Method {0}".format(method))
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP(
self, method, url, body, headers):
assert(method == 'GET')
body = self.fixtures.load('server_server_NOBACKUP.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS(
self, method, url, body, headers):
# only POSTs are implemented
# If we get any other method something has gone wrong
assert(method == 'POST')
body = self.fixtures.load(
'_backup_EXISTS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify(
self, method, url, body, headers):
request = ET.fromstring(body)
service_plan = request.get('servicePlan')
if service_plan != 'Essentials':
raise InvalidRequestError("Expected Essentials backup plan in request")
body = self.fixtures.load('_backup_modify.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT(
self, method, url, body, headers):
request = ET.fromstring(body)
service_plan = request.get('servicePlan')
if service_plan != DEFAULT_BACKUP_PLAN:
raise InvalidRequestError("Expected % backup plan in test" % DEFAULT_BACKUP_PLAN)
body = self.fixtures.load('_backup_modify.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8(
self, method, url, body, headers):
if url.endswith('disable'):
body = self.fixtures.load(
('_remove_backup_client.xml')
)
elif url.endswith('cancelJob'):
body = self.fixtures.load(
(''
'_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml')
)
else:
raise ValueError("Unknown URL: %s" % url)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL(
self, method, url, body, headers):
if url.endswith('disable'):
body = self.fixtures.load(
('_remove_backup_client_FAIL.xml')
)
elif url.endswith('cancelJob'):
body = self.fixtures.load(
(''
'_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml')
)
else:
raise ValueError("Unknown URL: %s" % url)
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| {
"content_hash": "e2dd54ff9b98d7b7c84aaee0306ce870",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 154,
"avg_line_length": 48.051867219917014,
"alnum_prop": 0.6566642200250421,
"repo_name": "Kami/libcloud",
"id": "13039d4c9c3fc4b5a5b8455345b3dfb8a524d7df",
"size": "23943",
"binary": false,
"copies": "10",
"ref": "refs/heads/trunk",
"path": "libcloud/test/backup/test_dimensiondata_v2_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9122888"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from django.contrib.auth.models import User, Group
from main.models import Image, BasicUser, Project, AnnotationsJson
class BasicUserSerializer(serializers.ModelSerializer):
images_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
projects_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Project.objects.all())
annotations_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=AnnotationsJson.objects.all())
class Meta:
model = BasicUser
fields = ['id', 'display_name', 'email', 'projects_by_user', 'images_by_user', 'annotations_by_user']
def get_authenticated_user(validated_data):
email = validated_data.pop("owner_email")
# if not User.objects.filter(email=email).exists():
# user = User.objects.create_user(email, email, email)
# user.save()
return User.objects.get(email=email)
class ProjectSerializer(serializers.ModelSerializer):
# images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
owner = serializers.ReadOnlyField(source='owner.email')
class Meta:
model = Project
fields = ['id', 'name', 'owner', 'labels_json']
def create(self, validated_data, *args, **kwargs):
owner = get_authenticated_user(validated_data)
return Project.objects.create(owner=owner, **validated_data)
class ImageSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.email')
project_id = serializers.ReadOnlyField(source='part_of_project.id')
class Meta:
model = Image
fields = ['id', 'title', 'description', 'owner', 'image', 'project_id']
def create(self, validated_data, *args, **kwargs):
owner = get_authenticated_user(validated_data)
project_id = validated_data.pop("project_id")
return Image.objects.create(owner=owner, part_of_project=Project.objects.get(id=project_id), **validated_data)
class AnnotationsJsonSerializer(serializers.ModelSerializer):
#images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all())
owner = serializers.ReadOnlyField(source='owner.email')
image_id = serializers.ReadOnlyField(source='on_image.id')
class Meta:
model = AnnotationsJson
fields = ['id', 'owner', 'content_json', "image_id"]
def create(self, validated_data, *args, **kwargs):
owner = get_authenticated_user(validated_data)
image_id = validated_data.pop("image_id")
return AnnotationsJson.objects.create(owner=owner, on_image=Image.objects.get(id=image_id), **validated_data)
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ['id','name',]
def create(self, validated_data, *args, **kwargs):
return Group.objects.create(**validated_data)
class UserSerializer(serializers.ModelSerializer):
images_by_user = ImageSerializer(read_only=True, many=True)
images_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='images_by_user', many=True, queryset=Image.objects.all())
projects_by_user = ProjectSerializer(read_only=True, many=True)
projects_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='projects_by_user', many=True, queryset=Project.objects.all())
annotations_by_user = AnnotationsJsonSerializer(read_only=True, many=True)
annotations_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='annotations_by_user', many=True, queryset=AnnotationsJson.objects.all())
groups = GroupSerializer(many=True)
class Meta:
model = User
fields = ['email', 'projects_by_user', 'projects_by_user_id', 'images_by_user', 'images_by_user_id', 'annotations_by_user', 'annotations_by_user_id', 'groups',]
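# Illustrative usage sketch (not part of the original module): how a DRF view
# might create a Project for the authenticated user. ``request`` and the field
# values are assumptions; ProjectSerializer.create() pops "owner_email" from
# validated_data, so it has to be supplied through save().
def _example_create_project(request, name):
    serializer = ProjectSerializer(data={'name': name, 'labels_json': {}})
    serializer.is_valid(raise_exception=True)
    # save() forwards owner_email into validated_data for create()
    return serializer.save(owner_email=request.user.email)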
| {
"content_hash": "6e175d47745b8312f396c0f2a964ef11",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 168,
"avg_line_length": 41.63551401869159,
"alnum_prop": 0.7207631874298541,
"repo_name": "kartta-labs/noter-backend",
"id": "1ffad3cff4511a08e683410821a44fe468a54211",
"size": "4455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noter_backend/main/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1889"
},
{
"name": "Python",
"bytes": "56419"
},
{
"name": "Shell",
"bytes": "2057"
}
],
"symlink_target": ""
} |
import subprocess
from typing import List
import rich_click as click
PYTHON_VERSIONS = ["3.7", "3.8", "3.9"]
GHCR_IO_PREFIX = "ghcr.io"
GHCR_IO_IMAGES = [
"{prefix}/{repo}/{branch}/ci/python{python_version}:latest",
"{prefix}/{repo}/{branch}/prod/python{python_version}:latest",
]
# noinspection StrFormat
def pull_push_all_images(
source_prefix: str,
target_prefix: str,
images: List[str],
source_branch: str,
source_repo: str,
target_branch: str,
target_repo: str,
):
for python_version in PYTHON_VERSIONS:
for image in images:
source_image = image.format(
prefix=source_prefix, branch=source_branch, repo=source_repo, python_version=python_version
)
target_image = image.format(
prefix=target_prefix, branch=target_branch, repo=target_repo, python_version=python_version
)
print(f"Copying image: {source_image} -> {target_image}")
subprocess.run(["docker", "pull", source_image], check=True)
subprocess.run(["docker", "tag", source_image, target_image], check=True)
subprocess.run(["docker", "push", target_image], check=True)
@click.group(invoke_without_command=True)
@click.option("--source-branch", type=str, default="main", help="Source branch name [main]")
@click.option("--target-branch", type=str, default="main", help="Target branch name [main]")
@click.option("--source-repo", type=str, default="apache/airflow", help="Source repo")
@click.option("--target-repo", type=str, default="apache/airflow", help="Target repo")
def main(
source_branch: str,
target_branch: str,
source_repo: str,
target_repo: str,
):
pull_push_all_images(
GHCR_IO_PREFIX, GHCR_IO_PREFIX, GHCR_IO_IMAGES, source_branch, source_repo, target_branch, target_repo
)
if __name__ == "__main__":
main()
| {
"content_hash": "a2985ce80b7acb21bc45dda59ee6ef03",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 32.8448275862069,
"alnum_prop": 0.6409448818897637,
"repo_name": "bolkedebruin/airflow",
"id": "bcb81c55223f83811ce5dc8c8f06553314c88a0a",
"size": "3064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/retag_docker_images.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from django import forms
from . import models
class ThoughtForm(forms.ModelForm):
class Meta:
fields = ('condition', 'notes')
model = models.Thought | {
"content_hash": "fdb32d64b2fe5f85c8337a0f535d12c9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 19,
"alnum_prop": 0.6666666666666666,
"repo_name": "treehouse/livestream-django-feelings",
"id": "a38eb3232e52925c352e40ed085c1b625f609ec2",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feelings/thoughts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65"
},
{
"name": "HTML",
"bytes": "18469"
},
{
"name": "JavaScript",
"bytes": "1252960"
},
{
"name": "Python",
"bytes": "38118"
}
],
"symlink_target": ""
} |
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from cleancat import Integer, Schema, StopValidation, String, ValidationError
from cleancat.sqla import SQLAEmbeddedReference, SQLAReference, object_as_dict
Base = declarative_base()
class Person(Base):
__tablename__ = 'cleancattest'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
age = sa.Column(sa.Integer)
@pytest.fixture
def sqla_session():
"""Set up an SQLA connection, create all tables, and return a session."""
engine = sa.create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
Person.query = session.query_property()
return session
def test_object_as_dict():
steve = Person(name='Steve', age=30)
assert object_as_dict(steve) == {'id': None, 'age': 30, 'name': 'Steve'}
@pytest.mark.usefixtures('sqla_session')
class TestSQLAReferenceField:
def test_it_updates_an_existing_instance(self, sqla_session):
steve = Person(name='Steve', age=30)
sqla_session.add(steve)
sqla_session.commit()
clean_val = SQLAReference(Person).clean(str(steve.id))
assert isinstance(clean_val, Person)
assert clean_val.id == steve.id
def test_updating_missing_instance_fails(self):
expected_err_msg = 'Object does not exist.'
with pytest.raises(ValidationError, match=expected_err_msg):
SQLAReference(Person).clean('id-that-does-not-exist')
def test_it_can_be_optional(self):
field = SQLAReference(Person, required=False)
with pytest.raises(StopValidation) as e:
field.clean(None)
assert e.value.args[0] is None
@pytest.mark.usefixtures('sqla_session')
class TestSchemaWithSQLAEmbeddedReference:
@pytest.fixture
def book_schema_cls(self):
class PersonSchema(Schema):
name = String()
age = Integer()
class BookSchema(Schema):
author = SQLAEmbeddedReference(
Person, PersonSchema, required=False
)
title = String(required=False)
return BookSchema
def test_it_creates_a_new_instance(self, book_schema_cls):
schema = book_schema_cls({'author': {'name': 'New Author', 'age': 30}})
data = schema.full_clean()
author = data['author']
assert isinstance(author, Person)
assert not author.id
assert author.name == 'New Author'
assert author.age == 30
def test_it_updates_an_existing_instance(
self, book_schema_cls, sqla_session
):
steve = Person(name='Steve', age=30)
sqla_session.add(steve)
sqla_session.commit()
schema = book_schema_cls(
{'author': {'id': str(steve.id), 'name': 'Updated', 'age': 50}}
)
data = schema.full_clean()
author = data['author']
assert isinstance(author, Person)
assert author.id == steve.id
assert author.name == 'Updated'
assert author.age == 50
def test_updating_missing_instance_fails(self, book_schema_cls):
schema = book_schema_cls(
{'author': {'id': 123456789, 'name': 'Arbitrary Non-existent ID'}}
)
pytest.raises(ValidationError, schema.full_clean)
assert schema.field_errors == {'author': 'Object does not exist.'}
def test_it_can_be_optional(self, book_schema_cls):
schema = book_schema_cls(
{'title': 'Book without an author', 'author': None}
)
data = schema.full_clean()
assert data == {'title': 'Book without an author', 'author': None}
| {
"content_hash": "b74f75147e5134d22334750466519be5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 33.669642857142854,
"alnum_prop": 0.635905595332803,
"repo_name": "closeio/cleancat",
"id": "3916bc564c76cf88ce4dbb25a04c99fde1efd255",
"size": "3771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqla.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90101"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.contrib import admin
urlpatterns = patterns('account.views',
url(r'^login/$', 'login', name='login'),
url(r'^logout/$', 'logout', name='logout'),
)
| {
"content_hash": "f49ac8c8d2bfc00e31880597368cd25b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 30.285714285714285,
"alnum_prop": 0.6745283018867925,
"repo_name": "gdgand/Festi",
"id": "42a2d4b05daeaaa1d305580b5b2b63757a28c278",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "festi/account/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367769"
},
{
"name": "CoffeeScript",
"bytes": "15698"
},
{
"name": "Erlang",
"bytes": "2128"
},
{
"name": "HTML",
"bytes": "97067"
},
{
"name": "JavaScript",
"bytes": "71030"
},
{
"name": "Python",
"bytes": "36611"
},
{
"name": "Ruby",
"bytes": "583"
},
{
"name": "Shell",
"bytes": "1176"
}
],
"symlink_target": ""
} |
import unittest
from vehicle import Vehicle
class UtDemo(unittest.TestCase):
'''A Unit Test Demo'''
def setUp(self):
"Create a list of test files"
self.time_list=['20120912072912','20120913072230',20120912073312]
for f in self.time_list:
print f
def test_int(self):
self.assertEquals(2,2,'number not equals')
def test_vehicle(self):
v = Vehicle('Corolla')
v.display()
if __name__=='__main__': unittest.main()
| {
"content_hash": "730f7487ea4d40ff972543d6859e5821",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 24.9,
"alnum_prop": 0.6024096385542169,
"repo_name": "vollov/py-lab",
"id": "a960771067f3064aa34cee6b5f73f7c43b0d9d21",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/oo/utdemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22"
},
{
"name": "JavaScript",
"bytes": "685"
},
{
"name": "PLSQL",
"bytes": "6838"
},
{
"name": "Python",
"bytes": "254226"
},
{
"name": "Shell",
"bytes": "734"
},
{
"name": "Smarty",
"bytes": "1829"
}
],
"symlink_target": ""
} |
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
# Connects to the current device, returning a MonkeyDevice object
device = MonkeyRunner.waitForConnection()
# Installs the Android package. Notice that this method returns a boolean, so you can test
# to see if the installation worked.
device.installPackage('../app/target/net-d53dev-dslfy-android-1.0.apk')
# sets a variable with the package's internal name
package = 'net.d53dev.dslfy.android'
# sets a variable with the name of an Activity in the package
activity = 'net.d53dev.dslfy.android.ui.CarouselActivity'
# sets the name of the component to start
runComponent = package + '/' + activity
# Runs the component
device.startActivity(component=runComponent)
MonkeyRunner.sleep(5)
device.type('example@example.com')
# Takes a screenshot
result = device.takeSnapshot()
# Writes the screenshot to a file
result.writeToFile('screenshot.png','png')
| {
"content_hash": "418732af8f97a9a2ab958fb6c523943a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 90,
"avg_line_length": 30.8,
"alnum_prop": 0.7813852813852814,
"repo_name": "d53dave/DSLFY-Android",
"id": "b9374572e102990fb5735a85b1c956380dcf5865",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration-tests/monkeyrunnerTestSuite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187405"
},
{
"name": "Python",
"bytes": "980"
}
],
"symlink_target": ""
} |
import sys
number = 0
while number >= 0:
print "Enter number:"
number = float(sys.stdin.readline())
if number >= 0:
if number % 2 == 0:
print "Even"
else:
print "Odd"
print "Bye"
| {
"content_hash": "82e55ed33e6b245aa18d90b9fe6c3a65",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.5129310344827587,
"repo_name": "nathano/Perl_to_Python_Converter",
"id": "6adfe6c6e9ace17ee4fbdb2e555bacf164d19366",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/subset4/odd0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "20894"
},
{
"name": "Python",
"bytes": "2735"
}
],
"symlink_target": ""
} |
"""
Snap7 client used for connection to a siemens 7 server.
"""
import re
import logging
from ctypes import byref, create_string_buffer, sizeof
from ctypes import Array, c_byte, c_char_p, c_int, c_int32, c_uint16, c_ulong, c_void_p
from datetime import datetime
from typing import List, Optional, Tuple, Union
from .common import check_error, ipv4, load_library
from .types import S7SZL, Areas, BlocksList, S7CpInfo, S7CpuInfo, S7DataItem
from .types import S7OrderCode, S7Protection, S7SZLList, TS7BlockInfo, WordLen
from .types import S7Object, buffer_size, buffer_type, cpu_statuses, param_types
from .types import S7CpuInfo, RemotePort, wordlen_to_ctypes, block_types
logger = logging.getLogger(__name__)
def error_wrap(func):
"""Parses a s7 error code returned the decorated function."""
def f(*args, **kw):
code = func(*args, **kw)
check_error(code, context="client")
return f
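# Added remark: the wrapper ``f`` above does not return the numeric code, so
# methods decorated with @error_wrap return None on success; a non-zero snap7
# code surfaces as an exception raised by check_error().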
class Client:
"""
A snap7 client
Examples:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("127.0.0.1", 0, 0, 1012)
>>> client.get_connected()
True
>>> data = client.db_read(1, 0, 4)
>>> data
bytearray(b"\\x00\\x00\\x00\\x00")
>>> data[3] = 0b00000001
>>> data
bytearray(b'\\x00\\x00\\x00\\x01')
>>> client.db_write(1, 0, data)
"""
def __init__(self, lib_location: Optional[str] = None):
"""Creates a new `Client` instance.
Args:
lib_location: Full path to the snap7.dll file. Optional.
Examples:
>>> import snap7
>>> client = snap7.client.Client() # If the `snap7.dll` file is in the path location
>>> client = snap7.client.Client(lib_location="/path/to/snap7.dll") # If the `snap7.dll` file is in another location
>>> client
<snap7.client.Client object at 0x0000028B257128E0>
"""
self._read_callback = None
self._callback = None
self._pointer = None
self._library = load_library(lib_location)
self.create()
def __del__(self):
self.destroy()
def create(self):
"""Creates a SNAP7 client.
"""
logger.info("creating snap7 client")
self._library.Cli_Create.restype = c_void_p
self._pointer = S7Object(self._library.Cli_Create())
def destroy(self) -> Optional[int]:
"""Destroys the Client object.
Returns:
Error code from snap7 library.
Examples:
>>> client.destroy()
640719840
"""
logger.info("destroying snap7 client")
if self._pointer:
return self._library.Cli_Destroy(byref(self._pointer))
self._pointer = None
return None
def plc_stop(self) -> int:
"""Puts the CPU in STOP mode
Returns:
Error code from snap7 library.
"""
logger.info("stopping plc")
return self._library.Cli_PlcStop(self._pointer)
def plc_cold_start(self) -> int:
"""Puts the CPU in RUN mode performing a COLD START.
Returns:
Error code from snap7 library.
"""
logger.info("cold starting plc")
return self._library.Cli_PlcColdStart(self._pointer)
def plc_hot_start(self) -> int:
"""Puts the CPU in RUN mode performing an HOT START.
Returns:
Error code from snap7 library.
"""
logger.info("hot starting plc")
return self._library.Cli_PlcHotStart(self._pointer)
def get_cpu_state(self) -> str:
"""Returns the CPU status (running/stopped)
Returns:
Description of the cpu state.
Raises:
:obj:`ValueError`: if the cpu state is invalid.
Examples:
            >>> client.get_cpu_state()
'S7CpuStatusRun'
"""
state = c_int(0)
self._library.Cli_GetPlcStatus(self._pointer, byref(state))
try:
status_string = cpu_statuses[state.value]
except KeyError:
raise ValueError(f"The cpu state ({state.value}) is invalid")
logger.debug(f"CPU state is {status_string}")
return status_string
def get_cpu_info(self) -> S7CpuInfo:
"""Returns some information about the AG.
Returns:
:obj:`S7CpuInfo`: data structure with the information.
Examples:
>>> cpu_info = client.get_cpu_info()
>>> print(cpu_info)
"<S7CpuInfo ModuleTypeName: b'CPU 315-2 PN/DP'
SerialNumber: b'S C-C2UR28922012'
ASName: b'SNAP7-SERVER' Copyright: b'Original Siemens Equipment'
ModuleName: b'CPU 315-2 PN/DP'>
"""
info = S7CpuInfo()
result = self._library.Cli_GetCpuInfo(self._pointer, byref(info))
check_error(result, context="client")
return info
@error_wrap
def disconnect(self) -> int:
"""Disconnect a client.
Returns:
Error code from snap7 library.
"""
logger.info("disconnecting snap7 client")
return self._library.Cli_Disconnect(self._pointer)
@error_wrap
def connect(self, address: str, rack: int, slot: int, tcpport: int = 102) -> int:
"""Connects a Client Object to a PLC.
Args:
address: IP address of the PLC.
rack: rack number where the PLC is located.
slot: slot number where the CPU is located.
tcpport: port of the PLC.
Returns:
Error code from snap7 library.
Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0) # port is implicit = 102.
"""
logger.info(f"connecting to {address}:{tcpport} rack {rack} slot {slot}")
self.set_param(RemotePort, tcpport)
return self._library.Cli_ConnectTo(
self._pointer, c_char_p(address.encode()),
c_int(rack), c_int(slot))
def db_read(self, db_number: int, start: int, size: int) -> bytearray:
"""Reads a part of a DB from a PLC
Note:
Use it only for reading DBs, not Marks, Inputs, Outputs.
Args:
db_number: number of the DB to be read.
            start: byte index from where to start reading.
size: amount of bytes to be read.
Returns:
Buffer read.
Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0)
>>> buffer = client.db_read(1, 10, 4) # reads the db number 1 starting from the byte 10 until byte 14.
>>> buffer
            bytearray(b'\\x00\\x00\\x00\\x00')
"""
logger.debug(f"db_read, db_number:{db_number}, start:{start}, size:{size}")
type_ = wordlen_to_ctypes[WordLen.Byte.value]
data = (type_ * size)()
result = (self._library.Cli_DBRead(
self._pointer, db_number, start, size,
byref(data)))
check_error(result, context="client")
return bytearray(data)
@error_wrap
def db_write(self, db_number: int, start: int, data: bytearray) -> int:
"""Writes a part of a DB into a PLC.
Args:
            db_number: number of the DB to be written to.
            start: byte index to start writing to.
            data: buffer to be written.
Returns:
            Snap7 code.
Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0)
>>> buffer = bytearray([0b00000001])
>>> client.db_write(1, 10, buffer) # writes the bit number 0 from the byte 10 to TRUE.
"""
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
size = len(data)
cdata = (type_ * size).from_buffer_copy(data)
logger.debug(f"db_write db_number:{db_number} start:{start} size:{size} data:{data}")
return self._library.Cli_DBWrite(self._pointer, db_number, start, size,
byref(cdata))
def delete(self, block_type: str, block_num: int) -> int:
"""Delete a block into AG.
Args:
block_type: type of block.
block_num: block number.
Returns:
Error code from snap7 library.
"""
logger.info("deleting block")
blocktype = block_types[block_type]
result = self._library.Cli_Delete(self._pointer, blocktype, block_num)
return result
def full_upload(self, _type: str, block_num: int) -> Tuple[bytearray, int]:
"""Uploads a block from AG with Header and Footer infos.
The whole block (including header and footer) is copied into the user
buffer.
Args:
_type: type of block.
block_num: number of block.
Returns:
Tuple of the buffer and size.
"""
_buffer = buffer_type()
size = c_int(sizeof(_buffer))
block_type = block_types[_type]
result = self._library.Cli_FullUpload(self._pointer, block_type,
block_num, byref(_buffer),
byref(size))
check_error(result, context="client")
return bytearray(_buffer)[:size.value], size.value
def upload(self, block_num: int) -> bytearray:
"""Uploads a block from AG.
Note:
Upload means from the PLC to the PC.
Args:
block_num: block to be upload.
Returns:
Buffer with the uploaded block.
"""
logger.debug(f"db_upload block_num: {block_num}")
block_type = block_types['DB']
_buffer = buffer_type()
size = c_int(sizeof(_buffer))
result = self._library.Cli_Upload(self._pointer, block_type, block_num,
byref(_buffer), byref(size))
check_error(result, context="client")
        logger.info(f'received {size.value} bytes')
return bytearray(_buffer)
@error_wrap
def download(self, data: bytearray, block_num: int = -1) -> int:
"""Download a block into AG.
A whole block (including header and footer) must be available into the
user buffer.
Note:
Download means from the PC to the PLC.
Args:
data: buffer data.
block_num: new block number.
Returns:
Error code from snap7 library.
"""
type_ = c_byte
size = len(data)
cdata = (type_ * len(data)).from_buffer_copy(data)
return self._library.Cli_Download(self._pointer, block_num,
byref(cdata), size)
def db_get(self, db_number: int) -> bytearray:
"""Uploads a DB from AG using DBRead.
Note:
            This method can't be used for 1200/1500 PLCs.
Args:
db_number: db number to be read from.
Returns:
Buffer with the data read.
Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0)
>>> buffer = client.db_get(1) # reads the db number 1.
>>> buffer
bytearray(b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00...<truncated>\\x00\\x00")
"""
logger.debug(f"db_get db_number: {db_number}")
_buffer = buffer_type()
result = self._library.Cli_DBGet(
self._pointer, db_number, byref(_buffer),
byref(c_int(buffer_size)))
check_error(result, context="client")
return bytearray(_buffer)
def read_area(self, area: Areas, dbnumber: int, start: int, size: int) -> bytearray:
"""Reads a data area from a PLC
With it you can read DB, Inputs, Outputs, Merkers, Timers and Counters.
Args:
area: area to be read from.
dbnumber: number of the db to be read from. In case of Inputs, Marks or Outputs, this should be equal to 0.
start: byte index to start reading.
size: number of bytes to read.
Returns:
Buffer with the data read.
Raises:
:obj:`ValueError`: if the area is not defined in the `Areas`
Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0)
>>> buffer = client.read_area(Areas.DB, 1, 10, 4) # Reads the DB number 1 from the byte 10 to the byte 14.
>>> buffer
            bytearray(b'\\x00\\x00\\x00\\x00')
"""
if area not in Areas:
raise ValueError(f"{area} is not implemented in types")
elif area == Areas.TM:
wordlen = WordLen.Timer
elif area == Areas.CT:
wordlen = WordLen.Counter
else:
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
logger.debug(f"reading area: {area.name} dbnumber: {dbnumber} start: {start}: amount {size}: wordlen: {wordlen.name}={wordlen.value}")
data = (type_ * size)()
result = self._library.Cli_ReadArea(self._pointer, area.value, dbnumber, start,
size, wordlen.value, byref(data))
check_error(result, context="client")
return bytearray(data)
@error_wrap
def write_area(self, area: Areas, dbnumber: int, start: int, data: bytearray) -> int:
"""Writes a data area into a PLC.
Args:
            area: area to be written.
            dbnumber: number of the db to write to. In case of Inputs, Marks or Outputs, this should be equal to 0.
            start: byte index to start writing.
            data: buffer to be written.
Returns:
Snap7 error code.
        Example:
>>> import snap7
>>> client = snap7.client.Client()
>>> client.connect("192.168.0.1", 0, 0)
>>> buffer = bytearray([0b00000001])
>>> client.write_area(Areas.DB, 1, 10, buffer) # Writes the bit 0 of the byte 10 from the DB number 1 to TRUE.
"""
if area == Areas.TM:
wordlen = WordLen.Timer
elif area == Areas.CT:
wordlen = WordLen.Counter
else:
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[WordLen.Byte.value]
size = len(data)
logger.debug(f"writing area: {area.name} dbnumber: {dbnumber} start: {start}: size {size}: "
f"wordlen {wordlen.name}={wordlen.value} type: {type_}")
cdata = (type_ * len(data)).from_buffer_copy(data)
return self._library.Cli_WriteArea(self._pointer, area.value, dbnumber, start,
size, wordlen.value, byref(cdata))
def read_multi_vars(self, items) -> Tuple[int, S7DataItem]:
"""Reads different kind of variables from a PLC simultaneously.
Args:
items: list of items to be read.
Returns:
Tuple with the return code from the snap7 library and the list of items.
"""
result = self._library.Cli_ReadMultiVars(self._pointer, byref(items),
c_int32(len(items)))
check_error(result, context="client")
return result, items
def list_blocks(self) -> BlocksList:
"""Returns the AG blocks amount divided by type.
Returns:
Block list structure object.
Examples:
>>> block_list = client.list_blocks()
>>> print(block_list)
<block list count OB: 0 FB: 0 FC: 0 SFB: 0 SFC: 0x0 DB: 1 SDB: 0>
"""
logger.debug("listing blocks")
blocksList = BlocksList()
result = self._library.Cli_ListBlocks(self._pointer, byref(blocksList))
check_error(result, context="client")
logger.debug(f"blocks: {blocksList}")
return blocksList
def list_blocks_of_type(self, blocktype: str, size: int) -> Union[int, Array]:
"""This function returns the AG list of a specified block type.
Args:
blocktype: specified block type.
size: size of the block type.
Returns:
If size is 0, it returns a 0, otherwise an `Array` of specified block type.
Raises:
:obj:`ValueError`: if the `blocktype` is not valid.
"""
_blocktype = block_types.get(blocktype)
if not _blocktype:
raise ValueError("The blocktype parameter was invalid")
logger.debug(f"listing blocks of type: {_blocktype} size: {size}")
if size == 0:
return 0
data = (c_uint16 * size)()
count = c_int(size)
result = self._library.Cli_ListBlocksOfType(
self._pointer, _blocktype,
byref(data),
byref(count))
logger.debug(f"number of items found: {count}")
check_error(result, context="client")
return data
def get_block_info(self, blocktype: str, db_number: int) -> TS7BlockInfo:
"""Returns detailed information about a block present in AG.
Args:
blocktype: specified block type.
db_number: number of db to get information from.
Returns:
Structure of information from block.
Raises:
:obj:`ValueError`: if the `blocktype` is not valid.
Examples:
>>> block_info = client.get_block_info("DB", 1)
>>> print(block_info)
Block type: 10
Block number: 1
Block language: 5
Block flags: 1
MC7Size: 100
Load memory size: 192
Local data: 0
SBB Length: 20
Checksum: 0
Version: 1
Code date: b'1999/11/17'
Interface date: b'1999/11/17'
Author: b''
Family: b''
Header: b''
"""
blocktype_ = block_types.get(blocktype)
if not blocktype_:
raise ValueError("The blocktype parameter was invalid")
logger.debug(f"retrieving block info for block {db_number} of type {blocktype_}")
data = TS7BlockInfo()
result = self._library.Cli_GetAgBlockInfo(self._pointer, blocktype_, db_number, byref(data))
check_error(result, context="client")
return data
@error_wrap
def set_session_password(self, password: str) -> int:
"""Send the password to the PLC to meet its security level.
Args:
password: password to set.
Returns:
Snap7 code.
Raises:
:obj:`ValueError`: if the length of the `password` is more than 8 characters.
"""
if len(password) > 8:
raise ValueError("Maximum password length is 8")
return self._library.Cli_SetSessionPassword(self._pointer,
c_char_p(password.encode()))
@error_wrap
def clear_session_password(self) -> int:
"""Clears the password set for the current session (logout).
Returns:
Snap7 code.
"""
return self._library.Cli_ClearSessionPassword(self._pointer)
def set_connection_params(self, address: str, local_tsap: int, remote_tsap: int) -> None:
"""Sets internally (IP, LocalTSAP, RemoteTSAP) Coordinates.
Note:
This function must be called just before `Cli_Connect()`.
Args:
address: PLC/Equipment IPV4 Address, for example "192.168.1.12"
local_tsap: Local TSAP (PC TSAP)
remote_tsap: Remote TSAP (PLC TSAP)
Raises:
:obj:`ValueError`: if the `address` is not a valid IPV4.
:obj:`ValueError`: if the result of setting the connection params is
different than 0.
"""
if not re.match(ipv4, address):
raise ValueError(f"{address} is invalid ipv4")
result = self._library.Cli_SetConnectionParams(self._pointer, address,
c_uint16(local_tsap),
c_uint16(remote_tsap))
if result != 0:
raise ValueError("The parameter was invalid")
def set_connection_type(self, connection_type: int):
""" Sets the connection resource type, i.e the way in which the Clients connects to a PLC.
Args:
connection_type: 1 for PG, 2 for OP, 3 to 10 for S7 Basic
Raises:
:obj:`ValueError`: if the result of setting the connection type is
different than 0.
"""
result = self._library.Cli_SetConnectionType(self._pointer,
c_uint16(connection_type))
if result != 0:
raise ValueError("The parameter was invalid")
def get_connected(self) -> bool:
"""Returns the connection status
Note:
Sometimes returns True, while connection is lost.
Returns:
True if is connected, otherwise false.
"""
connected = c_int32()
result = self._library.Cli_GetConnected(self._pointer, byref(connected))
check_error(result, context="client")
return bool(connected)
def ab_read(self, start: int, size: int) -> bytearray:
"""Reads a part of IPU area from a PLC.
Args:
start: byte index from where start to read.
size: amount of bytes to read.
Returns:
Buffer with the data read.
"""
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
data = (type_ * size)()
logger.debug(f"ab_read: start: {start}: size {size}: ")
result = self._library.Cli_ABRead(self._pointer, start, size,
byref(data))
check_error(result, context="client")
return bytearray(data)
def ab_write(self, start: int, data: bytearray) -> int:
"""Writes a part of IPU area into a PLC.
Args:
start: byte index from where start to write.
data: buffer with the data to be written.
Returns:
Snap7 code.
"""
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
size = len(data)
cdata = (type_ * size).from_buffer_copy(data)
logger.debug(f"ab write: start: {start}: size: {size}: ")
return self._library.Cli_ABWrite(
self._pointer, start, size, byref(cdata))
def as_ab_read(self, start: int, size: int, data) -> int:
"""Reads a part of IPU area from a PLC asynchronously.
Args:
start: byte index from where start to read.
size: amount of bytes to read.
            data: buffer where the data will be placed.
Returns:
Snap7 code.
"""
logger.debug(f"ab_read: start: {start}: size {size}: ")
result = self._library.Cli_AsABRead(self._pointer, start, size,
byref(data))
check_error(result, context="client")
return result
def as_ab_write(self, start: int, data: bytearray) -> int:
"""Writes a part of IPU area into a PLC asynchronously.
Args:
start: byte index from where start to write.
data: buffer with the data to be written.
Returns:
Snap7 code.
"""
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
size = len(data)
cdata = (type_ * size).from_buffer_copy(data)
logger.debug(f"ab write: start: {start}: size: {size}: ")
result = self._library.Cli_AsABWrite(
self._pointer, start, size, byref(cdata))
check_error(result, context="client")
return result
def as_compress(self, time: int) -> int:
""" Performs the Compress action asynchronously.
Args:
time: timeout.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsCompress(self._pointer, time)
check_error(result, context="client")
return result
def as_copy_ram_to_rom(self, timeout: int = 1) -> int:
"""Performs the Copy Ram to Rom action asynchronously.
Args:
            timeout: time to wait before failing.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsCopyRamToRom(self._pointer, timeout)
check_error(result, context="client")
return result
def as_ct_read(self, start: int, amount: int, data) -> int:
"""Reads counters from a PLC asynchronously.
Args:
start: byte index to start to read from.
amount: amount of bytes to read.
            data: buffer where the value read will be placed.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsCTRead(self._pointer, start, amount, byref(data))
check_error(result, context="client")
return result
def as_ct_write(self, start: int, amount: int, data: bytearray) -> int:
"""Write counters into a PLC.
Args:
start: byte index to start to write from.
amount: amount of bytes to write.
            data: buffer to be written.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Counter.value]
cdata = (type_ * amount).from_buffer_copy(data)
result = self._library.Cli_AsCTWrite(self._pointer, start, amount, byref(cdata))
check_error(result, context="client")
return result
def as_db_fill(self, db_number: int, filler) -> int:
"""Fills a DB in AG with a given byte.
Args:
db_number: number of DB to fill.
filler: buffer to fill with.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsDBFill(self._pointer, db_number, filler)
check_error(result, context="client")
return result
    def as_db_get(self, db_number: int, _buffer, size) -> int:
"""Uploads a DB from AG using DBRead.
Note:
            This method will not work with 1200/1500 PLCs.
Args:
db_number: number of DB to get.
            _buffer: buffer where the data read will be placed.
size: amount of bytes to be read.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsDBGet(self._pointer, db_number, byref(_buffer), byref(size))
check_error(result, context="client")
return result
    def as_db_read(self, db_number: int, start: int, size: int, data) -> int:
"""Reads a part of a DB from a PLC.
Args:
db_number: number of DB to be read.
start: byte index from where start to read from.
size: amount of bytes to read.
            data: buffer where the data read will be placed.
Returns:
Snap7 code.
Examples:
>>> import ctypes
>>> data = (ctypes.c_uint8 * size_to_read)() # In this ctypes array data will be stored.
>>> result = client.as_db_read(1, 0, size_to_read, data)
>>> result # 0 = success
0
"""
result = self._library.Cli_AsDBRead(self._pointer, db_number, start, size, byref(data))
check_error(result, context="client")
return result
def as_db_write(self, db_number: int, start: int, size: int, data) -> int:
"""Writes a part of a DB into a PLC.
Args:
            db_number: number of DB to be written to.
            start: byte index from where to start writing.
            size: amount of bytes to write.
            data: buffer to be written.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsDBWrite(self._pointer, db_number, start, size, byref(data))
check_error(result, context="client")
return result
def as_download(self, data: bytearray, block_num: int) -> int:
"""Download a block into AG asynchronously.
Note:
A whole block (including header and footer) must be available into the user buffer.
Args:
block_num: new block number.
            data: buffer where the data will be placed.
Returns:
Snap7 code.
"""
size = len(data)
type_ = c_byte * len(data)
cdata = type_.from_buffer_copy(data)
result = self._library.Cli_AsDownload(self._pointer, block_num, byref(cdata), size)
check_error(result)
return result
@error_wrap
def compress(self, time: int) -> int:
"""Performs the Compress action.
Args:
time: timeout.
Returns:
Snap7 code.
"""
return self._library.Cli_Compress(self._pointer, time)
@error_wrap
def set_param(self, number: int, value: int) -> int:
"""Writes an internal Server Parameter.
Args:
number: number of argument to be written.
value: value to be written.
Returns:
Snap7 code.
"""
logger.debug(f"setting param number {number} to {value}")
type_ = param_types[number]
return self._library.Cli_SetParam(self._pointer, number, byref(type_(value)))
def get_param(self, number: int) -> int:
"""Reads an internal Server parameter.
Args:
number: number of argument to be read.
        Returns:
Value of the param read.
"""
        logger.debug(f"retrieving param number {number}")
type_ = param_types[number]
value = type_()
code = self._library.Cli_GetParam(self._pointer, c_int(number), byref(value))
check_error(code)
return value.value
def get_pdu_length(self) -> int:
"""Returns info about the PDU length (requested and negotiated).
Returns:
PDU length.
Examples:
>>> client.get_pdu_length()
480
"""
logger.info("getting PDU length")
requested_ = c_uint16()
negotiated_ = c_uint16()
code = self._library.Cli_GetPduLength(self._pointer, byref(requested_), byref(negotiated_))
check_error(code)
return negotiated_.value
def get_plc_datetime(self) -> datetime:
"""Returns the PLC date/time.
Returns:
Date and time as datetime
Examples:
>>> client.get_plc_datetime()
datetime.datetime(2021, 4, 6, 12, 12, 36)
"""
type_ = c_int32
buffer = (type_ * 9)()
result = self._library.Cli_GetPlcDateTime(self._pointer, byref(buffer))
check_error(result, context="client")
return datetime(
year=buffer[5] + 1900,
month=buffer[4] + 1,
day=buffer[3],
hour=buffer[2],
minute=buffer[1],
second=buffer[0]
)
@error_wrap
def set_plc_datetime(self, dt: datetime) -> int:
"""Sets the PLC date/time with a given value.
Args:
dt: datetime to be set.
Returns:
Snap7 code.
"""
type_ = c_int32
buffer = (type_ * 9)()
buffer[0] = dt.second
buffer[1] = dt.minute
buffer[2] = dt.hour
buffer[3] = dt.day
buffer[4] = dt.month - 1
buffer[5] = dt.year - 1900
return self._library.Cli_SetPlcDateTime(self._pointer, byref(buffer))
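    # Usage sketch (assumes a connected client): push the host clock to the PLC,
    # either explicitly as below or via set_plc_system_datetime() further down.
    #
    #   client.set_plc_datetime(datetime.now())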
def check_as_completion(self, p_value) -> int:
"""Method to check Status of an async request. Result contains if the check was successful, not the data value itself
Args:
p_value: Pointer where result of this check shall be written.
Returns:
Snap7 code. If 0 - Job is done successfully. If 1 - Job is either pending or contains s7errors
"""
result = self._library.Cli_CheckAsCompletion(self._pointer, p_value)
check_error(result, context="client")
return result
def set_as_callback(self, pfn_clicompletion, p_usr):
# Cli_SetAsCallback
result = self._library.Cli_SetAsCallback(self._pointer, pfn_clicompletion, p_usr)
check_error(result, context='client')
return result
def wait_as_completion(self, timeout: int) -> int:
"""Snap7 Cli_WaitAsCompletion representative.
Args:
timeout: ms to wait for async job
Returns:
Snap7 code.
"""
# Cli_WaitAsCompletion
result = self._library.Cli_WaitAsCompletion(self._pointer, c_ulong(timeout))
check_error(result, context="client")
return result
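    # Typical asynchronous flow (sketch; assumes a connected client and a DB1 of
    # at least 4 bytes): start the job, block until it completes, then read the
    # ctypes buffer that was handed to the as_* call.
    #
    #   data = (wordlen_to_ctypes[WordLen.Byte.value] * 4)()
    #   client.as_db_read(1, 0, 4, data)
    #   client.wait_as_completion(500)  # timeout in milliseconds
    #   payload = bytearray(data)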
def _prepare_as_read_area(self, area: Areas, size: int) -> Tuple[WordLen, Array]:
if area not in Areas:
raise ValueError(f"{area} is not implemented in types")
elif area == Areas.TM:
wordlen = WordLen.Timer
elif area == Areas.CT:
wordlen = WordLen.Counter
else:
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[wordlen.value]
usrdata = (type_ * size)()
return wordlen, usrdata
def as_read_area(self, area: Areas, dbnumber: int, start: int, size: int, wordlen: WordLen, pusrdata) -> int:
"""Reads a data area from a PLC asynchronously.
With it you can read DB, Inputs, Outputs, Merkers, Timers and Counters.
Args:
area: memory area to be read from.
dbnumber: The DB number, only used when area=Areas.DB
start: offset to start writing
size: number of units to read
            pusrdata: buffer where the data will be placed.
wordlen: length of the word to be read.
Returns:
Snap7 code.
"""
logger.debug(f"reading area: {area.name} dbnumber: {dbnumber} start: {start}: amount {size}: wordlen: {wordlen.name}={wordlen.value}")
result = self._library.Cli_AsReadArea(self._pointer, area.value, dbnumber, start, size, wordlen.value, pusrdata)
check_error(result, context="client")
return result
def _prepare_as_write_area(self, area: Areas, data: bytearray) -> Tuple[WordLen, Array]:
if area not in Areas:
raise ValueError(f"{area} is not implemented in types")
elif area == Areas.TM:
wordlen = WordLen.Timer
elif area == Areas.CT:
wordlen = WordLen.Counter
else:
wordlen = WordLen.Byte
type_ = wordlen_to_ctypes[WordLen.Byte.value]
cdata = (type_ * len(data)).from_buffer_copy(data)
return wordlen, cdata
def as_write_area(self, area: Areas, dbnumber: int, start: int, size: int, wordlen: WordLen, pusrdata) -> int:
"""Writes a data area into a PLC asynchronously.
Args:
area: memory area to be written.
dbnumber: The DB number, only used when area=Areas.DB
start: offset to start writing.
size: amount of bytes to be written.
wordlen: length of the word to be written.
pusrdata: buffer to be written.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
logger.debug(f"writing area: {area.name} dbnumber: {dbnumber} start: {start}: size {size}: "
f"wordlen {wordlen} type: {type_}")
cdata = (type_ * len(pusrdata)).from_buffer_copy(pusrdata)
res = self._library.Cli_AsWriteArea(self._pointer, area.value, dbnumber, start, size, wordlen.value, byref(cdata))
check_error(res, context="client")
return res
def as_eb_read(self, start: int, size: int, data) -> int:
"""Reads a part of IPI area from a PLC asynchronously.
Args:
start: byte index from where to start reading from.
size: amount of bytes to read.
            data: buffer where the data read will be placed.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsEBRead(self._pointer, start, size, byref(data))
check_error(result, context="client")
return result
def as_eb_write(self, start: int, size: int, data: bytearray) -> int:
"""Writes a part of IPI area into a PLC.
Args:
start: byte index from where to start writing from.
size: amount of bytes to write.
data: buffer to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
cdata = (type_ * size).from_buffer_copy(data)
result = self._library.Cli_AsEBWrite(self._pointer, start, size, byref(cdata))
check_error(result, context="client")
return result
def as_full_upload(self, _type: str, block_num: int) -> int:
"""Uploads a block from AG with Header and Footer infos.
Note:
Upload means from PLC to PC.
Args:
_type: type of block.
block_num: number of block to upload.
Returns:
Snap7 code.
"""
_buffer = buffer_type()
size = c_int(sizeof(_buffer))
block_type = block_types[_type]
result = self._library.Cli_AsFullUpload(self._pointer, block_type, block_num, byref(_buffer), byref(size))
check_error(result, context="client")
return result
def as_list_blocks_of_type(self, blocktype: str, data, count) -> int:
"""Returns the AG blocks list of a given type.
Args:
blocktype: block type.
            data: buffer where the data will be placed.
count: pass.
Returns:
Snap7 code.
Raises:
:obj:`ValueError`: if the `blocktype` is invalid
"""
_blocktype = block_types.get(blocktype)
if not _blocktype:
raise ValueError("The blocktype parameter was invalid")
result = self._library.Cli_AsListBlocksOfType(self._pointer, _blocktype, byref(data), byref(count))
check_error(result, context="client")
return result
def as_mb_read(self, start: int, size: int, data) -> int:
"""Reads a part of Merkers area from a PLC.
Args:
            start: byte index from where to start reading.
size: amount of byte to read.
            data: buffer where the data read will be placed.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsMBRead(self._pointer, start, size, byref(data))
check_error(result, context="client")
return result
def as_mb_write(self, start: int, size: int, data: bytearray) -> int:
"""Writes a part of Merkers area into a PLC.
Args:
            start: byte index from where to start writing.
size: amount of byte to write.
data: buffer to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
cdata = (type_ * size).from_buffer_copy(data)
result = self._library.Cli_AsMBWrite(self._pointer, start, size, byref(cdata))
check_error(result, context="client")
return result
def as_read_szl(self, ssl_id: int, index: int, s7_szl: S7SZL, size) -> int:
"""Reads a partial list of given ID and Index.
Args:
ssl_id: TODO
index: TODO
s7_szl: TODO
size: TODO
Returns:
Snap7 code.
"""
result = self._library.Cli_AsReadSZL(self._pointer, ssl_id, index, byref(s7_szl), byref(size))
check_error(result, context="client")
return result
def as_read_szl_list(self, szl_list, items_count) -> int:
"""Reads the list of partial lists available in the CPU.
Args:
szl_list: TODO
items_count: TODO
Returns:
Snap7 code.
"""
result = self._library.Cli_AsReadSZLList(self._pointer, byref(szl_list), byref(items_count))
check_error(result, context="client")
return result
    def as_tm_read(self, start: int, amount: int, data) -> int:
"""Reads timers from a PLC.
Args:
            start: byte index to start reading from.
amount: amount of bytes to read.
data: buffer where the data will be placed.
Returns:
Snap7 code.
"""
result = self._library.Cli_AsTMRead(self._pointer, start, amount, byref(data))
check_error(result, context="client")
return result
def as_tm_write(self, start: int, amount: int, data: bytearray) -> int:
"""Write timers into a PLC.
Args:
start: byte index to start writing to.
amount: amount of bytes to write.
data: buffer to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Timer.value]
cdata = (type_ * amount).from_buffer_copy(data)
result = self._library.Cli_AsTMWrite(self._pointer, start, amount, byref(cdata))
check_error(result)
return result
def as_upload(self, block_num: int, _buffer, size) -> int:
"""Uploads a block from AG.
Note:
            Upload means from PLC to PC.
Args:
block_num: block number to upload.
            _buffer: buffer where the data will be placed.
            size: amount of bytes to upload.
Returns:
Snap7 code.
"""
block_type = block_types['DB']
result = self._library.Cli_AsUpload(self._pointer, block_type, block_num, byref(_buffer), byref(size))
check_error(result, context="client")
return result
def copy_ram_to_rom(self, timeout: int = 1) -> int:
"""Performs the Copy Ram to Rom action.
Args:
timeout: timeout time.
Returns:
Snap7 code.
"""
result = self._library.Cli_CopyRamToRom(self._pointer, timeout)
check_error(result, context="client")
return result
def ct_read(self, start: int, amount: int) -> bytearray:
"""Reads counters from a PLC.
Args:
            start: byte index to start reading from.
amount: amount of bytes to read.
Returns:
Buffer read.
"""
type_ = wordlen_to_ctypes[WordLen.Counter.value]
data = (type_ * amount)()
result = self._library.Cli_CTRead(self._pointer, start, amount, byref(data))
check_error(result, context="client")
return bytearray(data)
def ct_write(self, start: int, amount: int, data: bytearray) -> int:
"""Write counters into a PLC.
Args:
            start: byte index to start writing to.
amount: amount of bytes to write.
data: buffer data to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Counter.value]
cdata = (type_ * amount).from_buffer_copy(data)
result = self._library.Cli_CTWrite(self._pointer, start, amount, byref(cdata))
check_error(result)
return result
def db_fill(self, db_number: int, filler: int) -> int:
"""Fills a DB in AG with a given byte.
Args:
db_number: db number to fill.
filler: value filler.
Returns:
Snap7 code.
"""
result = self._library.Cli_DBFill(self._pointer, db_number, filler)
check_error(result)
return result
def eb_read(self, start: int, size: int) -> bytearray:
"""Reads a part of IPI area from a PLC.
Args:
            start: byte index to start reading from.
size: amount of bytes to read.
Returns:
Data read.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
data = (type_ * size)()
result = self._library.Cli_EBRead(self._pointer, start, size, byref(data))
check_error(result, context="client")
return bytearray(data)
def eb_write(self, start: int, size: int, data: bytearray) -> int:
"""Writes a part of IPI area into a PLC.
Args:
start: byte index to be written.
size: amount of bytes to write.
data: data to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
cdata = (type_ * size).from_buffer_copy(data)
result = self._library.Cli_EBWrite(self._pointer, start, size, byref(cdata))
check_error(result)
return result
def error_text(self, error: int) -> str:
"""Returns a textual explanation of a given error number.
Args:
error: error number.
Returns:
Text error.
"""
text_length = c_int(256)
error_code = c_int32(error)
text = create_string_buffer(buffer_size)
response = self._library.Cli_ErrorText(error_code, byref(text), text_length)
check_error(response)
result = bytearray(text)[:text_length.value].decode().strip('\x00')
return result
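    # Usage sketch: translate the code of the last job into readable text.
    #
    #   client.error_text(client.get_last_error())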
def get_cp_info(self) -> S7CpInfo:
"""Returns some information about the CP (communication processor).
Returns:
Structure object containing the CP information.
"""
cp_info = S7CpInfo()
result = self._library.Cli_GetCpInfo(self._pointer, byref(cp_info))
check_error(result)
return cp_info
def get_exec_time(self) -> int:
"""Returns the last job execution time in milliseconds.
Returns:
Execution time value.
"""
time = c_int32()
result = self._library.Cli_GetExecTime(self._pointer, byref(time))
check_error(result)
return time.value
def get_last_error(self) -> int:
"""Returns the last job result.
Returns:
Returns the last error value.
"""
last_error = c_int32()
result = self._library.Cli_GetLastError(self._pointer, byref(last_error))
check_error(result)
return last_error.value
def get_order_code(self) -> S7OrderCode:
"""Returns the CPU order code.
Returns:
Order of the code in a structure object.
"""
order_code = S7OrderCode()
result = self._library.Cli_GetOrderCode(self._pointer, byref(order_code))
check_error(result)
return order_code
def get_pg_block_info(self, block: bytearray) -> TS7BlockInfo:
"""Returns detailed information about a block loaded in memory.
Args:
            block: buffer where the data will be placed.
Returns:
Structure object that contains the block information.
"""
block_info = TS7BlockInfo()
size = c_int(len(block))
buffer = (c_byte * len(block)).from_buffer_copy(block)
result = self._library.Cli_GetPgBlockInfo(self._pointer, byref(buffer), byref(block_info), size)
check_error(result)
return block_info
def get_protection(self) -> S7Protection:
"""Gets the CPU protection level info.
Returns:
Structure object with protection attributes.
"""
s7_protection = S7Protection()
result = self._library.Cli_GetProtection(self._pointer, byref(s7_protection))
check_error(result)
return s7_protection
def iso_exchange_buffer(self, data: bytearray) -> bytearray:
"""Exchanges a given S7 PDU (protocol data unit) with the CPU.
Args:
data: buffer to exchange.
Returns:
Snap7 code.
"""
size = c_int(len(data))
cdata = (c_byte * len(data)).from_buffer_copy(data)
response = self._library.Cli_IsoExchangeBuffer(self._pointer, byref(cdata), byref(size))
check_error(response)
result = bytearray(cdata)[:size.value]
return result
def mb_read(self, start: int, size: int) -> bytearray:
"""Reads a part of Merkers area from a PLC.
Args:
start: byte index to be read from.
size: amount of bytes to read.
Returns:
Buffer with the data read.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
data = (type_ * size)()
result = self._library.Cli_MBRead(self._pointer, start, size, byref(data))
check_error(result, context="client")
return bytearray(data)
def mb_write(self, start: int, size: int, data: bytearray) -> int:
"""Writes a part of Merkers area into a PLC.
Args:
start: byte index to be written.
size: amount of bytes to write.
data: buffer to write.
Returns:
Snap7 code.
"""
type_ = wordlen_to_ctypes[WordLen.Byte.value]
cdata = (type_ * size).from_buffer_copy(data)
result = self._library.Cli_MBWrite(self._pointer, start, size, byref(cdata))
check_error(result)
return result
def read_szl(self, ssl_id: int, index: int = 0x0000) -> S7SZL:
"""Reads a partial list of given ID and Index.
Args:
ssl_id: ssl id to be read.
index: index to be read.
Returns:
SZL structure object.
"""
s7_szl = S7SZL()
size = c_int(sizeof(s7_szl))
result = self._library.Cli_ReadSZL(self._pointer, ssl_id, index, byref(s7_szl), byref(size))
check_error(result, context="client")
return s7_szl
def read_szl_list(self) -> bytearray:
"""Reads the list of partial lists available in the CPU.
Returns:
Buffer read.
"""
szl_list = S7SZLList()
items_count = c_int(sizeof(szl_list))
response = self._library.Cli_ReadSZLList(self._pointer, byref(szl_list), byref(items_count))
check_error(response, context="client")
result = bytearray(szl_list.List)[:items_count.value]
return result
def set_plc_system_datetime(self) -> int:
"""Sets the PLC date/time with the host (PC) date/time.
Returns:
Snap7 code.
"""
result = self._library.Cli_SetPlcSystemDateTime(self._pointer)
check_error(result)
return result
def tm_read(self, start: int, amount: int) -> bytearray:
"""Reads timers from a PLC.
Args:
            start: byte index from where to start reading.
amount: amount of byte to be read.
Returns:
Buffer read.
"""
wordlen = WordLen.Timer
type_ = wordlen_to_ctypes[wordlen.value]
data = (type_ * amount)()
result = self._library.Cli_TMRead(self._pointer, start, amount, byref(data))
check_error(result, context="client")
return bytearray(data)
def tm_write(self, start: int, amount: int, data: bytearray) -> int:
"""Write timers into a PLC.
Args:
            start: byte index from where to start writing.
amount: amount of byte to be written.
data: data to be write.
Returns:
Snap7 code.
"""
wordlen = WordLen.Timer
type_ = wordlen_to_ctypes[wordlen.value]
cdata = (type_ * amount).from_buffer_copy(data)
result = self._library.Cli_TMWrite(self._pointer, start, amount, byref(cdata))
check_error(result)
return result
def write_multi_vars(self, items: List[S7DataItem]) -> int:
"""Writes different kind of variables into a PLC simultaneously.
Args:
items: list of items to be written.
Returns:
Snap7 code.
"""
items_count = c_int32(len(items))
data = bytearray()
for item in items:
data += bytearray(item)
cdata = (S7DataItem * len(items)).from_buffer_copy(data)
result = self._library.Cli_WriteMultiVars(self._pointer, byref(cdata), items_count)
check_error(result, context="client")
return result
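# Illustrative sketch (not part of the original module): reading several variables
# in a single round-trip with read_multi_vars(). The PLC address, rack/slot and
# DB layout are assumptions; S7DataItem field names follow the TS7DataItem layout
# declared in snap7.types.
def _multi_var_read_sketch():  # pragma: no cover
    from ctypes import POINTER, c_uint8, cast
    client = Client()
    client.connect("192.168.0.1", 0, 0)
    items = (S7DataItem * 2)()
    buffers = []
    for i in range(len(items)):
        items[i].Area = Areas.DB.value
        items[i].WordLen = WordLen.Byte.value
        items[i].DBNumber = 1
        items[i].Start = i * 4
        items[i].Amount = 4
        # keep a python-side reference so the buffer outlives the call
        buffers.append((c_uint8 * 4)())
        items[i].pData = cast(buffers[i], POINTER(c_uint8))
    client.read_multi_vars(items)
    return [bytearray(buf) for buf in buffers]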
| {
"content_hash": "c8014c09a10026ef6a60ab84dddf21b5",
"timestamp": "",
"source": "github",
"line_count": 1566,
"max_line_length": 142,
"avg_line_length": 33.577905491698594,
"alnum_prop": 0.5620637848734382,
"repo_name": "gijzelaerr/python-snap7",
"id": "103d5e553222560b908d12bd0b5935cab020fe73",
"size": "52583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snap7/client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "288"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "Python",
"bytes": "267248"
}
],
"symlink_target": ""
} |
from django.forms.models import ModelForm, model_to_dict
from moderation.models import MODERATION_STATUS_PENDING,\
MODERATION_STATUS_REJECTED
from django.core.exceptions import ObjectDoesNotExist
class BaseModeratedObjectForm(ModelForm):
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
if instance:
try:
if instance.moderated_object.moderation_status in\
[MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED] and\
not instance.moderated_object.moderator.\
visible_until_rejected:
initial =\
model_to_dict(instance.moderated_object.changed_object)
kwargs.setdefault('initial', {})
kwargs['initial'].update(initial)
except ObjectDoesNotExist:
pass
super(BaseModeratedObjectForm, self).__init__(*args, **kwargs)
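# A typical subclass (illustrative only; ``MyModel`` stands for any model that is
# registered with django-moderation):
#
#     class MyModelForm(BaseModeratedObjectForm):
#         class Meta:
#             model = MyModel
#             fields = '__all__'
#
# Editing a pending or rejected instance through such a form then shows the
# moderated (changed) values as initial data, as implemented in __init__ above.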
| {
"content_hash": "f68dac2c244a621c7676c08e9ea49ee8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 38.8,
"alnum_prop": 0.6113402061855671,
"repo_name": "ebrelsford/django-moderation",
"id": "c352b6d4d92d1064992910fab7f8c068c1bcc264",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/moderation/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150827"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
} |
"""Helper classes that list&validate all attributes to serialize to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.training.tracking import tracking
@six.add_metaclass(abc.ABCMeta)
class SavedModelSaver(object):
"""Saver defining the methods and properties used to serialize Keras objects.
"""
def __init__(self, obj):
self.obj = obj
@abc.abstractproperty
def object_identifier(self):
"""String stored in object identifier field in the SavedModel proto.
Returns:
A string with the object identifier, which is used at load time.
"""
raise NotImplementedError
@property
def tracking_metadata(self):
"""String stored in metadata field in the SavedModel proto.
Returns:
A serialized JSON storing information necessary for recreating this layer.
"""
# TODO(kathywu): check that serialized JSON can be loaded (e.g., if an
# object is in the python property)
return json_utils.Encoder().encode(self.python_properties)
def list_extra_dependencies_for_serialization(self, serialization_cache):
"""Lists extra dependencies to serialize to SavedModel.
By overriding this method, extra dependencies can be attached to the
serialized Layer. For example, this is used to save the list of `variables`
and `trainable_variables`, which are python properties in a Layer object,
but are represented as a static list in the SavedModel.
Args:
serialization_cache: A dictionary shared between all objects in the same
object graph. This object is passed to both
`_list_extra_dependencies_for_serialization` and
`_list_functions_for_serialization`.
Returns:
      A dictionary mapping attribute names to trackable objects. The entire list
      of attributes is listed in the `saved_model._LayerAttributes` class.
"""
return self.objects_to_serialize(serialization_cache)
def list_functions_for_serialization(self, serialization_cache):
"""Lists extra functions to serialize to the SavedModel.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to `Function` or
`ConcreteFunction`.
"""
fns = self.functions_to_serialize(serialization_cache)
# The parent AutoTrackable class saves all user-defined tf.functions, and
# returns them in _list_functions_for_serialization(). Add these functions
# to the dict.
fns.update(
tracking.AutoTrackable._list_functions_for_serialization( # pylint:disable=protected-access
self.obj, serialization_cache))
return fns
@abc.abstractproperty
def python_properties(self):
"""Returns dictionary of python properties to save in the metadata.
This dictionary must be serializable and deserializable to/from JSON.
When loading, the items in this dict are used to initialize the object and
define attributes in the revived object.
"""
raise NotImplementedError
@abc.abstractmethod
def objects_to_serialize(self, serialization_cache):
"""Returns dictionary of extra checkpointable objects to serialize.
See `functions_to_serialize` for an explanation of this function's
effects.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to checkpointable objects.
"""
raise NotImplementedError
@abc.abstractmethod
def functions_to_serialize(self, serialization_cache):
"""Returns extra functions to include when serializing a Keras object.
Normally, when calling exporting an object to SavedModel, only the
functions and objects defined by the user are saved. For example:
```
obj = tf.Module()
obj.v = tf.Variable(1.)
@tf.function
def foo(...): ...
obj.foo = foo
w = tf.Variable(1.)
tf.saved_model.save(obj, 'path/to/saved/model')
loaded = tf.saved_model.load('path/to/saved/model')
loaded.v # Variable with the same value as obj.v
loaded.foo # Equivalent to obj.foo
loaded.w # AttributeError
```
Assigning trackable objects to attributes creates a graph, which is used for
both checkpointing and SavedModel serialization.
When the graph generated from attribute tracking is insufficient, extra
objects and functions may be added at serialization time. For example,
most models do not have their call function wrapped with a @tf.function
decorator. This results in `model.call` not being saved. Since Keras objects
should be revivable from the SavedModel format, the call function is added
as an extra function to serialize.
This function and `objects_to_serialize` is called multiple times when
exporting to SavedModel. Please use the cache to avoid generating new
functions and objects. A fresh cache is created for each SavedModel export.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to `Function` or
`ConcreteFunction`.
"""
raise NotImplementedError
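# ------------------------------------------------------------------------------
# Illustration only (not part of the TensorFlow source): a minimal concrete
# subclass showing what implementers must provide -- an identifier string,
# JSON-serializable python properties, and the extra objects/functions attached
# at export time. The identifier used here is a made-up example.
class _NoOpSaver(SavedModelSaver):
  """Saver that adds nothing beyond the automatically tracked state."""

  @property
  def object_identifier(self):
    return '_tf_keras_noop'  # hypothetical identifier

  @property
  def python_properties(self):
    return {'name': getattr(self.obj, 'name', None)}

  def objects_to_serialize(self, serialization_cache):
    return {}  # no extra checkpointable objects

  def functions_to_serialize(self, serialization_cache):
    return {}  # no extra tf.functions or ConcreteFunctions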
| {
"content_hash": "4894553db2e3400db1fac952867dc151",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 100,
"avg_line_length": 34.56962025316456,
"alnum_prop": 0.7226290735994141,
"repo_name": "gunan/tensorflow",
"id": "0065e6d786e95bed952abb1ca730c3f3cd19ff56",
"size": "6151",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/saving/saved_model/base_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import json
from mpl_toolkits.mplot3d import Axes3D
#sys.path.append(os.path.join(os.path.dirname(__file__),"../"))
from crowdsourcing.interfaces.mechanical_turk import *
from crowdsourcing.interfaces.local_webserver import *
from crowdsourcing.util.image_search import *
from crowdsourcing.annotation_types.classification import *
from crowdsourcing.annotation_types.bbox import *
from crowdsourcing.annotation_types.part import *
# directory containing the images we want to annotate
IMAGE_DIR = 'data/classification/imagenet'
OUTPUT_FOLDER = 'ImageNet4'
USE_MTURK = True
ONLINE = True
WORKERS_PER_IMAGE = 0
with open('keys.json') as f: keys = json.load(f)
# Amazon account information for paying for mturk tasks, see
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.htm
AWS_ACCESS_KEY = keys['AWS_ACCESS_KEY']
AWS_SECRET_ACCESS_KEY = keys['AWS_SECRET_ACCESS_KEY']
SANDBOX = False
# API key for Flickr image search, see https://www.flickr.com/services/api/misc.api_keys.html
FLICKR_API_KEY = keys['FLICKR_API_KEY']
FLICKR_API_SECRET_KEY = keys['FLICKR_API_SECRET_KEY']
MAX_PHOTOS = 4000
HOST = 'sbranson.no-ip.org'
# The name of the objects we want to collect. Images will be obtained by crawling flickr
# image search for each object, and we want to use mturk to filter out images that
# don't contain the object of interest
CLASSES = [ { 'object_name' : 'beaker', 'definition' : 'A flatbottomed jar made of glass or plastic; used for chemistry', 'search' : ['beaker', 'beaker chemistry', 'beaker lab'], 'wikipedia_url' : 'https://en.wikipedia.org/wiki/Beaker_(glassware)', 'example_image_urls' : ['http://imagenet.stanford.edu/nodes/12/02815834/99/998d93ef3fdd9a30034cda8f0ce246b7bb13ebc4.thumb', 'http://imagenet.stanford.edu/nodes/12/02815834/51/5171dde0d020b00923d4297d88d427326846efb2.thumb', 'http://imagenet.stanford.edu/nodes/12/02815834/d0/d06ccaf38a410e0b59bfe73819eb7bd0028bb8f1.thumb', 'https://sbranson.no-ip.org/online_crowdsourcing/not_beaker.jpg' ] },
{ 'object_name' : 'scorpion', 'definition' : 'Arachnid of warm dry regions having a long segmented tail ending in a venomous stinger', 'search' : ['scorpion', 'scorpion arachnid'], 'wikipedia_url' : 'https://en.wikipedia.org/wiki/Scorpion', 'example_image_urls' : ['http://imagenet.stanford.edu/nodes/2/01770393/b0/b02dcf2c1d8c7a735b52ab74300c342124e4be5c.thumb', 'http://imagenet.stanford.edu/nodes/2/01770393/31/31af6ea97dd040ec2ddd6ae86fe1f601ecfc8c02.thumb', 'http://imagenet.stanford.edu/nodes/2/01770393/38/382e998365d5667fc333a7c8f5f6e74e3c1fe164.thumb', 'http://imagenet.stanford.edu/nodes/2/01770393/88/88bc0f14c9779fad2bc364f5f4d8269d452e26c2.thumb'] },
{ 'object_name' : 'apiary', 'definition' : 'A shed containing a number of beehives', 'search' : ['apiary'], 'wikipedia_url' : 'https://en.wikipedia.org/wiki/Apiary', 'example_image_urls' : ['http://imagenet.stanford.edu/nodes/10/02727426/1f/1f6f71add82d10edad8b3630ec26490055c70a5d.thumb', 'http://imagenet.stanford.edu/nodes/10/02727426/94/94a3624ff3e639fe2d8ae836e91ca7e8fcdd0ed7.thumb', 'http://imagenet.stanford.edu/nodes/10/02727426/15/15a37da46bddd5010d3f1d1996899b8472c9556b.thumb', 'http://imagenet.stanford.edu/nodes/10/02727426/01/013b499a063b6ea83218c5ed63ea811bce5a9974.thumb'] },
{ 'object_name' : 'cardigan', 'definition' : 'Knitted jacket that is fastened up the front with buttons or a zipper', 'search' : ['cardigan'], 'wikipedia_url' : 'https://en.wikipedia.org/wiki/Cardigan_(sweater)', 'example_image_urls' : ['http://imagenet.stanford.edu/nodes/9/02963159/d7/d7419041a96e8baf9a870c81d549ad0b345c8127.thumb', 'http://imagenet.stanford.edu/nodes/9/02963159/34/34256aaf7b10073ec16dc5ddb0b31305878de875.thumb', 'http://imagenet.stanford.edu/nodes/9/02963159/e8/e8a50045dd40da5299ee8817052edfc090b05355.thumb', 'http://imagenet.stanford.edu/nodes/9/02963159/38/38216bf40fafe4bb526fabb430188c24b968a152.thumb'] }
]
for c in CLASSES:
# directories to store images and results
output_folder = os.path.join(OUTPUT_FOLDER, c['object_name'])
image_folder = os.path.join('output', output_folder, 'flickr')
if not os.path.exists(image_folder):
os.makedirs(image_folder)
# Download images from Flickr
FlickrImageSearch(c['search'], image_folder, FLICKR_API_KEY, FLICKR_API_SECRET_KEY, max_photos=MAX_PHOTOS)
# Load an unlabelled dataset by scanning a directory of images
dataset = CrowdDatasetBinaryClassification(name=c['object_name'])
dataset.scan_image_directory(os.path.join(image_folder, 'images'))
if USE_MTURK:
crowdsource = MTurkCrowdsourcer(dataset, AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY, HOST, output_folder, sandbox=SANDBOX,
hit_params = c, online = ONLINE, thumbnail_size = (100,100), initial_assignments_per_image=WORKERS_PER_IMAGE)
else:
crowdsource = LocalCrowdsourcer(dataset, HOST, output_folder, hit_params = c, online = ONLINE, thumbnail_size = (100,100), initial_assignments_per_image=WORKERS_PER_IMAGE, port=8080)
crowdsource.run()
| {
"content_hash": "f5401d1cf82b1db8149333eb79cd82be",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 675,
"avg_line_length": 73.37142857142857,
"alnum_prop": 0.7610981308411215,
"repo_name": "sbranson/online_crowdsourcing",
"id": "9353ada9e3fba211f9f107c0a3fd4e8811908e3a",
"size": "5136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/collect_annotations_imagenet3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15706"
},
{
"name": "Python",
"bytes": "444456"
}
],
"symlink_target": ""
} |
"""This application demonstrates how to perform basic operations with the
Google Cloud Translate API
For more information, the documentation at
https://cloud.google.com/translate/docs.
"""
import argparse
from google.cloud import translate
import six
def detect_language(text):
"""Detects the text's language."""
translate_client = translate.Client()
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.detect_language(text)
print('Text: {}'.format(text))
print('Confidence: {}'.format(result['confidence']))
print('Language: {}'.format(result['language']))
def list_languages():
"""Lists all available languages."""
translate_client = translate.Client()
results = translate_client.get_languages()
for language in results:
print(u'{name} ({language})'.format(**language))
def list_languages_with_target(target):
"""Lists all available languages and localizes them to the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
translate_client = translate.Client()
results = translate_client.get_languages(target_language=target)
for language in results:
print(u'{name} ({language})'.format(**language))
def translate_text_with_model(target, text, model=translate.NMT):
"""Translates text into the target language.
Make sure your project is whitelisted.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
translate_client = translate.Client()
if isinstance(text, six.binary_type):
text = text.decode('utf-8')
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(
text, target_language=target, model=model)
print(u'Text: {}'.format(result['input']))
print(u'Translation: {}'.format(result['translatedText']))
print(u'Detected source language: {}'.format(
result['detectedSourceLanguage']))
def translate_text(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
translate_client = translate.Client()
if isinstance(text, six.binary_type):
text = text.decode('utf-8')
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(
text, target_language=target)
print(u'Text: {}'.format(result['input']))
print(u'Translation: {}'.format(result['translatedText']))
print(u'Detected source language: {}'.format(
result['detectedSourceLanguage']))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
    detect_language_parser = subparsers.add_parser(
        'detect-language', help=detect_language.__doc__)
    detect_language_parser.add_argument('text')
list_languages_parser = subparsers.add_parser(
'list-languages', help=list_languages.__doc__)
list_languages_with_target_parser = subparsers.add_parser(
'list-languages-with-target', help=list_languages_with_target.__doc__)
list_languages_with_target_parser.add_argument('target')
translate_text_parser = subparsers.add_parser(
'translate-text', help=translate_text.__doc__)
translate_text_parser.add_argument('target')
translate_text_parser.add_argument('text')
args = parser.parse_args()
if args.command == 'detect-language':
detect_language(args.text)
elif args.command == 'list-languages':
list_languages()
elif args.command == 'list-languages-with-target':
list_languages_with_target(args.target)
elif args.command == 'translate-text':
translate_text(args.target, args.text)
| {
"content_hash": "bd15ed9ab28536f5318f5c3b79b14878",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 33.109375,
"alnum_prop": 0.6882963662104766,
"repo_name": "sharbison3/python-docs-samples",
"id": "91aef63d5c849f7a804e892742b330c9f01bb9c3",
"size": "4837",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "translate/cloud-client/snippets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "26110"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Python",
"bytes": "994536"
},
{
"name": "Shell",
"bytes": "9331"
}
],
"symlink_target": ""
} |
"""
Collects Web IDL definitions in IDL files into a Python object per Blink
component.
Collected IDL definitions are parsed into ASTs and saved into a file with
a label of Blink component.
"""
import optparse
import utilities
import web_idl
from idl_parser import idl_parser
from idl_parser import idl_lexer
_VALID_COMPONENTS = ('core', 'modules', 'extensions_chromeos')
def parse_options():
parser = optparse.OptionParser()
parser.add_option(
'--idl_list_file',
type='string',
help="a file path which lists IDL file paths to process")
parser.add_option(
'--component',
type='choice',
choices=_VALID_COMPONENTS,
help="specify a component name")
parser.add_option(
'--for_testing',
action='store_true',
help=("specify this option if the IDL definitions are meant for "
"testing only"))
parser.add_option('--output', type='string', help="the output file path")
options, args = parser.parse_args()
required_option_names = ('idl_list_file', 'component', 'output')
for opt_name in required_option_names:
if getattr(options, opt_name) is None:
parser.error("--{} is a required option.".format(opt_name))
if args:
parser.error("Unknown arguments {}".format(args))
return options, args
def main():
options, _ = parse_options()
filepaths = utilities.read_idl_files_list_from_file(options.idl_list_file)
lexer = idl_lexer.IDLLexer()
parser = idl_parser.IDLParser(lexer)
ast_group = web_idl.AstGroup(
component=web_idl.Component(options.component),
for_testing=bool(options.for_testing))
for filepath in filepaths:
ast_group.add_ast_node(idl_parser.ParseFile(parser, filepath))
ast_group.write_to_file(options.output)
if __name__ == '__main__':
main()
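# ------------------------------------------------------------------------------
# Illustration only: one way to drive this script from another Python process.
def _example_invocation():
    """Invoke the collector as a subprocess (sketch, not used by the build).

    The file names are placeholders; the list file contains one Web IDL path
    per line and the output is the pickled AstGroup for the chosen component.
    """
    import subprocess
    import sys
    subprocess.check_call([
        sys.executable, __file__,
        '--idl_list_file', 'core_idl_files_list.txt',
        '--component', 'core',
        '--output', 'web_idl_ast_core.pickle',
    ])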
| {
"content_hash": "8fe1a96a4840617d3ed484177499ecac",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 28.846153846153847,
"alnum_prop": 0.6544,
"repo_name": "nwjs/chromium.src",
"id": "750304dd2492835f6e3dd81d4720935ff21b54d9",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw70",
"path": "third_party/blink/renderer/bindings/scripts/collect_idl_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import copy
import json
import os
from datetime import datetime, timedelta
from django.conf import settings
from django.core import mail
import mock
import pytest
from waffle.testutils import override_switch
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.activity.models import (
MAX_TOKEN_USE_COUNT, ActivityLog, ActivityLogToken)
from olympia.activity.utils import (
ACTIVITY_MAIL_GROUP, ActivityEmailEncodingError, ActivityEmailParser,
ActivityEmailTokenError, ActivityEmailUUIDError, add_email_to_activity_log,
add_email_to_activity_log_wrapper, log_and_notify,
notify_about_activity_log, send_activity_mail)
from olympia.addons.models import Addon, AddonReviewerFlags
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.amo.urlresolvers import reverse
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
sample_message_file = os.path.join(TESTS_DIR, 'emails', 'message.json')
with open(sample_message_file) as file_object:
sample_message_content = json.loads(file_object.read())
class TestEmailParser(TestCase):
def test_basic_email(self):
parser = ActivityEmailParser(sample_message_content['Message'])
assert parser.get_uuid() == '5a0b8a83d501412589cc5d562334b46b'
assert parser.reply == (
'This is a developer reply to an AMO. It\'s nice.')
def test_with_invalid_msg(self):
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser('youtube?v=dQw4w9WgXcQ')
def test_with_empty_to(self):
message = copy.deepcopy(sample_message_content['Message'])
message['To'] = None
parser = ActivityEmailParser(message)
with self.assertRaises(ActivityEmailUUIDError):
# It should fail, but not because of a Not Iterable TypeError,
# instead we handle that gracefully and raise an exception that
# we control and catch later.
parser.get_uuid()
def test_empty_text_body(self):
"""We receive requests that either have no `TextBody` or it's None
https://github.com/mozilla/addons-server/issues/8848
"""
message = copy.deepcopy(sample_message_content['Message'])
message['TextBody'] = None
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser(message)
message = copy.deepcopy(sample_message_content['Message'])
message.pop('TextBody', None)
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser(message)
@override_switch('activity-email-bouncing', active=True)
class TestEmailBouncing(TestCase):
BOUNCE_REPLY = (
'Hello,\n\nAn email was received, apparently from you. Unfortunately '
'we couldn\'t process it because of:\n%s\n\nPlease visit %s to leave '
'a reply instead.\n--\nMozilla Add-ons\n%s')
def setUp(self):
self.bounce_reply = (
self.BOUNCE_REPLY % ('%s', settings.SITE_URL, settings.SITE_URL))
self.email_text = sample_message_content['Message']
@mock.patch('olympia.activity.utils.ActivityLog.create')
def test_no_note_logged(self, log_mock):
# First set everything up so it's working
addon = addon_factory()
version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
user = user_factory()
self.grant_permission(user, '*:*')
ActivityLogToken.objects.create(
user=user, version=version,
uuid='5a0b8a83d501412589cc5d562334b46b')
# Make log_mock return false for some reason.
log_mock.return_value = False
# No exceptions thrown, but no log means something went wrong.
assert not add_email_to_activity_log_wrapper(self.email_text)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (
self.bounce_reply % 'Undefined Error.')
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_because_invalid_token(self):
# Fails because the token doesn't exist in ActivityToken.objects
assert not add_email_to_activity_log_wrapper(self.email_text)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (
self.bounce_reply %
'UUID found in email address TO: header but is not a valid token '
'(5a0b8a83d501412589cc5d562334b46b).')
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_because_invalid_email(self):
# Fails because the token doesn't exist in ActivityToken.objects
email_text = copy.deepcopy(self.email_text)
email_text['To'] = [{
'EmailAddress': 'foobar@addons.mozilla.org',
'FriendlyName': 'not a valid activity mail reply'}]
assert not add_email_to_activity_log_wrapper(email_text)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (
self.bounce_reply %
'TO: address does not contain activity email uuid ('
'foobar@addons.mozilla.org).')
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_parser_because_malformed_message(self):
assert not add_email_to_activity_log_wrapper("blah de blah")
# No From or Reply means no bounce, alas.
assert len(mail.outbox) == 0
def _test_exception_in_parser_but_can_send_email(self, message):
assert not add_email_to_activity_log_wrapper(message)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == (
self.bounce_reply % 'Invalid or malformed json message object.')
assert mail.outbox[0].subject == 'Re: your email to us'
assert mail.outbox[0].to == ['bob@dole.org']
def test_exception_in_parser_but_from_defined(self):
"""Unlikely scenario of an email missing a body but having a From."""
self._test_exception_in_parser_but_can_send_email(
{'From': {'EmailAddress': 'bob@dole.org'}})
def test_exception_in_parser_but_reply_to_defined(self):
"""Even more unlikely scenario of an email missing a body but having a
ReplyTo."""
self._test_exception_in_parser_but_can_send_email(
{'ReplyTo': {'EmailAddress': 'bob@dole.org'}})
def test_exception_to_notifications_alias(self):
email_text = copy.deepcopy(self.email_text)
email_text['To'] = [{
'EmailAddress': 'notifications@%s' % settings.INBOUND_EMAIL_DOMAIN,
'FriendlyName': 'not a valid activity mail reply'}]
assert not add_email_to_activity_log_wrapper(email_text)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert ('This email address is not meant to receive emails '
'directly.') in out.body
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
@override_switch('activity-email-bouncing', active=False)
def test_exception_but_bouncing_waffle_off(self):
# Fails because the token doesn't exist in ActivityToken.objects
assert not add_email_to_activity_log_wrapper(self.email_text)
# But no bounce.
assert len(mail.outbox) == 0
class TestAddEmailToActivityLog(TestCase):
def setUp(self):
self.addon = addon_factory(name='Badger', status=amo.STATUS_NOMINATED)
version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
self.profile = user_factory()
self.token = ActivityLogToken.objects.create(
version=version, user=self.profile)
self.token.update(uuid='5a0b8a83d501412589cc5d562334b46b')
self.parser = ActivityEmailParser(sample_message_content['Message'])
def test_developer_comment(self):
self.profile.addonuser_set.create(addon=self.addon)
note = add_email_to_activity_log(self.parser)
assert note.log == amo.LOG.DEVELOPER_REPLY_VERSION
self.token.refresh_from_db()
assert self.token.use_count == 1
def test_reviewer_comment(self):
self.grant_permission(self.profile, 'Addons:Review')
note = add_email_to_activity_log(self.parser)
assert note.log == amo.LOG.REVIEWER_REPLY_VERSION
self.token.refresh_from_db()
assert self.token.use_count == 1
def test_with_max_count_token(self):
"""Test with an invalid token."""
self.token.update(use_count=MAX_TOKEN_USE_COUNT + 1)
with self.assertRaises(ActivityEmailTokenError):
assert not add_email_to_activity_log(self.parser)
self.token.refresh_from_db()
assert self.token.use_count == MAX_TOKEN_USE_COUNT + 1
def test_with_unpermitted_token(self):
"""Test when the token user doesn't have a permission to add a note."""
with self.assertRaises(ActivityEmailTokenError):
assert not add_email_to_activity_log(self.parser)
self.token.refresh_from_db()
assert self.token.use_count == 0
def test_non_existent_token(self):
self.token.update(uuid='12345678901234567890123456789012')
with self.assertRaises(ActivityEmailUUIDError):
assert not add_email_to_activity_log(self.parser)
def test_broken_token(self):
parser = ActivityEmailParser(
copy.deepcopy(sample_message_content['Message']))
parser.email['To'][0]['EmailAddress'] = 'reviewreply+1234@foo.bar'
with self.assertRaises(ActivityEmailUUIDError):
assert not add_email_to_activity_log(parser)
class TestLogAndNotify(TestCase):
def setUp(self):
self.developer = user_factory()
self.developer2 = user_factory()
self.reviewer = user_factory()
self.grant_permission(self.reviewer, 'Addons:Review',
'Addon Reviewers')
self.addon = addon_factory()
self.version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
self.addon.addonuser_set.create(user=self.developer)
self.addon.addonuser_set.create(user=self.developer2)
self.task_user = user_factory(id=settings.TASK_USER_ID)
def _create(self, action, author=None):
author = author or self.reviewer
details = {
'comments': u'I spy, with my líttle €ye...',
'version': self.version.version}
activity = ActivityLog.create(
action, self.addon, self.version, user=author, details=details)
activity.update(created=self.days_ago(1))
return activity
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
[reply_to] = call[1]['reply_to']
assert reply_to.startswith('reviewreply+')
assert reply_to.endswith(settings.INBOUND_EMAIL_DOMAIN)
return recipients
def _check_email_info_request(self, call, url, reason_text, days_text):
subject = call[0][0]
body = call[0][1]
assert subject == u'Mozilla Add-ons: Action Required for %s %s' % (
self.addon.name, self.version.version)
assert ('visit %s' % url) in body
assert ('receiving this email because %s' % reason_text) in body
if days_text is not None:
assert 'If we do not hear from you within' in body
assert days_text in body
assert 'reviewing version %s of the add-on %s' % (
self.version.version, self.addon.name) in body
def _check_email(self, call, url, reason_text):
subject = call[0][0]
body = call[0][1]
assert subject == u'Mozilla Add-ons: %s %s' % (
self.addon.name, self.version.version)
assert ('visit %s' % url) in body
assert ('receiving this email because %s' % reason_text) in body
assert 'If we do not hear from you within' not in body
@mock.patch('olympia.activity.utils.send_mail')
def test_reviewer_request_for_information(self, send_mail_mock):
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() + timedelta(days=7))
self._create(amo.LOG.REQUEST_INFORMATION, self.reviewer)
log_and_notify(
amo.LOG.REQUEST_INFORMATION, 'blah', self.reviewer, self.version)
assert send_mail_mock.call_count == 2 # Both authors.
sender = '%s <notifications@%s>' % (
self.reviewer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email_info_request(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'seven (7) days of this notification')
self._check_email_info_request(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'seven (7) days of this notification')
@mock.patch('olympia.activity.utils.send_mail')
def test_reviewer_request_for_information_close_date(self, send_mail_mock):
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() + timedelta(days=1))
self._create(amo.LOG.REQUEST_INFORMATION, self.reviewer)
log_and_notify(
amo.LOG.REQUEST_INFORMATION, 'blah', self.reviewer, self.version)
assert send_mail_mock.call_count == 2 # Both authors.
sender = '%s <notifications@%s>' % (
self.reviewer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email_info_request(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'one (1) day of this notification')
self._check_email_info_request(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'one (1) day of this notification')
@mock.patch('olympia.activity.utils.send_mail')
def test_reviewer_request_for_information_far_date(self, send_mail_mock):
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() + timedelta(days=21))
self._create(amo.LOG.REQUEST_INFORMATION, self.reviewer)
log_and_notify(
amo.LOG.REQUEST_INFORMATION, 'blah', self.reviewer, self.version)
assert send_mail_mock.call_count == 2 # Both authors.
sender = '%s <notifications@%s>' % (
self.reviewer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email_info_request(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'21 days of this notification')
self._check_email_info_request(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
'21 days of this notification')
def test_post_reviewer_request_for_information(self):
GroupUser.objects.filter(user=self.reviewer).delete()
self.grant_permission(
self.reviewer, 'Addons:PostReview', 'Reviewers: Foo')
self.test_reviewer_request_for_information()
def test_content_reviewer_request_for_information(self):
GroupUser.objects.filter(user=self.reviewer).delete()
self.grant_permission(
self.reviewer, 'Addons:ContentReview', 'Reviewers: Bar')
self.test_reviewer_request_for_information()
@mock.patch('olympia.activity.utils.send_mail')
def test_developer_reply(self, send_mail_mock):
# Set pending info request flag to make sure
# it has been dropped after the reply.
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() + timedelta(days=1))
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == u'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
sender = '%s <notifications@%s>' % (
self.developer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.')
review_url = absolutify(
reverse('reviewers.review',
kwargs={'addon_id': self.version.addon.pk,
'channel': 'listed'},
add_prefix=False))
self._check_email(
send_mail_mock.call_args_list[1],
review_url, 'you reviewed this add-on.')
self.addon = Addon.objects.get(pk=self.addon.pk)
assert not self.addon.pending_info_request
@mock.patch('olympia.activity.utils.send_mail')
def test_reviewer_reply(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer.
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.REVIEWER_REPLY_VERSION
comments = u'Thîs ïs a revïewer replyîng'
log_and_notify(action, comments, self.reviewer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
assert logs[0].details['comments'] == u'Thîs ïs a revïewer replyîng'
assert send_mail_mock.call_count == 2 # Both authors.
sender = '%s <notifications@%s>' % (
self.reviewer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.')
self._check_email(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.')
@mock.patch('olympia.activity.utils.send_mail')
def test_log_with_no_comment(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
action = amo.LOG.APPROVAL_NOTES_CHANGED
log_and_notify(
action=action, comments=None, note_creator=self.developer,
version=self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
assert not logs[0].details # No details json because no comment.
assert send_mail_mock.call_count == 2 # One author, one reviewer.
sender = '%s <notifications@%s>' % (
self.developer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
assert u'Approval notes changed' in (
send_mail_mock.call_args_list[0][0][1])
assert u'Approval notes changed' in (
send_mail_mock.call_args_list[1][0][1])
def test_staff_cc_group_is_empty_no_failure(self):
Group.objects.create(name=ACTIVITY_MAIL_GROUP, rules='None:None')
log_and_notify(amo.LOG.REJECT_VERSION, u'á', self.reviewer,
self.version)
@mock.patch('olympia.activity.utils.send_mail')
def test_staff_cc_group_get_mail(self, send_mail_mock):
self.grant_permission(self.reviewer, 'None:None', ACTIVITY_MAIL_GROUP)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
sender = '%s <notifications@%s>' % (
self.developer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
assert len(recipients) == 2
# self.reviewers wasn't on the thread, but gets an email anyway.
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
review_url = absolutify(
reverse('reviewers.review',
kwargs={'addon_id': self.version.addon.pk,
'channel': 'listed'},
add_prefix=False))
self._check_email(send_mail_mock.call_args_list[1],
review_url,
'you are member of the activity email cc group.')
@mock.patch('olympia.activity.utils.send_mail')
def test_mail_needinfo_correct_subject(self, send_mail_mock):
self.grant_permission(self.reviewer, 'None:None', ACTIVITY_MAIL_GROUP)
action = amo.LOG.REQUEST_INFORMATION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
sender = '%s <notifications@%s>' % (
self.developer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
developer_subject = send_mail_mock.call_args_list[0][0][0]
assert developer_subject == (
u'Mozilla Add-ons: Action Required for '
'%s %s' % (self.addon.name, self.version.version))
reviewer_subject = send_mail_mock.call_args_list[1][0][0]
assert reviewer_subject == u'Mozilla Add-ons: %s %s' % (
self.addon.name, self.version.version)
assert len(recipients) == 2
# self.reviewers wasn't on the thread, but gets an email anyway.
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
review_url = absolutify(
reverse('reviewers.review',
kwargs={'addon_id': self.version.addon.pk,
'channel': 'listed'},
add_prefix=False))
self._check_email(send_mail_mock.call_args_list[1],
review_url,
'you are member of the activity email cc group.')
@mock.patch('olympia.activity.utils.send_mail')
def test_task_user_doesnt_get_mail(self, send_mail_mock):
"""The task user account is used to auto-sign unlisted addons, amongst
other things, but we don't want that user account to get mail."""
self._create(amo.LOG.APPROVE_VERSION, self.task_user)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 1
assert self.developer2.email in recipients
assert self.task_user.email not in recipients
@mock.patch('olympia.activity.utils.send_mail')
def test_ex_reviewer_doesnt_get_mail(self, send_mail_mock):
"""If a reviewer has now left the team don't email them."""
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# Take his joob!
GroupUser.objects.get(group=Group.objects.get(name='Addon Reviewers'),
user=self.reviewer).delete()
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 1
assert self.developer2.email in recipients
assert self.reviewer.email not in recipients
@mock.patch('olympia.activity.utils.send_mail')
def test_review_url_listed(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == u'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.')
review_url = absolutify(
reverse('reviewers.review', add_prefix=False,
kwargs={'channel': 'listed', 'addon_id': self.addon.pk}))
self._check_email(send_mail_mock.call_args_list[1],
review_url, 'you reviewed this add-on.')
@mock.patch('olympia.activity.utils.send_mail')
def test_review_url_unlisted(self, send_mail_mock):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted',
'Addon Reviewers')
# One from the reviewer.
self._create(amo.LOG.COMMENT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = u'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == u'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.')
review_url = absolutify(
reverse('reviewers.review', add_prefix=False,
kwargs={'channel': 'unlisted', 'addon_id': self.addon.pk}))
self._check_email(send_mail_mock.call_args_list[1],
review_url, 'you reviewed this add-on.')
@mock.patch('olympia.activity.utils.send_mail')
def test_from_name_escape(self, send_mail_mock):
self.reviewer.update(display_name='mr "quote" escape')
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
action = amo.LOG.REVIEWER_REPLY_VERSION
comments = u'Thîs ïs a revïewer replyîng'
log_and_notify(action, comments, self.reviewer, self.version)
sender = r'"mr \"quote\" escape" <notifications@%s>' % (
settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
@mock.patch('olympia.activity.utils.send_mail')
def test_comment_entity_decode(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
action = amo.LOG.REVIEWER_REPLY_VERSION
comments = u'This email's entities should be decoded'
log_and_notify(action, comments, self.reviewer, self.version)
body = send_mail_mock.call_args_list[1][0][1]
assert "email's entities should be decoded" in body
assert "&" not in body
@mock.patch('olympia.activity.utils.send_mail')
def test_notify_about_previous_activity(self, send_mail_mock):
# Create an activity to use when notifying.
activity = self._create(amo.LOG.REQUEST_INFORMATION, self.reviewer)
notify_about_activity_log(self.addon, self.version, activity)
assert ActivityLog.objects.count() == 1 # No new activity created.
assert send_mail_mock.call_count == 2 # Both authors.
sender = '%s <notifications@%s>' % (
self.reviewer.name, settings.INBOUND_EMAIL_DOMAIN)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email_info_request(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
days_text=None)
self._check_email_info_request(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
days_text=None)
@pytest.mark.django_db
def test_send_activity_mail():
subject = u'This ïs ã subject'
message = u'And... this ïs a messãge!'
addon = addon_factory()
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
user = user_factory()
recipients = [user, ]
from_email = 'bob@bob.bob'
action = ActivityLog.create(amo.LOG.DEVELOPER_REPLY_VERSION, user=user)
send_activity_mail(
subject, message, latest_version, recipients, from_email, action.id)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == message
assert mail.outbox[0].subject == subject
uuid = latest_version.token.get(user=user).uuid.hex
reference_header = '<{addon}/{version}@{site}>'.format(
addon=latest_version.addon.id, version=latest_version.id,
site=settings.INBOUND_EMAIL_DOMAIN)
message_id = '<{addon}/{version}/{action}@{site}>'.format(
addon=latest_version.addon.id, version=latest_version.id,
action=action.id, site=settings.INBOUND_EMAIL_DOMAIN)
assert mail.outbox[0].extra_headers['In-Reply-To'] == reference_header
assert mail.outbox[0].extra_headers['References'] == reference_header
assert mail.outbox[0].extra_headers['Message-ID'] == message_id
reply_email = 'reviewreply+%s@%s' % (uuid, settings.INBOUND_EMAIL_DOMAIN)
assert mail.outbox[0].reply_to == [reply_email]
| {
"content_hash": "39a6cdf315eaf72ad79c36c9a6fb7a7e",
"timestamp": "",
"source": "github",
"line_count": 761,
"max_line_length": 79,
"avg_line_length": 45.26281208935611,
"alnum_prop": 0.636144578313253,
"repo_name": "atiqueahmedziad/addons-server",
"id": "564194dd4adcf408d5e1a4066ce4e3f718d32b14",
"size": "34509",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/activity/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810065"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "599024"
},
{
"name": "JavaScript",
"bytes": "1070220"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5272277"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1497"
}
],
"symlink_target": ""
} |
"""
Molecule images.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from rdkit.Chem import Draw
from vs_utils.features import Featurizer
from vs_utils.utils import image_utils, ob_utils
class MolImage(Featurizer):
"""
Molecule images.
Parameters
----------
size : int, optional (default 32)
Size (in any dimension) of generated images.
flatten : bool, optional (default False)
Whether to flatten the pixel array. If False, the features for each
molecule will be a 3D array.
engine : str, optional (default 'obabel')
Which engine to use to generate images. Choose from 'obabel' or
'rdkit'.
"""
name = 'image'
def __init__(self, size=32, flatten=False, engine='obabel'):
self.size = size
if not flatten:
self.topo_view = True
self.flatten = flatten
self.engine = engine
def _featurize(self, mol):
"""
Generate a 2D depiction of a molecule.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
dim = (self.size, self.size)
if self.engine == 'obabel':
image = ob_utils.MolImage(self.size)(mol)
elif self.engine == 'rdkit':
image = Draw.MolToImage(mol, dim, fitImage=True)
image = image.convert('RGB') # drop alpha channel
else:
raise NotImplementedError(self.engine)
pixels = image_utils.get_pixels(image)
if self.flatten:
pixels = pixels.ravel()
return pixels
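# ------------------------------------------------------------------------------
# Usage sketch (illustration only). Assumes RDKit is installed and uses the
# 'rdkit' engine so no Open Babel binary is needed; the SMILES string is just an
# example input, and the private per-molecule hook defined above is called
# directly for brevity.
if __name__ == '__main__':
    from rdkit import Chem

    featurizer = MolImage(size=64, flatten=True, engine='rdkit')
    mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol
    pixels = featurizer._featurize(mol)    # flattened 64 x 64 x 3 pixel array
    print(pixels.shape)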
| {
"content_hash": "f73f15441f4b4d3c8590a025bf703687",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 27.796610169491526,
"alnum_prop": 0.5841463414634146,
"repo_name": "rbharath/pande-gas",
"id": "298b3faab97f255355a47f9439ecdc8cac9e9614",
"size": "1640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vs_utils/features/images.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "247790"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
} |
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import uuid
import os
class Entry(models.Model):
def get_file_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join('licitacao', filename)
created = models.DateTimeField(_(u'Data de Criação'), auto_now_add=True)
modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
description = models.TextField(_(u'Objeto da Licitação'))
process = models.CharField(_(u'Processo Licitatório Nº'), max_length=20)
price = models.CharField(_(u'Tomada de Preços Nº'), max_length=20)
attach = models.FileField(_(u'Arquivo'), upload_to=get_file_path,
help_text='Selecione um arquivo')
def admin_attach(self):
if self.attach:
return "<a href='%s'>Baixar</a>" % self.attach.url
else:
return "Nenhum arquivo encontrado"
admin_attach.allow_tags = True
admin_attach.short_description = _(u'Arquivo')
def __unicode__(self):
return unicode(self.process)
class Meta:
verbose_name = _(u'Licitação')
verbose_name_plural = _(u'Licitações')
ordering = ['-created', 'description', 'process', 'price']
| {
"content_hash": "b73466ec25183bb31845c1bdff0aefe0",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 36.78378378378378,
"alnum_prop": 0.6385011021307861,
"repo_name": "klebercode/pmsal",
"id": "e5b8e866e930c18a142b42916b67ae5f4a0a4f0e",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmsal/bid/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129165"
},
{
"name": "HTML",
"bytes": "78095"
},
{
"name": "JavaScript",
"bytes": "204743"
},
{
"name": "Python",
"bytes": "52737"
}
],
"symlink_target": ""
} |
import argparse
import numpy as np
import sys
import queue
import uuid
import tritonclient.grpc as grpcclient
from tritonclient.utils import InferenceServerException
FLAGS = None
class UserData:
def __init__(self):
self._completed_requests = queue.Queue()
def sync_send(triton_client, result_list, values, batch_size, sequence_id,
model_name, model_version):
count = 1
for value in values:
# Create the tensor for INPUT
value_data = np.full(shape=[batch_size, 1],
fill_value=value,
dtype=np.int32)
inputs = []
inputs.append(grpcclient.InferInput('INPUT', value_data.shape, "INT32"))
# Initialize the data
inputs[0].set_data_from_numpy(value_data)
outputs = []
outputs.append(grpcclient.InferRequestedOutput('OUTPUT'))
# Issue the synchronous sequence inference.
result = triton_client.infer(model_name=model_name,
inputs=inputs,
outputs=outputs,
sequence_id=sequence_id,
sequence_start=(count == 1),
sequence_end=(count == len(values)))
result_list.append(result.as_numpy('OUTPUT'))
count = count + 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument(
'-u',
'--url',
type=str,
required=False,
default='localhost:8001',
help='Inference server URL and it gRPC port. Default is localhost:8001.'
)
parser.add_argument('-d',
'--dyna',
action="store_true",
required=False,
default=False,
help='Assume dynamic sequence model')
parser.add_argument('-o',
'--offset',
type=int,
required=False,
default=0,
help='Add offset to sequence ID used')
FLAGS = parser.parse_args()
try:
triton_client = grpcclient.InferenceServerClient(url=FLAGS.url,
verbose=FLAGS.verbose)
except Exception as e:
print("context creation failed: " + str(e))
sys.exit()
# We use custom "sequence" models which take 1 input
# value. The output is the accumulated value of the inputs. See
# src/custom/sequence.
int_sequence_model_name = "simple_dyna_sequence" if FLAGS.dyna else "simple_sequence"
string_sequence_model_name = "simple_string_dyna_sequence" if FLAGS.dyna else "simple_sequence"
model_version = ""
batch_size = 1
values = [11, 7, 5, 3, 2, 0, 1]
# Will use two sequences and send them synchronously. Note the
# sequence IDs should be non-zero because zero is reserved for
# non-sequence requests.
int_sequence_id0 = 1000 + FLAGS.offset * 2
int_sequence_id1 = 1001 + FLAGS.offset * 2
# For string sequence IDs, the dyna backend requires that the
# sequence id be decodable into an integer, otherwise we'll use
# a UUID4 sequence id and a model that doesn't require corrid
# control.
string_sequence_id0 = str(1002) if FLAGS.dyna else str(uuid.uuid4())
int_result0_list = []
int_result1_list = []
string_result0_list = []
user_data = UserData()
try:
sync_send(triton_client, int_result0_list, [0] + values, batch_size,
int_sequence_id0, int_sequence_model_name, model_version)
sync_send(triton_client, int_result1_list,
[100] + [-1 * val for val in values], batch_size,
int_sequence_id1, int_sequence_model_name, model_version)
sync_send(triton_client, string_result0_list,
[20] + [-1 * val for val in values], batch_size,
string_sequence_id0, string_sequence_model_name,
model_version)
except InferenceServerException as error:
print(error)
sys.exit(1)
for i in range(len(int_result0_list)):
int_seq0_expected = 1 if (i == 0) else values[i - 1]
int_seq1_expected = 101 if (i == 0) else values[i - 1] * -1
# For string sequence ID we are testing two different backends
if i == 0 and FLAGS.dyna:
string_seq0_expected = 20
elif i == 0 and not FLAGS.dyna:
string_seq0_expected = 21
elif i != 0 and FLAGS.dyna:
string_seq0_expected = values[i - 1] * -1 + int(
string_result0_list[i - 1][0][0])
else:
string_seq0_expected = values[i - 1] * -1
# The dyna_sequence custom backend adds the correlation ID
# to the last request in a sequence.
if FLAGS.dyna and (i != 0) and (values[i - 1] == 1):
int_seq0_expected += int_sequence_id0
int_seq1_expected += int_sequence_id1
string_seq0_expected += int(string_sequence_id0)
print("[" + str(i) + "] " + str(int_result0_list[i][0][0]) + " : " +
str(int_result1_list[i][0][0]) + " : " +
str(string_result0_list[i][0][0]))
if ((int_seq0_expected != int_result0_list[i][0][0]) or
(int_seq1_expected != int_result1_list[i][0][0]) or
(string_seq0_expected != string_result0_list[i][0][0])):
print("[ expected ] " + str(int_seq0_expected) + " : " +
str(int_seq1_expected) + " : " + str(string_seq0_expected))
sys.exit(1)
print("PASS: Sequence")
| {
"content_hash": "622185862e6bb254c29d73d6f4835719",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 99,
"avg_line_length": 37.522012578616355,
"alnum_prop": 0.5439155212872947,
"repo_name": "triton-inference-server/client",
"id": "68bd15f1b09797748e2a313536e1614d25a0872f",
"size": "7522",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/examples/simple_grpc_sequence_sync_infer_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "12998"
},
{
"name": "C++",
"bytes": "1722864"
},
{
"name": "CMake",
"bytes": "90419"
},
{
"name": "Go",
"bytes": "8979"
},
{
"name": "Java",
"bytes": "99082"
},
{
"name": "JavaScript",
"bytes": "5249"
},
{
"name": "Python",
"bytes": "552945"
},
{
"name": "Scala",
"bytes": "8330"
},
{
"name": "Shell",
"bytes": "7084"
}
],
"symlink_target": ""
} |
from .equals import equals
def equals_nocurry_test():
assert equals("foo", "foo")
def equals_curry_test():
assert equals("foo")("foo")
| {
"content_hash": "d73f0c0fe75ce031796cb4f3d3a96407",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 31,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.6530612244897959,
"repo_name": "jackfirth/pyramda",
"id": "2cc8d1c17dbe108b15a8c1719e33e84932986290",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramda/relation/equals_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41755"
}
],
"symlink_target": ""
} |
from werkzeug.utils import cached_property
from flask import json
from flask.testing import FlaskClient
from .repr import repr_str_short
class TestResponseMixin(object):
@cached_property
def json(self):
return json.loads(self.data)
@cached_property
def content(self):
return self.data.decode(self.charset)
def __repr__(self, full=False):
content = self.content if full else repr_str_short(self.content, 128)
return '<Response {}: {}>'.format(self.status, content)
class TestClient(FlaskClient):
def open(self, *args, **kwargs):
if 'json' in kwargs:
kwargs['data'] = json.dumps(kwargs.pop('json'))
kwargs['content_type'] = 'application/json'
return super(TestClient, self).open(*args, **kwargs)
def register_test_helpers(app):
if not issubclass(app.response_class, TestResponseMixin):
class TestResponse(TestResponseMixin, app.response_class):
pass
app.response_class = TestResponse
app.test_client_class = TestClient
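# Example usage (a sketch; `create_app` and the '/echo' endpoint are
# hypothetical, adjust to your own application and test setup):
#
#   app = create_app()
#   register_test_helpers(app)
#   client = app.test_client()
#   resp = client.post('/echo', json={'hello': 'world'})
#   assert resp.json == {'hello': 'world'}
#
# The json= keyword is handled by TestClient.open above; resp.json and
# resp.content come from TestResponseMixin.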
def check_gevent_concurrency(sleep='time.sleep', callback=None):
if isinstance(sleep, str):
        module = __import__('.'.join(sleep.split('.')[:-1]))
sleep = getattr(module, sleep.split('.')[-1])
callback = callback or (lambda x: print('concurrency={}'.format(x)))
check_gevent_concurrency._flag = False
def _set_concurrency():
sleep(0.01)
check_gevent_concurrency._flag = True
def _check_concurrency():
sleep(0.02)
callback(check_gevent_concurrency._flag)
import gevent
gevent.joinall([
gevent.spawn(_check_concurrency),
gevent.spawn(_set_concurrency),
])
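# Example (sketch): with gevent monkey-patching applied, the patched time.sleep
# yields to other greenlets, so the flag is set before it is read:
#
#   from gevent import monkey; monkey.patch_all()
#   check_gevent_concurrency()   # prints "concurrency=True"
#
# Without patching, time.sleep blocks the thread and the callback reports False.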
| {
"content_hash": "162ae9e387947a80ca750fc8430f0f5b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 29.5,
"alnum_prop": 0.6469900642898889,
"repo_name": "vgavro/flask-vgavro-utils",
"id": "37245fa1a444389310415d92230eee8ae2abfc44",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_vgavro_utils/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "79862"
},
{
"name": "Shell",
"bytes": "2338"
}
],
"symlink_target": ""
} |
import difflib
import sys
import pgcs.core.data
import pgcs.core.diff
import pgcs.core.load
core = pgcs.core
import pgcs.html.diff
html = pgcs.html
def get_object_name(object):
return "%s.%s" % (object.namespace.name, object.get_name())
def get_diff_name(diff):
return get_object_name(diff.objects[0])
def _copy_constraint(parent, name, definition):
print "ALTER TABLE %s ADD CONSTRAINT %s %s;" % (parent, name, definition)
def copy_primary_key(parent, object):
columns = [c.name for c in object.columns]
definition = "PRIMARY KEY (%s)" % ", ".join(columns)
_copy_constraint(parent, object.name, definition)
def copy_foreign_key(parent, object):
_copy_constraint(parent, object.name, object.definition)
def copy_check_column_constraint(parent, object):
_copy_constraint(parent, object.name, object.definition)
def alter_check_column_constraint(parent, object):
print "ALTER TABLE %s DROP CONSTRAINT %s;" % (parent, object.name)
copy_check_column_constraint(parent, object)
def copy_unique_column_constraint(parent, object):
_copy_constraint(parent, object.name, object.definition)
copiers = {
core.data.PrimaryKey: copy_primary_key,
core.data.ForeignKey: copy_foreign_key,
core.data.CheckColumnConstraint: copy_check_column_constraint,
core.data.UniqueColumnConstraint: copy_unique_column_constraint,
}
alterers = {
core.data.CheckColumnConstraint: alter_check_column_constraint,
}
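# Example of the SQL these copiers emit (a sketch; "public.users", "users_pkey"
# and the "id" column are hypothetical names):
#   ALTER TABLE public.users ADD CONSTRAINT users_pkey PRIMARY KEY (id);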
def copy_entry(parent, entry):
obj1, obj2 = entry.objects
if obj1 is not None and obj2 is None:
kind = type(obj1)
copier = copiers.get(kind)
if copier:
copier(parent, obj1)
if obj1 is not None and obj2 is not None:
kind = type(obj1)
alterer = alterers.get(kind)
if alterer:
alterer(parent, obj1)
def copy_entries(parent_diff, named_object_list):
parent_name = get_diff_name(parent_diff)
if named_object_list:
for entry in named_object_list.entries:
copy_entry(parent_name, entry)
def create_trigger(parent, object):
print "%s;" % object.description
def drop_trigger(parent, object):
print "DROP TRIGGER %s ON %s;" % (object.name, parent)
def alter_trigger(parent, object):
drop_trigger(parent, object)
create_trigger(parent, object)
def alter_trigger_entries(parent_diff, named_object_list):
parent_name = get_diff_name(parent_diff)
if named_object_list:
for entry in named_object_list.entries:
obj1, obj2 = entry.objects
if obj1 is not None and obj2 is not None:
alter_trigger(parent_name, obj1)
elif obj1 is not None:
create_trigger(parent_name, obj1)
elif obj2 is not None:
drop_trigger(parent_name, obj2)
def copy_table_column(table, object):
definition = "%s.%s" % (object.type.namespace.name, object.type.name)
if object.notnull:
definition += " NOT NULL"
if object.default:
definition += " DEFAULT %s" % object.default
print "ALTER TABLE %s ADD COLUMN %s %s;" % (table, object.name, definition)
def handle_table_columns(table_diff, seq1, seq2):
table_name = get_diff_name(table_diff)
hash1 = [html.diff.NamedHash(o) for o in seq1]
hash2 = [html.diff.NamedHash(o) for o in seq2]
match = difflib.SequenceMatcher(a=hash1, b=hash2)
inserted = {}
deleted = {}
for tag, i1, i2, j1, j2 in match.get_opcodes():
if tag in ("delete", "replace"):
for obj in seq1[i1:i2]:
deleted[obj.name] = obj
elif tag == "insert":
for obj in seq2[j1:j2]:
inserted[obj.name] = obj
for name, obj in deleted.iteritems():
if name not in inserted:
copy_table_column(table_name, obj)
def handle_table(diff):
if diff.columns:
handle_table_columns(diff, *diff.columns.lists)
copy_entries(diff, diff.constraints)
alter_trigger_entries(diff, diff.triggers)
def handle_function(diff):
if diff.source1:
obj1, obj2 = diff.objects
if obj1 is not None and obj2 is not None:
for referer in obj2.xrefs:
assert isinstance(referer, core.data.Trigger)
drop_trigger(get_object_name(referer.table), referer)
name = get_object_name(obj2)
print "DROP FUNCTION %s;" % name
print "CREATE FUNCTION %s ... ADD CODE HERE ... ;" % name
print "ALTER FUNCTION %s OWNER TO galleria;" % get_object_name(obj2)
for referer in obj2.xrefs:
create_trigger(get_object_name(referer.table), referer)
handlers = {
# core.diff.Function: handle_function,
core.diff.Table: handle_table,
}
def get_entries(named_object_list):
if named_object_list:
return named_object_list.entries
else:
return []
def get_diff(entry):
obj1, obj2 = entry.diff.objects
if obj1 is not None and obj2 is not None:
return entry.diff
else:
return None
def _get_diffs(diff):
for entry in get_entries(diff.languages):
yield get_diff(entry)
for entry in get_entries(diff.namespaces):
for seq in (entry.diff.types,
entry.diff.indexes,
entry.diff.tables,
entry.diff.views,
entry.diff.sequences,
entry.diff.functions):
for e in get_entries(seq):
yield get_diff(e)
def get_diffs(diff):
for diff in _get_diffs(diff):
if diff is not None:
yield diff
def main():
source, target = sys.argv[1:]
databases = core.load.load_databases([source, target])
diff_tree = core.diff.diff_databases(databases)
for diff in get_diffs(diff_tree):
kind = type(diff)
handler = handlers.get(kind)
if handler:
handler(diff)
if __name__ == "__main__":
main()
| {
"content_hash": "9aeeb767d4bb3aee25a52deffd3edc8f",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 76,
"avg_line_length": 26.814070351758794,
"alnum_prop": 0.704272863568216,
"repo_name": "somia/pgcs",
"id": "0159e28aee588fc8c2ac8045c198cbbc8c0f7f71",
"size": "5336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgcs/tool/alter.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from puzzle.server.settings import BaseConfig
def test_BaseConfig():
assert not BaseConfig.DEBUG
| {
"content_hash": "8b49ef7f955366303fa946333d2b4125",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 20.6,
"alnum_prop": 0.7864077669902912,
"repo_name": "robinandeer/puzzle",
"id": "395321d448c6f0794439271bff3be236432bd966",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/server/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "251"
},
{
"name": "HTML",
"bytes": "55258"
},
{
"name": "JavaScript",
"bytes": "1100"
},
{
"name": "Python",
"bytes": "233511"
}
],
"symlink_target": ""
} |
import argparse
import os
import subprocess
import shlex
import re
polly_src_dir = '''@POLLY_SOURCE_DIR@'''
polly_lib_dir = '''@POLLY_LIB_DIR@'''
shlibext = '''@LLVM_SHLIBEXT@'''
llvm_tools_dir = '''@LLVM_TOOLS_DIR@'''
link_polly_into_tools = not '''@LINK_POLLY_INTO_TOOLS@'''.lower() in {'','0','n','no','off','false','notfound','link_polly_into_tools-notfound'}
runre = re.compile(r'\s*\;\s*RUN\s*\:(?P<tool>.*)')
filecheckre = re.compile(r'\s*(?P<tool>.*)\|\s*(?P<filecheck>FileCheck\s[^|]*)')
emptyline = re.compile(r'\s*(\;\s*)?')
commentline = re.compile(r'\s*(\;.*)?')
def ltrim_emptylines(lines,meta=None):
while len(lines) and emptyline.fullmatch(lines[0]):
del lines[0]
if meta is not None:
del meta[0]
def rtrim_emptylines(lines):
while len(lines) and emptyline.fullmatch(lines[-1]):
del lines[-1]
def trim_emptylines(lines):
ltrim_emptylines(lines)
rtrim_emptylines(lines)
def complete_exename(path, filename):
complpath = os.path.join(path, filename)
if os.path.isfile(complpath):
return complpath
elif os.path.isfile(complpath + '.exe'):
return complpath + '.exe'
return filename
def indention(line):
for i,c in enumerate(line):
if c != ' ' and c != '\t':
return i
return None
def common_indent(lines):
indentions = (indention(line) for line in lines)
indentions = (indent for indent in indentions if indent is not None)
return min(indentions,default=0)
funcre = re.compile(r'^ Function: \S*$')
regionre = re.compile(r'^ Region: \S*$')
depthre = re.compile(r'^ Max Loop Depth: .*')
paramre = re.compile(r' [0-9a-z-A-Z_]+\: .*')
def classyfier1(lines):
i = iter(lines)
line = i.__next__()
while True:
if line.startswith("Printing analysis 'Polly - Calculate dependences' for region: "):
yield {'PrintingDependenceInfo'}
elif line.startswith("remark: "):
yield {'Remark'}
elif funcre.fullmatch(line):
yield {'Function'}
elif regionre.fullmatch(line):
yield { 'Region'}
elif depthre.fullmatch(line):
yield {'MaxLoopDepth'}
elif line == ' Invariant Accesses: {':
while True:
yield { 'InvariantAccesses'}
if line == ' }':
break
line = i.__next__()
elif line == ' Context:':
yield {'Context'}
line = i.__next__()
yield {'Context'}
elif line == ' Assumed Context:':
yield {'AssumedContext'}
line = i.__next__()
yield {'AssumedContext'}
elif line == ' Invalid Context:':
yield {'InvalidContext'}
line = i.__next__()
yield {'InvalidContext'}
elif line == ' Boundary Context:':
yield {'BoundaryContext'}
line = i.__next__()
yield {'BoundaryContext'}
line = i.__next__()
while paramre.fullmatch(line):
yield {'Param'}
line = i.__next__()
continue
elif line == ' Arrays {':
while True:
yield {'Arrays'}
if line == ' }':
break
line = i.__next__()
elif line == ' Arrays (Bounds as pw_affs) {':
while True:
yield {'PwAffArrays'}
if line == ' }':
break
line = i.__next__()
elif line.startswith(' Alias Groups ('):
while True:
yield {'AliasGroups'}
line = i.__next__()
if not line.startswith(' '):
break
continue
elif line == ' Statements {':
while True:
yield {'Statements'}
if line == ' }':
break
line = i.__next__()
elif line == ' RAW dependences:':
yield {'RAWDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
while line.startswith(' '):
yield {'RAWDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
continue
elif line == ' WAR dependences:':
yield {'WARDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
while line.startswith(' '):
yield {'WARDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
continue
elif line == ' WAW dependences:':
yield {'WAWDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
while line.startswith(' '):
yield {'WAWDep','BasicDep','Dep','DepInfo'}
line = i.__next__()
continue
elif line == ' Reduction dependences:':
yield {'RedDep','Dep','DepInfo'}
line = i.__next__()
while line.startswith(' '):
yield {'RedDep','Dep','DepInfo'}
line = i.__next__()
continue
elif line == ' Transitive closure of reduction dependences:':
yield {'TransitiveClosureDep','DepInfo'}
line = i.__next__()
while line.startswith(' '):
yield {'TransitiveClosureDep','DepInfo'}
line = i.__next__()
continue
elif line.startswith("New access function '"):
yield {'NewAccessFunction'}
elif line == 'Schedule before flattening {':
while True:
yield {'ScheduleBeforeFlattening'}
if line == '}':
break
line = i.__next__()
elif line == 'Schedule after flattening {':
while True:
yield {'ScheduleAfterFlattening'}
if line == '}':
break
line = i.__next__()
else:
yield set()
line = i.__next__()
def classyfier2(lines):
i = iter(lines)
line = i.__next__()
while True:
if funcre.fullmatch(line):
while line.startswith(' '):
yield {'FunctionDetail'}
line = i.__next__()
continue
elif line.startswith("Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: "):
yield {'PrintingIslAst'}
line = i.__next__()
while not line.startswith('Printing analysis'):
yield {'AstDetail'}
line = i.__next__()
continue
else:
yield set()
line = i.__next__()
replrepl = {'{{':'{{[{][{]}}','}}': '{{[}][}]}}', '[[':'{{\[\[}}',']]': '{{\]\]}}'}
replre = re.compile('|'.join(re.escape(k) for k in replrepl.keys()))
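# FileCheck treats '{{...}}' and '[[...]]' specially, so any literal occurrence
# of those sequences in the tool output is rewritten into a FileCheck regex
# block that matches the characters literally before being emitted as a CHECK line.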
def main():
parser = argparse.ArgumentParser(description="Update CHECK lines")
parser.add_argument('testfile',help="File to update (absolute or relative to --testdir)")
parser.add_argument('--check-style',choices=['CHECK','CHECK-NEXT'],default='CHECK-NEXT',help="What kind of checks lines to generate")
    parser.add_argument('--check-position',choices=['end','before-content','autodetect'],default='autodetect',help="Where to add the CHECK lines into the file; 'autodetect' searches for the first 'CHECK' line and inserts it there")
parser.add_argument('--check-include',action='append',default=[], help="What parts of the output lines to check; use syntax 'CHECK=include' to apply to one CHECK-prefix only (by default, everything)")
parser.add_argument('--check-label-include',action='append',default=[],help="Use CHECK-LABEL for these includes")
parser.add_argument('--check-part-newline',action='store_true',help="Add empty line between different check parts")
parser.add_argument('--prefix-only',action='append',default=None,help="Update only these prefixes (default: all)")
parser.add_argument('--bindir',help="Location of the opt program")
parser.add_argument('--testdir',help="Root dir for unit tests")
parser.add_argument('--inplace','-i',action='store_true',help="Replace input file")
parser.add_argument('--output','-o',help="Write changed input to this file")
known = parser.parse_args()
if not known.inplace and known.output is None:
print("Must specify what to do with output (--output or --inplace)")
exit(1)
if known.inplace and known.output is not None:
print("--inplace and --output are mutually exclusive")
exit(1)
outfile = known.output
filecheckparser = argparse.ArgumentParser(add_help=False)
filecheckparser.add_argument('-check-prefix','--check-prefix',default='CHECK')
filename = known.testfile
for dir in ['.', known.testdir, os.path.join(polly_src_dir,'test'), polly_src_dir]:
if not dir:
continue
testfilename = os.path.join(dir,filename)
if os.path.isfile(testfilename):
filename = testfilename
break
if known.inplace:
outfile = filename
allchecklines = []
checkprefixes = []
with open(filename, 'r') as file:
oldlines = [line.rstrip('\r\n') for line in file.readlines()]
runlines = []
for line in oldlines:
m = runre.match(line)
if m:
runlines.append(m.group('tool'))
continuation = ''
newrunlines = []
for line in runlines:
if line.endswith('\\'):
continuation += line[:-2] + ' '
else:
newrunlines.append(continuation + line)
continuation = ''
if continuation:
newrunlines.append(continuation)
for line in newrunlines:
m = filecheckre.match(line)
if not m:
continue
tool, filecheck = m.group('tool', 'filecheck')
filecheck = shlex.split(filecheck)
tool = shlex.split(tool)
if known.bindir is not None:
tool[0] = complete_exename(known.bindir, tool[0])
if os.path.isdir(llvm_tools_dir):
tool[0] = complete_exename(llvm_tools_dir, tool[0])
check_prefix = filecheckparser.parse_known_args(filecheck)[0].check_prefix
if known.prefix_only is not None and not check_prefix in known.prefix_only:
continue
if check_prefix in checkprefixes:
continue
checkprefixes.append(check_prefix)
newtool = []
optstderr = None
for toolarg in tool:
toolarg = toolarg.replace('%s', filename)
toolarg = toolarg.replace('%S', os.path.dirname(filename))
if toolarg == '%loadPolly':
if not link_polly_into_tools:
newtool += ['-load',os.path.join(polly_lib_dir,'LLVMPolly' + shlibext)]
newtool.append('-polly-process-unprofitable')
newtool.append('-polly-remarks-minimal')
elif toolarg == '2>&1':
optstderr = subprocess.STDOUT
else:
newtool.append(toolarg)
tool = newtool
inpfile = None
i = 1
while i < len(tool):
if tool[i] == '<':
inpfile = tool[i + 1]
del tool[i:i + 2]
continue
i += 1
if inpfile:
with open(inpfile) as inp:
retlines = subprocess.check_output(tool,universal_newlines=True,stdin=inp,stderr=optstderr)
else:
retlines = subprocess.check_output(tool,universal_newlines=True,stderr=optstderr)
retlines = [line.replace('\t', ' ') for line in retlines.splitlines()]
check_include = []
for checkme in known.check_include + known.check_label_include:
parts = checkme.split('=')
if len(parts) == 2:
if parts[0] == check_prefix:
check_include.append(parts[1])
else:
check_include.append(checkme)
if check_include:
filtered_retlines = []
classified_retlines = []
lastmatch = None
for line,kind in ((line,class1.union(class2)) for line,class1,class2 in zip(retlines,classyfier1(retlines), classyfier2(retlines))):
match = kind.intersection(check_include)
if match:
if lastmatch != match:
filtered_retlines.append('')
classified_retlines.append({'Separator'})
filtered_retlines.append(line)
classified_retlines.append(kind)
lastmatch = match
retlines = filtered_retlines
else:
classified_retlines = (set() for line in retlines)
rtrim_emptylines(retlines)
ltrim_emptylines(retlines,classified_retlines)
retlines = [replre.sub(lambda m: replrepl[m.group(0)], line) for line in retlines]
indent = common_indent(retlines)
retlines = [line[indent:] for line in retlines]
checklines = []
previous_was_empty = True
for line,kind in zip(retlines,classified_retlines):
if line:
if known.check_style == 'CHECK' and known.check_label_include:
if not kind.isdisjoint(known.check_label_include):
checklines.append('; ' + check_prefix + '-LABEL: ' + line)
else:
checklines.append('; ' + check_prefix + ': ' + line)
elif known.check_style == 'CHECK':
checklines.append('; ' + check_prefix + ': ' + line)
                elif known.check_label_include:
if not kind.isdisjoint(known.check_label_include):
checklines.append('; ' + check_prefix + '-LABEL: ' + line)
elif previous_was_empty:
checklines.append('; ' + check_prefix + ': ' + line)
else:
checklines.append('; ' + check_prefix + '-NEXT: ' + line)
else:
if previous_was_empty:
checklines.append('; ' + check_prefix + ': ' + line)
else:
checklines.append('; ' + check_prefix + '-NEXT: ' + line)
previous_was_empty = False
else:
if not 'Separator' in kind or known.check_part_newline:
checklines.append(';')
previous_was_empty = True
allchecklines.append(checklines)
if not checkprefixes:
return
checkre = re.compile(r'^\s*\;\s*(' + '|'.join([re.escape(s) for s in checkprefixes]) + ')(\-NEXT|\-DAG|\-NOT|\-LABEL|\-SAME)?\s*\:')
firstcheckline = None
firstnoncommentline = None
headerlines = []
newlines = []
uptonowlines = []
emptylines = []
lastwascheck = False
for line in oldlines:
if checkre.match(line):
if firstcheckline is None:
firstcheckline = len(newlines) + len(emptylines)
if not lastwascheck:
uptonowlines += emptylines
emptylines = []
lastwascheck = True
elif emptyline.fullmatch(line):
emptylines.append(line)
else:
newlines += uptonowlines
newlines += emptylines
newlines.append(line)
emptylines = []
uptonowlines = []
lastwascheck = False
for i,line in enumerate(newlines):
if not commentline.fullmatch(line):
firstnoncommentline = i
break
with open(outfile,'w',newline='') as file:
def writelines(lines):
for line in lines:
file.write(line)
file.write('\n')
if firstcheckline is not None and known.check_position == 'autodetect':
writelines(newlines[:firstcheckline])
writelines(uptonowlines)
for i,checklines in enumerate(allchecklines):
if i != 0:
file.write('\n')
writelines(checklines)
writelines(newlines[firstcheckline:])
writelines(emptylines)
elif firstnoncommentline is not None and known.check_position == 'before-content':
headerlines = newlines[:firstnoncommentline]
rtrim_emptylines(headerlines)
contentlines = newlines[firstnoncommentline:]
ltrim_emptylines(contentlines)
writelines(headerlines)
for checklines in allchecklines:
file.write('\n')
writelines(checklines)
file.write('\n')
writelines(contentlines)
writelines(uptonowlines)
writelines(emptylines)
else:
writelines(newlines)
rtrim_emptylines(newlines)
for checklines in allchecklines:
file.write('\n\n')
writelines(checklines)
if __name__ == '__main__':
main()
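# Example invocation (a sketch; the paths are placeholders for an actual LLVM
# build tree and Polly test tree, and all flags are defined in main() above):
#   update_check.py -i --bindir=/path/to/build/bin --testdir=polly/test \
#       ScopInfo/some_test.ll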
| {
"content_hash": "11a373183d30cb59cde05124b8af4c46",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 231,
"avg_line_length": 38.03532008830022,
"alnum_prop": 0.5286128845037725,
"repo_name": "youtube/cobalt_sandbox",
"id": "318fcfe53c9f3f4ce703588bf220df7c2442a800",
"size": "17414",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/polly/test/update_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "abudget.settings.base")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "af70ad57cae9de9eae862ef8bae54202",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 34.2,
"alnum_prop": 0.8011695906432749,
"repo_name": "koriaf/django-abudget",
"id": "d13cb3b3627616acd1ff8b3dddbe5f374b63c382",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/abudget/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5159"
},
{
"name": "Dockerfile",
"bytes": "320"
},
{
"name": "HTML",
"bytes": "25170"
},
{
"name": "JavaScript",
"bytes": "5058"
},
{
"name": "Python",
"bytes": "40674"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns
from views import IndexView
urlpatterns = patterns('',
(r'^$', IndexView.as_view()),
)
| {
"content_hash": "e24a551339657d338d3b89c721a6d66a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 37,
"avg_line_length": 18.714285714285715,
"alnum_prop": 0.6946564885496184,
"repo_name": "littleq0903/fumoufeed",
"id": "2161d4bf92ce7ec668493ab8ea5814b96d16512e",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fumoufeed/apps/globals/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "279"
},
{
"name": "JavaScript",
"bytes": "48303"
},
{
"name": "Python",
"bytes": "14156"
}
],
"symlink_target": ""
} |
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import netaddr
import os
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
linux_net_opts = [
cfg.StrOpt('dhcpbridge_flagfile',
default='/etc/nova/nova-dhcpbridge.conf',
help='location of flagfile for dhcpbridge'),
cfg.StrOpt('networks_path',
default='$state_path/networks',
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
help='Interface for public IP addresses'),
cfg.StrOpt('network_device_mtu',
default=None,
help='MTU setting for vlan'),
cfg.StrOpt('dhcpbridge',
default='$bindir/nova-dhcpbridge',
help='location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help='Public IP of network host'),
cfg.IntOpt('dhcp_lease_time',
default=120,
help='Lifetime of a DHCP lease in seconds'),
cfg.StrOpt('dns_server',
default=None,
help='if set, uses specific dns server for dnsmasq'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
cfg.StrOpt('linuxnet_interface_driver',
default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
help='Driver used to create ethernet devices.'),
cfg.StrOpt('linuxnet_ovs_integration_bridge',
default='br-int',
help='Name of Open vSwitch bridge used with linuxnet'),
cfg.BoolOpt('send_arp_for_ha',
default=False,
help='send gratuitous ARPs for HA setup'),
cfg.IntOpt('send_arp_for_ha_count',
default=3,
help='send this many gratuitous ARPs for HA setup'),
cfg.BoolOpt('use_single_default_gateway',
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(linux_net_opts)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
# new rules should have a zero [packet: byte] count
return '[0:0] -A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
        This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
if not wrap:
self.remove_chains.add(name)
chain_set.remove(name)
if not wrap:
self.remove_rules += filter(lambda r: r.chain == name, self.rules)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
if not wrap:
self.remove_rules += filter(lambda r: jump_snippet in r.rule,
self.rules)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(IptablesRule(chain, rule, wrap, top))
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same was as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
self.iptables_apply_deferred = False
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
@utils.synchronized('iptables', external=True)
def _apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
for table in tables:
current_table, _err = self.execute('%s-save' % (cmd,), '-c',
'-t', '%s' % (table,),
run_as_root=True,
attempts=5)
current_lines = current_table.split('\n')
new_filter = self._modify_rules(current_lines,
tables[table])
self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(new_filter),
attempts=5)
LOG.debug(_("IPTablesManager.apply completed with success"))
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
# We don't want to remove an entry if it has non-zero
# [packet:byte] counts and replace it with [0:0], so let's
# go look for a duplicate, and over-ride our table rule if
# found.
# ignore [packet:byte] counts at beginning of line
if rule_str.startswith('['):
rule_str = rule_str.split(']', 1)[1]
dup_filter = filter(lambda s: rule_str.strip() in s.strip(),
new_filter)
new_filter = filter(lambda s:
rule_str.strip() not in s.strip(),
new_filter)
# if no duplicates, use original rule
if dup_filter:
# grab the last entry, if there is one
dup = dup_filter[-1]
rule_str = str(dup)
else:
rule_str = str(rule)
rule_str.strip()
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
(binary_name, name,)
for name in chains]
seen_lines = set()
def _weed_out_duplicates(line):
# ignore [packet:byte] counts at beginning of lines
if line.startswith('['):
line = line.split(']', 1)[1]
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
# it's a chain, for example, ":nova-billing - [0:0]"
# strip off everything except the chain name
line = line.split(':')[1]
line = line.split('- [')[0]
line = line.strip()
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
# it's a rule
# ignore [packet:byte] counts at beginning of lines
line = line.split(']', 1)[1]
line = line.strip()
for rule in remove_rules:
# ignore [packet:byte] counts at beginning of rules
rule_str = str(rule)
rule_str = rule_str.split(' ', 1)[1]
rule_str = rule_str.strip()
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates, letting the *last* occurrence take
        # precedence. We also filter out anything in the "remove"
# lists.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter = filter(_weed_out_removes, new_filter)
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
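# Example usage of the wrapped-chain API (a sketch; 'my-chain' is a
# hypothetical chain name and the module-level iptables_manager instance is
# assumed to exist, as it is used elsewhere in this module):
#
#   iptables_manager.ipv4['filter'].add_chain('my-chain')
#   iptables_manager.ipv4['filter'].add_rule('local', '-j $my-chain')
#   iptables_manager.ipv4['filter'].add_rule('my-chain',
#                                            '-p tcp --dport 8080 -j ACCEPT')
#   iptables_manager.apply()
#
# The '$' prefix expands to '<binary_name>-my-chain', matching the wrapping
# behaviour described in the IptablesManager docstring.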
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
with open(file, mode) as f:
f.write(data)
def metadata_forward():
"""Create forwarding rule for metadata."""
if FLAGS.metadata_host != '127.0.0.1':
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
else:
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 '
'-j REDIRECT --to-ports %s' %
FLAGS.metadata_port)
iptables_manager.apply()
def metadata_accept():
"""Create the filter accept rule for metadata."""
iptables_manager.ipv4['filter'].add_rule('INPUT',
'-s 0.0.0.0/0 -d %s '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
iptables_manager.apply()
def add_snat_rule(ip_range):
if FLAGS.routing_source_ip:
rule = '-s %s -j SNAT --to-source %s' % (ip_range,
FLAGS.routing_source_ip)
if FLAGS.public_interface:
rule += ' -o %s' % FLAGS.public_interface
iptables_manager.ipv4['nat'].add_rule('snat', rule)
iptables_manager.apply()
def init_host(ip_range=None):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
if not ip_range:
ip_range = FLAGS.fixed_range
add_snat_rule(ip_range)
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, FLAGS.metadata_host))
for dmz in FLAGS.dmz_cidr:
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' %
(ip_range, dmz))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-m conntrack ! --ctstate DNAT '
'-j ACCEPT' %
{'range': ip_range})
iptables_manager.apply()
def send_arp_for_ip(ip, device, count):
out, err = _execute('arping', '-U', ip,
'-A', '-I', device,
'-c', str(count),
run_as_root=True, check_exit_code=False)
if err:
LOG.debug(_('arping error for ip %s'), ip)
def bind_floating_ip(floating_ip, device):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
if FLAGS.send_arp_for_ha and FLAGS.send_arp_for_ha_count > 0:
send_arp_for_ip(floating_ip, device, FLAGS.send_arp_for_ha_count)
def unbind_floating_ip(floating_ip, device):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule('OUTPUT',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip, device):
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip, device):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip, device):
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
rule += ' -o %s' % device
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('float-snat', rule)]
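# Example (sketch): floating_forward_rules('10.0.0.5', '192.168.0.2', 'eth0')
# returns PREROUTING and OUTPUT DNAT rules translating 10.0.0.5 to 192.168.0.2,
# plus a float-snat rule '-s 192.168.0.2 -j SNAT --to 10.0.0.5 -o eth0'.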
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
_execute('sysctl', '-w', 'net.ipv4.ip_forward=1', run_as_root=True)
# NOTE(vish): The ip for dnsmasq has to be the first address on the
    #             bridge for it to respond to requests properly
full_ip = '%s/%s' % (network_ref['dhcp_server'],
network_ref['cidr'].rpartition('/')[2])
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
old_routes = []
result = _execute('ip', 'route', 'show', 'dev', dev,
run_as_root=True)
if result:
out, err = result
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', fields[0],
'dev', dev, run_as_root=True)
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if FLAGS.send_arp_for_ha and FLAGS.send_arp_for_ha_count > 0:
send_arp_for_ip(network_ref['dhcp_server'], dev,
FLAGS.send_arp_for_ha_count)
if(FLAGS.use_ipv6):
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_lease(data))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref):
"""Get network's hosts config in dhcp-host format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_dhcp(data))
return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
data = db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host)
if data:
instance_set = set([datum['instance_uuid'] for datum in data])
default_gw_vif = {}
for instance_uuid in instance_set:
vifs = db.virtual_interface_get_by_instance(context,
instance_uuid)
if vifs:
#offer a default gateway to the first virtual interface
default_gw_vif[instance_uuid] = vifs[0]['id']
        for datum in data:
            instance_uuid = datum['instance_uuid']
            if instance_uuid in default_gw_vif:
# we don't want default gateway for this fixed ip
if default_gw_vif[instance_uuid] != datum['vif_id']:
hosts.append(_host_dhcp_opts(datum))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
def update_dhcp(context, dev, network_ref):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, get_dhcp_hosts(context, network_ref))
restart_dhcp(context, dev, network_ref)
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
pid = _dnsmasq_pid_for(dev)
if pid:
# Check that the process exists and looks like a dnsmasq process
conffile = _dhcp_file(dev, 'conf')
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile.split('/')[-1] in out:
_execute('kill', '-9', pid, run_as_root=True)
else:
LOG.debug(_('Pid %d is stale, skip killing dnsmasq'), pid)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
#           configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
if FLAGS.use_single_default_gateway:
# NOTE(vish): this will have serious performance implications if we
# are not in multi_host mode.
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0644)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
# Using symlinks can cause problems here so just compare the name
# of the file itself
if conffile.split('/')[-1] in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
return
except Exception as exc: # pylint: disable=W0703
LOG.error(_('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=set:\'%s\',%s,static,%ss' %
(network_ref['label'],
network_ref['dhcp_start'],
FLAGS.dhcp_lease_time),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
if FLAGS.use_single_default_gateway:
cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.error(_('killing radvd threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(data):
"""Return a host string for an address in leasefile format."""
if data['instance_updated']:
timestamp = data['instance_updated']
else:
timestamp = data['instance_created']
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
data['vif_address'],
data['address'],
data['instance_hostname'] or '*')
def _host_dhcp_network(data):
return 'NW-%s' % data['vif_id']
def _host_dhcp(data):
"""Return a host string for an address in dhcp-host format."""
if FLAGS.use_single_default_gateway:
return '%s,%s.%s,%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'],
'net:' + _host_dhcp_network(data))
else:
return '%s,%s.%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'])
def _host_dhcp_opts(data):
"""Return an empty gateway option."""
return '%s,%s' % (_host_dhcp_network(data), 3)
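# The bare option tag "3" (the DHCP router option) with no value tells dnsmasq
# not to advertise a default gateway to hosts carrying this network tag, which
# is how use_single_default_gateway restricts the gateway to the first vif.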
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False, run_as_root=True)
return not err
def _dhcp_file(dev, kind):
"""Return path to a pid, leases or conf file for a bridge/device."""
utils.ensure_tree(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
utils.ensure_tree(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
try:
with open(pid_file, 'r') as f:
return int(f.read())
except (ValueError, IOError):
return None
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
def _create_veth_pair(dev1_name, dev2_name):
"""Create a pair of veth devices with the specified names,
deleting any previous devices with those names.
"""
for dev in [dev1_name, dev2_name]:
if _device_exists(dev):
try:
utils.execute('ip', 'link', 'delete', dev1_name,
run_as_root=True, check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
LOG.exception("Error clearing stale veth %s" % dev)
utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
'name', dev2_name, run_as_root=True)
for dev in [dev1_name, dev2_name]:
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'promisc', 'on',
run_as_root=True)
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None
def _get_interface_driver():
global interface_driver
if not interface_driver:
interface_driver = importutils.import_object(
FLAGS.linuxnet_interface_driver)
return interface_driver
def plug(network, mac_address, gateway=True):
return _get_interface_driver().plug(network, mac_address, gateway)
def unplug(network):
return _get_interface_driver().unplug(network)
def get_dev(network):
return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API"""
""" for for all Linux interface drivers."""
def plug(self, network, mac_address):
"""Create Linux device, return device name"""
raise NotImplementedError()
def unplug(self, network):
"""Destory Linux device, return device name"""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name"""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
if network.get('vlan', None) is not None:
iface = FLAGS.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
iface,
network,
mac_address)
else:
iface = FLAGS.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface,
network, gateway)
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
def unplug(self, network):
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@classmethod
def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@classmethod
@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
            LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True,
check_exit_code=[0, 2, 254])
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
mac_address, run_as_root=True,
check_exit_code=[0, 2, 254])
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', interface, 'mtu',
FLAGS.network_device_mtu, run_as_root=True,
check_exit_code=[0, 2, 254])
return interface
@classmethod
@utils.synchronized('ensure_bridge', external=True)
def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not _device_exists(bridge):
LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
# NOTE(danms): We also need to copy routes to the bridge so as
# not to break existing connectivity on the interface
old_routes = []
out, err = _execute('ip', 'route', 'show', 'dev', interface)
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', *fields,
run_as_root=True)
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if (err and err != "device %s is already a member of a bridge;"
"can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
else:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
if not _device_exists(dev):
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
'--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, 'type=internal',
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-status=active',
'--', 'set', 'Interface', dev,
'external-ids:attached-mac=%s' % mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', dev, 'mtu',
FLAGS.network_device_mtu, run_as_root=True)
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
_execute('ovs-ofctl',
'add-flow', bridge, 'priority=1,actions=drop',
run_as_root=True)
_execute('ovs-ofctl', 'add-flow', bridge,
'udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal' %
mac_address, run_as_root=True)
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
return dev
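    # Illustrative call (hypothetical values):
    #   driver.plug({'uuid': '1f2e3d4c-5b6a-7890-...'}, '02:16:3e:12:34:56',
    #               gateway=False)
    # creates an internal port named 'gw-' plus the first 11 characters of
    # the network uuid on the OVS integration bridge and blocks all non-DHCP
    # traffic through it.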
def unplug(self, network):
dev = self.get_dev(network)
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl', '--', '--if-exists', 'del-port',
bridge, dev, run_as_root=True)
return dev
def get_dev(self, network):
dev = 'gw-' + str(network['uuid'][0:11])
return dev
# plugs interfaces using Linux Bridge when using QuantumManager
class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
BRIDGE_NAME_PREFIX = 'brq'
GATEWAY_INTERFACE_PREFIX = 'gw-'
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
bridge = self.get_bridge(network)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
return bridge
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address)
if not _device_exists(bridge):
LOG.debug(_("Starting bridge %s "), bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
LOG.debug(_("Done starting bridge %s"), bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
run_as_root=True, check_exit_code=[0, 2, 254])
return dev
def unplug(self, network):
dev = self.get_dev(network)
if not _device_exists(dev):
return None
else:
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True,
check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
LOG.error(_("Failed unplugging gateway interface '%s'"), dev)
raise
LOG.debug(_("Unplugged gateway interface '%s'"), dev)
return dev
@classmethod
def create_tap_dev(_self, dev, mac_address=None):
if not _device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True, check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
return dev
def get_bridge(self, network):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
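# Module-level IptablesManager instance shared by the interface drivers above
# for installing the per-bridge FORWARD ACCEPT/DROP rules.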
iptables_manager = IptablesManager()
| {
"content_hash": "1fea03fb65ba6922d9fff22e789bfc48",
"timestamp": "",
"source": "github",
"line_count": 1341,
"max_line_length": 79,
"avg_line_length": 38.956002982848624,
"alnum_prop": 0.5266653905053599,
"repo_name": "savi-dev/nova",
"id": "f991b36594137307013ed25c8de7d3a42fefe13d",
"size": "53079",
"binary": false,
"copies": "3",
"ref": "refs/heads/silver",
"path": "nova/network/linux_net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7168734"
},
{
"name": "Shell",
"bytes": "16910"
}
],
"symlink_target": ""
} |
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit test suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, tpu_strategy.TPUStrategy):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
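# Illustrative use: batch_wrapper(dataset, 64, distribution, repeat=2) repeats
# the dataset twice and batches it into batches of 64, dropping the remainder
# only when running under a TPUStrategy.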
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras with DS."""
training_epochs = 2
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
not distributed_training_utils.global_batch_size_supported(
with_distribution))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': training_epochs,
'shuffle': False,
}
if use_validation_data:
eval_inputs = None
training_inputs['validation_data'] = (x_train, y_train)
else:
eval_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
}
predict_inputs = {
'x': np.array(x_predict, dtype=np.float32),
}
else:
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(
train_dataset, batch_size, with_distribution, repeat=training_epochs)
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': training_epochs,
'shuffle': False,
'steps_per_epoch': len(x_train) // global_batch_size,
}
if use_validation_data:
      eval_inputs = None  # no separate evaluate(); validation runs inside fit()
eval_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(eval_dataset, batch_size, with_distribution)
training_inputs['validation_data'] = x
training_inputs['validation_steps'] = 5
else:
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': 20,
}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset,
predict_batch_size, with_distribution)
predict_inputs = {
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
strategies_minus_tpu = [
combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus]
tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'])
def tpu_strategy_combinations():
return combinations.combine(
distribution=tpu_strategies,
mode=['graph'])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
# TODO(priyag): Add v2 optimizers here.
def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(
optimizer=[combinations.adagrad_optimizer_v1_fn,
combinations.adam_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.rmsprop_optimizer_v1_fn]))
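# Cross-product of strategies with input modes: non-TPU strategies are covered
# in graph and eager mode (eager only with dataset inputs and no validation
# data), TPU strategies in graph mode only.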
def strategy_and_input_combinations():
return (
combinations.times(
combinations.combine(distribution=strategies_minus_tpu),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])
+ combinations.combine(mode=['eager'],
use_numpy=[False],
use_validation_data=[False])) +
combinations.times(
combinations.combine(distribution=tpu_strategies),
combinations.combine(mode=['graph'],
use_numpy=[True, False],
use_validation_data=[True, False])))
def strategy_for_numpy_input_combinations():
return combinations.combine(
distribution=strategies_minus_tpu + tpu_strategies,
mode=['graph'])
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and result in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and result in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine(
distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((20, 3), np.float32)
targets = np.zeros((20, 4), np.float32)
sample_weights = np.ones((20), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_for_numpy_input_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
      # `predict` returns a list that is equal in length to the number of
      # model outputs. In this test our model has two outputs and each
      # element of `outs` corresponds to all the samples of one of the model
      # outputs.
self.assertLen(outs, 2)
# Each of the output samples have a dimension of 7. We should process all
# the available input samples(6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
# Input shapes are not fully known. Batch dimension is unknown as we are
# not using the drop_remainder argument.
dataset = dataset.repeat(100).batch(10)
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
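    # With a dropout rate of 0.9999 nearly every activation is zeroed while
    # the learning phase is 'training', so fit() accuracy stays near 0; at
    # inference time dropout is a no-op, so predict() passes the ones-valued
    # inputs through the ones-initialized Dense layer and returns ~1.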
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
# CoreMirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(strategy_minus_tpu_combinations())
def testOptimizerWithCallbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
grouped_models = distribution.unwrap(model._distributed_model)
with distribution.scope():
for m in grouped_models:
self.assertAllClose(0.001, keras.backend.get_value(
m.optimizer.lr), atol=1e-05, rtol=1e-05)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.predict(dataset, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'You must specify a Keras Optimizer V2 when '
'using'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
class TestDistributionStrategyWithLossMasking(test.TestCase,
parameterized.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
def test_masking(self, distribution):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
      # centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
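      # Undo the learned affine transform (beta/gamma): if the normalization
      # statistics were learned correctly, the remaining output should be
      # approximately zero-mean with unit variance, as asserted below.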
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution)
batch_size = 64
if not distributed_training_utils.global_batch_size_supported(
distribution):
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@combinations.generate(all_strategy_combinations())
def test_eval_metrics_correctness(self, distribution):
with self.cached_session():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
distribute=distribution)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
y = np.ones((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = batch_wrapper(dataset, 4, distribution)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@combinations.generate(strategy_and_input_combinations())
def test_correctness(self, distribution, use_numpy, use_validation_data):
with self.cached_session():
default_tolerance = 1e-5
tol_table = {}
if isinstance(distribution, (
mirrored_strategy.MirroredStrategy,
mirrored_strategy.CoreMirroredStrategy,
distribute_lib._DefaultDistributionStrategy)): # pylint: disable=protected-access
# TODO(b/119257215): Weights are not exactly the same, so use larger
# tolerance for now. Predict should be related to weights.
tol_table = {
'weights_1': 1e-4,
'weights_2': 1e-4,
'predict_result_1': 1e-4,
}
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
# non-distribution run. In addition, we add few non-linear layers to make
# it non-trivial.
def _create_model():
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
return model
model = _create_model()
initial_weights = model.get_weights()
      del model  # avoid accidental usage.
def fit_eval_and_predict(with_distribution=None):
model = _create_model()
# We have initialized the model to the same weight for the distribution
# and non-distribution run.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.5),
metrics=['mse'],
distribute=with_distribution)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution,
x_train, y_train, x_predict))
result = {}
result['training_history_1'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_1'] = model.evaluate(**eval_inputs)
result['weights_1'] = model.get_weights()
result['predict_result_1'] = model.predict(**predict_inputs)
# Train and eval again to mimic user's flow.
result['training_history_2'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_2'] = model.evaluate(**eval_inputs)
result['weights_2'] = model.get_weights()
return result
results_with_ds = fit_eval_and_predict(with_distribution=distribution)
results_without_ds = fit_eval_and_predict(with_distribution=None)
# Verify that the weights, training history, eval results, predict outputs
# are the same within some limits of tolerance.
for key in results_with_ds:
if (key.startswith('training_history') and
isinstance(distribution, tpu_strategy.TPUStrategy) and
distribution.extended.steps_per_run > 1):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = tol_table.get(key, default_tolerance)
self.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg='Fail to assert {}.'.format(key))
if __name__ == '__main__':
test.main()
| {
"content_hash": "9120081aa40f7d7cd1aea99f49ab237b",
"timestamp": "",
"source": "github",
"line_count": 1057,
"max_line_length": 92,
"avg_line_length": 38.23084200567644,
"alnum_prop": 0.6515466468695867,
"repo_name": "apark263/tensorflow",
"id": "92de8e643e7588365c23dc8513e197c0869c9ecf",
"size": "41099",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/keras_backward_compat_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from zaqarclient.queues.v1 import client as zaqarclient
from zaqarclient.transport import errors as zaqar_errors
from heat.common.i18n import _LE
from heat.engine.clients import client_plugin
LOG = logging.getLogger(__name__)
class ZaqarClientPlugin(client_plugin.ClientPlugin):
exceptions_module = zaqar_errors
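    # Chained assignment: service_types becomes ['messaging'] and the
    # single-element list target unpacks to define MESSAGING = 'messaging'.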
service_types = [MESSAGING] = ['messaging']
DEFAULT_TTL = 3600
def _create(self):
return self.create_for_tenant(self.context.tenant_id)
def create_for_tenant(self, tenant_id):
con = self.context
if self.auth_token is None:
LOG.error(_LE("Zaqar connection failed, no auth_token!"))
return None
opts = {
'os_auth_token': con.auth_token,
'os_auth_url': con.auth_url,
'os_project_id': tenant_id,
'os_service_type': self.MESSAGING,
}
auth_opts = {'backend': 'keystone',
'options': opts}
conf = {'auth_opts': auth_opts}
endpoint = self.url_for(service_type=self.MESSAGING)
client = zaqarclient.Client(url=endpoint, conf=conf, version=1.1)
return client
def is_not_found(self, ex):
return isinstance(ex, zaqar_errors.ResourceNotFound)
| {
"content_hash": "acb4085b49c150e479a9e7dccf23dedb",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 28.17391304347826,
"alnum_prop": 0.6319444444444444,
"repo_name": "srznew/heat",
"id": "ba81c4db7a754a09a3f17aee49798774e4ff1514",
"size": "1871",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "heat/engine/clients/os/zaqar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6529810"
},
{
"name": "Shell",
"bytes": "33395"
}
],
"symlink_target": ""
} |
from datetime import datetime, date, timedelta
import pytest
import pytz
from icalendar import vRecur
from khal.khalendar.event import Event, AllDayEvent, LocalizedEvent, FloatingEvent
from .aux import normalize_component, _get_text
BERLIN = pytz.timezone('Europe/Berlin')
# the lucky people in Bogota don't know the pain that is DST
BOGOTA = pytz.timezone('America/Bogota')
LOCALE = {
'default_timezone': BERLIN,
'local_timezone': BERLIN,
'dateformat': '%d.%m.',
'timeformat': '%H:%M',
'longdateformat': '%d.%m.%Y',
'datetimeformat': '%d.%m. %H:%M',
'longdatetimeformat': '%d.%m.%Y %H:%M',
'unicode_symbols': True,
}
BOGOTA_LOCALE = LOCALE.copy()
BOGOTA_LOCALE['local_timezone'] = BOGOTA
BOGOTA_LOCALE['default_timezone'] = BOGOTA
MIXED_LOCALE = LOCALE.copy()
MIXED_LOCALE['local_timezone'] = BOGOTA
EVENT_KWARGS = {'calendar': 'foobar', 'locale': LOCALE}
def test_no_initialization():
with pytest.raises(ValueError):
Event('', '')
def test_invalid_keyword_argument():
with pytest.raises(TypeError):
Event.fromString(_get_text('event_dt_simple'), keyword='foo')
def test_raw_dt():
event_dt = _get_text('event_dt_simple')
start = BERLIN.localize(datetime(2014, 4, 9, 9, 30))
end = BERLIN.localize(datetime(2014, 4, 9, 10, 30))
event = Event.fromString(event_dt, start=start, end=end, **EVENT_KWARGS)
assert normalize_component(event.raw) == \
normalize_component(_get_text('event_dt_simple_inkl_vtimezone'))
assert event.relative_to(date(2014, 4, 9)) == '09:30-10:30: An Event'
event = Event.fromString(event_dt, **EVENT_KWARGS)
assert event.relative_to(date(2014, 4, 9)) == '09:30-10:30: An Event'
assert event.event_description == '09:30-10:30 09.04.2014: An Event'
assert event.recurring is False
assert event.duration == timedelta(hours=1)
assert event.uid == 'V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU'
assert event.ident == 'V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU'
assert event.organizer == ''
def test_update_simple():
event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS)
event_updated = Event.fromString(_get_text('event_dt_simple_updated'), **EVENT_KWARGS)
event.update_summary('A not so simple Event')
event.update_description('Everything has changed')
event.update_location('anywhere')
assert normalize_component(event.raw) == normalize_component(event_updated.raw)
def test_raw_d():
event_d = _get_text('event_d')
event = Event.fromString(event_d, **EVENT_KWARGS)
assert event.raw.split('\r\n') == _get_text('cal_d').split('\n')
assert event.relative_to(date(2014, 4, 9)) == 'An Event'
assert event.event_description == '09.04.2014: An Event'
def test_update_sequence():
event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS)
event.increment_sequence()
assert event._vevents['PROTO']['SEQUENCE'] == 0
event.increment_sequence()
assert event._vevents['PROTO']['SEQUENCE'] == 1
def test_event_organizer():
event = _get_text('event_dt_duration')
event = Event.fromString(event, **EVENT_KWARGS)
assert event.organizer == 'Frank Nord (frank@nord.tld)'
def test_transform_event():
"""test if transformation between different event types works"""
event_d = _get_text('event_d')
event = Event.fromString(event_d, **EVENT_KWARGS)
assert isinstance(event, AllDayEvent)
start = BERLIN.localize(datetime(2014, 4, 9, 9, 30))
end = BERLIN.localize(datetime(2014, 4, 9, 10, 30))
event.update_start_end(start, end)
assert isinstance(event, LocalizedEvent)
assert event.event_description == '09:30-10:30 09.04.2014: An Event'
analog_event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS)
assert normalize_component(event.raw) == normalize_component(analog_event.raw)
with pytest.raises(ValueError):
event.update_start_end(start, date(2014, 4, 9))
def test_update_event_d():
event_d = _get_text('event_d')
event = Event.fromString(event_d, **EVENT_KWARGS)
event.update_start_end(date(2014, 4, 20), date(2014, 4, 22))
assert event.event_description == '20.04. - 22.04.2014: An Event'
assert 'DTSTART;VALUE=DATE:20140420' in event.raw.split('\r\n')
assert 'DTEND;VALUE=DATE:20140423' in event.raw.split('\r\n')
def test_update_event_duration():
event_dur = _get_text('event_dt_duration')
event = Event.fromString(event_dur, **EVENT_KWARGS)
assert event.start == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
assert event.end == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
assert event.duration == timedelta(hours=1)
event.update_start_end(BERLIN.localize(datetime(2014, 4, 9, 8, 0)),
BERLIN.localize(datetime(2014, 4, 9, 12, 0)))
assert event.start == BERLIN.localize(datetime(2014, 4, 9, 8, 0))
assert event.end == BERLIN.localize(datetime(2014, 4, 9, 12, 0))
assert event.duration == timedelta(hours=4)
def test_dt_two_tz():
event_dt_two_tz = _get_text('event_dt_two_tz')
cal_dt_two_tz = _get_text('cal_dt_two_tz')
event = Event.fromString(event_dt_two_tz, **EVENT_KWARGS)
assert normalize_component(cal_dt_two_tz) == normalize_component(event.raw)
# local (Berlin) time!
assert event.relative_to(date(2014, 4, 9)) == '09:30-16:30: An Event'
assert event.event_description == '09:30-16:30 09.04.2014: An Event'
def test_event_dt_duration():
"""event has no end, but duration"""
event_dt_duration = _get_text('event_dt_duration')
event = Event.fromString(event_dt_duration, **EVENT_KWARGS)
assert event.relative_to(date(2014, 4, 9)) == '09:30-10:30: An Event'
assert event.end == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
assert event.event_description == '09:30-10:30 09.04.2014: An Event'
def test_event_dt_floating():
"""start and end time have no timezone, i.e. a floating event"""
event_str = _get_text('event_dt_floating')
event = Event.fromString(event_str, **EVENT_KWARGS)
assert isinstance(event, FloatingEvent)
assert event.relative_to(date(2014, 4, 9)) == '09:30-10:30: An Event'
assert event.event_description == '09:30-10:30 09.04.2014: An Event'
assert event.start == datetime(2014, 4, 9, 9, 30)
assert event.end == datetime(2014, 4, 9, 10, 30)
assert event.start_local == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
assert event.end_local == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
event = Event.fromString(event_str, calendar='foobar', locale=MIXED_LOCALE)
assert event.start == datetime(2014, 4, 9, 9, 30)
assert event.end == datetime(2014, 4, 9, 10, 30)
assert event.start_local == BOGOTA.localize(datetime(2014, 4, 9, 9, 30))
assert event.end_local == BOGOTA.localize(datetime(2014, 4, 9, 10, 30))
def test_event_dt_tz_missing():
"""localized event DTSTART;TZID=foo, but VTIMEZONE components missing"""
event_str = _get_text('event_dt_local_missing_tz')
event = Event.fromString(event_str, **EVENT_KWARGS)
assert event.start == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
assert event.end == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
assert event.start_local == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
assert event.end_local == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
event = Event.fromString(event_str, calendar='foobar', locale=MIXED_LOCALE)
assert event.start == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
assert event.end == BERLIN.localize(datetime(2014, 4, 9, 10, 30))
assert event.start_local == BOGOTA.localize(datetime(2014, 4, 9, 2, 30))
assert event.end_local == BOGOTA.localize(datetime(2014, 4, 9, 3, 30))
def test_event_dt_rr():
event_dt_rr = _get_text('event_dt_rr')
event = Event.fromString(event_dt_rr, **EVENT_KWARGS)
assert event.recurring is True
desc = '09:30-10:30: An Event ⟳'
assert event.relative_to(date(2014, 4, 9)) == desc
assert event.event_description == \
'09:30-10:30 09.04.2014: An Event\nRepeat: FREQ=DAILY;COUNT=10'
def test_event_d_rr():
event_d_rr = _get_text('event_d_rr')
event = Event.fromString(event_d_rr, **EVENT_KWARGS)
assert event.recurring is True
desc = 'Another Event ⟳'
assert event.relative_to(date(2014, 4, 9)) == desc
assert event.event_description == '09.04.2014: Another Event\nRepeat: FREQ=DAILY;COUNT=10'
start = date(2014, 4, 10)
end = date(2014, 4, 11)
event = Event.fromString(event_d_rr, start=start, end=end, **EVENT_KWARGS)
assert event.recurring is True
desc = 'Another Event ⟳'
assert event.relative_to(date(2014, 4, 10)) == desc
assert event.event_description == '10.04.2014: Another Event\nRepeat: FREQ=DAILY;COUNT=10'
def test_event_rd():
event_dt_rd = _get_text('event_dt_rd')
event = Event.fromString(event_dt_rd, **EVENT_KWARGS)
assert event.recurring is True
def test_event_d_long():
event_d_long = _get_text('event_d_long')
event = Event.fromString(event_d_long, **EVENT_KWARGS)
with pytest.raises(ValueError):
event.relative_to(date(2014, 4, 8))
assert event.relative_to(date(2014, 4, 9)) == '↦ Another Event'
assert event.relative_to(date(2014, 4, 10)) == '↔ Another Event'
assert event.relative_to(date(2014, 4, 11)) == '⇥ Another Event'
with pytest.raises(ValueError):
event.relative_to(date(2014, 4, 12))
assert event.event_description == '09.04. - 11.04.2014: Another Event'
def test_event_dt_long():
event_dt_long = _get_text('event_dt_long')
event = Event.fromString(event_dt_long, **EVENT_KWARGS)
assert event.relative_to(date(2014, 4, 9)) == '09:30→ : An Event'
# FIXME ugly! replace with one arrow
assert event.relative_to(date(2014, 4, 10)) == '→ → : An Event'
assert event.relative_to(date(2014, 4, 12)) == '→ 10:30: An Event'
assert event.event_description == '09.04.2014 09:30 - 12.04.2014 10:30: An Event'
def test_event_no_dst():
"""test the creation of a corect VTIMEZONE for timezones with no dst"""
event_no_dst = _get_text('event_no_dst')
cal_no_dst = _get_text('cal_no_dst')
event = Event.fromString(event_no_dst, calendar='foobar', locale=BOGOTA_LOCALE)
assert normalize_component(event.raw) == normalize_component(cal_no_dst)
assert event.event_description == '09:30-10:30 09.04.2014: An Event'
def test_event_raw_UTC():
"""test .raw() on events which are localized in UTC"""
event_utc = _get_text('event_dt_simple_zulu')
event = Event.fromString(event_utc, **EVENT_KWARGS)
assert event.raw == '\r\n'.join([
'''BEGIN:VCALENDAR''',
'''VERSION:2.0''',
'''PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN''',
'''BEGIN:VEVENT''',
'''SUMMARY:An Event''',
'''DTSTART:20140409T093000Z''',
'''DTEND:20140409T103000Z''',
'''DTSTAMP;VALUE=DATE-TIME:20140401T234817Z''',
'''UID:V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU''',
'''END:VEVENT''',
'''END:VCALENDAR\r\n'''])
def test_dtend_equals_dtstart():
event = Event.fromString(_get_text('event_d_same_start_end'),
calendar='foobar', locale=LOCALE)
assert event.end == event.start
def test_multi_uid():
"""test for support for events with consist of several sub events with
the same uid"""
orig_event_str = _get_text('event_rrule_recuid')
event = Event.fromString(orig_event_str, **EVENT_KWARGS)
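    # Round-trip check: every line of the original iCalendar text must survive
    # parsing and re-serialization via .raw.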
for line in orig_event_str.split('\n'):
assert line in event.raw.split('\r\n')
def test_recur():
event = Event.fromString(_get_text('event_dt_rr'), **EVENT_KWARGS)
assert event.recurring is True
assert event.recurpattern == 'FREQ=DAILY;COUNT=10'
assert event.recurobject == vRecur({'COUNT': [10], 'FREQ': ['DAILY']})
def test_type_inference():
event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS)
assert type(event) == LocalizedEvent
event = Event.fromString(_get_text('event_dt_simple_zulu'), **EVENT_KWARGS)
assert type(event) == LocalizedEvent
def test_duplicate_event():
event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS)
dupe = event.duplicate()
assert dupe._vevents['PROTO']['UID'].to_ical() != 'V042MJ8B3SJNFXQOJL6P53OFMHJE8Z3VZWOU'
def test_remove_instance_from_rrule():
"""removing an instance from a recurring event"""
event = Event.fromString(_get_text('event_dt_rr'), **EVENT_KWARGS)
event.delete_instance(datetime(2014, 4, 10, 9, 30))
assert 'EXDATE:20140410T093000' in event.raw.split('\r\n')
event.delete_instance(datetime(2014, 4, 12, 9, 30))
assert 'EXDATE:20140410T093000,20140412T093000' in event.raw.split('\r\n')
def test_remove_instance_from_rdate():
"""removing an instance from a recurring event"""
event = Event.fromString(_get_text('event_dt_rd'), **EVENT_KWARGS)
assert 'RDATE' in event.raw
event.delete_instance(datetime(2014, 4, 10, 9, 30))
assert 'RDATE' not in event.raw
def test_remove_instance_from_two_rdate():
"""removing an instance from a recurring event which has two RDATE props"""
event = Event.fromString(_get_text('event_dt_two_rd'), **EVENT_KWARGS)
assert event.raw.count('RDATE') == 2
event.delete_instance(datetime(2014, 4, 10, 9, 30))
assert event.raw.count('RDATE') == 1
assert 'RDATE:20140411T093000,20140412T093000' in event.raw.split('\r\n')
def test_remove_instance_from_recuid():
"""remove an istane from an event which is specified via an additional VEVENT
with the same UID (which we call `recuid` here"""
event = Event.fromString(_get_text('event_rrule_recuid'), **EVENT_KWARGS)
assert event.raw.split('\r\n').count('UID:event_rrule_recurrence_id') == 2
event.delete_instance(BERLIN.localize(datetime(2014, 7, 7, 7, 0)))
assert event.raw.split('\r\n').count('UID:event_rrule_recurrence_id') == 1
assert 'EXDATE;TZID=Europe/Berlin:20140707T070000' in event.raw.split('\r\n')
| {
"content_hash": "ed7fd0298e9ba9113c25cc978d1ef9e2",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 94,
"avg_line_length": 41.225806451612904,
"alnum_prop": 0.6650305875657988,
"repo_name": "dzoep/khal",
"id": "244516a5f34574df13df9516de3e5fddfc1b4ffc",
"size": "14078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/event_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1152"
},
{
"name": "Python",
"bytes": "365652"
}
],
"symlink_target": ""
} |
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkGeometryFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkGeometryFilter(), 'Processing.',
('vtkDataSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| {
"content_hash": "f2d82109a401b84af6e3cf01caef802e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 38.81818181818182,
"alnum_prop": 0.6604215456674473,
"repo_name": "fvpolpeta/devide",
"id": "93b89ab1e81ac79131098fa293eedfb258120c3b",
"size": "488",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/vtk_basic/vtkGeometryFilter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from django.test import TestCase
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.core.exceptions import ValidationError
from ..models import Period, Subject
from ..testhelper import TestHelper
from ..models.model_utils import EtagMismatchException
class TestPeriod(TestCase, TestHelper):
def setUp(self):
self.add(nodes="uio:admin(uioadmin).ifi",
subjects=["inf1100"],
periods=["old:begins(-2):ends(1)", "looong"],
assignments=["assignment1", "assignment2"],
assignmentgroups=["g1:examiner(examiner1)", "g2:examiner(examiner2)"])
def test_unique(self):
n = Period(parentnode=Subject.objects.get(short_name='inf1100'),
short_name='old', long_name='Old',
start_time=datetime.now(),
end_time=datetime.now())
self.assertRaises(IntegrityError, n.save)
def test_etag_update(self):
etag = datetime.now()
obj = self.inf1100_looong
obj.minimum_points = 66
self.assertRaises(EtagMismatchException, obj.etag_update, etag)
try:
obj.etag_update(etag)
except EtagMismatchException as e:
# Should not raise exception
obj.etag_update(e.etag)
obj2 = Period.objects.get(id=obj.id)
self.assertEquals(obj2.minimum_points, 66)
def test_where_is_admin(self):
self.assertEquals(Period.where_is_admin(self.uioadmin).count(), 2)
def test_clean(self):
self.inf1100_looong.start_time = datetime(2010, 1, 1)
self.inf1100_looong.end_time = datetime(2011, 1, 1)
self.inf1100_looong.clean()
self.inf1100_looong.start_time = datetime(2012, 1, 1)
self.assertRaises(ValidationError, self.inf1100_looong.clean)
def test_where_is_examiner(self):
q = Period.where_is_examiner(self.examiner1).order_by('short_name')
self.assertEquals(q.count(), 2)
self.assertEquals(q[0].short_name, 'looong')
self.assertEquals(q[1].short_name, 'old')
# Add on different period
self.add_to_path('uio.ifi;inf1010.spring10.oblig1.student1:examiner(examiner1)')
self.assertEquals(q.count(), 3)
self.assertEquals(q[0].short_name, 'looong')
self.assertEquals(q[1].short_name, 'old')
self.assertEquals(q[2].short_name, 'spring10')
def test_published_where_is_examiner(self):
examiner1 = User.objects.get(username='examiner1')
self.add_to_path('uio.ifi;inf1010.spring10.oblig1.student1:examiner(examiner1)')
q = Period.published_where_is_examiner(examiner1).order_by('short_name')
self.assertEquals(q.count(), 3)
assignment1010 = self.inf1010_spring10_oblig1_student1.parentnode
assignment1010.publishing_time = datetime.now() + timedelta(10)
assignment1010.save()
self.assertEquals(q.count(), 2)
| {
"content_hash": "2e1dcc688318728ae5a73c1e26ec0fae",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 88,
"avg_line_length": 41.958333333333336,
"alnum_prop": 0.6550810989738497,
"repo_name": "vegarang/devilry-django",
"id": "7bde64be0bce64428b9052a109c67e7833208315",
"size": "3021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/core/tests/period.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "697906"
},
{
"name": "Python",
"bytes": "931589"
}
],
"symlink_target": ""
} |
import logging
import botocore
from kotocore.session import Session
log = logging.getLogger(__name__)
class KambooConnection(object):
"""
Kamboo connection with botocore session initialized
"""
def __init__(self, service_name="ec2", region_name="us-east-1",
account_id=None,
credentials=None):
self.session = botocore.session.get_session()
self.service = service_name
self.region = region_name
self.account_id = account_id
self.credentials = credentials
if self.credentials:
self.session.set_credentials(**self.credentials)
Connection = Session(session=self.session).get_connection(service_name)
self.conn = Connection(region_name=self.region)
log.debug("KambooConnection: [%s, %s, %s]" % (self.account_id,
self.region,
self.service))
def __repr__(self):
return "KambooConnection: [%s, %s, %s]" % (self.account_id,
self.region,
self.service)
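# Usage sketch (values are illustrative; the credentials dict keys follow
# botocore's Session.set_credentials(access_key, secret_key, token) signature):
# conn = KambooConnection(service_name="ec2", region_name="us-east-1",
#                         account_id="123456789012",
#                         credentials={"access_key": "AKIA...",
#                                      "secret_key": "..."})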
| {
"content_hash": "77473d628fd2951b21aaac1bf875cd0e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 37.59375,
"alnum_prop": 0.5286783042394015,
"repo_name": "henrysher/kamboo",
"id": "da8566d9519c63c7fc7d8d634174c3c850bb3647",
"size": "1784",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kamboo/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5569"
},
{
"name": "Python",
"bytes": "39556"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
} |
from quicklock import singleton
| {
"content_hash": "922b1475ddf99cf2f7f213ec456383ff",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.875,
"repo_name": "NateFerrero/quicklock",
"id": "0d9e271c6b3a013c198c081e9d7347da909fac42",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quicklock/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1001"
},
{
"name": "Python",
"bytes": "3215"
}
],
"symlink_target": ""
} |
"""
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
O(n)+O(n)
:param head: head node
:param n: the nth node from the end
:return: ListNode, head node
"""
# construct dummy
dummy = ListNode(0)
dummy.next = head
# get length of the linked list
length = 0
pre = dummy
while pre.next:
length += 1
            pre = pre.next
# find & remove
pre = dummy
count = 0
while pre.next:
cur = pre.next
            if count == length - n:
pre.next = cur.next # remove
break
else:
count += 1
pre = pre.next
return dummy.next
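# Example (sketch): remove the 2nd node from the end of 1->2->3->4->5
# nodes = [ListNode(i) for i in range(1, 6)]
# for first, second in zip(nodes, nodes[1:]):
#     first.next = second
# head = Solution().removeNthFromEnd(nodes[0], 2)  # list becomes 1->2->3->5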
| {
"content_hash": "904d99197e9df29ab7460d4764e2e7e5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 83,
"avg_line_length": 23.943396226415093,
"alnum_prop": 0.4980299448384555,
"repo_name": "algorhythms/LeetCode",
"id": "41529a7bd2c40f1bb47863031dffc9d358a8a34b",
"size": "1269",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "019 Remove Nth Node From End of List.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.core.amaas_model import AMaaSModel
class Reference(AMaaSModel):
def __init__(self, reference_value, reference_primary=False, *args, **kwargs):
self.reference_value = reference_value
self.reference_primary = reference_primary
super(Reference, self).__init__(*args, **kwargs)
@property
def reference_primary(self):
if hasattr(self, '_reference_primary'):
return self._reference_primary
@reference_primary.setter
def reference_primary(self, value):
"""
Always convert to bool if the service/database returns 0 or 1
"""
if value is not None:
self._reference_primary = True if value else False | {
"content_hash": "4ace5ff3b2a714086b8ec9350ab36eaa",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.66375,
"repo_name": "amaas-fintech/amaas-core-sdk-python",
"id": "f34efe6aecbc3f1613edb3fce4c434b317c8d8a0",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amaascore/core/reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "529460"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'io/grpc'
_MODULE_NAME = 'grpc-core'
_FILE_EXT = 'jar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
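# Invocation sketch (mirrors the argparse subcommands defined above; the
# version value is illustrative):
#   python3 fetch.py latest
#   _3PP_VERSION=1.57.2.cr1 python3 fetch.py get_url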
| {
"content_hash": "036263d1b02d446af0d86c44705143e7",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.5917808219178082,
"repo_name": "chromium/chromium",
"id": "132cdd2b52bc56ef6598638b8d00f47236b49620",
"size": "2457",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/io_grpc_grpc_core/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import datetime
import decimal
import uuid
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, models
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models.constants import OnConflict
from django.db.models.expressions import Col
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property
class DatabaseOperations(BaseDatabaseOperations):
cast_char_field_without_max_length = "text"
cast_data_types = {
"DateField": "TEXT",
"DateTimeField": "TEXT",
}
explain_prefix = "EXPLAIN QUERY PLAN"
# List of datatypes to that cannot be extracted with JSON_EXTRACT() on
# SQLite. Use JSON_TYPE() instead.
jsonfield_datatype_values = frozenset(["null", "false", "true"])
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there's only a single field to insert, the limit is 500
(SQLITE_MAX_COMPOUND_SELECT).
"""
if len(fields) == 1:
return 500
elif len(fields) > 1:
return self.connection.features.max_query_params // len(fields)
else:
return len(objs)
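    # Worked example (sketch): with SQLite's default limit of 999 variables and
    # three fields per row, bulk_batch_size() allows 999 // 3 == 333 rows per
    # INSERT statement.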
def check_expression_support(self, expression):
bad_fields = (models.DateField, models.DateTimeField, models.TimeField)
bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
except (AttributeError, FieldError):
# Not every subexpression has an output_field which is fine
# to ignore.
pass
else:
if isinstance(output_field, bad_fields):
raise NotSupportedError(
"You cannot use Sum, Avg, StdDev, and Variance "
"aggregations on date/time fields in sqlite3 "
"since date/time is saved as text."
)
if (
isinstance(expression, models.Aggregate)
and expression.distinct
and len(expression.source_expressions) > 1
):
raise NotSupportedError(
"SQLite doesn't support DISTINCT on aggregate functions "
"accepting multiple arguments."
)
def date_extract_sql(self, lookup_type, field_name):
"""
Support EXTRACT with a user-defined function django_date_extract()
that's registered in connect(). Use single quotes because this is a
string and could otherwise cause a collision with a field name.
"""
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def fetch_returned_insert_rows(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the list of returned data.
"""
return cursor.fetchall()
def format_for_duration_arithmetic(self, sql):
"""Do nothing since formatting is handled in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name, tzname=None):
return "django_date_trunc('%s', %s, %s, %s)" % (
lookup_type.lower(),
field_name,
*self._convert_tznames_to_sql(tzname),
)
def time_trunc_sql(self, lookup_type, field_name, tzname=None):
return "django_time_trunc('%s', %s, %s, %s)" % (
lookup_type.lower(),
field_name,
*self._convert_tznames_to_sql(tzname),
)
def _convert_tznames_to_sql(self, tzname):
if tzname and settings.USE_TZ:
return "'%s'" % tzname, "'%s'" % self.connection.timezone_name
return "NULL", "NULL"
def datetime_cast_date_sql(self, field_name, tzname):
return "django_datetime_cast_date(%s, %s, %s)" % (
field_name,
*self._convert_tznames_to_sql(tzname),
)
def datetime_cast_time_sql(self, field_name, tzname):
return "django_datetime_cast_time(%s, %s, %s)" % (
field_name,
*self._convert_tznames_to_sql(tzname),
)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
return "django_datetime_extract('%s', %s, %s, %s)" % (
lookup_type.lower(),
field_name,
*self._convert_tznames_to_sql(tzname),
)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
return "django_datetime_trunc('%s', %s, %s, %s)" % (
lookup_type.lower(),
field_name,
*self._convert_tznames_to_sql(tzname),
)
def time_extract_sql(self, lookup_type, field_name):
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
# This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
# number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
# number of return values, default = 2000). Since Python's sqlite3
# module doesn't expose the get_limit() C API, assume the default
# limits are in effect and split the work in batches if needed.
BATCH_SIZE = 999
if len(params) > BATCH_SIZE:
results = ()
for index in range(0, len(params), BATCH_SIZE):
chunk = params[index : index + BATCH_SIZE]
results += self._quote_params_for_last_executed_query(chunk)
return results
sql = "SELECT " + ", ".join(["QUOTE(?)"] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(
# self->statement, parameters, allow_8bit_chars
# );
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(params, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def __references_graph(self, table_name):
query = """
WITH tables AS (
SELECT %s name
UNION
SELECT sqlite_master.name
FROM sqlite_master
JOIN tables ON (sql REGEXP %s || tables.name || %s)
) SELECT name FROM tables;
"""
params = (
table_name,
r'(?i)\s+references\s+("|\')?',
r'("|\')?\s*\(',
)
with self.connection.cursor() as cursor:
results = cursor.execute(query, params)
return [row[0] for row in results.fetchall()]
@cached_property
def _references_graph(self):
# 512 is large enough to fit the ~330 tables (as of this writing) in
# Django's test suite.
return lru_cache(maxsize=512)(self.__references_graph)
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
if tables and allow_cascade:
# Simulate TRUNCATE CASCADE by recursively collecting the tables
# referencing the tables to be flushed.
tables = set(
chain.from_iterable(self._references_graph(table) for table in tables)
)
sql = [
"%s %s %s;"
% (
style.SQL_KEYWORD("DELETE"),
style.SQL_KEYWORD("FROM"),
style.SQL_FIELD(self.quote_name(table)),
)
for table in tables
]
if reset_sequences:
sequences = [{"table": table} for table in tables]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
def sequence_reset_by_name_sql(self, style, sequences):
if not sequences:
return []
return [
"%s %s %s %s = 0 %s %s %s (%s);"
% (
style.SQL_KEYWORD("UPDATE"),
style.SQL_TABLE(self.quote_name("sqlite_sequence")),
style.SQL_KEYWORD("SET"),
style.SQL_FIELD(self.quote_name("seq")),
style.SQL_KEYWORD("WHERE"),
style.SQL_FIELD(self.quote_name("name")),
style.SQL_KEYWORD("IN"),
", ".join(
["'%s'" % sequence_info["table"] for sequence_info in sequences]
),
),
]
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError(
"SQLite backend does not support timezone-aware datetimes when "
"USE_TZ is False."
)
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return str(value)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == "DateTimeField":
converters.append(self.convert_datetimefield_value)
elif internal_type == "DateField":
converters.append(self.convert_datefield_value)
elif internal_type == "TimeField":
converters.append(self.convert_timefield_value)
elif internal_type == "DecimalField":
converters.append(self.get_decimalfield_converter(expression))
elif internal_type == "UUIDField":
converters.append(self.convert_uuidfield_value)
elif internal_type == "BooleanField":
converters.append(self.convert_booleanfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ and not timezone.is_aware(value):
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def get_decimalfield_converter(self, expression):
# SQLite stores only 15 significant digits. Digits coming from
# float inaccuracy must be removed.
create_decimal = decimal.Context(prec=15).create_decimal_from_float
if isinstance(expression, Col):
quantize_value = decimal.Decimal(1).scaleb(
-expression.output_field.decimal_places
)
def converter(value, expression, connection):
if value is not None:
return create_decimal(value).quantize(
quantize_value, context=expression.output_field.context
)
else:
def converter(value, expression, connection):
if value is not None:
return create_decimal(value)
return converter
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def convert_booleanfield_value(self, value, expression, connection):
return bool(value) if value in (1, 0) else value
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join(f"({sql})" for sql in placeholder_rows_sql)
return f"VALUES {values_sql}"
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a ^ operator, so use the user-defined POWER
# function that's registered in connect().
if connector == "^":
return "POWER(%s)" % ",".join(sub_expressions)
elif connector == "#":
return "BITXOR(%s)" % ",".join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ["+", "-", "*", "/"]:
raise DatabaseError("Invalid connector for timedelta: %s." % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError("Too many params for timedelta operations.")
return "django_format_dtdelta(%s)" % ", ".join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
params = (*lhs_params, *rhs_params)
if internal_type == "TimeField":
return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), params
return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), params
def insert_statement(self, on_conflict=None):
if on_conflict == OnConflict.IGNORE:
return "INSERT OR IGNORE INTO"
return super().insert_statement(on_conflict=on_conflict)
def return_insert_columns(self, fields):
# SQLite < 3.35 doesn't support an INSERT...RETURNING statement.
if not fields:
return "", ()
columns = [
"%s.%s"
% (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
)
for field in fields
]
return "RETURNING %s" % ", ".join(columns), ()
def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
if (
on_conflict == OnConflict.UPDATE
and self.connection.features.supports_update_conflicts_with_target
):
return "ON CONFLICT(%s) DO UPDATE SET %s" % (
", ".join(map(self.quote_name, unique_fields)),
", ".join(
[
f"{field} = EXCLUDED.{field}"
for field in map(self.quote_name, update_fields)
]
),
)
return super().on_conflict_suffix_sql(
fields,
on_conflict,
update_fields,
unique_fields,
)
| {
"content_hash": "1f88bdf59f612f53c123b9945245e625",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 88,
"avg_line_length": 39.0848623853211,
"alnum_prop": 0.5788979519981222,
"repo_name": "auvipy/django",
"id": "7c7cfce1ba045408aeb698f040928a0fc8b46e84",
"size": "17041",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "django/db/backends/sqlite3/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87610"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146241"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16014747"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
"""Definition of the PhraseElement class and associated subclasses:
- NounPhraseElement
- AdjectivePhraseElement
- VerbPhraseElement
- ClausePhraseElement
"""
import six
from .base import NLGElement
from .string import StringElement
from .word import WordElement
from ..util import get_phrase_helper
from ..lexicon.feature import ELIDED, NUMBER
from ..lexicon.feature import category as cat
from ..lexicon.feature import internal
from ..lexicon.feature import clause
from ..lexicon.feature import discourse
__all__ = ['PhraseElement', 'AdjectivePhraseElement', 'NounPhraseElement']
class PhraseElement(NLGElement):
def __init__(self, lexicon, category):
"""Create a phrase of the given type."""
super(PhraseElement, self).__init__(category=category, lexicon=lexicon)
self.features[ELIDED] = False
self.helper = get_phrase_helper(language=self.lexicon.language,
phrase_type='phrase')()
@property
def head(self):
return self.features[internal.HEAD]
@head.setter
def head(self, value):
if isinstance(value, NLGElement):
head = value
else:
head = StringElement(string=value)
head.parent = self
self.features[internal.HEAD] = head
def get_children(self):
"""Return the child components of the phrase.
        The returned list will depend on the category of the element:
        - Clauses consist of cue phrases, front modifiers, pre-modifiers,
        subjects, verb phrases and complements.
        - Noun phrases consist of the specifier, pre-modifiers, the noun,
        complements and post-modifiers.
- Verb phrases consist of pre-modifiers, the verb group,
complements and post-modifiers.
- Canned text phrases have no children.
- All other phrases consist of pre-modifiers, the main phrase
element, complements and post-modifiers.
"""
children = []
if self.category == cat.CLAUSE:
children.append(self.cue_phrase or [])
children.extend(self.front_modifiers or [])
children.extend(self.premodifiers or [])
children.extend(self.subjects or [])
children.extend(self.verb_phrase or [])
children.extend(self.complements or [])
elif self.category == cat.NOUN_PHRASE:
children.append(self.specifier or [])
children.extend(self.premodifiers or [])
children.append(self.head or [])
children.extend(self.complements or [])
children.extend(self.postmodifiers or [])
elif self.category == cat.VERB_PHRASE:
children.extend(self.premodifiers or [])
children.append(self.head or [])
children.extend(self.complements or [])
children.extend(self.postmodifiers or [])
else:
children.extend(self.premodifiers or [])
children.append(self.head or [])
children.extend(self.complements or [])
children.extend(self.postmodifiers or [])
children = (child for child in children if child)
children = [
StringElement(string=child)
if not isinstance(child, NLGElement) else child
for child in children]
return children
def add_complement(self, complement):
"""Adds a new complement to the phrase element.
Complements will be realised in the syntax after the head
element of the phrase. Complements differ from post-modifiers
in that complements are crucial to the understanding of a phrase
whereas post-modifiers are optional.
If the new complement being added is a clause or a
CoordinatedPhraseElement then its clause status feature is set
        to ClauseStatus.SUBORDINATE and its discourse function is set
to DiscourseFunction.OBJECT by default unless an existing
discourse function exists on the complement.
"""
complements = self.features[internal.COMPLEMENTS] or []
if (
complement.category == cat.CLAUSE
# TODO: define CoordinatedPhraseElement
# or isinstance(complement, CoordinatedPhraseElement)
):
complement[internal.CLAUSE_STATUS] = clause.SUBORDINATE
if not complement.discourse_function:
complement[internal.DISCOURSE_FUNCTION] = discourse.OBJECT
complement.parent = self
complements.append(complement)
self.features[internal.COMPLEMENTS] = complements
def add_post_modifier(self, new_post_modifier):
"""Add the argument post_modifer as the phrase post modifier,
and set the parent of the post modifier as the current sentence.
"""
new_post_modifier.parent = self
current_post_modifiers = self.postmodifiers or []
current_post_modifiers.append(new_post_modifier)
self.postmodifiers = current_post_modifiers
def add_pre_modifier(self, new_pre_modifier):
"""Add the argument pre_modifer as the phrase pre modifier,
and set the parent of the pre modifier as the current sentence.
"""
new_pre_modifier.parent = self
current_pre_modifiers = self.premodifiers or []
current_pre_modifiers.append(new_pre_modifier)
self.premodifiers = current_pre_modifiers
def realise(self):
return self.helper.realise(phrase=self)
class AdjectivePhraseElement(PhraseElement):
"""This class defines a adjective phrase.
It is essentially a wrapper around the
PhraseElement class, with methods for setting common constituents
such as pre_modifiers.
"""
def __init__(self, lexicon):
super(AdjectivePhraseElement, self).__init__(
category=cat.ADJECTIVE_PHRASE, lexicon=lexicon)
@property
def adjective(self):
return self.head
@adjective.setter
def adjective(self, adjective):
if isinstance(adjective, six.text_type):
# Create a word, if not found in lexicon
adjective = self.lexicon.first(adjective, category=cat.ADJECTIVE)
self.features[internal.HEAD] = adjective
class NounPhraseElement(PhraseElement):
"""
This class defines a noun phrase. It is essentially a wrapper around the
PhraseElement class, with methods for setting common
    constituents such as the specifier. For example, setting the noun property
    on this class sets the head of the element to be the specified noun.
From an API perspective, this class is a simplified version of the
NPPhraseSpec class in simplenlg V3. It provides an alternative way for
creating syntactic structures, compared to directly manipulating a V4
PhraseElement.
"""
def __init__(self, lexicon, phrase=None):
super(NounPhraseElement, self).__init__(
category=cat.NOUN_PHRASE,
lexicon=lexicon)
self.helper = get_phrase_helper(
language=self.lexicon.language,
phrase_type=cat.NOUN_PHRASE)()
if phrase:
self.features.update(phrase.features)
self.parent = phrase.parent
@property
def noun(self):
return self.head
@noun.setter
def noun(self, value):
self.features[cat.NOUN] = value
self.features[internal.HEAD] = value
@property
def pronoun(self):
return self.features[cat.PRONOUN]
@pronoun.setter
def pronoun(self, value):
self.features[cat.PRONOUN] = value
self.features[internal.HEAD] = value
@property
def specifier(self):
return self.features[internal.SPECIFIER]
@specifier.setter
def specifier(self, value):
if isinstance(value, NLGElement):
specifier = value
else:
specifier = self.lexicon.first(value, category=cat.DETERMINER)
if specifier:
specifier.features[internal.DISCOURSE_FUNCTION] = discourse.SPECIFIER
specifier.parent = self
if isinstance(self.head, WordElement) and self.head.category == cat.PRONOUN:
self.noun = self.lexicon.first(self.head.base_form, category=cat.NOUN)
if specifier.number:
self.features[NUMBER] = specifier.number
self.features[internal.SPECIFIER] = specifier
def add_modifier(self, modifier):
self.helper.add_modifier(phrase=self, modifier=modifier)
def check_if_ne_only_negation(self):
return self.specifier.ne_only_negation or self.head.ne_only_negation
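# Usage sketch (assumes a lexicon object for the target language; the words
# 'house' and 'the' are illustrative):
# np = NounPhraseElement(lexicon)
# np.noun = lexicon.first('house', category=cat.NOUN)
# np.specifier = 'the'  # looked up as a determiner by the setter
# realised = np.realise()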
| {
"content_hash": "1565b6fea4b52a958f1e7ecc9481e08f",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 88,
"avg_line_length": 35.454918032786885,
"alnum_prop": 0.6534504681539707,
"repo_name": "brouberol/pynlg",
"id": "54cd8559984922f71866949f4f7d5cf822e84086",
"size": "8670",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pynlg/spec/phrase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "242719"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import jupyter_client
import sys
import threading
import time
from concurrent import futures
import grpc
import ipython_pb2
import ipython_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
TIMEOUT = 60*60*24*365*100 # 100 years
class IPython(ipython_pb2_grpc.IPythonServicer):
def __init__(self, server):
self._status = ipython_pb2.STARTING
self._server = server
def start(self):
print("starting...")
sys.stdout.flush()
self._km, self._kc = jupyter_client.manager.start_new_kernel(kernel_name='python')
self._status = ipython_pb2.RUNNING
def execute(self, request, context):
print("execute code:\n")
print(request.code)
sys.stdout.flush()
stdout_queue = queue.Queue(maxsize = 10)
stderr_queue = queue.Queue(maxsize = 10)
image_queue = queue.Queue(maxsize = 5)
def _output_hook(msg):
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'stream':
stdout_queue.put(content['text'])
elif msg_type in ('display_data', 'execute_result'):
stdout_queue.put(content['data'].get('text/plain', ''))
if 'image/png' in content['data']:
image_queue.put(content['data']['image/png'])
elif msg_type == 'error':
stderr_queue.put('\n'.join(content['traceback']))
payload_reply = []
def execute_worker():
reply = self._kc.execute_interactive(request.code,
output_hook=_output_hook,
timeout=TIMEOUT)
payload_reply.append(reply)
t = threading.Thread(name="ConsumerThread", target=execute_worker)
t.start()
while t.is_alive():
while not stdout_queue.empty():
output = stdout_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.TEXT,
output=output)
while not stderr_queue.empty():
output = stderr_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.ERROR,
type=ipython_pb2.TEXT,
output=output)
while not image_queue.empty():
output = image_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.IMAGE,
output=output)
while not stdout_queue.empty():
output = stdout_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.TEXT,
output=output)
while not stderr_queue.empty():
output = stderr_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.ERROR,
type=ipython_pb2.TEXT,
output=output)
while not image_queue.empty():
output = image_queue.get()
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.IMAGE,
output=output)
if payload_reply:
result = []
for payload in payload_reply[0]['content']['payload']:
if payload['data']['text/plain']:
result.append(payload['data']['text/plain'])
if result:
yield ipython_pb2.ExecuteResponse(status=ipython_pb2.SUCCESS,
type=ipython_pb2.TEXT,
output='\n'.join(result))
def cancel(self, request, context):
self._km.interrupt_kernel()
return ipython_pb2.CancelResponse()
def complete(self, request, context):
reply = self._kc.complete(request.code, request.cursor, reply=True, timeout=TIMEOUT)
return ipython_pb2.CompletionResponse(matches=reply['content']['matches'])
def status(self, request, context):
return ipython_pb2.StatusResponse(status = self._status)
def stop(self, request, context):
self._server.stop(0)
sys.exit(0)
def serve(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
ipython = IPython(server)
ipython_pb2_grpc.add_IPythonServicer_to_server(ipython, server)
server.add_insecure_port('[::]:' + port)
server.start()
ipython.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve(sys.argv[1])
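# Usage sketch: the gRPC port to listen on is passed as the single CLI
# argument, e.g. `python ipython_server.py 50053` (port number is illustrative).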
| {
"content_hash": "fb9ca424d405ffceb62fd0c88b69b923",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 92,
"avg_line_length": 36.97872340425532,
"alnum_prop": 0.5237821250479479,
"repo_name": "vipul1409/zeppelin",
"id": "98fa616c2d034d52e73e1be1af06389fdcd0d3fb",
"size": "5995",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "python/src/main/resources/grpc/python/ipython_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12046"
},
{
"name": "CSS",
"bytes": "88655"
},
{
"name": "Groovy",
"bytes": "9274"
},
{
"name": "HTML",
"bytes": "309510"
},
{
"name": "Java",
"bytes": "4389371"
},
{
"name": "JavaScript",
"bytes": "592817"
},
{
"name": "Jupyter Notebook",
"bytes": "84915"
},
{
"name": "Python",
"bytes": "119001"
},
{
"name": "R",
"bytes": "21301"
},
{
"name": "Roff",
"bytes": "60995"
},
{
"name": "Ruby",
"bytes": "3101"
},
{
"name": "Scala",
"bytes": "344340"
},
{
"name": "Shell",
"bytes": "78697"
},
{
"name": "Thrift",
"bytes": "5234"
},
{
"name": "XSLT",
"bytes": "1326"
}
],
"symlink_target": ""
} |
import mock
from nose.tools import eq_
from django import forms
import amo.tests
from files.utils import WebAppParser
class TestWebAppParser(amo.tests.TestCase):
@mock.patch('files.utils.WebAppParser.get_json_data')
def test_no_developer_name(self, get_json_data):
get_json_data.return_value = {
'name': 'Blah'
}
with self.assertRaises(forms.ValidationError) as e:
# The argument to parse() is supposed to be a filename, it doesn't
# matter here though since we are mocking get_json_data().
WebAppParser().parse('')
eq_(e.exception.messages, ["Developer name is required in the manifest"
" in order to display it on the app's "
"listing."])
@mock.patch('files.utils.WebAppParser.get_json_data')
def test_empty_developer_object(self, get_json_data):
get_json_data.return_value = {
'name': 'Blah',
'developer': {}
}
with self.assertRaises(forms.ValidationError) as e:
# The argument to parse() is supposed to be a filename, it doesn't
# matter here though since we are mocking get_json_data().
WebAppParser().parse('')
eq_(e.exception.messages, ["Developer name is required in the manifest"
" in order to display it on the app's "
"listing."])
@mock.patch('files.utils.WebAppParser.get_json_data')
def test_developer_name(self, get_json_data):
get_json_data.return_value = {
'name': 'Blah',
'developer': {
'name': 'Mozilla Marketplace Testing'
}
}
# The argument to parse() is supposed to be a filename, it doesn't
# matter here though since we are mocking get_json_data().
parsed_results = WebAppParser().parse('')
eq_(parsed_results['developer_name'], 'Mozilla Marketplace Testing')
@mock.patch('files.utils.WebAppParser.get_json_data')
def test_name_with_translations(self, get_json_data):
get_json_data.return_value = {
'name': 'Blah',
'developer': {
'name': 'Mozilla Marketplace Testing'
},
'default_locale': 'en-US',
'locales': {
'fr': {
'name': 'Blah (fr)',
},
'es': {
'name': 'Blah (es)',
}
}
}
# The argument to parse() is supposed to be a filename, it doesn't
# matter here though since we are mocking get_json_data().
parsed_results = WebAppParser().parse('')
eq_(parsed_results['name'].get('fr'), 'Blah (fr)')
eq_(parsed_results['name'].get('es'), 'Blah (es)')
eq_(parsed_results['name'].get('en-US'), 'Blah')
eq_(parsed_results['name'].get('de'), None)
eq_(parsed_results['default_locale'], 'en-US')
@mock.patch('files.utils.WebAppParser.get_json_data')
def test_name_with_translations_fallback(self, get_json_data):
get_json_data.return_value = {
'name': 'Blah',
'description': 'Blah Description',
'developer': {
'name': 'Mozilla Marketplace Testing'
},
'default_locale': 'en-US',
'locales': {
'fr': {
'description': 'Blah Description (fr)',
},
'es': {
'name': 'Blah (es)',
}
}
}
# The argument to parse() is supposed to be a filename, it doesn't
# matter here though since we are mocking get_json_data().
parsed_results = WebAppParser().parse('')
eq_(parsed_results['name'].get('fr'), 'Blah') # Falls back to default.
eq_(parsed_results['name'].get('es'), 'Blah (es)')
eq_(parsed_results['name'].get('en-US'), 'Blah')
eq_(parsed_results['name'].get('de'), None)
eq_(parsed_results['default_locale'], 'en-US')
| {
"content_hash": "367ab33c7947f16d279e723383aca727",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 40.19417475728155,
"alnum_prop": 0.5285024154589372,
"repo_name": "spasovski/zamboni",
"id": "2c60fdff0cd58dff66b744142e762942b1661579",
"size": "4140",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mkt/files/tests/test_utils_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "885279"
},
{
"name": "JavaScript",
"bytes": "1677601"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6279560"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
} |
"""
SeqFindr BLAST methods
"""
import subprocess
import shutil
import os
import sys
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast.Applications import NcbitblastnCommandline
from Bio.Blast.Applications import NcbitblastxCommandline
import SeqFindr.util
def make_BLAST_database(fasta_file):
"""
Given a fasta_file, generate a nucleotide BLAST database
    The database will end up in DBs/ of the working directory, or OUTPUT/DBs if an
    output directory is given in the arguments
:param fasta_file: full path to a fasta file
:type fasta_file: string
:rtype: the strain id **(must be delimited by '_')**
"""
proc = subprocess.Popen(["makeblastdb", "-in", fasta_file, "-dbtype",
'nucl'], stdout=subprocess.PIPE)
sys.stderr.write(proc.stdout.read())
for file_ext in ['.nhr', '.nin', '.nsq']:
path = fasta_file + file_ext
shutil.move(path, os.path.join('DBs', os.path.basename(path)))
sys.stderr.write(("Getting %s and assocaiated database files to the DBs "
"location\n") % (fasta_file))
shutil.copy2(fasta_file, os.path.join('DBs', os.path.basename(fasta_file)))
return os.path.basename(fasta_file).split('_')[0]
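# Usage sketch (path is illustrative; assumes makeblastdb is on the PATH and a
# DBs/ directory exists in the working directory):
# strain_id = make_BLAST_database('/path/to/strain1_contigs.fasta')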
def run_BLAST(query, database, args, cons_run):
"""
Given a mfa of query sequences of interest & a database, search for them.
Important to note:
* Turns dust filter off,
* Only a single target sequence (top hit),
* Output in XML format as blast.xml.
# TODO: Add evalue filtering ?
# TODO: add task='blastn' to use blastn scoring ?
.. warning:: default is megablast
    .. warning:: tblastx functionality has not been checked
:param query: the fullpath to the vf.mfa
:param database: the full path of the databse to search for the vf in
:param args: the arguments parsed to argparse
:param cons_run: part of a mapping consensus run
:type query: string
:type database: string
:type args: argparse args (dictionary)
:type cons_run: boolean
:returns: the path of the blast.xml file
"""
tmp1 = os.path.splitext(query.split('/')[-1])[0]
tmp2 = os.path.splitext(database.split('/')[-1])[0]
if not cons_run:
outfile = os.path.join("BLAST_results/",
"DB="+tmp1+"ID="+tmp2+"_blast.xml")
else:
outfile = os.path.join("BLAST_results/",
"cons_DB="+tmp1+"ID="+tmp2+"_blast.xml")
protein = False
# File type not specified, determine using util.is_protein()
if args.reftype is None:
if SeqFindr.util.is_protein(query) != -1:
protein = True
            sys.stderr.write('%s is protein\n' % (query))
elif args.reftype == 'prot':
protein = True
sys.stderr.write('%s is protein\n' % (query))
run_command = ''
if protein:
sys.stderr.write('Using tblastn\n')
run_command = NcbitblastnCommandline(query=query, seg='no',
db=database, outfmt=5, num_threads=args.BLAST_THREADS,
max_target_seqs=1, evalue=args.evalue, out=outfile)
else:
if args.tblastx:
sys.stderr.write('Using tblastx\n')
run_command = NcbitblastxCommandline(query=query, seg='no',
db=database, outfmt=5, num_threads=args.BLAST_THREADS,
max_target_seqs=1, evalue=args.evalue,
out=outfile)
else:
sys.stderr.write('Using blastn\n')
            if not args.short:
run_command = NcbiblastnCommandline(query=query, dust='no',
db=database, outfmt=5,
num_threads=args.BLAST_THREADS,
max_target_seqs=1, evalue=args.evalue,
out=outfile)
else:
sys.stderr.write('Optimising for short query sequences\n')
run_command = NcbiblastnCommandline(query=query, dust='no',
db=database, outfmt=5, word_size=7,
num_threads=args.BLAST_THREADS, evalue=1000,
max_target_seqs=1, out=outfile)
sys.stderr.write(str(run_command)+"\n")
run_command()
return os.path.join(os.getcwd(), outfile)
def parse_BLAST(blast_results, tol, cov, careful):
"""
Using NCBIXML parse the BLAST results, storing & returning good hits
:param blast_results: full path to a blast run output file (in XML format)
    :param tol: the cutoff threshold (see above for explanation)
    :param cov: alignment coverage cut-off (see above for explanation)
:type blast_results: string
:type tol: float
:type cov: float
    :rtype: list of satisfying hit names
"""
if os.path.isfile(os.path.expanduser(blast_results)):
hits = []
for record in NCBIXML.parse(open(blast_results)):
for align in record.alignments:
for hsp in align.hsps:
hit_name = record.query.split(',')[1].strip()
# cutoff is now calculated with reference to the alignment length
cutoff = hsp.identities/float(hsp.align_length)
# added condition that the alignment length (hsp.align_length) must be at least equal to the length of the target sequence
# added coverage option allowing the alignment length to be shorter than the length of the target sequence (DEFAULT=1)
if cutoff >= tol and (record.query_length * cov) <= hsp.align_length:
hits.append(hit_name.strip())
# New method for the --careful option
# added condition that the alignment length (hsp.align_length) must be at least equal to the length of the target sequence
elif cutoff >= tol-careful and (record.query_length * cov) <= hsp.align_length:
print "Please confirm this hit:"
print "Name,SeqFindr score,Len(align),Len(query),Identities,Gaps"
print "%s,%f,%i,%i,%i,%i" % (hit_name, cutoff, hsp.align_length, record.query_length, hsp.identities, hsp.gaps)
accept = raw_input("Should this be considered a hit? (y/N)")
if accept == '':
pass
elif accept.lower() == 'n':
pass
elif accept.lower() == 'y':
hits.append(hit_name.strip())
else:
print "Input must be y, n or enter."
print "Assuming n"
else:
pass
else:
sys.stderr.write("BLAST results do not exist. Exiting.\n")
sys.exit(1)
return hits
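# Usage sketch (file name is illustrative): keep hits with at least 95 %
# identity over an alignment spanning the full query, with no careful prompts.
# hits = parse_BLAST('BLAST_results/DB=vfdb_ID=strain1_blast.xml',
#                    tol=0.95, cov=1.0, careful=0)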
| {
"content_hash": "c443b56cb0b8225b5f7620c82200a77f",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 142,
"avg_line_length": 41.194117647058825,
"alnum_prop": 0.5740396972725974,
"repo_name": "nbenzakour/SeqFindR",
"id": "57341478e01f8b5f18350e77602568877dabfa6a",
"size": "7635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SeqFindr/blast.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "2390"
},
{
"name": "Python",
"bytes": "62775"
},
{
"name": "Shell",
"bytes": "3594"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from traits.api import List, Str, HasTraits, Float, Int
# =============standard library imports ========================
from numpy import random, char
import time
# =============local library imports ==========================
from pychron.hardware.gauges.base_controller import BaseGaugeController
class BaseMicroIonController(BaseGaugeController):
address = '01'
mode = 'rs485'
def load_additional_args(self, config, *args, **kw):
self.address = self.config_get(config, 'General', 'address', optional=False)
self.display_name = self.config_get(config, 'General', 'display_name', default=self.name)
self.mode = self.config_get(config, 'Communications', 'mode', default='rs485')
self._load_gauges(config)
return True
def get_pressures(self, verbose=False):
kw = {'verbose': verbose, 'force': True}
b = self.get_convectron_b_pressure(**kw)
self._set_gauge_pressure('CG2', b)
time.sleep(0.05)
a = self.get_convectron_a_pressure(**kw)
self._set_gauge_pressure('CG1', a)
time.sleep(0.05)
ig = self.get_ion_pressure(**kw)
self._set_gauge_pressure('IG', ig)
return ig, a, b
def set_degas(self, state):
key = 'DG'
value = 'ON' if state else 'OFF'
cmd = self._build_command(key, value)
r = self.ask(cmd)
r = self._parse_response(r)
return r
def get_degas(self):
key = 'DGS'
cmd = self._build_command(key)
r = self.ask(cmd)
r = self._parse_response(r)
return r
def get_ion_pressure(self, **kw):
name = 'IG'
return self._get_pressure(name, **kw)
def get_convectron_a_pressure(self, **kw):
name = 'CG1'
return self._get_pressure(name, **kw)
def get_convectron_b_pressure(self, **kw):
name = 'CG2'
return self._get_pressure(name, **kw)
def set_ion_gauge_state(self, state):
key = 'IG1'
value = 'ON' if state else 'OFF'
cmd = self._build_command(key, value)
r = self.ask(cmd)
r = self._parse_response(r)
return r
def get_process_control_status(self, channel=None):
key = 'PCS'
cmd = self._build_command(key, channel)
r = self.ask(cmd)
r = self._parse_response(r)
if channel is None:
if r is None:
# from numpy import random,char
r = random.randint(0, 2, 6)
r = ','.join(char.array(r))
r = r.split(',')
return r
def _read_pressure(self, gauge, verbose=False):
if isinstance(gauge, str):
name = gauge
else:
name = gauge.name
key = 'DS'
cmd = self._build_command(key, name)
r = self.ask(cmd, verbose=verbose)
r = self._parse_response(r, name)
return r
def _build_command(self, key, value=None):
# prepend key with our address
        # example of new string formatting
# see http://docs.python.org/library/string.html#formatspec
if self.mode == 'rs485':
key = '#{}{}'.format(self.address, key)
if value is not None:
args = (key, value)
else:
args = (key,)
c = ' '.join(args)
return c
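    # Example (sketch): in rs485 mode with address '01',
    # _build_command('DS', 'IG') returns '#01DS IG'.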
def _parse_response(self, r, name):
if self.simulation or r is None:
from numpy.random import normal
if name == 'IG':
loc, scale = 1e-9, 5e-9
else:
loc, scale = 1e-2, 5e-3
return abs(normal(loc, scale))
return r
# ============= EOF ====================================
| {
"content_hash": "333cca1c5e5718c8173106421dbef65d",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 97,
"avg_line_length": 28.671755725190838,
"alnum_prop": 0.5332800851970181,
"repo_name": "UManPychron/pychron",
"id": "2cea69e0db6cd4bd09e2dca6fedda43d106aa17a",
"size": "4553",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/hardware/gauges/granville_phillips/base_micro_ion_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from difflib import SequenceMatcher
import logging
import re
import urllib
import requests
from sqlalchemy import Table, Column, Integer, String, Unicode, Boolean, func
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema as schema
from flexget import plugin
from flexget.event import event
from flexget.plugins.filter.series import normalize_series_name
from flexget.utils.database import with_session
api_key = '6c228565a45a302e49fb7d2dab066c9ab948b7be/'
search_show = 'http://api.trakt.tv/search/shows.json/'
episode_summary = 'http://api.trakt.tv/show/episode/summary.json/'
show_summary = 'http://api.trakt.tv/show/summary.json/'
Base = schema.versioned_base('api_trakt', 2)
log = logging.getLogger('api_trakt')
class TraktContainer(object):
def __init__(self, init_dict=None):
if isinstance(init_dict, dict):
self.update_from_dict(init_dict)
def update_from_dict(self, update_dict):
for col in self.__table__.columns:
if isinstance(update_dict.get(col.name), (basestring, int, float)):
setattr(self, col.name, update_dict[col.name])
genres_table = Table('trakt_series_genres', Base.metadata,
Column('tvdb_id', Integer, ForeignKey('trakt_series.tvdb_id')),
Column('genre_id', Integer, ForeignKey('trakt_genres.id')))
class TraktGenre(TraktContainer, Base):
__tablename__ = 'trakt_genres'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=True)
actors_table = Table('trakt_series_actors', Base.metadata,
Column('tvdb_id', Integer, ForeignKey('trakt_series.tvdb_id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')))
class TraktActors(TraktContainer, Base):
__tablename__ = 'trakt_actors'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
class TraktEpisode(TraktContainer, Base):
__tablename__ = 'trakt_episodes'
tvdb_id = Column(Integer, primary_key=True, autoincrement=False)
episode_name = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
overview = Column(Unicode)
expired = Column(Boolean)
first_aired = Column(Integer)
first_aired_iso = Column(Unicode)
first_aired_utc = Column(Integer)
screen = Column(Unicode)
series_id = Column(Integer, ForeignKey('trakt_series.tvdb_id'), nullable=False)
class TraktSeries(TraktContainer, Base):
__tablename__ = 'trakt_series'
tvdb_id = Column(Integer, primary_key=True, autoincrement=False)
tvrage_id = Column(Unicode)
imdb_id = Column(Unicode)
title = Column(Unicode)
year = Column(Integer)
genre = relation('TraktGenre', secondary=genres_table, backref='series')
network = Column(Unicode, nullable=True)
certification = Column(Unicode)
country = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(Integer)
first_aired_iso = Column(Unicode)
first_aired_utc = Column(Integer)
air_day = Column(Unicode)
air_day_utc = Column(Unicode)
air_time = Column(Unicode)
air_time_utc = Column(Unicode)
runtime = Column(Integer)
last_updated = Column(Integer)
poster = Column(String)
fanart = Column(String)
banner = Column(String)
status = Column(String)
url = Column(Unicode)
episodes = relation('TraktEpisode', backref='series', cascade='all, delete, delete-orphan')
actors = relation('TraktActors', secondary=actors_table, backref='series')
def update(self, session):
tvdb_id = self.tvdb_id
url = ('%s%s%s' % (show_summary, api_key, tvdb_id))
try:
data = requests.get(url).json()
except requests.RequestException as e:
raise LookupError('Request failed %s' % url)
if data:
if data['title']:
for i in data['images']:
data[i] = data['images'][i]
if data['genres']:
genres = {}
for genre in data['genres']:
db_genre = session.query(TraktGenre).filter(TraktGenre.name == genre).first()
if not db_genre:
genres['name'] = genre
db_genre = TraktGenre(genres)
if db_genre not in self.genre:
self.genre.append(db_genre)
if data['people']['actors']:
series_actors = data['people']['actors']
for i in series_actors:
if i['name']:
db_character = session.query(TraktActors).filter(TraktActors.name == i['name']).first()
if not db_character:
db_character = TraktActors(i)
if db_character not in self.actors:
self.actors.append(db_character)
if data['title']:
TraktContainer.update_from_dict(self, data)
else:
            raise LookupError('Could not update information to database for Trakt for tvdb_id %s' % tvdb_id)
def __repr__(self):
return '<Traktv Name=%s, TVDB_ID=%s>' % (self.title, self.tvdb_id)
class TraktSearchResult(Base):
__tablename__ = 'trakt_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_series.tvdb_id'), nullable=True)
series = relation(TraktSeries, backref='search_strings')
def get_series_id(title):
norm_series_name = normalize_series_name(title)
series_name = urllib.quote_plus(norm_series_name)
url = search_show + api_key + series_name
series = None
try:
response = requests.get(url)
except requests.RequestException:
log.warning('Request failed %s' % url)
return
try:
data = response.json()
except ValueError:
log.debug('Error Parsing Traktv Json for %s' % title)
return
if 'status' in data:
log.debug('Returned Status %s' % data['status'])
else:
for item in data:
if normalize_series_name(item['title']) == norm_series_name:
series = item['tvdb_id']
if not series:
for item in data:
title_match = SequenceMatcher(lambda x: x in '\t',
normalize_series_name(item['title']), norm_series_name).ratio()
if not series and title_match > .9:
log.debug('Warning: Using lazy matching because title was not found exactly for %s' % title)
series = item['tvdb_id']
if not series:
log.debug('Trakt.tv Returns only EXACT Name Matching: %s' % title)
return series
class ApiTrakt(object):
@staticmethod
@with_session
def lookup_series(title=None, tvdb_id=None, only_cached=False, session=None):
if not title and not tvdb_id:
raise LookupError('No criteria specified for Trakt.tv Lookup')
series = None
def id_str():
return '<name=%s, tvdb_id=%s>' % (title, tvdb_id)
if tvdb_id:
series = session.query(TraktSeries).filter(TraktSeries.tvdb_id == tvdb_id).first()
if not series and title:
series_filter = session.query(TraktSeries).filter(func.lower(TraktSeries.title) == title.lower())
series = series_filter.first()
if not series:
found = session.query(TraktSearchResult).filter(func.lower(TraktSearchResult.search) ==
title.lower()).first()
if found and found.series:
series = found.series
if not series:
if only_cached:
raise LookupError('Series %s not found from cache' % id_str())
log.debug('Series %s not found in cache, looking up from trakt.' % id_str())
if tvdb_id is not None:
series = TraktSeries()
series.tvdb_id = tvdb_id
series.update(session=session)
if series.title:
session.add(series)
if tvdb_id is None and title is not None:
series_lookup = get_series_id(title)
if series_lookup:
series = session.query(TraktSeries).filter(TraktSeries.tvdb_id == series_lookup).first()
if not series and series_lookup:
series = TraktSeries()
series.tvdb_id = series_lookup
series.update(session=session)
if series.title:
session.add(series)
if title.lower() != series.title.lower():
session.add(TraktSearchResult(search=title, series=series))
else:
raise LookupError('Unknown Series title from Traktv: %s' % id_str())
if not series:
raise LookupError('No results found from traktv for %s' % id_str())
if not series.title:
raise LookupError('Nothing Found for %s' % id_str())
        if series:
            # access the relations so they are loaded before the session closes
            series.episodes
            series.genre
            series.actors
return series
@staticmethod
@with_session
def lookup_episode(title=None, seasonnum=None, episodenum=None, tvdb_id=None, session=None, only_cached=False):
series = ApiTrakt.lookup_series(title=title, tvdb_id=tvdb_id, only_cached=only_cached, session=session)
if not series:
raise LookupError('Could not identify series')
if series.tvdb_id:
ep_description = '%s.S%sE%s' % (series.title, seasonnum, episodenum)
episode = session.query(TraktEpisode).filter(TraktEpisode.series_id == series.tvdb_id).\
filter(TraktEpisode.season == seasonnum).filter(TraktEpisode.number == episodenum).first()
url = episode_summary + api_key + '%s/%s/%s' % (series.tvdb_id, seasonnum, episodenum)
elif title:
title = normalize_series_name(title)
title = re.sub(' ', '-', title)
ep_description = '%s.S%sE%s' % (series.title, seasonnum, episodenum)
episode = session.query(TraktEpisode).filter(title == series.title).\
filter(TraktEpisode.season == seasonnum).\
filter(TraktEpisode.number == episodenum).first()
url = episode_summary + api_key + '%s/%s/%s' % (title, seasonnum, episodenum)
if not episode:
if only_cached:
raise LookupError('Episode %s not found in cache' % ep_description)
log.debug('Episode %s not found in cache, looking up from trakt.' % ep_description)
            try:
                data = requests.get(url)
            except requests.RequestException:
                log.debug('Error Retrieving Trakt url: %s' % url)
                raise LookupError('Error retrieving Trakt url: %s' % url)
            try:
                data = data.json()
            except ValueError:
                log.debug('Error parsing Trakt episode json for %s' % title)
                raise LookupError('Error parsing Trakt episode json for %s' % title)
if data:
if 'status' in data:
raise LookupError('Error looking up episode')
ep_data = data['episode']
if ep_data:
episode = session.query(TraktEpisode).filter(TraktEpisode.tvdb_id == ep_data['tvdb_id']).first()
if not episode:
ep_data['episode_name'] = ep_data.pop('title')
for i in ep_data['images']:
ep_data[i] = ep_data['images'][i]
del ep_data['images']
episode = TraktEpisode(ep_data)
series.episodes.append(episode)
session.merge(episode)
if episode:
return episode
else:
            raise LookupError('No results found for (%s)' % ep_description)
@event('plugin.register')
def register_plugin():
plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
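# A minimal usage sketch (illustrative only, not part of the plugin); the show
# title and episode numbers below are made up. Both lookups are wrapped by
# @with_session, so a database session should be created automatically when
# none is passed in:
#
#     series = ApiTrakt.lookup_series(title='Some Show')
#     print(series.title, series.tvdb_id)
#     episode = ApiTrakt.lookup_episode(title='Some Show', seasonnum=1, episodenum=2)
#     print(episode.episode_name)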
| {
"content_hash": "581b8fe9051d4ae87553acc22536da66",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 116,
"avg_line_length": 40.89333333333333,
"alnum_prop": 0.5841212911640039,
"repo_name": "voriux/Flexget",
"id": "cc95851925484ee131e00806c713eec4d20d03f4",
"size": "12268",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/plugins/api_trakt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "1849035"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Module API
def check_required(constraint, value):
    if not (constraint and value is None):
        return True
    return False
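# Illustrative doctest-style examples of the rule above (a value passes unless
# the constraint is truthy and the value is missing):
#
#     >>> check_required(True, 'name')
#     True
#     >>> check_required(True, None)
#     False
#     >>> check_required(False, None)
#     True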
| {
"content_hash": "0345ce3353c73e8c3e9c6652eefd893e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7183098591549296,
"repo_name": "okfn/jsontableschema-py",
"id": "c164388dd86443558e614f31a139047c4f9e1e67",
"size": "308",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tableschema/constraints/required.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "134974"
}
],
"symlink_target": ""
} |
from paasta_tools.autoscaling import cluster_boost
from paasta_tools.cli.utils import execute_paasta_cluster_boost_on_remote_master
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import paasta_print
def add_subparser(subparsers):
boost_parser = subparsers.add_parser(
'boost',
help="Set, print the status, or clear a capacity boost for a given region in a PaaSTA cluster",
description=(
"'paasta boost' is used to temporary provision more capacity in a given cluster "
"It operates by ssh'ing to a Mesos master of a remote cluster, and "
"interracting with the boost in the local zookeeper cluster. If you set or clear "
"a boost, you may want to run the cluster autoscaler manually afterwards."
),
epilog=(
"The boost command may time out during heavy load. When that happens "
"users may execute the ssh command directly, in order to bypass the timeout."
),
)
boost_parser.add_argument(
'-v', '--verbose',
action='count',
dest="verbose",
default=0,
help="""Print out more output regarding the state of the cluster.
Multiple v options increase verbosity. Maximum is 3.""",
)
boost_parser.add_argument(
'-c', '--cluster',
type=str,
required=True,
help="""Paasta cluster(s) to boost. This option can take comma separated values.
If auto-completion doesn't work, you can get a list of cluster with `paasta list-clusters'""",
).completer = lazy_choices_completer(list_clusters)
boost_parser.add_argument(
'--soa-dir',
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
boost_parser.add_argument(
'-p', '--pool',
type=str,
default='default',
        help="Name of the pool whose capacity you want to increase. Default is the 'default' pool.",
)
boost_parser.add_argument(
'-b', '--boost',
type=float,
default=cluster_boost.DEFAULT_BOOST_FACTOR,
        help="Boost factor to apply. Default is 1.5. A big failover should be 2; 3 is the max.",
)
boost_parser.add_argument(
'-d', '--duration',
type=int,
default=cluster_boost.DEFAULT_BOOST_DURATION,
help="Duration of the capacity boost in minutes. Default is 40",
)
boost_parser.add_argument(
'-f', '--force',
action='store_true',
dest='override',
help="Replace an existing boost. Default is false",
)
boost_parser.add_argument(
'action',
choices=[
'set',
'status',
'clear',
],
help="You can view the status, set or clear a boost.",
)
boost_parser.set_defaults(command=paasta_boost)
def paasta_boost(args):
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
all_clusters = list_clusters(soa_dir=soa_dir)
clusters = args.cluster.split(',')
for cluster in clusters:
if cluster not in all_clusters:
paasta_print(
"Error: {} doesn't look like a valid cluster. ".format(cluster) +
"Here is a list of valid paasta clusters:\n" + "\n".join(all_clusters),
)
return 1
return_code, output = execute_paasta_cluster_boost_on_remote_master(
clusters=clusters,
system_paasta_config=system_paasta_config,
action=args.action,
pool=args.pool,
duration=args.duration if args.action == 'set' else None,
override=args.override if args.action == 'set' else None,
boost=args.boost if args.action == 'set' else None,
verbose=args.verbose,
)
paasta_print(output)
return return_code
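# Example invocations of the subcommand defined above (cluster and pool names
# are hypothetical):
#
#     paasta boost -c norcal-devc -p default -b 2 -d 60 set
#     paasta boost -c norcal-devc,pnw-prod status
#     paasta boost -c norcal-devc clear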
| {
"content_hash": "de6f0b25a7d3656b688b9054e91eb8e8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 103,
"avg_line_length": 37.822429906542055,
"alnum_prop": 0.6209537929330368,
"repo_name": "somic/paasta",
"id": "3d51c2fbbd2dbb713de05e33044727eabf9105e4",
"size": "4647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paasta_tools/cli/cmds/boost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "71885"
},
{
"name": "Makefile",
"bytes": "6598"
},
{
"name": "Python",
"bytes": "3231060"
},
{
"name": "Shell",
"bytes": "16324"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AnonymousUser
from django.db import models
from django.db.models.aggregates import Count
from polymorphic.manager import PolymorphicManager
from shop.order_signals import processing
from shop.util.compat.db import atomic
#==============================================================================
# Product
#==============================================================================
class ProductStatisticsManager(PolymorphicManager):
"""
A Manager for all the non-object manipulation needs, mostly statistics and
other "data-mining" toys.
"""
def top_selling_products(self, quantity):
"""
This method "mines" the previously passed orders, and gets a list of
products (of a size equal to the quantity parameter), ordered by how
many times they have been purchased.
"""
# Importing here is fugly, but it saves us from circular imports...
from shop.models.ordermodel import OrderItem
# Get an aggregate of product references and their respective counts
top_products_data = OrderItem.objects.values(
'product').annotate(
product_count=Count('product')
).order_by('product_count'
)[:quantity]
# The top_products_data result should be in the form:
        # [{'product': <product_id>, 'product_count': <count>}, ...]
top_products_list = [] # The actual list of products
for values in top_products_data:
prod = values.get('product')
# We could eventually return the count easily here, if needed.
top_products_list.append(prod)
return top_products_list
class ProductManager(PolymorphicManager):
"""
A more classic manager for Product filtering and manipulation.
"""
def active(self):
return self.filter(active=True)
#==============================================================================
# Order
#==============================================================================
class OrderManager(models.Manager):
def get_latest_for_user(self, user):
"""
Returns the last Order (from a time perspective) a given user has
placed.
"""
if user and not isinstance(user, AnonymousUser):
return self.filter(user=user).order_by('-modified')[0]
else:
return None
def get_unconfirmed_for_cart(self, cart):
return self.filter(cart_pk=cart.pk, status__lt=self.model.CONFIRMED)
def remove_old_orders(self, cart):
"""
Removes all old unconfirmed order objects.
"""
old_orders = self.get_unconfirmed_for_cart(cart)
old_orders.delete()
def create_order_object(self, cart, request):
"""
Create an empty order object and fill it with the given cart data.
"""
order = self.model()
order.cart_pk = cart.pk
order.user = cart.user
order.status = self.model.PROCESSING # Processing
order.order_subtotal = cart.subtotal_price
order.order_total = cart.total_price
return order
@atomic
def create_from_cart(self, cart, request):
"""
This creates a new Order object (and all the rest) from a passed Cart
object.
Specifically, it creates an Order with corresponding OrderItems and
eventually corresponding ExtraPriceFields
This will only actually commit the transaction once the function exits
to minimize useless database access.
        The `request` parameter is further passed to process_cart_item,
        process_cart, and post_process_cart, so it can be used as a way to
        store per-request arbitrary information.
Emits the ``processing`` signal.
"""
# must be imported here!
from shop.models.ordermodel import (
ExtraOrderItemPriceField,
ExtraOrderPriceField,
OrderItem,
)
from shop.models.cartmodel import CartItem
# First, let's remove old orders
self.remove_old_orders(cart)
# Create an empty order object
order = self.create_order_object(cart, request)
order.save()
# Let's serialize all the extra price arguments in DB
for field in cart.extra_price_fields:
eoi = ExtraOrderPriceField()
eoi.order = order
eoi.label = unicode(field[0])
eoi.value = field[1]
if len(field) == 3:
eoi.data = field[2]
eoi.save()
# There, now move on to the order items.
cart_items = CartItem.objects.filter(cart=cart)
for item in cart_items:
item.update(request)
order_item = OrderItem()
order_item.order = order
order_item.product_reference = item.product.get_product_reference()
order_item.product_name = item.product.get_name()
order_item.product = item.product
order_item.unit_price = item.product.get_price()
order_item.quantity = item.quantity
order_item.line_total = item.line_total
order_item.line_subtotal = item.line_subtotal
order_item.save()
# For each order item, we save the extra_price_fields to DB
for field in item.extra_price_fields:
eoi = ExtraOrderItemPriceField()
eoi.order_item = order_item
# Force unicode, in case it has àö...
eoi.label = unicode(field[0])
eoi.value = field[1]
if len(field) == 3:
eoi.data = field[2]
eoi.save()
processing.send(self.model, order=order, cart=cart)
return order
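# Usage sketch (assuming `Order` is the concrete model that attaches this
# manager as `objects`, and that `cart` and `request` come from the checkout
# view handling the current request):
#
#     order = Order.objects.create_from_cart(cart, request)
#     latest = Order.objects.get_latest_for_user(request.user)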
| {
"content_hash": "409fcf52d7744c0909425d5391a25753",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 36.141975308641975,
"alnum_prop": 0.5772843723313408,
"repo_name": "febsn/django-shop",
"id": "134033087bdaba26d39dd2d265e396aaa47d798e",
"size": "5881",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "shop/models_bases/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9179"
},
{
"name": "Python",
"bytes": "410245"
},
{
"name": "Shell",
"bytes": "916"
}
],
"symlink_target": ""
} |
import mockups
from datetime import datetime
from django.contrib.auth.models import User, UNUSABLE_PASSWORD
from mockups import Mockup, Factory
from mockups import generators
class UserFactory(Factory):
username = generators.UUIDGenerator(max_length=30)
first_name = generators.LoremWordGenerator(1)
last_name = generators.LoremWordGenerator(1)
password = generators.StaticGenerator(UNUSABLE_PASSWORD)
is_active = generators.StaticGenerator(True)
# don't generate admin users
is_staff = generators.StaticGenerator(False)
is_superuser = generators.StaticGenerator(False)
date_joined = generators.DateTimeGenerator(max_date=datetime.now())
last_login = generators.DateTimeGenerator(max_date=datetime.now())
class UserMockup(Mockup):
'''
:class:`UserMockup` is automatically used by default to create new
``User`` instances. It uses the following values to assure that you can
use the generated instances without any modification:
* ``username`` only contains chars that are allowed by django's auth forms.
* ``email`` is unique.
* ``first_name`` and ``last_name`` are single, random words of the lorem
ipsum text.
* ``is_staff`` and ``is_superuser`` are always ``False``.
* ``is_active`` is always ``True``.
* ``date_joined`` and ``last_login`` are always in the past and it is
assured that ``date_joined`` will be lower than ``last_login``.
'''
# don't follow permissions and groups
follow_m2m = False
factory = UserFactory
def __init__(self, *args, **kwargs):
'''
By default the password is set to an unusable value, this makes it
impossible to login with the generated users. If you want to use for
example ``mockups.create_one('auth.User')`` in your unittests to have
a user instance which you can use to login with the testing client you
can provide a ``username`` and a ``password`` argument. Then you can do
something like::
            mockups.create_one('auth.User', username='foo', password='bar')
self.client.login(username='foo', password='bar')
'''
self.username = kwargs.pop('username', None)
self.password = kwargs.pop('password', None)
super(UserMockup, self).__init__(*args, **kwargs)
if self.username:
self.update_fieldname_generator(
username = generators.StaticGenerator(self.username)
)
def unique_email(self, model, instance):
if User.objects.filter(email=instance.email):
raise mockups.InvalidConstraint(('email',))
def prepare_class(self):
self.add_constraint(self.unique_email)
def post_process_instance(self, instance):
# make sure user's last login was not before he joined
if instance.last_login < instance.date_joined:
instance.last_login = instance.date_joined
if self.password:
instance.set_password(self.password)
return instance
mockups.register(User, UserMockup, fail_silently=True)
| {
"content_hash": "69275feeae0c368909d2d645413af9f2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 40.103896103896105,
"alnum_prop": 0.6716321243523317,
"repo_name": "sorl/django-mockups",
"id": "3dc64993e062fc1ffa705163bbc407aa838989d8",
"size": "3112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mockups/contrib/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "88743"
}
],
"symlink_target": ""
} |
"""
This module provides an interface to the Elastic MapReduce (EMR)
service from AWS.
"""
from boto.emr.connection import EmrConnection
from boto.emr.step import Step, StreamingStep, JarStep
from boto.emr.bootstrap_action import BootstrapAction
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
def regions():
    """
    Get all available regions for the Amazon Elastic MapReduce service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    return get_regions('elasticmapreduce', connection_cls=EmrConnection)
def connect_to_region(region_name, **kw_params):
    return connect('elasticmapreduce', region_name,
                   connection_cls=EmrConnection, **kw_params)
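# Usage sketch (the region name is an example; boto normally reads credentials
# from its configuration files or the environment):
#
#     import boto.emr
#     conn = boto.emr.connect_to_region('us-east-1')
#     for region in boto.emr.regions():
#         print(region.name)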
| {
"content_hash": "a0378f24b974b293ff6239860b31341b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 31.375,
"alnum_prop": 0.7410358565737052,
"repo_name": "xq262144/hue",
"id": "dfa53c7337195e9d7ff07c5124d6130b28aa4cf9",
"size": "1963",
"binary": false,
"copies": "32",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/boto-2.46.1/boto/emr/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
} |
import os
from collections import defaultdict
from .smb_utils import smb_connect, get_netbios_name, NameError
from smb.base import SharedDevice
DEFAULT_TIMEOUT = 30
DEFAULT_SHARE = 'data'
class IfcbConnectionError(Exception):
pass
def do_nothing(*args, **kw):
pass
class RemoteIfcb(object):
def __init__(self, addr, username, password, netbios_name=None, timeout=DEFAULT_TIMEOUT,
share=DEFAULT_SHARE, directory='', connect=True):
self.addr = addr
self.username = username
self.password = password
self.timeout = timeout
self.share = share
self.connect = connect
self.netbios_name = netbios_name
self.directory = directory
self._c = None
def open(self):
if self._c is not None:
return
try:
self._c = smb_connect(self.addr, self.username, self.password, self.netbios_name, self.timeout)
except:
raise IfcbConnectionError('unable to connect to IFCB')
def close(self):
if self._c is not None:
self._c.close()
self._c = None
def __enter__(self):
if self.connect:
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def ensure_connected(self):
if self._c is None:
raise IfcbConnectionError('IFCB is not connected')
def is_responding(self):
# tries to get NetBIOS name to see if IFCB is responding
if self.netbios_name is not None:
return True # FIXME determine connection state
if self._c is not None:
return True
else:
try:
get_netbios_name(self.addr, timeout=self.timeout)
return True
except:
return False
def list_shares(self):
self.ensure_connected()
for share in self._c.listShares():
if share.type == SharedDevice.DISK_TREE:
yield share.name
def share_exists(self):
self.ensure_connected()
for share in self.list_shares():
if share.lower() == self.share.lower():
return True
return False
def list_filesets(self):
"""list fileset lids, most recent first"""
self.ensure_connected()
fs = defaultdict(lambda: 0)
for f in self._c.listPath(self.share, self.directory):
if f.isDirectory:
continue
fn = f.filename
lid, ext = os.path.splitext(fn)
if ext in ['.hdr','.roi','.adc']:
fs[lid] += 1
complete_sets = []
for lid, c in fs.items():
if c == 3: # complete fileset
complete_sets.append(lid)
return sorted(complete_sets, reverse=True)
def transfer_fileset(self, lid, local_directory, skip_existing=True, create_directories=True):
self.ensure_connected()
if create_directories:
os.makedirs(local_directory, exist_ok=True)
n_copied = 0
for ext in ['hdr', 'adc', 'roi']:
fn = '{}.{}'.format(lid, ext)
local_path = os.path.join(local_directory, fn)
remote_path = os.path.join(self.directory, fn)
temp_local_path = local_path + '.temp_download'
if skip_existing and os.path.exists(local_path):
lf_size = os.path.getsize(local_path)
rf = self._c.getAttributes(self.share, remote_path)
if lf_size == rf.file_size:
continue
with open(temp_local_path, 'wb') as fout:
self._c.retrieveFile(self.share, remote_path, fout, timeout=self.timeout)
os.rename(temp_local_path, local_path)
n_copied += 1
return n_copied > 0
def delete_fileset(self, lid):
self.ensure_connected()
for ext in ['hdr', 'adc', 'roi']:
self._c.deleteFiles(self.share, '{}.{}'.format(lid, ext))
def sync(self, local_directory, progress_callback=do_nothing, fileset_callback=do_nothing):
        # local_directory can be
        # * a path, or
        # * a callable returning a path when passed a bin lid
self.ensure_connected()
fss = self.list_filesets()
copied = []
failed = []
for lid in fss:
            try:
                if callable(local_directory):
                    destination_directory = local_directory(lid)
                else:
                    destination_directory = local_directory
                was_copied = self.transfer_fileset(lid, destination_directory, skip_existing=True)
if was_copied:
copied.append(lid)
fileset_callback(lid)
            except Exception:
                failed.append(lid)
progress_callback({
'total': len(fss),
'copied': copied,
'failed': failed,
'lid': lid
})
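# Usage sketch; the address, credentials, and destination directory below are
# placeholders:
#
#     ifcb = RemoteIfcb('10.0.0.5', 'ifcb', 'secret')
#     with ifcb:
#         for lid in ifcb.list_filesets():
#             print(lid)
#         ifcb.sync('/data/raw', fileset_callback=print)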
| {
"content_hash": "ae8b7173cfc845dfc3495c866c982732",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 107,
"avg_line_length": 36,
"alnum_prop": 0.5489130434782609,
"repo_name": "joefutrelle/pyifcb",
"id": "0c37ea582882e6065887aeec5c9a54eeaf1ac60d",
"size": "4968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ifcb/data/transfer/remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161062"
}
],
"symlink_target": ""
} |
import hassapi as hass
import globals
#
# App to send notification when door opened or closed
#
# Args:
#
# sensor: sensor to monitor e.g. input_binary.hall
#
# Release Notes
#
# Version 1.0:
# Initial Version
class DoorNotification(hass.Hass):

    def initialize(self):
        if "sensor" in self.args:
            for sensor in self.split_device_list(self.args["sensor"]):
                self.listen_state(self.state_change, sensor)
        else:
            # no sensor configured: listen to all binary sensors
            self.listen_state(self.state_change, "binary_sensor")

    def state_change(self, entity, attribute, old, new, kwargs):
        if new == "on" or new == "open":
            state = "open"
        else:
            state = "closed"
        self.log("{} is {}".format(self.friendly_name(entity), state))
        self.notify("{} is {}".format(self.friendly_name(entity), state), name=globals.notify)
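# Example apps.yaml entry for this app (entity IDs are hypothetical); several
# sensors can be given as a comma-separated list:
#
#   door_notification:
#     module: door_notification
#     class: DoorNotification
#     sensor: binary_sensor.front_door,binary_sensor.back_door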
| {
"content_hash": "13a323a4821dcedf6eff673c8c4cbc91",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 94,
"avg_line_length": 27.70967741935484,
"alnum_prop": 0.6123399301513388,
"repo_name": "acockburn/appdaemon",
"id": "90c00182e0eda9c5000fef827c9ad91ce04397f5",
"size": "859",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conf/example_apps/door_notification.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96201"
},
{
"name": "Shell",
"bytes": "1768"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):

    dependencies = [
        ('post', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='start_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 8, 31, 15, 54, 31, 983451), help_text='Fecha y hora en que aparecera la publicaci\xf3n.'),
        ),
    ]
| {
"content_hash": "589174c5a97f9faf298cb15913af80d1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 161,
"avg_line_length": 25.94736842105263,
"alnum_prop": 0.6247464503042597,
"repo_name": "jualjiman/blog-django-campus",
"id": "82682484eb817da068361e99e7880924dc1ffc18",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/post/migrations/0002_auto_20150831_1554.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16033"
},
{
"name": "Shell",
"bytes": "5485"
}
],
"symlink_target": ""
} |
"""
byceps.services.language.dbmodels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from ...database import db
class Language(db.Model):
    """A language.

    The code can be just `en` or `de`, but also `en-gb` or `de-de`.
    """

    __tablename__ = 'languages'

    code = db.Column(db.UnicodeText, primary_key=True)

    def __init__(self, code: str) -> None:
        self.code = code
| {
"content_hash": "e869741247b6a9a4ddab8af5ecba9639",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 21.08695652173913,
"alnum_prop": 0.5896907216494846,
"repo_name": "homeworkprod/byceps",
"id": "b1a5af39c0c0286f4f8f602cffa1ac039740555e",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "byceps/services/language/dbmodels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
} |
import logging
from ..all_steps.event_handler import EventHandler as TopEventHandler
from ..step1.data_handler import DataHandler
from ..step1.plot import Step1Plot
from .gui_handler import Step3GuiHandler
from ..utilities.retrieve_data_infos import RetrieveGeneralDataInfos
from .. import DataType
class EventHandler(TopEventHandler):
def import_button_clicked(self):
logging.info(f"{self.data_type} import button clicked")
self.parent.loading_flag = True
o_load = DataHandler(parent=self.parent,
data_type=self.data_type)
_folder = o_load.select_folder()
o_load.import_files_from_folder(folder=_folder, extension=[".tif", ".fits", ".tiff"])
o_load.import_time_spectra()
if not (self.parent.data_metadata[self.data_type]['data'] is None):
self.update_ui_after_loading_data(folder=_folder)
self.check_time_spectra_status()
def sample_list_selection_changed(self):
if not self.parent.loading_flag:
o_retrieve_data_infos = RetrieveGeneralDataInfos(parent=self.parent, data_type=DataType.normalized)
o_retrieve_data_infos.update()
self.parent.roi_normalized_image_view_changed(mouse_selection=False)
else:
self.parent.loading_flag = False
def import_button_clicked_automatically(self, folder=None):
o_load = DataHandler(parent=self.parent,
data_type=self.data_type)
o_load.import_files_from_folder(folder=folder, extension=[".tif", ".fits", ".tiff"])
o_load.import_time_spectra()
if self.parent.data_metadata[self.data_type]['data'].any():
self.update_ui_after_loading_data(folder=folder)
def update_ui_after_loading_data(self, folder=None):
self.parent.data_metadata[self.data_type]['folder'] = folder
self.parent.select_load_data_row(data_type=self.data_type, row=0)
self.parent.retrieve_general_infos(data_type=self.data_type)
self.parent.retrieve_general_data_infos(data_type=self.data_type)
o_plot = Step1Plot(parent=self.parent, data_type=self.data_type)
o_plot.initialize_default_roi()
o_plot.display_bragg_edge(mouse_selection=False)
o_gui = Step3GuiHandler(parent=self.parent)
o_gui.check_widgets()
self.check_time_spectra_status()
def check_time_spectra_status(self):
if str(self.parent.ui.time_spectra_2.text()):
self.parent.ui.display_warning_2.setVisible(False)
else:
self.parent.ui.display_warning_2.setVisible(True)
| {
"content_hash": "a12c7503bd8f9c671dcfdb715fa0d028",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 111,
"avg_line_length": 41.015625,
"alnum_prop": 0.6659047619047619,
"repo_name": "ornlneutronimaging/iBeatles",
"id": "0fa72a9db3db8d1802704853782149ee9f8523a1",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibeatles/step3/event_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "870567"
}
],
"symlink_target": ""
} |
import glob
import os
import re
import sys
import tempfile
from oslo_config import cfg
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
CONTRIB_DIR = os.path.join(ROOT, 'contrib')
PLUGIN_DIRS = glob.glob(os.path.join(CONTRIB_DIR, '*'))
ENV_DIR = os.path.join(ROOT, "etc", "heat", "environment.d")
TEMP_ENV_DIR = tempfile.mkdtemp()
for f in glob.glob(os.path.join(ENV_DIR, "*.yaml")):
with open(f, "r") as fin:
name = os.path.split(f)[-1]
with open(os.path.join(TEMP_ENV_DIR, name), "w") as fout:
fout.write(fin.read().replace("file:///", "file://%s/" % ROOT))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
cfg.CONF.import_opt('plugin_dirs', 'heat.common.config')
cfg.CONF.set_override(name='plugin_dirs', override=PLUGIN_DIRS)
cfg.CONF.import_opt('environment_dir', 'heat.common.config')
cfg.CONF.set_override(name='environment_dir', override=TEMP_ENV_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
def write_autodoc_index():
def find_autodoc_modules(module_name, sourcedir):
"""Return a list of modules in the SOURCE directory."""
modlist = []
os.chdir(os.path.join(sourcedir, module_name))
print("SEARCHING %s" % sourcedir)
for root, dirs, files in os.walk("."):
for filename in files:
if filename.endswith(".py"):
# remove the pieces of the root
elements = root.split(os.path.sep)
# replace the leading "." with the module name
elements[0] = module_name
# and get the base module name
base, extension = os.path.splitext(filename)
if not (base == "__init__"):
elements.append(base)
result = ".".join(elements)
modlist.append(result)
return modlist
RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
SOURCES = {'heat': {'module': 'heat', 'path': ROOT}}
EXCLUDED_MODULES = ('heat.testing',
'heat.cmd',
'heat.common',
'heat.cloudinit',
'heat.cfn_client',
'heat.doc',
'heat.db',
'heat.engine.resources',
'heat.locale',
'heat.openstack',
'.*\.tests',
'.*\.resources')
CURRENT_SOURCES = {}
if not(os.path.exists(RSTDIR)):
os.mkdir(RSTDIR)
CURRENT_SOURCES[RSTDIR] = ['autoindex.rst', '.gitignore']
INDEXOUT = open(os.path.join(RSTDIR, "autoindex.rst"), "w")
INDEXOUT.write("=================\n")
INDEXOUT.write("Source Code Index\n")
INDEXOUT.write("=================\n")
for title, info in SOURCES.items():
path = info['path']
modulename = info['module']
sys.stdout.write("Generating source documentation for %s\n" %
title)
INDEXOUT.write("\n%s\n" % title.capitalize())
INDEXOUT.write("%s\n" % ("=" * len(title),))
INDEXOUT.write(".. toctree::\n")
INDEXOUT.write(" :maxdepth: 1\n")
INDEXOUT.write("\n")
MOD_DIR = os.path.join(RSTDIR, title)
CURRENT_SOURCES[MOD_DIR] = []
if not(os.path.exists(MOD_DIR)):
os.makedirs(MOD_DIR)
for module in find_autodoc_modules(modulename, path):
if any([re.match(exclude, module)
for exclude
in EXCLUDED_MODULES]):
print("Excluded module %s." % module)
continue
mod_path = os.path.join(path, *module.split("."))
generated_file = os.path.join(MOD_DIR, "%s.rst" % module)
INDEXOUT.write(" %s/%s\n" % (title, module))
# Find the __init__.py module if this is a directory
if os.path.isdir(mod_path):
source_file = ".".join((os.path.join(mod_path, "__init__"),
"py",))
else:
source_file = ".".join((os.path.join(mod_path), "py"))
CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
# Only generate a new file if the source has changed or we don't
# have a doc file to begin with.
if not os.access(generated_file, os.F_OK) or \
os.stat(generated_file).st_mtime < \
os.stat(source_file).st_mtime:
print("Module %s updated, generating new documentation."
% module)
FILEOUT = open(generated_file, "w")
header = "The :mod:`%s` Module" % module
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write("%s\n" % header)
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write(".. automodule:: %s\n" % module)
FILEOUT.write(" :members:\n")
FILEOUT.write(" :undoc-members:\n")
FILEOUT.write(" :show-inheritance:\n")
FILEOUT.write(" :noindex:\n")
FILEOUT.close()
INDEXOUT.close()
# Delete auto-generated .rst files for sources which no longer exist
for directory, subdirs, files in list(os.walk(RSTDIR)):
for old_file in files:
if old_file not in CURRENT_SOURCES.get(directory, []):
print("Removing outdated file for %s" % old_file)
os.remove(os.path.join(directory, old_file))
write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'oslosphinx',
'ext.resources']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Heat'
copyright = u'2012,2013 Heat Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Heatdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'Heat.tex', u'Heat Documentation',
u'Heat Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man/heat-api', 'heat-api',
u'REST API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-api-cfn', 'heat-api-cfn',
u'CloudFormation compatible API service to the heat project.',
[u'Heat Developers'], 1),
('man/heat-api-cloudwatch', 'heat-api-cloudwatch',
u'CloudWatch alike API service to the heat project',
[u'Heat Developers'], 1),
('man/heat-db-setup', 'heat-db-setup',
u'Command line utility to setup the Heat database',
[u'Heat Developers'], 1),
('man/heat-engine', 'heat-engine',
u'Service which performs the actions from the API calls made by the user',
[u'Heat Developers'], 1),
('man/heat-keystone-setup', 'heat-keystone-setup',
u'Script which sets up keystone for usage by Heat',
[u'Heat Developers'], 1),
('man/heat-keystone-setup-domain', 'heat-keystone-setup-domain',
u'Script which sets up a keystone domain for heat users and projects',
[u'Heat Developers'], 1),
('man/heat-manage', 'heat-manage',
u'Script which helps manage specific database operations',
[u'Heat Developers'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Heat', u'Heat Documentation',
u'Heat Developers', 'Heat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "993c9954d7be1dfa2e02549fedc9921f",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 79,
"avg_line_length": 35.47560975609756,
"alnum_prop": 0.6167755242351324,
"repo_name": "rdo-management/heat",
"id": "f57621c7c45c7faa98eb211f232d782e85c9460c",
"size": "15536",
"binary": false,
"copies": "2",
"ref": "refs/heads/mgt-master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5970886"
},
{
"name": "Shell",
"bytes": "25070"
}
],
"symlink_target": ""
} |
import json
import pika
import sys
from datetime import datetime, timedelta
import string
import random
class Client:
"""
Client application to the GRACC Request daemons
"""
def __init__(self, exchange, routing_key, url="amqp://guest:guest@localhost/"):
"""
Initialization function
:param str exchange: Exchange to send requests to.
:param str routing_key: Routing key to bind to.
:param str url: URL of the amqp connection. Can be in the form of scheme://username:password@host:port/vhost
"""
self.url = url
self.exchange = exchange
self.routing_key = routing_key
self.conn = None
self.messages_received = 0
self.last_messages = 0
def _createQueues(self, create_data):
"""
Create the necessary queues and exchanges for data and control messages to be received.
:param boolean create_data: Whether to create the data exchanges or not. Setting to true will create the data channels.
"""
if not self.conn:
self._createConn()
# Create a new channel
self.channel = self.conn.channel()
if create_data:
# Create the receive queue
self.data_queue = "data-queue-%s" % self._createName()
self.data_exchange = "data-exchange-%s" % self._createName()
self.data_key = "data-key-%s" % self._createName()
self.channel.queue_declare(queue=self.data_queue, durable=False, exclusive=True, auto_delete=True)
self.channel.exchange_declare(exchange=self.data_exchange, exchange_type='direct', durable=False, auto_delete=True)
self.channel.queue_bind(queue=self.data_queue, exchange=self.data_exchange, routing_key=self.data_key)
# Create the control queue
self.control_queue = "control-queue-%s" % self._createName()
self.control_exchange = "control-exchange-%s" % self._createName()
self.control_key = "control-key-%s" % self._createName()
self.channel.queue_declare(queue=self.control_queue, durable=False, exclusive=True, auto_delete=True)
self.channel.exchange_declare(exchange=self.control_exchange, exchange_type='direct', durable=False, auto_delete=True)
self.channel.queue_bind(queue=self.control_queue, exchange=self.control_exchange, routing_key=self.control_key)
def _createName(self, size=6):
"""
Create a unique string name.
"""
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def _createConn(self):
"""
Initiate the remote connection
"""
parameters = pika.URLParameters(self.url)
self.conn = pika.adapters.blocking_connection.BlockingConnection(parameters)
def _getControlMessage(self, channel, method, properties, body):
"""
Receives control messages from the remote agents
"""
# Receives the control messages
body_parsed = json.loads(body)
self.channel.basic_ack(delivery_tag=method.delivery_tag)
if body_parsed['stage'] == "finished":
def deadline_reached():
#print "Deadline reached"
self.channel.stop_consuming()
self.conn.add_timeout(1, deadline_reached)
def _getDataMessage(self, channel, method, properties, body):
"""
Receives the data messages
"""
self.channel.basic_ack(delivery_tag=method.delivery_tag)
self.messages_received += 1
self.callbackDataMessage(body)
def _checkStatus(self):
"""
Called every X seconds to check the status of the transfer.
If nothing has happened lately, then kill the connection.
"""
if self.last_messages == self.messages_received:
self.channel.stop_consuming()
else:
self.last_messages = self.messages_received
self.timer_id = self.conn.add_timeout(300, self._checkStatus)
def query(self, from_date, to_date, kind, getMessage=None, destination_exchange=None, destination_key=None):
"""
Query the remote agents for data.
        :param datetime from_date: A python datetime object representing the beginning of the query's time interval.
:param datetime to_date: A python datetime object representing the end of the query's time interval
:param str kind: The kind of request. Either "raw", "summary", or "transfer_summary"
:param function getMessage: A callback to send the received records.
:param str destination_exchange: The name of the exchange to send data to.
:param str destination_key: The routing key to use for destination.
Either getMessage is None, or both destination_exchange and destination_key are None. getMessage is used
to retrieve data inline, while destination_exchange and destination_key are used to route traffic elsewhere.
destination_exchange has to already exist.
"""
# Check that we don't have conflicting variable states
assert (getMessage == None) or ((destination_exchange == None) and (destination_key == None))
        # Convenience variable to see if we are receiving the data, or not
remote_destination = (destination_exchange != None) and (destination_key != None)
# Create the connection
self._createConn()
self._createQueues(not remote_destination)
# First, create the msg
msg = {}
msg["from"] = from_date.isoformat()
msg["to"] = to_date.isoformat()
msg["kind"] = kind
if remote_destination:
msg["destination"] = destination_exchange
msg["routing_key"] = destination_key
else:
msg["destination"] = self.data_exchange
msg["routing_key"] = self.data_key
msg["control"] = self.control_exchange
msg["control_key"] = self.control_key
# Now listen to the queues
self.callbackDataMessage = getMessage
self.channel.basic_consume(self._getControlMessage, self.control_queue)
if not remote_destination:
self.channel.basic_consume(self._getDataMessage, self.data_queue)
# Send the message
self.channel.basic_publish(self.exchange,
self.routing_key,
json.dumps(msg),
pika.BasicProperties(content_type='text/json',
delivery_mode=1))
# Begin the checkStatus timer
self.timer_id = self.conn.add_timeout(300, self._checkStatus)
self.channel.start_consuming()
self.conn.close()
self.conn = None
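# Usage sketch; the exchange name, routing key, and AMQP URL are placeholders
# for a real GRACC request setup:
#
#     def print_record(body):
#         print(body)
#
#     client = Client('gracc.requests', 'gracc.requests',
#                     url='amqp://guest:guest@localhost/')
#     client.query(datetime.now() - timedelta(days=1), datetime.now(), 'raw',
#                  getMessage=print_record)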
| {
"content_hash": "153edf0d42d83a850f822bafa4036563",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 128,
"avg_line_length": 40.175141242937855,
"alnum_prop": 0.6049782027844185,
"repo_name": "shreyb/gracc-request",
"id": "9327dda961cf92e1e7d9ff0a6b54ea1c472311e4",
"size": "7111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/graccreq/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "540"
},
{
"name": "Python",
"bytes": "85662"
},
{
"name": "Shell",
"bytes": "2795"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, with_statement
from mock import Mock, patch
from nose.tools import *
import parse
from behave import matchers, model, runner
class DummyMatcher(matchers.Matcher):
desired_result = None
def check_match(self, step):
return DummyMatcher.desired_result
class TestMatcher(object):
def setUp(self):
DummyMatcher.desired_result = None
def test_returns_none_if_check_match_returns_none(self):
matcher = DummyMatcher(None, None)
assert matcher.match('just a random step') is None
def test_returns_match_object_if_check_match_returns_arguments(self):
arguments = ['some', 'random', 'objects']
func = lambda x: -x
DummyMatcher.desired_result = arguments
matcher = DummyMatcher(func, None)
match = matcher.match('just a random step')
assert isinstance(match, model.Match)
assert match.func is func
assert match.arguments == arguments
class TestParseMatcher(object):
def setUp(self):
self.recorded_args = None
def record_args(self, *args, **kwargs):
self.recorded_args = (args, kwargs)
def test_returns_none_if_parser_does_not_match(self):
matcher = matchers.ParseMatcher(None, 'a string')
with patch.object(matcher.parser, 'parse') as parse:
parse.return_value = None
assert matcher.match('just a random step') is None
def test_returns_arguments_based_on_matches(self):
func = lambda x: -x
matcher = matchers.ParseMatcher(func, 'foo')
results = parse.Result([1, 2, 3], {'foo': 'bar', 'baz': -45.3},
{
0: (13, 14),
1: (16, 17),
2: (22, 23),
'foo': (32, 35),
'baz': (39, 44),
})
expected = [
(13, 14, '1', 1, None),
(16, 17, '2', 2, None),
(22, 23, '3', 3, None),
(32, 35, 'bar', 'bar', 'foo'),
(39, 44, '-45.3', -45.3, 'baz'),
]
with patch.object(matcher.parser, 'parse') as p:
p.return_value = results
m = matcher.match('some numbers 1, 2 and 3 and the bar is -45.3')
assert m.func is func
args = m.arguments
have = [(a.start, a.end, a.original, a.value, a.name) for a in args]
eq_(have, expected)
def test_named_arguments(self):
text = "has a {string}, an {integer:d} and a {decimal:f}"
matcher = matchers.ParseMatcher(self.record_args, text)
context = runner.Context(Mock())
m = matcher.match("has a foo, an 11 and a 3.14159")
m.run(context)
eq_(self.recorded_args, ((context,), {
'string': 'foo',
'integer': 11,
'decimal': 3.14159
}))
def test_positional_arguments(self):
text = "has a {}, an {:d} and a {:f}"
matcher = matchers.ParseMatcher(self.record_args, text)
context = runner.Context(Mock())
m = matcher.match("has a foo, an 11 and a 3.14159")
m.run(context)
eq_(self.recorded_args, ((context, 'foo', 11, 3.14159), {}))
class TestRegexMatcher(object):
def test_returns_none_if_regex_does_not_match(self):
matcher = matchers.RegexMatcher(None, 'a string')
regex = Mock()
regex.match.return_value = None
matcher.regex = regex
assert matcher.match('just a random step') is None
def test_returns_arguments_based_on_groups(self):
func = lambda x: -x
matcher = matchers.RegexMatcher(func, 'foo')
regex = Mock()
regex.groupindex = {'foo': 4, 'baz': 5}
match = Mock()
match.groups.return_value = ('1', '2', '3', 'bar', '-45.3')
positions = {
1: (13, 14),
2: (16, 17),
3: (22, 23),
4: (32, 35),
5: (39, 44),
}
match.start.side_effect = lambda idx: positions[idx][0]
match.end.side_effect = lambda idx: positions[idx][1]
regex.match.return_value = match
matcher.regex = regex
expected = [
(13, 14, '1', '1', None),
(16, 17, '2', '2', None),
(22, 23, '3', '3', None),
(32, 35, 'bar', 'bar', 'foo'),
(39, 44, '-45.3', '-45.3', 'baz'),
]
m = matcher.match('some numbers 1, 2 and 3 and the bar is -45.3')
assert m.func is func
args = m.arguments
have = [(a.start, a.end, a.original, a.value, a.name) for a in args]
eq_(have, expected)
def test_steps_with_same_prefix_are_not_ordering_sensitive(self):
# -- RELATED-TO: issue #280
def step_func1(context): pass
def step_func2(context): pass
matcher1 = matchers.RegexMatcher(step_func1, "I do something")
matcher2 = matchers.RegexMatcher(step_func2, "I do something more")
# -- CHECK: ORDERING SENSITIVITY
matched1 = matcher1.match(matcher2.string)
matched2 = matcher2.match(matcher1.string)
assert matched1 is None
assert matched2 is None
# -- CHECK: Can match itself (if step text is simple)
matched1 = matcher1.match(matcher1.string)
matched2 = matcher2.match(matcher2.string)
assert isinstance(matched1, model.Match)
assert isinstance(matched2, model.Match)
@raises(AssertionError)
def test_step_should_not_use_regex_begin_marker(self):
matchers.RegexMatcher(None, "^I do something")
@raises(AssertionError)
def test_step_should_not_use_regex_end_marker(self):
matchers.RegexMatcher(None, "I do something$")
@raises(AssertionError)
def test_step_should_not_use_regex_begin_and_end_marker(self):
matchers.RegexMatcher(None, "^I do something$")
def test_step_matcher_current_matcher():
current_matcher = matchers.current_matcher
# XXX-CHECK-PY23: If list() is needed.
for name, klass in list(matchers.matcher_mapping.items()):
matchers.use_step_matcher(name)
matcher = matchers.get_matcher(lambda x: -x, 'foo')
assert isinstance(matcher, klass)
matchers.current_matcher = current_matcher
| {
"content_hash": "ef61178e916fc3f81268c63ca0845ae1",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 34.87431693989071,
"alnum_prop": 0.5636164211845817,
"repo_name": "charleswhchan/behave",
"id": "5a2d940e2b44cab3278f9b09ccc69c0760ce07cc",
"size": "6382",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/test_matchers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "272"
},
{
"name": "Cucumber",
"bytes": "588345"
},
{
"name": "Python",
"bytes": "758627"
},
{
"name": "Shell",
"bytes": "856"
}
],
"symlink_target": ""
} |
from curious import deferred_to_real
from django.test import TestCase, override_settings
from django.db import connection
from curious_tests.models import Blog, Entry
class TestDeferredToReal(TestCase):
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blogs = [blog]
headlines = ('MySQL is a relational DB',
'Postgres is a really good relational DB',
'Neo4J is a graph DB')
self.entries = [Entry(headline=headline, blog=blog) for headline in headlines]
for entry in self.entries:
entry.save()
self.query_count = len(connection.queries)
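    # NOTE: connection.queries is only recorded when DEBUG is True, which is why
    # the query-count assertion below runs inside override_settings(DEBUG=True).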
def test_converts_deferred_objects_to_real_objects(self):
entries = list(Entry.objects.all().filter(blog__name='Databases').only('id'))
self.assertEquals(len(entries), 3)
# test objects are deferred
for entry in entries:
self.assertEquals('id' in entry.__dict__, True)
self.assertEquals('headline' in entry.__dict__, False)
# convert to real
entries = deferred_to_real(entries)
self.assertEquals(len(entries), 3)
for entry in entries:
self.assertEquals('id' in entry.__dict__, True)
self.assertEquals('headline' in entry.__dict__, True)
def test_conversion_uses_single_query(self):
# We have to prefix with .all() to prevent the object cache from returning complete
# objects from previous queries
entries = list(Entry.objects.all().filter(blog__name='Databases').only('id'))
with override_settings(DEBUG=True):
entries = list(deferred_to_real(entries))
self.assertEquals(len(connection.queries) - self.query_count, 1)
def test_converts_mixture_of_deferred_and_real_objects(self):
real_entries = list(Entry.objects.all().filter(blog__name='Databases'))
self.assertEquals(len(real_entries), 3)
# test objects are real
for entry in real_entries:
self.assertEquals('id' in entry.__dict__, True)
self.assertEquals('headline' in entry.__dict__, True)
deferred_entries = list(Entry.objects.all().filter(blog__name='Databases').only('id'))
self.assertEquals(len(deferred_entries), 3)
# test objects are deferred
for entry in deferred_entries:
self.assertEquals('id' in entry.__dict__, True)
self.assertEquals('headline' in entry.__dict__, False)
    # convert to real and de-duplicate
entries = deferred_to_real(real_entries+deferred_entries)
self.assertEquals(len(entries), 3)
for entry in entries:
self.assertEquals('id' in entry.__dict__, True)
self.assertEquals('headline' in entry.__dict__, True)
| {
"content_hash": "0efb17f2a9fccced6ce3de3bd4b65629",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 90,
"avg_line_length": 39.28787878787879,
"alnum_prop": 0.6826070188970305,
"repo_name": "ginkgobioworks/curious",
"id": "4b62ede549f3ff2bccb53acb732957c238474626",
"size": "2593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/curious_tests/test_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1393"
},
{
"name": "HTML",
"bytes": "10216"
},
{
"name": "JavaScript",
"bytes": "28045"
},
{
"name": "Makefile",
"bytes": "1211"
},
{
"name": "Python",
"bytes": "153171"
}
],
"symlink_target": ""
} |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Date, DateTime, F
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
InvalidQuery, Q, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<QuerySet %r>' % data
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
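                # A slice step cannot be expressed with SQL LIMIT/OFFSET, so a
                # stepped slice evaluates the queryset and applies the step in
                # Python; otherwise the sliced queryset stays lazy.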
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
        over the current queryset.
        If args is present, the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
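        # Ordering is irrelevant for a single-object lookup, so clear it to
        # keep the SQL simple (only when the query is unsliced and has no
        # DISTINCT ON fields).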
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk and
self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size)
if connection.features.can_return_ids_from_bulk_insert:
assert len(ids) == len(objs_without_pk)
for i in range(len(ids)):
objs_without_pk[i].pk = ids[i]
return objs
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
        'get_latest_by' option or the given field_name, if provided.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
        Deletes objects found from the given queryset in a single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
def _values(self, *fields):
clone = self._clone()
clone._fields = fields
query = clone.query
query.select_related = False
query.clear_deferred_loading()
query.clear_select_fields()
if query.group_by is True:
query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
query.set_group_by()
query.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not query._extra and not query._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
query.default_cols = False
for f in fields:
if f in query.extra_select:
extra_names.append(f)
elif f in query.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
query.set_extra_mask(extra_names)
query.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
query.values_select = field_names
query.add_fields(field_names, True)
return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self._values(*fields)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
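        # flat=True yields bare values instead of 1-tuples; the difference is
        # implemented entirely by the iterable class chosen above.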
return clone
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
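        # Illustrative usage (hypothetical Entry model with a pub_date field):
        #   Entry.objects.dates('pub_date', 'year') -> distinct year dates.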
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        A helper method for bulk_create() that inserts the given objects one
        batch at a time, slicing the object list into chunks of at most
        batch_size items.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
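        # If no batch_size was given, ask the backend for the largest batch it
        # can handle (e.g. limited by the number of query parameters); the
        # max(..., 1) guards against a backend reporting zero.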
inserted_ids = []
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if connections[self.db].features.can_return_ids_from_bulk_insert:
inserted_id = self.model._base_manager._insert(
item, fields=fields, using=self.db, return_id=True
)
if len(objs) > 1:
inserted_ids.extend(inserted_id)
if len(objs) == 1:
inserted_ids.append(inserted_id)
else:
self.model._base_manager._insert(item, fields=fields, using=self.db)
return inserted_ids
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self, field):
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
elif self.model != field.model:
# If the query is used as a subquery for a ForeignKey with non-pk
# target field, make sure to select the target field in the subquery.
foreign_fields = getattr(field, 'foreign_related_fields', ())
if len(foreign_fields) == 1 and not foreign_fields[0].primary_key:
return self.values(foreign_fields[0].name)
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking whether all objects are present in the
        results; for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
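        # When to_attr is given, the final path component is replaced so the
        # prefetched results are stored on that attribute instead of the
        # default related-objects cache.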
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if len(model_instances) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
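    # A deque is used because lookups discovered along the way (from related
    # managers' own prefetch_related) are pushed onto the front via
    # extendleft() and processed before moving on.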
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
is_fetched = attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
obj._prefetched_objects_cache[cache_name] = vals
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
try:
apply_rel_filter = manager._apply_rel_filters
except AttributeError:
warnings.warn(
"The `%s.%s` class must implement a `_apply_rel_filters()` "
"method that accepts a `QuerySet` as its single "
"argument and returns an appropriately filtered version "
"of it." % (manager.__class__.__module__, manager.__class__.__name__),
RemovedInDjango20Warning,
)
qs = manager.get_queryset()
else:
qs = apply_rel_filter(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
| {
"content_hash": "e9cba6bd699037dad36dbf7092d6ee18",
"timestamp": "",
"source": "github",
"line_count": 1769,
"max_line_length": 115,
"avg_line_length": 40.351611079706046,
"alnum_prop": 0.587473032417136,
"repo_name": "indevgr/django",
"id": "61c52167c7465d54070b2ebf57aae8ca1da5a40b",
"size": "71382",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/db/models/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52294"
},
{
"name": "HTML",
"bytes": "174530"
},
{
"name": "JavaScript",
"bytes": "248130"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11350632"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.notify.pushover
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pushover platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushover/
"""
import logging
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
REQUIREMENTS = ['python-pushover==0.2']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-variable
def get_service(hass, config):
""" Get the pushover notification service. """
if not validate_config({DOMAIN: config},
{DOMAIN: ['user_key', CONF_API_KEY]},
_LOGGER):
return None
from pushover import InitError
try:
return PushoverNotificationService(config['user_key'],
config[CONF_API_KEY])
except InitError:
_LOGGER.error(
"Wrong API key supplied. "
"Get it at https://pushover.net")
return None
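# Illustrative configuration.yaml entry (a sketch only; the key names come from
# the required config checked above, the values are placeholders):
#
#   notify:
#     platform: pushover
#     api_key: YOUR_PUSHOVER_API_KEY
#     user_key: YOUR_PUSHOVER_USER_KEY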
# pylint: disable=too-few-public-methods
class PushoverNotificationService(BaseNotificationService):
""" Implements notification service for Pushover. """
def __init__(self, user_key, api_token):
from pushover import Client
self._user_key = user_key
self._api_token = api_token
self.pushover = Client(
self._user_key, api_token=self._api_token)
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
from pushover import RequestError
try:
self.pushover.send_message(message, title=kwargs.get(ATTR_TITLE))
except RequestError:
_LOGGER.exception("Could not send pushover notification")
| {
"content_hash": "e5078800d58a98785697cdc0fa3cb519",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 31.88135593220339,
"alnum_prop": 0.6331738437001595,
"repo_name": "badele/home-assistant",
"id": "7c776300cdb2bfb5dc83fe75591b3a48fd54c742",
"size": "1881",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/pushover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1316899"
},
{
"name": "Python",
"bytes": "1133422"
},
{
"name": "Shell",
"bytes": "3943"
}
],
"symlink_target": ""
} |
import logging.config
import os
from alembic import context
from sqlalchemy import create_engine, pool
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=os.environ['ROCKET_DATABASE_URL'],
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = create_engine(
os.environ['ROCKET_DATABASE_URL'],
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection)
with context.begin_transaction():
context.run_migrations()
logging.config.fileConfig(context.config.config_file_name)
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
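# Illustrative invocation (a sketch; the URL below is a placeholder, not taken
# from the project): both code paths read the connection string from the
# ROCKET_DATABASE_URL environment variable, so a typical upgrade run looks like
#
#   ROCKET_DATABASE_URL=postgresql://user:password@localhost/dbname alembic upgrade head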
| {
"content_hash": "cf72f5114c85d3953a0801fa6bcaca7e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 64,
"avg_line_length": 24.88679245283019,
"alnum_prop": 0.6808188021228203,
"repo_name": "xsnippet/xsnippet-api",
"id": "f9161656e8eadb779b7b28e44a727646f0c84b9f",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/storage/sql/migrations/env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "20699"
},
{
"name": "Rust",
"bytes": "103336"
}
],
"symlink_target": ""
} |
"""
HDBSCAN: Hierarchical Density-Based Spatial Clustering
of Applications with Noise
"""
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics import pairwise_distances
from scipy.sparse import issparse
from sklearn.neighbors import KDTree, BallTree
from joblib import Memory
from warnings import warn
from sklearn.utils import check_array
from joblib.parallel import cpu_count
from scipy.sparse import csgraph
from ._hdbscan_linkage import (
single_linkage,
mst_linkage_core,
mst_linkage_core_vector,
label,
)
from ._hdbscan_tree import (
condense_tree,
compute_stability,
get_clusters,
outlier_scores,
)
from ._hdbscan_reachability import mutual_reachability, sparse_mutual_reachability
from ._hdbscan_boruvka import KDTreeBoruvkaAlgorithm, BallTreeBoruvkaAlgorithm
from .dist_metrics import DistanceMetric
from .plots import CondensedTree, SingleLinkageTree, MinimumSpanningTree
from .prediction import PredictionData
FAST_METRICS = KDTree.valid_metrics + BallTree.valid_metrics + ["cosine", "arccos"]
# Author: Leland McInnes <leland.mcinnes@gmail.com>
# Steve Astels <sastels@gmail.com>
# John Healy <jchealy@gmail.com>
#
# License: BSD 3 clause
from numpy import isclose
def _tree_to_labels(
X,
single_linkage_tree,
min_cluster_size=10,
cluster_selection_method="eom",
allow_single_cluster=False,
match_reference_implementation=False,
cluster_selection_epsilon=0.0,
max_cluster_size=0,
):
"""Converts a pretrained tree and cluster size into a
set of labels and probabilities.
"""
condensed_tree = condense_tree(single_linkage_tree, min_cluster_size)
stability_dict = compute_stability(condensed_tree)
labels, probabilities, stabilities = get_clusters(
condensed_tree,
stability_dict,
cluster_selection_method,
allow_single_cluster,
match_reference_implementation,
cluster_selection_epsilon,
max_cluster_size,
)
return (labels, probabilities, stabilities, condensed_tree, single_linkage_tree)
def _hdbscan_generic(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=None,
gen_min_span_tree=False,
**kwargs
):
if metric == "minkowski":
distance_matrix = pairwise_distances(X, metric=metric, p=p)
elif metric == "arccos":
distance_matrix = pairwise_distances(X, metric="cosine", **kwargs)
elif metric == "precomputed":
# Treating this case explicitly, instead of letting
# sklearn.metrics.pairwise_distances handle it,
# enables the usage of numpy.inf in the distance
# matrix to indicate missing distance information.
# TODO: Check if copying is necessary
distance_matrix = X.copy()
else:
distance_matrix = pairwise_distances(X, metric=metric, **kwargs)
if issparse(distance_matrix):
# raise TypeError('Sparse distance matrices not yet supported')
return _hdbscan_sparse_distance_matrix(
distance_matrix,
min_samples,
alpha,
metric,
p,
leaf_size,
gen_min_span_tree,
**kwargs
)
mutual_reachability_ = mutual_reachability(distance_matrix, min_samples, alpha)
min_spanning_tree = mst_linkage_core(mutual_reachability_)
# Warn if the MST couldn't be constructed around the missing distances
if np.isinf(min_spanning_tree.T[2]).any():
warn(
"The minimum spanning tree contains edge weights with value "
"infinity. Potentially, you are missing too many distances "
"in the initial distance matrix for the given neighborhood "
"size.",
UserWarning,
)
# mst_linkage_core does not generate a full minimal spanning tree
# If a tree is required then we must build the edges from the information
# returned by mst_linkage_core (i.e. just the order of points to be merged)
if gen_min_span_tree:
result_min_span_tree = min_spanning_tree.copy()
for index, row in enumerate(result_min_span_tree[1:], 1):
candidates = np.where(isclose(mutual_reachability_[int(row[1])], row[2]))[0]
candidates = np.intersect1d(
candidates, min_spanning_tree[:index, :2].astype(int)
)
candidates = candidates[candidates != row[1]]
assert len(candidates) > 0
row[0] = candidates[0]
else:
result_min_span_tree = None
# Sort edges of the min_spanning_tree by weight
min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
return single_linkage_tree, result_min_span_tree
def _hdbscan_sparse_distance_matrix(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=40,
gen_min_span_tree=False,
**kwargs
):
assert issparse(X)
# Check for connected component on X
if csgraph.connected_components(X, directed=False, return_labels=False) > 1:
raise ValueError(
"Sparse distance matrix has multiple connected "
"components!\nThat is, there exist groups of points "
"that are completely disjoint -- there are no distance "
"relations connecting them\n"
"Run hdbscan on each component."
)
lil_matrix = X.tolil()
# Compute sparse mutual reachability graph
# if max_dist > 0, max distance to use when the reachability is infinite
max_dist = kwargs.get("max_dist", 0.0)
mutual_reachability_ = sparse_mutual_reachability(
lil_matrix, min_points=min_samples, max_dist=max_dist, alpha=alpha
)
# Check connected component on mutual reachability
# If more than one component, it means that even if the distance matrix X
    # has one component, there exist points with fewer than `min_samples` neighbors
if (
csgraph.connected_components(
mutual_reachability_, directed=False, return_labels=False
)
> 1
):
raise ValueError(
(
"There exists points with less than %s neighbors. "
"Ensure your distance matrix has non zeros values for "
"at least `min_sample`=%s neighbors for each points (i.e. K-nn graph), "
"or specify a `max_dist` to use when distances are missing."
)
% (min_samples, min_samples)
)
# Compute the minimum spanning tree for the sparse graph
sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability_)
# Convert the graph to scipy cluster array format
nonzeros = sparse_min_spanning_tree.nonzero()
nonzero_vals = sparse_min_spanning_tree[nonzeros]
min_spanning_tree = np.vstack(nonzeros + (nonzero_vals,)).T
# Sort edges of the min_spanning_tree by weight
min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :][0]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
if gen_min_span_tree:
return single_linkage_tree, min_spanning_tree
else:
return single_linkage_tree, None
def _hdbscan_prims_kdtree(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=40,
gen_min_span_tree=False,
**kwargs
):
if X.dtype != np.float64:
X = X.astype(np.float64)
# The Cython routines used require contiguous arrays
if not X.flags["C_CONTIGUOUS"]:
X = np.array(X, dtype=np.double, order="C")
tree = KDTree(X, metric=metric, leaf_size=leaf_size, **kwargs)
# TO DO: Deal with p for minkowski appropriately
dist_metric = DistanceMetric.get_metric(metric, **kwargs)
# Get distance to kth nearest neighbour
core_distances = tree.query(
X, k=min_samples + 1, dualtree=True, breadth_first=True
)[0][:, -1].copy(order="C")
# Mutual reachability distance is implicit in mst_linkage_core_vector
min_spanning_tree = mst_linkage_core_vector(X, core_distances, dist_metric, alpha)
# Sort edges of the min_spanning_tree by weight
min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
if gen_min_span_tree:
return single_linkage_tree, min_spanning_tree
else:
return single_linkage_tree, None
def _hdbscan_prims_balltree(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=40,
gen_min_span_tree=False,
**kwargs
):
if X.dtype != np.float64:
X = X.astype(np.float64)
# The Cython routines used require contiguous arrays
if not X.flags["C_CONTIGUOUS"]:
X = np.array(X, dtype=np.double, order="C")
tree = BallTree(X, metric=metric, leaf_size=leaf_size, **kwargs)
dist_metric = DistanceMetric.get_metric(metric, **kwargs)
# Get distance to kth nearest neighbour
core_distances = tree.query(
X, k=min_samples + 1, dualtree=True, breadth_first=True
)[0][:, -1].copy(order="C")
# Mutual reachability distance is implicit in mst_linkage_core_vector
min_spanning_tree = mst_linkage_core_vector(X, core_distances, dist_metric, alpha)
# Sort edges of the min_spanning_tree by weight
min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
if gen_min_span_tree:
return single_linkage_tree, min_spanning_tree
else:
return single_linkage_tree, None
def _hdbscan_boruvka_kdtree(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=40,
approx_min_span_tree=True,
gen_min_span_tree=False,
core_dist_n_jobs=4,
**kwargs
):
if leaf_size < 3:
leaf_size = 3
if core_dist_n_jobs < 1:
core_dist_n_jobs = max(cpu_count() + 1 + core_dist_n_jobs, 1)
if X.dtype != np.float64:
X = X.astype(np.float64)
tree = KDTree(X, metric=metric, leaf_size=leaf_size, **kwargs)
alg = KDTreeBoruvkaAlgorithm(
tree,
min_samples,
metric=metric,
leaf_size=leaf_size // 3,
approx_min_span_tree=approx_min_span_tree,
n_jobs=core_dist_n_jobs,
**kwargs
)
min_spanning_tree = alg.spanning_tree()
# Sort edges of the min_spanning_tree by weight
row_order = np.argsort(min_spanning_tree.T[2])
min_spanning_tree = min_spanning_tree[row_order, :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
if gen_min_span_tree:
return single_linkage_tree, min_spanning_tree
else:
return single_linkage_tree, None
def _hdbscan_boruvka_balltree(
X,
min_samples=5,
alpha=1.0,
metric="minkowski",
p=2,
leaf_size=40,
approx_min_span_tree=True,
gen_min_span_tree=False,
core_dist_n_jobs=4,
**kwargs
):
if leaf_size < 3:
leaf_size = 3
if core_dist_n_jobs < 1:
core_dist_n_jobs = max(cpu_count() + 1 + core_dist_n_jobs, 1)
if X.dtype != np.float64:
X = X.astype(np.float64)
tree = BallTree(X, metric=metric, leaf_size=leaf_size, **kwargs)
alg = BallTreeBoruvkaAlgorithm(
tree,
min_samples,
metric=metric,
leaf_size=leaf_size // 3,
approx_min_span_tree=approx_min_span_tree,
n_jobs=core_dist_n_jobs,
**kwargs
)
min_spanning_tree = alg.spanning_tree()
# Sort edges of the min_spanning_tree by weight
min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = label(min_spanning_tree)
if gen_min_span_tree:
return single_linkage_tree, min_spanning_tree
else:
return single_linkage_tree, None
def check_precomputed_distance_matrix(X):
"""Perform check_array(X) after removing infinite values (numpy.inf) from the given distance matrix."""
tmp = X.copy()
tmp[np.isinf(tmp)] = 1
check_array(tmp)
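# Illustrative input (a sketch): a dense precomputed matrix may mark missing
# distances with numpy.inf and still pass the check above, e.g.
#
#   D = np.array([[0.0,    1.0, np.inf],
#                 [1.0,    0.0, 2.0],
#                 [np.inf, 2.0, 0.0]])
#   check_precomputed_distance_matrix(D)   # no error raised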
def remap_condensed_tree(tree, internal_to_raw, outliers):
"""
Takes an internal condensed_tree structure and adds back in a set of points
that were initially detected as non-finite and returns that new tree.
These points will all be split off from the maximal node at lambda zero and
considered noise points.
Parameters
----------
tree: condensed_tree
internal_to_raw: dict
a mapping from internal integer index to the raw integer index
    outliers: list
        the raw indices of the rows that were non-finite
"""
finite_count = len(internal_to_raw)
outlier_count = len(outliers)
for i, (parent, child, lambda_val, child_size) in enumerate(tree):
if child < finite_count:
child = internal_to_raw[child]
else:
child = child + outlier_count
tree[i] = (parent + outlier_count, child, lambda_val, child_size)
outlier_list = []
root = tree[0][0] # Should I check to be sure this is the minimal lambda?
for outlier in outliers:
outlier_list.append((root, outlier, 0, 1))
outlier_tree = np.array(
outlier_list,
dtype=[
("parent", np.intp),
("child", np.intp),
("lambda_val", float),
("child_size", np.intp),
],
)
tree = np.append(outlier_tree, tree)
return tree
def remap_single_linkage_tree(tree, internal_to_raw, outliers):
"""
Takes an internal single_linkage_tree structure and adds back in a set of points
that were initially detected as non-finite and returns that new tree.
These points will all be merged into the final node at np.inf distance and
considered noise points.
Parameters
----------
tree: single_linkage_tree
internal_to_raw: dict
a mapping from internal integer index to the raw integer index
    outliers: list
        the raw indices of the rows that were non-finite
"""
finite_count = len(internal_to_raw)
outlier_count = len(outliers)
for i, (left, right, distance, size) in enumerate(tree):
if left < finite_count:
tree[i, 0] = internal_to_raw[left]
else:
tree[i, 0] = left + outlier_count
if right < finite_count:
tree[i, 1] = internal_to_raw[right]
else:
tree[i, 1] = right + outlier_count
outlier_tree = np.zeros((len(outliers), 4))
last_cluster_id = tree[tree.shape[0] - 1][0:2].max()
last_cluster_size = tree[tree.shape[0] - 1][3]
for i, outlier in enumerate(outliers):
outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1)
last_cluster_id += 1
last_cluster_size += 1
tree = np.vstack([tree, outlier_tree])
return tree
def is_finite(matrix):
"""Returns true only if all the values of a ndarray or sparse matrix are finite"""
if issparse(matrix):
return np.alltrue(np.isfinite(matrix.tocoo().data))
else:
return np.alltrue(np.isfinite(matrix))
def get_finite_row_indices(matrix):
"""Returns the indices of the purely finite rows of a sparse matrix or dense ndarray"""
if issparse(matrix):
row_indices = np.array(
[i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))]
)
else:
row_indices = np.where(np.isfinite(matrix).sum(axis=1) == matrix.shape[1])[0]
return row_indices
def hdbscan(
X,
min_cluster_size=5,
min_samples=None,
alpha=1.0,
cluster_selection_epsilon=0.0,
max_cluster_size=0,
metric="minkowski",
p=2,
leaf_size=40,
algorithm="best",
memory=Memory(None, verbose=0),
approx_min_span_tree=True,
gen_min_span_tree=False,
core_dist_n_jobs=4,
cluster_selection_method="eom",
allow_single_cluster=False,
match_reference_implementation=False,
**kwargs
):
"""Perform HDBSCAN clustering from a vector array or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
min_cluster_size : int, optional (default=5)
The minimum number of samples in a group for that group to be
considered a cluster; groupings smaller than this size will be left
as noise.
min_samples : int, optional (default=None)
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
        When None, this defaults to min_cluster_size.
cluster_selection_epsilon: float, optional (default=0.0)
A distance threshold. Clusters below this value will be merged.
See [3]_ for more information. Note that this should not be used
if we want to predict the cluster labels for new points in future
(e.g. using approximate_predict), as the approximate_predict function
is not aware of this argument.
alpha : float, optional (default=1.0)
A distance scaling parameter as used in robust single linkage.
See [2]_ for more information.
max_cluster_size : int, optional (default=0)
A limit to the size of clusters returned by the eom algorithm.
Has no effect when using leaf clustering (where clusters are
usually small regardless) and can also be overridden in rare
cases by a high value for cluster_selection_epsilon. Note that
this should not be used if we want to predict the cluster labels
for new points in future (e.g. using approximate_predict), as
the approximate_predict function is not aware of this argument.
metric : string or callable, optional (default='minkowski')
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
p : int, optional (default=2)
p value to use if using the minkowski metric.
leaf_size : int, optional (default=40)
Leaf size for trees responsible for fast nearest
neighbour queries.
algorithm : string, optional (default='best')
Exactly which algorithm to use; hdbscan has variants specialised
for different characteristics of the data. By default this is set
to ``best`` which chooses the "best" algorithm given the nature of
the data. You can force other options if you believe you know
better. Options are:
* ``best``
* ``generic``
* ``prims_kdtree``
* ``prims_balltree``
* ``boruvka_kdtree``
* ``boruvka_balltree``
memory : instance of joblib.Memory or string, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
approx_min_span_tree : bool, optional (default=True)
Whether to accept an only approximate minimum spanning tree.
For some algorithms this can provide a significant speedup, but
the resulting clustering may be of marginally lower quality.
If you are willing to sacrifice speed for correctness you may want
to explore this; in general this should be left at the default True.
gen_min_span_tree : bool, optional (default=False)
Whether to generate the minimum spanning tree for later analysis.
core_dist_n_jobs : int, optional (default=4)
Number of parallel jobs to run in core distance computations (if
supported by the specific algorithm). For ``core_dist_n_jobs``
below -1, (n_cpus + 1 + core_dist_n_jobs) are used.
cluster_selection_method : string, optional (default='eom')
The method used to select clusters from the condensed tree. The
standard approach for HDBSCAN* is to use an Excess of Mass algorithm
to find the most persistent clusters. Alternatively you can instead
select the clusters at the leaves of the tree -- this provides the
most fine grained and homogeneous clusters. Options are:
* ``eom``
* ``leaf``
allow_single_cluster : bool, optional (default=False)
By default HDBSCAN* will not produce a single cluster, setting this
        to True will override this and allow single cluster results in
the case that you feel this is a valid result for your dataset.
(default False)
match_reference_implementation : bool, optional (default=False)
There exist some interpretational differences between this
HDBSCAN* implementation and the original authors reference
implementation in Java. This can result in very minor differences
        in clustering results. Setting this flag to True will, at some
performance cost, ensure that the clustering results match the
reference implementation.
**kwargs : optional
Arguments passed to the distance metric
Returns
-------
labels : ndarray, shape (n_samples, )
Cluster labels for each point. Noisy samples are given the label -1.
probabilities : ndarray, shape (n_samples, )
Cluster membership strengths for each point. Noisy samples are assigned
0.
cluster_persistence : array, shape (n_clusters, )
A score of how persistent each cluster is. A score of 1.0 represents
a perfectly stable cluster that persists over all distance scales,
while a score of 0.0 represents a perfectly ephemeral cluster. These
        scores can be used to gauge the relative coherence of the clusters output
by the algorithm.
condensed_tree : record array
The condensed cluster hierarchy used to generate clusters.
single_linkage_tree : ndarray, shape (n_samples - 1, 4)
The single linkage tree produced during clustering in scipy
hierarchical clustering format
(see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html).
min_spanning_tree : ndarray, shape (n_samples - 1, 3)
        The minimum spanning tree as an edgelist. If gen_min_span_tree was False
this will be None.
References
----------
.. [1] Campello, R. J., Moulavi, D., & Sander, J. (2013, April).
Density-based clustering based on hierarchical density estimates.
In Pacific-Asia Conference on Knowledge Discovery and Data Mining
(pp. 160-172). Springer Berlin Heidelberg.
.. [2] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the
cluster tree. In Advances in Neural Information Processing Systems
(pp. 343-351).
.. [3] Malzer, C., & Baum, M. (2019). A Hybrid Approach To Hierarchical
Density-based Cluster Selection. arxiv preprint 1911.02282.
"""
if min_samples is None:
min_samples = min_cluster_size
if not np.issubdtype(type(min_samples), np.integer) or \
not np.issubdtype(type(min_cluster_size), np.integer):
raise ValueError("Min samples and min cluster size must be integers!")
if min_samples <= 0 or min_cluster_size <= 0:
raise ValueError(
"Min samples and Min cluster size must be positive" " integers"
)
if min_cluster_size == 1:
raise ValueError("Min cluster size must be greater than one")
if np.issubdtype(type(cluster_selection_epsilon), np.integer):
cluster_selection_epsilon = float(cluster_selection_epsilon)
if type(cluster_selection_epsilon) is not float or cluster_selection_epsilon < 0.0:
raise ValueError("Epsilon must be a float value greater than or equal to 0!")
if not isinstance(alpha, float) or alpha <= 0.0:
raise ValueError("Alpha must be a positive float value greater than" " 0!")
if leaf_size < 1:
raise ValueError("Leaf size must be greater than 0!")
if metric == "minkowski":
if p is None:
raise TypeError("Minkowski metric given but no p value supplied!")
if p < 0:
raise ValueError(
"Minkowski metric with negative p value is not" " defined!"
)
if match_reference_implementation:
min_samples = min_samples - 1
min_cluster_size = min_cluster_size + 1
approx_min_span_tree = False
if cluster_selection_method not in ("eom", "leaf"):
raise ValueError(
"Invalid Cluster Selection Method: %s\n" 'Should be one of: "eom", "leaf"\n'
)
# Checks input and converts to an nd-array where possible
if metric != "precomputed" or issparse(X):
X = check_array(X, accept_sparse="csr", force_all_finite=False)
else:
# Only non-sparse, precomputed distance matrices are handled here
# and thereby allowed to contain numpy.inf for missing distances
check_precomputed_distance_matrix(X)
# Python 2 and 3 compliant string_type checking
if isinstance(memory, str):
memory = Memory(memory, verbose=0)
size = X.shape[0]
min_samples = min(size - 1, min_samples)
if min_samples == 0:
min_samples = 1
if algorithm != "best":
if metric != "precomputed" and issparse(X) and algorithm != "generic":
raise ValueError("Sparse data matrices only support algorithm 'generic'.")
if algorithm == "generic":
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_generic
)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs)
elif algorithm == "prims_kdtree":
if metric not in KDTree.valid_metrics:
raise ValueError("Cannot use Prim's with KDTree for this" " metric!")
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_prims_kdtree
)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs)
elif algorithm == "prims_balltree":
if metric not in BallTree.valid_metrics:
raise ValueError("Cannot use Prim's with BallTree for this" " metric!")
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_prims_balltree
)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs)
elif algorithm == "boruvka_kdtree":
        if metric not in KDTree.valid_metrics:
raise ValueError("Cannot use Boruvka with KDTree for this" " metric!")
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_boruvka_kdtree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
approx_min_span_tree,
gen_min_span_tree,
core_dist_n_jobs,
**kwargs
)
elif algorithm == "boruvka_balltree":
if metric not in BallTree.valid_metrics:
raise ValueError("Cannot use Boruvka with BallTree for this" " metric!")
if (X.shape[0] // leaf_size) > 16000:
warn(
"A large dataset size and small leaf_size may induce excessive "
"memory usage. If you are running out of memory consider "
"increasing the ``leaf_size`` parameter."
)
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_boruvka_balltree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
approx_min_span_tree,
gen_min_span_tree,
core_dist_n_jobs,
**kwargs
)
else:
raise TypeError("Unknown algorithm type %s specified" % algorithm)
else:
if issparse(X) or metric not in FAST_METRICS:
# We can't do much with sparse matrices ...
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_generic
)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs)
elif metric in KDTree.valid_metrics:
# TO DO: Need heuristic to decide when to go to boruvka;
# still debugging for now
if X.shape[1] > 60:
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_prims_kdtree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
gen_min_span_tree,
**kwargs
)
else:
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_boruvka_kdtree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
approx_min_span_tree,
gen_min_span_tree,
core_dist_n_jobs,
**kwargs
)
else: # Metric is a valid BallTree metric
# TO DO: Need heuristic to decide when to go to boruvka;
# still debugging for now
if X.shape[1] > 60:
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_prims_balltree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
gen_min_span_tree,
**kwargs
)
else:
(single_linkage_tree, result_min_span_tree) = memory.cache(
_hdbscan_boruvka_balltree
)(
X,
min_samples,
alpha,
metric,
p,
leaf_size,
approx_min_span_tree,
gen_min_span_tree,
core_dist_n_jobs,
**kwargs
)
return (
_tree_to_labels(
X,
single_linkage_tree,
min_cluster_size,
cluster_selection_method,
allow_single_cluster,
match_reference_implementation,
cluster_selection_epsilon,
max_cluster_size,
)
+ (result_min_span_tree,)
)
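# Illustrative usage of the functional API above (a sketch, not part of the
# library; the random data and parameter values are arbitrary):
#
#   import numpy as np
#   data = np.random.rand(200, 2)
#   (labels, probabilities, persistence,
#    condensed_tree, single_linkage_tree, mst) = hdbscan(
#       data, min_cluster_size=10, gen_min_span_tree=True)
#
# `labels` marks noise points with -1, and `mst` is None unless
# gen_min_span_tree=True, as documented above.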
# Inherits from sklearn
class HDBSCAN(BaseEstimator, ClusterMixin):
"""Perform HDBSCAN clustering from vector array or distance matrix.
HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications
with Noise. Performs DBSCAN over varying epsilon values and integrates
the result to find a clustering that gives the best stability over epsilon.
This allows HDBSCAN to find clusters of varying densities (unlike DBSCAN),
and be more robust to parameter selection.
Parameters
----------
min_cluster_size : int, optional (default=5)
The minimum size of clusters; single linkage splits that contain
fewer points than this will be considered points "falling out" of a
cluster rather than a cluster splitting into two new clusters.
min_samples : int, optional (default=None)
The number of samples in a neighbourhood for a point to be
considered a core point.
metric : string, or callable, optional (default='euclidean')
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
p : int, optional (default=None)
p value to use if using the minkowski metric.
alpha : float, optional (default=1.0)
A distance scaling parameter as used in robust single linkage.
See [3]_ for more information.
cluster_selection_epsilon: float, optional (default=0.0)
A distance threshold. Clusters below this value will be merged.
See [5]_ for more information.
algorithm : string, optional (default='best')
Exactly which algorithm to use; hdbscan has variants specialised
for different characteristics of the data. By default this is set
to ``best`` which chooses the "best" algorithm given the nature of
the data. You can force other options if you believe you know
better. Options are:
* ``best``
* ``generic``
* ``prims_kdtree``
* ``prims_balltree``
* ``boruvka_kdtree``
* ``boruvka_balltree``
leaf_size: int, optional (default=40)
If using a space tree algorithm (kdtree, or balltree) the number
        of points in a leaf node of the tree. This does not alter the
resulting clustering, but may have an effect on the runtime
of the algorithm.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
approx_min_span_tree : bool, optional (default=True)
Whether to accept an only approximate minimum spanning tree.
For some algorithms this can provide a significant speedup, but
the resulting clustering may be of marginally lower quality.
If you are willing to sacrifice speed for correctness you may want
to explore this; in general this should be left at the default True.
gen_min_span_tree: bool, optional (default=False)
Whether to generate the minimum spanning tree with regard
to mutual reachability distance for later analysis.
core_dist_n_jobs : int, optional (default=4)
Number of parallel jobs to run in core distance computations (if
supported by the specific algorithm). For ``core_dist_n_jobs``
below -1, (n_cpus + 1 + core_dist_n_jobs) are used.
cluster_selection_method : string, optional (default='eom')
The method used to select clusters from the condensed tree. The
standard approach for HDBSCAN* is to use an Excess of Mass algorithm
to find the most persistent clusters. Alternatively you can instead
select the clusters at the leaves of the tree -- this provides the
most fine grained and homogeneous clusters. Options are:
* ``eom``
* ``leaf``
allow_single_cluster : bool, optional (default=False)
By default HDBSCAN* will not produce a single cluster, setting this
to True will override this and allow single cluster results in
the case that you feel this is a valid result for your dataset.
prediction_data : boolean, optional
Whether to generate extra cached data for predicting labels or
        membership vectors for new unseen points later. If you wish to
persist the clustering object for later re-use you probably want
to set this to True.
(default False)
match_reference_implementation : bool, optional (default=False)
There exist some interpretational differences between this
HDBSCAN* implementation and the original authors reference
implementation in Java. This can result in very minor differences
        in clustering results. Setting this flag to True will, at some
performance cost, ensure that the clustering results match the
reference implementation.
**kwargs : optional
Arguments passed to the distance metric
Attributes
----------
labels_ : ndarray, shape (n_samples, )
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
probabilities_ : ndarray, shape (n_samples, )
The strength with which each sample is a member of its assigned
cluster. Noise points have probability zero; points in clusters
have values assigned proportional to the degree that they
persist as part of the cluster.
cluster_persistence_ : ndarray, shape (n_clusters, )
A score of how persistent each cluster is. A score of 1.0 represents
a perfectly stable cluster that persists over all distance scales,
while a score of 0.0 represents a perfectly ephemeral cluster. These
        scores can be used to gauge the relative coherence of the clusters output
by the algorithm.
condensed_tree_ : CondensedTree object
The condensed tree produced by HDBSCAN. The object has methods
for converting to pandas, networkx, and plotting.
single_linkage_tree_ : SingleLinkageTree object
The single linkage tree produced by HDBSCAN. The object has methods
for converting to pandas, networkx, and plotting.
minimum_spanning_tree_ : MinimumSpanningTree object
The minimum spanning tree of the mutual reachability graph generated
by HDBSCAN. Note that this is not generated by default and will only
be available if `gen_min_span_tree` was set to True on object creation.
        Even then in some optimized cases a tree may not be generated.
outlier_scores_ : ndarray, shape (n_samples, )
Outlier scores for clustered points; the larger the score the more
outlier-like the point. Useful as an outlier detection technique.
Based on the GLOSH algorithm by Campello, Moulavi, Zimek and Sander.
prediction_data_ : PredictionData object
Cached data used for predicting the cluster labels of new or
unseen points. Necessary only if you are using functions from
``hdbscan.prediction`` (see
:func:`~hdbscan.prediction.approximate_predict`,
:func:`~hdbscan.prediction.membership_vector`,
and :func:`~hdbscan.prediction.all_points_membership_vectors`).
exemplars_ : list
A list of exemplar points for clusters. Since HDBSCAN supports
arbitrary shapes for clusters we cannot provide a single cluster
exemplar per cluster. Instead a list is returned with each element
of the list being a numpy array of exemplar points for a cluster --
these points are the "most representative" points of the cluster.
relative_validity_ : float
A fast approximation of the Density Based Cluster Validity (DBCV)
        score [4]. The only difference, and the speed, comes from the fact
that this relative_validity_ is computed using the mutual-
reachability minimum spanning tree, i.e. minimum_spanning_tree_,
instead of the all-points minimum spanning tree used in the
reference. This score might not be an objective measure of the
        goodness of clustering. It may only be used to compare results
        across different choices of hyper-parameters, and is therefore only a
relative score.
References
----------
.. [1] Campello, R. J., Moulavi, D., & Sander, J. (2013, April).
Density-based clustering based on hierarchical density estimates.
In Pacific-Asia Conference on Knowledge Discovery and Data Mining
(pp. 160-172). Springer Berlin Heidelberg.
.. [2] Campello, R. J., Moulavi, D., Zimek, A., & Sander, J. (2015).
Hierarchical density estimates for data clustering, visualization,
and outlier detection. ACM Transactions on Knowledge Discovery
from Data (TKDD), 10(1), 5.
.. [3] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the
cluster tree. In Advances in Neural Information Processing Systems
(pp. 343-351).
.. [4] Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and
Sander, J., 2014. Density-Based Clustering Validation. In SDM
(pp. 839-847).
.. [5] Malzer, C., & Baum, M. (2019). A Hybrid Approach To Hierarchical
Density-based Cluster Selection. arxiv preprint 1911.02282.
"""
def __init__(
self,
min_cluster_size=5,
min_samples=None,
cluster_selection_epsilon=0.0,
max_cluster_size=0,
metric="euclidean",
alpha=1.0,
p=None,
algorithm="best",
leaf_size=40,
memory=Memory(None, verbose=0),
approx_min_span_tree=True,
gen_min_span_tree=False,
core_dist_n_jobs=4,
cluster_selection_method="eom",
allow_single_cluster=False,
prediction_data=False,
match_reference_implementation=False,
**kwargs
):
self.min_cluster_size = min_cluster_size
self.min_samples = min_samples
self.alpha = alpha
self.max_cluster_size = max_cluster_size
self.cluster_selection_epsilon = cluster_selection_epsilon
self.metric = metric
self.p = p
self.algorithm = algorithm
self.leaf_size = leaf_size
self.memory = memory
self.approx_min_span_tree = approx_min_span_tree
self.gen_min_span_tree = gen_min_span_tree
self.core_dist_n_jobs = core_dist_n_jobs
self.cluster_selection_method = cluster_selection_method
self.allow_single_cluster = allow_single_cluster
self.match_reference_implementation = match_reference_implementation
self.prediction_data = prediction_data
self._metric_kwargs = kwargs
self._condensed_tree = None
self._single_linkage_tree = None
self._min_spanning_tree = None
self._raw_data = None
self._outlier_scores = None
self._prediction_data = None
self._relative_validity = None
def fit(self, X, y=None):
"""Perform HDBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
self : object
Returns self
"""
if self.metric != "precomputed":
# Non-precomputed matrices may contain non-finite values.
            # Rows with these values are filtered out below and assigned to the
            # noise cluster (label -1) after clustering.
X = check_array(X, accept_sparse="csr", force_all_finite=False)
self._raw_data = X
self._all_finite = is_finite(X)
if ~self._all_finite:
# Pass only the purely finite indices into hdbscan
# We will later assign all non-finite points to the background -1 cluster
finite_index = get_finite_row_indices(X)
clean_data = X[finite_index]
internal_to_raw = {
x: y for x, y in zip(range(len(finite_index)), finite_index)
}
outliers = list(set(range(X.shape[0])) - set(finite_index))
else:
clean_data = X
elif issparse(X):
# Handle sparse precomputed distance matrices separately
X = check_array(X, accept_sparse="csr")
clean_data = X
else:
# Only non-sparse, precomputed distance matrices are allowed
# to have numpy.inf values indicating missing distances
check_precomputed_distance_matrix(X)
clean_data = X
kwargs = self.get_params()
# prediction data only applies to the persistent model, so remove
        # it from the keyword args we pass on to the function
kwargs.pop("prediction_data", None)
kwargs.update(self._metric_kwargs)
(
self.labels_,
self.probabilities_,
self.cluster_persistence_,
self._condensed_tree,
self._single_linkage_tree,
self._min_spanning_tree,
) = hdbscan(clean_data, **kwargs)
if self.metric != "precomputed" and not self._all_finite:
# remap indices to align with original data in the case of non-finite entries.
self._condensed_tree = remap_condensed_tree(
self._condensed_tree, internal_to_raw, outliers
)
self._single_linkage_tree = remap_single_linkage_tree(
self._single_linkage_tree, internal_to_raw, outliers
)
new_labels = np.full(X.shape[0], -1)
new_labels[finite_index] = self.labels_
self.labels_ = new_labels
new_probabilities = np.zeros(X.shape[0])
new_probabilities[finite_index] = self.probabilities_
self.probabilities_ = new_probabilities
if self.prediction_data:
self.generate_prediction_data()
return self
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
y : ndarray, shape (n_samples, )
cluster labels
"""
self.fit(X)
return self.labels_
def generate_prediction_data(self):
"""
Create data that caches intermediate results used for predicting
the label of new/unseen points. This data is only useful if
you are intending to use functions from ``hdbscan.prediction``.
"""
if self.metric in FAST_METRICS:
min_samples = self.min_samples or self.min_cluster_size
if self.metric in KDTree.valid_metrics:
tree_type = "kdtree"
elif self.metric in BallTree.valid_metrics:
tree_type = "balltree"
else:
warn("Metric {} not supported for prediction data!".format(self.metric))
return
self._prediction_data = PredictionData(
self._raw_data,
self.condensed_tree_,
min_samples,
tree_type=tree_type,
metric=self.metric,
**self._metric_kwargs
)
else:
warn(
"Cannot generate prediction data for non-vector"
"space inputs -- access to the source data rather"
"than mere distances is required!"
)
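    # Illustrative use of the cached prediction data (a sketch; assumes
    # approximate_predict is imported from hdbscan.prediction, as referenced in
    # the class docstring):
    #
    #   clusterer = HDBSCAN(prediction_data=True).fit(X)
    #   new_labels, strengths = approximate_predict(clusterer, new_points)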
def weighted_cluster_centroid(self, cluster_id):
"""Provide an approximate representative point for a given cluster.
Note that this technique assumes a euclidean metric for speed of
computation. For more general metrics use the ``weighted_cluster_medoid``
method which is slower, but can work with the metric the model trained
with.
Parameters
----------
cluster_id: int
The id of the cluster to compute a centroid for.
Returns
-------
centroid: array of shape (n_features,)
A representative centroid for cluster ``cluster_id``.
"""
if not hasattr(self, "labels_"):
raise AttributeError("Model has not been fit to data")
if cluster_id == -1:
raise ValueError(
"Cannot calculate weighted centroid for -1 cluster "
"since it is a noise cluster"
)
mask = self.labels_ == cluster_id
cluster_data = self._raw_data[mask]
cluster_membership_strengths = self.probabilities_[mask]
return np.average(cluster_data, weights=cluster_membership_strengths, axis=0)
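    # e.g. (sketch): membership-weighted centroid of cluster 0 after fitting:
    #
    #   centroid = clusterer.weighted_cluster_centroid(cluster_id=0)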
def weighted_cluster_medoid(self, cluster_id):
"""Provide an approximate representative point for a given cluster.
Note that this technique can be very slow and memory intensive for
large clusters. For faster results use the ``weighted_cluster_centroid``
method which is faster, but assumes a euclidean metric.
Parameters
----------
cluster_id: int
The id of the cluster to compute a medoid for.
Returns
-------
centroid: array of shape (n_features,)
A representative medoid for cluster ``cluster_id``.
"""
if not hasattr(self, "labels_"):
raise AttributeError("Model has not been fit to data")
if cluster_id == -1:
raise ValueError(
"Cannot calculate weighted centroid for -1 cluster "
"since it is a noise cluster"
)
mask = self.labels_ == cluster_id
cluster_data = self._raw_data[mask]
cluster_membership_strengths = self.probabilities_[mask]
dist_mat = pairwise_distances(
cluster_data, metric=self.metric, **self._metric_kwargs
)
dist_mat = dist_mat * cluster_membership_strengths
medoid_index = np.argmin(dist_mat.sum(axis=1))
return cluster_data[medoid_index]
def dbscan_clustering(self, cut_distance, min_cluster_size=5):
"""Return clustering that would be equivalent to running DBSCAN* for a particular cut_distance (or epsilon)
        DBSCAN* can be thought of as DBSCAN without the border points. As such these results may differ slightly
        from sklearn's implementation of DBSCAN in the non-core points.
This can also be thought of as a flat clustering derived from constant height cut through the single
linkage tree.
This represents the result of selecting a cut value for robust single linkage
clustering. The `min_cluster_size` allows the flat clustering to declare noise
        points (and clusters smaller than `min_cluster_size`).
Parameters
----------
cut_distance : float
The mutual reachability distance cut value to use to generate a flat clustering.
min_cluster_size : int, optional
            Clusters smaller than this value will be called 'noise' and remain unclustered
in the resulting flat clustering.
Returns
-------
labels : array [n_samples]
An array of cluster labels, one per datapoint. Unclustered points are assigned
the label -1.
"""
return self.single_linkage_tree_.get_clusters(
cut_distance=cut_distance,
min_cluster_size=min_cluster_size,
)
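    # e.g. (sketch): a flat DBSCAN*-style clustering at a fixed
    # mutual-reachability cut; the cut value here is arbitrary:
    #
    #   flat_labels = clusterer.dbscan_clustering(cut_distance=0.25)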
@property
def prediction_data_(self):
if self._prediction_data is None:
raise AttributeError("No prediction data was generated")
else:
return self._prediction_data
@property
def outlier_scores_(self):
if self._outlier_scores is not None:
return self._outlier_scores
else:
if self._condensed_tree is not None:
self._outlier_scores = outlier_scores(self._condensed_tree)
return self._outlier_scores
else:
raise AttributeError(
"No condensed tree was generated; try running fit first."
)
@property
def condensed_tree_(self):
if self._condensed_tree is not None:
return CondensedTree(
self._condensed_tree,
self.cluster_selection_method,
self.allow_single_cluster,
)
else:
raise AttributeError(
"No condensed tree was generated; try running fit first."
)
@property
def single_linkage_tree_(self):
if self._single_linkage_tree is not None:
return SingleLinkageTree(self._single_linkage_tree)
else:
raise AttributeError(
"No single linkage tree was generated; try running fit" " first."
)
@property
def minimum_spanning_tree_(self):
if self._min_spanning_tree is not None:
if self._raw_data is not None:
return MinimumSpanningTree(self._min_spanning_tree, self._raw_data)
else:
warn(
"No raw data is available; this may be due to using"
" a precomputed metric matrix. No minimum spanning"
" tree will be provided without raw data."
)
return None
else:
raise AttributeError(
"No minimum spanning tree was generated."
"This may be due to optimized algorithm variations that skip"
" explicit generation of the spanning tree."
)
@property
def exemplars_(self):
if self._prediction_data is not None:
return self._prediction_data.exemplars
elif self.metric in FAST_METRICS:
self.generate_prediction_data()
return self._prediction_data.exemplars
else:
raise AttributeError(
"Currently exemplars require the use of vector input data"
"with a suitable metric. This will likely change in the "
"future, but for now no exemplars can be provided"
)
@property
def relative_validity_(self):
if self._relative_validity is not None:
return self._relative_validity
if not self.gen_min_span_tree:
raise AttributeError(
"Minimum spanning tree not present. "
+ "Either HDBSCAN object was created with "
+ "gen_min_span_tree=False or the tree was "
+ "not generated in spite of it owing to "
+ "internal optimization criteria."
)
labels = self.labels_
sizes = np.bincount(labels + 1)
noise_size = sizes[0]
cluster_size = sizes[1:]
total = noise_size + np.sum(cluster_size)
num_clusters = len(cluster_size)
DSC = np.zeros(num_clusters)
min_outlier_sep = np.inf # only required if num_clusters = 1
correction_const = 2 # only required if num_clusters = 1
        # Ultimately, for each Ci, we only require the
# minimum of DSPC(Ci, Cj) over all Cj != Ci.
# So let's call this value DSPC_wrt(Ci), i.e.
# density separation 'with respect to' Ci.
DSPC_wrt = np.ones(num_clusters) * np.inf
max_distance = 0
mst_df = self.minimum_spanning_tree_.to_pandas()
for edge in mst_df.iterrows():
label1 = labels[int(edge[1]["from"])]
label2 = labels[int(edge[1]["to"])]
length = edge[1]["distance"]
max_distance = max(max_distance, length)
if label1 == -1 and label2 == -1:
continue
elif label1 == -1 or label2 == -1:
# If exactly one of the points is noise
min_outlier_sep = min(min_outlier_sep, length)
continue
if label1 == label2:
# Set the density sparseness of the cluster
# to the sparsest value seen so far.
DSC[label1] = max(length, DSC[label1])
else:
# Check whether density separations with
# respect to each of these clusters can
# be reduced.
DSPC_wrt[label1] = min(length, DSPC_wrt[label1])
DSPC_wrt[label2] = min(length, DSPC_wrt[label2])
# In case min_outlier_sep is still np.inf, we assign a new value to it.
# This only makes sense if num_clusters = 1 since it has turned out
# that the MR-MST has no edges between a noise point and a core point.
min_outlier_sep = max_distance if min_outlier_sep == np.inf else min_outlier_sep
# DSPC_wrt[Ci] might be infinite if the connected component for Ci is
# an "island" in the MR-MST. Whereas for other clusters Cj and Ck, the
        # MR-MST might contain an edge with one point in Cj and the other one
# in Ck. Here, we replace the infinite density separation of Ci by
# another large enough value.
#
# TODO: Think of a better yet efficient way to handle this.
correction = correction_const * (
max_distance if num_clusters > 1 else min_outlier_sep
)
DSPC_wrt[np.where(DSPC_wrt == np.inf)] = correction
V_index = [
(DSPC_wrt[i] - DSC[i]) / max(DSPC_wrt[i], DSC[i])
for i in range(num_clusters)
]
score = np.sum(
[(cluster_size[i] * V_index[i]) / total for i in range(num_clusters)]
)
self._relative_validity = score
return self._relative_validity
| {
"content_hash": "0c0169dc0cb5b31fa989ab665a826b1d",
"timestamp": "",
"source": "github",
"line_count": 1524,
"max_line_length": 115,
"avg_line_length": 38.02690288713911,
"alnum_prop": 0.6160681931910341,
"repo_name": "scikit-learn-contrib/hdbscan",
"id": "ce0092d04e6b868365c4b8957b766dbc288e366d",
"size": "57977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdbscan/hdbscan_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "155467"
},
{
"name": "Jupyter Notebook",
"bytes": "6845240"
},
{
"name": "Python",
"bytes": "242605"
},
{
"name": "Shell",
"bytes": "1322"
},
{
"name": "TeX",
"bytes": "1790"
}
],
"symlink_target": ""
} |
import aquarium
from trace_parser import cmp_events_by_timestamp
class DeleteBreakdown(object):
def __init__(self, start_ev):
self._start_ts = start_ev._timestamp
self._seqnum = start_ev._arg
self._events = [ start_ev ]
@property
def seqnum(self):
return self._seqnum
@property
def last_event(self):
# this should not be necessary...
#l = sorted(self._events, cmp=cmp_events_by_timestamp, reverse=True)[0]
return self._events[-1]
def append_event(self, ev):
self._events.append(ev)
def generate_breakdown_data(self):
'''Generator that produces a triple seqnum:eventname:tscoff for each
chunk of work done inside the monitor for a delete call'''
slices = []
for e in self._events:
slices.append(e._timestamp - self._start_ts)
for v, e in zip(slices, self._events):
yield "%d:%s:%d" % (self._seqnum, e._evname, v)
def compute_overall_latency(self):
        return self.last_event._timestamp - self._start_ts
def __str__(self):
evdata = [ (ev._coreid, ev._evname, ev._timestamp) for ev in self._events ]
return "Breakdown { startts %d, seqnum %d, events=%r }" \
% (self._start_ts, self._seqnum, evdata)
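# Illustrative output (hypothetical values): each line yielded by
# generate_breakdown_data() has the form seqnum:eventname:offset, e.g.
#
#   7:delete_enter:0
#   7:delete_lock:1532
#   7:delete_done:40211
#
# where the offset is in timestamp ticks relative to the delete_enter event.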
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print "Usage: %s trace_defs.json trace.data" % sys.argv[0]
sys.exit(1)
aq = aquarium.Aquarium(sys.argv[1])
t = aq.load_trace(sys.argv[2])
print "Processing %d events" % len(t._events)
evtypes = aq.get_event_types()
curdel_overall_start_ts=dict()
curdel_overall_seqnum = -1
overall_del_lats=dict()
found_start = False
# XXX: this is not very nice, as we're flattening a partial order by hand
# here in order to make queries about event ordering to skip partially
# recorded inner trace points that don't carry the sequence number yet :)
event_order = [ 'delete_enter', 'delete_lock', 'delete_queue_retry',
'delete_do_work', 'delete_remote_enq', 'delete_find_new_owner',
'delete_find_core_cont', 'delete_move_result_cont', 'delete_last',
'delete_queue_fin', 'delete_call_rx', 'delete_done' ]
# current breakdown object indexed by coreid
currbreak = dict()
# list of complete breakdown objects indexed by coreid
breakdowns = dict()
with open("raw_parsed.txt", 'w') as rawf:
# we seem to get better results without presorting events by
# timestamp, so we leave that out for now; events should be sorted by
# (coreid, timestamp) anyway.
for e in [ e for e in t._events if e.subsys.get_name() == "capops" ]:
rawf.write("%r,%d,%d\n" % (e,e._coreid,e._timestamp))
# find START signal
if e._evname == "start":
found_start = True
if not found_start:
# ignore events before start event
continue
# delete_enter is signalling start of new delete in monitor.
if e._evname == "delete_enter":
currbreak[e._coreid] = DeleteBreakdown(e)
if e._coreid not in breakdowns.keys():
breakdowns[e._coreid] = []
# delete_done is signalling end of delete in monitor
# just skip delete_done events for which we're not tracking a breakdown
elif e._evname == "delete_done" and e._coreid in currbreak.keys():
if e._arg != currbreak[e._coreid].seqnum:
print "[core %d] found delete_done with seqnum %d, last delete_enter was %d" \
% (e._coreid, e._arg, currbreak[e._coreid].seqnum)
print "skipping this set of trace points"
else:
currbreak[e._coreid].append_event(e)
breakdowns[e._coreid].append(currbreak[e._coreid])
# remove breakdown object for e._coreid from currbreak dict,
# so other code can check whether we're in the middle of a breakdown
# by checking whether the coreid is in the keyset of the dict
currbreak[e._coreid] = None
del currbreak[e._coreid]
elif e._evname in event_order and \
e._coreid in currbreak.keys():
if event_order.index(e._evname) > \
event_order.index(currbreak[e._coreid].last_event._evname):
currbreak[e._coreid].append_event(e)
# handle trace point before call to cap_delete() in user code
if e._evname == "user_delete_call":
curdel_overall_start_ts[e._coreid] = e._timestamp
curdel_overall_seqnum = e._arg
if e._coreid not in overall_del_lats.keys():
overall_del_lats[e._coreid] = []
# handle trace point after call to cap_delete() in user code
if e._evname == "user_delete_resp":
if curdel_overall_seqnum != e._arg:
print "[core %d] got delete_resp with seqnum %d, last delete_call was %d" \
% (e._coreid, e._arg & 0xFF, curdel_overall_seqnum & 0xFF)
print "skipping this set of trace points"
else:
if e._coreid in curdel_overall_start_ts.keys():
overall_del_lats[e._coreid].append(
e._timestamp - curdel_overall_start_ts[e._coreid])
for core in overall_del_lats.keys():
with open("core%d_overall_latencies.data" % core, 'w') as rawf:
for v in overall_del_lats[core]:
rawf.write("%d\n" % v)
for core in breakdowns.keys():
print "core %d:" % core
with open("core%d_monitor_latencies.data" % core, 'w') as rawf:
for b in breakdowns[core]:
off = 0
for ev in b._events:
off = ev._timestamp - b._start_ts
if (off < 0):
print "Breakdown has negative components?"
print b
break
if off < 0:
# don't process breakdowns with negative components
# further
continue
rawf.write('\n'.join(list(b.generate_breakdown_data())))
rawf.write('\n')
| {
"content_hash": "9cd0ebb02798858cc25b4f0021e60e04",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 98,
"avg_line_length": 42.64705882352941,
"alnum_prop": 0.5474329501915709,
"repo_name": "kishoredbn/barrelfish",
"id": "43b65e177a10858cc61210d8d19eb238c22ce31f",
"size": "6572",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/pyaquarium/parse_delete_last_remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2589287"
},
{
"name": "Awk",
"bytes": "9178"
},
{
"name": "Batchfile",
"bytes": "49856"
},
{
"name": "C",
"bytes": "77396109"
},
{
"name": "C++",
"bytes": "14632842"
},
{
"name": "CMake",
"bytes": "5175"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "278456"
},
{
"name": "Emacs Lisp",
"bytes": "23337"
},
{
"name": "Gnuplot",
"bytes": "3383"
},
{
"name": "Groff",
"bytes": "407423"
},
{
"name": "HTML",
"bytes": "377310"
},
{
"name": "Haskell",
"bytes": "147463"
},
{
"name": "Lex",
"bytes": "2872"
},
{
"name": "Logos",
"bytes": "31891"
},
{
"name": "Makefile",
"bytes": "850866"
},
{
"name": "Objective-C",
"bytes": "43119"
},
{
"name": "Perl",
"bytes": "2688059"
},
{
"name": "Perl6",
"bytes": "255974"
},
{
"name": "Prolog",
"bytes": "2571678"
},
{
"name": "Protocol Buffer",
"bytes": "2764"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Scilab",
"bytes": "5315"
},
{
"name": "Shell",
"bytes": "719683"
},
{
"name": "SuperCollider",
"bytes": "8638"
},
{
"name": "Tcl",
"bytes": "18714"
},
{
"name": "TeX",
"bytes": "411611"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "XSLT",
"bytes": "1792"
},
{
"name": "Yacc",
"bytes": "11190"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
import unittest
import tempfile
import uuid as _uuid
import pathlib
import io
from qiime2.core.testing.type import IntSequence1
from qiime2.core.testing.format import IntSequenceDirectoryFormat
from qiime2.core.archive.archiver import _ZipArchive, ArchiveRecord
from qiime2.core.archive.format.v0 import ArchiveFormat
class TestArchiveFormat(unittest.TestCase):
def setUp(self):
prefix = "qiime2-test-temp-"
self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
def test_format_metadata(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1,
IntSequenceDirectoryFormat)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: "
"IntSequenceDirectoryFormat\n" % uuid)
def test_format_metadata_none(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1, None)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: null\n" % uuid)
def test_load_root_dir_metadata_uuid_mismatch(self):
fp = pathlib.Path(self.temp_dir.name) / 'root-dir-metadata-mismatch'
fp.mkdir()
r = _ZipArchive.setup(fp, 'foo', 'bar')
fake = ArchiveRecord(r.root, r.version_fp,
_uuid.uuid4(), # This will trick the format
r.version, r.framework_version)
ArchiveFormat.write(fake, IntSequence1, IntSequenceDirectoryFormat,
lambda x: None, None)
with self.assertRaisesRegex(
ValueError, 'root directory must match UUID.*metadata'):
ArchiveFormat(r)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1f9f4b344f7575f1c715da6ff18d631d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 35.6,
"alnum_prop": 0.6087844739530133,
"repo_name": "ebolyen/qiime2",
"id": "59cf949c22f5d4d391e8358bb1988c2a7e2f5d48",
"size": "2307",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiime2/core/archive/format/tests/test_v0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "492020"
}
],
"symlink_target": ""
} |
import logging
import urllib
import uuid
from collections import defaultdict
from copy import copy
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Set, Union, cast, overload
import requests
from typing_extensions import Literal, Protocol
import mbq.metrics
from .. import ServiceClient
logger = logging.getLogger(__name__)
UUIDType = Union[str, uuid.UUID]
# External type returned from internal and external OS Core clients. Keys
# are the org refs if UUIDs, or {ref_type}:{ref_id} if legacy int types.
# Values are lists of scopes. Should also include a "global" literal key.
FetchedPermissionsDoc = Dict[str, List[str]]
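# Illustrative example (hypothetical values) of a fetched permissions doc:
#   {"<org-uuid>": ["billing.view", "billing.edit"],
#    "company:42": ["billing.view"],
#    "global": []}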
# Internal type stored in the cache. Keys are cache keys with prefixes, colons, etc.
# Values are pipe-delimited strings with an additional pipe on the end.
CachedPermissionsDoc = Dict[str, str]
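# Illustrative example (hypothetical values) of the cached form of the same data:
#   {"permissions_client:<person_id>:<org-uuid>": "billing.view|billing.edit|"}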
RefType = Union[Literal["company", "vendor"]]
@dataclass
class RefSpec:
ref: Union[UUIDType, Literal["global"], int]
type: Optional[RefType] = None
@dataclass
class StaffPermissionsDoc:
is_superuser: bool
permissions: List[str]
@dataclass
class ConvenientOrgRefs:
org_refs: Set[str] = field(default_factory=set)
company_ids: Set[int] = field(default_factory=set)
vendor_ids: Set[int] = field(default_factory=set)
class ClientError(Exception):
"""Raised from within OSCoreClient implementations to denote the fetch
failed due to a client-side error.
"""
pass
class ServerError(Exception):
"""Raised from within OSCoreClient implementations to denote the fetch
failed due to a server-side error.
"""
pass
class OSCoreClient(Protocol):
def fetch_permissions(
self, person_id: UUIDType, org_ref: UUIDType
) -> FetchedPermissionsDoc:
...
def fetch_permissions_for_location(
self, person_id: UUIDType, location_id: int, location_type: RefType
) -> FetchedPermissionsDoc:
...
def fetch_all_permissions(self, person_id: UUIDType) -> FetchedPermissionsDoc:
...
def fetch_staff_permissions(self, person_id: UUIDType) -> StaffPermissionsDoc:
...
def fetch_org_refs_for_permission(
self, person_id: UUIDType, scope: str
) -> List[str]:
...
def fetch_persons_with_permission(self, scope: str, org_ref: UUIDType) -> List[str]:
...
def fetch_persons_with_permission_for_location(
self, scope: str, location_type: RefType, location_id: int
) -> List[str]:
...
class OSCoreServiceClient:
def __init__(self, client: ServiceClient):
# The copying and munging here is attempting to deal with differences
# in how the individual ServiceClients are configured. We can get rid
# of it if we standardize within the services.
# Note that we are doing a shallow copy so the Authenticator instance
# will be shared
self.client = copy(client)
self.client._post_process_response = None
self.client._headers = None
parsed = urllib.parse.urlparse(self.client._api_url)
self.client._api_url = f"{parsed.scheme}://{parsed.netloc}"
def _make_get_request(self, *args, **kwargs):
try:
return self.client.get(*args, **kwargs)
except requests.exceptions.HTTPError as e:
response = getattr(e, "response", None)
if response is not None and response.status_code // 100 == 4:
raise ClientError("Invalid request") from e
raise ServerError("Server error") from e
except Exception as e:
raise ServerError("Server error") from e
def fetch_permissions(
self, person_id: UUIDType, org_ref: UUIDType
) -> FetchedPermissionsDoc:
logger.debug(f"Fetching permissions from OS Core: {person_id}, {org_ref}")
return self._make_get_request(
f"/api/v1/people/{person_id}/permissions/by-org-ref",
params={"org_ref": org_ref},
)
def fetch_permissions_for_location(
self, person_id: UUIDType, location_id: int, location_type: RefType
) -> FetchedPermissionsDoc:
logger.debug(
f"Fetching permissions from OS Core: {person_id}, {location_type} {location_id}"
)
return self._make_get_request(
f"/api/v1/people/{person_id}/permissions/by-location",
params={"location_id": location_id, "location_type": location_type},
)
def fetch_all_permissions(self, person_id: UUIDType) -> FetchedPermissionsDoc:
logger.debug(f"Fetching all permissions from OS Core: {person_id}")
return self._make_get_request(f"/api/v1/people/{person_id}/permissions/all")
def fetch_staff_permissions(self, person_id: UUIDType) -> StaffPermissionsDoc:
logger.debug(f"Fetching staff permissions from OS Core: {person_id}")
data = self._make_get_request(f"/api/v1/people/{person_id}/internal-user-permissions")
return StaffPermissionsDoc(
is_superuser=data['is_superuser'],
permissions=data['permissions'],
)
def fetch_org_refs_for_permission(
self, person_id: UUIDType, scope: str
) -> List[str]:
logger.debug(
f"Fetching all orgs for which Person {person_id} has permission '{scope}'"
)
return self._make_get_request(
f"/api/v1/people/{person_id}/permissions/{scope}/orgs"
)["objects"]
def fetch_persons_with_permission(self, scope: str, org_ref: UUIDType) -> List[str]:
logger.debug(
f"Fetching all persons with permission '{scope}' in org {org_ref}"
)
return self._make_get_request(
f"/api/v1/permissions/people/by-org-ref",
params={'scope': scope, 'org_ref': org_ref}
)["objects"]
def fetch_persons_with_permission_for_location(
self, scope: str, location_type: RefType, location_id: int
) -> List[str]:
        logger.debug(
            f"Fetching all persons with permission '{scope}' in location "
            f"{location_id}, {location_type}"
        )
return self._make_get_request(
f"/api/v1/permissions/people/by-location",
params={'scope': scope, 'location_type': location_type, 'location_id': location_id}
)["objects"]
class Registrar:
def __init__(self):
self._callback_error_name = "callback_error"
self._registry: Dict[str, List[Callable[..., None]]] = defaultdict(list)
def register_error_handler(self, fn: Callable[[str, Exception], None]) -> None:
""" Use this method to add a callback (fn) which will be executed when a callback
raises an exception
"""
self._registry[self._callback_error_name].append(fn)
def register(self, name: str, fn: Callable[..., None]) -> None:
""" Use this method to add a callback (fn) which will be executed when an event
(name) is emitted
"""
self._registry[name].append(fn)
def emit(self, name: str, *args, **kwargs) -> None:
""" Use this method to emit an event and trigger registered callbacks"""
for fn in self._registry[name]:
try:
fn(*args, **kwargs)
except Exception as e:
if name != self._callback_error_name:
self.emit(self._callback_error_name, name, e)
else:
raise
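    # Usage sketch (illustrative callback names, not defined in this module):
    #   registrar.register("has_permission_completed", audit_callback)
    #   registrar.register_error_handler(lambda event_name, exc: logger.exception(exc))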
class PermissionsClient:
"""Cache-aware client for consuming the Permissions API from OS Core.
os_core_client: OSCoreClient Protocol implementation used to talk to OS Core. From
remote services, this should be a wrapped ServiceClient instance.
Use the provided OSCoreServiceClient wrapper in this contrib.
From OS Core itself, this should make local function calls.
cache_name: Name of the Django cache to use, default "default". Pass None
to disable caching.
cache_period_seconds: Expiration time on cache keys in seconds.
"""
_cache_prefix = "permissions_client"
_collector = None
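    # Usage sketch (illustrative; assumes `service_client` is an existing,
    # configured ServiceClient instance):
    #   permissions = PermissionsClient(OSCoreServiceClient(service_client))
    #   permissions.has_permission(person_id, "billing.view", org_ref)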
def __init__(
self,
os_core_client: OSCoreClient,
cache_name="default",
cache_period_seconds=120,
):
self.registrar = Registrar()
self.os_core_client = os_core_client
if cache_name is not None:
from django.core.cache import caches # type: ignore
self.cache = caches[cache_name] if cache_name else None
self.cache_period_seconds = cache_period_seconds
else:
self.cache = None
self.cache_period_seconds = None
@property
def collector(self):
if self._collector is None:
if mbq.metrics._initialized is False:
raise RuntimeError("mbq.metrics is not initialized")
self._collector = mbq.metrics.Collector(
namespace="mbq.client.permissions",
tags={
"service": mbq.metrics._service,
"env": mbq.metrics._env.long_name,
},
)
return self._collector
def _cache_key(self, person_id: str, spec: RefSpec) -> str:
if spec.type is not None:
return f"{self._cache_prefix}:{person_id}:{spec.ref}:{spec.type}"
return f"{self._cache_prefix}:{person_id}:{spec.ref}"
def _global_cache_key(self, person_id: str) -> str:
return f"{self._cache_prefix}:{person_id}:global"
def _cache_read(
self, person_id: str, ref_specs: List[RefSpec]
) -> Optional[CachedPermissionsDoc]:
if not self.cache:
return None
keys = [self._global_cache_key(person_id)]
for spec in ref_specs:
if spec.ref != "global":
keys.append(self._cache_key(person_id, spec))
try:
with self.collector.timed("cache.read.time"):
fetched = self.cache.get_many(keys)
except Exception as e:
raise ServerError("Error reading from cache") from e
if len(fetched.keys()) != len(keys):
logger.debug(f"Not all keys found in cache, got: {fetched}")
self.collector.increment("cache.read", tags={"result": "miss"})
return None
logger.debug(f"Successful cache read: {fetched}")
self.collector.increment("cache.read", tags={"result": "hit"})
return fetched
def _cache_transform(
self, person_id: str, permissions_doc: FetchedPermissionsDoc
) -> CachedPermissionsDoc:
logger.debug(f"Transforming to cache representation: {permissions_doc}")
cache_doc = {}
for ref, scopes in permissions_doc.items():
org_ref: str
ref_type: Optional[RefType] = None
if ":" in ref:
split = ref.split(":")
ref_type, org_ref = cast(RefType, split[0]), split[1]
else:
org_ref = ref
joined_scopes = f"{'|'.join(scopes)}|"
cache_doc[
self._cache_key(person_id, RefSpec(org_ref, ref_type))
] = joined_scopes
return cache_doc
def _cache_write(self, doc: CachedPermissionsDoc) -> None:
if self.cache:
logger.debug(f"Writing to cache: {doc}")
try:
with self.collector.timed("cache.write.time"):
self.cache.set_many(doc, timeout=self.cache_period_seconds)
self.collector.increment("cache.write")
except Exception as e:
raise ServerError("Error writing to cache") from e
def _has_permission(
self, person_id: UUIDType, scope: str, specs: List[RefSpec]
) -> bool:
"""Returns bool of whether the given person has the given
scope on ALL RefSpecs specified.
"""
person_id = str(person_id)
cached_doc = self._cache_read(person_id, specs)
if not cached_doc:
if len(specs) > 1 or specs[0].ref == "global":
logger.debug("Using fetch_all_permissions")
fetched_doc = self.os_core_client.fetch_all_permissions(person_id)
else:
spec = specs[0]
if spec.type is not None:
logger.debug("Using fetch_permissions_for_location")
fetched_doc = self.os_core_client.fetch_permissions_for_location(
person_id, int(spec.ref), spec.type
)
else:
logger.debug("Using fetch_permissions")
assert isinstance(spec.ref, (uuid.UUID, str))
fetched_doc = self.os_core_client.fetch_permissions(
person_id, spec.ref
)
cached_doc = self._cache_transform(person_id, fetched_doc)
self._cache_write(cached_doc)
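        # A scope granted at the global level satisfies every requested ref;
        # otherwise the scope must be present for ALL of the requested RefSpecs.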
found = True
if f"{scope}|" in cached_doc.get(self._global_cache_key(person_id), ""):
pass
else:
for spec in specs:
cache_key = self._cache_key(person_id, spec)
if f"{scope}|" not in cached_doc.get(cache_key, ""):
found = False
break
return found
def has_global_permission(self, person_id: UUIDType, scope: str) -> bool:
"""Test whether the scope is granted to the person on the global scope."""
with self.collector.timed(
"has_permission.time", tags={"call": "has_global_permission"}
):
result = self._has_permission(person_id, scope, [RefSpec("global")])
self.collector.increment(
"has_permission",
tags={
"call": "has_global_permission",
"result": str(result),
"scope": scope,
},
)
self.registrar.emit(
"has_global_permission_completed", person_id, scope, result=result
)
return result
@overload # noqa: F811
def has_permission(
self, person_id: UUIDType, scope: str, org_ref: UUIDType
) -> bool:
...
@overload # noqa: F811
def has_permission(
self, person_id: UUIDType, scope: str, org_ref: int, ref_type: RefType
) -> bool:
...
def has_permission( # noqa: F811
self,
person_id: UUIDType,
scope: str,
org_ref: Union[UUIDType, int],
ref_type: Optional[RefType] = None,
) -> bool:
"""Test whether the scope is granted to the person on the
provided org or location references.
This should not be used to test for explicit global permissions, prefer
has_global_permission instead.
"""
with self.collector.timed(
"has_permission.time", tags={"call": "has_permission"}
):
result = self._has_permission(
person_id, scope, [RefSpec(org_ref, ref_type)]
)
self.collector.increment(
"has_permission",
tags={"call": "has_permission", "result": str(result), "scope": scope},
)
self.registrar.emit(
"has_permission_completed",
person_id,
scope,
org_ref,
ref_type=ref_type,
result=result,
)
return result
@overload # noqa: F811
def has_all_permissions(
self, person_id: UUIDType, scope: str, *, org_refs: List[UUIDType]
) -> bool:
...
@overload # noqa: F811
def has_all_permissions(
self, person_id: UUIDType, scope: str, *, org_refs: List[int], ref_type: RefType
) -> bool:
...
def has_all_permissions( # noqa: F811
self,
person_id: UUIDType,
scope: str,
*,
org_refs: Union[List[UUIDType], List[int]],
ref_type: Optional[RefType] = None,
) -> bool:
"""Test whether the scope is granted to the person on ALL of the
provided org or location references.
This should not be used to test for explicit global permissions, prefer
has_global_permission instead.
"""
with self.collector.timed(
"has_permission.time", tags={"type": "has_all_permissions"}
):
specs = [RefSpec(ref, ref_type) for ref in org_refs]
result = self._has_permission(person_id, scope, specs)
self.collector.increment(
"has_permission",
tags={"call": "has_all_permissions", "result": str(result), "scope": scope},
)
self.registrar.emit(
"has_all_permissions_completed",
person_id,
scope,
org_refs=org_refs,
ref_type=ref_type,
result=result,
)
return result
def _parse_raw_org_refs(self, raw_org_refs: List[str]) -> ConvenientOrgRefs:
company_ids, vendor_ids, org_refs = set(), set(), set()
for raw_ref in raw_org_refs:
if raw_ref.startswith("company"):
company_ids.add(int(raw_ref.split(":")[1]))
elif raw_ref.startswith("vendor"):
vendor_ids.add(int(raw_ref.split(":")[1]))
else:
org_refs.add(raw_ref)
return ConvenientOrgRefs(org_refs, company_ids, vendor_ids)
def get_org_refs_for_permission(
self, person_id: UUIDType, scope: str
) -> ConvenientOrgRefs:
""" Given a person and permission scope return all of the org or
location references where the person has that permission.
"""
with self.collector.timed(
"get_org_refs_for_permission.time", tags={"type": "get_org_refs_for_permission"}
):
result = self._parse_raw_org_refs(
self.os_core_client.fetch_org_refs_for_permission(person_id, scope)
)
self.collector.increment(
"get_org_refs_for_permission",
tags={"call": "get_org_refs_for_permission", "scope": scope},
)
self.registrar.emit(
"get_org_refs_for_permission_completed", person_id, scope, result=result
)
return result
@overload # noqa: F811
def get_persons_with_permission(
self, scope: str, org_ref: UUIDType
) -> List[str]:
...
@overload # noqa: F811
def get_persons_with_permission(
self, scope: str, org_ref: int, ref_type: RefType
) -> List[str]:
...
def get_persons_with_permission( # noqa: F811
self,
scope: str,
org_ref: Union[UUIDType, int],
ref_type: Optional[RefType] = None,
) -> List[str]:
with self.collector.timed(
"get_persons_with_permission.time", tags={"type": "get_persons_with_permission"}
):
if ref_type:
result = self.os_core_client.fetch_persons_with_permission_for_location(
scope, ref_type, int(org_ref)
)
else:
result = self.os_core_client.fetch_persons_with_permission(
scope, str(org_ref)
)
self.collector.increment(
"get_persons_with_permission",
tags={"call": "get_persons_with_permission", "scope": scope},
)
self.registrar.emit(
"get_persons_with_permission_completed",
scope,
org_ref,
ref_type=ref_type,
result=result,
)
return result
def get_staff_permissions(self, person_id: UUIDType) -> StaffPermissionsDoc:
with self.collector.timed(
"get_staff_permissions.time", tags={"type": "get_staff_permissions"}
):
result = self.os_core_client.fetch_staff_permissions(person_id)
self.collector.increment(
"get_staff_permissions",
tags={"call": "get_staff_permissions", "person_id": person_id},
)
self.registrar.emit(
"get_staff_permissions_completed",
person_id,
result=result,
)
return result
def has_staff_permission(self, person_id: UUIDType, scope: str) -> bool:
with self.collector.timed(
"has_staff_permission.time", tags={"type": "has_staff_permission"}
):
staff_permissions_doc = self.os_core_client.fetch_staff_permissions(person_id)
result = (
staff_permissions_doc.is_superuser
or scope in staff_permissions_doc.permissions
)
self.collector.increment(
"has_staff_permission",
tags={"call": "has_staff_permission", "person_id": person_id, "scope": scope},
)
self.registrar.emit(
"has_staff_permission_completed",
person_id,
scope,
result=result,
)
return result
| {
"content_hash": "323a2bcfa697d265bca3f124f7147167",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 95,
"avg_line_length": 34.634266886326195,
"alnum_prop": 0.5796508585834562,
"repo_name": "managedbyq/mbq.client",
"id": "a79de208d40f0e27c637b8f19bd19f78fe03a90b",
"size": "21023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbq/client/contrib/permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51978"
}
],
"symlink_target": ""
} |
from nose.tools import assert_raises, raises
from rx import Observable, Observer
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
from rx.subjects import AsyncSubject
from rx.internal.exceptions import DisposedException
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
def test_infinite():
subject = None
subscription = None
subscription1 = None
subscription2 = None
subscription3 = None
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_next(630, 8),
on_next(710, 9),
on_next(870, 10),
on_next(940, 11),
on_next(1020, 12)
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
nonlocal subject
subject = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
nonlocal subscription
subscription = xs.subscribe(subject)
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription.dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
nonlocal subscription1
subscription1 = subject.subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
nonlocal subscription2
subscription2 = subject.subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
nonlocal subscription3
subscription3 = subject.subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2.dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3.dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal()
results3.messages.assert_equal()
def test_finite():
subject = None
subscription = None
subscription1 = None
subscription2 = None
subscription3 = None
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_completed(630),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
nonlocal subject
subject = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
nonlocal subscription
subscription = xs.subscribe(subject)
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription.dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
nonlocal subscription1
subscription1 = subject.subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
nonlocal subscription2
subscription2 = subject.subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
nonlocal subscription3
subscription3 = subject.subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2.dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3.dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_next(630, 7), on_completed(630))
results3.messages.assert_equal(on_next(900, 7), on_completed(900))
def test_error():
subject = None
subscription = None
subscription1 = None
subscription2 = None
subscription3 = None
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_error(630, ex),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex2')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action(scheduler, state=None):
nonlocal subject
subject = AsyncSubject()
scheduler.schedule_absolute(100, action)
def action1(scheduler, state=None):
nonlocal subscription
subscription = xs.subscribe(subject)
scheduler.schedule_absolute(200, action1)
def action2(scheduler, state=None):
subscription.dispose()
scheduler.schedule_absolute(1000, action2)
def action3(scheduler, state=None):
nonlocal subscription1
subscription1 = subject.subscribe(results1)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state=None):
nonlocal subscription2
subscription2 = subject.subscribe(results2)
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state=None):
nonlocal subscription3
subscription3 = subject.subscribe(results3)
scheduler.schedule_absolute(900, action5)
def action6(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(600, action6)
def action7(scheduler, state=None):
subscription2.dispose()
scheduler.schedule_absolute(700, action7)
def action8(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(800, action8)
def action9(scheduler, state=None):
subscription3.dispose()
scheduler.schedule_absolute(950, action9)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_error(630, ex))
results3.messages.assert_equal(on_error(900, ex))
def test_canceled():
subject = None
subscription = None
subscription1 = None
subscription2 = None
subscription3 = None
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_completed(630),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
nonlocal subject
subject = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
nonlocal subscription
subscription = xs.subscribe(subject)
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription.dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
nonlocal subscription1
subscription1 = subject.subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
nonlocal subscription2
subscription2 = subject.subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
nonlocal subscription3
subscription3 = subject.subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2.dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3.dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_completed(630))
results3.messages.assert_equal(on_completed(900))
def test_subject_disposed():
subject = None
subscription1 = None
subscription2 = None
subscription3 = None
scheduler = TestScheduler()
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
nonlocal subject
subject = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
nonlocal subscription1
subscription1 = subject.subscribe(results1)
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
nonlocal subscription2
subscription2 = subject.subscribe(results2)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state=None):
nonlocal subscription3
subscription3 = subject.subscribe(results3)
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state=None):
subscription1.dispose()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state=None):
subject.dispose()
scheduler.schedule_absolute(600, action6)
def action7(scheduler, state=None):
subscription2.dispose()
scheduler.schedule_absolute(700, action7)
def action8(scheduler, state=None):
subscription3.dispose()
scheduler.schedule_absolute(800, action8)
def action9(scheduler, state=None):
subject.on_next(1)
scheduler.schedule_absolute(150, action9)
def action10(scheduler, state=None):
subject.on_next(2)
scheduler.schedule_absolute(250, action10)
def action11(scheduler, state=None):
subject.on_next(3)
scheduler.schedule_absolute(350, action11)
def action12(scheduler, state=None):
subject.on_next(4)
scheduler.schedule_absolute(450, action12)
def action13(scheduler, state=None):
subject.on_next(5)
scheduler.schedule_absolute(550, action13)
@raises(DisposedException)
def action14(scheduler, state=None):
subject.on_next(6)
scheduler.schedule_absolute(650, action14)
@raises(DisposedException)
def action15(scheduler, state=None):
subject.on_completed()
scheduler.schedule_absolute(750, action15)
@raises(DisposedException)
def action16(scheduler, state=None):
subject.on_error('ex')
scheduler.schedule_absolute(850, action16)
@raises(DisposedException)
def action17(scheduler, state=None):
subject.subscribe(None)
scheduler.schedule_absolute(950, action17)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal()
results3.messages.assert_equal()
| {
"content_hash": "7d413451e427f4a9b43dc62856931b3b",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 76,
"avg_line_length": 30.148780487804878,
"alnum_prop": 0.6677453280478926,
"repo_name": "Reactive-Extensions/RxPy",
"id": "c9d7c08a2b87463cc06e8d7a527884f58e80c23f",
"size": "12361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_asyncsubject.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "862477"
}
],
"symlink_target": ""
} |
from http.client import responses
import hashlib
from tornado import web, gen
from jupyterhub.services.auth import HubOAuthenticated
from remoteappmanager.logging.logging_mixin import LoggingMixin
from remoteappmanager.handlers.handler_authenticator import HubAuthenticator
class BaseHandler(HubOAuthenticated, web.RequestHandler, LoggingMixin):
"""Base class for the request handler.
Each request will be authenticated using JupyterHub as an OAuth
provider using the HubOAuthenticated mixin first before
being independently validated against the application's user model.
https://jupyterhub.readthedocs.io/en/0.8.1/api/services.auth.html
"""
#: The authenticator that is used to recognize and load
#: the internal user model.
authenticator = HubAuthenticator
@web.authenticated
@gen.coroutine
def prepare(self):
"""Runs before any specific handler. """
# Authenticate the user against the hub
self.current_user = yield self.authenticator.authenticate(self)
if self.current_user is None:
self.log.warn(
"Failed to authenticate user session with JupyterHub")
def render(self, template_name, **kwargs):
"""Reimplements render to pass well known information to the rendering
context.
"""
command_line_config = self.application.command_line_config
file_config = self.application.file_config
args = dict(
user=self.current_user,
base_url=command_line_config.base_urlpath,
logout_url=command_line_config.logout_url
)
args.update(kwargs)
args.update({
"analytics": {
"tracking_id": file_config.ga_tracking_id
} if file_config.ga_tracking_id else None
})
args.update({
"gravatar_id": (
hashlib.md5(
str(self.current_user.name).strip().lower().encode(
"utf-8")).hexdigest()
if self.current_user is not None
else None)
})
super(BaseHandler, self).render(template_name, **args)
def write_error(self, status_code, **kwargs):
"""Render error page for uncaught errors"""
# if it's a 404, just report it as such
if status_code == 404:
self.render('error.html',
status_code=status_code,
status_message="Not found",
message="Not found")
return
status_message = responses.get(status_code, 'Unknown HTTP Error')
message = ""
# If this error was caused by an uncaught exception
# log exception message and reference number as well
exc_info = kwargs.get('exc_info')
if exc_info:
exception = exc_info[1]
ref = self.log.issue(status_message, exception)
reason = getattr(exception, 'reason', '')
message = '{} Ref.: {}'.format(reason, ref)
self.render('error.html', status_code=status_code,
status_message=status_message, message=message)
| {
"content_hash": "4873d602ea54fd1232a94e5f77a4e398",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 34.92307692307692,
"alnum_prop": 0.6145374449339207,
"repo_name": "simphony/simphony-remote",
"id": "75db87c983037d0a6d8937d0f756d309c41f9f30",
"size": "3178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remoteappmanager/handlers/base_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14011"
},
{
"name": "JavaScript",
"bytes": "51718"
},
{
"name": "Makefile",
"bytes": "6052"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "418020"
},
{
"name": "Shell",
"bytes": "1690"
},
{
"name": "Vue",
"bytes": "46644"
}
],
"symlink_target": ""
} |
import socket
import threading
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers import basehttp
from django.test.testcases import TransactionTestCase
from django.core.management import call_command
class StoppableWSGIServer(basehttp.WSGIServer):
"""WSGIServer with short timeout, so that server thread can stop this server."""
def server_bind(self):
"""Sets timeout to 1 second."""
basehttp.WSGIServer.server_bind(self)
self.socket.settimeout(1)
def get_request(self):
"""Checks for timeout when getting request."""
try:
sock, address = self.socket.accept()
sock.settimeout(None)
return (sock, address)
except socket.timeout:
raise
class TestServerThread(threading.Thread):
"""Thread for running a http server while tests are running."""
def __init__(self, address, port):
self.address = address
self.port = port
self._stopevent = threading.Event()
self.started = threading.Event()
self.error = None
super(TestServerThread, self).__init__()
def run(self):
"""Sets up test server and database and loops over handling http requests."""
try:
handler = WSGIHandler()
server_address = (self.address, self.port)
httpd = StoppableWSGIServer(server_address, basehttp.WSGIRequestHandler)
httpd.set_app(handler)
self.started.set()
except basehttp.WSGIServerException as e:
self.error = e
self.started.set()
return
# Must do database stuff in this new thread if database in memory.
from django.conf import settings
if settings.DATABASE_ENGINE == 'sqlite3' \
and (not settings.TEST_DATABASE_NAME or settings.TEST_DATABASE_NAME == ':memory:'):
# Import the fixture data into the test database.
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0})
# Loop until we get a stop event.
while not self._stopevent.isSet():
httpd.handle_request()
def join(self, timeout=None):
"""Stop the thread and wait for it to finish."""
self._stopevent.set()
threading.Thread.join(self, timeout)
class TestServerTestCase(TransactionTestCase):
def start_test_server(self, address='localhost', port=8000):
"""Creates a live test server object (instance of WSGIServer)."""
self.server_thread = TestServerThread(address, port)
self.server_thread.start()
self.server_thread.started.wait()
if self.server_thread.error:
raise self.server_thread.error
def stop_test_server(self):
if self.server_thread:
self.server_thread.join()
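# Usage sketch (illustrative): subclass TestServerTestCase, call
# self.start_test_server() in setUp() and self.stop_test_server() in tearDown(),
# then issue HTTP requests against http://localhost:8000/ from the test body.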
| {
"content_hash": "3eface23d87b823b9eef1445b8a6ec80",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 95,
"avg_line_length": 37.135802469135804,
"alnum_prop": 0.6313164893617021,
"repo_name": "akvo/django-tastypie",
"id": "bd434f217b81963eadf4723db645bcce318a58d5",
"size": "3008",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/testcases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "768753"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
} |
"""Application configuration."""
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('FLASKAPP_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://root:root@localhost:3306/text2?charset=utf8' # TODO: Change me
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
# DB_NAME = 'dev.db'
# # Put the db file in project root
# DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
# SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://root:root@localhost:3306/text2?charset=utf8'
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
WTF_CSRF_ENABLED = False # Allows form testing
| {
"content_hash": "60138a52f0ce3eb2678ccce4d318b764",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 110,
"avg_line_length": 33.3,
"alnum_prop": 0.6648648648648648,
"repo_name": "xuxian94/flaskapp",
"id": "a959a438838e82c2ab65816b8baa004a18ff1f14",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskapp/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7650"
},
{
"name": "HTML",
"bytes": "44049"
},
{
"name": "JavaScript",
"bytes": "203811"
},
{
"name": "Python",
"bytes": "33398"
}
],
"symlink_target": ""
} |
import praw
import prawcore
import os
def reddit_login():
'''logs in the user using OAuth 2.0 and returns a redditor object for use'''
user_agent = 'PC:redditFavoriteGrab:v0.1 (by /u/Scien)'
r = praw.Reddit('mysettings', user_agent=user_agent)
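    # Assumes a praw.ini with a [mysettings] section supplying OAuth credentials
    # (e.g. client_id, client_secret, username, password); see the praw docs for
    # the exact keys required by your app type.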
try:
return r.user.me()
except prawcore.exceptions.Forbidden:
        print('\nIt seems your credentials are invalid. Please check whether your praw.ini file is properly set up.')
return None
def main():
if os.path.isfile('./redditFavorites.txt'):
print('Please delete or move your current redditFavorites.txt to a safe place.')
return # exit the script if file problems
file = open('redditFavorites.txt','w')
redditor = reddit_login()
if redditor is None:
print('\nStopping script...')
return # exit the script if unable to log in to reddit
    print('Welcome /u/{}. I will help you back up your saved posts on reddit :)'.format(redditor))
saved = redditor.saved(limit=None)
saved_posts = []
saved_comments = []
for post in saved: # separate out posts and commets
if isinstance(post, praw.models.Submission):
saved_posts.append(post)
elif isinstance(post, praw.models.Comment):
saved_comments.append(post)
for post in saved_posts:
# There is probably a better way to handle encoding here. I was failing in win due to console encoding differences.
file.write('[{0!a}] {1!a} - {2!a}\n'.format(post.shortlink, post.title, post.url))
print('Done creating a list of posts...')
for comment in saved_comments:
comment_url = comment.link_url + comment.id
file.write('[{0!a}] - Comment\n'.format(comment_url))
print('Done creating a list of comments...')
file.close()
if __name__ == '__main__':
main()
| {
"content_hash": "9a88cc9e52eb8ac11a02f95f78d6597b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 124,
"avg_line_length": 34,
"alnum_prop": 0.6395721925133689,
"repo_name": "mmmvdb/redditFavoriteGrab",
"id": "1f733b187bb513700aad4cfa13114d1a2a48143c",
"size": "1870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redditFavoritesGrab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1870"
}
],
"symlink_target": ""
} |
from google.cloud import dialogflow_v2
def sample_get_conversation():
# Create a client
client = dialogflow_v2.ConversationsClient()
# Initialize request argument(s)
request = dialogflow_v2.GetConversationRequest(
name="name_value",
)
# Make the request
response = client.get_conversation(request=request)
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Conversations_GetConversation_sync]
| {
"content_hash": "6dd18d717b2053777e0a522de857f253",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 24.842105263157894,
"alnum_prop": 0.7203389830508474,
"repo_name": "googleapis/python-dialogflow",
"id": "04fa5f4d5e743d9272d22a8a772d5609398037bf",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_generated_dialogflow_v2_conversations_get_conversation_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
from .OAuth2Util import OAuth2Util
| {
"content_hash": "b6c1bcc1877cf83ba3e4360d72360431",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.8571428571428571,
"repo_name": "13steinj/praw-OAuth2Util",
"id": "3f5ea3bd7a9326e06c831bbf69f70bcfef31a61c",
"size": "35",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "OAuth2Util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "40"
},
{
"name": "Python",
"bytes": "12939"
}
],
"symlink_target": ""
} |
'''
This script uses Pattern's sentiment analysis to find the average polarity and subjectivity of collected tweets about presidential candidates
by Ziyu (Selina) Wang
last modified: September 28, 2015
'''
from pattern.en import * # Importing Pattern to be utilized later
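# Note: sentiment(text) returns a (polarity, subjectivity) tuple, e.g. (0.5, 0.9)
# (illustrative values); polarity lies in [-1, 1] and subjectivity in [0, 1].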
# Creating a list with all the candidates names
candidates = ['HillaryClinton','DonaldTrump','BernieSanders','BenCarson','JebBush','TedCruz','MarcoRubio','MikeHuckabee','RandPaul','CarlyFiorina','ScottWalker','JohnKasich',"MartinO'Malley",'ChrisChristie','JimWebb','RickSantorum','BobbyJindal','LincolnChafee','LindseyGraham','GeorgePataki','JimGilmore','JillStein']
# Traverse through the list
for candidate in candidates:
# Creating three lists for storing data from the sentiment analysis
analysis = []
polarity = []
subjectivity = []
try:
with open(candidate+'.txt', 'r') as f1: # Trying to open the .txt file with all the tweets in it
# Traverse through the file line by line
for line in f1:
data = sentiment(line) # run sentiment analysis on each tweet
# Storing the analysis data in the corresponding list
analysis.append(data)
polarity.append(data[0])
subjectivity.append(data[1])
except:
        print 'Running analysis failed' # Report that the analysis could not be run
if analysis: # if the analysis was succesful
# Calculating and displaying the number tweets collected
numberOfTweets = len(analysis)
print "Number of tweets about " + candidate + ': ' + str(numberOfTweets)
# Calculating and displaying the average polarity
averagePolarity = sum(polarity)/len(polarity)
print candidate + "'s average polarity: " + str(averagePolarity)
# Calculating and displaying the average subjectivity
averageSubjectivity = sum(subjectivity)/len(subjectivity)
print candidate + "'s average subjectivity: " + str(averageSubjectivity)
else: # If there are no tweets about a candidate, display this information
        print 'There are no tweets about ' + candidate + ' collected'
    f1.close() # Close the .txt file to clean up
| {
"content_hash": "db3c9d642e44bf6f0ef4e67ac6936376",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 318,
"avg_line_length": 51.15,
"alnum_prop": 0.7512218963831867,
"repo_name": "SelinaWang/SoftwareDesignFall15",
"id": "5ad565c71687952a494cdb8a9cd41d18485f13a2",
"size": "2046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MP1/text_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43813"
}
],
"symlink_target": ""
} |
import os
import shutil
from nipype.interfaces.base import (traits, BaseInterfaceInputSpec,
TraitedSpec, BaseInterface,
File, Directory)
class CopyInputSpec(BaseInterfaceInputSpec):
in_file = traits.Either(File, Directory, exists=True, mandatory=True,
desc='The file or directory to copy')
dest = Directory(desc='The destination directory path'
' (default current directory)')
out_base_name = traits.Either(File, Directory,
desc='The destination file name'
' (default is the input file name)')
class CopyOutputSpec(TraitedSpec):
out_file = traits.Either(File, Directory, exists=True,
desc='The copied file or directory')
class Copy(BaseInterface):
"""The Copy interface copies a file to a destination directory."""
input_spec = CopyInputSpec
output_spec = CopyOutputSpec
def _run_interface(self, runtime):
self._out_file = self._copy(self.inputs.in_file, self.inputs.dest,
self.inputs.out_base_name)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = self._out_file
return outputs
def _copy(self, in_file, dest=None, out_base_name=None):
"""
Copies the given file.
:param in_file: the path of the file or directory to copy
:param dest: the destination directory path
(default is the current directory)
:param out_base_name: the destination file name
(default is the input file name)
:return: the copied file path
"""
if dest:
dest = os.path.abspath(dest)
if not os.path.exists(dest):
os.makedirs(dest)
else:
dest = os.getcwd()
if out_base_name:
# Remove the out file name directory.
_, out_base_name = os.path.split(out_base_name)
else:
# The default out file name is the input file name.
_, out_base_name = os.path.split(in_file)
out_file = os.path.join(dest, out_base_name)
if os.path.isdir(in_file):
shutil.copytree(in_file, out_file)
else:
shutil.copy(in_file, out_file)
return out_file
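# Usage sketch (illustrative paths):
#   copy = Copy(in_file='/tmp/in.nii.gz', dest='/tmp/out', out_base_name='copy.nii.gz')
#   result = copy.run() # result.outputs.out_file -> '/tmp/out/copy.nii.gz'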
| {
"content_hash": "42b56648ac3a4ce54d81187b34006daa",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 34.54929577464789,
"alnum_prop": 0.566245413779046,
"repo_name": "ohsu-qin/qipipe",
"id": "132acafc2eca4544a688ccddeaf3be4deb39e894",
"size": "2453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qipipe/interfaces/copy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "528376"
}
],
"symlink_target": ""
} |
"""
Module for performing checks on a Kibana logging deployment
"""
import json
import ssl
try:
from urllib2 import HTTPError, URLError
import urllib2
except ImportError:
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
from openshift_checks.logging.logging import LoggingCheck
class Kibana(LoggingCheck):
"""Module that checks an integrated logging Kibana deployment"""
name = "kibana"
tags = ["health", "logging"]
def run(self):
"""Check various things and gather errors. Returns: result as hash"""
self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
kibana_pods, error = self.get_pods_for_component(
self.logging_namespace,
"kibana",
)
if error:
return {"failed": True, "changed": False, "msg": error}
check_error = self.check_kibana(kibana_pods)
if not check_error:
check_error = self._check_kibana_route()
if check_error:
msg = ("The following Kibana deployment issue was found:"
"{}".format(check_error))
return {"failed": True, "changed": False, "msg": msg}
# TODO(lmeyer): run it all again for the ops cluster
return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
def _verify_url_internal(self, url):
"""
Try to reach a URL from the host.
Returns: success (bool), reason (for failure)
"""
args = dict(
url=url,
follow_redirects='none',
validate_certs='no', # likely to be signed with internal CA
# TODO(lmeyer): give users option to validate certs
status_code=302,
)
result = self.execute_module('uri', args)
if result.get('failed'):
return result['msg']
return None
@staticmethod
def _verify_url_external(url):
"""
Try to reach a URL from ansible control host.
Returns: success (bool), reason (for failure)
"""
# This actually checks from the ansible control host, which may or may not
# really be "external" to the cluster.
# Disable SSL cert validation to work around internally signed certs
ctx = ssl.create_default_context()
ctx.check_hostname = False # or setting CERT_NONE is refused
ctx.verify_mode = ssl.CERT_NONE
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
return_code = urllib2.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
return str(urlerr)
# there appears to be no way to prevent urlopen from following redirects
if return_code != 200:
return 'Expected success (200) but got return code {}'.format(int(return_code))
return None
def check_kibana(self, pods):
"""Check to see if Kibana is up and working. Returns: error string."""
if not pods:
return "There are no Kibana pods deployed, so no access to the logging UI."
not_running = self.not_running_pods(pods)
if len(not_running) == len(pods):
return "No Kibana pod is in a running state, so there is no access to the logging UI."
elif not_running:
return (
"The following Kibana pods are not currently in a running state:\n"
"{pods}"
"However at least one is, so service may not be impacted."
).format(pods="".join(" " + pod['metadata']['name'] + "\n" for pod in not_running))
return None
def _get_kibana_url(self):
"""
Get kibana route or report error.
Returns: url (or empty), reason for failure
"""
# Get logging url
get_route = self.exec_oc(
self.logging_namespace,
"get route logging-kibana -o json",
[],
)
if not get_route:
return None, 'no_route_exists'
route = json.loads(get_route)
# check that the route has been accepted by a router
ingress = route["status"]["ingress"]
# ingress can be null if there is no router, or empty if not routed
if not ingress or not ingress[0]:
return None, 'route_not_accepted'
host = route.get("spec", {}).get("host")
if not host:
return None, 'route_missing_host'
return 'https://{}/'.format(host), None
def _check_kibana_route(self):
"""
Check to see if kibana route is up and working.
Returns: error string
"""
known_errors = dict(
no_route_exists=(
'No route is defined for Kibana in the logging namespace,\n'
'so the logging stack is not accessible. Is logging deployed?\n'
'Did something remove the logging-kibana route?'
),
route_not_accepted=(
'The logging-kibana route is not being routed by any router.\n'
'Is the router deployed and working?'
),
route_missing_host=(
'The logging-kibana route has no hostname defined,\n'
'which should never happen. Did something alter its definition?'
),
)
kibana_url, error = self._get_kibana_url()
if not kibana_url:
return known_errors.get(error, error)
# first, check that kibana is reachable from the master.
error = self._verify_url_internal(kibana_url)
if error:
if 'urlopen error [Errno 111] Connection refused' in error:
error = (
'Failed to connect from this master to Kibana URL {url}\n'
'Is kibana running, and is at least one router routing to it?'
).format(url=kibana_url)
elif 'urlopen error [Errno -2] Name or service not known' in error:
error = (
'Failed to connect from this master to Kibana URL {url}\n'
'because the hostname does not resolve.\n'
'Is DNS configured for the Kibana hostname?'
).format(url=kibana_url)
elif 'Status code was not' in error:
error = (
'A request from this master to the Kibana URL {url}\n'
'did not return the correct status code (302).\n'
'This could mean that Kibana is malfunctioning, the hostname is\n'
'resolving incorrectly, or other network issues. The output was:\n'
' {error}'
).format(url=kibana_url, error=error)
return 'Error validating the logging Kibana route:\n' + error
# in production we would like the kibana route to work from outside the
# cluster too; but that may not be the case, so allow disabling just this part.
if not self.get_var("openshift_check_efk_kibana_external", default=True):
return None
error = self._verify_url_external(kibana_url)
if error:
if 'urlopen error [Errno 111] Connection refused' in error:
error = (
'Failed to connect from the Ansible control host to Kibana URL {url}\n'
'Is the router for the Kibana hostname exposed externally?'
).format(url=kibana_url)
elif 'urlopen error [Errno -2] Name or service not known' in error:
error = (
'Failed to resolve the Kibana hostname in {url}\n'
'from the Ansible control host.\n'
'Is DNS configured to resolve this Kibana hostname externally?'
).format(url=kibana_url)
elif 'Expected success (200)' in error:
error = (
'A request to Kibana at {url}\n'
'returned the wrong error code:\n'
' {error}\n'
'This could mean that Kibana is malfunctioning, the hostname is\n'
'resolving incorrectly, or other network issues.'
).format(url=kibana_url, error=error)
error = (
'Error validating the logging Kibana route:\n{error}\n'
'To disable external Kibana route validation, set in your inventory:\n'
' openshift_check_efk_kibana_external=False'
).format(error=error)
return error
return None
| {
"content_hash": "68cf8b728a550d5292238d1af3b18ad2",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 102,
"avg_line_length": 39.86363636363637,
"alnum_prop": 0.5689851767388826,
"repo_name": "rhdedgar/openshift-tools",
"id": "efb14ab423dafc431ce546d0a75525817e2b45af",
"size": "8770",
"binary": false,
"copies": "1",
"ref": "refs/heads/stg",
"path": "openshift/installer/vendored/openshift-ansible-3.6.173.0.27/roles/openshift_health_checker/openshift_checks/logging/kibana.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "73250"
},
{
"name": "JavaScript",
"bytes": "960"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "20646861"
},
{
"name": "Shell",
"bytes": "903453"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(1, "../../")
import h2o
def runif_check(ip, port):
# Connect to a pre-existing cluster
uploaded_frame = h2o.upload_file(h2o.locate("bigdata/laptop/mnist/train.csv.gz"))
r_u = uploaded_frame[0].runif(1234)
imported_frame = h2o.import_file(h2o.locate("bigdata/laptop/mnist/train.csv.gz"))
r_i = imported_frame[0].runif(1234)
print "This demonstrates that seeding runif on identical frames with different chunk distributions provides " \
"different results. upload_file: {0}, import_frame: {1}.".format(r_u.mean(), r_i.mean())
if __name__ == "__main__":
h2o.run_test(sys.argv, runif_check)
| {
"content_hash": "3a85717732a1d81ca91e6921dafbac91",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 115,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.6676737160120846,
"repo_name": "PawarPawan/h2o-v3",
"id": "e8bdc9c80053d009e53171d94b8b51a68a7577e5",
"size": "662",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_munging/pyunit_runif_large.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "261942"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "140122"
},
{
"name": "Java",
"bytes": "5407730"
},
{
"name": "JavaScript",
"bytes": "88331"
},
{
"name": "Makefile",
"bytes": "31513"
},
{
"name": "Python",
"bytes": "2009340"
},
{
"name": "R",
"bytes": "1818630"
},
{
"name": "Rebol",
"bytes": "3997"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "44607"
},
{
"name": "TeX",
"bytes": "469926"
}
],
"symlink_target": ""
} |
from PIL import Image, ImageDraw
DirectionNames = [ 'North', 'West', 'South', 'East' ]
SpriteDir = "sprites"
def CreateAtlas( sheetName, default, sprite_size, atlas_size, directionalAnimations, manualAnimations ):
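    # Illustrative shapes of the animation arguments, inferred from how they are
    # read below (the concrete names and values here are hypothetical):
    #   directionalAnimations = [
    #       { 'action': 'Walk', 'file': 'walk.png', 'start_col': 0, 'last_col': 8 }
    #   ]
    #   manualAnimations = [
    #       { 'name': 'Death', 'file': 'death.png', 'frames': [ (0, 4), (1, 4) ] }
    #   ]
    # 'default' names the animation marked default="true" (e.g. 'WalkSouth'),
    # and sprite_size / atlas_size are (width, height) tuples in pixels.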
print( "Building: " + sheetName )
maxWidth = atlas_size[0]
maxHeight = atlas_size[1]
spriteWidth = sprite_size[0]
spriteHeight = sprite_size[1]
xml = """<?xml version="1.0" encoding="UTF-8"?>
<sprite name="{name}" image="{dir}/{name}.png" spriteWidth="{swidth}" spriteHeight="{sheight}">
""".format( name = sheetName, dir = SpriteDir, swidth = spriteWidth, sheight = spriteHeight )
# Generate an output atlas
atlasX = 0
atlasY = 0
atlas = Image.new( 'RGBA', ( maxWidth, maxHeight ), ( 255, 0, 255, 0 ) )
draw = ImageDraw.Draw( atlas )
draw.rectangle( ( 0, 0, maxWidth, maxHeight ), fill=( 255, 0, 255 ) )
# Start extracting images from the atlas
for actionType in directionalAnimations:
action = actionType['action']
sourceFileName = sheetName + "/" + actionType['file']
sourceAtlas = Image.open( sourceFileName )
# Go through each direction
for directionIndex in range(4):
directionName = DirectionNames[ directionIndex ]
offsetY = directionIndex * spriteHeight
# Write the animation header
animationName = action + directionName
if ( animationName == default ):
xml += " <animation name=\"{name}\" default=\"true\">\n".format( name = animationName )
else:
xml += " <animation name=\"{name}\">\n".format( name = animationName )
# Write out each frame in the animation
for col in range( actionType['start_col'], actionType['last_col'] + 1 ):
# Coordinates of the sprite in the source atlas
offsetX = col * spriteWidth
                # Extract the sprite from its source atlas
sprite = sourceAtlas.crop( ( offsetX,
offsetY,
offsetX + spriteWidth,
offsetY + spriteHeight ) )
                # Pack the sprite into the output atlas, and keep track of the coordinates
if ( atlasX + spriteWidth > maxWidth ):
atlasX = 0
atlasY += spriteHeight
if ( atlasY + spriteHeight > maxHeight ):
raise Exception( "Exceed sprite atlas height" )
atlas.paste( sprite, ( atlasX,
atlasY,
atlasX + spriteWidth,
atlasY + spriteHeight ) )
# Write the XML
xml += " <frame x=\"{x}\" y=\"{y}\" />\n".format( x = atlasX, y = atlasY )
atlasX += spriteWidth
# Write animation footer
xml += " </animation>\n"
# Now extract any manually defined animations
for animation in manualAnimations:
# Open the sprite atlas
sourceFileName = sheetName + "/" + animation['file']
sourceAtlas = Image.open( sourceFileName )
# Write the animation header
animationName = animation['name']
if ( animationName == default ):
xml += " <animation name=\"{name}\" default=\"true\">\n".format( name = animationName )
else:
xml += " <animation name=\"{name}\">\n".format( name = animationName )
# Iterate through all the animation frames
for frame in animation['frames']:
# Coordinates of the sprite in the source atlas
x = frame[0]
y = frame[1]
            offsetX = x * spriteWidth
            offsetY = y * spriteHeight
            # Extract the sprite from its source atlas
sprite = sourceAtlas.crop( ( offsetX,
offsetY,
offsetX + spriteWidth,
offsetY + spriteHeight ) )
            # Pack the sprite into the output atlas, and keep track of the coordinates
if ( atlasX + spriteWidth > maxWidth ):
atlasX = 0
atlasY += spriteHeight
if ( atlasY + spriteHeight > maxHeight ):
raise Exception( "Exceed sprite atlas height" )
atlas.paste( sprite, ( atlasX,
atlasY,
atlasX + spriteWidth,
atlasY + spriteHeight ) )
# Write the XML
xml += " <frame x=\"{x}\" y=\"{y}\" />\n".format( x = atlasX, y = atlasY )
atlasX += spriteWidth
# XML animation footer
xml += " </animation>\n"
# XML sprite footer
xml += "</sprite>"
atlas.save( sheetName + ".png" )
xmlfile = open( sheetName + ".sprite", 'w' )
xmlfile.write( xml )
    xmlfile.close()
| {
"content_hash": "debaac6f3dfdb3b10c282bbeaf97fe7c",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 104,
"avg_line_length": 31.786259541984734,
"alnum_prop": 0.627281460134486,
"repo_name": "smacdo/Dungeon-Crawler",
"id": "93f94897b82216c4750b8a49c94d8b427ff9c91f",
"size": "4164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/funcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "423"
},
{
"name": "C#",
"bytes": "1002588"
},
{
"name": "HTML",
"bytes": "38400"
},
{
"name": "Python",
"bytes": "11077"
}
],
"symlink_target": ""
} |
"""
Loads a user's favorites
@author Markus Tacker <m@coderbyheart.de>
"""
import vertx
import mutex
import time
from com.xhaus.jyson import JysonCodec as json
from oauth import Consumer
from core.event_bus import EventBus
from util import parse_date
from datetime import datetime, timedelta
config = vertx.config()
curators = []
friends = []
aday = datetime.now() - timedelta(1)
def response_handler(resp, user_id):
favs = {'user_id': user_id, 'favorites': []}
@resp.body_handler
def body_handler(body):
if resp.status_code == 200:
favs['favorites'] = json.loads(body.to_string())
else:
print "Failed to fetch favorites: %s" % body.to_string()
EventBus.send('log.event', "user.favorites.list.result")
EventBus.send('user.favorites.list.result', json.dumps(favs))
def fetch_favorites(message):
user = message.body
consumer = Consumer(api_endpoint="https://api.twitter.com/", consumer_key=config['consumer_key'], consumer_secret=config['consumer_secret'], oauth_token=config['oauth_token'], oauth_token_secret=config['oauth_token_secret'])
consumer.get("/1.1/favorites/list.json", {'user_id': user['id'], 'count': 20}, lambda resp: response_handler(resp, user['id']))
EventBus.register_handler('user.favorites.list', False, fetch_favorites)
| {
"content_hash": "705ddba3a5923096b01a445b3e3123b6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 228,
"avg_line_length": 35.39473684210526,
"alnum_prop": 0.6877323420074349,
"repo_name": "Gruenderhub/twitter-autocurator",
"id": "e4afe5569213796864584f2909392f11c057e54f",
"size": "1361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fetchfavorites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15163"
}
],
"symlink_target": ""
} |
"""Tests for soft sorting tensorflow layers."""
import tensorflow.compat.v2 as tf
from soft_sort.matrix_factorization import data
class DataTest(tf.test.TestCase):
def setUp(self):
super().setUp()
tf.random.set_seed(0)
self._data = data.SyntheticData(
num_features=50, num_individuals=200, low_rank=10)
def test_make(self):
matrix = self._data.make()
self.assertEqual(matrix.shape, (50, 200))
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| {
"content_hash": "4ca98cf90a9536918d2a92cb7fc854f0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 58,
"avg_line_length": 21.869565217391305,
"alnum_prop": 0.6660039761431411,
"repo_name": "google-research/google-research",
"id": "52f354944ced07bd04e12c0d9723a1cf6c1b81d3",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soft_sort/matrix_factorization/data_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
Ironic console utilities.
"""
import errno
import os
import psutil
import signal
import subprocess
import time
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LW
from ironic.common import utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help=_('Path to serial console terminal program')),
cfg.StrOpt('terminal_cert_dir',
help=_('Directory containing the terminal SSL cert(PEM) for '
'serial console access')),
cfg.StrOpt('terminal_pid_dir',
help=_('Directory for holding terminal pid files. '
'If not specified, the temporary directory '
'will be used.')),
cfg.IntOpt('subprocess_checking_interval',
default=1,
help=_('Time interval (in seconds) for checking the status of '
'console subprocess.')),
cfg.IntOpt('subprocess_timeout',
default=10,
help=_('Time (in seconds) to wait for the console subprocess '
'to start.')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='console')
LOG = logging.getLogger(__name__)
def _get_console_pid_dir():
"""Return the directory for the pid file."""
return CONF.console.terminal_pid_dir or CONF.tempdir
def _ensure_console_pid_dir_exists():
"""Ensure that the console PID directory exists
Checks that the directory for the console PID file exists
and if not, creates it.
:raises: ConsoleError if the directory doesn't exist and cannot be created
"""
dir = _get_console_pid_dir()
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError as exc:
msg = (_("Cannot create directory '%(path)s' for console PID file."
" Reason: %(reason)s.") % {'path': dir, 'reason': exc})
LOG.error(msg)
raise exception.ConsoleError(message=msg)
def _get_console_pid_file(node_uuid):
"""Generate the pid file name to hold the terminal process id."""
pid_dir = _get_console_pid_dir()
name = "%s.pid" % node_uuid
path = os.path.join(pid_dir, name)
return path
def _get_console_pid(node_uuid):
"""Get the terminal process id from pid file."""
pid_path = _get_console_pid_file(node_uuid)
try:
with open(pid_path, 'r') as f:
pid_str = f.readline()
return int(pid_str)
except (IOError, ValueError):
raise exception.NoConsolePid(pid_path=pid_path)
def _stop_console(node_uuid):
"""Close the serial console for a node
Kills the console process and deletes the PID file.
:param node_uuid: the UUID of the node
:raises: NoConsolePid if no console PID was found
:raises: ConsoleError if unable to stop the console process
"""
try:
console_pid = _get_console_pid(node_uuid)
os.kill(console_pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
msg = (_("Could not stop the console for node '%(node)s'. "
"Reason: %(err)s.") % {'node': node_uuid, 'err': exc})
raise exception.ConsoleError(message=msg)
else:
LOG.warning(_LW("Console process for node %s is not running "
"but pid file exists while trying to stop "
"shellinabox console."), node_uuid)
finally:
ironic_utils.unlink_without_raise(_get_console_pid_file(node_uuid))
def make_persistent_password_file(path, password):
"""Writes a file containing a password until deleted."""
try:
utils.delete_if_exists(path)
with open(path, 'wb') as file:
os.chmod(path, 0o600)
file.write(password.encode())
return path
except Exception as e:
utils.delete_if_exists(path)
raise exception.PasswordFileFailedToCreate(error=e)
def get_shellinabox_console_url(port):
"""Get a url to access the console via shellinaboxd.
:param port: the terminal port for the node.
"""
console_host = CONF.my_ip
if netutils.is_valid_ipv6(console_host):
console_host = '[%s]' % console_host
scheme = 'https' if CONF.console.terminal_cert_dir else 'http'
return '%(scheme)s://%(host)s:%(port)s' % {'scheme': scheme,
'host': console_host,
'port': port}
def start_shellinabox_console(node_uuid, port, console_cmd):
"""Open the serial console for a node.
:param node_uuid: the uuid for the node.
:param port: the terminal port for the node.
:param console_cmd: the shell command that gets the console.
:raises: ConsoleError if the directory for the PID file cannot be created.
:raises: ConsoleSubprocessFailed when invoking the subprocess failed.
"""
# make sure that the old console for this node is stopped
# and the files are cleared
try:
_stop_console(node_uuid)
except exception.NoConsolePid:
pass
except processutils.ProcessExecutionError as exc:
LOG.warning(_LW("Failed to kill the old console process "
"before starting a new shellinabox console "
"for node %(node)s. Reason: %(err)s"),
{'node': node_uuid, 'err': exc})
_ensure_console_pid_dir_exists()
pid_file = _get_console_pid_file(node_uuid)
# put together the command and arguments for invoking the console
args = []
args.append(CONF.console.terminal)
if CONF.console.terminal_cert_dir:
args.append("-c")
args.append(CONF.console.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(port))
args.append("--background=%s" % pid_file)
args.append("-s")
args.append(console_cmd)
# run the command as a subprocess
try:
LOG.debug('Running subprocess: %s', ' '.join(args))
# use pipe here to catch the error in case shellinaboxd
# failed to start.
obj = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, ValueError) as e:
error = _("%(exec_error)s\n"
"Command: %(command)s") % {'exec_error': str(e),
'command': ' '.join(args)}
LOG.warning(error)
raise exception.ConsoleSubprocessFailed(error=error)
def _wait(node_uuid, popen_obj):
locals['returncode'] = popen_obj.poll()
# check if the console pid is created and the process is running.
# if it is, then the shellinaboxd is invoked successfully as a daemon.
# otherwise check the error.
if locals['returncode'] is not None:
if (locals['returncode'] == 0 and os.path.exists(pid_file) and
psutil.pid_exists(_get_console_pid(node_uuid))):
raise loopingcall.LoopingCallDone()
else:
(stdout, stderr) = popen_obj.communicate()
locals['errstr'] = _(
"Command: %(command)s.\n"
"Exit code: %(return_code)s.\n"
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r") % {
'command': ' '.join(args),
'return_code': locals['returncode'],
'stdout': stdout,
'stderr': stderr}
LOG.warning(locals['errstr'])
raise loopingcall.LoopingCallDone()
if (time.time() > expiration):
locals['errstr'] = _("Timeout while waiting for console subprocess"
"to start for node %s.") % node_uuid
LOG.warning(locals['errstr'])
raise loopingcall.LoopingCallDone()
locals = {'returncode': None, 'errstr': ''}
expiration = time.time() + CONF.console.subprocess_timeout
timer = loopingcall.FixedIntervalLoopingCall(_wait, node_uuid, obj)
timer.start(interval=CONF.console.subprocess_checking_interval).wait()
if locals['errstr']:
raise exception.ConsoleSubprocessFailed(error=locals['errstr'])
def stop_shellinabox_console(node_uuid):
"""Close the serial console for a node.
:param node_uuid: the UUID of the node
:raises: ConsoleError if unable to stop the console process
"""
try:
_stop_console(node_uuid)
except exception.NoConsolePid:
LOG.warning(_LW("No console pid found for node %s while trying to "
"stop shellinabox console."), node_uuid)
| {
"content_hash": "0ce4780dd8560846ef2afb20d9cc65e2",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 79,
"avg_line_length": 34.810810810810814,
"alnum_prop": 0.5929458740017747,
"repo_name": "dims/ironic",
"id": "ca1a4e4b3d43716354e8a9df808eaddee637d68f",
"size": "9691",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/console_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3893123"
},
{
"name": "Shell",
"bytes": "48638"
}
],
"symlink_target": ""
} |
"""
Offset Codebook (OCB) mode.
OCB is Authenticated Encryption with Associated Data (AEAD) cipher mode
designed by Prof. Phillip Rogaway and specified in `RFC7253`_.
The algorithm provides both authenticity and privacy, it is very efficient,
it uses only one key and it can be used in online mode (so that encryption
or decryption can start before the end of the message is available).
This module implements the third and last variant of OCB (OCB3) and it only
works in combination with a 128-bit block symmetric cipher, like AES.
OCB is patented in US but `free licenses`_ exist for software implementations
meant for non-military purposes.
Example:
>>> from Cryptodome.Cipher import AES
>>> from Cryptodome.Random import get_random_bytes
>>>
>>> key = get_random_bytes(32)
>>> cipher = AES.new(key, AES.MODE_OCB)
>>> plaintext = b"Attack at dawn"
>>> ciphertext, mac = cipher.encrypt_and_digest(plaintext)
    >>> nonce = cipher.nonce
    >>> # Deliver nonce, ciphertext and mac
...
>>> cipher = AES.new(key, AES.MODE_OCB, nonce=nonce)
>>> try:
>>> plaintext = cipher.decrypt_and_verify(ciphertext, mac)
>>> except ValueError:
>>> print "Invalid message"
>>> else:
>>> print plaintext
:undocumented: __package__
.. _RFC7253: http://www.rfc-editor.org/info/rfc7253
.. _free licenses: http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm
"""
from Cryptodome.Util.py3compat import b, bord, bchr, unhexlify
from Cryptodome.Util.number import long_to_bytes, bytes_to_long
from Cryptodome.Util.strxor import strxor
from Cryptodome.Hash import BLAKE2s
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, expect_byte_string,
)
_raw_ocb_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_ocb", """
int OCB_start_operation(void *cipher,
const uint8_t *offset_0,
size_t offset_0_len,
void **pState);
int OCB_encrypt(void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int OCB_decrypt(void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int OCB_update(void *state,
const uint8_t *in,
size_t data_len);
int OCB_digest(void *state,
uint8_t *tag,
size_t tag_len);
int OCB_stop_operation(void *state);
""")
class OcbMode(object):
"""Offset Codebook (OCB) mode.
:undocumented: __init__
"""
def __init__(self, factory, nonce, mac_len, cipher_params):
if factory.block_size != 16:
raise ValueError("OCB mode is only available for ciphers"
" that operate on 128 bits blocks")
self.block_size = 16
"""The block size of the underlying cipher, in bytes."""
self.nonce = nonce
"""Nonce used for this session."""
if len(nonce) not in range(1, 16):
raise ValueError("Nonce must be at most 15 bytes long")
self._mac_len = mac_len
if not 8 <= mac_len <= 16:
raise ValueError("MAC tag must be between 8 and 16 bytes long")
# Cache for MAC tag
self._mac_tag = None
# Cache for unaligned associated data
self._cache_A = b("")
# Cache for unaligned ciphertext/plaintext
self._cache_P = b("")
# Allowed transitions after initialization
self._next = [self.update, self.encrypt, self.decrypt,
self.digest, self.verify]
# Compute Offset_0
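        # (Per RFC 7253, Section 4.2: the padded nonce is split into a 122-bit
        #  "top" part and a 6-bit "bottom"; Ktop = E_K(top || 000000), Stretch =
        #  Ktop || (Ktop[1..64] xor Ktop[9..72]), and Offset_0 is the 128-bit
        #  slice of Stretch starting at bit 'bottom'.)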
params_without_key = dict(cipher_params)
key = params_without_key.pop("key")
nonce = (bchr(self._mac_len << 4 & 0xFF) +
bchr(0) * (14 - len(self.nonce)) +
bchr(1) +
self.nonce)
bottom = bord(nonce[15]) & 0x3F # 6 bits, 0..63
ktop = factory.new(key, factory.MODE_ECB, **params_without_key)\
.encrypt(nonce[:15] + bchr(bord(nonce[15]) & 0xC0))
stretch = ktop + strxor(ktop[:8], ktop[1:9]) # 192 bits
offset_0 = long_to_bytes(bytes_to_long(stretch) >>
(64 - bottom), 24)[8:]
# Create low-level cipher instance
raw_cipher = factory._create_base_cipher(cipher_params)
if cipher_params:
raise TypeError("Unknown keywords: " + str(cipher_params))
self._state = VoidPointer()
result = _raw_ocb_lib.OCB_start_operation(raw_cipher.get(),
offset_0,
c_size_t(len(offset_0)),
self._state.address_of())
if result:
raise ValueError("Error %d while instantiating the OCB mode"
% result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
_raw_ocb_lib.OCB_stop_operation)
        # Memory allocated for the underlying block cipher is now owned
# by the cipher mode
raw_cipher.release()
def _update(self, assoc_data, assoc_data_len):
expect_byte_string(assoc_data)
result = _raw_ocb_lib.OCB_update(self._state.get(),
assoc_data,
c_size_t(assoc_data_len))
if result:
raise ValueError("Error %d while MAC-ing in OCB mode" % result)
def update(self, assoc_data):
"""Process the associated data.
If there is any associated data, the caller has to invoke
this method one or more times, before using
``decrypt`` or ``encrypt``.
By *associated data* it is meant any data (e.g. packet headers) that
will not be encrypted and will be transmitted in the clear.
        However, the receiver shall still be able to detect modifications.
If there is no associated data, this method must not be called.
The caller may split associated data in segments of any size, and
invoke this method multiple times, each time with the next segment.
:Parameters:
assoc_data : byte string
A piece of associated data.
"""
if self.update not in self._next:
raise TypeError("update() can only be called"
" immediately after initialization")
self._next = [self.encrypt, self.decrypt, self.digest,
self.verify, self.update]
if len(self._cache_A) > 0:
filler = min(16 - len(self._cache_A), len(assoc_data))
self._cache_A += assoc_data[:filler]
assoc_data = assoc_data[filler:]
if len(self._cache_A) < 16:
return self
# Clear the cache, and proceeding with any other aligned data
self._cache_A, seg = b(""), self._cache_A
self.update(seg)
update_len = len(assoc_data) // 16 * 16
self._cache_A = assoc_data[update_len:]
self._update(assoc_data, update_len)
return self
def _transcrypt_aligned(self, in_data, in_data_len,
trans_func, trans_desc):
out_data = create_string_buffer(in_data_len)
result = trans_func(self._state.get(),
in_data,
out_data,
c_size_t(in_data_len))
if result:
raise ValueError("Error %d while %sing in OCB mode"
% (result, trans_desc))
return get_raw_buffer(out_data)
def _transcrypt(self, in_data, trans_func, trans_desc):
# Last piece to encrypt/decrypt
if in_data is None:
out_data = self._transcrypt_aligned(self._cache_P,
len(self._cache_P),
trans_func,
trans_desc)
self._cache_P = b("")
return out_data
# Try to fill up the cache, if it already contains something
expect_byte_string(in_data)
prefix = b("")
if len(self._cache_P) > 0:
filler = min(16 - len(self._cache_P), len(in_data))
self._cache_P += in_data[:filler]
in_data = in_data[filler:]
if len(self._cache_P) < 16:
# We could not manage to fill the cache, so there is certainly
# no output yet.
return b("")
# Clear the cache, and proceeding with any other aligned data
prefix = self._transcrypt_aligned(self._cache_P,
len(self._cache_P),
trans_func,
trans_desc)
self._cache_P = b("")
# Process data in multiples of the block size
trans_len = len(in_data) // 16 * 16
result = self._transcrypt_aligned(in_data,
trans_len,
trans_func,
trans_desc)
if prefix:
result = prefix + result
# Left-over
self._cache_P = in_data[trans_len:]
return result
def encrypt(self, plaintext=None):
"""Encrypt the next piece of plaintext.
After the entire plaintext has been passed (but before `digest`),
you **must** call this method one last time with no arguments to collect
the final piece of ciphertext.
If possible, use the method `encrypt_and_digest` instead.
:Parameters:
plaintext : byte string
The next piece of data to encrypt or ``None`` to signify
that encryption has finished and that any remaining ciphertext
has to be produced.
:Return:
the ciphertext, as a byte string.
Its length may not match the length of the *plaintext*.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() can only be called after"
" initialization or an update()")
if plaintext is None:
self._next = [self.digest]
else:
self._next = [self.encrypt]
return self._transcrypt(plaintext, _raw_ocb_lib.OCB_encrypt, "encrypt")
def decrypt(self, ciphertext=None):
"""Decrypt the next piece of ciphertext.
After the entire ciphertext has been passed (but before `verify`),
you **must** call this method one last time with no arguments to collect
the remaining piece of plaintext.
If possible, use the method `decrypt_and_verify` instead.
:Parameters:
ciphertext : byte string
The next piece of data to decrypt or ``None`` to signify
that decryption has finished and that any remaining plaintext
has to be produced.
:Return:
the plaintext, as a byte string.
Its length may not match the length of the *ciphertext*.
"""
if self.decrypt not in self._next:
raise TypeError("decrypt() can only be called after"
" initialization or an update()")
if ciphertext is None:
self._next = [self.verify]
else:
self._next = [self.decrypt]
return self._transcrypt(ciphertext,
_raw_ocb_lib.OCB_decrypt,
"decrypt")
def _compute_mac_tag(self):
if self._mac_tag is not None:
return
if self._cache_A:
self._update(self._cache_A, len(self._cache_A))
self._cache_A = b("")
mac_tag = create_string_buffer(16)
result = _raw_ocb_lib.OCB_digest(self._state.get(),
mac_tag,
c_size_t(len(mac_tag))
)
if result:
raise ValueError("Error %d while computing digest in OCB mode"
% result)
self._mac_tag = get_raw_buffer(mac_tag)[:self._mac_len]
def digest(self):
"""Compute the *binary* MAC tag.
Call this method after the final `encrypt` (the one with no arguments)
to obtain the MAC tag.
The MAC tag is needed by the receiver to determine authenticity
of the message.
:Return: the MAC, as a byte string.
"""
if self.digest not in self._next:
raise TypeError("digest() cannot be called now for this cipher")
assert(len(self._cache_P) == 0)
self._next = [self.digest]
if self._mac_tag is None:
self._compute_mac_tag()
return self._mac_tag
def hexdigest(self):
"""Compute the *printable* MAC tag.
This method is like `digest`.
:Return: the MAC, as a hexadecimal string.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def verify(self, received_mac_tag):
"""Validate the *binary* MAC tag.
Call this method after the final `decrypt` (the one with no arguments)
to check if the message is authentic and valid.
:Parameters:
received_mac_tag : byte string
This is the *binary* MAC, as received from the sender.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
if self.verify not in self._next:
raise TypeError("verify() cannot be called now for this cipher")
assert(len(self._cache_P) == 0)
self._next = [self.verify]
if self._mac_tag is None:
self._compute_mac_tag()
secret = get_random_bytes(16)
mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=self._mac_tag)
mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=received_mac_tag)
if mac1.digest() != mac2.digest():
raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Validate the *printable* MAC tag.
This method is like `verify`.
:Parameters:
hex_mac_tag : string
This is the *printable* MAC, as received from the sender.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
self.verify(unhexlify(hex_mac_tag))
def encrypt_and_digest(self, plaintext):
"""Encrypt the message and create the MAC tag in one step.
:Parameters:
plaintext : byte string
The entire message to encrypt.
:Return:
a tuple with two byte strings:
- the encrypted data
- the MAC
"""
return self.encrypt(plaintext) + self.encrypt(), self.digest()
def decrypt_and_verify(self, ciphertext, received_mac_tag):
"""Decrypted the message and verify its authenticity in one step.
:Parameters:
ciphertext : byte string
The entire message to decrypt.
received_mac_tag : byte string
This is the *binary* MAC, as received from the sender.
:Return: the decrypted data (byte string).
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
plaintext = self.decrypt(ciphertext) + self.decrypt()
self.verify(received_mac_tag)
return plaintext
def _create_ocb_cipher(factory, **kwargs):
"""Create a new block cipher, configured in OCB mode.
:Parameters:
factory : module
A symmetric cipher module from `Cryptodome.Cipher`
(like `Cryptodome.Cipher.AES`).
:Keywords:
nonce : byte string
A value that must never be reused for any other encryption.
Its length can vary from 1 to 15 bytes.
If not specified, a random 15 bytes long nonce is generated.
mac_len : integer
Length of the MAC, in bytes.
It must be in the range ``[8..16]``.
The default is 16 (128 bits).
Any other keyword will be passed to the underlying block cipher.
See the relevant documentation for details (at least ``key`` will need
to be present).
"""
try:
nonce = kwargs.pop("nonce", None)
if nonce is None:
nonce = get_random_bytes(15)
mac_len = kwargs.pop("mac_len", 16)
    except KeyError as e:
raise TypeError("Keyword missing: " + str(e))
return OcbMode(factory, nonce, mac_len, kwargs)
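if __name__ == "__main__":
    # Minimal self-test sketch, not part of the module proper: it assumes the
    # Cryptodome.Cipher.AES package from this distribution is importable and
    # exercises the incremental (online) API documented above.
    from Cryptodome.Cipher import AES
    key = get_random_bytes(16)
    cipher = AES.new(key, AES.MODE_OCB)
    cipher.update(b"header")                      # associated data (not encrypted)
    ct = cipher.encrypt(b"attack ") + cipher.encrypt(b"at dawn") + cipher.encrypt()
    tag = cipher.digest()
    receiver = AES.new(key, AES.MODE_OCB, nonce=cipher.nonce)
    receiver.update(b"header")
    pt = receiver.decrypt(ct) + receiver.decrypt()
    receiver.verify(tag)                          # raises ValueError on mismatch
    assert pt == b"attack at dawn"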
| {
"content_hash": "73bd4e0655e650cf25619ba93ece2238",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 80,
"avg_line_length": 37.62139917695473,
"alnum_prop": 0.5229162108947714,
"repo_name": "chronicwaffle/PokemonGo-DesktopMap",
"id": "f40871dbef3da83f15d951df73c6c25981303c8a",
"size": "19825",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/pylibs/win32/Cryptodome/Cipher/_mode_ocb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "29260"
},
{
"name": "JavaScript",
"bytes": "52980"
},
{
"name": "Python",
"bytes": "11998498"
},
{
"name": "Shell",
"bytes": "4097"
}
],
"symlink_target": ""
} |
"""Runtime type checking support.
For internal use only; no backwards-compatibility guarantees.
"""
import collections
import inspect
import sys
import types
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
class AbstractDoFnWrapper(DoFn):
"""An abstract class to create wrapper around DoFn"""
def __init__(self, dofn):
super(AbstractDoFnWrapper, self).__init__()
self.dofn = dofn
def _inspect_start_bundle(self):
return self.dofn.get_function_arguments('start_bundle')
def _inspect_process(self):
return self.dofn.get_function_arguments('process')
def _inspect_finish_bundle(self):
return self.dofn.get_function_arguments('finish_bundle')
def wrapper(self, method, args, kwargs):
return method(*args, **kwargs)
def start_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.start_bundle, args, kwargs)
def process(self, *args, **kwargs):
return self.wrapper(self.dofn.process, args, kwargs)
def finish_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.finish_bundle, args, kwargs)
def is_process_bounded(self):
return self.dofn.is_process_bounded()
class OutputCheckWrapperDoFn(AbstractDoFnWrapper):
"""A DoFn that verifies against common errors in the output type."""
def __init__(self, dofn, full_label):
super(OutputCheckWrapperDoFn, self).__init__(dofn)
self.full_label = full_label
def wrapper(self, method, args, kwargs):
try:
result = method(*args, **kwargs)
except TypeCheckError as e:
error_msg = ('Runtime type violation detected within ParDo(%s): '
'%s' % (self.full_label, e))
raise TypeCheckError, error_msg, sys.exc_info()[2]
else:
return self._check_type(result)
def _check_type(self, output):
if output is None:
return output
elif isinstance(output, (dict, basestring)):
object_type = type(output).__name__
raise TypeCheckError('Returning a %s from a ParDo or FlatMap is '
'discouraged. Please use list("%s") if you really '
'want this behavior.' %
(object_type, output))
elif not isinstance(output, collections.Iterable):
raise TypeCheckError('FlatMap and ParDo must return an '
'iterable. %s was returned instead.'
% type(output))
return output
class TypeCheckWrapperDoFn(AbstractDoFnWrapper):
"""A wrapper around a DoFn which performs type-checking of input and output.
"""
def __init__(self, dofn, type_hints, label=None):
super(TypeCheckWrapperDoFn, self).__init__(dofn)
self.dofn = dofn
self._process_fn = self.dofn._process_argspec_fn()
if type_hints.input_types:
input_args, input_kwargs = type_hints.input_types
self._input_hints = getcallargs_forhints(
self._process_fn, *input_args, **input_kwargs)
else:
self._input_hints = None
# TODO(robertwb): Multi-output.
self._output_type_hint = type_hints.simple_output_type(label)
def wrapper(self, method, args, kwargs):
result = method(*args, **kwargs)
return self._type_check_result(result)
def process(self, *args, **kwargs):
if self._input_hints:
actual_inputs = inspect.getcallargs(self._process_fn, *args, **kwargs)
for var, hint in self._input_hints.items():
if hint is actual_inputs[var]:
# self parameter
continue
_check_instance_type(hint, actual_inputs[var], var, True)
return self._type_check_result(self.dofn.process(*args, **kwargs))
def _type_check_result(self, transform_results):
if self._output_type_hint is None or transform_results is None:
return transform_results
def type_check_output(o):
# TODO(robertwb): Multi-output.
x = o.value if isinstance(o, (TaggedOutput, WindowedValue)) else o
self._type_check(self._output_type_hint, x, is_input=False)
# If the return type is a generator, then we will need to interleave our
# type-checking with its normal iteration so we don't deplete the
# generator initially just by type-checking its yielded contents.
if isinstance(transform_results, types.GeneratorType):
return GeneratorWrapper(transform_results, type_check_output)
for o in transform_results:
type_check_output(o)
return transform_results
def _type_check(self, type_constraint, datum, is_input):
"""Typecheck a PTransform related datum according to a type constraint.
This function is used to optionally type-check either an input or an output
to a PTransform.
Args:
      type_constraint: An instance of a typehints.TypeConstraint, one of the
white-listed builtin Python types, or a custom user class.
datum: An instance of a Python object.
is_input: True if 'datum' is an input to a PTransform's DoFn. False
otherwise.
Raises:
TypeError: If 'datum' fails to type-check according to 'type_constraint'.
"""
datum_type = 'input' if is_input else 'output'
try:
check_constraint(type_constraint, datum)
except CompositeTypeHintError as e:
raise TypeCheckError, e.message, sys.exc_info()[2]
except SimpleTypeHintError:
error_msg = ("According to type-hint expected %s should be of type %s. "
"Instead, received '%s', an instance of type %s."
% (datum_type, type_constraint, datum, type(datum)))
raise TypeCheckError, error_msg, sys.exc_info()[2]
| {
"content_hash": "9065e85b3c6bdcde370e4fe8a90b58b2",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 37.34969325153374,
"alnum_prop": 0.683311432325887,
"repo_name": "yk5/beam",
"id": "89a5f5c7e2cf2366d15e9bd8b86c31d0aa932395",
"size": "6873",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/typehints/typecheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "50057"
},
{
"name": "Java",
"bytes": "11703716"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2856021"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
} |
__doc__="""
Jumps to next instance shown in the preview field of the current Edit tab.
"""
import GlyphsApp
Doc = Glyphs.currentDocument
numberOfInstances = len( Glyphs.font.instances )
try:
currentInstanceNumber = Doc.windowController().activeEditViewController().selectedInstance()
if currentInstanceNumber > 1:
Doc.windowController().activeEditViewController().setSelectedInstance_( currentInstanceNumber - 1 )
else:
Doc.windowController().activeEditViewController().setSelectedInstance_( numberOfInstances )
except Exception, e:
print "Error:", e
| {
"content_hash": "89fe8d56ef4fa320c649d724008889ef",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 101,
"avg_line_length": 28.4,
"alnum_prop": 0.778169014084507,
"repo_name": "weiweihuanghuang/Glyphs-Scripts",
"id": "a8e9a22dd7e4490086352a77b12c21d5241d82b6",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Masters/Show previous instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316614"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
self.send_execute(functionName, funcArgs)
return self.recv_execute()
def send_execute(self, functionName, funcArgs):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.functionName = functionName
args.funcArgs = funcArgs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
try:
result.success = self._handler.execute(args.functionName, args.funcArgs)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except DRPCExecutionException as e:
msg_type = TMessageType.REPLY
result.e = e
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args(object):
"""
Attributes:
- functionName
- funcArgs
"""
def __init__(self, functionName=None, funcArgs=None,):
self.functionName = functionName
self.funcArgs = funcArgs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.funcArgs = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('execute_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8') if sys.version_info[0] == 2 else self.functionName)
oprot.writeFieldEnd()
if self.funcArgs is not None:
oprot.writeFieldBegin('funcArgs', TType.STRING, 2)
oprot.writeString(self.funcArgs.encode('utf-8') if sys.version_info[0] == 2 else self.funcArgs)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(execute_args)
execute_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', 'UTF8', None, ), # 1
(2, TType.STRING, 'funcArgs', 'UTF8', None, ), # 2
)
class execute_result(object):
"""
Attributes:
- success
- e
- aze
"""
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('execute_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(execute_result)
execute_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'e', [DRPCExecutionException, None], None, ), # 1
(2, TType.STRUCT, 'aze', [AuthorizationException, None], None, ), # 2
)
fix_spec(all_structs)
del all_structs
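if __name__ == '__main__':
    # Illustrative client sketch only: the host, port and DRPC function name
    # below are assumptions (3772 is Storm's conventional DRPC port).
    from thrift.transport import TSocket
    from thrift.protocol import TBinaryProtocol
    socket = TSocket.TSocket('localhost', 3772)
    transport = TTransport.TFramedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    try:
        print(client.execute('words', 'hello'))
    finally:
        transport.close()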
| {
"content_hash": "f3db21c802fa6a579f24167da8b4c06b",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 134,
"avg_line_length": 34.186619718309856,
"alnum_prop": 0.5742094963435987,
"repo_name": "hmcl/storm-apache",
"id": "4d91b1d9f0034bf7ca971e96172b4609d753a053",
"size": "10668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storm-client/src/py/storm/DistributedRPC.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "54084"
},
{
"name": "CSS",
"bytes": "12597"
},
{
"name": "Clojure",
"bytes": "323393"
},
{
"name": "Fancy",
"bytes": "6234"
},
{
"name": "FreeMarker",
"bytes": "3512"
},
{
"name": "HTML",
"bytes": "193952"
},
{
"name": "Java",
"bytes": "12471832"
},
{
"name": "JavaScript",
"bytes": "74893"
},
{
"name": "M4",
"bytes": "1522"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "PowerShell",
"bytes": "3405"
},
{
"name": "Python",
"bytes": "1072987"
},
{
"name": "Ruby",
"bytes": "15824"
},
{
"name": "Shell",
"bytes": "24778"
},
{
"name": "Thrift",
"bytes": "31772"
},
{
"name": "XSLT",
"bytes": "1365"
}
],
"symlink_target": ""
} |
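# Declare this directory as a pkg_resources-style namespace package, so that
# other distributions can contribute additional modules under
# mallard.ducktype.extensions.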
__import__('pkg_resources').declare_namespace(__name__)
| {
"content_hash": "96b57b75f83745a044136090e8240b5e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 55,
"avg_line_length": 56,
"alnum_prop": 0.6785714285714286,
"repo_name": "projectmallard/mallard-ducktype",
"id": "1e3a4b538ce599fc6293990aab46ba932e88d34b",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mallard/ducktype/extensions/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "236590"
},
{
"name": "Shell",
"bytes": "1316"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# Copyright 2013 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import json
import yaml
from glob import glob
from shutil import copyfile, copytree
from testing.common.database import (
Database, SkipIfNotInstalledDecorator
)
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
__all__ = ['Elasticsearch', 'skipIfNotFound']
SEARCH_PATHS = ['/usr/share/elasticsearch']
class Elasticsearch(Database):
DEFAULT_SETTINGS = dict(auto_start=2,
base_dir=None,
elasticsearch_home=None,
pid=None,
port=None,
copy_data_from=None,
boot_timeout=20)
subdirectories = ['data', 'logs']
def initialize(self):
self.elasticsearch_home = self.settings.get('elasticsearch_home')
if self.elasticsearch_home is None:
self.elasticsearch_home = find_elasticsearch_home()
user_config = self.settings.get('elasticsearch_yaml')
elasticsearch_yaml_path = find_elasticsearch_yaml_path(self.elasticsearch_home)
with open(os.path.realpath(elasticsearch_yaml_path)) as fd:
self.elasticsearch_yaml = yaml.load(fd.read()) or {}
self.elasticsearch_yaml['network.host'] = '127.0.0.1'
self.elasticsearch_yaml['http.port'] = self.settings['port']
self.elasticsearch_yaml['path.data'] = os.path.join(self.base_dir, 'data')
self.elasticsearch_yaml['path.logs'] = os.path.join(self.base_dir, 'logs')
self.elasticsearch_yaml['cluster.name'] = generate_cluster_name()
self.elasticsearch_yaml['discovery.zen.ping.multicast.enabled'] = False
if user_config:
for key, value in user_config.items():
self.elasticsearch_yaml[key] = value
def dsn(self, **kwargs):
return {'hosts': ['127.0.0.1:%d' % self.elasticsearch_yaml['http.port']]}
def get_data_directory(self):
return os.path.join(self.base_dir, 'data')
def initialize_database(self):
# copy data files
if self.settings['copy_data_from'] and self.elasticsearch_yaml['cluster.name']:
indexdir = os.listdir(self.settings['copy_data_from'])[0]
os.rename(os.path.join(self.base_dir, 'data', indexdir),
os.path.join(self.base_dir, 'data', self.elasticsearch_yaml['cluster.name']))
# conf directory
for filename in os.listdir(self.elasticsearch_home):
srcpath = os.path.join(self.elasticsearch_home, filename)
destpath = os.path.join(self.base_dir, filename)
if not os.path.exists(destpath):
if filename in ['lib', 'plugins']:
os.symlink(srcpath, destpath)
elif filename == 'conf':
destpath = os.path.join(self.base_dir, 'config')
copytree(srcpath, destpath)
elif os.path.isdir(srcpath):
copytree(srcpath, destpath)
else:
copyfile(srcpath, destpath)
elasticsearch_yaml_path = find_elasticsearch_yaml_path(self.elasticsearch_home)
if not elasticsearch_yaml_path.startswith(self.elasticsearch_home):
destpath = os.path.join(self.base_dir, 'config')
copytree(os.path.dirname(elasticsearch_yaml_path), destpath)
# rewrite elasticsearch.in.sh (for homebrew)
with open(os.path.join(self.base_dir, 'bin', 'elasticsearch.in.sh'), 'r+t') as fd:
body = re.sub('ES_HOME=.*', '', fd.read())
fd.seek(0)
fd.write(body)
def prestart(self):
super(Elasticsearch, self).prestart()
# assign port to elasticsearch
self.elasticsearch_yaml['http.port'] = self.settings['port']
        # generate elasticsearch.yml
with open(os.path.join(self.base_dir, 'config', 'elasticsearch.yml'), 'wt') as fd:
fd.write(yaml.dump(self.elasticsearch_yaml, default_flow_style=False))
def get_server_commandline(self):
return [os.path.join(self.base_dir, 'bin', 'elasticsearch')]
def is_server_available(self):
try:
url = 'http://127.0.0.1:%d/_cluster/health' % self.elasticsearch_yaml['http.port']
ret = json.loads(urlopen(url).read().decode('utf-8'))
if ret['status'] in ('green', 'yellow'):
return True
else:
return False
except Exception:
return False
class ElasticsearchSkipIfNotInstalledDecorator(SkipIfNotInstalledDecorator):
name = 'Elasticsearch'
def search_server(self):
find_elasticsearch_home() # raise exception if not found
skipIfNotFound = skipIfNotInstalled = ElasticsearchSkipIfNotInstalledDecorator()
def strip_version(dir):
    m = re.search(r'(\d+)\.(\d+)\.(\d+)', dir)
if m is None:
return None
else:
return tuple([int(ver) for ver in m.groups()])
def find_elasticsearch_home():
elasticsearch_home = os.environ.get('ES_HOME')
if elasticsearch_home:
elasticsearch_home = os.path.abspath(elasticsearch_home)
if os.path.exists(os.path.join(elasticsearch_home, 'bin', 'elasticsearch')):
return elasticsearch_home
for path in SEARCH_PATHS:
if os.path.exists(os.path.join(path, 'bin', 'elasticsearch')):
return path
# search newest elasticsearch-x.x.x directory
globbed = (glob("/usr/local/*elasticsearch*") +
glob("*elasticsearch*") +
glob("/usr/local/Cellar/elasticsearch/*/libexec"))
elasticsearch_dirs = [os.path.abspath(dir) for dir in globbed if os.path.isdir(dir)]
if elasticsearch_dirs:
return sorted(elasticsearch_dirs, key=strip_version)[-1]
raise RuntimeError("could not find ES_HOME")
def find_elasticsearch_yaml_path(es_home):
for path in (os.path.join(es_home, 'conf', 'elasticsearch.yml'), # ubuntu
os.path.join(es_home, 'config', 'elasticsearch.yml'), # official package
'/etc/elasticsearch/elasticsearch.yml'): # travis
if os.path.exists(path):
return path
raise RuntimeError("could not find elasticsearch.yml")
def generate_cluster_name():
import string
import random
return ''.join([random.choice(string.ascii_letters) for i in range(6)])
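if __name__ == '__main__':
    # Minimal usage sketch (assumes a local Elasticsearch install discoverable
    # via ES_HOME or the search paths above, plus the elasticsearch-py client).
    from elasticsearch import Elasticsearch as ESClient
    with Elasticsearch() as es:            # boots a throw-away node in a temp dir
        client = ESClient(**es.dsn())      # dsn() -> {'hosts': ['127.0.0.1:<port>']}
        print(client.cluster.health())
    # the node is stopped (and its temporary directory cleaned up) on exit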
| {
"content_hash": "94f3f931a270e562793eb4ad8d9e0ac3",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 99,
"avg_line_length": 37.93010752688172,
"alnum_prop": 0.6215450035435861,
"repo_name": "tk0miya/testing.elasticsearch",
"id": "f658fe3df72fd1048c6cad5c16eae1c59468ee1a",
"size": "7055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/testing/elasticsearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16459"
}
],
"symlink_target": ""
} |