Columns: text — string (lengths 4 to 1.02M); meta — dict.
from django.conf.urls import url

from . import views

urlpatterns = [
    url(
        regex=r'^$',
        view=views.entry,
        name='business-entry'
    ),
    url(
        regex=r'^log/$',
        view=views.entry_log,
        name='business-entry-log'
    ),
    url(
        regex=r'^overview/$',
        view=views.overview,
        name='business-overview',
    ),
]
{ "content_hash": "c24aa99542aea1fdc15e1fc677360c9a", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 33, "avg_line_length": 18.095238095238095, "alnum_prop": 0.5157894736842106, "repo_name": "pterk/django-tcb", "id": "e40c703a7a570857e2842a5957fd6a9d31727426", "size": "380", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "business/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "860" }, { "name": "HTML", "bytes": "10807" }, { "name": "JavaScript", "bytes": "484" }, { "name": "Python", "bytes": "45389" }, { "name": "Shell", "bytes": "22" } ], "symlink_target": "" }
PYTHON_VERSION_COMPATIBILITY = "PY3"

DEPS = [
    'builder_name_schema',
    'depot_tools/bot_update',
    'recipe_engine/context',
    'recipe_engine/json',
    'recipe_engine/path',
    'recipe_engine/properties',
    'recipe_engine/python',
    'recipe_engine/raw_io',
    'recipe_engine/step',
]
{ "content_hash": "f46d0deeaa7dc974be129754ee4bab72", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 36, "avg_line_length": 21.76923076923077, "alnum_prop": 0.6819787985865724, "repo_name": "aosp-mirror/platform_external_skia", "id": "f19553a605b88d6b402f9dddaf8b476a22160f7b", "size": "446", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "infra/bots/recipe_modules/vars/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Assembly", "bytes": "12716940" }, { "name": "Batchfile", "bytes": "904" }, { "name": "C", "bytes": "620774" }, { "name": "C#", "bytes": "4683" }, { "name": "C++", "bytes": "27394853" }, { "name": "GLSL", "bytes": "67013" }, { "name": "Go", "bytes": "80137" }, { "name": "HTML", "bytes": "1002516" }, { "name": "Java", "bytes": "32794" }, { "name": "JavaScript", "bytes": "51666" }, { "name": "Lex", "bytes": "4372" }, { "name": "Lua", "bytes": "70974" }, { "name": "Makefile", "bytes": "2295" }, { "name": "Objective-C", "bytes": "35223" }, { "name": "Objective-C++", "bytes": "34410" }, { "name": "PHP", "bytes": "120845" }, { "name": "Python", "bytes": "1002226" }, { "name": "Shell", "bytes": "49974" } ], "symlink_target": "" }
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install-2.7')()
    )
{ "content_hash": "9027bd893e0f881f10c3ca29a81df69e", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 86, "avg_line_length": 28, "alnum_prop": 0.6116071428571429, "repo_name": "t-rodynenko/simplequiz", "id": "5424967687b50ef0155b998ea84b658251a763d7", "size": "360", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "venv/Scripts/easy_install-2.7-script.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
from django.db import models


class Orderable(models.Model):
    """
    Add extra field and default ordering column for an inline orderable model
    """
    order = models.IntegerField(default=0)

    class Meta:
        abstract = True
        ordering = ('order',)
{ "content_hash": "299b5710490cc1bf605a7116fa67c779", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 82, "avg_line_length": 22.46153846153846, "alnum_prop": 0.6061643835616438, "repo_name": "marcofucci/django-inline-orderable", "id": "df80f55925016bfddb5f808e923edecfa58d425d", "size": "292", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "inline_orderable/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "3842" }, { "name": "Python", "bytes": "1481" } ], "symlink_target": "" }
""" generate_graphs.py ------------------ Generate small synthetic graphs whose complete CLD will be computed. """ import cycles import numpy as np import networkx as nx from numpy import arange def is_valid(graph): """Return whether the graph is valid to run experiments on.""" rank = cycles.fundamental_group_rank(graph) # return nx.density(graph) < 0.3 and nx.is_connected(graph) and rank < 50 return nx.is_connected(graph) and rank < 50 def save_graph(graph, filename): """Save the graph to the given path. filename should be the name of the target file, without the format extension. """ component = max(nx.connected_component_subgraphs(graph), key=len) matrix = nx.adjacency_matrix(component).A np.savetxt(filename + '.txt', matrix, fmt='%1.1f') def generate_erdos_renyi(): """Generate small synthetic ER graphs.""" for num_nodes in range(10, 31, 5): for prob in arange(0.05, 0.4, 0.05): for i in range(20): graph = nx.erdos_renyi_graph(num_nodes, prob) if is_valid(graph): rank = cycles.fundamental_group_rank(graph) name = 'data/ER_N={}_p={}_R={}_i={}'.format(num_nodes, int(prob * 1000), rank, i) save_graph(graph, name) def generate_barabasi_albert(): """Generate small synthetic BA graphs.""" for num_nodes in range(10, 31, 5): for edges_per_step in range(2, 6): for i in range(20): graph = nx.barabasi_albert_graph(num_nodes, edges_per_step) if is_valid(graph): rank = cycles.fundamental_group_rank(graph) name = 'data/BA_N={}_m={}_R={}_i={}'.format(num_nodes, edges_per_step, rank, i) save_graph(graph, name) def generate_watts_strogatz(): """Generate small synthetic WS graphs.""" for num_nodes in range(10, 31, 5): for degree in [2, 4]: for prob in arange(0.05, 0.4, 0.05): for i in range(20): graph = nx.watts_strogatz_graph(num_nodes, degree, prob) if is_valid(graph): rank = cycles.fundamental_group_rank(graph) name = 'data/WS_N={}_d={}_p={}_R={}_i={}'.format(num_nodes, degree, int(prob * 1000), rank, i) save_graph(graph, name) def generate_other(): """Generate other small graphs.""" graph = nx.florentine_families_graph() if is_valid(graph): rank = cycles.fundamental_group_rank(graph) filename = 'data/{}_N={}_R={}'.format('florentine', len(graph), rank) save_graph(graph, filename) graph = nx.karate_club_graph() if is_valid(graph): rank = cycles.fundamental_group_rank(graph) filename = 'data/{}_N={}_R={}'.format('karate', len(graph), rank) save_graph(graph, filename) def main(): """Generate small graphs of different kinds.""" generate_erdos_renyi() generate_barabasi_albert() generate_watts_strogatz() generate_other() if __name__ == '__main__': main()
{ "content_hash": "84727440c3bba6ab8fef91cfb69a85a1", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 118, "avg_line_length": 33.02105263157895, "alnum_prop": 0.5773031558814153, "repo_name": "leotrs/graph_homotopy", "id": "b191af64d44ef32bc54ee859144141e877534ae8", "size": "3137", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "generate_graphs.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Matlab", "bytes": "9561" }, { "name": "Python", "bytes": "33231" } ], "symlink_target": "" }
from distutils.core import setup

setup(
    name='rq_test1',
    packages=['rq_test1'],
    version='0.3.0',
    description='Simple statistical functions implemented in readable Python.',
    author='Sherif Soliman',
    author_email='sherif@ssoliman.com',
    copyright='Copyright (c) 2016 Sherif Soliman',
    url='https://github.com/rquirozr/Test-Package2',
    # download_url='https://github.com/sheriferson/simplestatistics/tarball/0.3.0',
    keywords=['statistics', 'math'],
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Topic :: Education',
        'Topic :: Utilities'
    ]
)
{ "content_hash": "568ecb5a7ce5f9f1ed3661a21f1cec35", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 82, "avg_line_length": 36.07142857142857, "alnum_prop": 0.6108910891089109, "repo_name": "rquirozr/Test-Package2", "id": "9cc7bd9d339432a3e2e9ed3e11c2e4efb6bae1a1", "size": "2920", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "31887" } ], "symlink_target": "" }
"""Provide the device automations for Vacuum.""" from typing import Dict, List import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, CONF_CONDITION, CONF_DOMAIN, CONF_TYPE, CONF_DEVICE_ID, CONF_ENTITY_ID, ) from homeassistant.core import HomeAssistant from homeassistant.helpers import condition, config_validation as cv, entity_registry from homeassistant.helpers.typing import ConfigType, TemplateVarsType from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA from . import DOMAIN, STATE_DOCKED, STATE_CLEANING, STATE_RETURNING CONDITION_TYPES = {"is_cleaning", "is_docked"} CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES), } ) async def async_get_conditions( hass: HomeAssistant, device_id: str ) -> List[Dict[str, str]]: """List device conditions for Vacuum devices.""" registry = await entity_registry.async_get_registry(hass) conditions = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue conditions.append( { CONF_CONDITION: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "is_cleaning", } ) conditions.append( { CONF_CONDITION: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "is_docked", } ) return conditions def async_condition_from_config( config: ConfigType, config_validation: bool ) -> condition.ConditionCheckerType: """Create a function to test a device condition.""" if config_validation: config = CONDITION_SCHEMA(config) if config[CONF_TYPE] == "is_docked": test_states = [STATE_DOCKED] else: test_states = [STATE_CLEANING, STATE_RETURNING] def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool: """Test if an entity is a certain state.""" state = hass.states.get(config[ATTR_ENTITY_ID]) return state is not None and state.state in test_states return test_is_state
{ "content_hash": "3d8db337ab42c3c9c7736145281e4ae3", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 85, "avg_line_length": 31.734177215189874, "alnum_prop": 0.6402074192261668, "repo_name": "joopert/home-assistant", "id": "6a41fe0490e13e79fa78f65ecd8988dd1792c9c6", "size": "2507", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/vacuum/device_condition.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "18670593" }, { "name": "Shell", "bytes": "6846" } ], "symlink_target": "" }
import spotify
import threading
import json
import os
import nltk.metrics.agreement

import api_keys

# Get secret keys
KEYS = api_keys.get_keys()

logged_in_event = threading.Event()


def pretty_print(obj):
    print json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))


def connection_state_listener(session):
    if session.connection.state is spotify.ConnectionState.LOGGED_IN:
        logged_in_event.set()

# Specify configuration
config = spotify.Config()
config.user_agent = 'My awesome Spotify client'
config.tracefile = b'/tmp/libspotify-trace.log'

print "Opening session with user {}...".format(KEYS["SPOTIFY_USERNAME"])

# Open session and loop
session = spotify.Session(config)
loop = spotify.EventLoop(session)
loop.start()

session.on(
    spotify.SessionEvent.CONNECTION_STATE_UPDATED,
    connection_state_listener)
session.login(KEYS["SPOTIFY_USERNAME"], KEYS["SPOTIFY_PASSWORD"])

logged_in_event.wait()
print "Logged in and waiting..."


# Return the greatest common suffix in a list of strings
def greatest_common_suffix(list_of_strings):
    reversed_strings = [' '.join(s.split()[::-1]) for s in list_of_strings]
    reversed_gcs = os.path.commonprefix(reversed_strings)
    gcs = ' '.join(reversed_gcs.split()[::-1])
    return gcs


def score(target, item):
    target = target.lower()
    item = item.lower()
    return nltk.metrics.edit_distance(target, item) * 1.0 / len(target)


def match(target, candidate_list, distance_only=False):
    """
    Given a target string and a list of candidate strings, return the best
    matching candidate.
    """
    distances = []
    for item in candidate_list:
        dist = score(target, item)
        distances.append(dist)

    if distance_only:
        return min(distances)

    # Get index of minimum distance
    return distances.index(min(distances))


def search_score(target_tracks, matching_tracks):
    """
    Given a list of track names to be matched, and a list of matching
    tracks, returns a score that approximates the confidence that the
    match is valid.

    The score is based on the average of the edit distance between each
    target track and its best match, offset by the difference in the
    length of each list.
""" distances = [] for target in target_tracks: dist = match(target, matching_tracks, distance_only=True) distances.append(dist) return (sum(distances) / len(distances)) + abs(len(target_tracks)- len(matching_tracks))/len(distances) def search_for_album(show): query = show["name"] search = session.search(query) # Execute search query search = search.load() album_results = search.albums print '\nSearching for "{}"'.format(query) # If we find no results, report error if len(album_results) == 0: raise StandardError("Error: no search results found.") scores = [] for album in album_results: album.load() # Obtain track list browser = album.browse().load() tracks = browser.tracks # Get lists of candidate album's track names and # the actual track names track_names = [clean_track_name(track.name, album, browser) for track in tracks] target_names = [song["name"] for song in show["songs"]] # Obtain a similarity score between the two lists score = search_score(target_names, track_names) # Save the score scores.append(score) # If none of the results have an acceptable score, report # an error if min(scores) > .3: raise StandardError("Error: no results above threshold") return album_results[scores.index(min(scores))] def ascii(s): return s.encode('ascii', 'ignore') def add_spotify_song_data(song, spotify_track): song["spotify_popularity"] = spotify_track.popularity song["spotify_duration"] = spotify_track.duration / 1000 song["spotify_track"] = str(spotify_track.link) song["spotify_track_name"] = spotify_track.name song["spotify_match_score"] = match_score artists= [str(artist.link) for artist in spotify_track.artists] artist_names = [ascii(artist.name) for artist in spotify_track.artists] song["spotify_artists"] = artists song["spotify_artist_names"] = artist_names song["spotify_track_index"] = spotify_track.index def add_spotify_album_data(album, spotify_album): # Save the cover art file found on Spotify cover_art_file = '../data/cover_art/'+str(spotify_album.link)+'.jpg' open(cover_art_file,'w+').write(spotify_album.cover().load().data) # Record album-specific data show["show_on_spotify"] = True show["spotify_album"] = str(spotify_album.link) show["spotify_album_year"] = spotify_album.year show["spotify_album_artist"] = ascii(spotify_album.artist.name) show["spotify_cover_art"] = cover_art_file def clean_track_name(track_name, album, browser): browser = album.browse().load() tracks = browser.tracks track_names = [track.name for track in tracks] gcs = greatest_common_suffix(track_names) track_name = ascii(track_name).lower() album_name = ascii(album.name).lower().replace(' the musical','') # Remove greatest common suffix if large enough if len(gcs) > 3: track_name = track_name.replace(gcs.lower(), '') # Remove "(From "[show_name]")" from track name if present track_name = track_name.replace('(from "{}")'.format(album_name),'') # Remove "- Musical "[show_name]"" from track name if present track_name = track_name.replace(' - musical "{}"'.format(album_name),'') # Remove " - feat.*" if present track_name = track_name.split(" - feat. ")[0] return track_name with open('../data/shows_combined.json.matched', 'r') as f: data = json.load(f) for show in data: show_name = show["name"] # Try to search Spotify for the album. If no suitable matches are found, # note that the album was not found on Spotify and move on. 
    try:
        album = search_for_album(show)
    except StandardError as e:
        show["show_on_spotify"] = False
        print e
        continue

    # Load the album, get the track list, and produce a list of track
    # names on the Spotify album
    album.load()
    browser = album.browse().load()
    tracks = browser.tracks
    track_names = [clean_track_name(track.name, album, browser)
                   for track in tracks]

    show["spotify_song_count"] = len(track_names)
    add_spotify_album_data(show, album)

    # Keep track of any songs that we find on spotify that we didn't have
    # saved before
    new_songs = []

    # For each song in the show, find a match from the track list.
    for song in show["songs"]:
        track_index = match(song["name"], track_names)
        matching_track = tracks[track_index]
        matching_track_name = clean_track_name(matching_track.name,
                                               album, browser)
        song_name = ascii(song["name"])
        match_score = score(song_name, matching_track_name)

        print '\t"{}", "{}": {}'.format(
            song_name, matching_track_name, match_score)

        if match_score < .7:
            song["song_on_allmusicals"] = True
            song["song_on_spotify"] = True
            add_spotify_song_data(song, matching_track)
        else:
            new_song = {}
            song["song_on_spotify"] = False
            song["song_on_allmusicals"] = True
            new_song["song_on_spotify"] = True
            new_song["song_on_allmusicals"] = False
            add_spotify_song_data(new_song, matching_track)
            collected = [s["spotify_track"] for s in new_songs]
            if new_song["spotify_track"] not in collected:
                new_songs.append(new_song)

    collected = [s["spotify_track"] for s in show["songs"]
                 if "spotify_track" in s]
    new_songs = [s for s in new_songs if s["spotify_track"] not in collected]
    show["songs"].extend(new_songs)

with open('../data/shows_w_spotify.json', 'w') as outfile:
    json.dump(data, outfile)
{ "content_hash": "70dbea1ead9603d63be789458c04a54c", "timestamp": "", "source": "github", "line_count": 256, "max_line_length": 82, "avg_line_length": 28.91796875, "alnum_prop": 0.7080913143320275, "repo_name": "willwest/broadwaydb", "id": "059f4e705407fd88b54870e000d121a2011c7e90", "size": "7403", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "crawl/crawl_spotify.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "9056" }, { "name": "HTML", "bytes": "5834" }, { "name": "JavaScript", "bytes": "536" }, { "name": "Makefile", "bytes": "29" }, { "name": "Python", "bytes": "33568" }, { "name": "R", "bytes": "5854" } ], "symlink_target": "" }
import sys

from mauto import gui
from mauto.api import library


def show():
    gui.show()


def select_repo():
    gui.select_repo()


def list_macros():
    return library.macros.keys()


def new_macro(*arg, **kwds):
    return library.new_macro(*arg, **kwds)


def get_macro(name):
    return library.get(name)


def remove_macro(name):
    if library.get(name):
        library.remove_macro(name)


def save_macro(name):
    return library.save_macro(name)


def get_filepath(name):
    return library.get_filepath(name)


def __main__():
    app = gui.QtGui.QApplication(sys.argv)
    w = gui.Layout()
    w.show()
    sys.exit(app.exec_())

if __name__ == "__main__":
    __main__()
{ "content_hash": "f416cb853539322e5df49dace06bf2fd", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 42, "avg_line_length": 14.416666666666666, "alnum_prop": 0.6257225433526011, "repo_name": "csaez/mauto", "id": "9233f7c2437fb7455f008dd6631a612f6d896ac5", "size": "692", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mauto/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "52303" } ], "symlink_target": "" }
import abc
import collections
import os
import re
import shutil
import time

import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six

from neutron._i18n import _, _LI, _LW, _LE
from neutron.agent.common import utils as agent_common_utils
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.ipam import utils as ipam_utils

LOG = logging.getLogger(__name__)

UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'
DNSMASQ_SERVICE_NAME = 'dnsmasq'


class DictModel(dict):
    """Convert dict into an object that provides attribute access to values."""

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values."""
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        for key, value in six.iteritems(self):
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def __str__(self):
        pairs = ['%s=%s' % (k, v) for k, v in self.items()]
        return ', '.join(sorted(pairs))


class NetModel(DictModel):

    def __init__(self, d):
        super(NetModel, self).__init__(d)

        self._ns_name = "%s%s" % (NS_PREFIX, self.id)

    @property
    def namespace(self):
        return self._ns_name


@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):

    def __init__(self, conf, network, process_monitor,
                 version=None, plugin=None):
        self.conf = conf
        self.network = network
        self.process_monitor = process_monitor
        self.device_manager = DeviceManager(self.conf, plugin)
        self.version = version

    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""

    def restart(self):
        """Restart the dhcp service for the network."""
        self.disable(retain_port=True)
        self.enable()

    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        raise NotImplementedError()

    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""
        raise NotImplementedError()
    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated"""
        raise NotImplementedError()

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """True if the metadata-proxy should be enabled for the network."""
        raise NotImplementedError()


@six.add_metaclass(abc.ABCMeta)
class DhcpLocalProcess(DhcpBase):
    PORTS = []

    def __init__(self, conf, network, process_monitor, version=None,
                 plugin=None):
        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
                                               version, plugin)
        self.confs_dir = self.get_confs_dir(conf)
        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
        common_utils.ensure_dir(self.network_conf_dir)

    @staticmethod
    def get_confs_dir(conf):
        return os.path.abspath(os.path.normpath(conf.dhcp_confs))

    def get_conf_file_name(self, kind):
        """Returns the file name for a given kind of config file."""
        return os.path.join(self.network_conf_dir, kind)

    def _remove_config_files(self):
        shutil.rmtree(self.network_conf_dir, ignore_errors=True)

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            common_utils.ensure_dir(self.network_conf_dir)
            interface_name = self.device_manager.setup(self.network)
            self.interface_name = interface_name
            self.spawn_process()

    def _get_process_manager(self, cmd_callback=None):
        return external_process.ProcessManager(
            conf=self.conf,
            uuid=self.network.id,
            namespace=self.network.namespace,
            default_cmd_callback=cmd_callback,
            pid_file=self.get_conf_file_name('pid'),
            run_as_root=True)

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
        self._get_process_manager().disable()
        if not retain_port:
            self._destroy_namespace_and_port()
        self._remove_config_files()

    def _destroy_namespace_and_port(self):
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete interface: %s'),
                        self.interface_name)

        ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
        try:
            ns_ip.netns.delete(self.network.namespace)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete namespace: %s'),
                        self.network.namespace)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')

        try:
            with open(file_name, 'r') as f:
                try:
                    return converter(f.read()) if converter else f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')

        LOG.debug(msg, file_name)
        return None

    @property
    def interface_name(self):
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface')
        common_utils.replace_file(interface_file_path, value)

    @property
    def active(self):
        return self._get_process_manager().active

    @abc.abstractmethod
    def spawn_process(self):
        pass


class Dnsmasq(DhcpLocalProcess):
    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a
    # convenience for users of this class.
    PORTS = {constants.IP_VERSION_4:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             constants.IP_VERSION_6:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    _TAG_PREFIX = 'tag%d'

    _ID = 'id:'

    @classmethod
    def check_version(cls):
        pass

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = cls.get_confs_dir(conf)
        try:
            return [
                c for c in os.listdir(confs_dir)
                if uuidutils.is_uuid_like(c)
            ]
        except OSError:
            return []

    def _build_cmdline_callback(self, pid_file):
        # We ignore local resolv.conf if dns servers are specified
        # or if local resolution is explicitly disabled.
        _no_resolv = (
            '--no-resolv' if self.conf.dnsmasq_dns_servers or
            not self.conf.dnsmasq_local_resolv else '')
        cmd = [
            'dnsmasq',
            '--no-hosts',
            _no_resolv,
            '--strict-order',
            '--except-interface=lo',
            '--pid-file=%s' % pid_file,
            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
            '--dhcp-match=set:ipxe,175',
        ]
        if self.device_manager.driver.bridged:
            cmd += [
                '--bind-interfaces',
                '--interface=%s' % self.interface_name,
            ]
        else:
            cmd += [
                '--bind-dynamic',
                '--interface=%s' % self.interface_name,
                '--interface=tap*',
                '--bridge-interface=%s,tap*' % self.interface_name,
            ]

        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [n_const.DHCPV6_STATEFUL,
                                  n_const.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'

            cidr = netaddr.IPNetwork(subnet.cidr)

            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration

            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size

        if cfg.CONF.advertise_mtu:
            mtu = getattr(self.network, 'mtu', 0)
            # Do not advertise unknown mtu
            if mtu > 0:
                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)

        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)

        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')

        if self.conf.dnsmasq_base_log_dir:
            log_dir = os.path.join(
                self.conf.dnsmasq_base_log_dir,
                self.network.id)
            try:
                if not os.path.exists(log_dir):
                    os.makedirs(log_dir)
            except OSError:
                LOG.error(_LE('Error while create dnsmasq log dir: %s'),
                          log_dir)
            else:
                log_filename = os.path.join(log_dir, 'dhcp_dns_log')
                cmd.append('--log-queries')
                cmd.append('--log-dhcp')
                cmd.append('--log-facility=%s' % log_filename)

        return cmd

    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        # we only need to generate the lease file the first time dnsmasq
        # starts rather than on every reload since dnsmasq will keep the
        # file current
        self._output_init_lease_file()
        self._spawn_or_reload_process(reload_with_HUP=False)

    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.

        When reload_with_HUP is True, dnsmasq receives a HUP signal,
        or it's reloaded if the process is not running.
        """

        self._output_config_files()

        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)

        pm.enable(reload_cfg=reload_with_HUP)

        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)

    def _release_lease(self, mac_address, ip, client_id):
        """Release a DHCP lease."""
        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
            # Note(SridharG) dhcp_release is only supported for IPv4
            # addresses. For more details, please refer to man page.
            return

        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        if client_id:
            cmd.append(client_id)
        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
        ip_wrapper.netns.execute(cmd, run_as_root=True)

    def _output_config_files(self):
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return

        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
        """Sort fixed_ips so that stateless IPv6 subnets appear first.

        For example, If a port with v6 extra_dhcp_opts is on a network with
        IPv4 and IPv6 stateless subnets. Then dhcp host file will have
        below 2 entries for same MAC,

        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for IPv4 dhcp)
        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for stateless IPv6 for v6 options)

        dnsmasq internal details for processing host file entries
        1) dnsmasq reads the host file from EOF.
        2) So it first picks up stateless IPv6 entry,
           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        3) But dnsmasq doesn't have sufficient checks to skip this entry and
           pick next entry, to process dhcp IPv4 request.
        4) So dnsmasq uses this entry to process dhcp IPv4 request.
        5) As there is no ip in this entry, dnsmasq logs "no address available"
           and fails to send DHCPOFFER message.

        As we rely on internal details of dnsmasq to understand and fix the
        issue, Ihar sent a mail to dnsmasq-discuss mailing list
        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
        009650.html

        So if we reverse the order of writing entries in host file,
        so that entry for stateless IPv6 comes first, then dnsmasq
        can correctly fetch the IPv4 address.
        """
        return sorted(
            fixed_ips,
            key=lambda fip: ((fip.subnet_id in v6_nets) and (
                v6_nets[fip.subnet_id].ipv6_address_mode == (
                    n_const.DHCPV6_STATELESS))),
            reverse=True)

    def _iter_hosts(self):
        """Iterate over hosts.

        For each host on the network we yield a tuple containing:

        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
                    # if alloc is None, it means there is no need to allocate
                    # an IPv6 address because of stateless DHCPv6 network.
            host_name,  # Host name.
            name,  # Canonical hostname in the format 'hostname[.domain]'.
            no_dhcp,  # A flag indicating that the address doesn't need a DHCP
                      # IP address.
            no_opts,  # A flag indication that options shouldn't be written
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)

        for port in self.network.ports:
            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
                                                         v6_nets)
            # Confirm whether Neutron server supports dns_name attribute in
            # the ports API
            dns_assignment = getattr(port, 'dns_assignment', None)
            if dns_assignment:
                dns_ip_map = {d.ip_address: d for d in dns_assignment}
            for alloc in fixed_ips:
                no_dhcp = False
                no_opts = False
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    no_dhcp = addr_mode in (n_const.IPV6_SLAAC,
                                            n_const.DHCPV6_STATELESS)
                    # we don't setup anything for SLAAC. It doesn't make sense
                    # to provide options for a client that won't use DHCP
                    no_opts = addr_mode == n_const.IPV6_SLAAC

                # If dns_name attribute is supported by ports API, return the
                # dns_assignment generated by the Neutron server. Otherwise,
                # generate hostname and fqdn locally (previous behaviour)
                if dns_assignment:
                    hostname = dns_ip_map[alloc.ip_address].hostname
                    fqdn = dns_ip_map[alloc.ip_address].fqdn
                else:
                    hostname = 'host-%s' % alloc.ip_address.replace(
                        '.', '-').replace(':', '-')
                    fqdn = hostname
                    if self.conf.dhcp_domain:
                        fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)

    def _get_port_extra_dhcp_opts(self, port):
        return getattr(port, edo_ext.EXTRADHCPOPTS, False)

    def _output_init_lease_file(self):
        """Write a fake lease file to bootstrap dnsmasq.

        The generated file is passed to the --dhcp-leasefile option of dnsmasq.
        This is used as a bootstrapping mechanism to avoid NAKing active leases
        when a dhcp server is scheduled to another agent. Using a leasefile
        will also prevent dnsmasq from NAKing or ignoring renewals after a
        restart.

        Format is as follows:
        epoch-timestamp mac_addr ip_addr hostname client-ID
        """
        filename = self.get_conf_file_name('leases')
        buf = six.StringIO()

        LOG.debug('Building initial lease file: %s', filename)
        # we make up a lease time for the database entry
        if self.conf.dhcp_lease_duration == -1:
            # Even with an infinite lease, a client may choose to renew a
            # previous lease on reboot or interface bounce so we should have
            # an entry for it.
            # Dnsmasq timestamp format for an infinite lease is 0.
            timestamp = 0
        else:
            timestamp = int(time.time()) + self.conf.dhcp_lease_duration
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            # don't write ip address which belongs to a dhcp disabled subnet
            # or an IPv6 SLAAC/stateless subnet
            if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue

            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            # all that matters is the mac address and IP. the hostname and
            # client ID will be overwritten on the next renewal.
            buf.write('%s %s %s * *\n' %
                      (timestamp, port.mac_address, ip_address))
        contents = buf.getvalue()
        common_utils.replace_file(filename, contents)
        LOG.debug('Done building initial lease file %s with contents:\n%s',
                  filename, contents)
        return filename

    @staticmethod
    def _format_address_for_dnsmasq(address):
        # (dzyu) Check if it is legal ipv6 address, if so, need wrap
        # it with '[]' to let dnsmasq to distinguish MAC address from
        # IPv6 address.
        if netaddr.valid_ipv6(address):
            return '[%s]' % address
        return address

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.

        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::

            'mac_address,FQDN,ip_address'

        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')

        LOG.debug('Building host file: %s', filename)
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        # NOTE(ihrachyshka): the loop should not log anything inside it, to
        # avoid potential performance drop when lots of hosts are dumped
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            if no_dhcp:
                if not no_opts and self._get_port_extra_dhcp_opts(port):
                    buf.write('%s,%s%s\n' %
                              (port.mac_address, 'set:', port.id))
                continue

            # don't write ip address which belongs to a dhcp disabled subnet.
            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue

            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)

            if self._get_port_extra_dhcp_opts(port):
                client_id = self._get_client_id(port)
                if client_id and len(port.extra_dhcp_opts) > 1:
                    buf.write('%s,%s%s,%s,%s,%s%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address, 'set:', port.id))
                elif client_id and len(port.extra_dhcp_opts) == 1:
                    buf.write('%s,%s%s,%s,%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address))
                else:
                    buf.write('%s,%s,%s,%s%s\n' %
                              (port.mac_address, name, ip_address,
                               'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))

        common_utils.replace_file(filename, buf.getvalue())
        LOG.debug('Done building host file %s', filename)
        return filename

    def _get_client_id(self, port):
        if self._get_port_extra_dhcp_opts(port):
            for opt in port.extra_dhcp_opts:
                if opt.opt_name == edo_ext.CLIENT_ID:
                    return opt.opt_value

    def _read_hosts_file_leases(self, filename):
        leases = set()
        try:
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    mac = host[0]
                    client_id = None
                    if host[1].startswith('set:'):
                        continue
                    if host[1].startswith(self._ID):
                        ip = host[3].strip('[]')
                        client_id = host[1][len(self._ID):]
                    else:
                        ip = host[2].strip('[]')
                    leases.add((ip, mac, client_id))
        except (OSError, IOError):
            LOG.debug('Error while reading hosts file %s', filename)
        return leases

    def _release_unused_leases(self):
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)

        new_leases = set()
        dhcp_port_exists = False
        dhcp_port_on_this_host = self.device_manager.get_device_id(
            self.network)
        for port in self.network.ports:
            client_id = self._get_client_id(port)
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address, client_id))
            if port.device_id == dhcp_port_on_this_host:
                dhcp_port_exists = True

        for ip, mac, client_id in old_leases - new_leases:
            self._release_lease(mac, ip, client_id)

        if not dhcp_port_exists:
            self.device_manager.driver.unplug(
                self.interface_name, namespace=self.network.namespace)

    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.

        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmasq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            if alloc:
                buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        common_utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        options, subnet_index_map = self._generate_opts_per_subnet()
        options += self._generate_opts_per_port(subnet_index_map)

        name = self.get_conf_file_name('opts')
        common_utils.replace_file(name, '\n'.join(options))
        return name

    def _generate_opts_per_subnet(self):
        options = []
        subnet_index_map = {}
        if self.conf.enable_isolated_metadata or self.conf.force_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        isolated_subnets = self.get_isolated_subnets(self.network)
        for i, subnet in enumerate(self.network.subnets):
            addr_mode = getattr(subnet, 'ipv6_address_mode', None)
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 addr_mode == n_const.IPV6_SLAAC)):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_index_map[subnet.id] = i

            if self.conf.dhcp_domain and subnet.ip_version == 6:
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))

            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == constants.IPv4_ANY:
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))

            # Add host routes for isolated network segments

            if (self.conf.force_metadata or
                (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4)):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )
            elif not isolated_subnets[subnet.id] and gateway:
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, gateway)
                )

            if subnet.ip_version == 4:
                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
                                    self.network.subnets
                                    if (s.ip_version == 4 and
                                        s.cidr != subnet.cidr)])

                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
                                                      gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))

                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        return options, subnet_index_map

    def _generate_opts_per_port(self, subnet_index_map):
        options = []
        dhcp_ips = collections.defaultdict(list)
        for port in self.network.ports:
            if self._get_port_extra_dhcp_opts(port):
                port_ip_versions = set(
                    [netaddr.IPAddress(ip.ip_address).version
                     for ip in port.fixed_ips])
                for opt in port.extra_dhcp_opts:
                    if opt.opt_name == edo_ext.CLIENT_ID:
                        continue
                    opt_ip_version = opt.ip_version
                    if opt_ip_version in port_ip_versions:
                        options.append(
                            self._format_option(opt_ip_version, port.id,
                                                opt.opt_name, opt.opt_value))
                    else:
                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
                                     "because it's ip_version %(version)d "
                                     "is not in port's address IP versions"),
                                 {'opt': opt.opt_name,
                                  'version': opt_ip_version})

            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_index_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)

        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if len(vx_ips) > 1:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        return options

    def _make_subnet_interface_ip_map(self):
        ip_dev = ip_lib.IPDevice(self.interface_name,
                                 namespace=self.network.namespace)

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}

        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])

            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]

        return retval

    def _format_option(self, ip_version, tag, option, *args):
        """Format DHCP option by option name or code."""
        option = str(option)
        pattern = "(tag:(.*),)?(.*)$"
        matches = re.match(pattern, option)
        extra_tag = matches.groups()[0]
        option = matches.groups()[2]

        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag

        if not option.isdigit():
            if ip_version == 4:
                option = 'option:%s' % option
            else:
                option = 'option6:%s' % option
        if extra_tag:
            tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
        else:
            tags = ('tag:' + tag, '%s' % option)
        return ','.join(tags + args)

    @staticmethod
    def _convert_to_literal_addrs(ip_version, ips):
        if ip_version == 4:
            return ips
        return ['[' + ip + ']' for ip in ips]

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated

        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)

        for port in network.ports:
            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False

        return isolated_subnets

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network

        This method returns True for truly isolated networks (ie: not
        attached to a router) when enable_isolated_metadata is True, or for
        all the networks when the force_metadata flags is True.

        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
""" if conf.force_metadata: return True if conf.enable_metadata_network and conf.enable_isolated_metadata: # check if the network has a metadata subnet meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR) if any(netaddr.IPNetwork(s.cidr) in meta_cidr for s in network.subnets): return True if not conf.enable_isolated_metadata: return False isolated_subnets = cls.get_isolated_subnets(network) return any(isolated_subnets[subnet.id] for subnet in network.subnets) class DeviceManager(object): def __init__(self, conf, plugin): self.conf = conf self.plugin = plugin self.driver = agent_common_utils.load_interface_driver(conf) def get_interface_name(self, network, port): """Return interface(device) name for use by the DHCP process.""" return self.driver.get_device_name(port) def get_device_id(self, network): """Return a unique DHCP device ID for this host on the network.""" # There could be more than one dhcp server per network, so create # a device id that combines host and network ids return common_utils.get_dhcp_agent_device_id(network.id, self.conf.host) def _set_default_route(self, network, device_name): """Sets the default gateway for this dhcp namespace. This method is idempotent and will only adjust the route if adjusting it would change it from what it already is. This makes it safe to call and avoids unnecessary perturbation of the system. """ device = ip_lib.IPDevice(device_name, namespace=network.namespace) gateway = device.route.get_gateway() if gateway: gateway = gateway.get('gateway') for subnet in network.subnets: skip_subnet = ( subnet.ip_version != 4 or not subnet.enable_dhcp or subnet.gateway_ip is None) if skip_subnet: continue if gateway != subnet.gateway_ip: LOG.debug('Setting gateway for dhcp netns on net %(n)s to ' '%(ip)s', {'n': network.id, 'ip': subnet.gateway_ip}) # Check for and remove the on-link route for the old # gateway being replaced, if it is outside the subnet is_old_gateway_not_in_subnet = (gateway and not ipam_utils.check_subnet_ip( subnet.cidr, gateway)) if is_old_gateway_not_in_subnet: v4_onlink = device.route.list_onlink_routes( constants.IP_VERSION_4) v6_onlink = device.route.list_onlink_routes( constants.IP_VERSION_6) existing_onlink_routes = set( r['cidr'] for r in v4_onlink + v6_onlink) if gateway in existing_onlink_routes: device.route.delete_route(gateway, scope='link') is_new_gateway_not_in_subnet = (subnet.gateway_ip and not ipam_utils.check_subnet_ip( subnet.cidr, subnet.gateway_ip)) if is_new_gateway_not_in_subnet: device.route.add_route(subnet.gateway_ip, scope='link') device.route.add_gateway(subnet.gateway_ip) return # No subnets on the network have a valid gateway. Clean it up to avoid # confusion from seeing an invalid gateway here. if gateway is not None: LOG.debug('Removing gateway for dhcp netns on net %s', network.id) device.route.delete_gateway(gateway) def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets): """Set up the existing DHCP port, if there is one.""" # To avoid pylint thinking that port might be undefined after # the following loop... port = None # Look for an existing DHCP port for this network. for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: # If using gateway IPs on this port, we can skip the # following code, whose purpose is just to review and # update the Neutron-allocated IP addresses for the # port. if self.driver.use_gateway_ips: return port # Otherwise break out, as we now have the DHCP port # whose subnets and addresses we need to review. 
                break
        else:
            return None

        # Compare what the subnets should be against what is already
        # on the port.
        dhcp_enabled_subnet_ids = set(dhcp_subnets)
        port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)

        # If those differ, we need to call update.
        if dhcp_enabled_subnet_ids != port_subnet_ids:
            # Collect the subnets and fixed IPs that the port already
            # has, for subnets that are still in the DHCP-enabled set.
            wanted_fixed_ips = []
            for fixed_ip in port.fixed_ips:
                if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                    wanted_fixed_ips.append(
                        {'subnet_id': fixed_ip.subnet_id,
                         'ip_address': fixed_ip.ip_address})

            # Add subnet IDs for new DHCP-enabled subnets.
            wanted_fixed_ips.extend(
                dict(subnet_id=s)
                for s in dhcp_enabled_subnet_ids - port_subnet_ids)

            # Update the port to have the calculated subnets and fixed
            # IPs. The Neutron server will allocate a fresh IP for
            # each subnet that doesn't already have one.
            port = self.plugin.update_dhcp_port(
                port.id,
                {'port': {'network_id': network.id,
                          'fixed_ips': wanted_fixed_ips}})
            if not port:
                raise exceptions.Conflict()

        return port

    def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
        """Setup the reserved DHCP port, if there is one."""
        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                  ' does not yet exist. Checking for a reserved port.',
                  {'device_id': device_id, 'network_id': network.id})
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == n_const.DEVICE_ID_RESERVED_DHCP_PORT:
                try:
                    port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'device_id': device_id}})
                except oslo_messaging.RemoteError as e:
                    if e.exc_type == n_exc.DhcpPortInUse:
                        LOG.info(_LI("Skipping DHCP port %s as it is "
                                     "already in use"), port.id)
                        continue
                    raise
                if port:
                    return port

    def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
        """Create and set up new DHCP port for the specified network."""
        LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                  ' does not yet exist. Creating new one.',
                  {'device_id': device_id, 'network_id': network.id})

        # Make a list of the subnets that need a unique IP address for
        # this DHCP port.
        if self.driver.use_gateway_ips:
            unique_ip_subnets = []
        else:
            unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]

        port_dict = dict(
            name='',
            admin_state_up=True,
            device_id=device_id,
            network_id=network.id,
            tenant_id=network.tenant_id,
            fixed_ips=unique_ip_subnets)
        return self.plugin.create_dhcp_port({'port': port_dict})

    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""

        # The ID that the DHCP port will have (or already has).
        device_id = self.get_device_id(network)

        # Get the set of DHCP-enabled subnets on this network.
        dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
                        if subnet.enable_dhcp}

        # There are 3 cases: either the DHCP port already exists (but
        # might need to be updated for a changed set of subnets); or
        # some other code has already prepared a 'reserved' DHCP port,
        # and we just need to adopt that; or we need to create a new
        # DHCP port. Try each of those in turn until we have a DHCP
        # port.
        for setup_method in (self._setup_existing_dhcp_port,
                             self._setup_reserved_dhcp_port,
                             self._setup_new_dhcp_port):
            dhcp_port = setup_method(network, device_id, dhcp_subnets)
            if dhcp_port:
                break
        else:
            raise exceptions.Conflict()

        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=dhcp_subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]

        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips

        return dhcp_port

    def _update_dhcp_port(self, network, port):
        for index in range(len(network.ports)):
            if network.ports[index].id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)

    def _cleanup_stale_devices(self, network, dhcp_port):
        LOG.debug("Cleaning stale devices for network %s", network.id)
        dev_name = self.driver.get_device_name(dhcp_port)
        ns_ip = ip_lib.IPWrapper(namespace=network.namespace)

        for d in ns_ip.get_devices(exclude_loopback=True):
            # delete all devices except current active DHCP port device
            if d.name != dev_name:
                LOG.debug("Found stale device %s, deleting", d.name)
                self.driver.unplug(d.name, namespace=network.namespace)

    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        self._update_dhcp_port(network, port)
        interface_name = self.get_interface_name(network, port)

        if ip_lib.ensure_device_is_ready(interface_name,
                                         namespace=network.namespace):
            LOG.debug('Reusing existing device: %s.', interface_name)
        else:
            try:
                self.driver.plug(network.id,
                                 port.id,
                                 interface_name,
                                 port.mac_address,
                                 namespace=network.namespace,
                                 mtu=network.get('mtu'))
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Unable to plug DHCP port for '
                                      'network %s. Releasing port.'),
                                  network.id)
                    self.plugin.release_dhcp_port(network.id, port.device_id)

            self.fill_dhcp_udp_checksums(namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            if not ipv6_utils.is_auto_address_subnet(subnet):
                net = netaddr.IPNetwork(subnet.cidr)
                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
                ip_cidrs.append(ip_cidr)

        if self.driver.use_gateway_ips:
            # For each DHCP-enabled subnet, add that subnet's gateway
            # IP address to the Linux device for the DHCP port.
for subnet in network.subnets: if not subnet.enable_dhcp: continue gateway = subnet.gateway_ip if gateway: net = netaddr.IPNetwork(subnet.cidr) ip_cidrs.append('%s/%s' % (gateway, net.prefixlen)) if self.conf.enable_isolated_metadata: ip_cidrs.append(METADATA_DEFAULT_CIDR) self.driver.init_l3(interface_name, ip_cidrs, namespace=network.namespace) self._set_default_route(network, interface_name) try: self._cleanup_stale_devices(network, port) except Exception: # catch everything as we don't want to fail because of # cleanup step LOG.error(_LE("Exception during stale dhcp device cleanup")) return interface_name def update(self, network, device_name): """Update device settings for the network's DHCP on this host.""" self._set_default_route(network, device_name) def destroy(self, network, device_name): """Destroy the device used for the network's DHCP on this host.""" if device_name: self.driver.unplug(device_name, namespace=network.namespace) else: LOG.debug('No interface exists for network %s', network.id) self.plugin.release_dhcp_port(network.id, self.get_device_id(network)) def fill_dhcp_udp_checksums(self, namespace): """Ensure DHCP reply packets always have correct UDP checksums.""" iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False, namespace=namespace) ipv4_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % constants.DHCP_RESPONSE_PORT) iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule) iptables_mgr.apply()
{ "content_hash": "48cffafd2d1ef9374e0c7443bbb8d5ba", "timestamp": "", "source": "github", "line_count": 1286, "max_line_length": 79, "avg_line_length": 41.052099533437016, "alnum_prop": 0.5495614948951566, "repo_name": "bigswitch/neutron", "id": "ee855dc9acca2ce3d560d74e8550cd4278ff212b", "size": "53429", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "neutron/agent/linux/dhcp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1047" }, { "name": "Python", "bytes": "8468247" }, { "name": "Shell", "bytes": "14648" } ], "symlink_target": "" }
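The setup_dhcp_port method in the dhcp.py file above leans on Python's for/else construct to try three port-setup strategies in order and fail loudly when none succeeds. Below is a minimal standalone sketch of that pattern; the strategy functions are hypothetical stand-ins, not Neutron APIs.

# For/else fallback chain: keep the first non-None result, raise if
# every strategy came up empty.
def find_existing(key):
    return None  # pretend nothing exists yet

def adopt_reserved(key):
    return None  # pretend nothing was reserved

def create_new(key):
    return {'id': key, 'source': 'created'}

def get_port(key):
    for strategy in (find_existing, adopt_reserved, create_new):
        port = strategy(key)
        if port:
            break
    else:
        # Runs only if the loop finished without hitting `break`.
        raise RuntimeError('no strategy produced a port')
    return port

print(get_port('net-1'))  # {'id': 'net-1', 'source': 'created'}

The else clause on a for loop executes only when the loop completes without a break, which is exactly what makes it a natural "no strategy worked" hook here.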
from PythonQt import QtCore, QtGui from director import lcmUtils from director.simpletimer import SimpleTimer from director.timercallback import TimerCallback import subprocess import os import sys class LCMLoggerWidget(object): def __init__(self, statusBar=None): self.manager = lcmUtils.LCMLoggerManager() self.statusBar = statusBar self.lastActiveLogFile = None self.numProcesses = 0 self.numLogFiles = 0 self.userTag = '' self.button = QtGui.QPushButton('') self.button.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.button.connect('customContextMenuRequested(const QPoint&)', self.showContextMenu) self.button.connect('clicked()', self.onClick) self.timer = TimerCallback(targetFps=0.25) self.timer.callback = self.updateState self.timer.start() def updateState(self): t = SimpleTimer() self.manager.updateExistingLoggerProcesses() activeLogFiles = self.manager.getActiveLogFilenames() self.numProcesses = len(self.manager.getActiveLoggerPids()) self.numLogFiles = len(activeLogFiles) if self.numLogFiles == 1: self.lastActiveLogFile = activeLogFiles[0] if self.numProcesses == 0: self.button.text = 'start logger' elif self.numProcesses == 1: self.button.text = 'stop logger' elif self.numProcesses > 1: self.button.text = 'stop all loggers' statusDescription = 'active' if self.numProcesses else 'last' logFileDescription = self.lastActiveLogFile or '<unknown>' self.button.setToolTip('%s log file: %s' % (statusDescription, logFileDescription)) def onClick(self): if self.numProcesses == 0: self.manager.startNewLogger(tag=self.userTag) self.updateState() self.showStatusMessage('start logging: ' + self.lastActiveLogFile) else: self.manager.killAllLoggingProcesses() self.showStatusMessage('stopped logging') self.updateState() def showStatusMessage(self, msg, timeout=2000): if self.statusBar: self.statusBar.showMessage(msg, timeout) def showContextMenu(self, clickPosition): globalPos = self.button.mapToGlobal(clickPosition) menu = QtGui.QMenu() action = menu.addAction('Stop logger') action.enabled = (self.numProcesses > 0) action = menu.addAction('Stop and delete log file') action.enabled = (self.numProcesses > 0 and self.lastActiveLogFile) action = menu.addAction('Set logger tag') action.enabled = (self.numProcesses == 0) action = menu.addAction('Copy log filename') action.enabled = (self.lastActiveLogFile is not None) action = menu.addAction('Review log') action.enabled = (self.lastActiveLogFile is not None) selectedAction = menu.exec_(globalPos) if selectedAction is None: return if selectedAction.text == 'Copy log filename': clipboard = QtGui.QApplication.instance().clipboard() clipboard.setText(self.lastActiveLogFile) self.showStatusMessage('copy to clipboard: ' + self.lastActiveLogFile) elif selectedAction.text == 'Stop logger': self.manager.killAllLoggingProcesses() self.showStatusMessage('stopped logger') self.updateState() elif selectedAction.text == 'Stop and delete log file': logFileToRemove = self.lastActiveLogFile self.manager.killAllLoggingProcesses() self.updateState() os.remove(logFileToRemove) self.showStatusMessage('deleted: ' + logFileToRemove) elif selectedAction.text == 'Set logger tag': inputDialog = QtGui.QInputDialog() inputDialog.setInputMode(inputDialog.TextInput) inputDialog.setLabelText('Log file tag:') inputDialog.setWindowTitle('Enter tag') inputDialog.setTextValue(self.userTag) result = inputDialog.exec_() if result: tag = inputDialog.textValue() self.userTag = tag self.showStatusMessage('Set lcm logger tag: ' + self.userTag) elif selectedAction.text == 'Review log': 
newEnv = dict(os.environ) newEnv['LCM_DEFAULT_URL'] = newEnv['LCM_REVIEW_DEFAULT_URL'] devnull = open(os.devnull, 'w') # Pass entire command line invocation of director to subprocess including cfg and json paths subprocess.Popen(sys.argv, stdout=devnull, stderr=devnull, env=newEnv) subprocess.Popen(['lcm-logplayer-gui', self.lastActiveLogFile], stdout=devnull, stderr=devnull, env=newEnv) subprocess.Popen(['bot-procman-sheriff', '-o'], stdout=devnull, stderr=devnull, env=newEnv)
{ "content_hash": "408f95b4b06ac6a10c445888df1a57d4", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 119, "avg_line_length": 37.80152671755725, "alnum_prop": 0.6453957996768982, "repo_name": "patmarion/director", "id": "838101ffdf62d920116635dde6730232dcdc090e", "size": "4952", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "src/python/director/lcmloggerwidget.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "121912" }, { "name": "C++", "bytes": "565385" }, { "name": "CMake", "bytes": "82478" }, { "name": "Dockerfile", "bytes": "2510" }, { "name": "GLSL", "bytes": "15443" }, { "name": "MATLAB", "bytes": "161948" }, { "name": "Makefile", "bytes": "5014" }, { "name": "Python", "bytes": "2282093" }, { "name": "Shell", "bytes": "14291" } ], "symlink_target": "" }
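The 'Review log' branch in lcmloggerwidget.py above launches helper processes with a copied, modified environment and silenced output. Here is a self-contained sketch of that pattern; the fallback URL and the trivial child command are placeholders, and the .get() default is a safety addition of this sketch rather than director's actual behavior (the original indexes LCM_REVIEW_DEFAULT_URL directly and will raise KeyError if it is unset).

import os
import subprocess
import sys

# Copy the parent environment and override one variable for the child.
child_env = dict(os.environ)
child_env['LCM_DEFAULT_URL'] = child_env.get('LCM_REVIEW_DEFAULT_URL',
                                             'udpm://239.255.76.67:7667')

# Silence the child's stdout/stderr; the context manager also closes
# the devnull handle, which the original code leaves open.
with open(os.devnull, 'w') as devnull:
    proc = subprocess.Popen([sys.executable, '-c', 'pass'],
                            stdout=devnull, stderr=devnull, env=child_env)
    proc.wait()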
from __future__ import absolute_import import logging from typing import Any, Dict, List, Set, Tuple, Optional, Text from django.contrib.auth.backends import RemoteUserBackend from django.conf import settings from django.http import HttpResponse import django.contrib.auth from django_auth_ldap.backend import LDAPBackend, _LDAPUser from zerver.lib.actions import do_create_user from zerver.models import UserProfile, Realm, get_user_profile_by_id, \ get_user_profile_by_email, remote_user_to_email, email_to_username, \ get_realm, get_realm_by_email_domain from apiclient.sample_tools import client as googleapiclient from oauth2client.crypt import AppIdentityError from social_core.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \ GithubTeamOAuth2 from social_core.exceptions import AuthFailed, SocialAuthBaseException from django.contrib.auth import authenticate from zerver.lib.users import check_full_name from zerver.lib.request import JsonableError from zerver.lib.utils import check_subdomain, get_subdomain from social_django.models import DjangoStorage from social_django.strategy import DjangoStrategy def pad_method_dict(method_dict): # type: (Dict[Text, bool]) -> Dict[Text, bool] """Pads an authentication methods dict to contain all auth backends supported by the software, regardless of whether they are configured on this server""" for key in AUTH_BACKEND_NAME_MAP: if key not in method_dict: method_dict[key] = False return method_dict def auth_enabled_helper(backends_to_check, realm): # type: (List[Text], Optional[Realm]) -> bool if realm is not None: enabled_method_dict = realm.authentication_methods_dict() pad_method_dict(enabled_method_dict) else: enabled_method_dict = dict((method, True) for method in Realm.AUTHENTICATION_FLAGS) pad_method_dict(enabled_method_dict) for supported_backend in django.contrib.auth.get_backends(): for backend_name in backends_to_check: backend = AUTH_BACKEND_NAME_MAP[backend_name] if enabled_method_dict[backend_name] and isinstance(supported_backend, backend): return True return False def ldap_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return auth_enabled_helper([u'LDAP'], realm) def email_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return auth_enabled_helper([u'Email'], realm) def password_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return ldap_auth_enabled(realm) or email_auth_enabled(realm) def dev_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return auth_enabled_helper([u'Dev'], realm) def google_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return auth_enabled_helper([u'Google'], realm) def github_auth_enabled(realm=None): # type: (Optional[Realm]) -> bool return auth_enabled_helper([u'GitHub'], realm) def any_oauth_backend_enabled(realm=None): # type: (Optional[Realm]) -> bool """Used by the login page process to determine whether to show the 'OR' for login with Google""" return auth_enabled_helper([u'GitHub', u'Google'], realm) def common_get_active_user_by_email(email, return_data=None): # type: (Text, Optional[Dict[str, Any]]) -> Optional[UserProfile] try: user_profile = get_user_profile_by_email(email) except UserProfile.DoesNotExist: return None if not user_profile.is_active: if return_data is not None: return_data['inactive_user'] = True return None if user_profile.realm.deactivated: if return_data is not None: return_data['inactive_realm'] = True return None return user_profile class ZulipAuthMixin(object): def get_user(self, user_profile_id): # type: (int) 
-> Optional[UserProfile] """ Get a UserProfile object from the user_profile_id. """ try: return get_user_profile_by_id(user_profile_id) except UserProfile.DoesNotExist: return None class SocialAuthMixin(ZulipAuthMixin): auth_backend_name = None # type: Text def get_email_address(self, *args, **kwargs): # type: (*Any, **Any) -> Text raise NotImplementedError def get_full_name(self, *args, **kwargs): # type: (*Any, **Any) -> Text raise NotImplementedError def authenticate(self, realm_subdomain='', # type: Optional[Text] storage=None, # type: Optional[DjangoStorage] strategy=None, # type: Optional[DjangoStrategy] user=None, # type: Optional[Dict[str, Any]] return_data=None, # type: Optional[Dict[str, Any]] response=None, # type: Optional[Dict[str, Any]] backend=None # type: Optional[GithubOAuth2] ): # type: (...) -> Optional[UserProfile] """ Django decides which `authenticate` to call by inspecting the arguments. So it's better to create `authenticate` function with well defined arguments. Keeping this function separate so that it can easily be overridden. """ if user is None: user = {} if return_data is None: return_data = {} if response is None: response = {} return self._common_authenticate(self, realm_subdomain=realm_subdomain, storage=storage, strategy=strategy, user=user, return_data=return_data, response=response, backend=backend) def _common_authenticate(self, *args, **kwargs): # type: (*Any, **Any) -> Optional[UserProfile] return_data = kwargs.get('return_data', {}) email_address = self.get_email_address(*args, **kwargs) if not email_address: return_data['invalid_email'] = True return None try: user_profile = get_user_profile_by_email(email_address) except UserProfile.DoesNotExist: return_data["valid_attestation"] = True return None if not user_profile.is_active: return_data["inactive_user"] = True return None if user_profile.realm.deactivated: return_data["inactive_realm"] = True return None if not check_subdomain(kwargs.get("realm_subdomain"), user_profile.realm.subdomain): return_data["invalid_subdomain"] = True return None if not auth_enabled_helper([self.auth_backend_name], user_profile.realm): return_data["auth_backend_disabled"] = True return None return user_profile def process_do_auth(self, user_profile, *args, **kwargs): # type: (UserProfile, *Any, **Any) -> Optional[HttpResponse] # These functions need to be imported here to avoid cyclic # dependency. from zerver.views.auth import (login_or_register_remote_user, redirect_to_subdomain_login_url) from zerver.views.registration import redirect_and_log_into_subdomain return_data = kwargs.get('return_data', {}) inactive_user = return_data.get('inactive_user') inactive_realm = return_data.get('inactive_realm') invalid_subdomain = return_data.get('invalid_subdomain') invalid_email = return_data.get('invalid_email') if inactive_user or inactive_realm: # Redirect to login page. We can't send to registration # workflow with these errors. We will redirect to login page. return None if invalid_email: # In case of invalid email, we will end up on registration page. # This seems better than redirecting to login page. logging.warning( "{} got invalid email argument.".format(self.auth_backend_name) ) strategy = self.strategy # type: ignore # This comes from Python Social Auth. 
request = strategy.request
        email_address = self.get_email_address(*args, **kwargs)
        full_name = self.get_full_name(*args, **kwargs)
        is_signup = strategy.session_get('is_signup') == '1'

        subdomain = strategy.session_get('subdomain')
        if not subdomain:
            return login_or_register_remote_user(request, email_address,
                                                 user_profile, full_name,
                                                 invalid_subdomain=bool(invalid_subdomain),
                                                 is_signup=is_signup)
        try:
            realm = Realm.objects.get(string_id=subdomain)
        except Realm.DoesNotExist:
            return redirect_to_subdomain_login_url()

        return redirect_and_log_into_subdomain(realm, full_name, email_address,
                                               is_signup=is_signup)

    def auth_complete(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[HttpResponse]
        """
        Returning `None` from this function will redirect the browser to
        the login page.
        """
        try:
            # Call the auth_complete method of BaseOAuth2 in Python Social Auth
            return super(SocialAuthMixin, self).auth_complete(*args, **kwargs)  # type: ignore
        except AuthFailed:
            return None
        except SocialAuthBaseException as e:
            logging.exception(e)
            return None

class ZulipDummyBackend(ZulipAuthMixin):
    """
    Used when we want to log you in but we don't know which backend to use.
    """

    def authenticate(self, username=None, realm_subdomain=None, use_dummy_backend=False,
                     return_data=None):
        # type: (Optional[Text], Optional[Text], bool, Optional[Dict[str, Any]]) -> Optional[UserProfile]
        assert username is not None
        if use_dummy_backend:
            user_profile = common_get_active_user_by_email(username)
            if user_profile is None:
                return None
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                # Guard against callers that don't supply return_data.
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
            return user_profile
        return None

class EmailAuthBackend(ZulipAuthMixin):
    """
    Email Authentication Backend

    Allows a user to sign in using an email/password pair rather than
    a username/password pair.
    """

    def authenticate(self, username=None, password=None, realm_subdomain=None, return_data=None):
        # type: (Optional[Text], Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile]
        """ Authenticate a user based on email address as the user name. """
        if username is None or password is None:
            # Return immediately. Otherwise we will look for a SQL row with
            # NULL username. While that's probably harmless, it's needless
            # exposure.
            return None

        user_profile = common_get_active_user_by_email(username, return_data=return_data)
        if user_profile is None:
            return None
        if not password_auth_enabled(user_profile.realm):
            if return_data is not None:
                return_data['password_auth_disabled'] = True
            return None
        if not email_auth_enabled(user_profile.realm):
            if return_data is not None:
                return_data['email_auth_disabled'] = True
            return None
        if user_profile.check_password(password):
            if not check_subdomain(realm_subdomain, user_profile.realm.subdomain):
                # Guard against callers that don't supply return_data.
                if return_data is not None:
                    return_data["invalid_subdomain"] = True
                return None
            return user_profile
        return None

class GoogleMobileOauth2Backend(ZulipAuthMixin):
    """
    Google Apps authentication for mobile devices

    Allows a user to sign in using a Google-issued OAuth2 token. 
Ref: https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess """ def authenticate(self, google_oauth2_token=None, realm_subdomain=None, return_data=None): # type: (Optional[str], Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile] if return_data is None: return_data = {} try: token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID) except AppIdentityError: return None if token_payload["email_verified"] in (True, "true"): try: user_profile = get_user_profile_by_email(token_payload["email"]) except UserProfile.DoesNotExist: return_data["valid_attestation"] = True return None if not user_profile.is_active: return_data["inactive_user"] = True return None if user_profile.realm.deactivated: return_data["inactive_realm"] = True return None if not check_subdomain(realm_subdomain, user_profile.realm.subdomain): return_data["invalid_subdomain"] = True return None if not google_auth_enabled(realm=user_profile.realm): return_data["google_auth_disabled"] = True return None return user_profile else: return_data["valid_attestation"] = False return None class ZulipRemoteUserBackend(RemoteUserBackend): create_unknown_user = False def authenticate(self, remote_user, realm_subdomain=None): # type: (str, Optional[Text]) -> Optional[UserProfile] if not remote_user: return None email = remote_user_to_email(remote_user) user_profile = common_get_active_user_by_email(email) if user_profile is None: return None if not check_subdomain(realm_subdomain, user_profile.realm.subdomain): return None if not auth_enabled_helper([u"RemoteUser"], user_profile.realm): return None return user_profile class ZulipLDAPException(Exception): pass class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend): # Don't use Django LDAP's permissions functions def has_perm(self, user, perm, obj=None): # type: (UserProfile, Any, Any) -> bool # Using Any type is safe because we are not doing anything with # the arguments. return False def has_module_perms(self, user, app_label): # type: (UserProfile, str) -> bool return False def get_all_permissions(self, user, obj=None): # type: (UserProfile, Any) -> Set # Using Any type is safe because we are not doing anything with # the arguments. return set() def get_group_permissions(self, user, obj=None): # type: (UserProfile, Any) -> Set # Using Any type is safe because we are not doing anything with # the arguments. 
return set() def django_to_ldap_username(self, username): # type: (Text) -> Text if settings.LDAP_APPEND_DOMAIN: if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN): raise ZulipLDAPException("Username does not match LDAP domain.") return email_to_username(username) return username def ldap_to_django_username(self, username): # type: (str) -> str if settings.LDAP_APPEND_DOMAIN: return "@".join((username, settings.LDAP_APPEND_DOMAIN)) return username class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase): def authenticate(self, username, password, realm_subdomain=None, return_data=None): # type: (Text, str, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile] try: if settings.REALMS_HAVE_SUBDOMAINS: self._realm = get_realm(realm_subdomain) else: self._realm = get_realm_by_email_domain(username) username = self.django_to_ldap_username(username) user_profile = ZulipLDAPAuthBackendBase.authenticate(self, username, password) if user_profile is None: return None if not check_subdomain(realm_subdomain, user_profile.realm.subdomain): return None return user_profile except Realm.DoesNotExist: return None except ZulipLDAPException: return None def get_or_create_user(self, username, ldap_user): # type: (str, _LDAPUser) -> Tuple[UserProfile, bool] try: user_profile = get_user_profile_by_email(username) if not user_profile.is_active or user_profile.realm.deactivated: raise ZulipLDAPException("Realm has been deactivated") if not ldap_auth_enabled(user_profile.realm): raise ZulipLDAPException("LDAP Authentication is not enabled") return user_profile, False except UserProfile.DoesNotExist: # No need to check for an inactive user since they don't exist yet if self._realm.deactivated: raise ZulipLDAPException("Realm has been deactivated") full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"] short_name = full_name = ldap_user.attrs[full_name_attr][0] try: full_name = check_full_name(full_name) except JsonableError as e: raise ZulipLDAPException(e.error) if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP: short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"] short_name = ldap_user.attrs[short_name_attr][0] user_profile = do_create_user(username, None, self._realm, full_name, short_name) return user_profile, True # Just like ZulipLDAPAuthBackend, but doesn't let you log in. class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase): def authenticate(self, username, password, realm_subdomain=None): # type: (Text, str, Optional[Text]) -> None return None class DevAuthBackend(ZulipAuthMixin): # Allow logging in as any user without a password. # This is used for convenience when developing Zulip. def authenticate(self, username, realm_subdomain=None, return_data=None): # type: (Text, Optional[Text], Optional[Dict[str, Any]]) -> Optional[UserProfile] user_profile = common_get_active_user_by_email(username, return_data=return_data) if user_profile is None: return None if not dev_auth_enabled(user_profile.realm): return None return user_profile class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2): auth_backend_name = u"GitHub" def get_email_address(self, *args, **kwargs): # type: (*Any, **Any) -> Optional[Text] try: return kwargs['response']['email'] except KeyError: return None def get_full_name(self, *args, **kwargs): # type: (*Any, **Any) -> Text # In case of any error return an empty string. Name is used by # the registration page to pre-populate the name field. However, # if it is not supplied, our registration process will make sure # that the user enters a valid name. 
try: name = kwargs['response']['name'] except KeyError: name = '' if name is None: return '' return name def do_auth(self, *args, **kwargs): # type: (*Any, **Any) -> Optional[HttpResponse] """ This function is called once the OAuth2 workflow is complete. We override this function to: 1. Inject `return_data` and `realm_admin` kwargs. These will be used by `authenticate()` function to make the decision. 2. Call the proper `do_auth` function depending on whether we are doing individual, team or organization based GitHub authentication. The actual decision on authentication is done in SocialAuthMixin._common_authenticate(). """ kwargs['return_data'] = {} request = self.strategy.request kwargs['realm_subdomain'] = get_subdomain(request) user_profile = None team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME if (team_id is None and org_name is None): try: user_profile = GithubOAuth2.do_auth(self, *args, **kwargs) except AuthFailed: logging.info("User authentication failed.") user_profile = None elif (team_id): backend = GithubTeamOAuth2(self.strategy, self.redirect_uri) try: user_profile = backend.do_auth(*args, **kwargs) except AuthFailed: logging.info("User is not member of GitHub team.") user_profile = None elif (org_name): backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri) try: user_profile = backend.do_auth(*args, **kwargs) except AuthFailed: logging.info("User is not member of GitHub organization.") user_profile = None return self.process_do_auth(user_profile, *args, **kwargs) AUTH_BACKEND_NAME_MAP = { u'Dev': DevAuthBackend, u'Email': EmailAuthBackend, u'GitHub': GitHubAuthBackend, u'Google': GoogleMobileOauth2Backend, u'LDAP': ZulipLDAPAuthBackend, u'RemoteUser': ZulipRemoteUserBackend, } # type: Dict[Text, Any]
{ "content_hash": "d21354776353532ca60d82ede648cad6", "timestamp": "", "source": "github", "line_count": 557, "max_line_length": 114, "avg_line_length": 39.99102333931777, "alnum_prop": 0.6127946127946128, "repo_name": "ryanbackman/zulip", "id": "ac52a7f2ee4df7b6b7e9b3a3b84e79f465b6fdae", "size": "22275", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "zproject/backends.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "392722" }, { "name": "Emacs Lisp", "bytes": "158" }, { "name": "Groovy", "bytes": "5509" }, { "name": "HTML", "bytes": "590505" }, { "name": "JavaScript", "bytes": "1783783" }, { "name": "Nginx", "bytes": "1280" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Perl", "bytes": "401825" }, { "name": "Puppet", "bytes": "87372" }, { "name": "Python", "bytes": "3908421" }, { "name": "Ruby", "bytes": "249744" }, { "name": "Shell", "bytes": "38065" } ], "symlink_target": "" }
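All the classes in backends.py above implement the same two-method contract Django expects from an authentication backend: authenticate() returns a user or None (letting the next configured backend try), and get_user() rehydrates a user from the session. A framework-free toy showing just that shape; the dict-backed user store is hypothetical.

# Toy auth backend: a plain dict stands in for the ORM; a real backend
# would return a User model instance.
USERS = {1: {'email': 'alice@example.com', 'active': True}}

class ToyBackend(object):
    def authenticate(self, username=None, password=None, **kwargs):
        for user in USERS.values():
            if user['email'] == username and user['active']:
                return user
        return None  # fall through to the next configured backend

    def get_user(self, user_id):
        # Rehydrate the user from whatever ID was stored in the session.
        return USERS.get(user_id)

backend = ToyBackend()
print(backend.authenticate(username='alice@example.com', password='x'))
print(backend.get_user(1))

Django decides which backend handles a login attempt by matching the keyword arguments passed to authenticate(), which is why the real backends above each accept a distinct signature.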
from __future__ import division, print_function import matplotlib matplotlib.use('Agg') from matplotlib import rc import matplotlib.pyplot as plt import pandas as pd def initialize_matplotlib(): inches_per_pt = 1.0 / 72.27 fig_width = 240 * inches_per_pt # width in inches fig_height = 160 * inches_per_pt #.4 * fig_width rc('axes', labelsize=6) rc('axes', titlesize=6) rc('axes', unicode_minus=False) rc('axes', grid=False) rc('figure', figsize=(fig_width, fig_height)) rc('grid', linestyle=':') rc('font', family='serif') rc('legend', fontsize=5) rc('lines', linewidth=.7) rc('ps', usedistiller='xpdf') rc('text', usetex=True) rc('xtick', labelsize=6) rc('ytick', labelsize=6) initialize_matplotlib() df = pd.read_excel('results_for_figure1.xlsx', sheetname='Figure3') styles = { 'TribeFlow-Dyn':'D', 'TribeFlow':'o', #'FPMC': #'PRLME': } colors = { 'LFM-1k':'g', 'LFM-G':'m', 'Bkite':'y', 'FourSQ':'b', 'Yoo':'r' } for method in styles: for dset in colors: idx = (df['Name'] == method) & (df['Dataset'] == dset) x_ax = df[idx]['Runtime_s'] y_ax = df[idx]['MRR'] horizontalalignment = 'left' verticalalignment = 'bottom' if colors[dset] == 'g': verticalalignment = 'top' for x, y in zip(x_ax, y_ax): plt.text(x, y, \ method + '\n' + \ dset, fontsize=7, \ verticalalignment=verticalalignment, \ horizontalalignment=horizontalalignment) ps = colors[dset] + styles[method] plt.semilogx(x_ax, y_ax, ps, alpha=.5, markersize=5) ax = plt.gca() ax.tick_params(direction='out', pad=0.3) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.ylim((0, 0.16)) plt.xlim((1e2, 1e6)) plt.minorticks_off() plt.ylabel('MRR', labelpad=0) plt.xlabel('Training Time (s)', labelpad=0) plt.tight_layout(pad=0.2) plt.savefig('figure3.pdf')
{ "content_hash": "96edde13485a09be9d943cf5fd6cc6fe", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 67, "avg_line_length": 26.457831325301203, "alnum_prop": 0.5655737704918032, "repo_name": "flaviovdf/tribeflow", "id": "f86056c51beecacdac10dd2ecb37a3c7a2ee74f7", "size": "2214", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "scripts/paper-data/plot_figure3.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "16016" }, { "name": "Jupyter Notebook", "bytes": "58814" }, { "name": "Makefile", "bytes": "337" }, { "name": "Python", "bytes": "158324" }, { "name": "Shell", "bytes": "3233" } ], "symlink_target": "" }
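The initialize_matplotlib() helper in plot_figure3.py sizes the figure by converting a LaTeX column width from points to inches at 72.27 pt per inch. The arithmetic, checked standalone:

# 240 pt column width -> inches; 72.27 pt per inch (TeX points).
inches_per_pt = 1.0 / 72.27
fig_width = 240 * inches_per_pt   # ~3.32 in
fig_height = 160 * inches_per_pt  # ~2.21 in
print('%.2f x %.2f inches' % (fig_width, fig_height))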
import time import fixtures import mock import nova from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import cast_as_call from nova.tests.unit import policy_fixture from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova.virt.libvirt import guest as libvirt_guest class TestSerialConsoleLiveMigrate(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(TestSerialConsoleLiveMigrate, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(nova_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) # Replace libvirt with fakelibvirt self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.guest.libvirt', fakelibvirt)) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.admin_api = api_fixture.admin_api self.api = api_fixture.api # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) nova.tests.unit.fake_network.set_stub_network_methods(self) self.flags(compute_driver='libvirt.LibvirtDriver') self.flags(enabled=True, group="serial_console") self.flags(enabled=False, group="vnc") self.flags(enabled=False, group="spice") self.flags(use_usb_tablet=False, group="libvirt") self.flags(host="test_compute1") self.start_service('conductor') self.flags(driver='chance_scheduler', group='scheduler') self.start_service('scheduler') self.compute = self.start_service('compute', host='test_compute1') self.consoleauth = self.start_service('consoleauth') self.useFixture(cast_as_call.CastAsCall(self)) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id'] @mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector') @mock.patch('nova.virt.libvirt.guest.Guest.get_job_info') @mock.patch.object(fakelibvirt.Domain, 'migrateToURI2') @mock.patch('nova.virt.libvirt.host.Host.get_connection') @mock.patch('nova.virt.disk.api.get_disk_size', return_value=1024) @mock.patch('os.path.getsize', return_value=1024) @mock.patch('nova.conductor.tasks.live_migrate.LiveMigrationTask.' '_check_destination_is_not_source', return_value=False) @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image') def test_serial_console_live_migrate(self, mock_create_image, mock_conductor_source_check, mock_path_get_size, mock_get_disk_size, mock_host_get_connection, mock_migrate_to_uri, mock_get_job_info, mock_get_volume_connector): """Regression test for bug #1595962. If the graphical consoles VNC and SPICE are disabled, the live-migration of an instance will result in an ERROR state. VNC and SPICE are usually disabled on IBM z systems platforms where graphical consoles are not available. The serial console is then enabled and VNC + SPICE are disabled. 
The error will be raised at https://github.com/openstack/nova/blob/ 4f33047d07f5a11b208c344fe206aba01cd8e6fe/ nova/virt/libvirt/driver.py#L5842-L5852 """ mock_get_job_info.return_value = libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED) fake_connection = fakelibvirt.Connection('qemu:///system', version=1002007, hv_version=2001000) mock_host_get_connection.return_value = fake_connection server_attr = dict(name='server1', imageRef=self.image_id, flavorRef=self.flavor_id) server = self.api.post_server({'server': server_attr}) server_id = server['id'] self.wait_till_active_or_timeout(server_id) post = {"os-migrateLive": { "block_migration": False, "disk_over_commit": False, "host": "test_compute1" }} try: # This should succeed self.admin_api.post_server_action(server_id, post) self.wait_till_active_or_timeout(server_id) except Exception as ex: self.fail(ex.response.content) def wait_till_active_or_timeout(self, server_id): timeout = 0.0 server = self.api.get_server(server_id) while server['status'] != "ACTIVE" and timeout < 10.0: time.sleep(.1) timeout += .1 server = self.api.get_server(server_id) if server['status'] != "ACTIVE": self.fail("The server is not active after the timeout.")
{ "content_hash": "09e3b430e052cd881faa98c34c80c139", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 74, "avg_line_length": 43.378787878787875, "alnum_prop": 0.6105483758295495, "repo_name": "jianghuaw/nova", "id": "df0fb6af7a3e81a3a5165a6df9ec3f92450808e6", "size": "6301", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova/tests/functional/regressions/test_bug_1595962.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1435" }, { "name": "PHP", "bytes": "32515" }, { "name": "Python", "bytes": "19932348" }, { "name": "Shell", "bytes": "28290" }, { "name": "Smarty", "bytes": "339635" } ], "symlink_target": "" }
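wait_till_active_or_timeout() in the Nova functional test above is a fixed-interval polling loop with a deadline. A generic version of the same idea follows; the fetch callable is a stand-in for self.api.get_server, not a Nova API.

import time

def wait_for(fetch, predicate, timeout=10.0, interval=0.1):
    # Re-check a condition at a fixed interval until it holds or the
    # deadline passes; returns whether the final state satisfied it.
    deadline = time.monotonic() + timeout
    state = fetch()
    while not predicate(state) and time.monotonic() < deadline:
        time.sleep(interval)
        state = fetch()
    return predicate(state)

# Simulated server states: two BUILD polls, then ACTIVE.
states = iter(['BUILD', 'BUILD', 'ACTIVE'])
ok = wait_for(lambda: next(states, 'ACTIVE'),
              lambda s: s == 'ACTIVE', timeout=1.0, interval=0.01)
print(ok)  # True

Accumulating a monotonic deadline, as here, avoids the drift that the original's additive `timeout += .1` counter can introduce when sleep overshoots.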
import sys from libcloud.utils.py3 import httplib from libcloud.utils.py3 import ET from libcloud.common.dimensiondata import DimensionDataAPIException from libcloud.common.types import InvalidCredsError from libcloud.backup.base import BackupTargetJob from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN from libcloud.test import MockHttp, unittest from libcloud.test.file_fixtures import BackupFileFixtures from libcloud.test.secrets import DIMENSIONDATA_PARAMS class DimensionData_v2_3_Tests(unittest.TestCase): def setUp(self): DimensionData.connectionCls.active_api_version = '2.3' DimensionData.connectionCls.conn_class = DimensionDataMockHttp DimensionDataMockHttp.type = None self.driver = DimensionData(*DIMENSIONDATA_PARAMS) def test_invalid_region(self): with self.assertRaises(ValueError): self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah') def test_invalid_creds(self): DimensionDataMockHttp.type = 'UNAUTHORIZED' with self.assertRaises(InvalidCredsError): self.driver.list_targets() def test_list_targets(self): targets = self.driver.list_targets() self.assertEqual(len(targets), 2) self.assertEqual(targets[0].id, '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10') self.assertEqual(targets[0].address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise') def test_create_target(self): target = self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87', extra={'servicePlan': 'Enterprise'}) self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f') self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(target.extra['servicePlan'], 'Enterprise') def test_create_target_DEFAULT(self): DimensionDataMockHttp.type = 'DEFAULT' target = self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f') self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87') def test_create_target_EXISTS(self): DimensionDataMockHttp.type = 'EXISTS' with self.assertRaises(DimensionDataAPIException) as context: self.driver.create_target( 'name', 'e75ead52-692f-4314-8725-c8a4f4d13a87', extra={'servicePlan': 'Enterprise'}) self.assertEqual(context.exception.code, 'ERROR') self.assertEqual(context.exception.msg, 'Cloud backup for this server is already enabled or being enabled (state: NORMAL).') def test_update_target(self): target = self.driver.list_targets()[0] extra = {'servicePlan': 'Essentials'} new_target = self.driver.update_target(target, extra=extra) self.assertEqual(new_target.extra['servicePlan'], 'Essentials') def test_update_target_DEFAULT(self): DimensionDataMockHttp.type = 'DEFAULT' target = 'e75ead52-692f-4314-8725-c8a4f4d13a87' self.driver.update_target(target) def test_update_target_STR(self): target = 'e75ead52-692f-4314-8725-c8a4f4d13a87' extra = {'servicePlan': 'Essentials'} new_target = self.driver.update_target(target, extra=extra) self.assertEqual(new_target.extra['servicePlan'], 'Essentials') def test_delete_target(self): target = self.driver.list_targets()[0] self.assertTrue(self.driver.delete_target(target)) def test_ex_add_client_to_target(self): target = self.driver.list_targets()[0] client = self.driver.ex_list_available_client_types(target)[0] storage_policy = self.driver.ex_list_available_storage_policies(target)[0] schedule_policy = 
self.driver.ex_list_available_schedule_policies(target)[0] self.assertTrue( self.driver.ex_add_client_to_target(target, client, storage_policy, schedule_policy, 'ON_FAILURE', 'nobody@example.com') ) def test_ex_add_client_to_target_STR(self): self.assertTrue( self.driver.ex_add_client_to_target('e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux', '14 Day Storage Policy', '12AM - 6AM', 'ON_FAILURE', 'nobody@example.com') ) def test_ex_get_backup_details_for_target(self): target = self.driver.list_targets()[0] response = self.driver.ex_get_backup_details_for_target(target) self.assertEqual(response.service_plan, 'Enterprise') client = response.clients[0] self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8') self.assertEqual(client.type.type, 'FA.Linux') self.assertEqual(client.running_job.progress, 5) self.assertTrue(isinstance(client.running_job, BackupTargetJob)) self.assertEqual(len(client.alert.notify_list), 2) self.assertTrue(isinstance(client.alert.notify_list, list)) def test_ex_get_backup_details_for_target_NOBACKUP(self): target = self.driver.list_targets()[0].address DimensionDataMockHttp.type = 'NOBACKUP' response = self.driver.ex_get_backup_details_for_target(target) self.assertTrue(response is None) def test_ex_cancel_target_job(self): target = self.driver.list_targets()[0] response = self.driver.ex_get_backup_details_for_target(target) client = response.clients[0] self.assertTrue(isinstance(client.running_job, BackupTargetJob)) success = client.running_job.cancel() self.assertTrue(success) def test_ex_cancel_target_job_with_extras(self): success = self.driver.cancel_target_job( None, ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8', ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87' ) self.assertTrue(success) def test_ex_cancel_target_job_FAIL(self): DimensionDataMockHttp.type = 'FAIL' with self.assertRaises(DimensionDataAPIException) as context: self.driver.cancel_target_job( None, ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8', ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87' ) self.assertEqual(context.exception.code, 'ERROR') """Test a backup info for a target that does not have a client""" def test_ex_get_backup_details_for_target_NO_CLIENT(self): DimensionDataMockHttp.type = 'NOCLIENT' response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(response.service_plan, 'Essentials') self.assertEqual(len(response.clients), 0) """Test a backup details that has a client, but no alerting or running jobs""" def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self): DimensionDataMockHttp.type = 'NOJOB' response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314_8725-c8a4f4d13a87') self.assertEqual(response.service_plan, 'Enterprise') self.assertTrue(isinstance(response.clients, list)) self.assertEqual(len(response.clients), 1) client = response.clients[0] self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8') self.assertEqual(client.type.type, 'FA.Linux') self.assertIsNone(client.running_job) self.assertIsNone(client.alert) """Test getting backup info for a server that doesn't exist""" def test_ex_get_backup_details_for_target_DISABLED(self): DimensionDataMockHttp.type = 'DISABLED' with self.assertRaises(DimensionDataAPIException) as context: self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(context.exception.code, 'ERROR') self.assertEqual(context.exception.msg, 'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been 
provisioned for backup') def test_ex_list_available_client_types(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_client_types(target) self.assertEqual(len(answer), 1) self.assertEqual(answer[0].type, 'FA.Linux') self.assertEqual(answer[0].is_file_system, True) self.assertEqual(answer[0].description, 'Linux File system') def test_ex_list_available_storage_policies(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_storage_policies(target) self.assertEqual(len(answer), 1) self.assertEqual(answer[0].name, '30 Day Storage Policy + Secondary Copy') self.assertEqual(answer[0].retention_period, 30) self.assertEqual(answer[0].secondary_location, 'Primary') def test_ex_list_available_schedule_policies(self): target = self.driver.list_targets()[0] answer = self.driver.ex_list_available_schedule_policies(target) self.assertEqual(len(answer), 1) self.assertEqual(answer[0].name, '12AM - 6AM') self.assertEqual(answer[0].description, 'Daily backup will start between 12AM - 6AM') def test_ex_remove_client_from_target(self): target = self.driver.list_targets()[0] client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0] self.assertTrue(self.driver.ex_remove_client_from_target(target, client)) def test_ex_remove_client_from_target_STR(self): self.assertTrue( self.driver.ex_remove_client_from_target( 'e75ead52-692f-4314-8725-c8a4f4d13a87', '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) ) def test_ex_remove_client_from_target_FAIL(self): DimensionDataMockHttp.type = 'FAIL' with self.assertRaises(DimensionDataAPIException) as context: self.driver.ex_remove_client_from_target( 'e75ead52-692f-4314-8725-c8a4f4d13a87', '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) self.assertEqual(context.exception.code, 'ERROR') self.assertTrue('Backup Client is currently performing another operation' in context.exception.msg) def test_priv_target_to_target_address(self): target = self.driver.list_targets()[0] self.assertEqual( self.driver._target_to_target_address(target), 'e75ead52-692f-4314-8725-c8a4f4d13a87' ) def test_priv_target_to_target_address_STR(self): self.assertEqual( self.driver._target_to_target_address('e75ead52-692f-4314-8725-c8a4f4d13a87'), 'e75ead52-692f-4314-8725-c8a4f4d13a87' ) def test_priv_target_to_target_address_TYPEERROR(self): with self.assertRaises(TypeError): self.driver._target_to_target_address([1, 2, 3]) def test_priv_client_to_client_id(self): client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0] self.assertEqual( self.driver._client_to_client_id(client), '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) def test_priv_client_to_client_id_STR(self): self.assertEqual( self.driver._client_to_client_id('30b1ff76-c76d-4d7c-b39d-3b72be0384c8'), '30b1ff76-c76d-4d7c-b39d-3b72be0384c8' ) def test_priv_client_to_client_id_TYPEERROR(self): with self.assertRaises(TypeError): self.driver._client_to_client_id([1, 2, 3]) class InvalidRequestError(Exception): def __init__(self, tag): super(InvalidRequestError, self).__init__("Invalid Request - %s" % tag) class DimensionDataMockHttp(MockHttp): fixtures = BackupFileFixtures('dimensiondata') def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) def _oec_0_9_myaccount(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_oec_0_9_myaccount_EXISTS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_FAIL(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers): body = self.fixtures.load( 'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers): body = self.fixtures.load( 'server_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers): body = self.fixtures.load( '_backup_client_type.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy( self, method, url, body, headers): body = self.fixtures.load( '_backup_client_storagePolicy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def 
_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy( self, method, url, body, headers): body = self.fixtures.load( '_backup_client_schedulePolicy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client( self, method, url, body, headers): if method == 'POST': body = self.fixtures.load( '_backup_client_SUCCESS_PUT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) else: raise ValueError("Unknown Method {0}".format(method)) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT( self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_NOCLIENT.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED( self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_DISABLED.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB( self, method, url, body, headers): # only gets here are implemented # If we get any other method something has gone wrong assert(method == 'GET') body = self.fixtures.load( '_backup_INFO_NOJOB.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT( self, method, url, body, headers): if method != 'POST': raise InvalidRequestError('Only POST is accepted for this test') request = ET.fromstring(body) service_plan = request.get('servicePlan') if service_plan != DEFAULT_BACKUP_PLAN: raise InvalidRequestError('The default plan %s should have been passed in. 
Not %s' % (DEFAULT_BACKUP_PLAN, service_plan))
        body = self.fixtures.load(
            '_backup_ENABLE.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_ENABLE.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'GET':
            if url.endswith('disable'):
                body = self.fixtures.load(
                    '_backup_DISABLE.xml')
                return (httplib.OK, body, {}, httplib.responses[httplib.OK])
            body = self.fixtures.load(
                '_backup_INFO.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP(
            self, method, url, body, headers):
        assert(method == 'GET')
        body = self.fixtures.load('server_server_NOBACKUP.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS(
            self, method, url, body, headers):
        # only POSTs are implemented
        # If we get any other method something has gone wrong
        assert(method == 'POST')
        body = self.fixtures.load(
            '_backup_EXISTS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != 'Essentials':
            raise InvalidRequestError("Expected Essentials backup plan in request")
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            raise InvalidRequestError("Expected %s backup plan in test" % DEFAULT_BACKUP_PLAN)
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                ('_remove_backup_client.xml')
            )
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                (''
                 '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml')
            )
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                ('_remove_backup_client_FAIL.xml')
            )
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                (''
                 '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml')
            )
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

if __name__ == '__main__':
    sys.exit(unittest.main())
{ "content_hash": "e2dd54ff9b98d7b7c84aaee0306ce870", "timestamp": "", "source": "github", "line_count": 482, "max_line_length": 154, "avg_line_length": 48.051867219917014, "alnum_prop": 0.6566642200250421, "repo_name": "Kami/libcloud", "id": "13039d4c9c3fc4b5a5b8455345b3dfb8a524d7df", "size": "23943", "binary": false, "copies": "10", "ref": "refs/heads/trunk", "path": "libcloud/test/backup/test_dimensiondata_v2_3.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1819" }, { "name": "HTML", "bytes": "2545" }, { "name": "PowerShell", "bytes": "410" }, { "name": "Python", "bytes": "9122888" }, { "name": "Shell", "bytes": "12994" } ], "symlink_target": "" }
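The DimensionDataMockHttp class above relies on libcloud's MockHttp convention of turning a request path into a fixture method name (path separators, dots, and dashes become underscores, with an optional scenario suffix from MockHttp.type). The toy dispatcher below mimics that idea — it is a sketch of the convention, not the real libcloud implementation.

# Toy URL-to-method dispatch in the MockHttp style.
def method_name(path, type_suffix=None):
    name = '_' + path.strip('/').replace('/', '_').replace('.', '_').replace('-', '_')
    if type_suffix:
        name += '_' + type_suffix
    return name

class ToyMock(object):
    def _oec_0_9_myaccount(self):
        return (200, '<account/>')

    def _oec_0_9_myaccount_UNAUTHORIZED(self):
        return (401, '')

    def request(self, path, type_suffix=None):
        # Look up the handler whose name encodes the path + scenario.
        return getattr(self, method_name(path, type_suffix))()

mock = ToyMock()
print(mock.request('/oec/0.9/myaccount'))                  # (200, '<account/>')
print(mock.request('/oec/0.9/myaccount', 'UNAUTHORIZED'))  # (401, '')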
""" Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from rest_framework import serializers from django.contrib.auth.models import User, Group from main.models import Image, BasicUser, Project, AnnotationsJson class BasicUserSerializer(serializers.ModelSerializer): images_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all()) projects_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=Project.objects.all()) annotations_by_user = serializers.PrimaryKeyRelatedField(many=True, queryset=AnnotationsJson.objects.all()) class Meta: model = BasicUser fields = ['id', 'display_name', 'email', 'projects_by_user', 'images_by_user', 'annotations_by_user'] def get_authenticated_user(validated_data): email = validated_data.pop("owner_email") # if not User.objects.filter(email=email).exists(): # user = User.objects.create_user(email, email, email) # user.save() return User.objects.get(email=email) class ProjectSerializer(serializers.ModelSerializer): # images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all()) owner = serializers.ReadOnlyField(source='owner.email') class Meta: model = Project fields = ['id', 'name', 'owner', 'labels_json'] def create(self, validated_data, *args, **kwargs): owner = get_authenticated_user(validated_data) return Project.objects.create(owner=owner, **validated_data) class ImageSerializer(serializers.ModelSerializer): owner = serializers.ReadOnlyField(source='owner.email') project_id = serializers.ReadOnlyField(source='part_of_project.id') class Meta: model = Image fields = ['id', 'title', 'description', 'owner', 'image', 'project_id'] def create(self, validated_data, *args, **kwargs): owner = get_authenticated_user(validated_data) project_id = validated_data.pop("project_id") return Image.objects.create(owner=owner, part_of_project=Project.objects.get(id=project_id), **validated_data) class AnnotationsJsonSerializer(serializers.ModelSerializer): #images = serializers.PrimaryKeyRelatedField(many=True, queryset=Image.objects.all()) owner = serializers.ReadOnlyField(source='owner.email') image_id = serializers.ReadOnlyField(source='on_image.id') class Meta: model = AnnotationsJson fields = ['id', 'owner', 'content_json', "image_id"] def create(self, validated_data, *args, **kwargs): owner = get_authenticated_user(validated_data) image_id = validated_data.pop("image_id") return AnnotationsJson.objects.create(owner=owner, on_image=Image.objects.get(id=image_id), **validated_data) class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group fields = ['id','name',] def create(self, validated_data, *args, **kwargs): return Group.objects.create(**validated_data) class UserSerializer(serializers.ModelSerializer): images_by_user = ImageSerializer(read_only=True, many=True) images_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='images_by_user', many=True, queryset=Image.objects.all()) projects_by_user = ProjectSerializer(read_only=True, many=True) projects_by_user_id = 
serializers.PrimaryKeyRelatedField(write_only=True, source='projects_by_user', many=True, queryset=Project.objects.all()) annotations_by_user = AnnotationsJsonSerializer(read_only=True, many=True) annotations_by_user_id = serializers.PrimaryKeyRelatedField(write_only=True, source='annotations_by_user', many=True, queryset=AnnotationsJson.objects.all()) groups = GroupSerializer(many=True) class Meta: model = User fields = ['email', 'projects_by_user', 'projects_by_user_id', 'images_by_user', 'images_by_user_id', 'annotations_by_user', 'annotations_by_user_id', 'groups',]
{ "content_hash": "6e175d47745b8312f396c0f2a964ef11", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 168, "avg_line_length": 41.63551401869159, "alnum_prop": 0.7207631874298541, "repo_name": "kartta-labs/noter-backend", "id": "1ffad3cff4511a08e683410821a44fe468a54211", "size": "4455", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "noter_backend/main/serializers.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1889" }, { "name": "Python", "bytes": "56419" }, { "name": "Shell", "bytes": "2057" } ], "symlink_target": "" }
import subprocess from typing import List import rich_click as click PYTHON_VERSIONS = ["3.7", "3.8", "3.9"] GHCR_IO_PREFIX = "ghcr.io" GHCR_IO_IMAGES = [ "{prefix}/{repo}/{branch}/ci/python{python_version}:latest", "{prefix}/{repo}/{branch}/prod/python{python_version}:latest", ] # noinspection StrFormat def pull_push_all_images( source_prefix: str, target_prefix: str, images: List[str], source_branch: str, source_repo: str, target_branch: str, target_repo: str, ): for python_version in PYTHON_VERSIONS: for image in images: source_image = image.format( prefix=source_prefix, branch=source_branch, repo=source_repo, python_version=python_version ) target_image = image.format( prefix=target_prefix, branch=target_branch, repo=target_repo, python_version=python_version ) print(f"Copying image: {source_image} -> {target_image}") subprocess.run(["docker", "pull", source_image], check=True) subprocess.run(["docker", "tag", source_image, target_image], check=True) subprocess.run(["docker", "push", target_image], check=True) @click.group(invoke_without_command=True) @click.option("--source-branch", type=str, default="main", help="Source branch name [main]") @click.option("--target-branch", type=str, default="main", help="Target branch name [main]") @click.option("--source-repo", type=str, default="apache/airflow", help="Source repo") @click.option("--target-repo", type=str, default="apache/airflow", help="Target repo") def main( source_branch: str, target_branch: str, source_repo: str, target_repo: str, ): pull_push_all_images( GHCR_IO_PREFIX, GHCR_IO_PREFIX, GHCR_IO_IMAGES, source_branch, source_repo, target_branch, target_repo ) if __name__ == "__main__": main()
{ "content_hash": "a2985ce80b7acb21bc45dda59ee6ef03", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 110, "avg_line_length": 32.8448275862069, "alnum_prop": 0.6409448818897637, "repo_name": "bolkedebruin/airflow", "id": "bcb81c55223f83811ce5dc8c8f06553314c88a0a", "size": "3064", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dev/retag_docker_images.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25286" }, { "name": "Dockerfile", "bytes": "40459" }, { "name": "HCL", "bytes": "3786" }, { "name": "HTML", "bytes": "157840" }, { "name": "JavaScript", "bytes": "167972" }, { "name": "Jinja", "bytes": "33382" }, { "name": "Jupyter Notebook", "bytes": "2933" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "19287942" }, { "name": "Shell", "bytes": "645244" }, { "name": "TypeScript", "bytes": "173854" } ], "symlink_target": "" }
from django import forms from . import models class ThoughtForm(forms.ModelForm): class Meta: fields = ('condition', 'notes') model = models.Thought
{ "content_hash": "fdb32d64b2fe5f85c8337a0f535d12c9", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 39, "avg_line_length": 19, "alnum_prop": 0.6666666666666666, "repo_name": "treehouse/livestream-django-feelings", "id": "a38eb3232e52925c352e40ed085c1b625f609ec2", "size": "171", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "feelings/thoughts/forms.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "65" }, { "name": "HTML", "bytes": "18469" }, { "name": "JavaScript", "bytes": "1252960" }, { "name": "Python", "bytes": "38118" } ], "symlink_target": "" }
import pytest import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import scoped_session, sessionmaker from cleancat import Integer, Schema, StopValidation, String, ValidationError from cleancat.sqla import SQLAEmbeddedReference, SQLAReference, object_as_dict Base = declarative_base() class Person(Base): __tablename__ = 'cleancattest' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) age = sa.Column(sa.Integer) @pytest.fixture def sqla_session(): """Set up an SQLA connection, create all tables, and return a session.""" engine = sa.create_engine('sqlite:///:memory:') Base.metadata.create_all(engine) session = scoped_session(sessionmaker(bind=engine)) Person.query = session.query_property() return session def test_object_as_dict(): steve = Person(name='Steve', age=30) assert object_as_dict(steve) == {'id': None, 'age': 30, 'name': 'Steve'} @pytest.mark.usefixtures('sqla_session') class TestSQLAReferenceField: def test_it_updates_an_existing_instance(self, sqla_session): steve = Person(name='Steve', age=30) sqla_session.add(steve) sqla_session.commit() clean_val = SQLAReference(Person).clean(str(steve.id)) assert isinstance(clean_val, Person) assert clean_val.id == steve.id def test_updating_missing_instance_fails(self): expected_err_msg = 'Object does not exist.' with pytest.raises(ValidationError, match=expected_err_msg): SQLAReference(Person).clean('id-that-does-not-exist') def test_it_can_be_optional(self): field = SQLAReference(Person, required=False) with pytest.raises(StopValidation) as e: field.clean(None) assert e.value.args[0] is None @pytest.mark.usefixtures('sqla_session') class TestSchemaWithSQLAEmbeddedReference: @pytest.fixture def book_schema_cls(self): class PersonSchema(Schema): name = String() age = Integer() class BookSchema(Schema): author = SQLAEmbeddedReference( Person, PersonSchema, required=False ) title = String(required=False) return BookSchema def test_it_creates_a_new_instance(self, book_schema_cls): schema = book_schema_cls({'author': {'name': 'New Author', 'age': 30}}) data = schema.full_clean() author = data['author'] assert isinstance(author, Person) assert not author.id assert author.name == 'New Author' assert author.age == 30 def test_it_updates_an_existing_instance( self, book_schema_cls, sqla_session ): steve = Person(name='Steve', age=30) sqla_session.add(steve) sqla_session.commit() schema = book_schema_cls( {'author': {'id': str(steve.id), 'name': 'Updated', 'age': 50}} ) data = schema.full_clean() author = data['author'] assert isinstance(author, Person) assert author.id == steve.id assert author.name == 'Updated' assert author.age == 50 def test_updating_missing_instance_fails(self, book_schema_cls): schema = book_schema_cls( {'author': {'id': 123456789, 'name': 'Arbitrary Non-existent ID'}} ) pytest.raises(ValidationError, schema.full_clean) assert schema.field_errors == {'author': 'Object does not exist.'} def test_it_can_be_optional(self, book_schema_cls): schema = book_schema_cls( {'title': 'Book without an author', 'author': None} ) data = schema.full_clean() assert data == {'title': 'Book without an author', 'author': None}
{ "content_hash": "b74f75147e5134d22334750466519be5", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 79, "avg_line_length": 33.669642857142854, "alnum_prop": 0.635905595332803, "repo_name": "closeio/cleancat", "id": "3916bc564c76cf88ce4dbb25a04c99fde1efd255", "size": "3771", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_sqla.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "90101" } ], "symlink_target": "" }
from django.conf.urls import patterns, url from django.contrib import admin urlpatterns = patterns('account.views', url(r'^login/$', 'login', name='login'), url(r'^logout/$', 'logout', name='logout'), )
{ "content_hash": "f49ac8c8d2bfc00e31880597368cd25b", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 47, "avg_line_length": 30.285714285714285, "alnum_prop": 0.6745283018867925, "repo_name": "gdgand/Festi", "id": "42a2d4b05daeaaa1d305580b5b2b63757a28c278", "size": "212", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "festi/account/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "367769" }, { "name": "CoffeeScript", "bytes": "15698" }, { "name": "Erlang", "bytes": "2128" }, { "name": "HTML", "bytes": "97067" }, { "name": "JavaScript", "bytes": "71030" }, { "name": "Python", "bytes": "36611" }, { "name": "Ruby", "bytes": "583" }, { "name": "Shell", "bytes": "1176" } ], "symlink_target": "" }
import unittest from vehicle import Vehicle class UtDemo(unittest.TestCase): '''A Unit Test Demo''' def setUp(self): "Create a list of timestamp strings" self.time_list = ['20120912072912', '20120913072230', '20120912073312'] for f in self.time_list: print f def test_int(self): self.assertEqual(2, 2, 'numbers are not equal') def test_vehicle(self): v = Vehicle('Corolla') v.display() if __name__ == '__main__': unittest.main()
{ "content_hash": "730f7487ea4d40ff972543d6859e5821", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 73, "avg_line_length": 24.9, "alnum_prop": 0.6024096385542169, "repo_name": "vollov/py-lab", "id": "a960771067f3064aa34cee6b5f73f7c43b0d9d21", "size": "516", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/oo/utdemo.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "22" }, { "name": "JavaScript", "bytes": "685" }, { "name": "PLSQL", "bytes": "6838" }, { "name": "Python", "bytes": "254226" }, { "name": "Shell", "bytes": "734" }, { "name": "Smarty", "bytes": "1829" } ], "symlink_target": "" }
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice # Connects to the current device, returning a MonkeyDevice object device = MonkeyRunner.waitForConnection() # Installs the Android package. Notice that this method returns a boolean, so you can test # to see if the installation worked. device.installPackage('../app/target/net-d53dev-dslfy-android-1.0.apk') # sets a variable with the package's internal name package = 'net.d53dev.dslfy.android' # sets a variable with the name of an Activity in the package activity = 'net.d53dev.dslfy.android.ui.CarouselActivity' # sets the name of the component to start runComponent = package + '/' + activity # Runs the component device.startActivity(component=runComponent) # Waits for the activity to finish loading MonkeyRunner.sleep(5) # Types an email address into the currently focused text field device.type('example@example.com') # Takes a screenshot result = device.takeSnapshot() # Writes the screenshot to a file result.writeToFile('screenshot.png','png')
{ "content_hash": "418732af8f97a9a2ab958fb6c523943a", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 90, "avg_line_length": 30.8, "alnum_prop": 0.7813852813852814, "repo_name": "d53dave/DSLFY-Android", "id": "b9374572e102990fb5735a85b1c956380dcf5865", "size": "980", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "integration-tests/monkeyrunnerTestSuite.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "187405" }, { "name": "Python", "bytes": "980" } ], "symlink_target": "" }
import sys number = 0 while number >= 0: print "Enter number:" number = float(sys.stdin.readline()) if number >= 0: if number % 2 == 0: print "Even" else: print "Odd" print "Bye"
{ "content_hash": "82e55ed33e6b245aa18d90b9fe6c3a65", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 40, "avg_line_length": 19.333333333333332, "alnum_prop": 0.5129310344827587, "repo_name": "nathano/Perl_to_Python_Converter", "id": "6adfe6c6e9ace17ee4fbdb2e555bacf164d19366", "size": "256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/subset4/odd0.py", "mode": "33188", "license": "mit", "language": [ { "name": "Perl", "bytes": "20894" }, { "name": "Python", "bytes": "2735" } ], "symlink_target": "" }

The GitHub repository retrieval source for code-rag-bench, containing all Python files from the entire GitHub dump (available in github-repos).
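
For readers who want to pull these records programmatically, here is a minimal sketch using the Hugging Face datasets library. The dataset ID matches this page; the split name and the "text"/"meta" field names are assumptions based on the records shown above.

from datasets import load_dataset

# Load the dataset from the Hub (assumes a single "train" split).
dataset = load_dataset("code-rag-bench/github-repos-python", split="train")

# Each record pairs a file's source code ("text") with its metadata ("meta").
for record in dataset.select(range(3)):
    meta = record["meta"]
    print(meta["repo_name"], meta["path"])
    print(record["text"][:200])  # preview the first 200 characters of the file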
