#!/usr/bin/env python import argparse import multiprocessing import os import queue import re import pandas import pandas.io.formats.excel from Bio import SeqIO # Maximum columns allowed in a LibreOffice # spreadsheet is 1024. Excel allows for # 16,384 columns, but we'll set the lower # number as the maximum. Some browsers # (e.g., Firefox on Linux) are configured # to use LibreOffice for Excel spreadsheets. MAXCOLS = 1024 OUTPUT_EXCEL_DIR = 'output_excel_dir' INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir' INPUT_JSON_DIR = 'input_json_dir' INPUT_NEWICK_DIR = 'input_newick_dir' def annotate_table(table_df, group, annotation_dict): for gbk_chrome, pro in list(annotation_dict.items()): ref_pos = list(table_df) ref_series = pandas.Series(ref_pos) ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position']) all_ref = ref_df[ref_df['reference'] == gbk_chrome] positions = all_ref.position.to_frame() # Create an annotation file. annotation_file = "%s_annotations.csv" % group with open(annotation_file, "a") as fh: for _, row in positions.iterrows(): pos = row.position try: aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']] try: chrom, name, locus, tag = aaa.values[0] print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh) except ValueError: # If only one annotation for the entire # chromosome (e.g., flu) then having [0] fails chrom, name, locus, tag = aaa.values print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh) except KeyError: print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh) # Read the annotation file into a data frame. annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index') # Remove the annotation_file from disk since both # cascade and sort tables are built using the file, # and it is opened for writing in append mode. os.remove(annotation_file) # Process the data. table_df_transposed = table_df.T table_df_transposed.index = table_df_transposed.index.rename('index') table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True) table_df = table_df_transposed.T return table_df def excel_formatter(json_file_name, excel_file_name, group, annotation_dict): pandas.io.formats.excel.header_style = None table_df = pandas.read_json(json_file_name, orient='split') if annotation_dict is not None: table_df = annotate_table(table_df, group, annotation_dict) else: table_df = table_df.append(pandas.Series(name='no annotations')) writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter') table_df.to_excel(writer, sheet_name='Sheet1') writer_book = writer.book ws = writer.sheets['Sheet1'] format_a = writer_book.add_format({'bg_color': '#58FA82'}) format_g = writer_book.add_format({'bg_color': '#F7FE2E'}) format_c = writer_book.add_format({'bg_color': '#0000FF'}) format_t = writer_book.add_format({'bg_color': '#FF0000'}) format_normal = writer_book.add_format({'bg_color': '#FDFEFE'}) formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'}) format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'}) format_n = writer_book.add_format({'bg_color': '#E2CFDD'}) rows, cols = table_df.shape ws.set_column(0, 0, 30) ws.set_column(1, cols, 2.1) ws.freeze_panes(2, 1) format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'}) # Set last row. 
ws.set_row(rows + 1, cols + 1, format_annotation) # Make sure that row/column locations don't overlap. ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n}) ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n}) format_rotation = writer_book.add_format({}) format_rotation.set_rotation(90) for column_num, column_name in enumerate(list(table_df.columns)): ws.write(0, column_num + 1, column_name, format_rotation) format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'}) # Set last row. ws.set_row(rows, 400, format_annotation) writer.save() def get_annotation_dict(gbk_file): gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank")) annotation_dict = {} tmp_file = "features.csv" # Create a file of chromosomes and features. for chromosome in list(gbk_dict.keys()): with open(tmp_file, 'w+') as fh: for feature in gbk_dict[chromosome].features: if "CDS" in feature.type or "rRNA" in feature.type: try: product = feature.qualifiers['product'][0] except KeyError: product = None try: locus = feature.qualifiers['locus_tag'][0] except KeyError: locus = None try: gene = feature.qualifiers['gene'][0] except KeyError: gene = None fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene)) # Read the chromosomes and features file into a data frame. df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"]) # Process the data. df = df.sort_values(['start', 'gene'], ascending=[True, False]) df = df.drop_duplicates('start') pro = df.reset_index(drop=True) pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both') annotation_dict[chromosome] = pro return annotation_dict def get_sample_name(file_path): base_file_name = os.path.basename(file_path) if base_file_name.find(".") > 0: # Eliminate the extension. 
return os.path.splitext(base_file_name)[0] return base_file_name def output_cascade_table(cascade_order, mqdf, group, annotation_dict): cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner') output_table(cascade_order_mq, "cascade", group, annotation_dict) def output_excel(df, type_str, group, annotation_dict, count=None): # Output the temporary json file that # is used by the excel_formatter. if count is None: if group is None: json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq.json" % type_str) excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str) else: json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq.json" % (group, type_str)) excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str)) else: # The table has more columns than is allowed by the # MAXCOLS setting, so multiple files will be produced # as an output collection. if group is None: json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq_%d.json" % (type_str, count)) excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count)) else: json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq_%d.json" % (group, type_str, count)) excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count)) df.to_json(json_file_name, orient='split') # Output the Excel file. excel_formatter(json_file_name, excel_file_name, group, annotation_dict) def output_sort_table(cascade_order, mqdf, group, annotation_dict): sort_df = cascade_order.T sort_df['abs_value'] = sort_df.index sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True) sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1) sort_df.pos = sort_df.pos.astype(int) sort_df = sort_df.sort_values(by=['pos']) sort_df = sort_df.drop(['pos'], axis=1) sort_df = sort_df.T sort_order_mq = pandas.concat([sort_df, mqdf], join='inner') output_table(sort_order_mq, "sort", group, annotation_dict) def output_table(df, type_str, group, annotation_dict): if isinstance(group, str) and group.startswith("dataset"): # Inputs are single files, not collections, # so input file names are not useful for naming # output files. group_str = None else: group_str = group count = 0 chunk_start = 0 chunk_end = 0 column_count = df.shape[1] if column_count >= MAXCOLS: # Here the number of columns is greater than # the maximum allowed by Excel, so multiple # outputs will be produced. while column_count >= MAXCOLS: count += 1 chunk_end += MAXCOLS df_of_type = df.iloc[:, chunk_start:chunk_end] output_excel(df_of_type, type_str, group_str, annotation_dict, count=count) chunk_start += MAXCOLS column_count -= MAXCOLS count += 1 df_of_type = df.iloc[:, chunk_start:] output_excel(df_of_type, type_str, group_str, annotation_dict, count=count) else: output_excel(df, type_str, group_str, annotation_dict) def preprocess_tables(task_queue, annotation_dict, timeout): while True: try: tup = task_queue.get(block=True, timeout=timeout) except queue.Empty: break newick_file, json_file, json_avg_mq_file = tup avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split') # Map quality to dataframe. mqdf = avg_mq_series.to_frame(name='MQ') mqdf = mqdf.T # Get the group. 
group = get_sample_name(newick_file) snps_df = pandas.read_json(json_file, orient='split') with open(newick_file, 'r') as fh: for line in fh: line = re.sub('[:,]', '\n', line) line = re.sub('[)(]', '', line) line = re.sub(r'[0-9].*\.[0-9].*\n', '', line) line = re.sub('root\n', '', line) sample_order = line.split('\n') sample_order = list([_f for _f in sample_order if _f]) sample_order.insert(0, 'root') tree_order = snps_df.loc[sample_order] # Count number of SNPs in each column. snp_per_column = [] for column_header in tree_order: count = 0 column = tree_order[column_header] for element in column: if element != column[0]: count = count + 1 snp_per_column.append(count) row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column") # Count number of SNPS from the # top of each column in the table. snp_from_top = [] for column_header in tree_order: count = 0 column = tree_order[column_header] # for each element in the column # skip the first element for element in column[1:]: if element == column[0]: count = count + 1 else: break snp_from_top.append(count) row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top") tree_order = tree_order.append([row1]) tree_order = tree_order.append([row2]) # In pandas=0.18.1 even this does not work: # abc = row1.to_frame() # abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18) # tree_order.append(abc) # Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions" tree_order = tree_order.T tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False]) tree_order = tree_order.T # Remove snp_per_column and snp_from_top rows. cascade_order = tree_order[:-2] # Output the cascade table. output_cascade_table(cascade_order, mqdf, group, annotation_dict) # Output the sorted table. output_sort_table(cascade_order, mqdf, group, annotation_dict) task_queue.task_done() def set_num_cpus(num_files, processes): num_cpus = len(os.sched_getaffinity(0)) if num_files < num_cpus and num_files < processes: return num_files if num_cpus < processes: half_cpus = int(num_cpus / 2) if num_files < half_cpus: return num_files return half_cpus return processes if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file') parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file') parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file') parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file'), parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting') args = parser.parse_args() if args.gbk_file is not None: # Create the annotation_dict for annotating # the Excel tables. annotation_dict = get_annotation_dict(args.gbk_file) else: annotation_dict = None # The assumption here is that the list of files # in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are # named such that they are properly matched if # the directories contain more than 1 file (i.e., # hopefully the newick file names and json file names # will be something like Mbovis-01D6_* so they can be # sorted and properly associated with each other). 
if args.input_newick is not None: newick_files = [args.input_newick] else: newick_files = [] for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)): file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name)) newick_files.append(file_path) if args.input_snps_json is not None: json_files = [args.input_snps_json] else: json_files = [] for file_name in sorted(os.listdir(INPUT_JSON_DIR)): file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name)) json_files.append(file_path) if args.input_avg_mq_json is not None: json_avg_mq_files = [args.input_avg_mq_json] else: json_avg_mq_files = [] for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)): file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name)) json_avg_mq_files.append(file_path) multiprocessing.set_start_method('spawn') queue1 = multiprocessing.JoinableQueue() queue2 = multiprocessing.JoinableQueue() num_files = len(newick_files) cpus = set_num_cpus(num_files, args.processes) # Set a timeout for get()s in the queue. timeout = 0.05 for i, newick_file in enumerate(newick_files): json_file = json_files[i] json_avg_mq_file = json_avg_mq_files[i] queue1.put((newick_file, json_file, json_avg_mq_file)) # Complete the preprocess_tables task. processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)] for p in processes: p.start() for p in processes: p.join() queue1.join() if queue1.empty(): queue1.close() queue1.join_thread()
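The column-chunking logic in `output_table` above caps each Excel output at `MAXCOLS` columns and emits one file per chunk. Below is a minimal, standalone sketch of just that splitting step, assuming a plain pandas DataFrame; the JSON/Excel naming and formatting done by `output_excel` are omitted.

```python
import pandas

MAXCOLS = 1024  # LibreOffice column limit used by the script above


def split_into_chunks(df, max_cols=MAXCOLS):
    """Yield successive column slices of df, each at most max_cols wide."""
    column_count = df.shape[1]
    chunk_start = 0
    while column_count > 0:
        yield df.iloc[:, chunk_start:chunk_start + max_cols]
        chunk_start += max_cols
        column_count -= max_cols


# Example: a 2500-column frame is split into chunks of 1024, 1024 and 452 columns.
wide_df = pandas.DataFrame([[0] * 2500])
print([chunk.shape[1] for chunk in split_into_chunks(wide_df)])
```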
"""Module for the v0 version of the `SPTrans API <http://www.sptrans.com.br/desenvolvedores/APIOlhoVivo/Documentacao.aspx?1>`_. The first thing you have to do, in order to use it, is to instantiate a client and authenticate to the API: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') Then you can use the other methods to grab data from the API. """ from collections import namedtuple from datetime import date, datetime, time import json try: from urllib import urlencode except ImportError: # pragma: no cover from urllib.parse import urlencode import requests BASE_URL = 'http://api.olhovivo.sptrans.com.br/v0' class RequestError(Exception): """Raised when the request failes to be accomplished. Normally this is due to the client not being authenticated anymore. In this case, just authenticate again, and it should be back at work. """ class AuthenticationError(Exception): """Raised when the authentication fails - for example, with a wrong token -.""" class TupleMapMixin(object): MAPPING = {} @classmethod def from_dict(cls, result_dict): kwargs = {} for key, value in cls.MAPPING.items(): if isinstance(value, str): kwargs[key] = result_dict[value] else: kwargs[key] = value.resolve(result_dict) return cls(**kwargs) def build_tuple_class(name, mapping): base_classes = (namedtuple(name, mapping.keys()), TupleMapMixin) return type(name, base_classes, {'MAPPING': mapping}) def time_string_to_datetime(time_string): hour_parts = time_string.split(':') hour, minute = [int(part) for part in hour_parts] return datetime.combine(date.today(), time(hour=hour, minute=minute)) class TimeField(object): def __init__(self, field): self.field = field def resolve(self, result_dict): return time_string_to_datetime(result_dict[self.field]) class TupleField(object): def __init__(self, field, tuple_class): self.field = field self.tuple_class = tuple_class def resolve(self, result_dict): return self.tuple_class.from_dict(result_dict[self.field]) class TupleListField(object): def __init__(self, field, tuple_class): self.field = field self.tuple_class = tuple_class def resolve(self, result_dict): return [self.tuple_class.from_dict(internal_dict) for internal_dict in result_dict[self.field]] Route = build_tuple_class('Route', { 'code': 'CodigoLinha', 'circular': 'Circular', 'sign': 'Letreiro', 'direction': 'Sentido', 'type': 'Tipo', 'main_to_sec': 'DenominacaoTPTS', 'sec_to_main': 'DenominacaoTSTP', 'info': 'Informacoes', }) """A namedtuple representing a route. :var code: (:class:`int`) The route code. :var circular: (:class:`bool`) Wether it's a circular route or not (without a secondary terminal). :var sign: (:class:`str`) The first part of the route sign. :var direction: (:class:`int`) The route direction. "1" means "main to secondary terminal", "2" means "secondary to main terminal". :var type: (:class:`int`) The route type. :var main_to_sec: (:class:`str`) The name of the route when moving from the main terminal to the second one. :var sec_to_main: (:class:`str`) The name of the route when moving from the second terminal to the main one. :var info: (:class:`str`) Extra information about the route. """ Stop = build_tuple_class('Stop', { 'code': 'CodigoParada', 'name': 'Nome', 'address': 'Endereco', 'latitude': 'Latitude', 'longitude': 'Longitude', }) """A namedtuple representing a bus stop. :var code: (:class:`int`) The stop code. :var name: (:class:`str`) The stop name. :var address: (:class:`str`) The stop address. :var latitude: (:class:`float`) The stop latitude. 
:var longitude: (:class:`float`) The stop longitude. """ Lane = build_tuple_class('Lane', { 'code': 'CodCorredor', 'cot': 'CodCot', 'name': 'Nome', }) """A namedtuple representing a bus lane. :var code: (:class:`int`) The lane code. :var cot: (:class:`int`) The lane "cot" (?). :var name: (:class:`str`) The lane name. """ Vehicle = build_tuple_class('Vehicle', { 'prefix': 'p', 'accessible': 'a', 'latitude': 'py', 'longitude': 'px', }) """A namedtuple representing a vehicle (bus) with its position. :var prefix: (:class:`str`) The vehicle prefix painted in the bus. :var accessible: (:class:`bool`) Wether the vehicle is accessible or not. :var latitude: (:class:`float`) The vehicle latitude. :var longitude: (:class:`float`) The vehicle longitude. """ VehicleForecast = build_tuple_class('VehicleForecast', { 'prefix': 'p', 'accessible': 'a', 'arriving_at': TimeField('t'), 'latitude': 'py', 'longitude': 'px', }) """A namedtuple representing a vehicle (bus) with its position and forecast to arrive at a certain stop. :var prefix: (:class:`str`) The vehicle prefix painted in the bus. :var accessible: (:class:`bool`) Wether the vehicle is accessible or not. :var arriving_at: (:class:`datetime.datetime`) The time that the vehicle is expected to arrive. :var latitude: (:class:`float`) The vehicle latitude. :var longitude: (:class:`float`) The vehicle longitude. """ Positions = build_tuple_class('Positions', { 'time': TimeField('hr'), 'vehicles': TupleListField('vs', Vehicle), }) """A namedtuple representing a sequence of vehicles positions, with the time when the information was retrieved. :var time: (:class:`datetime.datetime`) The time when the information was retrieved. :var vehicles: (:class:`list`) The list of :class:`vehicles <Vehicle>`. """ RouteWithVehicles = build_tuple_class('RouteWithVehicles', { 'sign': 'c', 'code': 'cl', 'direction': 'sl', 'main_to_sec': 'lt0', 'sec_to_main': 'lt1', 'quantity': 'qv', 'vehicles': TupleListField('vs', VehicleForecast), }) """A namedtuple representing a route with a sequence of vehicles with their current positions. :var sign: (:class:`str`) The first part of the route sign. :var code: (:class:`int`) The route code. :var direction: (:class:`int`) The route direction. "1" means "main to secondary terminal", "2" means "secondary to main terminal". :var main_to_sec: (:class:`str`) The name of the route when moving from the main terminal to the second one. :var sec_to_main: (:class:`str`) The name of the route when moving from the second terminal to the main one. :var quantity: (:class:`int`) The quantity of vehicles. :var vehicles: (:class:`list`) The list of :class:`vehicles <Vehicle>`. """ StopWithRoutes = build_tuple_class('StopWithRoutes', { 'code': 'cp', 'name': 'np', 'latitude': 'py', 'longitude': 'px', 'routes': TupleListField('l', RouteWithVehicles), }) """A namedtuple representing a bus stop with a list of routes that pass through this stop. :var code: (:class:`int`) The stop code. :var name: (:class:`str`) The stop name. :var latitude: (:class:`float`) The stop latitude. :var longitude: (:class:`float`) The stop longitude. :var routes: (:class:`list`) The list of :class:`routes <Route>` that pass through this stop. """ StopWithVehicles = build_tuple_class('StopWithVehicles', { 'code': 'cp', 'name': 'np', 'latitude': 'py', 'longitude': 'px', 'vehicles': TupleListField('vs', VehicleForecast), }) """A namedtuple representing a bus stop with a list of vehicles that pass through this stop. :var code: (:class:`int`) The stop code. 
:var name: (:class:`str`) The stop name. :var latitude: (:class:`float`) The stop latitude. :var longitude: (:class:`float`) The stop longitude. :var vehicles: (:class:`list`) The list of :class:`vehicles <Vehicle>`. """ ForecastWithStop = build_tuple_class('ForecastWithStop', { 'time': TimeField('hr'), 'stop': TupleField('p', StopWithRoutes), }) """A namedtuple representing a bus stop forecast with routes and the time when the information was retrieved. :var time: (:class:`datetime.datetime`) The time when the information was retrieved. :var stop: (:class:`StopWithRoutes`) The bus stop with :class:`routes <Route>`. """ ForecastWithStops = build_tuple_class('ForecastWithStops', { 'time': TimeField('hr'), 'stops': TupleListField('ps', StopWithVehicles), }) """A namedtuple representing a list of bus stops forecast with vehicles and the time when the information was retrieved. :var time: (:class:`datetime.datetime`) The time when the information was retrieved. :var stops: (:class:`list` of :class:`StopWithVehicles`) The bus stops. """ class Client(object): """Main client class. .. warning:: Any method (except :meth:`authenticate`) may raise :class:`RequestError` if the client is not authenticated anymore. So keep an eye on the authentication state. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') """ _cookies = None def _build_url(self, endpoint, **kwargs): query_string = urlencode(kwargs) return '{}/{}?{}'.format(BASE_URL, endpoint, query_string) def _get_content(self, endpoint, **kwargs): url = self._build_url(endpoint, **kwargs) response = requests.get(url, cookies=self._cookies) return response.content.decode('latin1') def _get_json(self, endpoint, **kwargs): content = self._get_content(endpoint, **kwargs) result = json.loads(content) if isinstance(result, dict) and tuple(result.keys()) == (u'Message', ): raise RequestError(result[u'Message']) return result def authenticate(self, token): """Authenticates to the webservice. :param token: The API token string. :type token: :class:`str` :raises: :class:`AuthenticationError` when there's an error during authentication. """ url = self._build_url('Login/Autenticar', token=token) response = requests.post(url) result = json.loads(response.content.decode('latin1')) if not result: raise AuthenticationError('Cannot authenticate with token "{}"'.format(token)) self._cookies = response.cookies def search_routes(self, keywords): """Searches for routes that match the provided keywords. :param keywords: The keywords, in a single string, to use for matching. :type keywords: :class:`str` :return: A generator that yields :class:`Route` objects. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') for route in client.search_routes('butanta'): print(route.code, route.sign) """ result_list = self._get_json('Linha/Buscar', termosBusca=keywords) for result_dict in result_list: yield Route.from_dict(result_dict) def search_stops(self, keywords): """Searches for bus stops that match the provided keywords. :param keywords: The keywords, in a single string, to use for matching. :type keywords: :class:`str` :return: A generator that yields :class:`Stop` objects. 
Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') for stop in client.search_stops('butanta'): print(stop.code, stop.name) """ result_list = self._get_json('Parada/Buscar', termosBusca=keywords) for result_dict in result_list: yield Stop.from_dict(result_dict) def search_stops_by_route(self, code): """Searches for bus stops that are passed by the route specified by its code. :param code: The route code to use for matching. :type code: :class:`int` :return: A generator that yields :class:`Stop` objects. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') for stop in client.search_stops_by_route(1234): print(stop.code, stop.name) """ result_list = self._get_json('Parada/BuscarParadasPorLinha', codigoLinha=code) for result_dict in result_list: yield Stop.from_dict(result_dict) def search_stops_by_lane(self, code): """Searches for bus stops that are contained in a lane specified by its code. :param code: The lane code to use for matching. :type code: :class:`int` :return: A generator that yields :class:`Stop` objects. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') for stop in client.search_stops_by_lane(1234): print(stop.code, stop.name) """ result_list = self._get_json('Parada/BuscarParadasPorCorredor', codigoCorredor=code) for result_dict in result_list: yield Stop.from_dict(result_dict) def list_lanes(self): """Lists all the bus lanes in the city. :return: A generator that yields :class:`Lane` objects. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') for lane in client.list_lanes(): print(lane.code, lane.name) """ result_list = self._get_json('Corredor') for result_dict in result_list: yield Lane.from_dict(result_dict) def get_positions(self, code): """Gets the vehicles with their current positions, provided a route code. :param code: The route code to use for matching. :type code: :class:`int` :return: A single :class:`Positions` object. Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') positions = client.get_positions(1234) print(positions.time) for vehicle in positions.vehicles: print(vehicle.prefix) """ result_dict = self._get_json('Posicao', codigoLinha=code) return Positions.from_dict(result_dict) def get_forecast(self, stop_code=None, route_code=None): """Gets the arrival forecast, provided a route code or a stop code or both. You must provide at least one of the parameters. If you provide only a `stop_code`, it will return a forecast for all routes that attend a certain stop. If you provide only a `route_code`, it will return a forecast for all stops that a certain route attends to. If you provide both, it will return a forecast for the specific route attending to the specific stop provided. :param stop_code: The stop code to use for matching. :type stop_code: :class:`int` :param route_code: The stop code to use for matching. :type route_code: :class:`int` :return: A single :class:`ForecastWithStop` object, when passing only `stop_code` or both. :return: A single :class:`ForecastWithStops` object, when passing only `route_code`. 
Example: :: from sptrans.v0 import Client client = Client() client.authenticate('this is my token') forecast = client.get_forecast(stop_code=1234) for route in forecast.stop.routes: for vehicle in route.vehicles: print(vehicle.prefix) forecast = client.get_forecast(stop_code=1234, route_code=2345) for route in forecast.stop.routes: for vehicle in route.vehicles: print(vehicle.prefix) forecast = client.get_forecast(route_code=2345) for stop in forecast.stops: for vehicle in stop.vehicles: print(vehicle.prefix) """ if stop_code is None: result_dict = self._get_json('Previsao/Linha', codigoLinha=route_code) return ForecastWithStops.from_dict(result_dict) if route_code is None: result_dict = self._get_json('Previsao/Parada', codigoParada=stop_code) else: result_dict = self._get_json('Previsao', codigoParada=stop_code, codigoLinha=route_code) return ForecastWithStop.from_dict(result_dict)
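The declarative mapping above (`build_tuple_class` plus `TupleMapMixin`) is what turns the API's Portuguese JSON keys into namedtuple attributes. A quick illustration with a hand-written result dict follows; only the keys come from the `Route` mapping defined above, and the values are invented for the example.

```python
from sptrans.v0 import Route

# Keys follow the 'Linha/Buscar' payload as mapped by Route; the values are made up.
result_dict = {
    'CodigoLinha': 1234,
    'Circular': False,
    'Letreiro': '8012',
    'Sentido': 1,
    'Tipo': 10,
    'DenominacaoTPTS': 'TERM. BUTANTA',
    'DenominacaoTSTP': 'JD. BONFIGLIOLI',
    'Informacoes': None,
}

route = Route.from_dict(result_dict)
print(route.code, route.sign, route.main_to_sec)  # 1234 8012 TERM. BUTANTA
```

Fields backed by `TimeField`, `TupleField` or `TupleListField` (for example `Positions.vehicles`) are resolved recursively by the same `from_dict` machinery, so nested API payloads map to nested namedtuples without extra glue code.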
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Module to collect configuration to run specific jobs """ from __future__ import absolute_import, division, print_function import os import copy from fermipy.jobs.file_archive import FileFlags from fermipy.jobs.link import Link from fermipy.jobs.gtlink import Gtlink from fermipy.jobs.app_link import AppLink from fermipy.jobs.scatter_gather import ScatterGather from fermipy.jobs.slac_impl import make_nfs_path from fermipy.diffuse.utils import create_inputlist from fermipy.diffuse.name_policy import NameFactory from fermipy.diffuse.binning import Component from fermipy.diffuse.diffuse_src_manager import make_ring_dicts,\ make_diffuse_comp_info_dict from fermipy.diffuse.catalog_src_manager import make_catalog_comp_dict from fermipy.diffuse import defaults as diffuse_defaults NAME_FACTORY = NameFactory() def _make_ltcube_file_list(ltsumfile, num_files): """Make the list of input files for a particular energy bin X psf type """ outbasename = os.path.basename(ltsumfile) lt_list_file = ltsumfile.replace('fits', 'lst') outfile = open(lt_list_file, 'w') for i in range(num_files): split_key = "%06i" % i output_dir = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes', split_key) filepath = os.path.join(output_dir, outbasename.replace('.fits', '_%s.fits' % split_key)) outfile.write(filepath) outfile.write("\n") outfile.close() return '@' + lt_list_file class Gtlink_select(Gtlink): """Small wrapper to run gtselect """ appname = 'gtselect' linkname_default = 'gtselect' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(emin=diffuse_defaults.gtopts['emin'], emax=diffuse_defaults.gtopts['emax'], infile=diffuse_defaults.gtopts['infile'], outfile=diffuse_defaults.gtopts['outfile'], zmax=diffuse_defaults.gtopts['zmax'], tmin=diffuse_defaults.gtopts['tmin'], tmax=diffuse_defaults.gtopts['tmax'], evclass=diffuse_defaults.gtopts['evclass'], evtype=diffuse_defaults.gtopts['evtype'], pfiles=diffuse_defaults.gtopts['pfiles']) default_file_args = dict(infile=FileFlags.input_mask, outfile=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_bin(Gtlink): """Small wrapper to run gtbin """ appname = 'gtbin' linkname_default = 'gtbin' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(algorithm=('HEALPIX', "Binning alogrithm", str), coordsys=diffuse_defaults.gtopts['coordsys'], hpx_order=diffuse_defaults.gtopts['hpx_order'], evfile=diffuse_defaults.gtopts['evfile'], outfile=diffuse_defaults.gtopts['outfile'], emin=diffuse_defaults.gtopts['emin'], emax=diffuse_defaults.gtopts['emax'], enumbins=diffuse_defaults.gtopts['enumbins'], ebinalg=diffuse_defaults.gtopts['ebinalg'], ebinfile=diffuse_defaults.gtopts['ebinfile'], pfiles=diffuse_defaults.gtopts['pfiles']) default_file_args = dict(evfile=FileFlags.in_stage_mask, outfile=FileFlags.out_stage_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_expcube2(Gtlink): """Small wrapper to run gtexpcube2 """ appname = 'gtexpcube2' linkname_default = 'gtexpcube2' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(irfs=diffuse_defaults.gtopts['irfs'], evtype=diffuse_defaults.gtopts['evtype'], hpx_order=diffuse_defaults.gtopts['hpx_order'], infile=(None, "Input livetime cube file", str), cmap=diffuse_defaults.gtopts['cmap'], outfile=diffuse_defaults.gtopts['outfile'], coordsys=('GAL', "Coordinate 
system", str)) default_file_args = dict(infile=FileFlags.input_mask, cmap=FileFlags.input_mask, outfile=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_scrmaps(Gtlink): """Small wrapper to run gtscrmaps """ appname = 'gtscrmaps' linkname_default = 'gtscrmaps' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(irfs=diffuse_defaults.gtopts['irfs'], expcube=diffuse_defaults.gtopts['expcube'], bexpmap=diffuse_defaults.gtopts['bexpmap'], cmap=diffuse_defaults.gtopts['cmap'], srcmdl=diffuse_defaults.gtopts['srcmdl'], outfile=diffuse_defaults.gtopts['outfile']) default_file_args = dict(expcube=FileFlags.input_mask, cmap=FileFlags.input_mask, bexpmap=FileFlags.input_mask, srcmdl=FileFlags.input_mask, outfile=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_ltsum(Gtlink): """Small wrapper to run gtltsum """ appname = 'gtltsum' linkname_default = 'gtltsum' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(infile1=(None, "Livetime cube 1 or list of files", str), infile2=("none", "Livetime cube 2", str), outfile=(None, "Output file", str)) default_file_args = dict(infile1=FileFlags.input_mask, outfile=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_mktime(Gtlink): """Small wrapper to run gtmktime """ appname = 'gtmktime' linkname_default = 'gtmktime' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(evfile=(None, 'Input FT1 File', str), outfile=(None, 'Output FT1 File', str), scfile=(None, 'Input FT2 file', str), roicut=(False, 'Apply ROI-based zenith angle cut', bool), filter=(None, 'Filter expression', str), pfiles=(None, "PFILES directory", str)) default_file_args = dict(evfile=FileFlags.in_stage_mask, scfile=FileFlags.in_stage_mask, outfile=FileFlags.out_stage_mask) __doc__ += Link.construct_docstring(default_options) class Gtlink_ltcube(Gtlink): """Small wrapper to run gtltcube """ appname = 'gtltcube' linkname_default = 'gtltcube' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(evfile=(None, 'Input FT1 File', str), scfile=(None, 'Input FT2 file', str), outfile=(None, 'Output Livetime cube File', str), dcostheta=(0.025, 'Step size in cos(theta)', float), binsz=(1., 'Pixel size (degrees)', float), phibins=(0, 'Number of phi bins', int), zmin=(0, 'Minimum zenith angle', float), zmax=(105, 'Maximum zenith angle', float), pfiles=(None, "PFILES directory", str)) default_file_args = dict(evfile=FileFlags.in_stage_mask, scfile=FileFlags.in_stage_mask, outfile=FileFlags.out_stage_mask) __doc__ += Link.construct_docstring(default_options) class Link_FermipyCoadd(AppLink): """Small wrapper to run fermipy-coadd """ appname = 'fermipy-coadd' linkname_default = 'coadd' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(args=([], "List of input files", list), output=(None, "Output file", str)) default_file_args = dict(args=FileFlags.input_mask, output=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Link_FermipyGatherSrcmaps(AppLink): """Small wrapper to run fermipy-gather-srcmaps """ appname = 'fermipy-gather-srcmaps' linkname_default = 'gather-srcmaps' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(output=(None, "Output file name", str), args=([], "List 
of input files", list), gzip=(False, "Compress output", bool), rm=(False, "Remove input files", bool), clobber=(False, "Overwrite output", bool)) default_file_args = dict(args=FileFlags.input_mask, output=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Link_FermipyVstack(AppLink): """Small wrapper to run fermipy-vstack """ appname = 'fermipy-vstack' linkname_default = 'vstack' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(output=(None, "Output file name", str), hdu=(None, "Name of HDU to stack", str), args=([], "List of input files", list), gzip=(False, "Compress output", bool), rm=(False, "Remove input files", bool), clobber=(False, "Overwrite output", bool)) default_file_args = dict(args=FileFlags.input_mask, output=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Link_FermipyHealview(AppLink): """Small wrapper to run fermipy-healview """ appname = 'fermipy-healview' linkname_default = 'fermipy-healview' usage = '%s [options]' % (appname) description = "Link to run %s" % (appname) default_options = dict(input=(None, "Input file", str), output=(None, "Output file name", str), extension=(None, "FITS HDU with HEALPix map", str), zscale=("log", "Scaling for color scale", str)) default_file_args = dict(args=FileFlags.input_mask, output=FileFlags.output_mask) __doc__ += Link.construct_docstring(default_options) class Gtexpcube2_SG(ScatterGather): """Small class to generate configurations for `Gtlink_expcube2` """ appname = 'fermipy-gtexcube2-sg' usage = "%s [options]" % (appname) description = "Submit gtexpube2 jobs in parallel" clientclass = Gtlink_expcube2 job_time = 300 default_options = dict(comp=diffuse_defaults.diffuse['comp'], data=diffuse_defaults.diffuse['data'], hpx_order_max=diffuse_defaults.diffuse['hpx_order_expcube']) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} components = Component.build_from_yamlfile(args['comp']) datafile = args['data'] if datafile is None or datafile == 'None': return job_configs NAME_FACTORY.update_base_dict(args['data']) for comp in components: zcut = "zmax%i" % comp.zmax mktimelist = copy.copy(comp.mktimefilters) if not mktimelist: mktimelist.append('none') evtclasslist_keys = copy.copy(comp.evtclasses) if not evtclasslist_keys: evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']] else: evtclasslist_vals = copy.copy(evtclasslist_keys) for mktimekey in mktimelist: for evtclassval in evtclasslist_vals: fullkey = comp.make_key( '%s_%s_{ebin_name}_%s_{evtype_name}' % (evtclassval, zcut, mktimekey)) name_keys = dict(zcut=zcut, ebin=comp.ebin_name, psftype=comp.evtype_name, coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), mktime=mktimekey, evclass=evtclassval, fullpath=True) outfile = NAME_FACTORY.bexpcube(**name_keys) cmap = NAME_FACTORY.ccube(**name_keys) infile = NAME_FACTORY.ltcube(**name_keys) logfile = make_nfs_path(outfile.replace('.fits', '.log')) job_configs[fullkey] = dict(cmap=cmap, infile=infile, outfile=outfile, irfs=NAME_FACTORY.irfs(**name_keys), hpx_order=min( comp.hpx_order, args['hpx_order_max']), evtype=comp.evtype, logfile=logfile) return job_configs class Gtltsum_SG(ScatterGather): """Small class to generate configurations for `Gtlink_ltsum` """ appname = 'fermipy-gtltsum-sg' usage = "%s [options]" % (appname) description = "Submit gtltsum jobs in parallel" clientclass = Gtlink_ltsum job_time = 300 
default_options = dict(comp=diffuse_defaults.diffuse['comp'], data=diffuse_defaults.diffuse['data'], ft1file=(None, 'Input FT1 file', str)) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} components = Component.build_from_yamlfile(args['comp']) datafile = args['data'] if datafile is None or datafile == 'None': return job_configs NAME_FACTORY.update_base_dict(args['data']) inputfiles = create_inputlist(args['ft1file']) num_files = len(inputfiles) for comp in components: zcut = "zmax%i" % comp.zmax mktimelist = copy.copy(comp.mktimefilters) if not mktimelist: mktimelist.append('none') evtclasslist_keys = copy.copy(comp.evtclasses) if not evtclasslist_keys: evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']] else: evtclasslist_vals = copy.copy(evtclasslist_keys) for mktimekey in mktimelist: for evtclassval in evtclasslist_vals: fullkey = comp.make_key( '%s_%s_{ebin_name}_%s_{evtype_name}' % (evtclassval, zcut, mktimekey)) name_keys = dict(zcut=zcut, ebin=comp.ebin_name, psftype=comp.evtype_name, coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), mktime=mktimekey, evclass=evtclassval, fullpath=True) outfile = os.path.join(NAME_FACTORY.base_dict['basedir'], NAME_FACTORY.ltcube(**name_keys)) infile1 = _make_ltcube_file_list(outfile, num_files) logfile = make_nfs_path(outfile.replace('.fits', '.log')) job_configs[fullkey] = dict(infile1=infile1, outfile=outfile, logfile=logfile) return job_configs class SumRings_SG(ScatterGather): """Small class to generate configurations for `Link_FermipyCoadd` to sum galprop ring gasmaps """ appname = 'fermipy-sum-rings-sg' usage = "%s [options]" % (appname) description = "Submit fermipy-coadd jobs in parallel to sum GALProp rings" clientclass = Link_FermipyCoadd job_time = 300 default_options = dict(library=diffuse_defaults.diffuse['library'], outdir=(None, 'Output directory', str),) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} gmm = make_ring_dicts(library=args['library'], basedir='.') for galkey in gmm.galkeys(): ring_dict = gmm.ring_dict(galkey) for ring_key, ring_info in ring_dict.items(): output_file = ring_info.merged_gasmap file_string = "" for fname in ring_info.files: file_string += " %s" % fname logfile = make_nfs_path(output_file.replace('.fits', '.log')) job_configs[ring_key] = dict(output=output_file, args=file_string, logfile=logfile) return job_configs class Vstack_SG(ScatterGather): """Small class to generate configurations for `Link_FermipyVstack` to merge source maps """ appname = 'fermipy-vstack-sg' usage = "%s [options]" % (appname) description = "Submit fermipy-vstack jobs in parallel" clientclass = Link_FermipyVstack job_time = 300 default_options = dict(comp=diffuse_defaults.diffuse['comp'], data=diffuse_defaults.diffuse['data'], library=diffuse_defaults.diffuse['library'],) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} components = Component.build_from_yamlfile(args['comp']) NAME_FACTORY.update_base_dict(args['data']) ret_dict = make_diffuse_comp_info_dict(components=components, library=args['library'], basedir=NAME_FACTORY.base_dict['basedir']) diffuse_comp_info_dict = ret_dict['comp_info_dict'] for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()): diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key] for 
comp in components: zcut = "zmax%i" % comp.zmax key = comp.make_key('{ebin_name}_{evtype_name}') if diffuse_comp_info_value.components is None: sub_comp_info = diffuse_comp_info_value else: sub_comp_info = diffuse_comp_info_value.get_component_info(comp) name_keys = dict(zcut=zcut, sourcekey=sub_comp_info.sourcekey, ebin=comp.ebin_name, psftype=comp.evtype_name, mktime='none', coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), fullpath=True) outfile = NAME_FACTORY.srcmaps(**name_keys) outfile_tokens = os.path.splitext(outfile) infile_regexp = "%s_*.fits*" % outfile_tokens[0] full_key = "%s_%s" % (sub_comp_info.sourcekey, key) logfile = make_nfs_path(outfile.replace('.fits', '.log')) job_configs[full_key] = dict(output=outfile, args=infile_regexp, hdu=sub_comp_info.source_name, logfile=logfile) return job_configs class GatherSrcmaps_SG(ScatterGather): """Small class to generate configurations for `Link_FermipyGatherSrcmaps` """ appname = 'fermipy-gather-srcmaps-sg' usage = "%s [options]" % (appname) description = "Submit fermipy-gather-srcmaps jobs in parallel" clientclass = Link_FermipyGatherSrcmaps job_time = 300 default_options = dict(comp=diffuse_defaults.diffuse['comp'], data=diffuse_defaults.diffuse['data'], library=diffuse_defaults.diffuse['library']) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} components = Component.build_from_yamlfile(args['comp']) NAME_FACTORY.update_base_dict(args['data']) ret_dict = make_catalog_comp_dict(library=args['library'], basedir=NAME_FACTORY.base_dict['basedir']) catalog_info_dict = ret_dict['catalog_info_dict'] for catalog_name in catalog_info_dict: for comp in components: zcut = "zmax%i" % comp.zmax key = comp.make_key('{ebin_name}_{evtype_name}') name_keys = dict(zcut=zcut, sourcekey=catalog_name, ebin=comp.ebin_name, psftype=comp.evtype_name, coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), mktime='none', fullpath=True) outfile = NAME_FACTORY.srcmaps(**name_keys) outfile_tokens = os.path.splitext(outfile) infile_regexp = "%s_*.fits" % outfile_tokens[0] logfile = make_nfs_path(outfile.replace('.fits', '.log')) job_configs[key] = dict(output=outfile, args=infile_regexp, logfile=logfile) return job_configs class Healview_SG(ScatterGather): """Small class to generate configurations for `Link_FermipyHealview` """ appname = 'fermipy-healviw-sg' usage = "%s [options]" % (appname) description = "Submit fermipy-healviw jobs in parallel" clientclass = Link_FermipyHealview job_time = 60 default_options = dict(comp=diffuse_defaults.diffuse['comp'], data=diffuse_defaults.diffuse['data'], library=diffuse_defaults.diffuse['library']) __doc__ += Link.construct_docstring(default_options) def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} components = Component.build_from_yamlfile(args['comp']) NAME_FACTORY.update_base_dict(args['data']) ret_dict = make_diffuse_comp_info_dict(components=components, library=args['library'], basedir=NAME_FACTORY.base_dict['basedir']) diffuse_comp_info_dict = ret_dict['comp_info_dict'] for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()): diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key] for comp in components: zcut = "zmax%i" % comp.zmax key = comp.make_key('{ebin_name}_{evtype_name}') if diffuse_comp_info_value.components is None: sub_comp_info = diffuse_comp_info_value else: sub_comp_info = diffuse_comp_info_value.get_component_info(comp) full_key 
= "%s_%s" % (sub_comp_info.sourcekey, key) name_keys = dict(zcut=zcut, sourcekey=sub_comp_info.sourcekey, ebin=comp.ebin_name, psftype=comp.evtype_name, coordsys=comp.coordsys, irf_ver=NAME_FACTORY.irf_ver(), mktime='none', fullpath=True) infile = NAME_FACTORY.srcmaps(**name_keys) outfile = infile.replace('.fits', '.png') logfile = make_nfs_path(outfile.replace('.png', '_png.log')) job_configs[full_key] = dict(input=infile, output=outfile, extension=sub_comp_info.source_name, zscale=args.get('zscale', 'log'), logfile=logfile) return job_configs def register_classes(): """Register these classes with the `LinkFactory` """ Gtlink_select.register_class() Gtlink_bin.register_class() Gtlink_expcube2.register_class() Gtlink_scrmaps.register_class() Gtlink_mktime.register_class() Gtlink_ltcube.register_class() Link_FermipyCoadd.register_class() Link_FermipyGatherSrcmaps.register_class() Link_FermipyVstack.register_class() Link_FermipyHealview.register_class() Gtexpcube2_SG.register_class() Gtltsum_SG.register_class() SumRings_SG.register_class() Vstack_SG.register_class() GatherSrcmaps_SG.register_class() Healview_SG.register_class()
# Author : Martin Luessi mluessi@nmr.mgh.harvard.edu (2012) # License : BSD 3-clause # Parts of this code were copied from NiTime http://nipy.sourceforge.net/nitime import operator import numpy as np from scipy import linalg from ..parallel import parallel_func from ..utils import sum_squared, warn, verbose, logger def tridisolve(d, e, b, overwrite_b=True): """Symmetric tridiagonal system solver, from Golub and Van Loan p157. .. note:: Copied from NiTime. Parameters ---------- d : ndarray main diagonal stored in d[:] e : ndarray superdiagonal stored in e[:-1] b : ndarray RHS vector Returns ------- x : ndarray Solution to Ax = b (if overwrite_b is False). Otherwise solution is stored in previous RHS vector b """ N = len(b) # work vectors dw = d.copy() ew = e.copy() if overwrite_b: x = b else: x = b.copy() for k in range(1, N): # e^(k-1) = e(k-1) / d(k-1) # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1) t = ew[k - 1] ew[k - 1] = t / dw[k - 1] dw[k] = dw[k] - t * ew[k - 1] for k in range(1, N): x[k] = x[k] - ew[k - 1] * x[k - 1] x[N - 1] = x[N - 1] / dw[N - 1] for k in range(N - 2, -1, -1): x[k] = x[k] / dw[k] - ew[k] * x[k + 1] if not overwrite_b: return x def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8): """Perform an inverse iteration. This will find the eigenvector corresponding to the given eigenvalue in a symmetric tridiagonal system. ..note:: Copied from NiTime. Parameters ---------- d : ndarray main diagonal of the tridiagonal system e : ndarray offdiagonal stored in e[:-1] w : float eigenvalue of the eigenvector x0 : ndarray initial point to start the iteration rtol : float tolerance for the norm of the difference of iterates Returns ------- e: ndarray The converged eigenvector """ eig_diag = d - w if x0 is None: x0 = np.random.randn(len(d)) x_prev = np.zeros_like(x0) norm_x = np.linalg.norm(x0) # the eigenvector is unique up to sign change, so iterate # until || |x^(n)| - |x^(n-1)| ||^2 < rtol x0 /= norm_x while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol: x_prev = x0.copy() tridisolve(eig_diag, e, x0) norm_x = np.linalg.norm(x0) x0 /= norm_x return x0 def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None, interp_kind='linear'): """Compute Discrete Prolate Spheroidal Sequences. Will give of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. .. note:: Copied from NiTime. Parameters ---------- N : int Sequence length half_nbw : float, unitless Standardized half bandwidth corresponding to 2 * half_bw = BW*f0 = BW*N/dt but with dt taken as 1 Kmax : int Number of DPSS windows to return is Kmax (orders 0 through Kmax-1) low_bias : Bool Keep only tapers with eigenvalues > 0.9 interp_from : int (optional) The dpss can be calculated using interpolation from a set of dpss with the same NW and Kmax, but shorter N. This is the length of this shorter set of dpss windows. interp_kind : str (optional) This input variable is passed to scipy.interpolate.interp1d and specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an integer specifying the order of the spline interpolator to use. Returns ------- v, e : tuple, v is an array of DPSS windows shaped (Kmax, N) e are the eigenvalues Notes ----- Tridiagonal form of DPSS calculation from: Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and uncertainty V: The discrete case. 
Bell System Technical Journal, Volume 57 (1978), 1371430 """ from scipy import interpolate from ..filter import next_fast_len # This np.int32 business works around a weird Windows bug, see # gh-5039 and https://github.com/scipy/scipy/pull/8608 Kmax = np.int32(operator.index(Kmax)) N = np.int32(operator.index(N)) W = float(half_nbw) / N nidx = np.arange(N, dtype='d') # In this case, we create the dpss windows of the smaller size # (interp_from) and then interpolate to the larger size (N) if interp_from is not None: if interp_from > N: e_s = 'In dpss_windows, interp_from is: %s ' % interp_from e_s += 'and N is: %s. ' % N e_s += 'Please enter interp_from smaller than N.' raise ValueError(e_s) dpss = [] d, e = dpss_windows(interp_from, half_nbw, Kmax, low_bias=False) for this_d in d: x = np.arange(this_d.shape[-1]) tmp = interpolate.interp1d(x, this_d, kind=interp_kind) d_temp = tmp(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False)) # Rescale: d_temp = d_temp / np.sqrt(sum_squared(d_temp)) dpss.append(d_temp) dpss = np.array(dpss) else: # here we want to set up an optimization problem to find a sequence # whose energy is maximally concentrated within band [-W,W]. # Thus, the measure lambda(T,W) is the ratio between the energy within # that band, and the total energy. This leads to the eigen-system # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest # eigenvalue is the sequence with maximally concentrated energy. The # collection of eigenvectors of this system are called Slepian # sequences, or discrete prolate spheroidal sequences (DPSS). Only the # first K, K = 2NW/dt orders of DPSS will exhibit good spectral # concentration # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem] # Here I set up an alternative symmetric tri-diagonal eigenvalue # problem such that # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1] # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1] # [see Percival and Walden, 1993] diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W) off_diag = np.zeros_like(nidx) off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2. # put the diagonals in LAPACK "packed" storage ab = np.zeros((2, N), 'd') ab[1] = diagonal ab[0, 1:] = off_diag[:-1] # only calculate the highest Kmax eigenvalues w = linalg.eigvals_banded(ab, select='i', select_range=(N - Kmax, N - 1)) w = w[::-1] # find the corresponding eigenvectors via inverse iteration t = np.linspace(0, np.pi, N) dpss = np.zeros((Kmax, N), 'd') for k in range(Kmax): dpss[k] = tridi_inverse_iteration(diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)) # By convention (Percival and Walden, 1993 pg 379) # * symmetric tapers (k=0,2,4,...) should have a positive average. 
# * antisymmetric tapers should begin with a positive lobe fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2 * i] *= -1 # rather than test the sign of one point, test the sign of the # linear slope up to the first (largest) peak pk = np.argmax(np.abs(dpss[1::2, :N // 2]), axis=1) for i, p in enumerate(pk): if np.sum(dpss[2 * i + 1, :p]) < 0: dpss[2 * i + 1] *= -1 # Now find the eigenvalues of the original spectral concentration problem # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390 # compute autocorr using FFT (same as nitime.utils.autocorr(dpss) * N) rxx_size = 2 * N - 1 n_fft = next_fast_len(rxx_size) dpss_fft = np.fft.rfft(dpss, n_fft) dpss_rxx = np.fft.irfft(dpss_fft * dpss_fft.conj(), n_fft) dpss_rxx = dpss_rxx[:, :N] r = 4 * W * np.sinc(2 * W * nidx) r[0] = 2 * W eigvals = np.dot(dpss_rxx, r) if low_bias: idx = (eigvals > 0.9) if not idx.any(): warn('Could not properly use low_bias, keeping lowest-bias taper') idx = [np.argmax(eigvals)] dpss, eigvals = dpss[idx], eigvals[idx] assert len(dpss) > 0 # should never happen assert dpss.shape[1] == N # old nitime bug return dpss, eigvals def _psd_from_mt_adaptive(x_mt, eigvals, freq_mask, max_iter=150, return_weights=False): r"""Use iterative procedure to compute the PSD from tapered spectra. .. note:: Modified from NiTime. Parameters ---------- x_mt : array, shape=(n_signals, n_tapers, n_freqs) The DFTs of the tapered sequences (only positive frequencies) eigvals : array, length n_tapers The eigenvalues of the DPSS tapers freq_mask : array Frequency indices to keep max_iter : int Maximum number of iterations for weight computation return_weights : bool Also return the weights Returns ------- psd : array, shape=(n_signals, np.sum(freq_mask)) The computed PSDs weights : array shape=(n_signals, n_tapers, np.sum(freq_mask)) The weights used to combine the tapered spectra Notes ----- The weights to use for making the multitaper estimate, such that :math:`S_{mt} = \sum_{k} |w_k|^2S_k^{mt} / \sum_{k} |w_k|^2` """ n_signals, n_tapers, n_freqs = x_mt.shape if len(eigvals) != n_tapers: raise ValueError('Need one eigenvalue for each taper') if n_tapers < 3: raise ValueError('Not enough tapers to compute adaptive weights.') rt_eig = np.sqrt(eigvals) # estimate the variance from an estimate with fixed weights psd_est = _psd_from_mt(x_mt, rt_eig[np.newaxis, :, np.newaxis]) x_var = np.trapz(psd_est, dx=np.pi / n_freqs) / (2 * np.pi) del psd_est # allocate space for output psd = np.empty((n_signals, np.sum(freq_mask))) # only keep the frequencies of interest x_mt = x_mt[:, :, freq_mask] if return_weights: weights = np.empty((n_signals, n_tapers, psd.shape[1])) for i, (xk, var) in enumerate(zip(x_mt, x_var)): # combine the SDFs in the traditional way in order to estimate # the variance of the timeseries # The process is to iteratively switch solving for the following # two expressions: # (1) Adaptive Multitaper SDF: # S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2 # # (2) Weights # d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}] # # Where lam_k are the eigenvalues corresponding to the DPSS tapers, # and the expected value of the broadband bias function # E{B_k(f)} is replaced by its full-band integration # (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k) # start with an estimate from incomplete data--the first 2 tapers psd_iter = _psd_from_mt(xk[:2, :], rt_eig[:2, np.newaxis]) err = np.zeros_like(xk) for n in range(max_iter): d_k = (psd_iter / (eigvals[:, 
np.newaxis] * psd_iter + (1 - eigvals[:, np.newaxis]) * var)) d_k *= rt_eig[:, np.newaxis] # Test for convergence -- this is overly conservative, since # iteration only stops when all frequencies have converged. # A better approach is to iterate separately for each freq, but # that is a nonvectorized algorithm. # Take the RMS difference in weights from the previous iterate # across frequencies. If the maximum RMS error across freqs is # less than 1e-10, then we're converged err -= d_k if np.max(np.mean(err ** 2, axis=0)) < 1e-10: break # update the iterative estimate with this d_k psd_iter = _psd_from_mt(xk, d_k) err = d_k if n == max_iter - 1: warn('Iterative multi-taper PSD computation did not converge.') psd[i, :] = psd_iter if return_weights: weights[i, :, :] = d_k if return_weights: return psd, weights else: return psd def _psd_from_mt(x_mt, weights): """Compute PSD from tapered spectra. Parameters ---------- x_mt : array Tapered spectra weights : array Weights used to combine the tapered spectra Returns ------- psd : array The computed PSD """ psd = weights * x_mt psd *= psd.conj() psd = psd.real.sum(axis=-2) psd *= 2 / (weights * weights.conj()).real.sum(axis=-2) return psd def _csd_from_mt(x_mt, y_mt, weights_x, weights_y): """Compute CSD from tapered spectra. Parameters ---------- x_mt : array Tapered spectra for x y_mt : array Tapered spectra for y weights_x : array Weights used to combine the tapered spectra of x_mt weights_y : array Weights used to combine the tapered spectra of y_mt Returns ------- psd: array The computed PSD """ csd = np.sum(weights_x * x_mt * (weights_y * y_mt).conj(), axis=-2) denom = (np.sqrt((weights_x * weights_x.conj()).real.sum(axis=-2)) * np.sqrt((weights_y * weights_y.conj()).real.sum(axis=-2))) csd *= 2 / denom return csd def _mt_spectra(x, dpss, sfreq, n_fft=None): """Compute tapered spectra. Parameters ---------- x : array, shape=(n_signals, n_times) Input signal dpss : array, shape=(n_tapers, n_times) The tapers sfreq : float The sampling frequency n_fft : int | None Length of the FFT. If None, the number of samples in the input signal will be used. Returns ------- x_mt : array, shape=(n_signals, n_tapers, n_times) The tapered spectra freqs : array The frequency points in Hz of the spectra """ if n_fft is None: n_fft = x.shape[1] # remove mean (do not use in-place subtraction as it may modify input x) x = x - np.mean(x, axis=-1)[:, np.newaxis] # only keep positive frequencies freqs = np.fft.rfftfreq(n_fft, 1. / sfreq) # The following is equivalent to this, but uses less memory: # x_mt = fftpack.fft(x[:, np.newaxis, :] * dpss, n=n_fft) n_tapers = dpss.shape[0] if dpss.ndim > 1 else 1 x_mt = np.zeros((len(x), n_tapers, len(freqs)), dtype=np.complex128) for idx, sig in enumerate(x): x_mt[idx] = np.fft.rfft(sig[np.newaxis, :] * dpss, n=n_fft) # Adjust DC and maybe Nyquist, depending on one-sided transform x_mt[:, :, 0] /= np.sqrt(2.) if x.shape[1] % 2 == 0: x_mt[:, :, -1] /= np.sqrt(2.) return x_mt, freqs @verbose def _compute_mt_params(n_times, sfreq, bandwidth, low_bias, adaptive, interp_from=None, verbose=None): """Triage windowing and multitaper parameters.""" # Compute standardized half-bandwidth if bandwidth is not None: half_nbw = float(bandwidth) * n_times / (2. * sfreq) else: half_nbw = 4. 
if half_nbw < 0.5: raise ValueError( 'bandwidth value %s yields a normalized bandwidth of %s < 0.5, ' 'use a value of at least %s' % (bandwidth, half_nbw, sfreq / n_times)) # Compute DPSS windows n_tapers_max = int(2 * half_nbw) window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max, low_bias=low_bias, interp_from=interp_from) logger.info(' using multitaper spectrum estimation with %d DPSS ' 'windows' % len(eigvals)) if adaptive and len(eigvals) < 3: warn('Not adaptively combining the spectral estimators due to a ' 'low number of tapers (%s < 3).' % (len(eigvals),)) adaptive = False return window_fun, eigvals, adaptive @verbose def psd_array_multitaper(x, sfreq, fmin=0, fmax=np.inf, bandwidth=None, adaptive=False, low_bias=True, normalization='length', n_jobs=1, verbose=None): """Compute power spectrum density (PSD) using a multi-taper method. Parameters ---------- x : array, shape=(..., n_times) The data to compute PSD from. sfreq : float The sampling frequency. fmin : float The lower frequency of interest. fmax : float The upper frequency of interest. bandwidth : float The bandwidth of the multi taper windowing function in Hz. adaptive : bool Use adaptive weights to combine the tapered spectra into PSD (slow, use n_jobs >> 1 to speed up computation). low_bias : bool Only use tapers with more than 90% spectral concentration within bandwidth. normalization : str Either "full" or "length" (default). If "full", the PSD will be normalized by the sampling rate as well as the length of the signal (as in nitime). n_jobs : int Number of parallel jobs to use (only used if adaptive=True). verbose : bool, str, int, or None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>` for more). Returns ------- psds : ndarray, shape (..., n_freqs) or The power spectral densities. All dimensions up to the last will be the same as input. freqs : array The frequency points in Hz of the PSD. See Also -------- mne.io.Raw.plot_psd mne.Epochs.plot_psd csd_multitaper psd_multitaper Notes ----- .. versionadded:: 0.14.0 """ if normalization not in ('length', 'full'): raise ValueError('Normalization must be "length" or "full", not %s' % normalization) # Reshape data so its 2-D for parallelization ndim_in = x.ndim x = np.atleast_2d(x) n_times = x.shape[-1] dshape = x.shape[:-1] x = x.reshape(-1, n_times) dpss, eigvals, adaptive = _compute_mt_params( n_times, sfreq, bandwidth, low_bias, adaptive) # decide which frequencies to keep freqs = np.fft.rfftfreq(n_times, 1. 
/ sfreq) freq_mask = (freqs >= fmin) & (freqs <= fmax) freqs = freqs[freq_mask] psd = np.zeros((x.shape[0], freq_mask.sum())) # Let's go in up to 50 MB chunks of signals to save memory n_chunk = max(50000000 // (len(freq_mask) * len(eigvals) * 16), n_jobs) offsets = np.concatenate((np.arange(0, x.shape[0], n_chunk), [x.shape[0]])) for start, stop in zip(offsets[:-1], offsets[1:]): x_mt = _mt_spectra(x[start:stop], dpss, sfreq)[0] if not adaptive: weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis] psd[start:stop] = _psd_from_mt(x_mt[:, :, freq_mask], weights) else: n_splits = min(stop - start, n_jobs) parallel, my_psd_from_mt_adaptive, n_jobs = \ parallel_func(_psd_from_mt_adaptive, n_splits) out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask) for x in np.array_split(x_mt, n_splits)) psd[start:stop] = np.concatenate(out) if normalization == 'full': psd /= sfreq # Combining/reshaping to original data shape psd.shape = dshape + (-1,) if ndim_in == 1: psd = psd[0] return psd, freqs @verbose def tfr_array_multitaper(epoch_data, sfreq, freqs, n_cycles=7.0, zero_mean=True, time_bandwidth=None, use_fft=True, decim=1, output='complex', n_jobs=1, verbose=None): """Compute time-frequency transforms using wavelets and multitaper windows. Uses Morlet wavelets windowed with multiple DPSS tapers. Parameters ---------- epoch_data : array of shape (n_epochs, n_channels, n_times) The epochs. sfreq : float | int Sampling frequency of the data. freqs : array-like of floats, shape (n_freqs) The frequencies. n_cycles : float | array of float Number of cycles in the Morlet wavelet. Fixed number or one per frequency. Defaults to 7.0. zero_mean : bool If True, make sure the wavelets have a mean of zero. Defaults to True. time_bandwidth : float If None, will be set to 4.0 (3 tapers). Time x (Full) Bandwidth product. The number of good tapers (low-bias) is chosen automatically based on this to equal floor(time_bandwidth - 1). Defaults to None use_fft : bool Use the FFT for convolutions or not. Defaults to True. decim : int | slice To reduce memory usage, decimation factor after time-frequency decomposition. Defaults to 1. If `int`, returns tfr[..., ::decim]. If `slice`, returns tfr[..., decim]. .. note:: Decimation may create aliasing artifacts, yet decimation is done after the convolutions. output : str, defaults to 'complex' * 'complex' : single trial complex. * 'power' : single trial power. * 'phase' : single trial phase. * 'avg_power' : average of single trial power. * 'itc' : inter-trial coherence. * 'avg_power_itc' : average of single trial power and inter-trial coherence across trials. n_jobs : int The number of epochs to process at the same time. The parallelization is implemented across channels. Defaults to 1. verbose : bool, str, int, or None, defaults to None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>` for more). Returns ------- out : array Time frequency transform of epoch_data. If output is in ['complex', 'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs, n_times), else it is (n_chans, n_freqs, n_times). If output is 'avg_power_itc', the real values code for 'avg_power' and the imaginary values code for the 'itc': out = avg_power + i * itc See Also -------- mne.time_frequency.tfr_multitaper mne.time_frequency.tfr_morlet mne.time_frequency.tfr_array_morlet mne.time_frequency.tfr_stockwell mne.time_frequency.tfr_array_stockwell Notes ----- .. 
versionadded:: 0.14.0
    """
    from .tfr import _compute_tfr
    return _compute_tfr(epoch_data, freqs, sfreq=sfreq, method='multitaper',
                        n_cycles=n_cycles, zero_mean=zero_mean,
                        time_bandwidth=time_bandwidth, use_fft=use_fft,
                        decim=decim, output=output, n_jobs=n_jobs,
                        verbose=verbose)
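# Illustrative usage (a minimal sketch, not part of the module above): it
# assumes the multitaper functions defined above are importable from this
# module and that NumPy is installed. It estimates the PSD of a noisy 10 Hz
# sinusoid, once with fixed eigenvalue weights and once adaptively.
if __name__ == "__main__":
    import numpy as np

    sfreq = 256.0                               # sampling rate in Hz
    t = np.arange(0, 4, 1.0 / sfreq)            # 4 seconds of data
    x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)

    # Fixed-weight estimate, keeping only low-bias tapers (> 90% concentration)
    psd, freqs = psd_array_multitaper(x, sfreq, fmin=0, fmax=50,
                                      bandwidth=4.0, low_bias=True)

    # Adaptive combination of the tapered spectra (slower; needs >= 3 tapers)
    psd_adaptive, _ = psd_array_multitaper(x, sfreq, fmin=0, fmax=50,
                                           bandwidth=4.0, adaptive=True)
    print(freqs.shape, psd.shape, psd_adaptive.shape)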
# -*- coding: UTF-8 -*- import scarlett from scarlett.commands import Command from scarlett.listener import * import pygst pygst.require('0.10') import gobject gobject.threads_init() from dbus.mainloop.glib import DBusGMainLoop DBusGMainLoop(set_as_default=True) import gst, os import threading # import scarlett speaker import scarlett.basics.say as scarlett_says # import new scarlett Brain from scarlett.brain.scarlettbraini import ScarlettBrainImproved # source: http://stackoverflow.com/questions/8005765/how-to-get-duration-of-steaming-data-with-gstreamer # LETS TRY USING THIS: # gobject.threads_init() # EXAMPLES source code: # http://cgit.freedesktop.org/gstreamer/gst-python/tree/examples/filesrc.py?h=0.10 # gst.STATE_PLAYING Used to start playing # * player_name.set_state(gst.STATE_PLAYING) # gst.STATE_PAUSED Used to pause file # * player_name.set_state(gst.STATE_PAUSED) # gst.STATE_NULL Used to stop file # * player_name.set_state(gst.STATE_NULL) class GstListenerImproved(threading.Thread): """ Controls all actions involving pocketsphinx, stt, and tts. Borrowed from mcfletch-listener: Holds the PocketSphinx Pipeline we'll use for recognition The idea here is that the Gstreamer/PocketSphinx back-end is isolated from the GUI code, with the idea that we might be able to add in another backend at some point in the future... Here's the gst-inspect from the pocketsphinx component: Element Properties: hmm : Directory containing acoustic model parameters flags: readable, writable String. Default: "/usr/share/pocketsphinx/model/hmm/wsj1" lm : Language model file flags: readable, writable String. Default: "/usr/share/pocketsphinx/model/lm/wsj/wlist5o.3e-7.vp.tg.lm.DMP" lmctl : Language model control file (for class LMs) flags: readable, writable String. Default: null lmname : Language model name (to select LMs from lmctl) flags: readable, writable String. Default: "default" dict : Dictionary File flags: readable, writable String. Default: "/usr/share/pocketsphinx/model/lm/wsj/wlist5o.dic" fsg : Finite state grammar file flags: readable, writable String. Default: null fsg-model : Finite state grammar object (fsg_model_t *) flags: writable Pointer. Write only fwdflat : Enable Flat Lexicon Search flags: readable, writable Boolean. Default: false bestpath : Enable Graph Search flags: readable, writable Boolean. Default: false maxhmmpf : Maximum number of HMMs searched per frame flags: readable, writable Integer. Range: 1 - 100000 Default: 1000 maxwpf : Maximum number of words searched per frame flags: readable, writable Integer. Range: 1 - 100000 Default: 10 dsratio : Evaluate acoustic model every N frames flags: readable, writable Integer. Range: 1 - 10 Default: 1 latdir : Output Directory for Lattices flags: readable, writable String. Default: null lattice : Word lattice object for most recent result flags: readable Boxed pointer of type "PSLattice" decoder : The underlying decoder flags: readable Boxed pointer of type "PSDecoder" configured : Set this to finalize configuration flags: readable, writable Boolean. Default: false Adaptation/Training still needs to be looked into... 
http://cmusphinx.sourceforge.net/wiki/tutorialadapt """ def __init__(self, lis_type, brain, voice, override_parse=False, **kwargs): # Init thread class threading.Thread.__init__(self) self._stopevent = threading.Event() self.wit_thread = None self.loop = None ### NOT SURE IF NEEDED, BORROWED FROM TIMESLIDE # # a lock to wait wait for gstreamer thread to be ready ### NOT SURE IF NEEDED, BORROWED FROM TIMESLIDE # self.discovered_cond = threading.Condition(threading.Lock()) ### NOT SURE IF NEEDED, BORROWED FROM TIMESLIDE # self.discovered = False scarlett.log.debug(Fore.YELLOW + 'Starting up GstListenerImproved') self.brain = brain self.failed = int(self.brain.set_brain_item_r('scarlett_failed', 0)) self.keyword_identified = int( self.brain.set_brain_item_r( 'm_keyword_match', 0)) self.lis_type = lis_type self.voice = voice self.commander = Command(self.voice, self.brain) self.config = scarlett.config self.override_parse = override_parse # "/usr/local/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k" self.ps_hmm = self.get_hmm_full_path() self.ps_dict = self.get_dict_full_path() self.ps_lm = self.get_lm_full_path() self.ps_device = self.config.get('audio', 'usb_input_device') self.speech_system = self.config.get('speech', 'system') # default, use what we have set self.parse_launch_array = self._get_pocketsphinx_definition( override_parse) scarlett.log.debug( Fore.YELLOW + 'Initializing gst-parse-launch -------->') self.pipeline = gst.parse_launch( ' ! '.join(self.parse_launch_array)) listener = self.pipeline.get_by_name('listener') listener.connect('result', self.__result__) listener.set_property('configured', True) scarlett.log.debug( Fore.YELLOW + "Initializing connection to vader element -------->") # TODO: Play with vader object some more #vader = self.pipeline.get_by_name("vader") #vader.connect("vader-start", self._on_vader_start) #vader.connect("vader-stop", self._on_vader_stop) scarlett.log.debug(Fore.YELLOW + "Initializing Bus -------->") bus = self.pipeline.get_bus() bus.add_signal_watch() scarlett.log.debug(Fore.YELLOW + "Sending Message to Bus ---------->") bus.connect('message::application', self.__application_message__) # TODO: TEST EOS AND RESETTING PIPLINE #scarlett.log.debug(Fore.YELLOW + "After Message to Bus ----------->") #bus.connect("message::eos", self._on_bus_message_eos) # Start thread self.start() def run(self): """ Listener main loop """ scarlett_says.say_block("Hello sir. How are you doing this afternoon? I am full lee function nall, andd red ee for your commands") self.pipeline.set_state(gst.STATE_PLAYING) scarlett.log.debug(Fore.YELLOW + "KEYWORD: " + self.config.get('scarlett','owner')) # Thread loop self.loop = gobject.MainLoop() self.loop.run() def stop(self): """ Stop listener. 
""" scarlett_says.speak_block('Goodbye....') # Stop everything self.pipeline.set_state(gst.STATE_NULL) if self.loop is not None: self.loop.quit() def scarlett_start_listen(self): self.pipeline.set_state(gst.STATE_PLAYING) def scarlett_stop_listen(self): self.pipeline.set_state(gst.STATE_READY) def scarlett_pause_listen(self): self.pipeline.set_state(gst.STATE_PAUSED) def scarlett_reset_listen(self): self.failed = int( self.brain.set_brain_item_r( 'scarlett_failed', 0)) self.keyword_identified = int( self.brain.set_brain_item_r( 'm_keyword_match', 0)) def partial_result(self, asr, text, uttid): """Forward partial result signals on the bus to the main thread.""" pass def result(self, hyp, uttid): """Forward result signals on the bus to the main thread.""" scarlett.log.debug(Fore.YELLOW + "Inside result function") if hyp in self.config.get('scarlett', 'keywords'): scarlett.log.debug( Fore.YELLOW + "HYP-IS-SOMETHING: " + hyp + "\n\n\n") scarlett.log.debug( Fore.YELLOW + "UTTID-IS-SOMETHING:" + uttid + "\n") self.failed = int( self.brain.set_brain_item_r( 'scarlett_failed', 0)) # redis implementation # self.keyword_identified = 1 self.keyword_identified = int( self.brain.set_brain_item_r( 'm_keyword_match', 1)) scarlett.basics.voice.play_block('pi-listening') else: self.failed_temp = int( self.brain.get_brain_item('scarlett_failed')) + 1 self.failed = int( self.brain.set_brain_item_r( 'scarlett_failed', self.failed_temp)) scarlett.log.debug( Fore.YELLOW + "self.failed = %i" % (self.failed)) if self.failed > 4: # reset pipline self.scarlett_reset_listen() ScarlettTalk.speak( " %s , if you need me, just say my name." % (self.config.get('scarlett', 'owner'))) def run_cmd(self, hyp, uttid): scarlett.log.debug(Fore.YELLOW + "Inside run_cmd function") scarlett.log.debug(Fore.YELLOW + "KEYWORD IDENTIFIED BABY") scarlett.log.debug( Fore.RED + "self.keyword_identified = %i" % (self.keyword_identified)) if hyp == 'CANCEL': self.cancel_listening() else: self.commander.check_cmd(hyp) self.keyword_identified = int( self.brain.get_brain_item('m_keyword_match')) scarlett.log.debug( Fore.RED + "AFTER run_cmd, self.keyword_identified = %i" % (self.keyword_identified)) def listen(self, valve, vader): scarlett.log.debug(Fore.YELLOW + "Inside listen function") scarlett.basics.voice.play_block('pi-listening') valve.set_property('drop', False) valve.set_property('drop', True) # def cancel_listening(self, valve): def cancel_listening(self): scarlett.log.debug(Fore.YELLOW + "Inside cancel_listening function") self.scarlett_reset_listen() scarlett.log.debug(Fore.YELLOW + "self.failed = %i" % (self.failed)) scarlett.log.debug( Fore.RED + "self.keyword_identified = %i" % (self.keyword_identified)) def get_hmm_full_path(self): if os.environ.get('SCARLETT_HMM'): _hmm_full_path = os.environ.get('SCARLETT_HMM') else: _hmm_full_path = self.config.get('pocketsphinx', 'hmm') return _hmm_full_path def get_lm_full_path(self): if os.environ.get('SCARLETT_LM'): _lm_full_path = os.environ.get('SCARLETT_LM') else: _lm_full_path = self.config.get('pocketsphinx', 'lm') return _lm_full_path def get_dict_full_path(self): if os.environ.get('SCARLETT_DICT'): _dict_full_path = os.environ.get('SCARLETT_DICT') else: _dict_full_path = self.config.get('pocketsphinx', 'dict') return _dict_full_path def get_pipeline(self): scarlett.log.debug(Fore.YELLOW + "Inside get_pipeline") return self.pipeline def get_voice(self): scarlett.log.debug(Fore.YELLOW + "Inside get_voice") return self.voice def get_pipeline_state(self): return 
self.pipeline.get_state() def _get_pocketsphinx_definition(self, override_parse): scarlett.log.debug(Fore.YELLOW + "Inside _get_pocketsphinx_definition") """Return ``pocketsphinx`` definition for :func:`gst.parse_launch`.""" # default, use what we have set if not override_parse: return [ 'alsasrc device=' + self.ps_device, 'queue silent=false leaky=2 max-size-buffers=0 max-size-time=0 max-size-bytes=0', 'audioconvert', 'audioresample', 'audio/x-raw-int, rate=16000, width=16, depth=16, channels=1', 'audioresample', 'audio/x-raw-int, rate=8000', 'vader name=vader auto-threshold=true', 'pocketsphinx lm=' + self.ps_lm + ' dict=' + self.ps_dict + ' hmm=' + self.ps_hmm + ' name=listener', 'fakesink dump=1'] # NOTE, I commented out the refrence to the tee # 'fakesink dump=1 t.' else: return override_parse def _get_vader_definition(self): scarlett.log.debug(Fore.YELLOW + "Inside _get_vader_definition") """Return ``vader`` definition for :func:`gst.parse_launch`.""" # source: https://github.com/bossjones/eshayari/blob/master/eshayari/application.py # Convert noise level from spin button range [0,32768] to gstreamer # element's range [0,1]. Likewise, convert silence from spin button's # milliseconds to gstreamer element's nanoseconds. # MY DEFAULT VADER DEFINITON WAS: vader name=vader auto-threshold=true # vader name=vader auto-threshold=true noise = 256 / 32768 silence = 300 * 1000000 return ("vader " + "name=vader " + "auto-threshold=false " + "threshold=%.9f " % noise + "run-length=%d " % silence ) def _on_vader_start(self, vader, pos): scarlett.log.debug(Fore.YELLOW + "Inside _on_vader_start") """Send start position as a message on the bus.""" import gst struct = gst.Structure("start") pos = pos / 1000000000 # ns to s struct.set_value("start", pos) vader.post_message(gst.message_new_application(vader, struct)) def _on_vader_stop(self, vader, pos): scarlett.log.debug(Fore.YELLOW + "Inside _on_vader_stop") """Send stop position as a message on the bus.""" import gst struct = gst.Structure("stop") pos = pos / 1000000000 # ns to s struct.set_value("stop", pos) # def _on_bus_message_eos(self, bus, message): ### """Flush remaining subtitles to page.""" # if self._text is not None: # Store previous text. 
    ### self._texts[-1] = self._text
    ### self._text = None
    # if self._starts and self._stops[-1] is not None:
    #     self._append_subtitle(-1)
    # self._stop_speech_recognition()

    def __result__(self, listener, text, uttid):
        """We're inside __result__"""
        scarlett.log.debug(Fore.YELLOW + "Inside __result__")
        struct = gst.Structure('result')
        struct.set_value('hyp', text)
        struct.set_value('uttid', uttid)
        listener.post_message(gst.message_new_application(listener, struct))

    def __partial_result__(self, listener, text, uttid):
        """We're inside __partial_result__"""
        scarlett.log.debug(Fore.YELLOW + "Inside __partial_result__")
        struct = gst.Structure('partial_result')
        struct.set_value('hyp', text)
        struct.set_value('uttid', uttid)
        listener.post_message(gst.message_new_application(listener, struct))

    def __run_cmd__(self, listener, text, uttid):
        """We're inside __run_cmd__"""
        scarlett.log.debug(Fore.YELLOW + "Inside __run_cmd__")
        struct = gst.Structure('result')
        struct.set_value('hyp', text)
        struct.set_value('uttid', uttid)
        listener.post_message(gst.message_new_application(listener, struct))

    def __application_message__(self, bus, msg):
        msgtype = msg.structure.get_name()
        scarlett.log.debug(Fore.YELLOW + "msgtype: " + msgtype)
        if msgtype == 'partial_result':
            self.partial_result(msg.structure['hyp'], msg.structure['uttid'])
        elif msgtype == 'result':
            if self.keyword_identified == 1:
                self.run_cmd(msg.structure['hyp'], msg.structure['uttid'])
            else:
                self.result(msg.structure['hyp'], msg.structure['uttid'])
        elif msgtype == 'run_cmd':
            self.run_cmd(msg.structure['hyp'], msg.structure['uttid'])
        elif msgtype == gst.MESSAGE_EOS:
            pass
            ### DISABLE # TODO: SEE IF WE NEED THIS # self.pipeline.set_state(gst.STATE_NULL)
        elif msgtype == gst.MESSAGE_ERROR:
            # parse_error() is a method of the message object itself
            (err, debug) = msg.parse_error()
            scarlett.log.debug(Fore.RED + "Error: %s" % err, debug)
            pass
            ### DISABLE # TODO: SEE IF WE NEED THIS # self.pipeline.set_state(gst.STATE_NULL)
            ### DISABLE # TODO: SEE IF WE NEED THIS # (err, debug) = msgtype.parse_error()
            ### DISABLE # TODO: SEE IF WE NEED THIS # scarlett.log.debug(Fore.RED + "Error: %s" % err, debug)
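# Illustrative sketch (not part of the original module): it rebuilds the
# gst-parse-launch description the same way _get_pocketsphinx_definition()
# does, using the default model paths quoted in the class docstring above.
# The ALSA device name is a placeholder assumption for the example only.
if __name__ == "__main__":
    ps_device = "hw:1,0"  # placeholder; normally read from the scarlett config
    ps_hmm = "/usr/share/pocketsphinx/model/hmm/wsj1"
    ps_lm = "/usr/share/pocketsphinx/model/lm/wsj/wlist5o.3e-7.vp.tg.lm.DMP"
    ps_dict = "/usr/share/pocketsphinx/model/lm/wsj/wlist5o.dic"

    elements = [
        'alsasrc device=' + ps_device,
        'queue silent=false leaky=2 max-size-buffers=0 max-size-time=0 max-size-bytes=0',
        'audioconvert',
        'audioresample',
        'audio/x-raw-int, rate=16000, width=16, depth=16, channels=1',
        'audioresample',
        'audio/x-raw-int, rate=8000',
        'vader name=vader auto-threshold=true',
        'pocketsphinx lm=' + ps_lm + ' dict=' + ps_dict + ' hmm=' + ps_hmm + ' name=listener',
        'fakesink dump=1',
    ]
    # This joined string is what GstListenerImproved hands to gst.parse_launch().
    print(' ! '.join(elements))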
# Copyright 2016-2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :term:`HBA` (Host Bus Adapter) is a logical entity that provides a :term:`Partition` with access to external storage area networks (SANs) through an :term:`FCP Adapter`. More specifically, an HBA connects a Partition with an :term:`Adapter Port` on an FCP Adapter. HBA resources are contained in Partition resources. HBA resources only exist in :term:`CPCs <CPC>` that are in DPM mode and when the "dpm-storage-management" feature is not enabled. See section :ref:`Storage Groups` for details. When the "dpm-storage-management" feature is enabled, :term:`virtual HBAs <HBA>` are represented as :term:`Virtual Storage Resource` resources. """ from __future__ import absolute_import import copy from ._manager import BaseManager from ._resource import BaseResource from ._logging import logged_api_call from ._utils import matches_filters, RC_HBA __all__ = ['HbaManager', 'Hba'] class HbaManager(BaseManager): """ Manager providing access to the :term:`HBAs <HBA>` in a particular :term:`Partition`. Derived from :class:`~zhmcclient.BaseManager`; see there for common methods and attributes. Objects of this class are not directly created by the user; they are accessible via the following instance variable of a :class:`~zhmcclient.Partition` object (in DPM mode): * :attr:`~zhmcclient.Partition.hbas` Note that this instance variable will be `None` if the "dpm-storage-management" feature is enabled. """ def __init__(self, partition): # This function should not go into the docs. # Parameters: # partition (:class:`~zhmcclient.Partition`): # Partition defining the scope for this manager. super(HbaManager, self).__init__( resource_class=Hba, class_name=RC_HBA, session=partition.manager.session, parent=partition, base_uri='{}/hbas'.format(partition.uri), oid_prop='element-id', uri_prop='element-uri', name_prop='name', query_props=[], list_has_name=False) @property def partition(self): """ :class:`~zhmcclient.Partition`: :term:`Partition` defining the scope for this manager. """ return self._parent @logged_api_call def list(self, full_properties=False, filter_args=None): """ List the HBAs in this Partition. The returned HBAs have only the 'element-uri' property set. Filtering is supported only for the 'element-uri' property. Authorization requirements: * Object-access permission to this Partition. Parameters: full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation. filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned. Returns: : A list of :class:`~zhmcclient.Hba` objects. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ resource_obj_list = [] uris = self.partition.get_property('hba-uris') if uris: for uri in uris: resource_obj = self.resource_class( manager=self, uri=uri, name=None, properties=None) if matches_filters(resource_obj, filter_args): resource_obj_list.append(resource_obj) if full_properties: resource_obj.pull_full_properties() self._name_uri_cache.update_from(resource_obj_list) return resource_obj_list @logged_api_call def create(self, properties): """ Create and configure an HBA in this Partition. The HBA must be backed by an adapter port on an FCP adapter. The backing adapter port is specified in the "properties" parameter of this method by setting the "adapter-port-uri" property to the URI of the backing adapter port. The value for the "adapter-port-uri" property can be determined from a given adapter name and port index as shown in the following example code (omitting any error handling): .. code-block:: python partition = ... # Partition object for the new HBA adapter_name = 'FCP #1' # name of adapter with backing port adapter_port_index = 0 # port index of backing port adapter = partition.manager.cpc.adapters.find(name=adapter_name) port = adapter.ports.find(index=adapter_port_index) properties['adapter-port-uri'] = port.uri Authorization requirements: * Object-access permission to this Partition. * Object-access permission to the backing Adapter for the new HBA. * Task permission to the "Partition Details" task. Parameters: properties (dict): Initial property values. Allowable properties are defined in section 'Request body contents' in section 'Create HBA' in the :term:`HMC API` book. Returns: Hba: The resource object for the new HBA. The object will have its 'element-uri' property set as returned by the HMC, and will also have the input properties set. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ result = self.session.post(self.partition.uri + '/hbas', body=properties) # There should not be overlaps, but just in case there are, the # returned props should overwrite the input props: props = copy.deepcopy(properties) props.update(result) name = props.get(self._name_prop, None) uri = props[self._uri_prop] hba = Hba(self, uri, name, props) self._name_uri_cache.update(name, uri) return hba class Hba(BaseResource): """ Representation of an :term:`HBA`. Derived from :class:`~zhmcclient.BaseResource`; see there for common methods and attributes. For the properties of an HBA resource, see section 'Data model - HBA Element Object' in section 'Partition object' in the :term:`HMC API` book. Objects of this class are not directly created by the user; they are returned from creation or list functions on their manager object (in this case, :class:`~zhmcclient.HbaManager`). """ def __init__(self, manager, uri, name=None, properties=None): # This function should not go into the docs. # Parameters: # manager (:class:`~zhmcclient.HbaManager`): # Manager object for this resource object. # uri (string): # Canonical URI path of the resource. # name (string): # Name of the resource. # properties (dict): # Properties to be set for this resource object. May be `None` or # empty. 
assert isinstance(manager, HbaManager), \ "Hba init: Expected manager type %s, got %s" % \ (HbaManager, type(manager)) super(Hba, self).__init__(manager, uri, name, properties) @logged_api_call def delete(self): """ Delete this HBA. Authorization requirements: * Object-access permission to the Partition containing this HBA. * Task permission to the "Partition Details" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # pylint: disable=protected-access self.manager.session.delete(self._uri) self.manager._name_uri_cache.delete( self.get_properties_local(self.manager._name_prop, None)) parent_hba_uris = self.manager.parent.get_properties_local( 'hba-uris') if parent_hba_uris: try: parent_hba_uris.remove(self._uri) except ValueError: pass @logged_api_call def update_properties(self, properties): """ Update writeable properties of this HBA. This method serializes with other methods that access or change properties on the same Python object. Authorization requirements: * Object-access permission to the Partition containing this HBA. * **TBD: Verify:** Object-access permission to the backing Adapter for this HBA. * Task permission to the "Partition Details" task. Parameters: properties (dict): New values for the properties to be updated. Properties not to be updated are omitted. Allowable properties are the properties with qualifier (w) in section 'Data model - HBA Element Object' in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # pylint: disable=protected-access self.manager.session.post(self.uri, body=properties) is_rename = self.manager._name_prop in properties if is_rename: # Delete the old name from the cache self.manager._name_uri_cache.delete(self.name) self.update_properties_local(copy.deepcopy(properties)) if is_rename: # Add the new name to the cache self.manager._name_uri_cache.update(self.name, self.uri) @logged_api_call def reassign_port(self, port): """ Reassign this HBA to a new underlying :term:`FCP port`. This method performs the HMC operation "Reassign Storage Adapter Port". Authorization requirements: * Object-access permission to the Partition containing this HBA. * Object-access permission to the Adapter with the new Port. * Task permission to the "Partition Details" task. Parameters: port (:class:`~zhmcclient.Port`): :term:`FCP port` to be used. Raises: :exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of operation "Reassign Storage Adapter Port" in the :term:`HMC API` book. :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'adapter-port-uri': port.uri} self.manager.session.post( self._uri + '/operations/reassign-storage-adapter-port', body=body) self.update_properties_local(body)
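# Illustrative end-to-end sketch (not part of the original module): the HMC
# host, credentials and resource names below are placeholders, and it assumes
# the public zhmcclient entry points (Session, Client) in addition to the
# HbaManager/Hba classes defined above. It lists the HBAs of a partition and
# reassigns one of them to another FCP port. Note that partition.hbas is None
# when the "dpm-storage-management" feature is enabled.
if __name__ == "__main__":
    import zhmcclient

    session = zhmcclient.Session("hmc.example.com", "user", "password")
    client = zhmcclient.Client(session)

    cpc = client.cpcs.find(name="CPC1")            # CPC must be in DPM mode
    partition = cpc.partitions.find(name="PART1")

    # List the HBAs of the partition with their full property sets.
    for hba in partition.hbas.list(full_properties=True):
        print(hba.name, hba.get_property("wwpn"))

    # Reassign the first HBA to port 1 of a different FCP adapter.
    hba = partition.hbas.list()[0]
    adapter = cpc.adapters.find(name="FCP #2")
    port = adapter.ports.find(index=1)
    hba.reassign_port(port)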
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v10.services.types import ad_group_feed_service from .base import AdGroupFeedServiceTransport, DEFAULT_CLIENT_INFO class AdGroupFeedServiceGrpcTransport(AdGroupFeedServiceTransport): """gRPC backend transport for AdGroupFeedService. Service to manage ad group feeds. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _stubs: Dict[str, Callable] def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn( "client_cert_source is deprecated", DeprecationWarning ) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = ( SslCredentials().ssl_credentials ) else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. 
credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def mutate_ad_group_feeds( self, ) -> Callable[ [ad_group_feed_service.MutateAdGroupFeedsRequest], ad_group_feed_service.MutateAdGroupFeedsResponse, ]: r"""Return a callable for the mutate ad group feeds method over gRPC. Creates, updates, or removes ad group feeds. Operation statuses are returned. List of thrown errors: `AdGroupFeedError <>`__ `AuthenticationError <>`__ `AuthorizationError <>`__ `CollectionSizeError <>`__ `DatabaseError <>`__ `DistinctError <>`__ `FieldError <>`__ `FunctionError <>`__ `FunctionParsingError <>`__ `HeaderError <>`__ `IdError <>`__ `InternalError <>`__ `MutateError <>`__ `NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__ `SizeLimitError <>`__ `StringFormatError <>`__ `StringLengthError <>`__ Returns: Callable[[~.MutateAdGroupFeedsRequest], ~.MutateAdGroupFeedsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_ad_group_feeds" not in self._stubs: self._stubs[ "mutate_ad_group_feeds" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v10.services.AdGroupFeedService/MutateAdGroupFeeds", request_serializer=ad_group_feed_service.MutateAdGroupFeedsRequest.serialize, response_deserializer=ad_group_feed_service.MutateAdGroupFeedsResponse.deserialize, ) return self._stubs["mutate_ad_group_feeds"] def close(self): self.grpc_channel.close() __all__ = ("AdGroupFeedServiceGrpcTransport",)
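# Illustrative sketch (not part of the generated module): credentials are taken
# from Application Default Credentials here, which is an assumption -- real
# Google Ads calls are normally issued through the higher-level GoogleAdsClient,
# which also supplies the required developer token. The sketch only shows how a
# channel and transport are built from the constructor and classmethod above.
if __name__ == "__main__":
    import google.auth

    credentials, _ = google.auth.default()

    # Either let the constructor create its own channel ...
    transport = AdGroupFeedServiceGrpcTransport(credentials=credentials)

    # ... or create a channel explicitly and pass it in (the ``credentials``
    # argument is then ignored in favor of the channel's own credentials).
    channel = AdGroupFeedServiceGrpcTransport.create_channel(credentials=credentials)
    transport_from_channel = AdGroupFeedServiceGrpcTransport(channel=channel)

    # The RPC is exposed as a property returning the underlying stub callable.
    mutate = transport_from_channel.mutate_ad_group_feeds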
import warnings from typing import Any, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torchvision.models import inception as inception_module from torchvision.models.inception import InceptionOutputs from ..._internally_replaced_utils import load_state_dict_from_url from .utils import _fuse_modules, _replace_relu, quantize_model __all__ = [ "QuantizableInception3", "inception_v3", ] quant_model_urls = { # fp32 weights ported from TensorFlow, quantized in PyTorch "inception_v3_google_fbgemm": "https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth" } class QuantizableBasicConv2d(inception_module.BasicConv2d): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: x = self.conv(x) x = self.bn(x) x = self.relu(x) return x def fuse_model(self, is_qat: Optional[bool] = None) -> None: _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True) class QuantizableInceptionA(inception_module.InceptionA): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: outputs = self._forward(x) return self.myop.cat(outputs, 1) class QuantizableInceptionB(inception_module.InceptionB): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: outputs = self._forward(x) return self.myop.cat(outputs, 1) class QuantizableInceptionC(inception_module.InceptionC): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: outputs = self._forward(x) return self.myop.cat(outputs, 1) class QuantizableInceptionD(inception_module.InceptionD): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop = nn.quantized.FloatFunctional() def forward(self, x: Tensor) -> Tensor: outputs = self._forward(x) return self.myop.cat(outputs, 1) class QuantizableInceptionE(inception_module.InceptionE): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] self.myop1 = nn.quantized.FloatFunctional() self.myop2 = nn.quantized.FloatFunctional() self.myop3 = nn.quantized.FloatFunctional() def _forward(self, x: Tensor) -> List[Tensor]: branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)] branch3x3 = self.myop1.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), 
self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = self.myop2.cat(branch3x3dbl, 1) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x: Tensor) -> Tensor: outputs = self._forward(x) return self.myop3.cat(outputs, 1) class QuantizableInceptionAux(inception_module.InceptionAux): # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659 def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc] class QuantizableInception3(inception_module.Inception3): def __init__( self, num_classes: int = 1000, aux_logits: bool = True, transform_input: bool = False, ) -> None: super().__init__( num_classes=num_classes, aux_logits=aux_logits, transform_input=transform_input, inception_blocks=[ QuantizableBasicConv2d, QuantizableInceptionA, QuantizableInceptionB, QuantizableInceptionC, QuantizableInceptionD, QuantizableInceptionE, QuantizableInceptionAux, ], ) self.quant = torch.ao.quantization.QuantStub() self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x: Tensor) -> InceptionOutputs: x = self._transform_input(x) x = self.quant(x) x, aux = self._forward(x) x = self.dequant(x) aux_defined = self.training and self.aux_logits if torch.jit.is_scripting(): if not aux_defined: warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple") return InceptionOutputs(x, aux) else: return self.eager_outputs(x, aux) def fuse_model(self, is_qat: Optional[bool] = None) -> None: r"""Fuse conv/bn/relu modules in inception model Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization. Model is modified in place. Note that this operation does not change numerics and the model after modification is in floating point """ for m in self.modules(): if type(m) is QuantizableBasicConv2d: m.fuse_model(is_qat) def inception_v3( pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any, ) -> QuantizableInception3: r"""Inception v3 model architecture from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. .. note:: **Important**: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly. Note that quantize = True returns a quantized model with 8 bit weights. Quantized models only support inference and run on CPUs. GPU inference is not yet supported Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr quantize (bool): If True, return a quantized version of the model aux_logits (bool): If True, add an auxiliary branch that can improve training. Default: *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: True if ``pretrained=True``, else False. 
""" if pretrained: if "transform_input" not in kwargs: kwargs["transform_input"] = True if "aux_logits" in kwargs: original_aux_logits = kwargs["aux_logits"] kwargs["aux_logits"] = True else: original_aux_logits = False model = QuantizableInception3(**kwargs) _replace_relu(model) if quantize: # TODO use pretrained as a string to specify the backend backend = "fbgemm" quantize_model(model, backend) else: assert pretrained in [True, False] if pretrained: if quantize: if not original_aux_logits: model.aux_logits = False model.AuxLogits = None model_url = quant_model_urls["inception_v3_google_" + backend] else: model_url = inception_module.model_urls["inception_v3_google"] state_dict = load_state_dict_from_url(model_url, progress=progress) model.load_state_dict(state_dict) if not quantize: if not original_aux_logits: model.aux_logits = False model.AuxLogits = None return model
from quex.engine.misc.file_in import \ open_file_or_die, \ write_safely_and_close, \ get_current_line_info_number, \ error_msg from quex.engine.generator.code_fragment_base import CodeFragment from quex.blackboard import setup as Setup UserCodeFragment_OpenLinePragma = { #___________________________________________________________________________________ # Line pragmas allow to direct the 'virtual position' of a program in a file # that was generated to its origin. That is, if an error occurs in line in a # C-program which is actually is the pasted code from a certain line in a .qx # file, then the compiler would print out the line number from the .qx file. # # This mechanism relies on line pragmas of the form '#line xx "filename"' telling # the compiler to count the lines from xx and consider them as being from filename. # # During code generation, the pasted code segments need to be followed by line # pragmas resetting the original C-file as the origin, so that errors that occur # in the 'real' code are not considered as coming from the .qx files. # Therefore, the code generator introduces placeholders that are to be replaced # once the whole code is generated. # # ...[Language][0] = the placeholder until the whole code is generated # ...[Language][1] = template containing 'NUMBER' and 'FILENAME' that # are to replaced in order to get the resetting line pragma. #___________________________________________________________________________________ "C": [ [ '/* POST-ADAPTION: FILL IN APPROPRIATE LINE PRAGMA */', '# line NUMBER "FILENAME"' ], ['/* POST-ADAPTION: FILL IN APPROPRIATE LINE PRAGMA CppTemplate.txt */', '# line NUMBER "CppTemplate.txt"' ] ], } class UserCodeFragment(CodeFragment): def __init__(self, Code, Filename, LineN, LanguageDB=None): assert isinstance(Code, (str, unicode)) assert isinstance(LanguageDB, dict) or LanguageDB is None assert isinstance(Filename, (str, unicode)) assert isinstance(LineN, (int, long, float)) self.filename = Filename self.line_n = LineN CodeFragment.__init__(self, Code) def get_code(self): return self.adorn_with_source_reference(self.get_pure_code()) def adorn_with_source_reference(self, Code, ReturnToSourceF=True): if len(Code.strip()) == 0: return "" # Even under Windows (tm), the '/' is accepted. Thus do not rely on 'normpath' norm_filename = Setup.get_file_reference(self.filename) txt = '\n# line %i "%s"\n' % (self.line_n, norm_filename) txt += Code if ReturnToSourceF: if txt[-1] != "\n": txt = txt + "\n" txt += get_return_to_source_reference() return txt def get_return_to_source_reference(): return "\n" + UserCodeFragment_OpenLinePragma["C"][0][0] + "\n" def UserCodeFragment_straighten_open_line_pragmas(filename, Language): if Language not in UserCodeFragment_OpenLinePragma.keys(): return fh = open_file_or_die(filename) norm_filename = Setup.get_file_reference(filename) new_content = [] line_n = 0 LinePragmaInfoList = UserCodeFragment_OpenLinePragma[Language] for line in fh.readlines(): line_n += 1 if Language == "C": for info in LinePragmaInfoList: if line.find(info[0]) == -1: continue line = info[1] # Since by some definition, line number pragmas < 32768; let us avoid # compiler warnings by setting line_n = min(line_n, 32768) line = line.replace("NUMBER", repr(int(min(line_n + 1, 32767)))) # Even under Windows (tm), the '/' is accepted. 
Thus do not rely on 'normpath' line = line.replace("FILENAME", norm_filename) if len(line) == 0 or line[-1] != "\n": line = line + "\n" new_content.append(line) fh.close() write_safely_and_close(filename, "".join(new_content)) class GeneratedCode(UserCodeFragment): def __init__(self, GeneratorFunction, FileName=-1, LineN=None): self.function = GeneratorFunction self.data = { "indentation_counter_terminal_id": None, } UserCodeFragment.__init__(self, "", FileName, LineN) def get_code(self): return self.function(self.data) class PatternActionInfo: def __init__(self, ThePattern, Action, PatternStr="", IL = None, ModeName="", Comment=""): assert Action is None or \ issubclass(Action.__class__, CodeFragment) or \ type(Action) in [str, unicode] assert (ThePattern.__class__.__name__ == "Pattern") or (ThePattern is None) self.__pattern_state_machine = ThePattern if type(Action) in [str, unicode]: self.__action = CodeFragment(Action) else: self.__action = Action self.__pattern_str = PatternStr self.mode_name = ModeName self.comment = Comment def pattern(self): return self.__pattern_state_machine def pattern_string(self): return self.__pattern_str @property def line_n(self): return self.action().line_n @property def file_name(self): return self.action().filename def action(self): return self.__action def set_action(self, Action): assert Action is None or \ issubclass(Action.__class__, CodeFragment) or \ type(Action) in [str, unicode] self.__action = Action def get_action_location(self): """RETURNS: FileName, LineN in case that it can be specified. -1, None in case it cannot be specified. This corresponds to the required input for 'error_msg'. """ if hasattr(self.__action, "filename") and hasattr(self.__action, "line_n"): return self.__action.filename, self.__action.line_n else: return -1, None def pattern_index(self): return self.pattern_state_machine().get_id() def __repr__(self): txt = "" txt += "self.mode_name = " + repr(self.mode_name) + "\n" txt += "self.pattern_string = " + repr(self.pattern_string()) + "\n" txt += "self.pattern = \n" + repr(self.pattern()).replace("\n", "\n ") txt += "self.action = " + repr(self.action().get_code()) + "\n" if self.action().__class__ == UserCodeFragment: txt += "self.filename = " + repr(self.action().filename) + "\n" txt += "self.line_n = " + repr(self.action().line_n) + "\n" txt += "self.pattern_index = " + repr(self.pattern().sm.get_id()) + "\n" return txt class LocalizedParameter: def __init__(self, Name, Default, FH=-1): self.name = Name self.__default = Default if FH == -1: self.__value = None self.file_name = "" self.line_n = -1 else: self.__value = Default self.file_name = FH.name self.line_n = get_current_line_info_number(FH) self.__pattern_string = None def set(self, Value, fh): if self.__value is not None: error_msg("%s has been defined more than once.\n" % self.name, fh, DontExitF=True) error_msg("previous definition has been here.\n", self.file_name, self.line_n) self.__value = Value self.file_name = fh.name self.line_n = get_current_line_info_number(fh) def get(self): if self.__value is not None: return self.__value return self.__default def set_pattern_string(self, Value): self.__pattern_string = Value def pattern_string(self): return self.__pattern_string def get_action_location(self): """RETURNS: FileName, LineN in case that it can be specified. -1, None in case it cannot be specified. This corresponds to the required input for 'error_msg'. """ return self.file_name, self.line_n @property def comment(self): return self.name
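# Illustrative sketch (not part of the original module): the file name, line
# number and user code below are made up for the example. It shows the shape
# of the text that adorn_with_source_reference() produces -- an opening
# '#line' pragma pointing back at the .qx source, the pasted user code, and
# the placeholder comment that UserCodeFragment_straighten_open_line_pragmas()
# later replaces with a pragma for the generated C file.
if __name__ == "__main__":
    user_code = 'printf("matched a keyword\\n");'
    adorned = ('\n# line 42 "simple.qx"\n'
               + user_code + "\n"
               + get_return_to_source_reference())
    print(adorned)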
"""Worker which runs all computations on Cloud VMs. Evaluation of competition is split into work pieces. One work piece is a either evaluation of an attack on a batch of images or evaluation of a defense on a batch of adversarial images. All pieces of attack work are independent from each other and could be run in parallel. Same for pieces of defense work - they are independent from each other and could be run in parallel. But defense work could be run only after all attack work is completed. Worker first runs all attack pieces, by querying next piece of undone work and running it. After all attack pieces are done, worker runs all defense pieces in a similar way. Before workers could be started, datastore has to be populated by master with description of work to be done. See master.py for details. NOTE: Worker is designed to run on linux machine with NVidia docker installed. Worker generally needs administrative privilege to run properly. Also worker relies on very specific directory structure created in home directory. That's why it's highly recommended to run worker only in VM. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from builtins import int # long in python 2 import argparse import json import logging import os import random import shutil import subprocess import time import uuid from six import iteritems import eval_lib from cleverhans.utils import shell_call # Sleep time while waiting for next available piece of work SLEEP_TIME = 30 SLEEP_TIME_SHORT = 10 # Time limit to run one pice of work SUBMISSION_TIME_LIMIT = 500 # Set of local temporary directories and files LOCAL_EVAL_ROOT_DIR = os.path.expanduser("~/competition_eval") LOCAL_DATASET_DIR = os.path.expanduser("~/competition_eval/dataset_images") LOCAL_SUBMISSIONS_DIR = os.path.expanduser("~/competition_eval/submissions") LOCAL_INPUT_DIR = os.path.expanduser("~/competition_eval/input") LOCAL_OUTPUT_DIR = os.path.expanduser("~/competition_eval/output") LOCAL_PROCESSED_OUTPUT_DIR = os.path.expanduser("~/competition_eval/processed_output") LOCAL_ZIPPED_OUTPUT_DIR = os.path.expanduser("~/competition_eval/zipped_output") LOCAL_DATASET_METADATA_FILE = os.path.expanduser("~/competition_eval/dataset_meta.csv") LOCAL_DATASET_COPY = os.path.expanduser("~/competition_data/dataset") # Types of submissions TYPE_TARGETED = "targeted" TYPE_NONTARGETED = "nontargeted" TYPE_DEFENSE = "defense" # Extraction commands for various types of archive EXTRACT_COMMAND = { ".zip": ["unzip", "${src}", "-d", "${dst}"], ".tar": ["tar", "xvf", "${src}", "-C", "${dst}"], ".tar.gz": ["tar", "xvzf", "${src}", "-C", "${dst}"], } # Docker binary to use DOCKER_BINARY = "docker" DOCKER_NVIDIA_RUNTIME = "--runtime=nvidia" # Names of relevant fields in submission metadata file METADATA_CONTAINER = "container_gpu" METADATA_ENTRY_POINT = "entry_point" METADATA_TYPE = "type" # Mapping from submission type in metadata to submission type used in worker METADATA_JSON_TYPE_TO_TYPE = { "attack": TYPE_NONTARGETED, "targeted_attack": TYPE_TARGETED, "defense": TYPE_DEFENSE, } def make_directory_writable(dirname): """Makes directory readable and writable by everybody. If you run something inside Docker container and it writes files, then these files will be written as root user with restricted permissions. So to be able to read/modify these files outside of Docker you have to change permissions to be world readable and writable. 
Args: dirname: name of the directory Returns: True if operation was successfull """ shell_call( [ "docker", "run", "-v", "{0}:/output_dir".format(dirname), "busybox:1.27.2", "chmod", "-R", "a+rwx", "/output_dir", ] ) def sudo_remove_dirtree(dir_name): """Removes directory tree as a superuser. Args: dir_name: name of the directory to remove. This function is necessary to cleanup directories created from inside a Docker, since they usually written as a root, thus have to be removed as a root. """ try: subprocess.check_output(["sudo", "rm", "-rf", dir_name]) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t remove directory {0}".format(dir_name), e) class WorkerError(Exception): """Error which happen during evaluation of submission. To simplify error handling, worker only raises this type of exception. Exceptions of different types raised by other modules encapsulated into WorkerError by the worker. """ def __init__(self, message, exc=None): """Initializes WorkerError. Args: message: error message exc: optional underlying exception. """ super(WorkerError, self).__init__() self.msg = message self.exc = exc def __str__(self): """Returns human readable string representation of the exception.""" if self.exc: return "{0}\nUnderlying exception:\n{1}".format(self.msg, self.exc) else: return self.msg def get_id_of_running_docker(container_name): """Returns ID of running docker container.""" return shell_call( [DOCKER_BINARY, "ps", "-q", "--filter=name={}".format(container_name)] ).strip() def is_docker_still_running(container_name): """Returns whether given Docker container is still running.""" return bool(get_id_of_running_docker(container_name)) def kill_docker_container(container_name): """Kills given docker container.""" docker_id = get_id_of_running_docker(container_name) shell_call([DOCKER_BINARY, "stop", docker_id]) class ExecutableSubmission(object): """Base class which is used to run submissions.""" def __init__(self, submission_id, submissions, storage_bucket): """Initializes ExecutableSubmission. 
Args: submission_id: ID of the submissions submissions: instance of CompetitionSubmissions with all submissions storage_bucket: storage bucket where all submissions are stored Raises: WorkerError: if submission was not found """ self.submission_id = submission_id self.storage_bucket = storage_bucket self.type = None self.submission = None if submission_id in submissions.attacks: self.type = TYPE_NONTARGETED self.submission = submissions.attacks[submission_id] elif submission_id in submissions.targeted_attacks: self.type = TYPE_TARGETED self.submission = submissions.targeted_attacks[submission_id] elif submission_id in submissions.defenses: self.type = TYPE_DEFENSE self.submission = submissions.defenses[submission_id] else: raise WorkerError( 'Submission with ID "{0}" not found'.format(submission_id) ) self.submission_dir = None self.extracted_submission_dir = None def download(self): """Method which downloads submission to local directory.""" # Structure of the download directory: # submission_dir=LOCAL_SUBMISSIONS_DIR/submission_id # submission_dir/s.ext <-- archived submission # submission_dir/extracted <-- extracted submission # Check whether submission is already there if self.extracted_submission_dir: return self.submission_dir = os.path.join(LOCAL_SUBMISSIONS_DIR, self.submission_id) if os.path.isdir(self.submission_dir) and os.path.isdir( os.path.join(self.submission_dir, "extracted") ): # submission already there, just re-read metadata self.extracted_submission_dir = os.path.join( self.submission_dir, "extracted" ) with open( os.path.join(self.extracted_submission_dir, "metadata.json"), "r" ) as f: meta_json = json.load(f) self.container_name = str(meta_json[METADATA_CONTAINER]) self.entry_point = str(meta_json[METADATA_ENTRY_POINT]) return # figure out submission location in the Cloud and determine extractor submission_cloud_path = os.path.join( "gs://", self.storage_bucket, self.submission.path ) extract_command_tmpl = None extension = None for k, v in iteritems(EXTRACT_COMMAND): if submission_cloud_path.endswith(k): extension = k extract_command_tmpl = v break if not extract_command_tmpl: raise WorkerError("Unsupported submission extension") # download archive try: os.makedirs(self.submission_dir) tmp_extract_dir = os.path.join(self.submission_dir, "tmp") os.makedirs(tmp_extract_dir) download_path = os.path.join(self.submission_dir, "s" + extension) try: logging.info( "Downloading submission from %s to %s", submission_cloud_path, download_path, ) shell_call(["gsutil", "cp", submission_cloud_path, download_path]) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t copy submission locally", e) # extract archive try: shell_call(extract_command_tmpl, src=download_path, dst=tmp_extract_dir) except subprocess.CalledProcessError as e: # proceed even if extraction returned non zero error code, # sometimes it's just warning logging.warning( "Submission extraction returned non-zero error code. " "It may be just a warning, continuing execution. 
" "Error: %s", e, ) try: make_directory_writable(tmp_extract_dir) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t make submission directory writable", e) # determine root of the submission tmp_root_dir = tmp_extract_dir root_dir_content = [d for d in os.listdir(tmp_root_dir) if d != "__MACOSX"] if len(root_dir_content) == 1 and os.path.isdir( os.path.join(tmp_root_dir, root_dir_content[0]) ): tmp_root_dir = os.path.join(tmp_root_dir, root_dir_content[0]) # move files to extract subdirectory self.extracted_submission_dir = os.path.join( self.submission_dir, "extracted" ) try: shell_call( ["mv", os.path.join(tmp_root_dir), self.extracted_submission_dir] ) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t move submission files", e) # read metadata file try: with open( os.path.join(self.extracted_submission_dir, "metadata.json"), "r" ) as f: meta_json = json.load(f) except IOError as e: raise WorkerError( "Can" 't read metadata.json for submission "{0}"'.format( self.submission_id ), e, ) try: self.container_name = str(meta_json[METADATA_CONTAINER]) self.entry_point = str(meta_json[METADATA_ENTRY_POINT]) type_from_meta = METADATA_JSON_TYPE_TO_TYPE[meta_json[METADATA_TYPE]] except KeyError as e: raise WorkerError("Invalid metadata.json file", e) if type_from_meta != self.type: raise WorkerError( "Inconsistent submission type in metadata: " + type_from_meta + " vs " + self.type ) except WorkerError as e: self.extracted_submission_dir = None sudo_remove_dirtree(self.submission_dir) raise def temp_copy_extracted_submission(self): """Creates a temporary copy of extracted submission. When executed, submission is allowed to modify it's own directory. So to ensure that submission does not pass any data between runs, new copy of the submission is made before each run. After a run temporary copy of submission is deleted. Returns: directory where temporary copy is located """ tmp_copy_dir = os.path.join(self.submission_dir, "tmp_copy") shell_call( ["cp", "-R", os.path.join(self.extracted_submission_dir), tmp_copy_dir] ) return tmp_copy_dir def run_without_time_limit(self, cmd): """Runs docker command without time limit. Args: cmd: list with the command line arguments which are passed to docker binary Returns: how long it took to run submission in seconds Raises: WorkerError: if error occurred during execution of the submission """ cmd = [DOCKER_BINARY, "run", DOCKER_NVIDIA_RUNTIME] + cmd logging.info("Docker command: %s", " ".join(cmd)) start_time = time.time() retval = subprocess.call(cmd) elapsed_time_sec = int(time.time() - start_time) logging.info("Elapsed time of attack: %d", elapsed_time_sec) logging.info("Docker retval: %d", retval) if retval != 0: logging.warning("Docker returned non-zero retval: %d", retval) raise WorkerError("Docker returned non-zero retval " + str(retval)) return elapsed_time_sec def run_with_time_limit(self, cmd, time_limit=SUBMISSION_TIME_LIMIT): """Runs docker command and enforces time limit. Args: cmd: list with the command line arguments which are passed to docker binary after run time_limit: time limit, in seconds. Negative value means no limit. 
Returns: how long it took to run submission in seconds Raises: WorkerError: if error occurred during execution of the submission """ if time_limit < 0: return self.run_without_time_limit(cmd) container_name = str(uuid.uuid4()) cmd = [ DOCKER_BINARY, "run", DOCKER_NVIDIA_RUNTIME, "--detach", "--name", container_name, ] + cmd logging.info("Docker command: %s", " ".join(cmd)) logging.info("Time limit %d seconds", time_limit) retval = subprocess.call(cmd) start_time = time.time() elapsed_time_sec = 0 while is_docker_still_running(container_name): elapsed_time_sec = int(time.time() - start_time) if elapsed_time_sec < time_limit: time.sleep(1) else: kill_docker_container(container_name) logging.warning("Submission was killed because run out of time") logging.info("Elapsed time of submission: %d", elapsed_time_sec) logging.info("Docker retval: %d", retval) if retval != 0: logging.warning("Docker returned non-zero retval: %d", retval) raise WorkerError("Docker returned non-zero retval " + str(retval)) return elapsed_time_sec class AttackSubmission(ExecutableSubmission): """Class to run attack submissions.""" def __init__(self, submission_id, submissions, storage_bucket): """Initializes AttackSubmission. Args: submission_id: ID of the submission submissions: instance of CompetitionSubmissions with all submissions storage_bucket: storage bucket where all submissions are stored Raises: WorkerError: if submission has incorrect type """ super(AttackSubmission, self).__init__( submission_id, submissions, storage_bucket ) if (self.type != TYPE_TARGETED) and (self.type != TYPE_NONTARGETED): raise WorkerError( 'Incorrect attack type for submission "{0}"'.format(submission_id) ) def run(self, input_dir, output_dir, epsilon): """Runs attack inside Docker. Args: input_dir: directory with input (dataset). output_dir: directory where output (adversarial images) should be written. epsilon: maximum allowed size of adversarial perturbation, should be in range [0, 255]. Returns: how long it took to run submission in seconds """ logging.info("Running attack %s", self.submission_id) tmp_run_dir = self.temp_copy_extracted_submission() cmd = [ "--network=none", "-m=24g", "--cpus=3.75", "-v", "{0}:/input_images:ro".format(input_dir), "-v", "{0}:/output_images".format(output_dir), "-v", "{0}:/code".format(tmp_run_dir), "-w", "/code", self.container_name, "./" + self.entry_point, "/input_images", "/output_images", str(epsilon), ] elapsed_time_sec = self.run_with_time_limit(cmd) sudo_remove_dirtree(tmp_run_dir) return elapsed_time_sec class DefenseSubmission(ExecutableSubmission): """Helper class to run one defense submission.""" def __init__(self, submission_id, submissions, storage_bucket): """Initializes DefenseSubmission. Args: submission_id: ID of the submission submissions: instance of CompetitionSubmissions with all submissions storage_bucket: storage bucket where all submissions are stored Raises: WorkerError: if submission has incorrect type """ super(DefenseSubmission, self).__init__( submission_id, submissions, storage_bucket ) if self.type != TYPE_DEFENSE: raise WorkerError( 'Incorrect defense type for submission "{0}"'.format(submission_id) ) def run(self, input_dir, output_file_path): """Runs defense inside Docker. Args: input_dir: directory with input (adversarial images). output_file_path: path of the output file. 
Returns: how long it took to run submission in seconds """ logging.info("Running defense %s", self.submission_id) tmp_run_dir = self.temp_copy_extracted_submission() output_dir = os.path.dirname(output_file_path) output_filename = os.path.basename(output_file_path) cmd = [ "--network=none", "-m=24g", "--cpus=3.75", "-v", "{0}:/input_images:ro".format(input_dir), "-v", "{0}:/output_data".format(output_dir), "-v", "{0}:/code".format(tmp_run_dir), "-w", "/code", self.container_name, "./" + self.entry_point, "/input_images", "/output_data/" + output_filename, ] elapsed_time_sec = self.run_with_time_limit(cmd) sudo_remove_dirtree(tmp_run_dir) return elapsed_time_sec class EvaluationWorker(object): """Class which encapsulate logit of the worker. Main entry point of this class is EvaluationWorker.run_work method which performs cleanup of temporary directories, then runs EvaluationWorker.run_attacks and EvaluationWorker.run_defenses """ def __init__( self, worker_id, storage_client, datastore_client, storage_bucket, round_name, dataset_name, blacklisted_submissions="", num_defense_shards=None, ): """Initializes EvaluationWorker. Args: worker_id: ID of the worker storage_client: instance of eval_lib.CompetitionStorageClient datastore_client: instance of eval_lib.CompetitionDatastoreClient storage_bucket: name of the Google Cloud Storage bucket where all competition data is stored round_name: name of the competition round dataset_name: name of the dataset to use, typically 'dev' of 'final' blacklisted_submissions: optional list of blacklisted submissions which are excluded from evaluation num_defense_shards: optional number of shards to use for evaluation of defenses """ self.worker_id = int(worker_id) self.storage_client = storage_client self.datastore_client = datastore_client self.storage_bucket = storage_bucket self.round_name = round_name self.dataset_name = dataset_name self.blacklisted_submissions = [ s.strip() for s in blacklisted_submissions.split(",") ] if num_defense_shards: self.num_defense_shards = int(num_defense_shards) else: self.num_defense_shards = None logging.info("Number of defense shards: %s", str(self.num_defense_shards)) # init client classes self.submissions = eval_lib.CompetitionSubmissions( datastore_client=self.datastore_client, storage_client=self.storage_client, round_name=self.round_name, ) self.dataset_batches = eval_lib.DatasetBatches( datastore_client=self.datastore_client, storage_client=self.storage_client, dataset_name=self.dataset_name, ) self.adv_batches = eval_lib.AversarialBatches( datastore_client=self.datastore_client ) self.attack_work = eval_lib.AttackWorkPieces( datastore_client=self.datastore_client ) self.defense_work = eval_lib.DefenseWorkPieces( datastore_client=self.datastore_client ) self.class_batches = eval_lib.ClassificationBatches( datastore_client=self.datastore_client, storage_client=self.storage_client, round_name=self.round_name, ) # whether data was initialized self.attacks_data_initialized = False self.defenses_data_initialized = False # dataset metadata self.dataset_meta = None def read_dataset_metadata(self): """Read `dataset_meta` field from bucket""" if self.dataset_meta: return shell_call( [ "gsutil", "cp", "gs://" + self.storage_client.bucket_name + "/" + "dataset/" + self.dataset_name + "_dataset.csv", LOCAL_DATASET_METADATA_FILE, ] ) with open(LOCAL_DATASET_METADATA_FILE, "r") as f: self.dataset_meta = eval_lib.DatasetMetadata(f) def fetch_attacks_data(self): """Initializes data necessary to execute attacks. 
This method could be called multiple times, only first call does initialization, subsequent calls are noop. """ if self.attacks_data_initialized: return # init data from datastore self.submissions.init_from_datastore() self.dataset_batches.init_from_datastore() self.adv_batches.init_from_datastore() # copy dataset locally if not os.path.exists(LOCAL_DATASET_DIR): os.makedirs(LOCAL_DATASET_DIR) eval_lib.download_dataset( self.storage_client, self.dataset_batches, LOCAL_DATASET_DIR, os.path.join(LOCAL_DATASET_COPY, self.dataset_name, "images"), ) # download dataset metadata self.read_dataset_metadata() # mark as initialized self.attacks_data_initialized = True def run_attack_work(self, work_id): """Runs one attack work. Args: work_id: ID of the piece of work to run Returns: elapsed_time_sec, submission_id - elapsed time and id of the submission Raises: WorkerError: if error occurred during execution. """ adv_batch_id = self.attack_work.work[work_id]["output_adversarial_batch_id"] adv_batch = self.adv_batches[adv_batch_id] dataset_batch_id = adv_batch["dataset_batch_id"] submission_id = adv_batch["submission_id"] epsilon = self.dataset_batches[dataset_batch_id]["epsilon"] logging.info( "Attack work piece: " 'dataset_batch_id="%s" submission_id="%s" ' "epsilon=%d", dataset_batch_id, submission_id, epsilon, ) if submission_id in self.blacklisted_submissions: raise WorkerError("Blacklisted submission") # get attack attack = AttackSubmission(submission_id, self.submissions, self.storage_bucket) attack.download() # prepare input input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id) if attack.type == TYPE_TARGETED: # prepare file with target classes target_class_filename = os.path.join(input_dir, "target_class.csv") self.dataset_meta.save_target_classes_for_batch( target_class_filename, self.dataset_batches, dataset_batch_id ) # prepare output directory if os.path.exists(LOCAL_OUTPUT_DIR): sudo_remove_dirtree(LOCAL_OUTPUT_DIR) os.mkdir(LOCAL_OUTPUT_DIR) if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR): shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR) os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR) if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR): shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR) os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR) # run attack elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon) if attack.type == TYPE_TARGETED: # remove target class file os.remove(target_class_filename) # enforce epsilon and compute hashes image_hashes = eval_lib.enforce_epsilon_and_compute_hash( input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon ) if not image_hashes: logging.warning("No images saved by the attack.") return elapsed_time_sec, submission_id # write images back to datastore # rename images and add information to adversarial batch for clean_image_id, hash_val in iteritems(image_hashes): # we will use concatenation of batch_id and image_id # as adversarial image id and as a filename of adversarial images adv_img_id = adv_batch_id + "_" + clean_image_id # rename the image os.rename( os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, clean_image_id + ".png"), os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, adv_img_id + ".png"), ) # populate values which will be written to datastore image_path = "{0}/adversarial_images/{1}/{1}.zip/{2}.png".format( self.round_name, adv_batch_id, adv_img_id ) # u'' + foo is a a python 2/3 compatible way of casting foo to unicode adv_batch["images"][adv_img_id] = { "clean_image_id": u"" + str(clean_image_id), "image_path": u"" + str(image_path), "image_hash": u"" + str(hash_val), } # archive 
all images and copy to storage zipped_images_filename = os.path.join( LOCAL_ZIPPED_OUTPUT_DIR, adv_batch_id + ".zip" ) try: logging.debug( "Compressing adversarial images to %s", zipped_images_filename ) shell_call( ["zip", "-j", "-r", zipped_images_filename, LOCAL_PROCESSED_OUTPUT_DIR] ) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t make archive from adversarial iamges", e) # upload archive to storage dst_filename = "{0}/adversarial_images/{1}/{1}.zip".format( self.round_name, adv_batch_id ) logging.debug("Copying archive with adversarial images to %s", dst_filename) self.storage_client.new_blob(dst_filename).upload_from_filename( zipped_images_filename ) # writing adv batch to datastore logging.debug("Writing adversarial batch to datastore") self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id) return elapsed_time_sec, submission_id def run_attacks(self): """Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it. """ logging.info("******** Start evaluation of attacks ********") prev_submission_id = None while True: # wait until work is available self.attack_work.read_all_from_datastore() if not self.attack_work.work: logging.info("Work is not populated, waiting...") time.sleep(SLEEP_TIME) continue if self.attack_work.is_all_work_competed(): logging.info("All attack work completed.") break # download all attacks data and dataset self.fetch_attacks_data() # pick piece of work work_id = self.attack_work.try_pick_piece_of_work( self.worker_id, submission_id=prev_submission_id ) if not work_id: logging.info("Failed to pick work, waiting...") time.sleep(SLEEP_TIME_SHORT) continue logging.info("Selected work_id: %s", work_id) # execute work try: elapsed_time_sec, prev_submission_id = self.run_attack_work(work_id) logging.info("Work %s is done", work_id) # indicate that work is completed is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, other_values={"elapsed_time": elapsed_time_sec}, ) except WorkerError as e: logging.info("Failed to run work:\n%s", str(e)) is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, error=str(e) ) if not is_work_update: logging.warning( "Can" 't update work "%s" as completed by worker %d', work_id, self.worker_id, ) logging.info("******** Finished evaluation of attacks ********") def fetch_defense_data(self): """Lazy initialization of data necessary to execute defenses.""" if self.defenses_data_initialized: return logging.info("Fetching defense data from datastore") # init data from datastore self.submissions.init_from_datastore() self.dataset_batches.init_from_datastore() self.adv_batches.init_from_datastore() # read dataset metadata self.read_dataset_metadata() # mark as initialized self.defenses_data_initialized = True def run_defense_work(self, work_id): """Runs one defense work. Args: work_id: ID of the piece of work to run Returns: elapsed_time_sec, submission_id - elapsed time and id of the submission Raises: WorkerError: if error occurred during execution. 
""" class_batch_id = self.defense_work.work[work_id][ "output_classification_batch_id" ] class_batch = self.class_batches.read_batch_from_datastore(class_batch_id) adversarial_batch_id = class_batch["adversarial_batch_id"] submission_id = class_batch["submission_id"] cloud_result_path = class_batch["result_path"] logging.info( "Defense work piece: " 'adversarial_batch_id="%s" submission_id="%s"', adversarial_batch_id, submission_id, ) if submission_id in self.blacklisted_submissions: raise WorkerError("Blacklisted submission") # get defense defense = DefenseSubmission( submission_id, self.submissions, self.storage_bucket ) defense.download() # prepare input - copy adversarial batch locally input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id) if os.path.exists(input_dir): sudo_remove_dirtree(input_dir) os.makedirs(input_dir) try: shell_call( [ "gsutil", "-m", "cp", # typical location of adv batch: # testing-round/adversarial_images/ADVBATCH000/ os.path.join( "gs://", self.storage_bucket, self.round_name, "adversarial_images", adversarial_batch_id, "*", ), input_dir, ] ) adv_images_files = os.listdir(input_dir) if (len(adv_images_files) == 1) and adv_images_files[0].endswith(".zip"): logging.info( "Adversarial batch is in zip archive %s", adv_images_files[0] ) shell_call( [ "unzip", os.path.join(input_dir, adv_images_files[0]), "-d", input_dir, ] ) os.remove(os.path.join(input_dir, adv_images_files[0])) adv_images_files = os.listdir(input_dir) logging.info("%d adversarial images copied", len(adv_images_files)) except (subprocess.CalledProcessError, IOError) as e: raise WorkerError("Can" "t copy adversarial batch locally", e) # prepare output directory if os.path.exists(LOCAL_OUTPUT_DIR): sudo_remove_dirtree(LOCAL_OUTPUT_DIR) os.mkdir(LOCAL_OUTPUT_DIR) output_filname = os.path.join(LOCAL_OUTPUT_DIR, "result.csv") # run defense elapsed_time_sec = defense.run(input_dir, output_filname) # evaluate defense result batch_result = eval_lib.analyze_one_classification_result( storage_client=None, file_path=output_filname, adv_batch=self.adv_batches.data[adversarial_batch_id], dataset_batches=self.dataset_batches, dataset_meta=self.dataset_meta, ) # copy result of the defense into storage try: shell_call( [ "gsutil", "cp", output_filname, os.path.join("gs://", self.storage_bucket, cloud_result_path), ] ) except subprocess.CalledProcessError as e: raise WorkerError("Can" "t result to Cloud Storage", e) return elapsed_time_sec, submission_id, batch_result def run_defenses(self): """Method which evaluates all defense work. In a loop this method queries not completed defense work, picks one defense work and runs it. 
""" logging.info("******** Start evaluation of defenses ********") prev_submission_id = None need_reload_work = True while True: # wait until work is available if need_reload_work: if self.num_defense_shards: shard_with_work = self.defense_work.read_undone_from_datastore( shard_id=(self.worker_id % self.num_defense_shards), num_shards=self.num_defense_shards, ) else: shard_with_work = self.defense_work.read_undone_from_datastore() logging.info( "Loaded %d records of undone work from shard %s", len(self.defense_work), str(shard_with_work), ) if not self.defense_work.work: logging.info("Work is not populated, waiting...") time.sleep(SLEEP_TIME) continue if self.defense_work.is_all_work_competed(): logging.info("All defense work completed.") break # download all defense data and dataset self.fetch_defense_data() need_reload_work = False # pick piece of work work_id = self.defense_work.try_pick_piece_of_work( self.worker_id, submission_id=prev_submission_id ) if not work_id: need_reload_work = True logging.info("Failed to pick work, waiting...") time.sleep(SLEEP_TIME_SHORT) continue logging.info("Selected work_id: %s", work_id) # execute work try: ( elapsed_time_sec, prev_submission_id, batch_result, ) = self.run_defense_work(work_id) logging.info("Work %s is done", work_id) # indicate that work is completed is_work_update = self.defense_work.update_work_as_completed( self.worker_id, work_id, other_values={ "elapsed_time": elapsed_time_sec, "stat_correct": batch_result[0], "stat_error": batch_result[1], "stat_target_class": batch_result[2], "stat_num_images": batch_result[3], }, ) except WorkerError as e: logging.info("Failed to run work:\n%s", str(e)) if str(e).startswith("Docker returned non-zero retval"): logging.info("Running nvidia-docker to ensure that GPU works") shell_call( ["nvidia-docker", "run", "--rm", "nvidia/cuda", "nvidia-smi"] ) is_work_update = self.defense_work.update_work_as_completed( self.worker_id, work_id, error=str(e) ) if not is_work_update: logging.warning( "Can" 't update work "%s" as completed by worker %d', work_id, self.worker_id, ) need_reload_work = True logging.info("******** Finished evaluation of defenses ********") def run_work(self): """Run attacks and defenses""" if os.path.exists(LOCAL_EVAL_ROOT_DIR): sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR) self.run_attacks() self.run_defenses() def main(args): """Main function which runs worker.""" title = "## Starting evaluation of round {0} ##".format(args.round_name) logging.info( "\n" + "#" * len(title) + "\n" + "#" * len(title) + "\n" + "##" + " " * (len(title) - 2) + "##" + "\n" + title + "\n" + "#" * len(title) + "\n" + "#" * len(title) + "\n" + "##" + " " * (len(title) - 2) + "##" + "\n" ) if args.blacklisted_submissions: logging.warning("BLACKLISTED SUBMISSIONS: %s", args.blacklisted_submissions) random.seed() logging.info("Running nvidia-docker to ensure that GPU works") shell_call( ["docker", "run", "--runtime=nvidia", "--rm", "nvidia/cuda", "nvidia-smi"] ) eval_worker = EvaluationWorker( worker_id=args.worker_id, storage_client=eval_lib.CompetitionStorageClient( args.project_id, args.storage_bucket ), datastore_client=eval_lib.CompetitionDatastoreClient( args.project_id, args.round_name ), storage_bucket=args.storage_bucket, round_name=args.round_name, dataset_name=args.dataset_name, blacklisted_submissions=args.blacklisted_submissions, num_defense_shards=args.num_defense_shards, ) eval_worker.run_work() if __name__ == "__main__": parser = argparse.ArgumentParser(description="Worker which executes work.") 
parser.add_argument( "--worker_id", required=True, type=int, help="Numerical ID of the worker." ) parser.add_argument( "--project_id", required=True, help="Your Google Cloud project ID." ) parser.add_argument( "--storage_bucket", required=True, help="Cloud Storage bucket to store competition data.", ) parser.add_argument( "--round_name", default="testing-round", required=False, help="Name of the round.", ) parser.add_argument( "--dataset_name", default="dev", required=False, help="Which dataset to use, either dev or final.", ) parser.add_argument( "--blacklisted_submissions", default="", required=False, help="Comma separated list of blacklisted submission " "IDs.", ) parser.add_argument( "--num_defense_shards", default=10, required=False, help="Number of defense shards", ) parser.add_argument( "--log_file", default="", required=False, help="Location of the logfile." ) worker_args = parser.parse_args() logging_args = { "format": "%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s -- %(message)s", "level": logging.INFO, "datefmt": "%Y-%m-%d %H:%M:%S", } if worker_args.log_file: logging_args["filename"] = worker_args.log_file logging_args["filemode"] = "a" logging.basicConfig(**logging_args) main(worker_args)
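# The time-limit enforcement used by ExecutableSubmission.run_with_time_limit
# above reduces to one pattern: start the work detached, poll until it either
# finishes or the wall-clock budget runs out, and kill it in the latter case.
# The sketch below (hypothetical names, plain subprocess instead of a Docker
# container) shows the same polling loop in a form that runs anywhere.
import subprocess
import time

def run_with_deadline(cmd, time_limit_sec):
    """Run `cmd` as a child process, killing it after `time_limit_sec` seconds.

    Returns the elapsed time in seconds, mirroring run_with_time_limit().
    """
    proc = subprocess.Popen(cmd)       # detached start, like 'docker run --detach'
    start_time = time.time()
    elapsed_time_sec = 0
    while proc.poll() is None:         # analogous to is_docker_still_running()
        elapsed_time_sec = int(time.time() - start_time)
        if elapsed_time_sec < time_limit_sec:
            time.sleep(1)
        else:
            proc.kill()                # analogous to kill_docker_container()
            break
    return elapsed_time_sec

# Example: run_with_deadline(["sleep", "30"], time_limit_sec=3) returns roughly 3.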
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== """TensorFlow API compatibility tests. This test ensures all changes to the public API of TensorFlow are intended. If this test fails, it means a change has been made to the public API. Backwards incompatible changes are not allowed. You can run the test with "--update_goldens" flag set to "True" to update goldens when making changes to the public TF python API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import re import sys import tensorflow as tf from tensorflow._api.v2 import v2 as tf_v2 from google.protobuf import message from google.protobuf import text_format from tensorflow.python.lib.io import file_io from tensorflow.python.framework import test_util from tensorflow.python.platform import resource_loader from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.tools.api.lib import api_objects_pb2 from tensorflow.tools.api.lib import python_object_to_proto_visitor from tensorflow.tools.common import public_api from tensorflow.tools.common import traverse # FLAGS defined at the bottom: FLAGS = None # DEFINE_boolean, update_goldens, default False: _UPDATE_GOLDENS_HELP = """ Update stored golden files if API is updated. WARNING: All API changes have to be authorized by TensorFlow leads. """ # DEFINE_boolean, only_test_core_api, default False: _ONLY_TEST_CORE_API_HELP = """ Some TF APIs are being moved outside of the tensorflow/ directory. There is no guarantee which versions of these APIs will be present when running this test. Therefore, do not error out on API changes in non-core TF code if this flag is set. """ # DEFINE_boolean, verbose_diffs, default True: _VERBOSE_DIFFS_HELP = """ If set to true, print line by line diffs on all libraries. If set to false, only print which libraries have differences. """ _API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1' _API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2' _TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt' _UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt' _NON_CORE_PACKAGES = ['estimator'] def _KeyToFilePath(key, api_version): """From a given key, construct a filepath. Filepath will be inside golden folder for api_version. 
""" def _ReplaceCapsWithDash(matchobj): match = matchobj.group(0) return '-%s' % (match.lower()) case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key) api_folder = ( _API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1) return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key) def _FileNameToKey(filename): """From a given filename, construct a key we use for api objects.""" def _ReplaceDashWithCaps(matchobj): match = matchobj.group(0) return match[1].upper() base_filename = os.path.basename(filename) base_filename_without_ext = os.path.splitext(base_filename)[0] api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext) return api_object_key def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children): """A Visitor that crashes on subclasses of generated proto classes.""" # If the traversed object is a proto Message class if not (isinstance(parent, type) and issubclass(parent, message.Message)): return if parent is message.Message: return # Check that it is a direct subclass of Message. if message.Message not in parent.__bases__: raise NotImplementedError( 'Object tf.%s is a subclass of a generated proto Message. ' 'They are not yet supported by the API tools.' % path) def _FilterNonCoreGoldenFiles(golden_file_list): """Filter out non-core API pbtxt files.""" filtered_file_list = [] filtered_package_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES] for f in golden_file_list: if any( f.rsplit('/')[-1].startswith(pre) for pre in filtered_package_prefixes ): continue filtered_file_list.append(f) return filtered_file_list class ApiCompatibilityTest(test.TestCase): def __init__(self, *args, **kwargs): super(ApiCompatibilityTest, self).__init__(*args, **kwargs) golden_update_warning_filename = os.path.join( resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE) self._update_golden_warning = file_io.read_file_to_string( golden_update_warning_filename) test_readme_filename = os.path.join( resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE) self._test_readme_message = file_io.read_file_to_string( test_readme_filename) def _AssertProtoDictEquals(self, expected_dict, actual_dict, verbose=False, update_goldens=False, additional_missing_object_message='', api_version=2): """Diff given dicts of protobufs and report differences a readable way. Args: expected_dict: a dict of TFAPIObject protos constructed from golden files. actual_dict: a ict of TFAPIObject protos constructed by reading from the TF package linked to the test. verbose: Whether to log the full diffs, or simply report which files were different. update_goldens: Whether to update goldens when there are diffs found. additional_missing_object_message: Message to print when a symbol is missing. api_version: TensorFlow API version to test. """ diffs = [] verbose_diffs = [] expected_keys = set(expected_dict.keys()) actual_keys = set(actual_dict.keys()) only_in_expected = expected_keys - actual_keys only_in_actual = actual_keys - expected_keys all_keys = expected_keys | actual_keys # This will be populated below. updated_keys = [] for key in all_keys: diff_message = '' verbose_diff_message = '' # First check if the key is not found in one or the other. if key in only_in_expected: diff_message = 'Object %s expected but not found (removed). %s' % ( key, additional_missing_object_message) verbose_diff_message = diff_message elif key in only_in_actual: diff_message = 'New object %s found (added).' 
% key verbose_diff_message = diff_message else: # Do not truncate diff self.maxDiff = None # pylint: disable=invalid-name # Now we can run an actual proto diff. try: self.assertProtoEquals(expected_dict[key], actual_dict[key]) except AssertionError as e: updated_keys.append(key) diff_message = 'Change detected in python object: %s.' % key verbose_diff_message = str(e) # All difference cases covered above. If any difference found, add to the # list. if diff_message: diffs.append(diff_message) verbose_diffs.append(verbose_diff_message) # If diffs are found, handle them based on flags. if diffs: diff_count = len(diffs) logging.error(self._test_readme_message) logging.error('%d differences found between API and golden.', diff_count) messages = verbose_diffs if verbose else diffs for i in range(diff_count): print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr) if update_goldens: # Write files if requested. logging.warning(self._update_golden_warning) # If the keys are only in expected, some objects are deleted. # Remove files. for key in only_in_expected: filepath = _KeyToFilePath(key, api_version) file_io.delete_file(filepath) # If the files are only in actual (current library), these are new # modules. Write them to files. Also record all updates in files. for key in only_in_actual | set(updated_keys): filepath = _KeyToFilePath(key, api_version) file_io.write_string_to_file( filepath, text_format.MessageToString(actual_dict[key])) else: # Fail if we cannot fix the test by updating goldens. self.fail('%d differences found between API and golden.' % diff_count) else: logging.info('No differences found between API and golden.') def testNoSubclassOfMessage(self): visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor) visitor.do_not_descend_map['tf'].append('contrib') # Skip compat.v1 and compat.v2 since they are validated in separate tests. visitor.private_map['tf.compat'] = ['v1', 'v2'] traverse.traverse(tf, visitor) def testNoSubclassOfMessageV1(self): if not hasattr(tf.compat, 'v1'): return visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor) visitor.do_not_descend_map['tf'].append('contrib') if FLAGS.only_test_core_api: visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES) traverse.traverse(tf_v2.compat.v1, visitor) def testNoSubclassOfMessageV2(self): if not hasattr(tf.compat, 'v2'): return visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor) visitor.do_not_descend_map['tf'].append('contrib') if FLAGS.only_test_core_api: visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES) traverse.traverse(tf_v2, visitor) def _checkBackwardsCompatibility(self, root, golden_file_pattern, api_version, additional_private_map=None): # Extract all API stuff. visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor() public_api_visitor = public_api.PublicAPIVisitor(visitor) public_api_visitor.private_map['tf'] = ['contrib'] if api_version == 2: public_api_visitor.private_map['tf'].append('enable_v2_behavior') public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental'] if FLAGS.only_test_core_api: public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES) if additional_private_map: public_api_visitor.private_map.update(additional_private_map) traverse.traverse(root, public_api_visitor) proto_dict = visitor.GetProtos() # Read all golden files. 
golden_file_list = file_io.get_matching_files(golden_file_pattern) if FLAGS.only_test_core_api: golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list) def _ReadFileToProto(filename): """Read a filename, create a protobuf from its contents.""" ret_val = api_objects_pb2.TFAPIObject() text_format.Merge(file_io.read_file_to_string(filename), ret_val) return ret_val golden_proto_dict = { _FileNameToKey(filename): _ReadFileToProto(filename) for filename in golden_file_list } # Diff them. Do not fail if called with update. # If the test is run to update goldens, only report diffs but do not fail. self._AssertProtoDictEquals( golden_proto_dict, proto_dict, verbose=FLAGS.verbose_diffs, update_goldens=FLAGS.update_goldens, api_version=api_version) @test_util.run_v1_only('b/120545219') def testAPIBackwardsCompatibility(self): api_version = 1 golden_file_pattern = os.path.join( resource_loader.get_root_dir_with_all_resources(), _KeyToFilePath('*', api_version)) self._checkBackwardsCompatibility( tf, golden_file_pattern, api_version, # Skip compat.v1 and compat.v2 since they are validated # in separate tests. additional_private_map={'tf.compat': ['v1', 'v2']}) # Also check that V1 API has contrib self.assertTrue( 'tensorflow.python.util.lazy_loader.LazyLoader' in str(type(tf.contrib))) @test_util.run_v1_only('b/120545219') def testAPIBackwardsCompatibilityV1(self): api_version = 1 golden_file_pattern = os.path.join( resource_loader.get_root_dir_with_all_resources(), _KeyToFilePath('*', api_version)) self._checkBackwardsCompatibility(tf_v2.compat.v1, golden_file_pattern, api_version) def testAPIBackwardsCompatibilityV2(self): api_version = 2 golden_file_pattern = os.path.join( resource_loader.get_root_dir_with_all_resources(), _KeyToFilePath('*', api_version)) self._checkBackwardsCompatibility( tf_v2, golden_file_pattern, api_version, additional_private_map={'tf.compat': ['v1']}) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP) # TODO(mikecase): Create Estimator's own API compatibility test or # a more general API compatibility test for use for TF components. parser.add_argument( '--only_test_core_api', type=bool, default=False, help=_ONLY_TEST_CORE_API_HELP) parser.add_argument( '--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP) FLAGS, unparsed = parser.parse_known_args() # Now update argv, so that unittest library does not get confused. sys.argv = [sys.argv[0]] + unparsed test.main()
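# The golden-file naming scheme used by _KeyToFilePath and _FileNameToKey
# above maps each capital letter in an API key to '-<lowercase>' in the file
# name and back again. The two helpers below are hypothetical stand-ins that
# reproduce only those regex substitutions, so the round trip can be checked
# in isolation.
import re

def key_to_base_filename(key):
  # e.g. 'tensorflow.GPUOptions' -> 'tensorflow.-g-p-u-options'
  return re.sub('([A-Z]{1})', lambda m: '-%s' % m.group(0).lower(), key)

def base_filename_to_key(base_filename_without_ext):
  # e.g. 'tensorflow.-g-p-u-options' -> 'tensorflow.GPUOptions'
  return re.sub('((-[a-z]){1})', lambda m: m.group(0)[1].upper(),
                base_filename_without_ext)

assert key_to_base_filename('tensorflow.GPUOptions') == 'tensorflow.-g-p-u-options'
assert base_filename_to_key('tensorflow.-g-p-u-options') == 'tensorflow.GPUOptions'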
import os import cv2 import pdb import math import pickle import numpy as np import glob as glob from time import clock from scipy.io import loadmat,savemat from scipy.sparse import csr_matrix from matplotlib import pyplot as plt from DataPathclass import * DataPathobj = DataPath(dataSource,VideoIndex) from parameterClass import * Parameterobj = parameter(dataSource,VideoIndex) def readBuffer(startOffset, cap): for ii in range(startOffset): ret, frame = cap.read() return cap """thre's bug in cap.set, try loopy reading instead""" def readVideo(cap,subSampRate): """when read video in a loop, every subSampRate frames""" status, frame = cap.read() for ii in range(subSampRate-1): status_1, frameskip = cap.read() return status,frame if __name__ == '__main__': frame_idx_bias = 0 start_position = frame_idx_bias isVideo = True if isVideo: dataPath = DataPathobj.video else: dataPath = DataPathobj.imagePath savePath = DataPathobj.kltpath useBlobCenter = Parameterobj.useSBS isVisualize = False # -- utilities if isVisualize: plt.figure(num=None) lk_params = Parameterobj.lk_params feature_params = Parameterobj.feature_params # previousLastFiles = sorted(glob.glob(savePath+'*klt_*')) previousLastFiles = sorted(glob.glob(savePath+'*.mat')) if len(previousLastFiles)>0: if len(previousLastFiles) >1: previousLastFile = previousLastFiles[-1] else: previousLastFile = previousLastFiles[0] lastTrj = loadmat(previousLastFile) lastID = np.max(lastTrj['trjID'][0]) dicidx = lastID+1 #counting from the last biggest global ID lastT = lastTrj['Ttracks'] lastT[lastT==np.max(lastT)] = np.nan tracksdic = {} start = {} end = {} if len(lastTrj['lastPtsKey'])>0: for kk in range((lastTrj['lastPtsKey'][0]).shape[0]): key = lastTrj['lastPtsKey'][0][kk] start[key] = np.nanmin(lastT[kk,:]) if math.isnan(start[key]): print "key:",key, "kk:",kk start.pop(key) continue end[key] = -1 #all alive trj tracksdic[key] = [] tracksdic[key].append(tuple(lastTrj['lastPtsValue'][kk,:])) else: dicidx = 0 # start from 0 tracksdic = {} start = {} end = {} if isVideo: video_src = dataPath cap = cv2.VideoCapture(video_src) # if not cap.isOpened(): # raise Exception("video not opened!") # nframe = np.int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) # nframe is calculated from the 'wrong' fps # fps = int(np.round(cap.get(cv2.cv.CV_CAP_PROP_FPS))) """hard code!!!!!!""" fps = 30 print 'fps',fps print 'reading buffer...' 
# cap.set( cv2.cv.CV_CAP_PROP_POS_FRAMES , max(0,start_position)) cap = readBuffer(start_position, cap) status, frame = cap.read() nrows,ncols = frame.shape[:2] frameL = np.zeros_like(frame[:,:,0]) #just initilize, will be set in the while loop if len(previousLastFiles)>0: frameLp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #set the previous to be the last frame in last truncation else: frameLp = np.zeros_like(frameL) if not isVideo: # -- get the full image list imagepath = dataPath imlist = sorted(glob.glob(imagepath + '*.jpg')) nframe = len(imlist) # -- read in first frame and set dimensions frame = cv2.imread(imlist[max(0,start_position)]) frameL = np.zeros_like(frame[:,:,0]) #just initilize, will be set in the while loop if len(previousLastFiles)>0: frameLp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #set the previous to be the last frame in last truncation else: frameLp = np.zeros_like(frameL) trunclen = Parameterobj.trunclen subSampRate = fps/Parameterobj.targetFPS """hard code!!!!!""" # subSampRate = 1 # subSampRate = 1 # for people counter if len(previousLastFiles)>0: frame_idx = len(previousLastFiles)*trunclen*subSampRate else: frame_idx = (0 + frame_idx_bias) detect_interval = Parameterobj.klt_detect_interval if detect_interval < subSampRate: detect_interval = 1 else: detect_interval = np.floor(detect_interval/subSampRate) subsample_frmIdx = np.int(np.floor(frame_idx/subSampRate)) if useBlobCenter: blob_ind_sparse_matrices = sorted(glob.glob(DataPathobj.blobPath + 'blobLabel*.p')) blob_center_sparse_lists = sorted(glob.glob(DataPathobj.blobPath + 'blobCenter*.p')) # -- set mask, all ones = no mask if Parameterobj.useMask: # # if not Parameterobj.mask is None: # # pass # # else: # plt.imshow(frame[:,:,::-1]) # pdb.set_trace() # mask = np.zeros(frameL.shape, dtype=np.uint8) # # roi_corners = np.array([[(191,0),(343,626),(344,0),(190,629)]], dtype=np.int32) # # roi_corners = np.array([[(0,191), (629,190), (0,344),(626,343)]], dtype=np.int32) # # roi_corners = np.array([[(0,191), (629,190), (0,344)]]) # roi_corners = np.array([[(191,0),(190,629),(344,0),(343,626)]]).reshape(-1, 2) # # ignore_mask_color = (255,)*frame.shape[2] # cv2.fillPoly(mask, roi_corners, (255,255)) # # apply the mask # masked_image = cv2.bitwise_and(frameL, mask) # # save the result # cv2.imwrite(Parameterobj.dataSource+'_mask.jpg', mask) masktouse = 255*np.ones_like(frameL) mask1 = cv2.imread(glob.glob(DataPathobj.DataPath+'/*Mask.jpg')[0]) masktouse[mask1[:,:,0]==0]=0 else: masktouse = 255*np.ones_like(frameL) ## set is buggy # cap.set( cv2.cv.CV_CAP_PROP_POS_FRAMES , max(0,subsample_frmIdx*subSampRate)) cap = readBuffer(max(0,subsample_frmIdx*subSampRate)-start_position, cap) # while (frame_idx < nframe): while status: if useBlobCenter and ((subsample_frmIdx % trunclen) == 0): print "load foreground blob index matrix file....",subsample_frmIdx/trunclen blobIndLists = [] blobIndListfile = blob_ind_sparse_matrices[subsample_frmIdx/trunclen] blobIndLists = pickle.load( open( blobIndListfile, "rb" ) ) blobCenterLists = [] blobCenterListfile = blob_center_sparse_lists[subsample_frmIdx/trunclen] blobCenterLists = pickle.load( open( blobCenterListfile, "rb" ) ) if not isVideo: frame[:,:,:] = cv2.imread(imlist[subsample_frmIdx*subSampRate]) if isVideo: try: """set has bug""" # cap.set( cv2.cv.CV_CAP_PROP_POS_FRAMES , max(0,subsample_frmIdx*subSampRate)) # status, frame[:,:,:] = cap.read() status,frame[:,:,:] = readVideo(cap,subSampRate) except: print "exception!!" 
print "directly save" continue if useBlobCenter: BlobIndMatrixCurFrm = (blobIndLists[np.mod(subsample_frmIdx,min(len(blobIndLists),trunclen))]).todense() BlobCenterCurFrm = blobCenterLists[np.mod(subsample_frmIdx,min(len(blobIndLists),trunclen))] if len(BlobCenterCurFrm)<1: #is empty BlobCenterCurFrm=[(0,0)] frameL[:,:] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # cv2.imshow('hue',frame_hsv[:,:,0]) # cv2.waitKey(0) ## histogram equalization, more contrast clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) frameL_he = clahe.apply(frameL) frameL = frameL_he # for visulization vis = frame.copy() """Tracking""" if len(tracksdic) > 0: try: pnts_old = np.float32([tracksdic[i][-1][:2] for i in sorted(tracksdic.keys())]).reshape(-1, 1, 2) except: pnts_old = np.float32([tracksdic[i][:2] for i in sorted(tracksdic.keys())]).reshape(-1, 1, 2) pnts_new, st, err = cv2.calcOpticalFlowPyrLK(frameLp, frameL, pnts_old, None, **lk_params) pnts_oldr, st, err = cv2.calcOpticalFlowPyrLK(frameL, frameLp, pnts_new, None, **lk_params) dist = abs(pnts_old-pnts_oldr).reshape(-1, 2).max(-1) good = dist < 1 for (x, y), good_flag, idx in zip(pnts_new.reshape(-1, 2), good, sorted(tracksdic.keys())): if not good_flag: if end[idx]==-1: end[idx] = (frame_idx-1) if useBlobCenter: # tracksdic[idx].append((-100,-100,frame_idx,np.nan,0,np.nan,np.nan)) tracksdic[idx].append((np.nan,np.nan,frame_idx,np.nan,0,np.nan,np.nan)) else: tracksdic[idx].append((np.nan,np.nan,frame_idx,np.nan)) else: pass ## already dead, already has end[idx] else: if (not np.isnan(x)) and (not np.isnan(y)): if end[idx]>0: pdb.set_trace() x = min(x,frameLp.shape[1]-1) ## why?? klt will find points outside the bdry??? y = min(y,frameLp.shape[0]-1) x = max(x,0) ## why?? klt will find points outside the bdry??? 
y = max(y,0) # if x>frameLp.shape[1] or y>frameLp.shape[0]: # print x, y # hue = frame_hsv[y,x,0] """use a median of the 3*3 window""" hue = np.nanmedian(frame_hsv[max(0,y-1):min(y+2,nrows),max(0,x-1):min(x+2,ncols),0]) if np.isnan(hue): hue = frame_hsv[y,x,0] """try median of the intensity""" # hue = np.median(frameL[max(0,y-1):min(y+2,nrows),max(0,x-1):min(x+2,ncols)]) if useBlobCenter: blobInd = BlobIndMatrixCurFrm[y,x] if blobInd!=0: blobCenter = BlobCenterCurFrm[blobInd-1] tracksdic[idx].append((x,y,frame_idx,hue,np.int8(blobInd),blobCenter[1],blobCenter[0])) else: # tracksdic[idx].append((x,y,frame_idx,hue,0,np.NaN,np.NaN)) tracksdic[idx].append((x,y,frame_idx,hue,0,x,y)) else: tracksdic[idx].append((x,y,frame_idx,hue)) """mask out this point, avoid duplicating""" cv2.circle(vis, (x, y), 3, (0, 0, 255), -1) if isVisualize: # cv2.line(vis, (int(tracksdic[idx][-2][0]),int(tracksdic[idx][-2][1])), (int(tracksdic[idx][-1][0]),int(tracksdic[idx][-1][1])), (0, 255, 0), 2) # cv2.line(vis, (int(tracksdic[idx][-2][0]),int(tracksdic[idx][-2][1])), (x,y), (0, 255, 0), 1) for vvv in range(len(tracksdic[idx])-1,1,-1): cv2.line(vis, (int(tracksdic[idx][vvv][0]),int(tracksdic[idx][vvv][1])), (int(tracksdic[idx][vvv-1][0]),int(tracksdic[idx][vvv-1][1])), (0, 255, 0), 1) else: """stop appending nans after this point is already lost.""" pass """Detecting new points""" if subsample_frmIdx % detect_interval == 0: # GGD: this is (I *think*) eliminating redundant non-moving points # mask = masktouse masktouse[:,:] = 255 mask = masktouse for x, y in [tracksdic[tr][-1][:2] for tr in tracksdic.keys()]: if not np.isnan(x): cv2.circle(mask, (np.int32(x), np.int32(y)), 5, 0, -1) corners = cv2.goodFeaturesToTrack(frameL,mask=mask,**feature_params) if corners is not None: for x, y in np.float32(corners).reshape(-1, 2): # create new dic item using new dicidx since these are new points: tracksdic[dicidx] = [] start[dicidx] = frame_idx end[dicidx] = -1 hue = np.median(frame_hsv[max(0,y-1):min(y+2,nrows),max(0,x-1):min(x+2,ncols),0]) if np.isnan(hue): hue = frame_hsv[y,x,0] if useBlobCenter: blobInd = BlobIndMatrixCurFrm[y,x] if blobInd!=0: blobCenter = BlobCenterCurFrm[blobInd-1] tracksdic[dicidx].append((x,y,frame_idx,hue,np.int8(blobInd),blobCenter[1],blobCenter[0])) else: # tracksdic[dicidx].append((x,y,frame_idx,hue,0,np.NaN,np.NaN)) tracksdic[dicidx].append((x,y,frame_idx,hue,0,x,y)) else: tracksdic[dicidx].append((x,y,frame_idx,hue)) dicidx += 1 print('{0} - {1}'.format(subsample_frmIdx*subSampRate,len(tracksdic))) if isVisualize: # cv2.imshow('klt', vis) # cv2.waitKey(5) plt.imshow(vis[:,:,::-1]) plt.pause(0.00001) # switch previous frame frameLp[:,:] = frameL[:,:] subsample_frmIdx += 1 frame_idx = subsample_frmIdx*subSampRate # dump trajectories to file # trunclen = min(trunclen,frame_idx - frame_idx/trunclen*600) #the very last truncation length may be less than original trunclen # if ((frame_idx>0) & (subsample_frmIdx % trunclen == 0)) or (frame_idx==nframe): if ((frame_idx>0) & (subsample_frmIdx % trunclen == 0)) or (not status): print "saving===!!!" # print('{0} - {1}'.format(frame_idx,len(tracksdic))) Xtracks = np.zeros([len(tracksdic),trunclen]) Ytracks = np.zeros([len(tracksdic),trunclen]) Huetracks = np.zeros([len(tracksdic),trunclen]) # initialize T track using numbers that will never appear in reality # "won't-appear" fillers": frame_idx+3*trunclen # this way, we won't lose the REAL 0's, i.e. 
starts from 0 frame, when filtering in the trj_filter.py Ttracks = np.ones([len(tracksdic),trunclen])*(frame_idx+3*trunclen) if useBlobCenter: BlobIndtracks = np.zeros([len(tracksdic),trunclen]) #blob index starts from 1 BlobCenterX = np.zeros([len(tracksdic),trunclen]) BlobCenterY = np.zeros([len(tracksdic),trunclen]) # set first frame in this chunk if subsample_frmIdx%trunclen==0: offset = subsample_frmIdx - trunclen else: ## the last truncation is less than trunclen frames offset = subsample_frmIdx - subsample_frmIdx%trunclen # loop through the current trajectories list for ii, trjidx in enumerate(tracksdic.keys()): # set the starting and ending frame index st_ind = start[trjidx] en_ind = end[trjidx] # if en_ind is -1, then the traj is still alive, # otherwise, the trajectory is dead (but still in the # tracks dictionary, otherwise it would have been # removed). if en_ind==-1: #not yet finished, save whole row ttrack = np.array(tracksdic[trjidx]).T else: #already ended within this truncation ttrack = np.array(tracksdic[trjidx][:-1]).T # don't save nans # if st_ind is -1, then the track existed in the previous # truncation and all points except the last one of the # previous truncation were removed, so only save from the # second point. # if st_ind=='fromPre': if st_ind/subSampRate<offset: # print "trj point is from previous truncation!" st_ind = offset*subSampRate ttrack = ttrack[:,1:] #because the first point is the last point from pre trunc, already saved # put trajectory into matrix tstart, tstop = (st_ind-start_position)/subSampRate-offset, (en_ind-start_position)/subSampRate-offset+1 if en_ind==-1: Xtracks[ii,:][tstart:tstart+len(ttrack[0,:])] = ttrack[0,:] Ytracks[ii,:][tstart:tstart+len(ttrack[1,:])] = ttrack[1,:] Ttracks[ii,:][tstart:tstart+len(ttrack[2,:])] = ttrack[2,:] Huetracks[ii,:][tstart:tstart+len(ttrack[3,:])] = ttrack[3,:] if useBlobCenter: BlobIndtracks[ii,:][tstart:] = ttrack[4,:] BlobCenterX[ii,:][tstart:] = ttrack[5,:] BlobCenterY[ii,:][tstart:] = ttrack[6,:] else: Xtracks[ii,:][tstart:tstop] = ttrack[0,:] Ytracks[ii,:][tstart:tstop] = ttrack[1,:] Ttracks[ii,:][tstart:tstop] = ttrack[2,:] Huetracks[ii,:][tstart:tstop] = ttrack[3,:] if useBlobCenter: BlobIndtracks[ii,:][tstart:tstop] = ttrack[4,:] BlobCenterX[ii,:][tstart:tstop] = ttrack[5,:] BlobCenterY[ii,:][tstart:tstop] = ttrack[6,:] # put tracks into sparse matrix trk ={} # Ttracks = Ttracks+frame_idx_bias # use the actual frame index as the key, to save data trk['xtracks'] = csr_matrix(Xtracks) trk['ytracks'] = csr_matrix(Ytracks) trk['Ttracks'] = Ttracks trk['Huetracks'] = csr_matrix(Huetracks) trk['trjID'] = tracksdic.keys() if useBlobCenter: trk['fg_blob_index'] = csr_matrix(BlobIndtracks) trk['fg_blob_center_X'] = csr_matrix(BlobCenterX) trk['fg_blob_center_Y'] = csr_matrix(BlobCenterY) # for dead tracks, remove them. for alive tracks, remove all # points except the last one (in order to track into the next # frame). 
        deadtrj = np.array(end.keys())[np.array(end.values())>=0]  # ==0 is for the case when the tracks ends at 0 frame
        for i in sorted(tracksdic.keys()):
            if i in deadtrj:
                tracksdic.pop(i)
            else:
                tracksdic[i] = [tracksdic[i][-1]]  # save the last one
        if len(tracksdic)>0:
            trk['lastPtsValue'] = np.array(tracksdic.values())[:,0,:]
            trk['lastPtsKey'] = np.array(tracksdic.keys())
        else:  ## all points are dead in this trunk
            trk['lastPtsValue'] = np.array([])
            trk['lastPtsKey'] = np.array([])
        # savename = os.path.join(savePath,'simp_blob_klt_'+str(np.int(np.ceil(subsample_frmIdx/float(trunclen)))).zfill(3))
        savename = os.path.join(savePath, str(np.int(np.ceil(subsample_frmIdx/float(trunclen)))).zfill(3))
        savemat(savename, trk)
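

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the tracker above): how one of the saved
# trajectory chunks might be read back and densified for inspection. The field
# names ('xtracks', 'ytracks', 'Ttracks', 'trjID') come from the savemat() call
# above; the helper name and the '001.mat' chunk path are assumptions.
import numpy as np
from scipy.io import loadmat


def load_trajectory_chunk(chunk_path):
    """Load one truncation chunk and densify its sparse track matrices."""
    mat = loadmat(chunk_path)
    # csr_matrix fields are stored as MATLAB sparse; loadmat returns them as
    # scipy sparse matrices, so convert back to dense arrays here.
    xtracks = mat['xtracks'].toarray()
    ytracks = mat['ytracks'].toarray()
    ttracks = np.asarray(mat['Ttracks'])   # saved dense, padded with sentinel values
    trj_ids = np.ravel(mat['trjID'])
    return xtracks, ytracks, ttracks, trj_ids


if __name__ == '__main__':
    x, y, t, ids = load_trajectory_chunk('001.mat')  # hypothetical chunk name
    print('chunk holds %d trajectories over %d frames' % (x.shape[0], x.shape[1]))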
import os import sys from tarfile import open as tar_open import tarfile import tempfile import uuid import insights.client.utilities as util from insights.client.constants import InsightsConstants as constants import re import mock import six import pytest from mock.mock import patch from json import loads as json_load machine_id = str(uuid.uuid4()) remove_file_content = """ [remove] commands=foo files=bar """.strip().encode("utf-8") def test_display_name(): assert util.determine_hostname(display_name='foo') == 'foo' def test_determine_hostname(): import socket hostname = socket.gethostname() fqdn = socket.getfqdn() assert util.determine_hostname() in (hostname, fqdn) assert util.determine_hostname() != 'foo' def test_get_time(): time_regex = re.match('\d{4}-\d{2}-\d{2}\D\d{2}:\d{2}:\d{2}\.\d+', util.get_time()) assert time_regex.group(0) is not None def test_write_to_disk(): content = 'boop' filename = '/tmp/testing' util.write_to_disk(filename, content=content) assert os.path.isfile(filename) with open(filename, 'r') as f: result = f.read() assert result == 'boop' util.write_to_disk(filename, delete=True) is None def test_generate_machine_id(): machine_id_regex = re.match('\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', util.generate_machine_id(destination_file='/tmp/testmachineid')) assert machine_id_regex.group(0) is not None with open('/tmp/testmachineid', 'r') as _file: machine_id = _file.read() assert util.generate_machine_id(destination_file='/tmp/testmachineid') == machine_id os.remove('/tmp/testmachineid') def test_bad_machine_id(): with mock.patch.object(util.sys, "exit") as mock_exit: with open('/tmp/testmachineid', 'w') as _file: _file.write("this_is_bad") util.generate_machine_id(destination_file='/tmp/testmachineid') assert mock_exit.call_args[0][0] == constants.sig_kill_bad os.remove('/tmp/testmachineid') def test_expand_paths(): assert util._expand_paths('/tmp') == ['/tmp'] def test_magic_plan_b(): tf = tempfile.NamedTemporaryFile() with open(tf.name, 'w') as f: f.write('testing stuff') assert util.magic_plan_b(tf.name) == 'text/plain; charset=us-ascii' def test_run_command_get_output(): cmd = 'echo hello' assert util.run_command_get_output(cmd) == {'status': 0, 'output': u'hello\n'} @patch('insights.client.utilities.wrapper_constants') @patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) def test_get_version_info_OK(wrapper_constants): ''' insights_client constants are imported OK and version is reported. Return version as defined ''' wrapper_constants.version = 1 version_info = util.get_version_info() assert version_info == {'core_version': '1-1', 'client_version': 1} @patch('insights.client.utilities.wrapper_constants', new=None) @patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) def test_get_version_info_no_module(): ''' insights_client constants cannot be imported, constants object is None. Return None version. ''' version_info = util.get_version_info() assert version_info == {'core_version': '1-1', 'client_version': None} @patch('insights.client.utilities.wrapper_constants') @patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) def test_get_version_info_no_version(wrapper_constants): ''' insights_client constants are imported OK but constants object has no attribute "version." 
Return None version ''' del wrapper_constants.version version_info = util.get_version_info() assert version_info == {'core_version': '1-1', 'client_version': None} # TODO: DRY @patch('insights.client.utilities.constants.registered_files', ['/tmp/insights-client.registered', '/tmp/redhat-access-insights.registered']) @patch('insights.client.utilities.constants.unregistered_files', ['/tmp/insights-client.unregistered', '/tmp/redhat-access-insights.unregistered']) def test_write_registered_file(): util.write_registered_file() for r in constants.registered_files: assert os.path.isfile(r) is True for u in constants.unregistered_files: assert os.path.isfile(u) is False @patch('insights.client.utilities.constants.registered_files', ['/tmp/insights-client.registered', '/tmp/redhat-access-insights.registered']) @patch('insights.client.utilities.constants.unregistered_files', ['/tmp/insights-client.unregistered', '/tmp/redhat-access-insights.unregistered']) def test_delete_registered_file(): util.write_registered_file() util.delete_registered_file() for r in constants.registered_files: assert os.path.isfile(r) is False @patch('insights.client.utilities.constants.registered_files', ['/tmp/insights-client.registered', '/tmp/redhat-access-insights.registered']) @patch('insights.client.utilities.constants.unregistered_files', ['/tmp/insights-client.unregistered', '/tmp/redhat-access-insights.unregistered']) def test_write_unregistered_file(): util.write_unregistered_file() for r in constants.registered_files: assert os.path.isfile(r) is False for u in constants.unregistered_files: assert os.path.isfile(u) is True @patch('insights.client.utilities.constants.registered_files', ['/tmp/insights-client.registered', '/tmp/redhat-access-insights.registered']) @patch('insights.client.utilities.constants.unregistered_files', ['/tmp/insights-client.unregistered', '/tmp/redhat-access-insights.unregistered']) def test_delete_unregistered_file(): util.write_unregistered_file() util.delete_unregistered_file() for u in constants.unregistered_files: assert os.path.isfile(u) is False def test_read_pidfile(): ''' Test a pidfile that exists ''' if six.PY3: open_name = 'builtins.open' else: open_name = '__builtin__.open' with patch(open_name, create=True) as mock_open: mock_open.side_effect = [mock.mock_open(read_data='420').return_value] assert util.read_pidfile() == '420' def test_read_pidfile_failure(): ''' Test a pidfile that does not exist ''' if six.PY3: open_name = 'builtins.open' else: open_name = '__builtin__.open' with patch(open_name, create=True) as mock_open: mock_open.side_effect = IOError assert util.read_pidfile() is None @patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') def test_systemd_notify_init_thread_no_socket(exists, thread): ''' Test this function when NOTIFY_SOCKET is undefined, i.e. 
when we run the client on demand and not via systemd job ''' exists.return_value = True util.systemd_notify_init_thread() thread.assert_not_called() @patch('insights.client.utilities.Popen') def test_systemd_notify(Popen): ''' Test calling systemd-notify with a "valid" PID On RHEL 7, exists(/usr/bin/systemd-notify) == True ''' Popen.return_value.communicate.return_value = ('', '') util._systemd_notify('420') Popen.assert_called_once() @patch('insights.client.utilities.read_pidfile', mock.Mock(return_value=None)) @patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') @patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'}) def test_systemd_notify_init_thread_failure_bad_pid(exists, thread): ''' Test initializing systemd-notify loop with an invalid PID On RHEL 7, exists(/usr/bin/systemd-notify) == True ''' exists.return_value = True util.systemd_notify_init_thread() exists.assert_not_called() thread.assert_not_called() @patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') @patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'}) def test_systemd_notify_init_thread_failure_rhel_6(exists, thread): ''' Test calling systemd-notify on RHEL 6 On RHEL 6, exists(/usr/bin/systemd-notify) == False ''' exists.return_value = False util.systemd_notify_init_thread() thread.assert_not_called() def test_get_tags(): content = b"foo: bar" fp = tempfile.NamedTemporaryFile(delete=False) fp.write(content) fp.close() got = util.get_tags(fp.name) assert got == {"foo": "bar"} def test_get_tags_empty(): content = b"" fp = tempfile.NamedTemporaryFile(delete=False) fp.write(content) fp.close() got = util.get_tags(fp.name) assert got == {} def test_get_tags_nonexist(): got = util.get_tags("/file/does/not/exist") assert got is None @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier uses oyaml library which is incompatable with this test') def test_write_tags(): tags = {'foo': 'bar'} fp = tempfile.NamedTemporaryFile() util.write_tags(tags, tags_file_path=fp.name) got = util.get_tags(fp.name) assert got == tags @patch('insights.client.utilities.os.rename') @patch('insights.client.utilities.os.path.exists') def test_migrate_tags(path_exists, os_rename): ''' Test the migrate_tags function for the following cases: 1) tags.yaml does not exist, tags.conf does not exist - do nothing 2) tags.yaml exists, tags.conf does not exist - do nothing 3) tags.yaml does not exist, tags.conf exists - rename tags.conf to tags.yaml 4) tags.yaml exists, tags.conf exists - do nothing ''' # existence of tags.yaml is checked FIRST, tags.conf is checked SECOND # mock side effects are according to this order # case 1 path_exists.side_effect = [False, False] util.migrate_tags() os_rename.assert_not_called() os_rename.reset_mock() # case 2 path_exists.side_effect = [True, False] util.migrate_tags() os_rename.assert_not_called() os_rename.reset_mock() # case 3 path_exists.side_effect = [False, True] util.migrate_tags() os_rename.assert_called_once() os_rename.reset_mock() # case 4 path_exists.side_effect = [True, True] util.migrate_tags() os_rename.assert_not_called() os_rename.reset_mock() def mock_open(name, mode, files=[["meta_data/insights.spec-small", 1], ["meta_data/insights.spec-big", 1], ["data/insights/small", 1], ["data/insights/big", 100]]): base_path = "./insights-client" with tempfile.TemporaryFile(suffix='.tar.gz') as f: tarball = tar_open(fileobj=f, 
mode='w:gz') for file in files: member = tarfile.TarInfo(name=os.path.join(base_path, file[0])) member.size = file[1] tarball.addfile(member, None) return tarball def mock_extract_file(self, filename): if "small" in filename: f = "small" else: f = "big" return f def mock_json_load(filename): if "small" in filename: content = json_load('{"name": "insights.spec-small", "results": {"type": "insights.core.spec_factory.CommandOutputProvider", "object": { "relative_path": "insights/small"}}}') else: content = json_load('{"name": "insights.spec-big", "results": {"type": "insights.core.spec_factory.CommandOutputProvider", "object": { "relative_path": "insights/big"}}}') return content # TODO: try to pass files as mock_open parameter @patch('insights.client.utilities.tarfile.open', mock_open) @patch('insights.client.utilities.json.load', mock_json_load) @patch('insights.client.utilities.tarfile.TarFile.extractfile', mock_extract_file) def test_largest_spec_in_archive(): largest_file = util.largest_spec_in_archive("/tmp/insights-client.tar.gz") assert largest_file[0] == "insights/big" assert largest_file[1] == 100 assert largest_file[2] == "insights.spec-big"
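

# ---------------------------------------------------------------------------
# Hedged sketch (not part of the test module above): building a real, small
# tar.gz fixture on disk instead of patching tarfile.open. The member layout
# mirrors the mocked archive above (meta_data/ and data/ entries under an
# insights-client/ prefix); the helper name and payload contents are assumptions.
import io
import os
import tarfile
import tempfile


def build_archive_fixture(members):
    """Write a gzipped tar containing the given {name: payload_bytes} members."""
    fd, path = tempfile.mkstemp(suffix='.tar.gz')
    os.close(fd)
    with tarfile.open(path, mode='w:gz') as tarball:
        for name, payload in members.items():
            info = tarfile.TarInfo(name=os.path.join('insights-client', name))
            info.size = len(payload)
            tarball.addfile(info, io.BytesIO(payload))
    return path


# Illustrative layout matching the mocked archive used in the test above:
fixture_path = build_archive_fixture({
    'meta_data/insights.spec-small': b'{}',
    'data/insights/small': b'x',
    'data/insights/big': b'x' * 100,
})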
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from subprocess import check_call, check_output import requests.exceptions import requests import time import six import re try: check_call(["kubectl", "get", "pods"]) except Exception as e: raise unittest.SkipTest( "Kubernetes integration tests require a minikube cluster;" "Skipping tests {}".format(e) ) def get_minikube_host(): host_ip = check_output(['minikube', 'ip']) if six.PY3: host_ip = host_ip.decode('UTF-8') host = '{}:30809'.format(host_ip.strip()) return host class KubernetesExecutorTest(unittest.TestCase): def _delete_airflow_pod(self): air_pod = check_output(['kubectl', 'get', 'pods']).decode() air_pod = air_pod.split('\n') names = [re.compile('\s+').split(x)[0] for x in air_pod if 'airflow' in x] if names: check_call(['kubectl', 'delete', 'pod', names[0]]) def monitor_task(self, host, execution_date, dag_id, task_id, expected_final_state, timeout): tries = 0 state = '' max_tries = max(int(timeout / 5), 1) # Wait 100 seconds for the operator to complete while tries < max_tries: time.sleep(5) # Trigger a new dagrun try: result = requests.get( 'http://{host}/api/experimental/dags/{dag_id}/' 'dag_runs/{execution_date}/tasks/{task_id}' .format(host=host, dag_id=dag_id, execution_date=execution_date, task_id=task_id) ) self.assertEqual(result.status_code, 200, "Could not get the status") result_json = result.json() state = result_json['state'] print("Attempt {}: Current state of operator is {}".format(tries, state)) if state == expected_final_state: break tries += 1 except requests.exceptions.ConnectionError as e: check_call(["echo", "api call failed. trying again. 
error {}".format(e)]) pass self.assertEqual(state, expected_final_state) # Maybe check if we can retrieve the logs, but then we need to extend the API def ensure_dag_expected_state(self, host, execution_date, dag_id, expected_final_state, timeout): tries = 0 state = '' max_tries = max(int(timeout / 5), 1) # Wait 100 seconds for the operator to complete while tries < max_tries: time.sleep(5) # Trigger a new dagrun result = requests.get( 'http://{host}/api/experimental/dags/{dag_id}/' 'dag_runs/{execution_date}' .format(host=host, dag_id=dag_id, execution_date=execution_date) ) print(result) self.assertEqual(result.status_code, 200, "Could not get the status") result_json = result.json() print(result_json) state = result_json['state'] check_call( ["echo", "Attempt {}: Current state of dag is {}".format(tries, state)]) print("Attempt {}: Current state of dag is {}".format(tries, state)) if state == expected_final_state: break tries += 1 self.assertEqual(state, expected_final_state) # Maybe check if we can retrieve the logs, but then we need to extend the API def start_dag(self, dag_id, host): result = requests.get( 'http://{host}/api/experimental/' 'dags/{dag_id}/paused/false'.format(host=host, dag_id=dag_id) ) self.assertEqual(result.status_code, 200, "Could not enable DAG: {result}" .format(result=result.json())) # Trigger a new dagrun result = requests.post( 'http://{host}/api/experimental/' 'dags/{dag_id}/dag_runs'.format(host=host, dag_id=dag_id), json={} ) self.assertEqual(result.status_code, 200, "Could not trigger a DAG-run: {result}" .format(result=result.json())) time.sleep(1) result = requests.get( 'http://{}/api/experimental/latest_runs'.format(host) ) self.assertEqual(result.status_code, 200, "Could not get the latest DAG-run:" " {result}" .format(result=result.json())) result_json = result.json() return result_json def test_integration_run_dag(self): host = get_minikube_host() dag_id = 'example_kubernetes_annotation' result_json = self.start_dag(dag_id=dag_id, host=host) self.assertGreater(len(result_json['items']), 0) execution_date = result_json['items'][0]['execution_date'] print("Found the job with execution date {}".format(execution_date)) # Wait 100 seconds for the operator to complete self.monitor_task(host=host, execution_date=execution_date, dag_id=dag_id, task_id='start_task', expected_final_state='success', timeout=100) self.ensure_dag_expected_state(host=host, execution_date=execution_date, dag_id=dag_id, expected_final_state='success', timeout=100) def test_integration_run_dag_with_scheduler_failure(self): host = get_minikube_host() dag_id = 'example_kubernetes_annotation' result_json = self.start_dag(dag_id=dag_id, host=host) self.assertGreater(len(result_json['items']), 0) execution_date = result_json['items'][0]['execution_date'] print("Found the job with execution date {}".format(execution_date)) self._delete_airflow_pod() time.sleep(10) # give time for pod to restart # Wait 100 seconds for the operator to complete self.monitor_task(host=host, execution_date=execution_date, dag_id=dag_id, task_id='start_task', expected_final_state='success', timeout=120) self.ensure_dag_expected_state(host=host, execution_date=execution_date, dag_id=dag_id, expected_final_state='success', timeout=100) if __name__ == '__main__': unittest.main()
import os import re import time import pytest import logging from collections import defaultdict from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement from dtest import DtestTimeoutError, Tester, create_ks since = pytest.mark.since logger = logging.getLogger(__name__) TRACE_DETERMINE_REPLICAS = re.compile('Determining replicas for mutation') TRACE_SEND_MESSAGE = re.compile(r'Sending (?:MUTATION|REQUEST_RESPONSE) message to /([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)') TRACE_RESPOND_MESSAGE = re.compile(r'(?:MUTATION|REQUEST_RESPONSE) message received from /([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)') TRACE_COMMIT_LOG = re.compile('Appending to commitlog') TRACE_FORWARD_WRITE = re.compile(r'Enqueuing forwarded write to /([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)') # Some pre-computed murmur 3 hashes; there are no good python murmur3 # hashing libraries :( murmur3_hashes = { 5: -7509452495886106294, 10: -6715243485458697746, 16: -5477287129830487822, 13: -5034495173465742853, 11: -4156302194539278891, 1: -4069959284402364209, 19: -3974532302236993209, 8: -3799847372828181882, 2: -3248873570005575792, 4: -2729420104000364805, 18: -2695747960476065067, 15: -1191135763843456182, 20: 1388667306199997068, 7: 1634052884888577606, 6: 2705480034054113608, 9: 3728482343045213994, 14: 4279681877540623768, 17: 5467144456125416399, 12: 8582886034424406875, 3: 9010454139840013625 } def query_system_traces_length(session): return len(list(session.execute("SELECT * FROM system_traces.events"))) def last_n_values_same(n, iterable): last_n_values = iterable[-n:] if len(last_n_values) != n: return False num_unique_values_in_last_n = len(set(last_n_values)) return num_unique_values_in_last_n == 1 def block_on_trace(session): results_from_query = [] num_same_results_required = 5 # We should never run into a timeout, because # eventually trace events should stop being generated. # Just in case though, we add a large timeout, to prevent # deadlock. start = time.time() timeout = start + 180 while not last_n_values_same(num_same_results_required, results_from_query): results_from_query.append(query_system_traces_length(session)) time.sleep(1) if time.time() > timeout: raise DtestTimeoutError() @pytest.mark.no_vnodes class TestReplication(Tester): """ This test suite looks at how data is replicated across a cluster and who the coordinator, replicas and forwarders involved are. 
""" def get_replicas_from_trace(self, trace): """ Look at trace and return a list of the replicas contacted """ coordinator = None nodes_sent_write = set() # Nodes sent a write request nodes_responded_write = set() # Nodes that acknowledges a write replicas_written = set() # Nodes that wrote to their commitlog forwarders = set() # Nodes that forwarded a write to another node nodes_contacted = defaultdict(set) # node -> list of nodes that were contacted for trace_event in trace.events: # Step 1, find coordinator node: activity = trace_event.description source = trace_event.source if activity.startswith('Determining replicas for mutation'): if not coordinator: coordinator = source break if not coordinator: continue for trace_event in trace.events: activity = trace_event.description source = trace_event.source # Step 2, find all the nodes that each node talked to: send_match = TRACE_SEND_MESSAGE.search(activity) recv_match = TRACE_RESPOND_MESSAGE.search(activity) if send_match: node_contacted = send_match.groups()[0] if source == coordinator: nodes_sent_write.add(node_contacted) nodes_contacted[source].add(node_contacted) elif recv_match: node_contacted = recv_match.groups()[0] if source == coordinator: nodes_responded_write.add(recv_match.groups()[0]) # Step 3, find nodes that forwarded to other nodes: # (Happens in multi-datacenter clusters) if source != coordinator: forward_match = TRACE_FORWARD_WRITE.search(activity) if forward_match: forwarding_node = forward_match.groups()[0] nodes_sent_write.add(forwarding_node) forwarders.add(forwarding_node) # Step 4, find nodes who actually wrote data: if TRACE_COMMIT_LOG.search(activity): replicas_written.add(source) return {"coordinator": coordinator, "forwarders": forwarders, "replicas": replicas_written, "nodes_sent_write": nodes_sent_write, "nodes_responded_write": nodes_responded_write, "nodes_contacted": nodes_contacted } def get_replicas_for_token(self, token, replication_factor, strategy='SimpleStrategy', nodes=None): """ Figure out which node(s) should receive data for a given token and replication factor """ if not nodes: nodes = self.cluster.nodelist() token_ranges = sorted(zip([n.initial_token for n in nodes], nodes)) replicas = [] # Find first replica: for i, (r, node) in enumerate(token_ranges): if token <= r: replicas.append(node.address()) first_ring_position = i break else: replicas.append(token_ranges[0][1].address()) first_ring_position = 0 # Find other replicas: if strategy == 'SimpleStrategy': for node in nodes[first_ring_position + 1:]: replicas.append(node.address()) if len(replicas) == replication_factor: break if len(replicas) != replication_factor: # Replication token range looped: for node in nodes: replicas.append(node.address()) if len(replicas) == replication_factor: break elif strategy == 'NetworkTopologyStrategy': # NetworkTopologyStrategy can be broken down into multiple # SimpleStrategies, just once per datacenter: for dc, rf in list(replication_factor.items()): dc_nodes = [n for n in nodes if n.data_center == dc] replicas.extend(self.get_replicas_for_token( token, rf, nodes=dc_nodes)) else: raise NotImplemented('replication strategy not implemented: %s' % strategy) return replicas def pprint_trace(self, trace): """ Pretty print a trace """ if logging.root.level == logging.DEBUG: print(("-" * 40)) for t in trace.events: print(("%s\t%s\t%s\t%s" % (t.source, t.source_elapsed, t.description, t.thread_name))) print(("-" * 40)) def test_simple(self): """ Test the SimpleStrategy on a 3 node cluster """ 
self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True) node1 = self.cluster.nodelist()[0] session = self.patient_exclusive_cql_connection(node1, consistency_level=ConsistencyLevel.ALL) session.max_trace_wait = 120 replication_factor = 3 create_ks(session, 'test', replication_factor) session.execute('CREATE TABLE test.test (id int PRIMARY KEY, value text)', trace=False) for key, token in list(murmur3_hashes.items()): logger.debug('murmur3 hash key={key},token={token}'.format(key=key, token=token)) query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key), consistency_level=ConsistencyLevel.ALL) future = session.execute_async(query, trace=True) future.result() block_on_trace(session) trace = future.get_query_trace(max_wait=120) self.pprint_trace(trace) stats = self.get_replicas_from_trace(trace) replicas_should_be = set(self.get_replicas_for_token( token, replication_factor)) logger.debug('\nreplicas should be: %s' % replicas_should_be) logger.debug('replicas were: %s' % stats['replicas']) # Make sure the correct nodes are replicas: assert stats['replicas'] == replicas_should_be # Make sure that each replica node was contacted and # acknowledged the write: assert stats['nodes_sent_write'] == stats['nodes_responded_write'] @pytest.mark.resource_intensive def test_network_topology(self): """ Test the NetworkTopologyStrategy on a 2DC 3:3 node cluster """ self.cluster.populate([3, 3]).start(wait_for_binary_proto=True, wait_other_notice=True) node1 = self.cluster.nodelist()[0] ip_nodes = dict((node.address(), node) for node in self.cluster.nodelist()) session = self.patient_exclusive_cql_connection(node1, consistency_level=ConsistencyLevel.ALL) replication_factor = {'dc1': 2, 'dc2': 2} create_ks(session, 'test', replication_factor) session.execute('CREATE TABLE test.test (id int PRIMARY KEY, value text)', trace=False) forwarders_used = set() for key, token in list(murmur3_hashes.items()): query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key), consistency_level=ConsistencyLevel.ALL) future = session.execute_async(query, trace=True) future.result() block_on_trace(session) trace = future.get_query_trace(max_wait=120) self.pprint_trace(trace) stats = self.get_replicas_from_trace(trace) replicas_should_be = set(self.get_replicas_for_token( token, replication_factor, strategy='NetworkTopologyStrategy')) logger.debug('Current token is %s' % token) logger.debug('\nreplicas should be: %s' % replicas_should_be) logger.debug('replicas were: %s' % stats['replicas']) # Make sure the coordinator only talked to a single node in # the second datacenter - CASSANDRA-5632: num_in_other_dcs_contacted = 0 for node_contacted in stats['nodes_contacted'][node1.address()]: if ip_nodes[node_contacted].data_center != node1.data_center: num_in_other_dcs_contacted += 1 assert num_in_other_dcs_contacted == 1 # Record the forwarder used for each INSERT: forwarders_used = forwarders_used.union(stats['forwarders']) try: # Make sure the correct nodes are replicas: assert stats['replicas'] == replicas_should_be # Make sure that each replica node was contacted and # acknowledged the write: assert stats['nodes_sent_write'] == stats['nodes_responded_write'] except AssertionError as e: logger.debug("Failed on key %s and token %s." 
% (key, token)) raise e # Given a diverse enough keyset, each node in the second # datacenter should get a chance to be a forwarder: assert len(forwarders_used) == 3 class TestSnitchConfigurationUpdate(Tester): """ Test to reproduce CASSANDRA-10238, wherein changing snitch properties to change racks without a restart could violate RF contract. Since CASSANDRA-10243 it is no longer possible to change rack or dc for live nodes so we must specify which nodes should be shutdown in order to have the rack changed. """ @pytest.fixture(autouse=True) def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.ignore_log_patterns = ( "Fatal exception during initialization", "Cannot start node if snitch's rack(.*) differs from previous rack(.*)", "Cannot update data center or rack" ) def check_endpoint_count(self, ks, table, nodes, rf): """ Check a dummy key expecting it to have replication factor as the sum of rf on all dcs. """ expected_count = sum([int(r) for d, r in rf.items() if d != 'class']) for node in nodes: cmd = "getendpoints {} {} dummy".format(ks, table) out, err, _ = node.nodetool(cmd) if len(err.strip()) > 0: logger.debug("Error running 'nodetool {}': {}".format(cmd, err)) logger.debug("Endpoints for node {}, expected count is {}".format(node.address(), expected_count)) logger.debug(out) ips_found = re.findall('(\d+\.\d+\.\d+\.\d+)', out) assert len(ips_found) == expected_count, "wrong number of endpoints found ({}), should be: {}".format(len(ips_found), expected_count) def wait_for_nodes_on_racks(self, nodes, expected_racks): """ Waits for nodes to match the expected racks. """ regex = re.compile(r"^UN(?:\s*)127\.0\.0(?:.*)\s(.*)$", re.IGNORECASE) for i, node in enumerate(nodes): wait_expire = time.time() + 120 while time.time() < wait_expire: out, err, _ = node.nodetool("status") logger.debug(out) if len(err.strip()) > 0: logger.debug("Error trying to run nodetool status: {}".format(err)) racks = [] for line in out.split(os.linesep): m = regex.match(line) if m: racks.append(m.group(1)) if racks == expected_racks: # great, the topology change is propagated logger.debug("Topology change detected on node {}".format(i)) break else: logger.debug("Waiting for topology change on node {}".format(i)) time.sleep(5) else: raise RuntimeError("Ran out of time waiting for topology to change on node {}".format(i)) def test_rf_collapse_gossiping_property_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are collapsed using a gossiping property file snitch the RF is not impacted. """ self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='GossipingPropertyFileSnitch', snitch_config_file='cassandra-rackdc.properties', snitch_lines_before=lambda i, node: ["dc=dc1", "rack=rack{}".format(i)], snitch_lines_after=lambda i, node: ["dc=dc1", "rack=rack1"], final_racks=["rack1", "rack1", "rack1"], nodes_to_shutdown=[0, 2]) def test_rf_expand_gossiping_property_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are expanded using a gossiping property file snitch the RF is not impacted. 
""" self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='GossipingPropertyFileSnitch', snitch_config_file='cassandra-rackdc.properties', snitch_lines_before=lambda i, node: ["dc=dc1", "rack=rack1"], snitch_lines_after=lambda i, node: ["dc=dc1", "rack=rack{}".format(i)], final_racks=["rack0", "rack1", "rack2"], nodes_to_shutdown=[0, 2]) @pytest.mark.resource_intensive def test_rf_collapse_gossiping_property_file_snitch_multi_dc(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are collapsed using a gossiping property file snitch the RF is not impacted, in a multi-dc environment. """ self._test_rf_on_snitch_update(nodes=[3, 3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3, 'dc2': 3}, snitch_class_name='GossipingPropertyFileSnitch', snitch_config_file='cassandra-rackdc.properties', snitch_lines_before=lambda i, node: ["dc={}".format(node.data_center), "rack=rack{}".format(i % 3)], snitch_lines_after=lambda i, node: ["dc={}".format(node.data_center), "rack=rack1"], final_racks=["rack1", "rack1", "rack1", "rack1", "rack1", "rack1"], nodes_to_shutdown=[0, 2, 3, 5]) @pytest.mark.resource_intensive def test_rf_expand_gossiping_property_file_snitch_multi_dc(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are expanded using a gossiping property file snitch the RF is not impacted, in a multi-dc environment. """ self._test_rf_on_snitch_update(nodes=[3, 3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3, 'dc2': 3}, snitch_class_name='GossipingPropertyFileSnitch', snitch_config_file='cassandra-rackdc.properties', snitch_lines_before=lambda i, node: ["dc={}".format(node.data_center), "rack=rack1"], snitch_lines_after=lambda i, node: ["dc={}".format(node.data_center), "rack=rack{}".format(i % 3)], final_racks=["rack0", "rack1", "rack2", "rack0", "rack1", "rack2"], nodes_to_shutdown=[0, 2, 3, 5]) def test_rf_collapse_property_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are collapsed using a property file snitch the RF is not impacted. """ self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='PropertyFileSnitch', snitch_config_file='cassandra-topology.properties', snitch_lines_before=lambda i, node: ["127.0.0.1=dc1:rack0", "127.0.0.2=dc1:rack1", "127.0.0.3=dc1:rack2"], snitch_lines_after=lambda i, node: ["default=dc1:rack0"], final_racks=["rack0", "rack0", "rack0"], nodes_to_shutdown=[1, 2]) def test_rf_expand_property_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are expanded using a property file snitch the RF is not impacted. 
""" self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='PropertyFileSnitch', snitch_config_file='cassandra-topology.properties', snitch_lines_before=lambda i, node: ["default=dc1:rack0"], snitch_lines_after=lambda i, node: ["127.0.0.1=dc1:rack0", "127.0.0.2=dc1:rack1", "127.0.0.3=dc1:rack2"], final_racks=["rack0", "rack1", "rack2"], nodes_to_shutdown=[1, 2]) @since('2.0', max_version='2.1.x') def test_rf_collapse_yaml_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are collapsed using a yaml file snitch the RF is not impacted. """ self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='YamlFileNetworkTopologySnitch', snitch_config_file='cassandra-topology.yaml', snitch_lines_before=lambda i, node: ["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack0", " nodes:", " - broadcast_address: 127.0.0.1", " - rack_name: rack1", " nodes:", " - broadcast_address: 127.0.0.2", " - rack_name: rack2", " nodes:", " - broadcast_address: 127.0.0.3"], snitch_lines_after=lambda i, node: ["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack0", " nodes:", " - broadcast_address: 127.0.0.1", " - broadcast_address: 127.0.0.2", " - broadcast_address: 127.0.0.3"], final_racks=["rack0", "rack0", "rack0"], nodes_to_shutdown=[1, 2]) @since('2.0', max_version='2.1.x') def test_rf_expand_yaml_file_snitch(self): """ @jira_ticket CASSANDRA-10238 @jira_ticket CASSANDRA-10242 @jira_ticket CASSANDRA-10243 Confirm that when racks are expanded using a yaml file snitch the RF is not impacted. """ self._test_rf_on_snitch_update(nodes=[3], rf={'class': '\'NetworkTopologyStrategy\'', 'dc1': 3}, snitch_class_name='YamlFileNetworkTopologySnitch', snitch_config_file='cassandra-topology.yaml', snitch_lines_before=lambda i, node: ["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack0", " nodes:", " - broadcast_address: 127.0.0.1", " - broadcast_address: 127.0.0.2", " - broadcast_address: 127.0.0.3"], snitch_lines_after=lambda i, node: ["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack0", " nodes:", " - broadcast_address: 127.0.0.1", " - rack_name: rack1", " nodes:", " - broadcast_address: 127.0.0.2", " - rack_name: rack2", " nodes:", " - broadcast_address: 127.0.0.3"], final_racks=["rack0", "rack1", "rack2"], nodes_to_shutdown=[1, 2]) def _test_rf_on_snitch_update(self, nodes, rf, snitch_class_name, snitch_config_file, snitch_lines_before, snitch_lines_after, final_racks, nodes_to_shutdown): cluster = self.cluster cluster.populate(nodes) cluster.set_configuration_options( values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'.format(snitch_class_name)} ) # start with separate racks for i, node in enumerate(cluster.nodelist()): with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file: for line in snitch_lines_before(i, node): topo_file.write(line + os.linesep) cluster.start(wait_for_binary_proto=True) session = self.patient_cql_connection(cluster.nodelist()[0]) options = (', ').join(['\'{}\': {}'.format(d, r) for d, r in rf.items()]) session.execute("CREATE KEYSPACE testing WITH replication = {{{}}}".format(options)) session.execute("CREATE TABLE testing.rf_test (key text PRIMARY KEY, value text)") # avoid errors in nodetool calls below checking for the endpoint count session.cluster.control_connection.wait_for_schema_agreement() # make sure endpoint count 
is correct before continuing with the rest of the test self.check_endpoint_count('testing', 'rf_test', cluster.nodelist(), rf) for i in nodes_to_shutdown: node = cluster.nodelist()[i] logger.debug("Shutting down node {}".format(node.address())) node.stop(wait_other_notice=True) logger.debug("Updating snitch file") for i, node in enumerate(cluster.nodelist()): with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file: for line in snitch_lines_after(i, node): topo_file.write(line + os.linesep) # wait until the config is reloaded before we restart the nodes, the default check period is # 5 seconds so we wait for 10 seconds to be sure logger.debug("Waiting 10 seconds to make sure snitch file is reloaded...") time.sleep(10) for i in nodes_to_shutdown: node = cluster.nodelist()[i] logger.debug("Restarting node {}".format(node.address())) # Since CASSANDRA-10242 it is no longer # possible to start a node with a different rack unless we specify -Dcassandra.ignore_rack and since # CASSANDRA-9474 it is no longer possible to start a node with a different dc unless we specify # -Dcassandra.ignore_dc. node.start(jvm_args=['-Dcassandra.ignore_rack=true', '-Dcassandra.ignore_dc=true'], wait_for_binary_proto=True) self.wait_for_nodes_on_racks(cluster.nodelist(), final_racks) # nodes have joined racks, check endpoint counts again self.check_endpoint_count('testing', 'rf_test', cluster.nodelist(), rf) def test_cannot_restart_with_different_rack(self): """ @jira_ticket CASSANDRA-10242 Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified. """ cluster = self.cluster cluster.populate(1) cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}' .format('GossipingPropertyFileSnitch')}) node1 = cluster.nodelist()[0] with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file: for line in ["dc={}".format(node1.data_center), "rack=rack1"]: topo_file.write(line + os.linesep) logger.debug("Starting node {} with rack1".format(node1.address())) node1.start(wait_for_binary_proto=True) logger.debug("Shutting down node {}".format(node1.address())) node1.stop(wait_other_notice=True) logger.debug("Updating snitch file with rack2") for node in cluster.nodelist(): with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file: for line in ["dc={}".format(node.data_center), "rack=rack2"]: topo_file.write(line + os.linesep) logger.debug("Restarting node {} with rack2".format(node1.address())) mark = node1.mark_log() node1.start() # check node not running logger.debug("Waiting for error message in log file") if cluster.version() >= '2.2': node1.watch_log_for("Cannot start node if snitch's rack(.*) differs from previous rack(.*)", from_mark=mark) else: node1.watch_log_for("Fatal exception during initialization", from_mark=mark) def test_failed_snitch_update_gossiping_property_file_snitch(self): """ @jira_ticket CASSANDRA-10243 Test that we cannot change the rack of a live node with GossipingPropertyFileSnitch. """ self._test_failed_snitch_update(nodes=[3], snitch_class_name='GossipingPropertyFileSnitch', snitch_config_file='cassandra-rackdc.properties', snitch_lines_before=["dc=dc1", "rack=rack1"], snitch_lines_after=["dc=dc1", "rack=rack2"], racks=["rack1", "rack1", "rack1"], error='') def test_failed_snitch_update_property_file_snitch(self): """ @jira_ticket CASSANDRA-10243 Test that we cannot change the rack of a live node with PropertyFileSnitch. 
""" self._test_failed_snitch_update(nodes=[3], snitch_class_name='PropertyFileSnitch', snitch_config_file='cassandra-topology.properties', snitch_lines_before=["default=dc1:rack1"], snitch_lines_after=["default=dc1:rack2"], racks=["rack1", "rack1", "rack1"], error='Cannot update data center or rack') @since('2.0', max_version='2.1.x') def test_failed_snitch_update_yaml_file_snitch(self): """ @jira_ticket CASSANDRA-10243 Test that we cannot change the rack of a live node with YamlFileNetworkTopologySnitch. """ self._test_failed_snitch_update(nodes=[3], snitch_class_name='YamlFileNetworkTopologySnitch', snitch_config_file='cassandra-topology.yaml', snitch_lines_before=["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack1", " nodes:", " - broadcast_address: 127.0.0.1", " - broadcast_address: 127.0.0.2", " - broadcast_address: 127.0.0.3"], snitch_lines_after=["topology:", " - dc_name: dc1", " racks:", " - rack_name: rack2", " nodes:", " - broadcast_address: 127.0.0.1", " - broadcast_address: 127.0.0.2", " - broadcast_address: 127.0.0.3"], racks=["rack1", "rack1", "rack1"], error='Cannot update data center or rack') def _test_failed_snitch_update(self, nodes, snitch_class_name, snitch_config_file, snitch_lines_before, snitch_lines_after, racks, error): cluster = self.cluster cluster.populate(nodes) cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}' .format(snitch_class_name)}) # start with initial snitch lines for node in cluster.nodelist(): with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file: for line in snitch_lines_before: topo_file.write(line + os.linesep) cluster.start(wait_for_binary_proto=True) # check racks are as specified self.wait_for_nodes_on_racks(cluster.nodelist(), racks) marks = [node.mark_log() for node in cluster.nodelist()] logger.debug("Updating snitch file") for node in cluster.nodelist(): with open(os.path.join(node.get_conf_dir(), snitch_config_file), 'w') as topo_file: for line in snitch_lines_after: topo_file.write(line + os.linesep) # wait until the config is reloaded, the default check period is # 5 seconds so we wait for 10 seconds to be sure logger.debug("Waiting 10 seconds to make sure snitch file is reloaded...") time.sleep(10) # check racks have not changed self.wait_for_nodes_on_racks(cluster.nodelist(), racks) # check error in log files if applicable if error: for node, mark in zip(cluster.nodelist(), marks): node.watch_log_for(error, from_mark=mark) def test_switch_data_center_startup_fails(self): """ @jira_ticket CASSANDRA-9474 Confirm that switching data centers fails to bring up the node. """ expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. 
" "Please fix the snitch configuration, decommission and rebootstrap this node or use the flag -Dcassandra.ignore_dc=true.") self.fixture_dtest_setup.ignore_log_patterns = [expected_error] cluster = self.cluster cluster.populate(1) cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'}) node = cluster.nodelist()[0] with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file: topo_file.write("dc=dc9" + os.linesep) topo_file.write("rack=rack1" + os.linesep) cluster.start(wait_for_binary_proto=True) node.stop() with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file: topo_file.write("dc=dc0" + os.linesep) topo_file.write("rack=rack1" + os.linesep) mark = node.mark_log() node.start() node.watch_log_for(expected_error, from_mark=mark, timeout=120)
import os, sys, inspect, shutil from collections import defaultdict from inspect import Parameter, Signature from types import FunctionType from pathlib import Path import param from pyviz_comms import extension as _pyviz_extension from ..core import ( Dataset, DynamicMap, HoloMap, Dimensioned, ViewableElement, StoreOptions, Store ) from ..core.options import Keywords, Options, options_policy from ..core.operation import Operation from ..core.overlay import Overlay from ..core.util import merge_options_to_dict, OrderedDict from ..core.operation import OperationCallable from ..core import util from ..operation.element import function from ..streams import Stream, Params, streams_list_from_dict from .settings import OutputSettings, list_formats, list_backends Store.output_settings = OutputSettings def examples(path='holoviews-examples', verbose=False, force=False, root=__file__): """ Copies the notebooks to the supplied path. """ filepath = os.path.abspath(os.path.dirname(root)) example_dir = os.path.join(filepath, './examples') if not os.path.exists(example_dir): example_dir = os.path.join(filepath, '../examples') if os.path.exists(path): if not force: print('%s directory already exists, either delete it or set the force flag' % path) return shutil.rmtree(path) ignore = shutil.ignore_patterns('.ipynb_checkpoints','*.pyc','*~') tree_root = os.path.abspath(example_dir) if os.path.isdir(tree_root): shutil.copytree(tree_root, path, ignore=ignore, symlinks=True) else: print('Cannot find %s' % tree_root) class opts(param.ParameterizedFunction): """ Utility function to set options at the global level or to provide an Options object that can be used with the .options method of an element or container. Option objects can be generated and validated in a tab-completable way (in appropriate environments such as Jupyter notebooks) using completers such as opts.Curve, opts.Image, opts.Overlay, etc. To set opts globally you can pass these option objects into opts.defaults: opts.defaults(*options) For instance: opts.defaults(opts.Curve(color='red')) To set opts on a specific object, you can supply these option objects to the .options method. For instance: curve = hv.Curve([1,2,3]) curve.options(opts.Curve(color='red')) The options method also accepts lists of Option objects. """ __original_docstring__ = None # Keywords not to be tab-completed (helps with deprecation) _no_completion = ['title_format', 'color_index', 'size_index', 'finalize_hooks', 'scaling_factor', 'scaling_method', 'size_fn', 'normalize_lengths', 'group_index', 'category_index', 'stack_index', 'color_by'] strict = param.Boolean(default=False, doc=""" Whether to be strict about the options specification. If not set to strict (default), any invalid keywords are simply skipped. 
If strict, invalid keywords prevent the options being applied.""") def __init__(self, *args, **kwargs): # Needed for opts specific __signature__ super().__init__(*args, **kwargs) def __call__(self, *args, **params): if not params and not args: return Options() elif params and not args: return Options(**params) @classmethod def _group_kwargs_to_options(cls, obj, kwargs): "Format option group kwargs into canonical options format" groups = Options._option_groups if set(kwargs.keys()) - set(groups): raise Exception("Keyword options %s must be one of %s" % (groups, ','.join(repr(g) for g in groups))) elif not all(isinstance(v, dict) for v in kwargs.values()): raise Exception("The %s options must be specified using dictionary groups" % ','.join(repr(k) for k in kwargs.keys())) # Check whether the user is specifying targets (such as 'Image.Foo') targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()] if any(targets) and not all(targets): raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.") elif not any(targets): # Not targets specified - add current object as target sanitized_group = util.group_sanitizer(obj.group) if obj.label: identifier = ('%s.%s.%s' % ( obj.__class__.__name__, sanitized_group, util.label_sanitizer(obj.label))) elif sanitized_group != obj.__class__.__name__: identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group) else: identifier = obj.__class__.__name__ options = {identifier:{grp:kws for (grp,kws) in kwargs.items()}} else: dfltdict = defaultdict(dict) for grp, entries in kwargs.items(): for identifier, kws in entries.items(): dfltdict[identifier][grp] = kws options = dict(dfltdict) return options @classmethod def _apply_groups_to_backend(cls, obj, options, backend, clone): "Apply the groups to a single specified backend" obj_handle = obj if options is None: if clone: obj_handle = obj.map(lambda x: x.clone(id=None)) else: obj.map(lambda x: setattr(x, 'id', None)) elif clone: obj_handle = obj.map(lambda x: x.clone(id=x.id)) return StoreOptions.set_options(obj_handle, options, backend=backend) @classmethod def _grouped_backends(cls, options, backend): "Group options by backend and filter out output group appropriately" if options is None: return [(backend or Store.current_backend, options)] dfltdict = defaultdict(dict) for spec, groups in options.items(): if 'output' not in groups.keys() or len(groups['output'])==0: dfltdict[backend or Store.current_backend][spec.strip()] = groups elif set(groups['output'].keys()) - set(['backend']): dfltdict[groups['output']['backend']][spec.strip()] = groups elif ['backend'] == list(groups['output'].keys()): filtered = {k:v for k,v in groups.items() if k != 'output'} dfltdict[groups['output']['backend']][spec.strip()] = filtered else: raise Exception('The output options group must have the backend keyword') return [(bk, bk_opts) for (bk, bk_opts) in dfltdict.items()] @classmethod def apply_groups(cls, obj, options=None, backend=None, clone=True, **kwargs): """Applies nested options definition grouped by type. Applies options on an object or nested group of objects, returning a new object with the options applied. This method accepts the separate option namespaces explicitly (i.e. 'plot', 'style', and 'norm'). 
If the options are to be set directly on the object a simple format may be used, e.g.: opts.apply_groups(obj, style={'cmap': 'viridis'}, plot={'show_title': False}) If the object is nested the options must be qualified using a type[.group][.label] specification, e.g.: opts.apply_groups(obj, {'Image': {'plot': {'show_title': False}, 'style': {'cmap': 'viridis}}}) If no opts are supplied all options on the object will be reset. Args: options (dict): Options specification Options specification should be indexed by type[.group][.label] or option type ('plot', 'style', 'norm'). backend (optional): Backend to apply options to Defaults to current selected backend clone (bool, optional): Whether to clone object Options can be applied inplace with clone=False **kwargs: Keywords of options by type Applies options directly to the object by type (e.g. 'plot', 'style', 'norm') specified as dictionaries. Returns: Returns the object or a clone with the options applied """ if isinstance(options, str): from ..util.parser import OptsSpec try: options = OptsSpec.parse(options) except SyntaxError: options = OptsSpec.parse( '{clsname} {options}'.format(clsname=obj.__class__.__name__, options=options)) if kwargs: options = cls._group_kwargs_to_options(obj, kwargs) for backend, backend_opts in cls._grouped_backends(options, backend): obj = cls._apply_groups_to_backend(obj, backend_opts, backend, clone) return obj @classmethod def _process_magic(cls, options, strict, backends=None): if isinstance(options, str): from .parser import OptsSpec try: ns = get_ipython().user_ns # noqa except: ns = globals() options = OptsSpec.parse(options, ns=ns) errmsg = StoreOptions.validation_error_message(options, backends=backends) if errmsg: sys.stderr.write(errmsg) if strict: sys.stderr.write('Options specification will not be applied.') return options, True return options, False @classmethod def _linemagic(cls, options, strict=False, backend=None): backends = None if backend is None else [backend] options, failure = cls._process_magic(options, strict, backends=backends) if failure: return with options_policy(skip_invalid=True, warn_on_skip=False): StoreOptions.apply_customizations(options, Store.options(backend=backend)) @classmethod def defaults(cls, *options, **kwargs): """Set default options for a session. Set default options for a session. whether in a Python script or a Jupyter notebook. Args: *options: Option objects used to specify the defaults. backend: The plotting extension the options apply to """ if kwargs and len(kwargs) != 1 and list(kwargs.keys())[0] != 'backend': raise Exception('opts.defaults only accepts "backend" keyword argument') cls._linemagic(cls._expand_options(merge_options_to_dict(options)), backend=kwargs.get('backend')) @classmethod def _expand_by_backend(cls, options, backend): """ Given a list of flat Option objects which may or may not have 'backend' in their kwargs, return a list of grouped backend """ groups = defaultdict(list) used_fallback = False for obj in options: if 'backend' in obj.kwargs: opts_backend = obj.kwargs['backend'] elif backend is None: opts_backend = Store.current_backend obj.kwargs['backend']= opts_backend else: opts_backend = backend obj.kwargs['backend'] = opts_backend used_fallback = True groups[opts_backend].append(obj) if backend and not used_fallback: cls.param.warning("All supplied Options objects already define a backend, " "backend override %r will be ignored." 
% backend) return [(bk, cls._expand_options(o, bk)) for (bk, o) in groups.items()] @classmethod def _expand_options(cls, options, backend=None): """ Validates and expands a dictionaries of options indexed by type[.group][.label] keys into separate style, plot, norm and output options. opts._expand_options({'Image': dict(cmap='viridis', show_title=False)}) returns {'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}} """ current_backend = Store.current_backend if not Store.renderers: raise ValueError("No plotting extension is currently loaded. " "Ensure you load an plotting extension with " "hv.extension or import it explicitly from " "holoviews.plotting before applying any " "options.") elif current_backend not in Store.renderers: raise ValueError("Currently selected plotting extension {ext} " "has not been loaded, ensure you load it " "with hv.extension({ext}) before setting " "options".format(ext=repr(current_backend))) try: backend_options = Store.options(backend=backend or current_backend) except KeyError as e: raise Exception('The %s backend is not loaded. Please load the backend using hv.extension.' % str(e)) expanded = {} if isinstance(options, list): options = merge_options_to_dict(options) for objspec, options in options.items(): objtype = objspec.split('.')[0] if objtype not in backend_options: raise ValueError('%s type not found, could not apply options.' % objtype) obj_options = backend_options[objtype] expanded[objspec] = {g: {} for g in obj_options.groups} for opt, value in options.items(): for g, group_opts in sorted(obj_options.groups.items()): if opt in group_opts.allowed_keywords: expanded[objspec][g][opt] = value break else: valid_options = sorted({ keyword for group_opts in obj_options.groups.values() for keyword in group_opts.allowed_keywords }) cls._options_error(opt, objtype, backend, valid_options) return expanded @classmethod def _options_error(cls, opt, objtype, backend, valid_options): """ Generates an error message for an invalid option suggesting similar options through fuzzy matching. """ current_backend = Store.current_backend loaded_backends = Store.loaded_backends() kws = Keywords(values=valid_options) matches = sorted(kws.fuzzy_match(opt)) if backend is not None: if matches: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. Similar ' 'options are: %s.' % (opt, objtype, backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'when using the %r extension. No ' 'similar options found.' % (opt, objtype, backend)) # Check option is invalid for all backends found = [] for lb in [b for b in loaded_backends if b != backend]: lb_options = Store.options(backend=lb).get(objtype) if lb_options is None: continue for g, group_opts in lb_options.groups.items(): if opt in group_opts.allowed_keywords: found.append(lb) if found: param.main.param.warning( 'Option %r for %s type not valid for selected ' 'backend (%r). Option only applies to following ' 'backends: %r' % (opt, objtype, current_backend, found)) return if matches: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. Similar options ' 'for current extension (%r) are: %s.' % (opt, objtype, current_backend, matches)) else: raise ValueError('Unexpected option %r for %s type ' 'across all extensions. No similar options ' 'found.' 
% (opt, objtype)) @classmethod def _builder_reprs(cls, options, namespace=None, ns=None): """ Given a list of Option objects (such as those returned from OptsSpec.parse_options) or an %opts or %%opts magic string, return a list of corresponding option builder reprs. The namespace is typically given as 'hv' if fully qualified namespaces are desired. """ if isinstance(options, str): from .parser import OptsSpec if ns is None: try: ns = get_ipython().user_ns # noqa except: ns = globals() options = options.replace('%%opts','').replace('%opts','') options = OptsSpec.parse_options(options, ns=ns) reprs = [] ns = '{namespace}.'.format(namespace=namespace) if namespace else '' for option in options: kws = ', '.join('%s=%r' % (k,option.kwargs[k]) for k in sorted(option.kwargs)) if '.' in option.key: element = option.key.split('.')[0] spec = repr('.'.join(option.key.split('.')[1:])) + ', ' else: element = option.key spec = '' opts_format = '{ns}opts.{element}({spec}{kws})' reprs.append(opts_format.format(ns=ns, spec=spec, kws=kws, element=element)) return reprs @classmethod def _create_builder(cls, element, completions): def builder(cls, spec=None, **kws): spec = element if spec is None else '%s.%s' % (element, spec) prefix = 'In opts.{element}(...), '.format(element=element) backend = kws.get('backend', None) keys = set(kws.keys()) if backend: allowed_kws = cls._element_keywords(backend, elements=[element])[element] invalid = keys - set(allowed_kws) else: mismatched = {} all_valid_kws = set() for loaded_backend in Store.loaded_backends(): valid = set(cls._element_keywords(loaded_backend).get(element, [])) all_valid_kws |= set(valid) if keys <= valid: # Found a backend for which all keys are valid return Options(spec, **kws) mismatched[loaded_backend] = list(keys - valid) invalid = keys - all_valid_kws # Keys not found for any backend if mismatched and not invalid: # Keys found across multiple backends msg = ('{prefix}keywords supplied are mixed across backends. ' 'Keyword(s) {info}') info = ', '.join('%s are invalid for %s' % (', '.join(repr(el) for el in v), k) for k,v in mismatched.items()) raise ValueError(msg.format(info=info, prefix=prefix)) allowed_kws = completions reraise = False if invalid: try: cls._options_error(list(invalid)[0], element, backend, allowed_kws) except ValueError as e: msg = str(e)[0].lower() + str(e)[1:] reraise = True if reraise: raise ValueError(prefix + msg) return Options(spec, **kws) filtered_keywords = [k for k in completions if k not in cls._no_completion] sorted_kw_set = sorted(set(filtered_keywords)) signature = Signature([Parameter('spec', Parameter.POSITIONAL_OR_KEYWORD)] + [Parameter(kw, Parameter.KEYWORD_ONLY) for kw in sorted_kw_set]) builder.__signature__ = signature return classmethod(builder) @classmethod def _element_keywords(cls, backend, elements=None): "Returns a dictionary of element names to allowed keywords" if backend not in Store.loaded_backends(): return {} mapping = {} backend_options = Store.options(backend) elements = elements if elements is not None else backend_options.keys() for element in elements: if '.' 
in element: continue element = element if isinstance(element, tuple) else (element,) element_keywords = [] options = backend_options['.'.join(element)] for group in Options._option_groups: element_keywords.extend(options[group].allowed_keywords) mapping[element[0]] = element_keywords return mapping @classmethod def _update_backend(cls, backend): if cls.__original_docstring__ is None: cls.__original_docstring__ = cls.__doc__ all_keywords = set() element_keywords = cls._element_keywords(backend) for element, keywords in element_keywords.items(): with param.logging_level('CRITICAL'): all_keywords |= set(keywords) setattr(cls, element, cls._create_builder(element, keywords)) filtered_keywords = [k for k in all_keywords if k not in cls._no_completion] sorted_kw_set = sorted(set(filtered_keywords)) from inspect import Parameter, Signature signature = Signature([Parameter('args', Parameter.VAR_POSITIONAL)] + [Parameter(kw, Parameter.KEYWORD_ONLY) for kw in sorted_kw_set]) cls.__init__.__signature__ = signature Store._backend_switch_hooks.append(opts._update_backend) class output(param.ParameterizedFunction): """ Utility function to set output either at the global level or on a specific object. To set output globally use: output(options) Where options may be an options specification string (as accepted by the IPython opts magic) or an options specification dictionary. For instance: output("backend='bokeh'") # Or equivalently output(backend='bokeh') To save output from a specific object to disk using the 'filename' argument, you can supply the object as the first positional argument and supply the filename keyword: curve = hv.Curve([1,2,3]) output(curve, filename='curve.png') For compatibility with the output magic, you can supply the object as the second argument after the string specification: curve = hv.Curve([1,2,3]) output("filename='curve.png'", curve) These two modes are equivalent to the IPython output line magic and the cell magic respectively.
""" @classmethod def info(cls): deprecate = ['filename', 'info', 'mode'] options = Store.output_settings.options defaults = Store.output_settings.defaults keys = [k for k,v in options.items() if k not in deprecate and v != defaults[k]] pairs = {k:options[k] for k in sorted(keys)} if 'backend' not in keys: pairs['backend'] = Store.current_backend if ':' in pairs['backend']: pairs['backend'] = pairs['backend'].split(':')[0] keywords = ', '.join('%s=%r' % (k,pairs[k]) for k in sorted(pairs.keys())) print('output({kws})'.format(kws=keywords)) def __call__(self, *args, **options): help_prompt = 'For help with hv.util.output call help(hv.util.output)' line, obj = None,None if len(args) > 2: raise TypeError('The opts utility accepts one or two positional arguments.') if len(args) == 1 and not isinstance(args[0], str): obj = args[0] elif len(args) == 1: line = args[0] elif len(args) == 2: (line, obj) = args if isinstance(obj, Dimensioned): if line: options = Store.output_settings.extract_keywords(line, {}) for k in options.keys(): if k not in Store.output_settings.allowed: raise KeyError('Invalid keyword: %s' % k) def display_fn(obj, renderer): try: from IPython.display import display except: return display(obj) Store.output_settings.output(line=line, cell=obj, cell_runner=display_fn, help_prompt=help_prompt, **options) elif obj is not None: return obj else: Store.output_settings.output(line=line, help_prompt=help_prompt, **options) output.__doc__ = Store.output_settings._generate_docstring(signature=False) output.__init__.__signature__ = Store.output_settings._generate_signature() def renderer(name): """ Helper utility to access the active renderer for a given extension. """ try: if name not in Store.renderers: prev_backend = Store.current_backend if Store.current_backend not in Store.renderers: prev_backend = None extension(name) if prev_backend: Store.set_current_backend(prev_backend) return Store.renderers[name] except ImportError: msg = ('Could not find a {name!r} renderer, available renderers are: {available}.') available = ', '.join(repr(k) for k in Store.renderers) raise ImportError(msg.format(name=name, available=available)) class extension(_pyviz_extension): """ Helper utility used to load holoviews extensions. These can be plotting extensions, element extensions or anything else that can be registered to work with HoloViews. """ # Mapping between backend name and module name _backends = {'matplotlib': 'mpl', 'bokeh': 'bokeh', 'plotly': 'plotly'} # Hooks run when a backend is loaded _backend_hooks = defaultdict(list) def __call__(self, *args, **params): # Get requested backends config = params.pop('config', {}) util.config.param.set_param(**config) imports = [(arg, self._backends[arg]) for arg in args if arg in self._backends] for p, val in sorted(params.items()): if p in self._backends: imports.append((p, self._backends[p])) if not imports: args = ['matplotlib'] imports = [('matplotlib', 'mpl')] args = list(args) selected_backend = None for backend, imp in imports: try: __import__(backend) except: self.param.warning("%s could not be imported, ensure %s is installed." % (backend, backend)) try: __import__('holoviews.plotting.%s' % imp) if selected_backend is None: selected_backend = backend except util.VersionError as e: self.param.warning( "HoloViews %s extension could not be loaded. " "The installed %s version %s is less than " "the required version %s." 
% (backend, backend, e.version, e.min_version)) except Exception as e: self.param.warning( "Holoviews %s extension could not be imported, " "it raised the following exception: %s('%s')" % (backend, type(e).__name__, e)) finally: Store.output_settings.allowed['backend'] = list_backends() Store.output_settings.allowed['fig'] = list_formats('fig', backend) Store.output_settings.allowed['holomap'] = list_formats('holomap', backend) for hook in self._backend_hooks[backend]: try: hook() except Exception as e: self.param.warning('%s backend hook %s failed with ' 'following exception: %s' % (backend, hook, e)) if selected_backend is None: raise ImportError('None of the backends could be imported') Store.set_current_backend(selected_backend) @classmethod def register_backend_callback(cls, backend, callback): """Registers a hook which is run when a backend is loaded""" cls._backend_hooks[backend].append(callback) def save(obj, filename, fmt='auto', backend=None, resources='cdn', toolbar=None, title=None, **kwargs): """ Saves the supplied object to file. The available output formats depend on the backend being used. By default and if the filename is a string the output format will be inferred from the file extension. Otherwise an explicit format will need to be specified. For ambiguous file extensions such as html it may be necessary to specify an explicit fmt to override the default, e.g. in the case of 'html' output the widgets will default to fmt='widgets', which may be changed to scrubber widgets using fmt='scrubber'. Arguments --------- obj: HoloViews object The HoloViews object to save to file filename: string or IO object The filename or BytesIO/StringIO object to save to fmt: string The format to save the object as, e.g. png, svg, html, or gif and if widgets are desired either 'widgets' or 'scrubber' backend: string A valid HoloViews rendering backend, e.g. bokeh or matplotlib resources: string or bokeh.resource.Resources Bokeh resources used to load bokehJS components. Defaults to CDN, to embed resources inline for offline usage use 'inline' or bokeh.resources.INLINE. toolbar: bool or None Whether to include toolbars in the exported plot. If None, display the toolbar unless fmt is `png` and backend is `bokeh`. If `True`, always include the toolbar. If `False`, do not include the toolbar. title: string Custom title for exported HTML file **kwargs: dict Additional keyword arguments passed to the renderer, e.g. fps for animations """ backend = backend or Store.current_backend renderer_obj = renderer(backend) if ( not toolbar and backend == "bokeh" and (fmt == "png" or (isinstance(filename, str) and filename.endswith("png"))) ): obj = obj.opts(toolbar=None, backend="bokeh", clone=True) elif toolbar is not None and not toolbar: obj = obj.opts(toolbar=None) if kwargs: renderer_obj = renderer_obj.instance(**kwargs) if isinstance(filename, Path): filename = str(filename.absolute()) if isinstance(filename, str): supported = [mfmt for tformats in renderer_obj.mode_formats.values() for mfmt in tformats] formats = filename.split('.') if fmt == 'auto' and formats and formats[-1] != 'html': fmt = formats[-1] if formats[-1] in supported: filename = '.'.join(formats[:-1]) return renderer_obj.save(obj, filename, fmt=fmt, resources=resources, title=title) def render(obj, backend=None, **kwargs): """ Renders the HoloViews object to the corresponding object in the specified backend, e.g. a Matplotlib or Bokeh figure. The backend defaults to the currently declared default backend. 
The resulting object can then be used with other objects in the specified backend. For instance, if you want to make a multi-part Bokeh figure using a plot type only available in HoloViews, you can use this function to return a Bokeh figure that you can use like any hand-constructed Bokeh figure in a Bokeh layout. Arguments --------- obj: HoloViews object The HoloViews object to render backend: string A valid HoloViews rendering backend **kwargs: dict Additional keyword arguments passed to the renderer, e.g. fps for animations Returns ------- rendered: The rendered representation of the HoloViews object, e.g. if backend='matplotlib' a matplotlib Figure or FuncAnimation """ backend = backend or Store.current_backend renderer_obj = renderer(backend) if kwargs: renderer_obj = renderer_obj.instance(**kwargs) plot = renderer_obj.get_plot(obj) if backend == 'matplotlib' and len(plot) > 1: return plot.anim(fps=renderer_obj.fps) return renderer_obj.get_plot_state(obj) class Dynamic(param.ParameterizedFunction): """ Dynamically applies a callable to the Elements in any HoloViews object. Will return a DynamicMap wrapping the original map object, which will lazily evaluate when a key is requested. By default Dynamic applies a no-op, making it useful for converting HoloMaps to a DynamicMap. Any supplied kwargs will be passed to the callable and any streams will be instantiated on the returned DynamicMap. If the supplied operation is a method on a parameterized object which was decorated with parameter dependencies, Dynamic will automatically create a stream to watch the parameter changes. This default behavior may be disabled by setting watch=False. """ operation = param.Callable(default=lambda x: x, doc=""" Operation or user-defined callable to apply dynamically""") kwargs = param.Dict(default={}, doc=""" Keyword arguments passed to the function.""") link_inputs = param.Boolean(default=True, doc=""" If Dynamic is applied to another DynamicMap, determines whether linked streams attached to its Callable inputs are transferred to the output of the utility. For example if the Dynamic utility is applied to a DynamicMap with a RangeXY, this switch determines whether the corresponding visualization should update this stream with range changes originating from the newly generated axes.""") link_dataset = param.Boolean(default=True, doc=""" Determines whether the output of the operation should inherit the .dataset property of the input to the operation. Helpful for tracking data provenance for user supplied functions, which do not make use of the clone method.
Should be disabled for operations where the output is not derived from the input and instead depends on some external state.""") shared_data = param.Boolean(default=False, doc=""" Whether the cloned DynamicMap will share the same cache.""") streams = param.ClassSelector(default=[], class_=(list, dict), doc=""" List of streams to attach to the returned DynamicMap""") def __call__(self, map_obj, **params): watch = params.pop('watch', True) self.p = param.ParamOverrides(self, params) callback = self._dynamic_operation(map_obj) streams = self._get_streams(map_obj, watch) if isinstance(map_obj, DynamicMap): dmap = map_obj.clone(callback=callback, shared_data=self.p.shared_data, streams=streams) if self.p.shared_data: dmap.data = OrderedDict([(k, callback.callable(*k)) for k, v in dmap.data]) else: dmap = self._make_dynamic(map_obj, callback, streams) return dmap def _get_streams(self, map_obj, watch=True): """ Generates a list of streams to attach to the returned DynamicMap. If the input is a DynamicMap any streams that are supplying values for the key dimension of the input are inherited. And the list of supplied stream classes and instances are processed and added to the list. """ if isinstance(self.p.streams, dict): streams = defaultdict(dict) stream_specs, params = [], {} for name, p in self.p.streams.items(): if not isinstance(p, param.Parameter): raise ValueError("Stream dictionary must map operation keywords " "to parameter names. Cannot handle %r type." % type(p)) if inspect.isclass(p.owner) and issubclass(p.owner, Stream): if p.name != name: streams[p.owner][p.name] = name else: streams[p.owner] = {} else: params[name] = p stream_specs = streams_list_from_dict(params) # Note that the correct stream instance will only be created # correctly of the parameter's .owner points to the correct # class (i.e the parameter isn't defined on a superclass) stream_specs += [stream(rename=rename) for stream, rename in streams.items()] else: stream_specs = self.p.streams streams = [] op = self.p.operation for stream in stream_specs: if inspect.isclass(stream) and issubclass(stream, Stream): stream = stream() elif not (isinstance(stream, Stream) or util.is_param_method(stream)): raise ValueError('Streams must be Stream classes or instances, found %s type' % type(stream).__name__) if isinstance(op, Operation): updates = {k: op.p.get(k) for k, v in stream.contents.items() if v is None and k in op.p} if not isinstance(stream, Params): reverse = {v: k for k, v in stream._rename.items()} updates = {reverse.get(k, k): v for k, v in updates.items()} stream.update(**updates) streams.append(stream) params = {} for k, v in self.p.kwargs.items(): if 'panel' in sys.modules: from panel.widgets.base import Widget if isinstance(v, Widget): v = v.param.value if isinstance(v, param.Parameter) and isinstance(v.owner, param.Parameterized): params[k] = v streams += Params.from_params(params) # Inherit dimensioned streams if isinstance(map_obj, DynamicMap): dim_streams = util.dimensioned_streams(map_obj) streams = list(util.unique_iterator(streams + dim_streams)) # If callback is a parameterized method and watch is disabled add as stream has_dependencies = (util.is_param_method(op, has_deps=True) or isinstance(op, FunctionType) and hasattr(op, '_dinfo')) if has_dependencies and watch: streams.append(op) # Add any keyword arguments which are parameterized methods # with dependencies as streams for value in self.p.kwargs.values(): if util.is_param_method(value, has_deps=True): streams.append(value) elif isinstance(value, 
FunctionType) and hasattr(value, '_dinfo'): dependencies = list(value._dinfo.get('dependencies', [])) dependencies += list(value._dinfo.get('kw', {}).values()) params = [d for d in dependencies if isinstance(d, param.Parameter) and isinstance(d.owner, param.Parameterized)] streams.append(Params(parameters=params, watch_only=True)) valid, invalid = Stream._process_streams(streams) if invalid: msg = ('The supplied streams list contains objects that ' 'are not Stream instances: {objs}') raise TypeError(msg.format(objs = ', '.join('%r' % el for el in invalid))) return valid def _process(self, element, key=None, kwargs={}): if util.is_param_method(self.p.operation) and util.get_method_owner(self.p.operation) is element: return self.p.operation(**kwargs) elif isinstance(self.p.operation, Operation): kwargs = {k: v for k, v in kwargs.items() if k in self.p.operation.param} return self.p.operation.process_element(element, key, **kwargs) else: return self.p.operation(element, **kwargs) def _dynamic_operation(self, map_obj): """ Generate function to dynamically apply the operation. Wraps an existing HoloMap or DynamicMap. """ def resolve(key, kwargs): if not isinstance(map_obj, HoloMap): return key, map_obj elif isinstance(map_obj, DynamicMap) and map_obj._posarg_keys and not key: key = tuple(kwargs[k] for k in map_obj._posarg_keys) return key, map_obj[key] def apply(element, *key, **kwargs): kwargs = dict(util.resolve_dependent_kwargs(self.p.kwargs), **kwargs) processed = self._process(element, key, kwargs) if (self.p.link_dataset and isinstance(element, Dataset) and isinstance(processed, Dataset) and processed._dataset is None): processed._dataset = element.dataset return processed def dynamic_operation(*key, **kwargs): key, obj = resolve(key, kwargs) return apply(obj, *key, **kwargs) operation = self.p.operation op_kwargs = self.p.kwargs if not isinstance(operation, Operation): operation = function.instance(fn=apply) op_kwargs = {'kwargs': op_kwargs} return OperationCallable(dynamic_operation, inputs=[map_obj], link_inputs=self.p.link_inputs, operation=operation, operation_kwargs=op_kwargs) def _make_dynamic(self, hmap, dynamic_fn, streams): """ Accepts a HoloMap and a dynamic callback function creating an equivalent DynamicMap from the HoloMap. """ if isinstance(hmap, ViewableElement): dmap = DynamicMap(dynamic_fn, streams=streams) if isinstance(hmap, Overlay): dmap.callback.inputs[:] = list(hmap) return dmap dim_values = zip(*hmap.data.keys()) params = util.get_param_values(hmap) kdims = [d.clone(values=list(util.unique_iterator(values))) for d, values in zip(hmap.kdims, dim_values)] return DynamicMap(dynamic_fn, streams=streams, **dict(params, kdims=kdims))
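# ---------------------------------------------------------------------------
# Usage sketch for the utilities defined above (extension, render, save and
# Dynamic). Illustrative only, not part of the module: it assumes the public
# `holoviews` package is importable as `hv`, that numpy is available, and
# that the matplotlib plotting extension is installed.
if __name__ == '__main__':
    import numpy as np
    import holoviews as hv

    hv.extension('matplotlib')

    # A small HoloMap of random images keyed by an integer 'frame' dimension.
    hmap = hv.HoloMap({i: hv.Image(np.random.rand(10, 10)) for i in range(3)},
                      kdims='frame')

    # Dynamic with its default no-op operation wraps the HoloMap in a lazily
    # evaluated DynamicMap.
    dmap = Dynamic(hmap)

    # render returns the backend-native figure for a single frame; save writes
    # it to disk using the renderer for the current backend.
    fig = render(hmap.last)
    save(hmap.last, 'frame.png')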
""" Xref plugin for Hexrays Decompiler Author: EiNSTeiN_ <einstein@g3nius.org> Show decompiler-style Xref when the X key is pressed in the Decompiler window. - It supports any global name: functions, strings, integers, etc. - It supports structure member. """ import idautils import idaapi import idc import traceback try: from PyQt4 import QtCore, QtGui print 'Using PyQt' except: print 'PyQt not available' try: from PySide import QtGui, QtCore print 'Using PySide' except: print 'PySide not available' XREF_EA = 0 XREF_STRUC_MEMBER = 1 class XrefsForm(idaapi.PluginForm): def __init__(self, target): idaapi.PluginForm.__init__(self) self.target = target if type(self.target) == idaapi.cfunc_t: self.__type = XREF_EA self.__ea = self.target.entry_ea self.__name = 'Xrefs of %x' % (self.__ea, ) elif type(self.target) == idaapi.cexpr_t and self.target.opname == 'obj': self.__type = XREF_EA self.__ea = self.target.obj_ea self.__name = 'Xrefs of %x' % (self.__ea, ) elif type(self.target) == idaapi.cexpr_t and self.target.opname in ('memptr', 'memref'): self.__type = XREF_STRUC_MEMBER name = self.get_struc_name() self.__name = 'Xrefs of %s' % (name, ) else: raise ValueError('cannot show xrefs for this kind of target') return def get_struc_name(self): x = self.target.operands['x'] m = self.target.operands['m'] xtype = x.type xtype.remove_ptr_or_array() typename = idaapi.print_tinfo('', 0, 0, idaapi.PRTYPE_1LINE, xtype, '', '') sid = idc.GetStrucIdByName(typename) member = idc.GetMemberName(sid, m) return '%s::%s' % (typename, member) def OnCreate(self, form): # Get parent widget try: self.parent = self.FormToPySideWidget(form) except: self.parent = self.FormToPyQtWidget(form) self.populate_form() return def Show(self): idaapi.PluginForm.Show(self, self.__name) return def populate_form(self): # Create layout layout = QtGui.QVBoxLayout() layout.addWidget(QtGui.QLabel(self.__name)) self.table = QtGui.QTableWidget() layout.addWidget(self.table) self.table.setColumnCount(3) self.table.setHorizontalHeaderItem(0, QtGui.QTableWidgetItem("Address")) self.table.setHorizontalHeaderItem(1, QtGui.QTableWidgetItem("Function")) self.table.setHorizontalHeaderItem(2, QtGui.QTableWidgetItem("Line")) self.table.setColumnWidth(0, 80) self.table.setColumnWidth(1, 150) self.table.setColumnWidth(2, 450) self.table.cellDoubleClicked.connect(self.double_clicked) #~ self.table.setSelectionMode(QtGui.QAbstractItemView.NoSelection) self.table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows ) self.parent.setLayout(layout) self.populate_table() return def double_clicked(self, row, column): ea = self.functions[row] idaapi.open_pseudocode(ea, True) return def get_decompiled_line(self, cfunc, ea): print repr(ea) if ea not in cfunc.eamap: print 'strange, %x is not in %x eamap' % (ea, cfunc.entry_ea) return insnvec = cfunc.eamap[ea] lines = [] for stmt in insnvec: qp = idaapi.qstring_printer_t(cfunc.__deref__(), False) stmt._print(0, qp) s = qp.s.split('\n')[0] #~ s = idaapi.tag_remove(s) lines.append(s) return '\n'.join(lines) def get_items_for_ea(self, ea): frm = [x.frm for x in idautils.XrefsTo(self.__ea)] items = [] for ea in frm: try: cfunc = idaapi.decompile(ea) self.functions.append(cfunc.entry_ea) self.items.append((ea, idc.GetFunctionName(cfunc.entry_ea), self.get_decompiled_line(cfunc, ea))) except Exception as e: print 'could not decompile: %s' % (str(e), ) raise return def get_items_for_type(self): x = self.target.operands['x'] m = self.target.operands['m'] xtype = x.type xtype.remove_ptr_or_array() typename = 
idaapi.print_tinfo('', 0, 0, idaapi.PRTYPE_1LINE, xtype, '', '') addresses = [] for ea in idautils.Functions(): try: cfunc = idaapi.decompile(ea) except: print 'Decompilation of %x failed' % (ea, ) continue str(cfunc) for citem in cfunc.treeitems: citem = citem.to_specific_type if not (type(citem) == idaapi.cexpr_t and citem.opname in ('memptr', 'memref')): continue _x = citem.operands['x'] _m = citem.operands['m'] _xtype = _x.type _xtype.remove_ptr_or_array() _typename = idaapi.print_tinfo('', 0, 0, idaapi.PRTYPE_1LINE, _xtype, '', '') #~ print 'in', hex(cfunc.entry_ea), _typename, _m if not (_typename == typename and _m == m): continue parent = citem while parent: if type(parent.to_specific_type) == idaapi.cinsn_t: break parent = cfunc.body.find_parent_of(parent) if not parent: print 'cannot find parent statement (?!)' continue if parent.ea in addresses: continue if parent.ea == idaapi.BADADDR: print 'parent.ea is BADADDR' continue addresses.append(parent.ea) self.functions.append(cfunc.entry_ea) self.items.append(( parent.ea, idc.GetFunctionName(cfunc.entry_ea), self.get_decompiled_line(cfunc, parent.ea))) return [] def populate_table(self): self.functions = [] self.items = [] if self.__type == XREF_EA: self.get_items_for_ea(self.__ea) else: self.get_items_for_type() self.table.setRowCount(len(self.items)) i = 0 for item in self.items: address, func, line = item item = QtGui.QTableWidgetItem('0x%x' % (address, )) item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable) self.table.setItem(i, 0, item) item = QtGui.QTableWidgetItem(func) item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable) self.table.setItem(i, 1, item) item = QtGui.QTableWidgetItem(line) item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable) self.table.setItem(i, 2, item) i += 1 self.table.resizeRowsToContents() return def OnClose(self, form): pass class show_xrefs_ah_t(idaapi.action_handler_t): def __init__(self): idaapi.action_handler_t.__init__(self) self.sel = None def activate(self, ctx): vu = idaapi.get_tform_vdui(ctx.form) if not vu or not self.sel: print "No vdui? Strange, since this action should be enabled only for pseudocode views." return 0 form = XrefsForm(self.sel) form.Show() return 1 def update(self, ctx): vu = idaapi.get_tform_vdui(ctx.form) if not vu: return idaapi.AST_DISABLE_FOR_FORM else: vu.get_current_item(idaapi.USE_KEYBOARD) item = vu.item self.sel = None if item.citype == idaapi.VDI_EXPR and item.it.to_specific_type.opname in ('obj', 'memref', 'memptr'): # if an expression is selected. verify that it's either a cot_obj, cot_memref or cot_memptr self.sel = item.it.to_specific_type elif item.citype == idaapi.VDI_FUNC: # if the function itself is selected, show xrefs to it. self.sel = item.f return idaapi.AST_ENABLE if self.sel else idaapi.AST_DISABLE class hexrays_callback_info(object): def __init__(self): return def event_callback(self, event, *args): try: if event == idaapi.hxe_populating_popup: form, phandle, vu = args idaapi.attach_action_to_popup(form, phandle, "vdsxrefs:show", None) except: traceback.print_exc() return 0 if idaapi.init_hexrays_plugin(): adesc = idaapi.action_desc_t('vdsxrefs:show', 'Show xrefs', show_xrefs_ah_t(), "Ctrl+X") if idaapi.register_action(adesc): i = hexrays_callback_info() idaapi.install_hexrays_callback(i.event_callback) else: print "Couldn't register action." else: print 'hexrays is not available.'
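# ---------------------------------------------------------------------------
# Standalone sketch (an assumption, not part of the plugin above): run from
# the IDAPython console with the Hexrays decompiler loaded. It reuses only
# APIs already used in this file (idautils.XrefsTo, idaapi.decompile,
# idc.GetFunctionName) to print the decompiled callers of an address, which
# is the core of what XrefsForm.get_items_for_ea() builds its table from.
def list_decompiled_callers(ea):
    for xref in idautils.XrefsTo(ea):
        try:
            cfunc = idaapi.decompile(xref.frm)
        except Exception as e:
            print 'could not decompile %x: %s' % (xref.frm, str(e))
            continue
        print '%x referenced from %s at %x' % (ea, idc.GetFunctionName(cfunc.entry_ea), xref.frm)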
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2021, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import unittest import tempfile import pathlib import collections import datetime import dateutil.relativedelta as relativedelta import qiime2.core.util as util class TestFindDuplicates(unittest.TestCase): # NOTE: wrapping each input in `iter()` because that is the interface # expected by `find_duplicates`, and avoids the need to test other iterable # types, e.g. list, tuples, generators, etc. def test_empty_iterable(self): obs = util.find_duplicates(iter([])) self.assertEqual(obs, set()) def test_single_value(self): obs = util.find_duplicates(iter(['foo'])) self.assertEqual(obs, set()) def test_multiple_values_no_duplicates(self): obs = util.find_duplicates(iter(['foo', 'bar'])) self.assertEqual(obs, set()) def test_one_duplicate(self): obs = util.find_duplicates(iter(['foo', 'bar', 'foo'])) self.assertEqual(obs, {'foo'}) def test_multiple_duplicates(self): obs = util.find_duplicates( iter(['foo', 'bar', 'foo', 'baz', 'foo', 'bar'])) self.assertEqual(obs, {'foo', 'bar'}) def test_all_duplicates(self): obs = util.find_duplicates( iter(['foo', 'bar', 'baz', 'baz', 'bar', 'foo'])) self.assertEqual(obs, {'foo', 'bar', 'baz'}) def test_different_hashables(self): iterable = iter(['foo', 42, -9.999, 'baz', ('a', 'b'), 42, 'foo', ('a', 'b', 'c'), ('a', 'b')]) obs = util.find_duplicates(iterable) self.assertEqual(obs, {'foo', 42, ('a', 'b')}) class TestDurationTime(unittest.TestCase): def test_time_travel(self): start = datetime.datetime(1987, 10, 27, 1, 21, 2, 50) end = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) reldelta = relativedelta.relativedelta(end, start) self.assertEqual( util.duration_time(reldelta), '-2 years, -1 days, -3 seconds, and 999950 microseconds') def test_no_duration(self): time = datetime.datetime(1985, 10, 26, 1, 21, 0) reldelta = relativedelta.relativedelta(time, time) self.assertEqual(util.duration_time(reldelta), '0 microseconds') def test_singular(self): start = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) end = datetime.datetime(1986, 11, 27, 2, 22, 1, 1) reldelta = relativedelta.relativedelta(end, start) self.assertEqual( util.duration_time(reldelta), '1 year, 1 month, 1 day, 1 hour, 1 minute, 1 second,' ' and 1 microsecond') def test_plural(self): start = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) end = datetime.datetime(1987, 12, 28, 3, 23, 2, 2) reldelta = relativedelta.relativedelta(end, start) self.assertEqual( util.duration_time(reldelta), '2 years, 2 months, 2 days, 2 hours, 2 minutes, 2 seconds,' ' and 2 microseconds') def test_missing(self): start = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) end = datetime.datetime(1987, 10, 27, 1, 21, 2, 50) reldelta = relativedelta.relativedelta(end, start) self.assertEqual( util.duration_time(reldelta), '2 years, 1 day, 2 seconds, and 50 microseconds') def test_unusually_round_number(self): start = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) end = datetime.datetime(1985, 10, 27, 1, 21, 0, 0) reldelta = relativedelta.relativedelta(end, start) self.assertEqual( util.duration_time(reldelta), '1 day') def test_microseconds(self): start = datetime.datetime(1985, 10, 26, 1, 21, 0, 0) end = datetime.datetime(1985, 10, 26, 1, 21, 0, 1955) reldelta = relativedelta.relativedelta(end, 
start) self.assertEqual( util.duration_time(reldelta), '1955 microseconds') class TestMD5Sum(unittest.TestCase): # All expected results where generated via GNU coreutils md5sum def setUp(self): self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-') self.test_path = pathlib.Path(self.test_dir.name) def tearDown(self): self.test_dir.cleanup() def make_file(self, bytes_): path = self.test_path / 'file' with path.open(mode='wb') as fh: fh.write(bytes_) return path def test_empty_file(self): self.assertEqual(util.md5sum(self.make_file(b'')), 'd41d8cd98f00b204e9800998ecf8427e') def test_single_byte_file(self): self.assertEqual(util.md5sum(self.make_file(b'a')), '0cc175b9c0f1b6a831c399e269772661') def test_large_file(self): path = self.make_file(b'verybigfile' * (1024 * 50)) self.assertEqual(util.md5sum(path), '27d64211ee283283ad866c18afa26611') def test_can_use_string(self): string_path = str(self.make_file(b'Normal text\nand things\n')) self.assertEqual(util.md5sum(string_path), '93b048d0202e4b06b658f3aef1e764d3') class TestMD5SumDirectory(unittest.TestCase): # All expected results where generated via GNU coreutils md5sum def setUp(self): self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-') self.test_path = pathlib.Path(self.test_dir.name) def tearDown(self): self.test_dir.cleanup() def make_file(self, bytes_, relpath): path = self.test_path / relpath with path.open(mode='wb') as fh: fh.write(bytes_) return path def test_empty_directory(self): self.assertEqual(util.md5sum_directory(self.test_path), collections.OrderedDict()) def test_nested_empty_directories(self): (self.test_path / 'foo').mkdir() (self.test_path / 'foo' / 'bar').mkdir() (self.test_path / 'baz').mkdir() self.assertEqual(util.md5sum_directory(self.test_path), collections.OrderedDict()) def test_single_file(self): self.make_file(b'Normal text\nand things\n', 'foobarbaz.txt') self.assertEqual( util.md5sum_directory(self.test_path), collections.OrderedDict([ ('foobarbaz.txt', '93b048d0202e4b06b658f3aef1e764d3') ])) def test_single_file_nested(self): nested_dir = self.test_path / 'bar' nested_dir.mkdir() filepath = (nested_dir / 'foo.baz').relative_to(self.test_path) self.make_file(b'anything at all', filepath) self.assertEqual( util.md5sum_directory(self.test_path), collections.OrderedDict([ ('bar/foo.baz', 'dcc0975b66728be0315abae5968379cb') ])) def test_sorted_decent(self): nested_dir = self.test_path / 'beta' nested_dir.mkdir() filepath = (nested_dir / '10').relative_to(self.test_path) self.make_file(b'10', filepath) filepath = (nested_dir / '1').relative_to(self.test_path) self.make_file(b'1', filepath) filepath = (nested_dir / '2').relative_to(self.test_path) self.make_file(b'2', filepath) nested_dir = self.test_path / 'alpha' nested_dir.mkdir() filepath = (nested_dir / 'foo').relative_to(self.test_path) self.make_file(b'foo', filepath) filepath = (nested_dir / 'bar').relative_to(self.test_path) self.make_file(b'bar', filepath) self.make_file(b'z', 'z') self.assertEqual( list(util.md5sum_directory(self.test_path).items()), [ ('z', 'fbade9e36a3f36d3d676c1b808451dd7'), ('alpha/bar', '37b51d194a7513e45b56f6524f2d51f2'), ('alpha/foo', 'acbd18db4cc2f85cedef654fccc4a4d8'), ('beta/1', 'c4ca4238a0b923820dcc509a6f75849b'), ('beta/10', 'd3d9446802a44259755d38e6d163e820'), ('beta/2', 'c81e728d9d4c2f636f067f89cc14862c'), ]) def test_can_use_string(self): nested_dir = self.test_path / 'bar' nested_dir.mkdir() filepath = (nested_dir / 'foo.baz').relative_to(self.test_path) self.make_file(b'anything at 
all', filepath) self.assertEqual( util.md5sum_directory(str(self.test_path)), collections.OrderedDict([ ('bar/foo.baz', 'dcc0975b66728be0315abae5968379cb') ])) class TestChecksumFormat(unittest.TestCase): def test_to_simple(self): line = util.to_checksum_format('this/is/a/filepath', 'd9724aeba59d8cea5265f698b2c19684') self.assertEqual( line, 'd9724aeba59d8cea5265f698b2c19684 this/is/a/filepath') def test_from_simple(self): fp, chks = util.from_checksum_format( 'd9724aeba59d8cea5265f698b2c19684 this/is/a/filepath') self.assertEqual(fp, 'this/is/a/filepath') self.assertEqual(chks, 'd9724aeba59d8cea5265f698b2c19684') def test_to_hard(self): # two kinds of backslash n to trip up the escaping: line = util.to_checksum_format('filepath/\n/with/\\newline', '939aaaae6098ebdab049b0f3abe7b68c') # Note raw string self.assertEqual( line, r'\939aaaae6098ebdab049b0f3abe7b68c filepath/\n/with/\\newline') def test_from_hard(self): fp, chks = util.from_checksum_format( r'\939aaaae6098ebdab049b0f3abe7b68c filepath/\n/with/\\newline' + '\n') # newline from a checksum "file" self.assertEqual(fp, 'filepath/\n/with/\\newline') self.assertEqual(chks, '939aaaae6098ebdab049b0f3abe7b68c') def test_from_legacy_format(self): fp, chks = util.from_checksum_format( r'0ed29022ace300b4d96847882daaf0ef *this/means/binary/mode') self.assertEqual(fp, 'this/means/binary/mode') self.assertEqual(chks, '0ed29022ace300b4d96847882daaf0ef') def check_roundtrip(self, filepath, checksum): line = util.to_checksum_format(filepath, checksum) new_fp, new_chks = util.from_checksum_format(line) self.assertEqual(new_fp, filepath) self.assertEqual(new_chks, checksum) def test_nonsense(self): self.check_roundtrip( r'^~gpfh)bU)WvN/;3jR6H-*={iEBM`(flY2>_|5mp8{-h>Ou\{{ImLT>h;XuC,.#', '89241859050e5a43ccb5f7aa0bca7a3a') self.check_roundtrip( r"l5AAPGKLP5Mcv0b`@zDR\XTTnF;[2M>O/>,d-^Nti'vpH\{>q)/4&CuU/xQ}z,O", 'c47d43cadb60faf30d9405a3e2592b26') self.check_roundtrip( r'FZ\rywG:7Q%"J@}Rk>\&zbWdS0nhEl_k1y1cMU#Lk_"*#*/uGi>Evl7M1suNNVE', '9c7753f252116473994e8bffba2c620b') if __name__ == '__main__': unittest.main()
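# ----------------------------------------------------------------------------
# Reference sketch (an illustration, not the qiime2 implementation): the
# TestFindDuplicates cases above pin down the expected behaviour of
# util.find_duplicates -- given an iterator of hashables, return the set of
# values that occur more than once. A minimal implementation consistent with
# those tests:
def _find_duplicates_sketch(iterable):
    seen, duplicates = set(), set()
    for value in iterable:
        if value in seen:
            duplicates.add(value)
        else:
            seen.add(value)
    return duplicates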
"""Test the Blink config flow.""" from blinkpy.auth import LoginError from blinkpy.blinkpy import BlinkSetupError from homeassistant import config_entries, data_entry_flow, setup from homeassistant.components.blink import DOMAIN from tests.async_mock import Mock, patch from tests.common import MockConfigEntry async def test_form(hass): """Test we get the form.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=False, ), patch( "homeassistant.components.blink.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.blink.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"}, ) assert result2["type"] == "create_entry" assert result2["title"] == "blink" assert result2["result"].unique_id == "blink@example.com" assert result2["data"] == { "username": "blink@example.com", "password": "example", "device_id": "Home Assistant", "token": None, "host": None, "account_id": None, "client_id": None, "region_id": None, } await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 async def test_form_2fa(hass): """Test we get the 2fa form.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=True, ), patch( "homeassistant.components.blink.async_setup", return_value=True ) as mock_setup: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"}, ) assert result2["type"] == "form" assert result2["step_id"] == "2fa" with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=False, ), patch( "homeassistant.components.blink.config_flow.Auth.send_auth_key", return_value=True, ), patch( "homeassistant.components.blink.config_flow.Blink.setup_urls", return_value=True, ), patch( "homeassistant.components.blink.async_setup", return_value=True ) as mock_setup, patch( "homeassistant.components.blink.async_setup_entry", return_value=True ) as mock_setup_entry: result3 = await hass.config_entries.flow.async_configure( result2["flow_id"], {"pin": "1234"} ) assert result3["type"] == "create_entry" assert result3["title"] == "blink" assert result3["result"].unique_id == "blink@example.com" await hass.async_block_till_done() assert len(mock_setup.mock_calls) == 1 assert len(mock_setup_entry.mock_calls) == 1 async def test_form_2fa_connect_error(hass): """Test we report a connect error during 2fa setup.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( 
"homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=True, ), patch("homeassistant.components.blink.async_setup", return_value=True): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"}, ) assert result2["type"] == "form" assert result2["step_id"] == "2fa" with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=False, ), patch( "homeassistant.components.blink.config_flow.Auth.send_auth_key", return_value=True, ), patch( "homeassistant.components.blink.config_flow.Blink.setup_urls", side_effect=BlinkSetupError, ), patch( "homeassistant.components.blink.async_setup", return_value=True ), patch( "homeassistant.components.blink.async_setup_entry", return_value=True ): result3 = await hass.config_entries.flow.async_configure( result2["flow_id"], {"pin": "1234"} ) assert result3["type"] == "form" assert result3["errors"] == {"base": "cannot_connect"} async def test_form_2fa_invalid_key(hass): """Test we report an error if key is invalid.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=True, ), patch("homeassistant.components.blink.async_setup", return_value=True): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"}, ) assert result2["type"] == "form" assert result2["step_id"] == "2fa" with patch("homeassistant.components.blink.config_flow.Auth.startup",), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=False, ), patch( "homeassistant.components.blink.config_flow.Auth.send_auth_key", return_value=False, ), patch( "homeassistant.components.blink.config_flow.Blink.setup_urls", return_value=True, ), patch( "homeassistant.components.blink.async_setup", return_value=True ), patch( "homeassistant.components.blink.async_setup_entry", return_value=True ): result3 = await hass.config_entries.flow.async_configure( result2["flow_id"], {"pin": "1234"} ) assert result3["type"] == "form" assert result3["errors"] == {"base": "invalid_access_token"} async def test_form_2fa_unknown_error(hass): """Test we report an unknown error during 2fa setup.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=True, ), patch("homeassistant.components.blink.async_setup", return_value=True): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"}, ) assert result2["type"] == "form" assert result2["step_id"] == "2fa" with patch("homeassistant.components.blink.config_flow.Auth.startup"), patch( "homeassistant.components.blink.config_flow.Auth.check_key_required", return_value=False, ), patch( "homeassistant.components.blink.config_flow.Auth.send_auth_key", return_value=True, ), patch( 
"homeassistant.components.blink.config_flow.Blink.setup_urls", side_effect=KeyError, ), patch( "homeassistant.components.blink.async_setup", return_value=True ), patch( "homeassistant.components.blink.async_setup_entry", return_value=True ): result3 = await hass.config_entries.flow.async_configure( result2["flow_id"], {"pin": "1234"} ) assert result3["type"] == "form" assert result3["errors"] == {"base": "unknown"} async def test_form_invalid_auth(hass): """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.blink.config_flow.Auth.startup", side_effect=LoginError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"} ) assert result2["type"] == "form" assert result2["errors"] == {"base": "invalid_auth"} async def test_form_unknown_error(hass): """Test we handle unknown error at startup.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.blink.config_flow.Auth.startup", side_effect=KeyError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], {"username": "blink@example.com", "password": "example"} ) assert result2["type"] == "form" assert result2["errors"] == {"base": "unknown"} async def test_reauth_shows_user_step(hass): """Test reauth shows the user form.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": "reauth"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" async def test_options_flow(hass): """Test config flow options.""" config_entry = MockConfigEntry( domain=DOMAIN, data={"username": "blink@example.com", "password": "example"}, options={}, entry_id=1, version=2, ) config_entry.add_to_hass(hass) mock_auth = Mock( startup=Mock(return_value=True), check_key_required=Mock(return_value=False) ) mock_blink = Mock() with patch("homeassistant.components.blink.Auth", return_value=mock_auth), patch( "homeassistant.components.blink.Blink", return_value=mock_blink ): await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() result = await hass.config_entries.options.async_init( config_entry.entry_id, context={"show_advanced_options": False} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "simple_options" result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={"scan_interval": 5}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["data"] == {"scan_interval": 5} assert mock_blink.refresh_rate == 5
#!/usr/bin/env python3 # Copyright (c) 2014-present, The osquery authors # # This source code is licensed as defined by the LICENSE file found in the # root directory of this source tree. # # SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) import json import os import subprocess import sys import time try: import argparse except ImportError: print("Cannot import argparse.") exit(1) # Import the testing utils sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../tests/") import utils KB = 1024 * 1024 RANGES = { "colors": (utils.blue, utils.green, utils.yellow, utils.red), "utilization": (8, 20, 50), "cpu_time": (0.4, 1, 10), "memory": (8 * KB, 12 * KB, 24 * KB), "fds": (10, 20, 50), "duration": (0.8, 1, 3), } def check_leaks_linux(shell, query, count=1, supp_file=None): """Run valgrind using the shell and a query, parse leak reports.""" suppressions = "" if supp_file is None else "--suppressions=%s" % supp_file cmd = [ "valgrind", "--tool=memcheck", suppressions, shell, "--profile", "%d" % count, query, "--disable_extensions", ] proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) _, stderr = proc.communicate() summary = { "definitely": None, "indirectly": None, "possibly": None, } if args.verbose: print(stderr) for line in stderr.split("\n"): for key in summary: if line.find(key) >= 0: summary[key] = line.split(":")[1].strip() if summary["definitely"] is None: raise Exception("Could not execute valgrind correctly") return summary def check_leaks_darwin(shell, query, count=1): # Run the shell with a --delay flag such that leaks can attach before exit. proc = subprocess.Popen( [shell, "--profile", str(count), "--profile_delay", "1", query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) leak_checks = None while proc.poll() is None: # Continue to run leaks until the monitored shell exits. leaks = subprocess.Popen( ["leaks", "%s" % proc.pid], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, _ = leaks.communicate() if args.verbose: print(stdout) try: for line in stdout.split("\n"): if line.find("total leaked bytes") >= 0: leak_checks = line.split(":")[1].strip() except: print("Encountered exception while running leaks:") print(stdout) return {"definitely": leak_checks} def check_leaks(shell, query, count=1, supp_file=None): if utils.platform() == "darwin": return check_leaks_darwin(shell, query, count=count) else: return check_leaks_linux(shell, query, count=count, supp_file=supp_file) def profile_leaks(shell, queries, count=1, rounds=1, supp_file=None): report = {} for name, query in queries.items(): print("Analyzing leaks in query: %s" % query) # Apply count (optionally run the query several times). summary = check_leaks(shell, query, count, supp_file) display = [] for key in summary: output = summary[key] if output is not None and output[0] != "0": # Add some fun colored output if leaking. 
if key == "definitely": output = utils.red(output) report[name] = "LEAKING" if key == "indirectly": output = utils.yellow(output) report[name] = "WARNING" elif name not in report.keys(): report[name] = "SAFE" display.append("%s: %s" % (key, output)) print(" %s" % "; ".join(display)) return report def run_query(shell, query, timeout=0, count=1): """Execute the osqueryi shell in profile mode with a setup/teardown delay.""" start_time = time.time() return utils.profile_cmd([ shell, "--profile", str(count), "--profile_delay", "1", query, "--disable_extensions", ], timeout=timeout, count=count) def summary_line(name, result): if not args.n: for key, v in result.items(): print("%s" % ( RANGES["colors"][v[0]]("%s:%s" % ( key[0].upper(), v[0]))), end="") print(" ", end="") print("%s:" % name, end=" ") for key, v in result.items(): print("%s: %s" % (key, v[1]), end=" ") print("") def summary(results, display=False): """Map the results to simple thresholds.""" def rank(value, ranges): for i, r in enumerate(ranges): if value < r: return i return len(ranges) summary_results = {} for name, result in results.items(): failed = "exit" in result and result["exit"] > 0 summary_result = {} for key in RANGES: if key == "colors": continue if key not in result: continue if failed: summary_result[key] = (len(RANGES["colors"]) - 1, -1) else: summary_result[key] = (rank(result[key], RANGES[key]), result[key]) if display and not args.check: summary_line(name, summary_result) summary_results[name] = summary_result return summary_results def profile(shell, queries, timeout=0, count=1, rounds=1): report = {} for name, query in queries.items(): forced = True if name == "force" else False if not forced: print("Profiling query: %s" % query) results = {} for i in range(rounds): if forced: result = utils.profile_cmd(shell, shell=True, timeout=timeout, count=count) else: result = run_query(shell, query, timeout=timeout, count=count) summary( {"%s (%d/%d)" % (name, i + 1, rounds): result}, display=True) # Store each result round to return an average. for k, v in result.items(): results[k] = results.get(k, []) results[k].append(v) average_results = {} for k in results: average_results[k] = sum(results[k]) / len(results[k]) report[name] = average_results if rounds > 1: summary({"%s avg" % name: report[name]}, display=True) return report def compare(profile1, profile2): """Compare two jSON profile outputs.""" for table in profile1: if table not in profile2: # No comparison possible continue summary_line(table, profile1[table]) summary_line(table, profile2[table]) def regress_check(profile1, profile2): regressed = False for table in profile1: if table not in profile2: continue for measure in profile1[table]: if profile2[table][measure][0] > profile1[table][measure][0]: print("%s %s has regressed (%s->%s)!" % (table, measure, profile1[table][measure][0], profile2[table][measure][0])) regressed = True if not regressed: print("No regressions!") return 0 return 1 if __name__ == "__main__": parser = argparse.ArgumentParser(description=( "Profile osquery, individual tables, " "or a set of osqueryd config queries." )) parser.add_argument( "-n", action="store_true", default=False, help="Do not output colored ranks." ) parser.add_argument( "--verbose", action="store_true", default=False, help="Be verbose.") parser.add_argument( "--leaks", default=False, action="store_true", help="Check for memory leaks instead of performance." 
) group = parser.add_argument_group("Query Options:") group.add_argument( "--restrict", metavar="LIST", default="", help="Limit to a list of comma-separated tables." ) group.add_argument( "--tables", metavar="PATH", default="./specs", help="Path to the osquery table specs." ) group.add_argument( "--config", metavar="FILE", default=None, help="Use scheduled queries from a config." ) group.add_argument( "--query", metavar="STRING", default=None, help="Profile a single query." ) group = parser.add_argument_group("Run Options:") group.add_argument( "--timeout", metavar="N", default=0, type=int, help="Max seconds a query may run --count times." ) group.add_argument( "--count", metavar="N", default=1, type=int, help="Run the query N times serially." ) group.add_argument( "--rounds", metavar="N", default=1, type=int, help="Run the profile for N rounds and use the average." ) group.add_argument( "--shell", metavar="PATH", default="./build/%s/osquery/osqueryi" % ( utils.platform()), help="Path to osqueryi shell (./build/<sys>/osquery/osqueryi)." ) group.add_argument( "--force", action="store_true", default=False, help="Force run the target of shell", ) group = parser.add_argument_group("Performance Options:") group.add_argument( "--output", metavar="FILE", default=None, help="Write JSON performance output to file." ) group.add_argument( "--check", metavar="OLD_OUTPUT", nargs=1, help="Check regressions using an existing output." ) group.add_argument( "--compare", metavar="FILE", nargs=2, help="Compare existing performance outputs (old, new)." ) group = parser.add_argument_group("Memory Options:") group.add_argument( "--suppressions", metavar="SUPP", default="./tools/analysis/valgrind.supp", help="Add a suppressions files to memory leak checking (linux only)." ) args = parser.parse_args() if args.compare: with open(args.compare[0]) as fh: profile1 = json.loads(fh.read()) with open(args.compare[1]) as fh: profile2 = json.loads(fh.read()) compare(profile1, profile2) exit(0) if args.check: with open(args.check[0]) as fh: profile1 = json.loads(fh.read()) if not args.force and not os.path.exists(args.shell): print("Cannot find --shell: %s" % (args.shell)) exit(1) if args.config is None and not os.path.exists(args.tables): print("Cannot find --tables: %s" % (args.tables)) exit(1) queries = {} if args.config is not None: if not os.path.exists(args.config): print("Cannot find --config: %s" % (args.config)) exit(1) queries = utils.queries_from_config(args.config) # Search queries in subdirectory ".d" based on the config filename if os.path.isdir(args.config + ".d"): for config_file in os.listdir(args.config + ".d"): queries.update(utils.queries_from_config(os.path.join( args.config + ".d", config_file))) elif args.query is not None: queries["manual"] = args.query elif args.force: queries["force"] = True else: queries = utils.queries_from_tables(args.tables, args.restrict) if args.leaks: results = profile_leaks( args.shell, queries, count=args.count, rounds=args.rounds, supp_file=args.suppressions ) else: # Start the profiling! results = profile( args.shell, queries, timeout=args.timeout, count=args.count, rounds=args.rounds ) # Only apply checking/regressions to performance, not leaks. if args.check: exit(regress_check(profile1, summary(results))) if args.output is not None: with open(args.output, "w") as fh: if args.leaks: # Leaks report does not need a summary view. 
fh.write(json.dumps(results, indent=1)) else: fh.write(json.dumps(summary(results), indent=1)) print("Wrote output summary: %s" % args.output) if args.leaks: for name in results.keys(): if results[name] != "SAFE": sys.exit(1) sys.exit(0)
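# Example invocations (a sketch; the script path and build directory are
# hypothetical, but every flag shown is defined in the argument parser above):
#
#   python3 profile.py --shell ./build/linux/osquery/osqueryi \
#       --restrict processes,users --rounds 3 --output baseline.json
#
#   # Check a later run for rank regressions against the saved baseline.
#   python3 profile.py --shell ./build/linux/osquery/osqueryi --check baseline.json
#
#   # Memory-leak mode for a single query (valgrind on Linux, leaks on macOS).
#   python3 profile.py --leaks --shell ./build/linux/osquery/osqueryi \
#       --query "select * from processes;"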
#!/usr/bin/python # Copyright (c) 2003-2014 CORE Security Technologies # # This software is provided under under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. # # $Id: smbclient.py 1125 2014-01-27 19:58:16Z bethus@gmail.com $ # # Description: Mini shell using some of the SMB funcionality of the library # # Author: # Alberto Solino # # # Reference for: # SMB DCE/RPC # import sys import string import time import logging from impacket import smb, version, smb3, nt_errors from impacket.dcerpc.v5 import samr, transport, srvs from impacket.dcerpc.v5.dtypes import NULL from impacket.smbconnection import * import argparse import ntpath import cmd import os # If you wanna have readline like functionality in Windows, install pyreadline try: import pyreadline as readline except ImportError: import readline class MiniImpacketShell(cmd.Cmd): def __init__(self): cmd.Cmd.__init__(self) self.prompt = '# ' self.smb = None self.tid = None self.intro = 'Type help for list of commands' self.pwd = '' self.share = None self.loggedIn = False self.password = None self.lmhash = None self.nthash = None self.username = None self.completion = [] def emptyline(self): pass def onecmd(self,s): retVal = False try: retVal = cmd.Cmd.onecmd(self,s) except Exception, e: #import traceback #print traceback.print_exc() logging.error(e) return retVal def do_exit(self,line): return True def do_shell(self, line): output = os.popen(line).read() print output self.last_output = output def do_help(self,line): print """ open {host,port=445} - opens a SMB connection against the target host/port login {domain/username,passwd} - logs into the current SMB connection, no parameters for NULL connection. If no password specified, it'll be prompted login_hash {domain/username,lmhash:nthash} - logs into the current SMB connection using the password hashes logoff - logs off shares - list available shares use {sharename} - connect to an specific share cd {path} - changes the current directory to {path} pwd - shows current remote directory password - changes the user password, the new password will be prompted for input ls {wildcard} - lists all the files in the current directory rm {file} - removes the selected file mkdir {dirname} - creates the directory under the current path rmdir {dirname} - removes the directory under the current path put {filename} - uploads the filename into the current path get {filename} - downloads the filename from the current path info - returns NetrServerInfo main results who - returns the sessions currently connected at the target host (admin required) close - closes the current SMB Session exit - terminates the server process (and this session) """ def do_password(self, line): if self.loggedIn is False: logging.error("Not logged in") return from getpass import getpass newPassword = getpass("New Password:") rpctransport = transport.SMBTransport(self.smb.getServerName(), self.smb.getRemoteHost(), filename = r'\samr', smb_connection = self.smb) dce = rpctransport.get_dce_rpc() dce.connect() dce.bind(samr.MSRPC_UUID_SAMR) resp = samr.hSamrUnicodeChangePasswordUser2(dce, '\x00', self.username, self.password, newPassword, self.lmhash, self.nthash) self.password = newPassword self.lmhash = None self.nthash = None def do_open(self,line): l = line.split(' ') port = 445 if len(l) > 0: host = l[0] if len(l) > 1: port = int(l[1]) if port == 139: self.smb = SMBConnection('*SMBSERVER', host, sess_port=port) else: self.smb = SMBConnection(host, host, 
sess_port=port) dialect = self.smb.getDialect() if dialect == SMB_DIALECT: logging.info("SMBv1 dialect used") elif dialect == SMB2_DIALECT_002: logging.info("SMBv2.0 dialect used") elif dialect == SMB2_DIALECT_21: logging.info("SMBv2.1 dialect used") else: logging.info("SMBv3.0 dialect used") self.share = None self.tid = None self.pwd = '' self.loggedIn = False self.password = None self.lmhash = None self.nthash = None self.username = None def do_login(self,line): if self.smb is None: logging.error("No connection open") return l = line.split(' ') username = '' password = '' domain = '' if len(l) > 0: username = l[0] if len(l) > 1: password = l[1] if username.find('/') > 0: domain, username = username.split('/') if password == '' and username != '': from getpass import getpass password = getpass("Password:") self.smb.login(username, password, domain=domain) self.password = password self.username = username if self.smb.isGuestSession() > 0: logging.info("GUEST Session Granted") else: logging.info("USER Session Granted") self.loggedIn = True def do_login_hash(self,line): if self.smb is None: logging.error("No connection open") return l = line.split(' ') domain = '' if len(l) > 0: username = l[0] if len(l) > 1: hashes = l[1] else: logging.error("Hashes needed. Format is lmhash:nthash") return if username.find('/') > 0: domain, username = username.split('/') lmhash, nthash = hashes.split(':') self.smb.login(username, '', domain,lmhash=lmhash, nthash=nthash) self.username = username self.lmhash = lmhash self.nthash = nthash if self.smb.isGuestSession() > 0: logging.info("GUEST Session Granted") else: logging.info("USER Session Granted") self.loggedIn = True def do_logoff(self, line): if self.smb is None: logging.error("No connection open") return self.smb.logoff() self.share = None self.smb = None self.tid = None self.pwd = '' self.loggedIn = False self.password = None self.lmhash = None self.nthash = None self.username = None def do_info(self, line): if self.loggedIn is False: logging.error("Not logged in") return rpctransport = transport.SMBTransport(self.smb.getServerName(), self.smb.getRemoteHost(), filename = r'\srvsvc', smb_connection = self.smb) dce = rpctransport.get_dce_rpc() dce.connect() dce.bind(srvs.MSRPC_UUID_SRVS) resp = srvs.hNetrServerGetInfo(dce, 102) print "Version Major: %d" % resp['InfoStruct']['ServerInfo102']['sv102_version_major'] print "Version Minor: %d" % resp['InfoStruct']['ServerInfo102']['sv102_version_minor'] print "Server Name: %s" % resp['InfoStruct']['ServerInfo102']['sv102_name'] print "Server Comment: %s" % resp['InfoStruct']['ServerInfo102']['sv102_comment'] print "Server UserPath: %s" % resp['InfoStruct']['ServerInfo102']['sv102_userpath'] print "Simultaneous Users: %d" % resp['InfoStruct']['ServerInfo102']['sv102_users'] def do_who(self, line): if self.loggedIn is False: logging.error("Not logged in") return rpctransport = transport.SMBTransport(self.smb.getServerName(), self.smb.getRemoteHost(), filename = r'\srvsvc', smb_connection = self.smb) dce = rpctransport.get_dce_rpc() dce.connect() dce.bind(srvs.MSRPC_UUID_SRVS) resp = srvs.hNetrSessionEnum(dce, NULL, NULL, 502) for session in resp['InfoStruct']['SessionInfo']['Level502']['Buffer']: print "host: %15s, user: %5s, active: %5d, idle: %5d, type: %5s, transport: %s" % (session['sesi502_cname'][:-1], session['sesi502_username'][:-1], session['sesi502_time'], session['sesi502_idle_time'], session['sesi502_cltype_name'][:-1],session['sesi502_transport'][:-1] ) def do_shares(self, line): if self.loggedIn 
is False: logging.error("Not logged in") return resp = self.smb.listShares() for i in range(len(resp)): print resp[i]['shi1_netname'][:-1] def do_use(self,line): if self.loggedIn is False: logging.error("Not logged in") return self.share = line self.tid = self.smb.connectTree(line) self.pwd = '\\' self.do_ls('', False) def complete_cd(self, text, line, begidx, endidx): return self.complete_get(text, line, begidx, endidx, include = 2) def do_cd(self, line): if self.tid is None: logging.error("No share selected") return p = string.replace(line,'/','\\') oldpwd = self.pwd if p[0] == '\\': self.pwd = line else: self.pwd = ntpath.join(self.pwd, line) self.pwd = ntpath.normpath(self.pwd) # Let's try to open the directory to see if it's valid try: fid = self.smb.openFile(self.tid, self.pwd) self.smb.closeFile(self.tid,fid) self.pwd = oldpwd logging.error("Invalid directory") except Exception, e: if e.getErrorCode() == nt_errors.STATUS_FILE_IS_A_DIRECTORY: pass else: self.pwd = oldpwd raise def do_pwd(self,line): if self.loggedIn is False: logging.error("Not logged in") return print self.pwd def do_ls(self, wildcard, display = True): if self.tid is None: logging.error("No share selected") return if wildcard == '': pwd = ntpath.join(self.pwd,'*') else: pwd = ntpath.join(self.pwd, wildcard) self.completion = [] pwd = string.replace(pwd,'/','\\') pwd = ntpath.normpath(pwd) for f in self.smb.listPath(self.share, pwd): if display is True: print "%crw-rw-rw- %10d %s %s" % ('d' if f.is_directory() > 0 else '-', f.get_filesize(), time.ctime(float(f.get_mtime_epoch())) ,f.get_longname() ) self.completion.append((f.get_longname(),f.is_directory())) def do_rm(self, filename): if self.tid is None: logging.error("No share selected") return f = ntpath.join(self.pwd, filename) file = string.replace(f,'/','\\') self.smb.deleteFile(self.share, file) def do_mkdir(self, path): if self.tid is None: logging.error("No share selected") return p = ntpath.join(self.pwd, path) pathname = string.replace(p,'/','\\') self.smb.createDirectory(self.share,pathname) def do_rmdir(self, path): if self.tid is None: logging.error("No share selected") return p = ntpath.join(self.pwd, path) pathname = string.replace(p,'/','\\') self.smb.deleteDirectory(self.share, pathname) def do_put(self, pathname): if self.tid is None: logging.error("No share selected") return src_path = pathname dst_name = os.path.basename(src_path) fh = open(pathname, 'rb') f = ntpath.join(self.pwd,dst_name) finalpath = string.replace(f,'/','\\') self.smb.putFile(self.share, finalpath, fh.read) fh.close() def complete_get(self, text, line, begidx, endidx, include = 1): # include means # 1 just files # 2 just directories p = string.replace(line,'/','\\') if p.find('\\') < 0: items = [] if include == 1: mask = 0 else: mask = 0x010 for i in self.completion: if i[1] == mask: items.append(i[0]) if text: return [ item for item in items if item.upper().startswith(text.upper()) ] else: return items def do_get(self, filename): if self.tid is None: logging.error("No share selected") return filename = string.replace(filename,'/','\\') fh = open(ntpath.basename(filename),'wb') pathname = ntpath.join(self.pwd,filename) try: self.smb.getFile(self.share, pathname, fh.write) except: fh.close() os.remove(filename) raise fh.close() def do_close(self, line): if self.loggedIn is False: logging.error("Not logged in") return del(self.smb); def main(): print version.BANNER shell = MiniImpacketShell() if len(sys.argv)==1: shell.cmdloop() else: parser = argparse.ArgumentParser() 
        parser.add_argument('-file', type=argparse.FileType('r'),
                            help='input file with commands to execute in the mini shell')
        options = parser.parse_args()
        logging.info("Executing commands from %s" % options.file.name)
        for line in options.file.readlines():
            if line[0] != '#':
                print "# %s" % line,
                shell.onecmd(line)
            else:
                print line,


if __name__ == "__main__":
    try:
        main()
    except:
        print "\n"
        pass
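# --- Illustrative sketch (not part of the original impacket file) ----------
# MiniImpacketShell is a cmd.Cmd subclass, so besides the interactive loop or
# the -file option handled by main(), it can be driven programmatically via
# onecmd(), using the same command syntax documented in do_help. The host,
# credentials and share below are placeholders, kept as comments so that
# importing this module never attempts a network connection.
#
#   shell = MiniImpacketShell()
#   shell.onecmd('open 192.168.1.10 445')
#   shell.onecmd('login MYDOMAIN/alice secret')
#   shell.onecmd('shares')
#   shell.onecmd('use C$')
#   shell.onecmd('ls')
#   shell.onecmd('exit')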
""" General tests for all estimators in sklearn. """ # Authors: Andreas Mueller <amueller@ais.uni-bonn.de> # Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause import os import warnings import sys import re import pkgutil from inspect import isgenerator, signature from itertools import product from functools import partial import pytest from sklearn.utils import all_estimators from sklearn.utils._testing import ignore_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import FitFailedWarning from sklearn.utils.estimator_checks import check_estimator import sklearn from sklearn.decomposition import PCA from sklearn.linear_model._base import LinearClassifierMixin from sklearn.linear_model import LogisticRegression from sklearn.linear_model import Ridge from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.experimental import enable_halving_search_cv # noqa from sklearn.model_selection import HalvingGridSearchCV from sklearn.model_selection import HalvingRandomSearchCV from sklearn.pipeline import make_pipeline from sklearn.utils import IS_PYPY from sklearn.utils._testing import ( SkipTest, set_random_state, ) from sklearn.utils.estimator_checks import ( _construct_instance, _set_checking_parameters, _get_check_estimator_ids, check_class_weight_balanced_linear_classifier, parametrize_with_checks, check_n_features_in_after_fitting, ) def test_all_estimator_no_base_class(): # test that all_estimators doesn't find abstract classes. for name, Estimator in all_estimators(): msg = ( "Base estimators such as {0} should not be included in all_estimators" ).format(name) assert not name.lower().startswith("base"), msg def _sample_func(x, y=1): pass @pytest.mark.parametrize( "val, expected", [ (partial(_sample_func, y=1), "_sample_func(y=1)"), (_sample_func, "_sample_func"), (partial(_sample_func, "world"), "_sample_func"), (LogisticRegression(C=2.0), "LogisticRegression(C=2.0)"), ( LogisticRegression( random_state=1, solver="newton-cg", class_weight="balanced", warm_start=True, ), "LogisticRegression(class_weight='balanced',random_state=1," "solver='newton-cg',warm_start=True)", ), ], ) def test_get_check_estimator_ids(val, expected): assert _get_check_estimator_ids(val) == expected def _tested_estimators(): for name, Estimator in all_estimators(): try: estimator = _construct_instance(Estimator) except SkipTest: continue yield estimator @parametrize_with_checks(list(_tested_estimators())) def test_estimators(estimator, check, request): # Common tests for estimator instances with ignore_warnings( category=(FutureWarning, ConvergenceWarning, UserWarning, FutureWarning) ): _set_checking_parameters(estimator) check(estimator) def test_check_estimator_generate_only(): all_instance_gen_checks = check_estimator(LogisticRegression(), generate_only=True) assert isgenerator(all_instance_gen_checks) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) # ignore deprecated open(.., 'U') in numpy distutils def test_configure(): # Smoke test the 'configure' step of setup, this tests all the # 'configure' functions in the setup.pys in scikit-learn # This test requires Cython which is not necessarily there when running # the tests of an installed version of scikit-learn or when scikit-learn # is installed in editable mode by pip build isolation enabled. 
pytest.importorskip("Cython") cwd = os.getcwd() setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], "..")) setup_filename = os.path.join(setup_path, "setup.py") if not os.path.exists(setup_filename): pytest.skip("setup.py not available") # XXX unreached code as of v0.22 try: os.chdir(setup_path) old_argv = sys.argv sys.argv = ["setup.py", "config"] with warnings.catch_warnings(): # The configuration spits out warnings when not finding # Blas/Atlas development headers warnings.simplefilter("ignore", UserWarning) with open("setup.py") as f: exec(f.read(), dict(__name__="__main__")) finally: sys.argv = old_argv os.chdir(cwd) def _tested_linear_classifiers(): classifiers = all_estimators(type_filter="classifier") with warnings.catch_warnings(record=True): for name, clazz in classifiers: required_parameters = getattr(clazz, "_required_parameters", []) if len(required_parameters): # FIXME continue if "class_weight" in clazz().get_params().keys() and issubclass( clazz, LinearClassifierMixin ): yield name, clazz @pytest.mark.parametrize("name, Classifier", _tested_linear_classifiers()) def test_class_weight_balanced_linear_classifiers(name, Classifier): check_class_weight_balanced_linear_classifier(name, Classifier) @ignore_warnings def test_import_all_consistency(): # Smoke test to check that any name in a __all__ list is actually defined # in the namespace of the module or package. pkgs = pkgutil.walk_packages( path=sklearn.__path__, prefix="sklearn.", onerror=lambda _: None ) submods = [modname for _, modname, _ in pkgs] for modname in submods + ["sklearn"]: if ".tests." in modname: continue if IS_PYPY and ( "_svmlight_format_io" in modname or "feature_extraction._hashing_fast" in modname ): continue package = __import__(modname, fromlist="dummy") for name in getattr(package, "__all__", ()): assert hasattr(package, name), "Module '{0}' has no attribute '{1}'".format( modname, name ) def test_root_import_all_completeness(): EXCEPTIONS = ("utils", "tests", "base", "setup", "conftest") for _, modname, _ in pkgutil.walk_packages( path=sklearn.__path__, onerror=lambda _: None ): if "." in modname or modname.startswith("_") or modname in EXCEPTIONS: continue assert modname in sklearn.__all__ def test_all_tests_are_importable(): # Ensure that for each contentful subpackage, there is a test directory # within it that is also a subpackage (i.e. a directory with __init__.py) HAS_TESTS_EXCEPTIONS = re.compile( r"""(?x) \.externals(\.|$)| \.tests(\.|$)| \._ """ ) resource_modules = { "sklearn.datasets.data", "sklearn.datasets.descr", "sklearn.datasets.images", } lookup = { name: ispkg for _, name, ispkg in pkgutil.walk_packages(sklearn.__path__, prefix="sklearn.") } missing_tests = [ name for name, ispkg in lookup.items() if ispkg and name not in resource_modules and not HAS_TESTS_EXCEPTIONS.search(name) and name + ".tests" not in lookup ] assert missing_tests == [], ( "{0} do not have `tests` subpackages. 
" "Perhaps they require " "__init__.py or an add_subpackage directive " "in the parent " "setup.py".format(missing_tests) ) def test_class_support_removed(): # Make sure passing classes to check_estimator or parametrize_with_checks # raises an error msg = "Passing a class was deprecated.* isn't supported anymore" with pytest.raises(TypeError, match=msg): check_estimator(LogisticRegression) with pytest.raises(TypeError, match=msg): parametrize_with_checks([LogisticRegression]) def _generate_search_cv_instances(): for SearchCV, (Estimator, param_grid) in product( [ GridSearchCV, HalvingGridSearchCV, RandomizedSearchCV, HalvingGridSearchCV, ], [ (Ridge, {"alpha": [0.1, 1.0]}), (LogisticRegression, {"C": [0.1, 1.0]}), ], ): init_params = signature(SearchCV).parameters extra_params = ( {"min_resources": "smallest"} if "min_resources" in init_params else {} ) search_cv = SearchCV(Estimator(), param_grid, cv=2, **extra_params) set_random_state(search_cv) yield search_cv for SearchCV, (Estimator, param_grid) in product( [ GridSearchCV, HalvingGridSearchCV, RandomizedSearchCV, HalvingRandomSearchCV, ], [ (Ridge, {"ridge__alpha": [0.1, 1.0]}), (LogisticRegression, {"logisticregression__C": [0.1, 1.0]}), ], ): init_params = signature(SearchCV).parameters extra_params = ( {"min_resources": "smallest"} if "min_resources" in init_params else {} ) search_cv = SearchCV( make_pipeline(PCA(), Estimator()), param_grid, cv=2, **extra_params ).set_params(error_score="raise") set_random_state(search_cv) yield search_cv @parametrize_with_checks(list(_generate_search_cv_instances())) def test_search_cv(estimator, check, request): # Common tests for SearchCV instances # We have a separate test because those meta-estimators can accept a # wide range of base estimators (classifiers, regressors, pipelines) with ignore_warnings( category=( FutureWarning, ConvergenceWarning, UserWarning, FutureWarning, FitFailedWarning, ) ): check(estimator) @pytest.mark.parametrize( "estimator", _tested_estimators(), ids=_get_check_estimator_ids ) def test_check_n_features_in_after_fitting(estimator): _set_checking_parameters(estimator) check_n_features_in_after_fitting(estimator.__class__.__name__, estimator)
""" Test the API of the symtable module. """ import symtable import unittest TEST_CODE = """ import sys glob = 42 some_var = 12 class Mine: instance_var = 24 def a_method(p1, p2): pass def spam(a, b, *var, **kw): global bar bar = 47 some_var = 10 x = 23 glob def internal(): return x def other_internal(): nonlocal some_var some_var = 3 return some_var return internal def foo(): pass def namespace_test(): pass def namespace_test(): pass """ def find_block(block, name): for ch in block.get_children(): if ch.get_name() == name: return ch class SymtableTest(unittest.TestCase): top = symtable.symtable(TEST_CODE, "?", "exec") # These correspond to scopes in TEST_CODE Mine = find_block(top, "Mine") a_method = find_block(Mine, "a_method") spam = find_block(top, "spam") internal = find_block(spam, "internal") other_internal = find_block(spam, "other_internal") foo = find_block(top, "foo") def test_type(self): self.assertEqual(self.top.get_type(), "module") self.assertEqual(self.Mine.get_type(), "class") self.assertEqual(self.a_method.get_type(), "function") self.assertEqual(self.spam.get_type(), "function") self.assertEqual(self.internal.get_type(), "function") def test_optimized(self): self.assertFalse(self.top.is_optimized()) self.assertTrue(self.spam.is_optimized()) def test_nested(self): self.assertFalse(self.top.is_nested()) self.assertFalse(self.Mine.is_nested()) self.assertFalse(self.spam.is_nested()) self.assertTrue(self.internal.is_nested()) def test_children(self): self.assertTrue(self.top.has_children()) self.assertTrue(self.Mine.has_children()) self.assertFalse(self.foo.has_children()) def test_lineno(self): self.assertEqual(self.top.get_lineno(), 0) self.assertEqual(self.spam.get_lineno(), 12) def test_function_info(self): func = self.spam self.assertEqual(sorted(func.get_parameters()), ["a", "b", "kw", "var"]) expected = ['a', 'b', 'internal', 'kw', 'other_internal', 'some_var', 'var', 'x'] self.assertEqual(sorted(func.get_locals()), expected) self.assertEqual(sorted(func.get_globals()), ["bar", "glob"]) self.assertEqual(self.internal.get_frees(), ("x",)) def test_globals(self): self.assertTrue(self.spam.lookup("glob").is_global()) self.assertFalse(self.spam.lookup("glob").is_declared_global()) self.assertTrue(self.spam.lookup("bar").is_global()) self.assertTrue(self.spam.lookup("bar").is_declared_global()) self.assertFalse(self.internal.lookup("x").is_global()) self.assertFalse(self.Mine.lookup("instance_var").is_global()) self.assertTrue(self.spam.lookup("bar").is_global()) def test_nonlocal(self): self.assertFalse(self.spam.lookup("some_var").is_nonlocal()) self.assertTrue(self.other_internal.lookup("some_var").is_nonlocal()) expected = ("some_var",) self.assertEqual(self.other_internal.get_nonlocals(), expected) def test_local(self): self.assertTrue(self.spam.lookup("x").is_local()) self.assertFalse(self.spam.lookup("bar").is_local()) def test_free(self): self.assertTrue(self.internal.lookup("x").is_free()) def test_referenced(self): self.assertTrue(self.internal.lookup("x").is_referenced()) self.assertTrue(self.spam.lookup("internal").is_referenced()) self.assertFalse(self.spam.lookup("x").is_referenced()) def test_parameters(self): for sym in ("a", "var", "kw"): self.assertTrue(self.spam.lookup(sym).is_parameter()) self.assertFalse(self.spam.lookup("x").is_parameter()) def test_symbol_lookup(self): self.assertEqual(len(self.top.get_identifiers()), len(self.top.get_symbols())) self.assertRaises(KeyError, self.top.lookup, "not_here") def test_namespaces(self): 
self.assertTrue(self.top.lookup("Mine").is_namespace()) self.assertTrue(self.Mine.lookup("a_method").is_namespace()) self.assertTrue(self.top.lookup("spam").is_namespace()) self.assertTrue(self.spam.lookup("internal").is_namespace()) self.assertTrue(self.top.lookup("namespace_test").is_namespace()) self.assertFalse(self.spam.lookup("x").is_namespace()) self.assertTrue(self.top.lookup("spam").get_namespace() is self.spam) ns_test = self.top.lookup("namespace_test") self.assertEqual(len(ns_test.get_namespaces()), 2) self.assertRaises(ValueError, ns_test.get_namespace) def test_assigned(self): self.assertTrue(self.spam.lookup("x").is_assigned()) self.assertTrue(self.spam.lookup("bar").is_assigned()) self.assertTrue(self.top.lookup("spam").is_assigned()) self.assertTrue(self.Mine.lookup("a_method").is_assigned()) self.assertFalse(self.internal.lookup("x").is_assigned()) def test_annotated(self): st1 = symtable.symtable('def f():\n x: int\n', 'test', 'exec') st2 = st1.get_children()[0] self.assertTrue(st2.lookup('x').is_local()) self.assertTrue(st2.lookup('x').is_annotated()) self.assertFalse(st2.lookup('x').is_global()) st3 = symtable.symtable('def f():\n x = 1\n', 'test', 'exec') st4 = st3.get_children()[0] self.assertTrue(st4.lookup('x').is_local()) self.assertFalse(st4.lookup('x').is_annotated()) # Test that annotations in the global scope are valid after the # variable is declared as nonlocal. st5 = symtable.symtable('global x\nx: int', 'test', 'exec') self.assertTrue(st5.lookup("x").is_global()) # Test that annotations for nonlocals are valid after the # variable is declared as nonlocal. st6 = symtable.symtable('def g():\n' ' x = 2\n' ' def f():\n' ' nonlocal x\n' ' x: int', 'test', 'exec') def test_imported(self): self.assertTrue(self.top.lookup("sys").is_imported()) def test_name(self): self.assertEqual(self.top.get_name(), "top") self.assertEqual(self.spam.get_name(), "spam") self.assertEqual(self.spam.lookup("x").get_name(), "x") self.assertEqual(self.Mine.get_name(), "Mine") def test_class_info(self): self.assertEqual(self.Mine.get_methods(), ('a_method',)) def test_filename_correct(self): ### Bug tickler: SyntaxError file name correct whether error raised ### while parsing or building symbol table. def checkfilename(brokencode, offset): try: symtable.symtable(brokencode, "spam", "exec") except SyntaxError as e: self.assertEqual(e.filename, "spam") self.assertEqual(e.lineno, 1) self.assertEqual(e.offset, offset) else: self.fail("no SyntaxError for %r" % (brokencode,)) checkfilename("def f(x): foo)(", 14) # parse-time checkfilename("def f(x): global x", 11) # symtable-build-time symtable.symtable("pass", b"spam", "exec") with self.assertWarns(DeprecationWarning), \ self.assertRaises(TypeError): symtable.symtable("pass", bytearray(b"spam"), "exec") with self.assertWarns(DeprecationWarning): symtable.symtable("pass", memoryview(b"spam"), "exec") with self.assertRaises(TypeError): symtable.symtable("pass", list(b"spam"), "exec") def test_eval(self): symbols = symtable.symtable("42", "?", "eval") def test_single(self): symbols = symtable.symtable("42", "?", "single") def test_exec(self): symbols = symtable.symtable("def f(x): return x", "?", "exec") def test_bytes(self): top = symtable.symtable(TEST_CODE.encode('utf8'), "?", "exec") self.assertIsNotNone(find_block(top, "Mine")) code = b'# -*- coding: iso8859-15 -*-\nclass \xb4: pass\n' top = symtable.symtable(code, "?", "exec") self.assertIsNotNone(find_block(top, "\u017d")) if __name__ == '__main__': unittest.main()
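# --- Illustrative sketch (not part of the original test file) --------------
# A minimal stand-alone use of the symtable API exercised by the tests above,
# on a small hypothetical function:
#
#   import symtable
#   top = symtable.symtable("def f(x):\n    y = x + 1\n    return y\n",
#                           "<demo>", "exec")
#   f_block = top.get_children()[0]
#   assert f_block.get_type() == "function"
#   assert f_block.lookup("x").is_parameter()
#   assert f_block.lookup("y").is_local()
#   assert sorted(f_block.get_locals()) == ["x", "y"]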
from __future__ import print_function import sys import os from collections import Counter, OrderedDict import time import datetime as dt from argparse import ArgumentParser import random import cProfile import pstats import numpy as np import pylab as plt from load_settings import settings from results import StormtracksResultsManager, ResultNotFound, StormtracksNumpyResultsManager from ibtracsdata import IbtracsData from c20data import C20Data, GlobalEnsembleMember from full_c20data import FullC20Data from tracking import VortmaxFinder, VortmaxNearestNeighbourTracker,\ VortmaxKalmanFilterTracker, FieldFinder from full_tracking import FullVortmaxFinder,\ FullVortmaxNearestNeighbourTracker, FullFieldFinder import matching import full_matching import classification from plotting import Plotter import plotting from logger import setup_logging from utils.utils import geo_dist SORT_COLS = { 'overlap': 1, 'cumdist': 2, 'cumoveroverlap': 3, 'avgdist': 4, 'avgdistovermatches': 5, } CAL_YEARS = range(1990, 2009, 2) VAL_YEARS = range(1991, 2010, 2) ENSEMBLE_RANGE = range(56) class StormtracksAnalysis(object): """Provides a variety of ways of analysing tracking performance Makes extensive use of its results_manager to load/save results. Used by the pyro code to farm out jobs across the cluster. To a large extent replaces the manual analysis steps. :param year: year on which to run analysis """ def __init__(self, year, profiling=False, mem_profiling=False): self.set_year(year) self.setup_analysis() self.profiling = profiling self.mem_profiling = mem_profiling self.logging_callback = None filename = 'analysis.log' self.log = setup_logging('analysis', filename=filename, console_level_str='INFO') def set_year(self, year): """Sets the year, loading best_tracks and setting up results_manager appropriately""" self.year = year self.ibdata = IbtracsData(verbose=False) self.best_tracks = self.ibdata.load_ibtracks_year(year) def setup_analysis(self): '''Sets up the current configuration options''' self.analysis_config_options = [] scales = [1, 2, 3] pressure_levels = [995, 850] trackers = ['nearest_neighbour'] # pressure_levels = [995, 850, 250] # trackers = ['nearest_neighbour', 'kalman'] for scale in scales: for pressure_level in pressure_levels: for tracker_name in trackers: config = { 'scale': scale, 'pressure_level': pressure_level, 'tracker': tracker_name, } self.analysis_config_options.append(config) def get_matching_configs(self, **kwargs): '''Allows for easy filtering of current config options''' configs = [] for config in self.analysis_config_options: should_add = True for k, v in kwargs.items(): if config[k] != v: should_add = False break if should_add: configs.append(config) return configs def _result_key(self, config): return 'scale:{scale};pl:{pressure_level};tracker:{tracker}'.format(**config) def good_matches_key(self, config): '''Returns the good_matches key for the config options''' return 'good_matches-{0}'.format(self._result_key(config)) def vort_tracks_by_date_key(self, config): '''Returns the vort_tracks_by_date key for the config options''' return 'vort_tracks_by_date-{0}'.format(self._result_key(config)) def run_cross_ensemble_analysis(self, start_date=None, end_date=None): config = {'pressure_level': 850, 'scale': 3, 'tracker': 'nearest_neighbour'} good_matches_key = self.good_matches_key(config) vort_tracks_by_date_key = self.vort_tracks_by_date_key(config) if start_date is None: start_date = dt.datetime(self.year, 6, 1) if end_date is None: end_date = dt.datetime(self.year, 12, 1) 
try: self.log.info('Get full ensemble analysis') results_manager.get_result(self.year, 'full', good_matches_key) results_manager.get_result(self.year, 'full', vort_tracks_by_date_key) results_manager.get_result(self.year, 'full', 'cyclones') self.log.info('Results already created') except: if self.profiling: pr = cProfile.Profile() pr.enable() if self.mem_profiling: import guppy import ipdb hp = guppy.hpy() heap1 = hp.heap() # Run tracking/matching analysis. self.log.info('Running full ensemble analysis') msg = 'Scale: {scale}, press level: {pressure_level}, tracker:{tracker}'.format(**config) self.log.info(msg) fc20data = FullC20Data(self.year, verbose=False, pressure_level=config['pressure_level'], fields=['u', 'v'], scale_factor=config['scale']) tracker = FullVortmaxNearestNeighbourTracker() vort_finder = FullVortmaxFinder(fc20data) vort_finder.find_vort_maxima(start_date, end_date, use_upscaled=config['scale'] != 1) tracker.track_vort_maxima(vort_finder.all_vortmax_time_series) if self.logging_callback: self.logging_callback('run tracking:{0}'.format('full')) matches = full_matching.full_match_vort_tracks_by_date_to_best_tracks(tracker.all_vort_tracks_by_date, self.best_tracks) all_good_matches = full_matching.full_good_matches(matches) if self.logging_callback: self.logging_callback('run matching:{0}'.format('full')) fc20data.close_datasets() # Run field collection. self.log.info('Running full field collection') field_collection_fc20data = FullC20Data(self.year, verbose=False, pressure_level=995, scale_factor=1) field_finder = FullFieldFinder(field_collection_fc20data, tracker.all_vort_tracks_by_date) field_finder.collect_fields(start_date, end_date) cyclones = field_finder.all_cyclone_tracks field_collection_fc20data.close_datasets() for ensemble_member in range(56): results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) results_manager.add_result(self.year, ensemble_member, good_matches_key, all_good_matches[ensemble_member]) results_manager.add_result(self.year, ensemble_member, vort_tracks_by_date_key, tracker.all_vort_tracks_by_date[ensemble_member]) results_manager.add_result(self.year, ensemble_member, 'cyclones', cyclones[ensemble_member]) # Save results. results_manager.save() del results_manager if self.mem_profiling: hp = guppy.hpy() heap2 = hp.heap() ipdb.set_trace() if self.logging_callback: self.logging_callback('analysed and collected fields:{0}'.format('full')) if self.profiling: pr.disable() with open('{0}/profile-analysis-full-{1}.txt' .format(settings.LOGGING_DIR, self.year), 'w') as f: sortby = 'tottime' ps = pstats.Stats(pr, stream=f).sort_stats(sortby) ps.print_stats() def run_full_analysis(self, config, num_ensemble_members=56): '''Runs tracking/matching analysis then field collection for all ensemble members''' upscaling = config['scale'] != 1 c20data = C20Data(self.year, verbose=False, pressure_level=config['pressure_level'], upscaling=upscaling, scale_factor=config['scale'], fields=['u', 'v']) field_collection_c20data = C20Data(self.year, verbose=False, pressure_level=995, upscaling=False, scale_factor=1) for ensemble_member in range(num_ensemble_members): good_matches_key = self.good_matches_key(config) vort_tracks_by_date_key = self.vort_tracks_by_date_key(config) results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) try: self.log.info('Get indiv. 
ensemble analysis for em:{0}'.format(ensemble_member)) results_manager.get_result(self.year, ensemble_member, good_matches_key) results_manager.get_result(self.year, ensemble_member, vort_tracks_by_date_key) results_manager.get_result(self.year, ensemble_member, 'cyclones') self.log.info('Results already created') except: if self.profiling: pr = cProfile.Profile() pr.enable() # Run tracking/matching analysis. self.log.info('Running indiv. ensemble analysis for em:{0}'.format(ensemble_member)) good_matches, vort_tracks_by_date =\ self.run_individual_tracking_matching_analysis(ensemble_member, config, c20data) results_manager.add_result(self.year, ensemble_member, good_matches_key, good_matches) results_manager.add_result(self.year, ensemble_member, vort_tracks_by_date_key, vort_tracks_by_date) # Run field collection. self.log.info('Running indiv. field collection for em:{0}'.format(ensemble_member)) tracking_config = {'pressure_level': 850, 'scale': 3, 'tracker': 'nearest_neighbour'} key = self.vort_tracks_by_date_key(tracking_config) self.log.info('Finding fields') field_finder = FieldFinder(field_collection_c20data, vort_tracks_by_date, ensemble_member) field_finder.collect_fields() cyclones = field_finder.cyclone_tracks.values() results_manager.add_result(self.year, ensemble_member, 'cyclones', cyclones) # Save results. results_manager.save() if self.logging_callback: self.logging_callback('analysed and collected fields:{0}'.format(ensemble_member)) if self.profiling: pr.disable() with open('/home/ubuntu/stormtracks_data/logs/profile-{0}.txt' .format(ensemble_member), 'w') as f: sortby = 'cumulative' ps = pstats.Stats(pr, stream=f).sort_stats(sortby) ps.print_stats() c20data.close_datasets() field_collection_c20data.close_datasets() def run_individual_tracking_matching_analysis(self, ensemble_member, config, c20data=None): '''Runs a given analysis based on config dict''' msg = 'Scale: {scale}, press level: {pressure_level}, tracker:{tracker}'.format(**config) self.log.info(msg) upscaling = config['scale'] != 1 if c20data is None: # Set up a c20 object with the specified config options. # N.B. for tracking only vorticity (which uses u, v fields) is needed. 
c20data = C20Data(self.year, verbose=False, pressure_level=config['pressure_level'], upscaling=upscaling, scale_factor=config['scale'], fields=['u', 'v']) need_to_close = True else: need_to_close = False if config['tracker'] == 'nearest_neighbour': tracker = VortmaxNearestNeighbourTracker(ensemble_member) elif config['tracker'] == 'kalman': tracker = VortmaxKalmanFilterTracker() gem = GlobalEnsembleMember(c20data, ensemble_member) vort_finder = VortmaxFinder(gem) vort_finder.find_vort_maxima(dt.datetime(self.year, 6, 1), dt.datetime(self.year, 12, 1), use_upscaled=upscaling) tracker.track_vort_maxima(vort_finder.vortmax_time_series) matches = matching.match_vort_tracks_by_date_to_best_tracks(tracker.vort_tracks_by_date, self.best_tracks) good_matches = matching.good_matches(matches) if need_to_close: c20data.close_datasets() return good_matches, tracker.vort_tracks_by_date def run_individual_field_collection(self, ensemble_member, c20data=None): self.log.info('Collecting fields for {0}'.format(ensemble_member)) if c20data is None: c20data = C20Data(self.year, verbose=False, pressure_level=995, upscaling=False, scale_factor=1) self.log.info('c20data created') need_to_close = True else: need_to_close = False tracking_config = {'pressure_level': 850, 'scale': 3, 'tracker': 'nearest_neighbour'} key = self.vort_tracks_by_date_key(tracking_config) self.log.info('Loading key: {0}'.format(key)) results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) vms = results_manager.get_result(self.year, ensemble_member, key) self.log.info('Finding fields') field_finder = FieldFinder(c20data, vms, ensemble_member) field_finder.collect_fields() if need_to_close: c20data.close_datasets() return field_finder.cyclone_tracks.values() def run_wld_analysis(self, active_configs={}, num_ensemble_members=56): '''Runs a win/lose/draw analysis on all ensemble members If a track from a particular analysis has a lower average dist it is said to have 'won' i.e. track for Wilma in pressure_level:850/scale:2/tracker:nearest neighbour has a lower av dist than pl:995/../.. . ''' wlds = [] for i in range(num_ensemble_members): configs = self.get_matching_configs(**active_configs) key0 = self.good_matches_key(configs[0]) key1 = self.good_matches_key(configs[1]) wld = self._win_lose_draw(0, key0, key1) wlds.append(wld) sum0 = 0 sum1 = 0 sum_draw = 0 for wld in wlds: sum0 += wld['w0'] sum1 += wld['w1'] sum_draw += wld['d'] if self.log: self.log.info('Win Lose Draw') self.log.info('=============') self.log.info('') self.log.info('{0} won: {1}'.format(key0, sum0)) self.log.info('{0} won: {1}'.format(key1, sum1)) self.log.info('draw: {0}'.format(sum_draw)) self.log.info('') return key0, sum0, key1, sum1, sum_draw def run_position_analysis(self, sort_on='avgdist', active_configs={}, force_regen=False, num_ensemble_members=56): '''Runs a positional analysis on the given sort_on col If sort_on is e.g. 
avg_dist, summed av dist for each of the active configs are calc'd and they are ranked in terms of which is lowest ''' # import ipdb; ipdb.set_trace() self.log.info('Analysing {0} ensemble members'.format(num_ensemble_members)) cross_ensemble_results = OrderedDict() for config in self.get_matching_configs(**active_configs): cross_ensemble_results[self.good_matches_key(config)] = Counter() for ensemble_member in range(num_ensemble_members): stats = self.list_stats(ensemble_member, sort_on, active_configs) for stat_pos, stat in enumerate(stats): cross_ensemble_results[stat[0]][stat_pos] += 1 pos_title = 'Position on {0}'.format(sort_on) self.log.info(pos_title) self.log.info('=' * len(pos_title)) self.log.info('') for k, v in cross_ensemble_results.items(): self.log.info(k) self.log.info(' {0}'.format(v.items())) self.log.info('') return cross_ensemble_results def run_analysis(self, ensemble_member, force_regen=False): '''For each set of config options, run a tracking analysis and store the results''' results = {} for config in self.analysis_config_options: results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) good_matches_key = self.good_matches_key(config) vort_tracks_by_date_key = self.vort_tracks_by_date_key(config) if not force_regen: try: good_matches = results_manager.get_result(self.year, ensemble_member, good_matches_key) self.log.info('Loaded saved result: {0}'.format(good_matches_key)) except ResultNotFound: force_regen = True if force_regen: self.log.info('Running analysis: {0}'.format(good_matches_key)) good_matches, vort_tracks_by_date = \ self.run_individual_tracking_matching_analysis(ensemble_member, config) results_manager.add_result(self.year, ensemble_member, good_matches_key, good_matches) results_manager.add_result(self.year, ensemble_member, vort_tracks_by_date_key, vort_tracks_by_date) results_manager.save() results[good_matches_key] = good_matches return results def _win_lose_draw(self, ensemble_member, key0, key1): wld = Counter() results_manager = StormtracksResultsManager('wld_analysis') gm0 = results_manager.get_result(self.year, ensemble_member, key0) gm1 = results_manager.get_result(self.year, ensemble_member, key1) for bt in self.best_tracks: m0 = None for m in gm0: if m.best_track.name == bt.name: m0 = m break m1 = None for m in gm1: if m.best_track.name == bt.name: m1 = m break if m0 and m1: if m0.av_dist() < m1.av_dist() - 0.02: wld['w0'] += 1 elif m1.av_dist() < m0.av_dist() - 0.02: wld['w1'] += 1 else: wld['d'] += 1 elif m0: wld['w0'] += 1 elif m1: wld['w1'] += 1 else: wld['d'] += 1 return wld def get_good_matches(self, ensemble_member, config): '''Either loads or generates (and saves) good_matches''' key = self.good_matches_key(config) results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) try: good_matches = results_manager.get_result(self.year, ensemble_member, key) except ResultNotFound: good_matches, vort_tracks_by_date = \ self.run_individual_tracking_matching_analysis(ensemble_member, config) results_manager.add_result(self.year, ensemble_member, key, good_matches) results_manager.save() return good_matches def get_vort_tracks_by_date(self, ensemble_member, config): '''Either loads or generates (and saves) vort_tracks_by_date''' key = self.vort_tracks_by_date_key(config) results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) try: vort_tracks_by_date = results_manager.get_result(self.year, ensemble_member, key) except ResultNotFound: good_matches, vort_tracks_by_date = \ 
self.run_individual_tracking_matching_analysis(ensemble_member, config) results_manager.add_result(self.year, ensemble_member, vort_tracks_by_date_key, vort_tracks_by_date) results_manager.save() return vort_tracks_by_date def list_stats(self, ensemble_member=0, sort_on='avgdist', active_configs={}): '''Runs through all statistics for the requested ensemble member and compiles stats sorts on the requested column and only looks at the active_configs. This makes it easy to e.g. compare all scale 1 or all 850 configuration options.''' sort_col = SORT_COLS[sort_on] configs = self.get_matching_configs(**active_configs) stats = [] for config in configs: key = self.good_matches_key(config) good_matches = self.get_good_matches(ensemble_member, config) len_matches = len(good_matches) sum_overlap = np.sum([m.overlap for m in good_matches]) sum_cum_dist = np.sum([m.cum_dist for m in good_matches]) cum_over_overlap = sum_cum_dist / sum_overlap sum_av_dist = np.sum([m.av_dist() for m in good_matches]) sum_av_dist_over_len_matches = np.sum([m.av_dist()/len_matches for m in good_matches]) stats.append((key, sum_overlap, sum_cum_dist, cum_over_overlap, sum_av_dist, sum_av_dist_over_len_matches)) return sorted(stats, key=lambda x: x[sort_col]) def print_stats(self, ensemble_member=0, sort_on='avgdist'): '''Prints the stats''' stats = self.list_stats(ensemble_member, sort_on) for stat in stats: key, sum_overlap, sum_cum_dist, sum_av_dist = stat print(key) print(' sum overlap: {0}'.format(sum_overlap)) print(' sum cumdist: {0}'.format(sum_cum_dist)) print(' sum avgdist: {0}'.format(sum_av_dist)) def setup_display(self, ensemble_member, active_configs={}): '''Sets up plotters for displaying of results''' configs = self.get_matching_configs(**active_configs) self.plotters = [] c20data = C20Data(self.year, verbose=False) for i, config in enumerate(configs): key = self.good_matches_key(config) good_matches = self.get_good_matches(ensemble_member, key) plotter = Plotter(key, self.best_tracks, c20data, [good_matches]) plotter.load('match_comp_1', is_plot=False) plotter.layout['figure'] = i + 1 self.plotters.append(plotter) self.best_track_index = 0 def next_best_track(self): '''Moves each plotter's best track on by one''' self.best_track_index += 1 self.plot() def prev_best_track(self): '''Moves each plotter's best track back by one''' self.best_track_index -= 1 self.plot() def plot(self): '''Uses each plotter to plot the current scene''' best_track = self.best_tracks[self.best_track_index] for plotter in self.plotters: self.log.info('{0} - '.format(plotter.title), end='') plotter.plot_match_from_best_track(best_track) def score_matchup(matchup): score = 0 score += matchup['tp'] * 4 # score += matchup['unmatched_tn'] * 0.1 # score += matchup['tn'] * 1 score -= matchup['fn'] * 2 score -= matchup['fp'] * 2 score -= matchup['unmatched_fp'] * 2 score -= matchup['missed'] * 2 return score class ClassificationAnalysis(object): def __init__(self): self.results_manager = StormtracksResultsManager(settings.FIELD_RESULTS) self.plot_results_manager = StormtracksResultsManager('plot_results') self.all_best_tracks = {} self.hurricanes_in_year = {} self.cal_cd = None self.val_cd = None self.classifiers = None def get_trained_classifiers(self): if not self.cal_cd: self.cal_cd = self.load_cal_classification_data() if not self.val_cd: self.val_cd = self.load_val_classification_data() if self.classifiers: return self.cal_cd, self.val_cd, self.classifiers cc = classification.CutoffClassifier() ldc = classification.LDAClassifier() 
qdc = classification.QDAClassifier() sgdc = classification.SGDClassifier() qdc_chain = classification.QDAClassifier() cc_chain = classification.CutoffClassifier() chain_qdc_cc = classification.ClassifierChain([qdc_chain, cc_chain]) sgdc_chain = classification.SGDClassifier() cc_chain2 = classification.CutoffClassifier() chain_sgdc_cc = classification.ClassifierChain([sgdc_chain, cc_chain2]) classifiers = ((cc, 'cc_best', 'g^', 'Threshold'), (ldc, 'ldc_best', 'm+', 'LDA'), (qdc, 'qdc_best', 'bx', 'QDA'), (sgdc, 'sgdc_best', 'ro', 'SGD'), # (chain_qdc_cc, 'chain_qdc_cc_best', 'cs', 'QDC/Thresh.'), # (chain_sgdc_cc, 'chain_sgdc_cc_best', 'cs', 'Combined SGDC/Thresh.'), ) for i, (classifier, settings, fmt, name) in enumerate(classifiers): cat_settings = classifier.load(settings) classifier.train(self.cal_cd, **cat_settings) # classifier.predict(cal_cd, plot='all', fig=(i + 1) * 10, fmt=fmt) # classifier.predict(val_cd, plot='all', fig=(i + 1) * 10 + 2, fmt=fmt) self.classifiers = classifiers return self.cal_cd, self.val_cd, self.classifiers def run_yearly_analysis(self, classifier, start_year=1890, end_year=2010): ems = range(56) ib_hurrs = [] ib_pdis = [] cla_hurrs = [] cla_pdis = [] for start_year in range(start_year, end_year, 10): print(start_year) years = range(start_year, start_year + 10) cla_data = self.load_classification_data('{0}s'.format(start_year), years, ems) classifier.predict(cla_data) pred_hurr = cla_data.data[classifier.are_hurr_pred] for year in years: cla_hurr = [] cla_pdi = [] ib_hurr = self.get_total_hurrs([year]) ib_hurrs.append(ib_hurr) if year not in self.all_best_tracks: self.load_ibtracs_year(year) ib_pdi = 0 for bt in self.all_best_tracks[year]: for cls, ws in zip(bt.cls, bt.winds): if cls == 'HU': ib_pdi += ws ** 3 ib_pdis.append(ib_pdi) year_mask = (pred_hurr[:, 15] == year) for em in ems: em_mask = pred_hurr[year_mask][:, 16] == em em_hurr = (em_mask).sum() mws_index = classification.SCATTER_ATTRS['maxwindspeed']['index'] em_pdi = (pred_hurr[year_mask][em_mask][:, mws_index] ** 3).sum() cla_hurr.append(em_hurr) cla_pdi.append(em_pdi) cla_hurrs.append(cla_hurr) cla_pdis.append(cla_pdi) del cla_data return np.array(ib_hurrs), np.array(ib_pdis), np.array(cla_hurrs), np.array(cla_pdis) def cat_results_key(self, name, years, ensemble_members): years_str = '-'.join(map(str, years)) em_str = '-'.join(map(str, ensemble_members)) return '{0}_{1}_{2}'.format(name, years_str, em_str) def load_ibtracs_year(self, year): ibdata = IbtracsData(verbose=False) best_tracks = ibdata.load_ibtracks_year(year) self.all_best_tracks[year] = best_tracks hurricanes_in_year = 0 for best_track in best_tracks: for cls in best_track.cls: if cls == 'HU': hurricanes_in_year += 1 self.hurricanes_in_year[year] = hurricanes_in_year def miss_count(self, years, num_ensemble_members, hurr_counts): total_hurrs = self.get_total_hurrs(years) expexted_hurrs = total_hurrs * num_ensemble_members all_tracked_hurricanes = hurr_counts[:, 2].sum() return expexted_hurrs - all_tracked_hurricanes def get_total_hurrs(self, years): total_hurricanes = 0 for year in years: if year not in self.all_best_tracks: self.load_ibtracs_year(year) total_hurricanes += self.hurricanes_in_year[year] return total_hurricanes def run_categorisation_analysis(self, years, ensemble_members=(0, ), plot_mode=None, save=False): KNOWN_BAD = ( (1890, 27), ) total_classification_data = None total_are_hurricanes = None total_dates = None total_hurr_counts = [] numpy_res_man = StormtracksNumpyResultsManager('classification_data') 
total_hurr_count = 0 for year in years: for ensemble_member in ensemble_members: print('{0}-{1}'.format(year, ensemble_member)) if (year, ensemble_member) in KNOWN_BAD: print('SKIPPING {0}-{1}'.format(year, ensemble_member)) continue # matches, unmatched =\ # self.run_individual_tracking_matching_analysis(year, ensemble_member, # plot_mode, save) classification_data, are_hurricanes, dates, hurr_count, double_count =\ self.build_classification_data(year, ensemble_member) if double_count > 5: print('Hi double count for year/em: {0}, {1}'.format(year, ensemble_member)) hurr_counts = np.array((year, ensemble_member, hurr_count, double_count)) total_hurr_count += hurr_count if total_classification_data is not None: total_classification_data = np.concatenate((total_classification_data, classification_data)) else: total_classification_data = classification_data if total_are_hurricanes is not None: total_are_hurricanes = np.concatenate((total_are_hurricanes, are_hurricanes)) else: total_are_hurricanes = are_hurricanes if total_dates is not None: total_dates = np.concatenate((total_dates, dates)) else: total_dates = dates total_hurr_counts.append(hurr_counts) total_hurr_counts = np.array(total_hurr_counts) numpy_res_man.save(self.cat_results_key('classification_data', years, ensemble_members), total_classification_data) numpy_res_man.save(self.cat_results_key('are_hurr', years, ensemble_members), total_are_hurricanes) numpy_res_man.save(self.cat_results_key('dates', years, ensemble_members), total_dates) numpy_res_man.save(self.cat_results_key('hurr_counts', years, ensemble_members), total_hurr_counts) miss_count = self.miss_count(years, len(ensemble_members), total_hurr_counts) return classification.ClassificationData('calcd', total_classification_data, total_are_hurricanes, total_dates, total_hurr_counts, miss_count) def load_classification_data(self, name, years, ensemble_members, should_lon_filter=False): numpy_res_man = StormtracksNumpyResultsManager('classification_data') total_classification_data = numpy_res_man.load(self.cat_results_key('classification_data', years, ensemble_members)) total_are_hurricanes = numpy_res_man.load(self.cat_results_key('are_hurr', years, ensemble_members)) total_dates = numpy_res_man.load(self.cat_results_key('dates', years, ensemble_members)) total_hurr_counts = numpy_res_man.load(self.cat_results_key('hurr_counts', years, ensemble_members)) miss_count = self.miss_count(years, len(ensemble_members), total_hurr_counts) if should_lon_filter: total_classification_data, total_are_hurricanes, total_dates = \ self.lon_filter(total_classification_data, total_are_hurricanes, total_dates) return classification.ClassificationData(name, total_classification_data, total_are_hurricanes, total_dates, total_hurr_counts, miss_count) def load_cal_classification_data(self): classification_data = self.load_classification_data('Calibration', CAL_YEARS, ENSEMBLE_RANGE) return classification_data def load_val_classification_data(self): classification_data = self.load_classification_data('Validation', VAL_YEARS, ENSEMBLE_RANGE) return classification_data def optimize_cutoff_cat(self, classification_data, are_hurr, dates): self.cutoff_cat.best_so_far() vort_lo_dist = 0.00001 vort_lo_start = self.cutoff_cat.cutoffs['vort_lo'] t995_lo_dist = 0.1 t995_lo_start = self.cutoff_cat.cutoffs['t995_lo'] t850_lo_dist = 0.1 t850_lo_start = self.cutoff_cat.cutoffs['t850_lo'] maxwindspeed_lo_dist = 0.2 maxwindspeed_lo_start = self.cutoff_cat.cutoffs['maxwindspeed_lo'] pambdiff_lo_dist = 0.2 
pambdiff_lo_start = self.cutoff_cat.cutoffs['pambdiff_lo'] lowest_score = 1e99 n = 3 for vort_lo in np.arange(vort_lo_start - vort_lo_dist * n, vort_lo_start + vort_lo_dist * n, vort_lo_dist): self.cutoff_cat.cutoffs['vort_lo'] = vort_lo # for t995_lo in np.arange(t995_lo_start - t995_lo_dist * n, # t995_lo_start + t995_lo_dist * n, # t995_lo_dist): # self.cutoff_cat.cutoffs['t995_lo'] = t995_lo for maxwindspeed_lo in np.arange(maxwindspeed_lo_start - maxwindspeed_lo_dist * n, maxwindspeed_lo_start + maxwindspeed_lo_dist * n, maxwindspeed_lo_dist): self.cutoff_cat.cutoffs['maxwindspeed_lo'] = maxwindspeed_lo # for t850_lo in np.arange(t850_lo_start - t850_lo_dist * n, # t850_lo_start + t850_lo_dist * n, # t850_lo_dist): # self.cutoff_cat.cutoffs['t850_lo'] = t850_lo for pambdiff_lo in np.arange(pambdiff_lo_start - pambdiff_lo_dist * n, pambdiff_lo_start + pambdiff_lo_dist * n, pambdiff_lo_dist): self.cutoff_cat.cutoffs['pambdiff_lo'] = pambdiff_lo score = self.cutoff_cat.predict(classification_data, are_hurr) if score < lowest_score: print('New low score: {0}'.format(score)) lowest_score = score print(self.cutoff_cat.cutoffs) def lon_filter(self, total_classification_data, total_are_hurricanes, total_dates): i = classification.SCATTER_ATTRS['lon']['index'] mask = total_classification_data[:, i] > 260 # return total_classification_data[mask], total_are_hurricanes[mask] return total_classification_data[mask], total_are_hurricanes[mask], total_dates[mask] def apply_all_cutoffs(self, total_classification_data, total_are_hurricanes): total_classification_data, total_are_hurricanes = self.lon_filter(total_classification_data, total_are_hurricanes) hf = total_classification_data[total_are_hurricanes].copy() nhf = total_classification_data[~total_are_hurricanes].copy() t850_cutoff = 287 t995_cutoff = 297 vort_cutoff = 0.0003 # 0.00035 might be better. hf = hf[hf[:, 5] > t850_cutoff] hf = hf[hf[:, 4] > t995_cutoff] hf = hf[hf[:, 0] > vort_cutoff] nhf = nhf[nhf[:, 5] > t850_cutoff] nhf = nhf[nhf[:, 4] > t995_cutoff] nhf = nhf[nhf[:, 0] > vort_cutoff] plt.clf() ci1 = 0 ci2 = 1 plt.plot(nhf[:, ci1], nhf[:, ci2], 'bx', zorder=1) plt.plot(hf[:, ci1], hf[:, ci2], 'ko', zorder=0) return hf, nhf def plot_total_classification_data(self, total_classification_data, total_are_hurricanes, var1, var2, fig=1): plt.figure(fig) plt.clf() i1 = classification.SCATTER_ATTRS[var1]['index'] i2 = classification.SCATTER_ATTRS[var2]['index'] plt.xlabel(var1) plt.ylabel(var2) plt.plot(total_classification_data[:, i1][~total_are_hurricanes], total_classification_data[:, i2][~total_are_hurricanes], 'bx', zorder=3) plt.plot(total_classification_data[:, i1][total_are_hurricanes], total_classification_data[:, i2][total_are_hurricanes], 'ko', zorder=2) def run_individual_cla_analysis(self, year, ensemble_member): results_manager = self.results_manager if year not in self.all_best_tracks: self.load_ibtracs_year(year) best_tracks = self.all_best_tracks[year] cyclones = results_manager.get_result(year, ensemble_member, 'cyclones') if isinstance(cyclones, dict): cyclones = cyclones.values() matches, unmatched = matching.match_best_tracks_to_cyclones(best_tracks, cyclones) return cyclones, matches, unmatched def calc_total_hurr(self, hurr_counts): num_ensemble_members = len(set(hurr_counts[:, 1])) all_hurricanes = hurr_counts[:, 2].sum() return 1. 
* all_hurricanes / num_ensemble_members def _make_classification_data_row(self, year, ensemble_member, date, cyclone): classification_data_row = [] for variable in classification.SCATTER_ATTRS.keys(): attr = classification.SCATTER_ATTRS[variable] x = classification.get_cyclone_attr(cyclone, attr, date) classification_data_row.append(x) classification_data_row.append(year) classification_data_row.append(ensemble_member) return classification_data_row def build_classification_data(self, year, ensemble_member, unmatched_sample_size=None): cyclones, matches, unmatched = self.run_individual_cla_analysis(year, ensemble_member) dates = [] class_data = [] are_hurricanes = [] if unmatched_sample_size: unmatched_samples = random.sample(unmatched, min(unmatched_sample_size, len(unmatched))) else: unmatched_samples = unmatched for cyclone in unmatched_samples: for date in cyclone.dates: if cyclone.pmins[date]: class_data.append(self._make_classification_data_row(year, ensemble_member, date, cyclone)) dates.append(date) are_hurricanes.append(False) added_dates = [] # Stops a double count of matched hurrs. matched_best_tracks = Counter() for match in matches: best_track = match.best_track cyclone = match.cyclone for date, cls in zip(best_track.dates, best_track.cls): if date in cyclone.dates and cyclone.pmins[date]: added_dates.append(date) if cls == 'HU': matched_best_tracks[(best_track.name, date)] += 1 class_data.append(self._make_classification_data_row(year, ensemble_member, date, cyclone)) dates.append(date) are_hurricanes.append(True) else: class_data.append(self._make_classification_data_row(year, ensemble_member, date, cyclone)) dates.append(date) are_hurricanes.append(False) for date in cyclone.dates: if date not in added_dates and cyclone.pmins[date]: class_data.append(self._make_classification_data_row(year, ensemble_member, date, cyclone)) dates.append(date) are_hurricanes.append(False) double_count = sum(matched_best_tracks.values()) - len(matched_best_tracks) return np.array(class_data), np.array(are_hurricanes), np.array(dates),\ len(matched_best_tracks), double_count def gen_plotting_scatter_data(self, matches, unmatched, var1, var2): plotted_dates = [] ps = {'unmatched': {'xs': [], 'ys': []}, 'hu': {'xs': [], 'ys': []}, 'ts': {'xs': [], 'ys': []}, 'no': {'xs': [], 'ys': []}} attr1 = classification.SCATTER_ATTRS[var1] attr2 = classification.SCATTER_ATTRS[var2] for cyclone in unmatched: for date in cyclone.dates: if cyclone.pmins[date]: x = classification.get_cyclone_attr(cyclone, attr1, date) y = classification.get_cyclone_attr(cyclone, attr2, date) ps['unmatched']['xs'].append(x) ps['unmatched']['ys'].append(y) for match in matches: best_track = match.best_track cyclone = match.cyclone for date, cls in zip(best_track.dates, best_track.cls): if date in cyclone.dates and cyclone.pmins[date]: plotted_dates.append(date) if cls == 'HU': ps['hu']['xs'].append(classification.get_cyclone_attr(cyclone, attr1, date)) ps['hu']['ys'].append(classification.get_cyclone_attr(cyclone, attr2, date)) else: ps['ts']['xs'].append(classification.get_cyclone_attr(cyclone, attr1, date)) ps['ts']['ys'].append(classification.get_cyclone_attr(cyclone, attr2, date)) for date in cyclone.dates: if date not in plotted_dates and cyclone.pmins[date]: ps['no']['xs'].append(classification.get_cyclone_attr(cyclone, attr1, date)) ps['no']['ys'].append(classification.get_cyclone_attr(cyclone, attr2, date)) return ps def gen_plotting_error_data(self, matches, unmatched, var1, var2): plotted_dates = [] ps = {'fp': {'xs': 
[], 'ys': []}, 'fn': {'xs': [], 'ys': []}, 'tp': {'xs': [], 'ys': []}, 'tn': {'xs': [], 'ys': []}, 'un': {'xs': [], 'ys': []}} attr1 = classification.SCATTER_ATTRS[var1] attr2 = classification.SCATTER_ATTRS[var2] for date in cyclone.dates: xs = classification.get_cyclone_attr(cyclone, attr1, date) ys = classification.get_cyclone_attr(cyclone, attr2, date) if date in cyclone.cat_matches: ps[cyclone.cat_matches[date]]['xs'].append(xs) ps[cyclone.cat_matches[date]]['ys'].append(ys) else: ps['un']['xs'].append(xs) ps['un']['ys'].append(ys) return ps def plot_scatters(self, years, ensemble_members, var1='vort', var2='pmin'): for year in years: for ensemble_member in ensemble_members: self.plot_scatter(year, ensemble_member, var1=var1, var2=var2) def plot_scatter(self, year, ensemble_member, matches=None, unmatched=None, var1='vort', var2='pmin'): if not matches or not unmatched: matches, unmatched = self.run_individual_cla_analysis(year, ensemble_member) key = 'scatter_{0}_{1}'.format(var1, var2) try: ps = self.plot_results_manager.get_result(year, ensemble_member, key) except ResultNotFound: ps = self.gen_plotting_scatter_data(matches, unmatched, var1, var2) self.plot_results_manager.add_result(year, ensemble_member, key, ps) self.plot_results_manager.save() plotting.plot_2d_scatter(ps, var1, var2) def plot_error(self, year, ensemble_member, matches=None, unmatched=None, var1='vort', var2='pmin'): if not matches or not unmatched: matches, unmatched = self.run_individual_cla_analysis(year, ensemble_member) key = 'error_{0}_{1}'.format(var1, var2) try: ps = self.plot_results_manager.get_result(year, ensemble_member, key) except ResultNotFound: ps = self.gen_plotting_error_data(matches, unmatched, var1, var2) self.plot_results_manager.add_result(year, ensemble_member, key, ps) self.plot_results_manager.save() plotting.plot_2d_error_scatter(ps, var1, var2) def plot(self, year, ensemble_member, matches, unmatched, plot_mode, save): output_path = os.path.join(settings.OUTPUT_DIR, 'hurr_scatter_plots') if not os.path.exists(output_path): os.makedirs(output_path) plot_variables = ( 'pmin', 'pambdiff', 'max_windspeed', 't995', 't850', 't_anom', 'mindist', 'max_windspeed_dist', 'max_windspeed_dir', 'lon', 'lat', ) var1 = 'vort' if plot_mode in ('scatter', 'both'): title = 'scatter {0}-{1}'.format(year, ensemble_member) plt.figure(title) plt.clf() plt.title(title) for i, var2 in enumerate(plot_variables): plt.subplot(3, 4, i + 1) self.plot_scatter(year, ensemble_member, matches, unmatched, var1, var2) if save: plt.savefig(os.path.join(output_path, '{0}.png'.format(title))) if plot_mode in ('error', 'both'): title = 'error scatter {0}-{1}'.format(year, ensemble_member) plt.figure(title) plt.clf() plt.title(title) for i, var2 in enumerate(plot_variables): plt.subplot(3, 4, i + 1) self.plot_error(year, ensemble_member, matches, unmatched, var1, var2) if save: plt.savefig(os.path.join(output_path, '{0}.png'.format(title))) def run_ensemble_analysis(stormtracks_analysis, year, num_ensemble_members): '''Performs a full enesmble analysis on the given year Searches through and tries to match all tracks across ensemble members **without** using any best tracks info.''' stormtracks_analysis.set_year(year) stormtracks_analysis.run_ensemble_matches_analysis(num_ensemble_members) def run_tracking_stats_analysis(stormtracks_analysis, year, num_ensemble_members=56): '''Runs a complete tracking analysis, comparing the performance of each configuration option Compares performance in a variety of ways, e.g. 
within pressure level or just scale 1.''' stormtracks_analysis.set_year(year) log = stormtracks_analysis.log log.info('Running tracking stats analysis for year {0}'.format(year)) include_extra_scales = False for sort_col in SORT_COLS.keys(): if sort_col in ['overlap', 'cumdist']: continue log.info('Run analysis on col {0}'.format(sort_col)) log.info('Run full analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, num_ensemble_members=num_ensemble_members) log.info('Run 995 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'pressure_level': 995}, num_ensemble_members=num_ensemble_members) log.info('Run 850 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'pressure_level': 850}, num_ensemble_members=num_ensemble_members) log.info('Run scale 1 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'scale': 1}, num_ensemble_members=num_ensemble_members) log.info('Run scale 2 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'scale': 2}, num_ensemble_members=num_ensemble_members) log.info('Run scale 3 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'scale': 3}, num_ensemble_members=num_ensemble_members) if include_extra_scales: log.info('Run scale 4 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'scale': 4}, num_ensemble_members=num_ensemble_members) log.info('Run scale 5 analysis\n') stormtracks_analysis.run_position_analysis(sort_on=sort_col, active_configs={'scale': 5}, num_ensemble_members=num_ensemble_members) log.info('Run scale 1 wld\n') stormtracks_analysis.run_wld_analysis(active_configs={'scale': 1}, num_ensemble_members=num_ensemble_members) log.info('Run scale 2 wld\n') stormtracks_analysis.run_wld_analysis(active_configs={'scale': 2}, num_ensemble_members=num_ensemble_members) log.info('Run scale 3 wld\n') stormtracks_analysis.run_wld_analysis(active_configs={'scale': 3}, num_ensemble_members=num_ensemble_members) if include_extra_scales: log.info('Run scale 4 wld\n') stormtracks_analysis.run_wld_analysis(active_configs={'scale': 4}, num_ensemble_members=num_ensemble_members) log.info('Run scale 5 wld\n') stormtracks_analysis.run_wld_analysis(active_configs={'scale': 5}, num_ensemble_members=num_ensemble_members) def run_field_collection(stormtracks_analysis, year, num_ensemble_members=56): stormtracks_analysis.set_year(year) for ensemble_member in range(num_ensemble_members): stormtracks_analysis.run_individual_field_collection(ensemble_member) def run_cross_ensemble_analysis(stormtracks_analysis, year, start_date, end_date): stormtracks_analysis.set_year(year) stormtracks_analysis.run_cross_ensemble_analysis(start_date, end_date) def run_wilma_katrina_analysis(show_plots=False, num_ensemble_members=56): c20data = C20Data(year, verbose=False) ibdata = IbtracsData(verbose=False) wilma_bt, katrina_bt = ibdata.load_wilma_katrina() # plt.plot(wilma_bt.dates, wilma_bt.pressures) if show_plots: plt.plot(katrina_bt.pressures * 100) results_manager = StormtracksResultsManager(settings.TRACKING_RESULTS) cyclones = [] for i in range(num_ensemble_members): start = time.time() print(i) gms = results_manager.get_result(2005, i, 'good_matches-scale:3;pl:850;tracker:nearest_neighbour') for gm in gms: if gm.best_track.name == katrina_bt.name: wilma_match = gm break d = OrderedDict() for date in gm.vort_track.dates: d[date] = [gm.vort_track] 
field_finder = FieldFinder(c20data, d, i) field_finder.collect_fields() cyclone_track = field_finder.cyclone_tracks.values()[0] cyclones.append(cyclone_track) if show_plots: pmin_values = [] for pmin in cyclone_track.pmins.values(): if pmin: pmin_values.append(pmin[0]) else: pmin_values.append(104000) plt.plot(pmin_values) if show_plots: plt.show() def analyse_ibtracs_data(plot=True): '''Adds up a freq. distribution and a yearly total of hurricane-timesteps''' ibdata = IbtracsData(verbose=False) yearly_hurr_distribution = Counter() hurr_per_year = OrderedDict() for year in range(1890, 2010): print(year) best_tracks = ibdata.load_ibtracks_year(year) hurr_count = 0 for best_track in best_tracks: for date, cls in zip(best_track.dates, best_track.cls): if cls == 'HU': hurr_count += 1 day_of_year = date.timetuple().tm_yday if date.year == year + 1: # Takes into account leap years. day_of_year += dt.datetime(year, 12, 31).timetuple().tm_yday elif date.year != year: raise Exception('{0} != {1}'.format(date.year, year)) yearly_hurr_distribution[day_of_year] += 1 hurr_per_year[year] = hurr_count start_doy = dt.datetime(2001, 6, 1).timetuple().tm_yday end_doy = dt.datetime(2001, 12, 1).timetuple().tm_yday if plot: plt.figure(1) plt.title('Hurricane Distribution over the Year') plt.plot(yearly_hurr_distribution.keys(), yearly_hurr_distribution.values()) plt.plot((start_doy, start_doy), (0, 250), 'k--') plt.plot((end_doy, end_doy), (0, 250), 'k--') plt.xlabel('Day of Year') plt.ylabel('Hurricane-timesteps') plt.figure(2) plt.title('Hurricanes per Year') plt.plot(hurr_per_year.keys(), hurr_per_year.values()) plt.xlabel('Year') plt.ylabel('Hurricane-timesteps') return yearly_hurr_distribution, hurr_per_year if __name__ == '__main__': parser = ArgumentParser() parser.add_argument('-a', '--analysis', default='ensemble') parser.add_argument('-s', '--start-year', type=int, default=2005) parser.add_argument('-e', '--end-year', type=int, default=2005) parser.add_argument('-n', '--num-ensemble-members', type=int, default=56) parser.add_argument('-p', '--profiling', action='store_true') parser.add_argument('-m', '--mem-profiling', action='store_true') parser.add_argument('--start-date') parser.add_argument('--end-date') args = parser.parse_args() years = range(args.start_year, args.end_year + 1) if args.analysis == 'scatter_plots': for year in years: print(year) run_scatter_plot_output(year) sys.exit(0) stormtracks_analysis = StormtracksAnalysis(years[0], profiling=args.profiling, mem_profiling=args.mem_profiling) if args.analysis == 'ensemble': for year in years: run_ensemble_analysis(stormtracks_analysis, year, args.num_ensemble_members) elif args.analysis == 'stats': for year in years: run_tracking_stats_analysis(stormtracks_analysis, year, args.num_ensemble_members) elif args.analysis == 'collection': for year in years: run_field_collection(stormtracks_analysis, year, args.num_ensemble_members) elif args.analysis == 'cross_ensemble': if args.start_date: start_date = dt.datetime.strptime(args.start_date, '%Y-%m-%d') if args.end_date: end_date = dt.datetime.strptime(args.end_date, '%Y-%m-%d') for year in years: run_cross_ensemble_analysis(stormtracks_analysis, year, start_date, end_date) elif args.analysis == 'wilma_katrina': for year in years: run_wilma_katrina_analysis(year, args.num_ensemble_members) else: raise Exception('One of ensemble or stats should be chosen')
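# Illustrative command lines for the analysis driver above (a sketch: the
# script name "stormtracks_analysis.py" is an assumption, but the options are
# exactly the ones defined by the ArgumentParser in __main__):
#
#   python stormtracks_analysis.py --analysis ensemble -s 2005 -e 2005 -n 56
#   python stormtracks_analysis.py --analysis stats -s 2004 -e 2006
#   python stormtracks_analysis.py --analysis cross_ensemble -s 2005 -e 2005 \
#       --start-date 2005-08-20 --end-date 2005-09-05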
from typing import Optional, Callable, Iterator, IO, List from struct import unpack, pack from itertools import repeat from jawa.constants import UTF8 from jawa.util.flags import Flags from jawa.util.descriptor import method_descriptor, JVMType from jawa.attribute import AttributeTable from jawa.attributes.code import CodeAttribute class Method(object): def __init__(self, cf): self._cf = cf self.access_flags = Flags('>H', { 'acc_public': 0x0001, 'acc_private': 0x0002, 'acc_protected': 0x0004, 'acc_static': 0x0008, 'acc_final': 0x0010, 'acc_synchronized': 0x0020, 'acc_bridge': 0x0040, 'acc_varargs': 0x0080, 'acc_native': 0x0100, 'acc_abstract': 0x0400, 'acc_strict': 0x0800, 'acc_synthetic': 0x1000 }) self._name_index = 0 self._descriptor_index = 0 self.attributes = AttributeTable(cf) @property def descriptor(self) -> UTF8: """ The UTF8 Constant containing the method's descriptor. """ return self._cf.constants[self._descriptor_index] @property def name(self) -> UTF8: """ The UTF8 Constant containing the method's name. """ return self._cf.constants[self._name_index] @property def returns(self) -> JVMType: """ A :class:`~jawa.util.descriptor.JVMType` representing the method's return type. """ return method_descriptor(self.descriptor.value).returns @property def args(self) -> List[JVMType]: """ A list of :class:`~jawa.util.descriptor.JVMType` representing the method's argument list. """ return method_descriptor(self.descriptor.value).args @property def code(self) -> CodeAttribute: """ A shortcut for :code:`method.attributes.find_one(name='Code')`. """ return self.attributes.find_one(name='Code') def __repr__(self): return f'<Method(name={self.name})>' def unpack(self, source: IO): """ Read the Method from the file-like object `fio`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param source: Any file-like object providing `read()` """ self.access_flags.unpack(source.read(2)) self._name_index, self._descriptor_index = unpack('>HH', source.read(4)) self.attributes.unpack(source) def pack(self, out: IO): """ Write the Method to the file-like object `out`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when saving a ClassFile. :param out: Any file-like object providing `write()` """ out.write(self.access_flags.pack()) out.write(pack( '>HH', self._name_index, self._descriptor_index )) self.attributes.pack(out) class MethodTable(object): def __init__(self, cf): self._cf = cf self._table = [] def append(self, method: Method): self._table.append(method) def find_and_remove(self, f: Callable): """ Removes any and all methods for which `f(method)` returns `True`. """ self._table = [fld for fld in self._table if not f(fld)] def remove(self, method: Method): """ Removes a `method` from the table by identity. """ self._table = [fld for fld in self._table if fld is not method] def create(self, name: str, descriptor: str, code: CodeAttribute=None) -> Method: """ Creates a new method from `name` and `descriptor`. If `code` is not ``None``, add a `Code` attribute to this method. 
""" method = Method(self._cf) name = self._cf.constants.create_utf8(name) descriptor = self._cf.constants.create_utf8(descriptor) method._name_index = name.index method._descriptor_index = descriptor.index method.access_flags.acc_public = True if code is not None: method.attributes.create(CodeAttribute) self.append(method) return method def __iter__(self): for method in self._table: yield method def unpack(self, source: IO): """ Read the MethodTable from the file-like object `source`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param source: Any file-like object providing `read()` """ method_count = unpack('>H', source.read(2))[0] for _ in repeat(None, method_count): method = Method(self._cf) method.unpack(source) self.append(method) def pack(self, out: IO): """ Write the MethodTable to the file-like object `out`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when saving a ClassFile. :param out: Any file-like object providing `write()` """ out.write(pack('>H', len(self))) for method in self._table: method.pack(out) def find(self, *, name: str=None, args: str=None, returns: str=None, f: Callable=None) -> Iterator[Method]: """ Iterates over the methods table, yielding each matching method. Calling without any arguments is equivalent to iterating over the table. For example, to get all methods that take three integers and return void:: for method in cf.methods.find(args='III', returns='V'): print(method.name.value) Or to get all private methods:: is_private = lambda m: m.access_flags.acc_private for method in cf.methods.find(f=is_private): print method.name.value :param name: The name of the method(s) to find. :param args: The arguments descriptor (ex: ``III``) :param returns: The returns descriptor (Ex: ``V``) :param f: Any callable which takes one argument (the method). """ for method in self._table: if name is not None and method.name.value != name: continue descriptor = method.descriptor.value end_para = descriptor.find(')') m_args = descriptor[1:end_para] if args is not None and args != m_args: continue m_returns = descriptor[end_para + 1:] if returns is not None and returns != m_returns: continue if f is not None and not f(method): continue yield method def find_one(self, **kwargs) -> Optional[Method]: """ Same as ``find()`` but returns only the first result. """ return next(self.find(**kwargs), None) def __len__(self): return len(self._table)
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import random import shutil import tempfile import unittest from mock import patch, call, DEFAULT import eventlet from swift.account import reaper from swift.account.backend import DATADIR from swift.common.exceptions import ClientException from swift.common.utils import normalize_timestamp, Timestamp from test import unit from test.debug_logger import debug_logger from swift.common.storage_policy import StoragePolicy, POLICIES class FakeBroker(object): def __init__(self): self.info = {} def get_info(self): return self.info class FakeAccountBroker(object): def __init__(self, containers, logger): self.containers = containers self.containers_yielded = [] def get_info(self): info = {'account': 'a', 'delete_timestamp': time.time() - 10} return info def list_containers_iter(self, limit, marker, *args, **kwargs): if not kwargs.pop('allow_reserved'): raise RuntimeError('Expected allow_reserved to be True!') if kwargs: raise RuntimeError('Got unexpected keyword arguments: %r' % ( kwargs, )) for cont in self.containers: if cont > marker: yield cont, None, None, None, None limit -= 1 if limit <= 0: break def is_status_deleted(self): return True def empty(self): return False class FakeRing(object): def __init__(self): self.nodes = [{'id': '1', 'ip': '10.10.10.1', 'port': 6202, 'device': 'sda1'}, {'id': '2', 'ip': '10.10.10.2', 'port': 6202, 'device': 'sda1'}, {'id': '3', 'ip': '10.10.10.3', 'port': 6202, 'device': None}, {'id': '4', 'ip': '10.10.10.1', 'port': 6202, 'device': 'sda2'}, {'id': '5', 'ip': '10.10.10.1', 'port': 6202, 'device': 'sda3'}, ] def get_nodes(self, *args, **kwargs): return ('partition', self.nodes) def get_part_nodes(self, *args, **kwargs): return self.nodes acc_nodes = [{'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}] cont_nodes = [{'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}] @unit.patch_policies([StoragePolicy(0, 'zero', False, object_ring=unit.FakeRing()), StoragePolicy(1, 'one', True, object_ring=unit.FakeRing(replicas=4))]) class TestReaper(unittest.TestCase): def setUp(self): self.to_delete = [] self.myexp = ClientException("", http_host=None, http_port=None, http_device=None, http_status=404, http_reason=None ) def tearDown(self): for todel in self.to_delete: shutil.rmtree(todel) def fake_direct_delete_object(self, *args, **kwargs): if self.amount_fail < self.max_fail: self.amount_fail += 1 raise self.myexp if self.reap_obj_timeout: raise eventlet.Timeout() def fake_direct_delete_container(self, *args, **kwargs): if self.amount_delete_fail < self.max_delete_fail: self.amount_delete_fail += 1 raise self.myexp def fake_direct_get_container(self, *args, 
**kwargs): if self.get_fail: raise self.myexp if self.timeout: raise eventlet.Timeout() objects = [{'name': u'o1'}, {'name': u'o2'}, {'name': u'o3'}, {'name': u'o4'}] return None, [o for o in objects if o['name'] > kwargs['marker']] def fake_container_ring(self): return FakeRing() def fake_reap_object(self, *args, **kwargs): if self.reap_obj_fail: raise Exception def prepare_data_dir(self, ts=False, device='sda1'): devices_path = tempfile.mkdtemp() # will be deleted by teardown self.to_delete.append(devices_path) path = os.path.join(devices_path, device, DATADIR) os.makedirs(path) path = os.path.join(path, '100', 'a86', 'a8c682d2472e1720f2d81ff8993aba6') os.makedirs(path) suffix = 'db' if ts: suffix = 'ts' with open(os.path.join(path, 'a8c682203aba6.%s' % suffix), 'w') as fd: fd.write('') return devices_path def init_reaper(self, conf=None, myips=None, fakelogger=False): if conf is None: conf = {} if myips is None: myips = ['10.10.10.1'] r = reaper.AccountReaper(conf) r.myips = myips if fakelogger: r.logger = debug_logger('test-reaper') return r def fake_reap_account(self, *args, **kwargs): self.called_amount += 1 def fake_account_ring(self): return FakeRing() def test_creation(self): # later config should be extended to assert more config options r = reaper.AccountReaper({'node_timeout': '3.5'}) self.assertEqual(r.node_timeout, 3.5) def test_delay_reaping_conf_default(self): r = reaper.AccountReaper({}) self.assertEqual(r.delay_reaping, 0) r = reaper.AccountReaper({'delay_reaping': ''}) self.assertEqual(r.delay_reaping, 0) def test_delay_reaping_conf_set(self): r = reaper.AccountReaper({'delay_reaping': '123'}) self.assertEqual(r.delay_reaping, 123) def test_delay_reaping_conf_bad_value(self): self.assertRaises(ValueError, reaper.AccountReaper, {'delay_reaping': 'abc'}) def test_reap_warn_after_conf_set(self): conf = {'delay_reaping': '2', 'reap_warn_after': '3'} r = reaper.AccountReaper(conf) self.assertEqual(r.reap_not_done_after, 5) def test_reap_warn_after_conf_bad_value(self): self.assertRaises(ValueError, reaper.AccountReaper, {'reap_warn_after': 'abc'}) def test_reap_delay(self): time_value = [100] def _time(): return time_value[0] time_orig = reaper.time try: reaper.time = _time r = reaper.AccountReaper({'delay_reaping': '10'}) b = FakeBroker() b.info['delete_timestamp'] = normalize_timestamp(110) self.assertFalse(r.reap_account(b, 0, None)) b.info['delete_timestamp'] = normalize_timestamp(100) self.assertFalse(r.reap_account(b, 0, None)) b.info['delete_timestamp'] = normalize_timestamp(90) self.assertFalse(r.reap_account(b, 0, None)) # KeyError raised immediately as reap_account tries to get the # account's name to do the reaping. 
b.info['delete_timestamp'] = normalize_timestamp(89) self.assertRaises(KeyError, r.reap_account, b, 0, None) b.info['delete_timestamp'] = normalize_timestamp(1) self.assertRaises(KeyError, r.reap_account, b, 0, None) finally: reaper.time = time_orig def test_reset_stats(self): conf = {} r = reaper.AccountReaper(conf) self.assertDictEqual(r.stats_return_codes, {}) self.assertEqual(r.stats_containers_deleted, 0) self.assertEqual(r.stats_containers_remaining, 0) self.assertEqual(r.stats_containers_possibly_remaining, 0) self.assertEqual(r.stats_objects_deleted, 0) self.assertEqual(r.stats_objects_remaining, 0) self.assertEqual(r.stats_objects_possibly_remaining, 0) # also make sure reset actually resets values r.stats_return_codes = {"hello": "swift"} r.stats_containers_deleted = random.randint(1, 100) r.stats_containers_remaining = random.randint(1, 100) r.stats_containers_possibly_remaining = random.randint(1, 100) r.stats_objects_deleted = random.randint(1, 100) r.stats_objects_remaining = random.randint(1, 100) r.stats_objects_possibly_remaining = random.randint(1, 100) r.reset_stats() self.assertDictEqual(r.stats_return_codes, {}) self.assertEqual(r.stats_containers_deleted, 0) self.assertEqual(r.stats_containers_remaining, 0) self.assertEqual(r.stats_containers_possibly_remaining, 0) self.assertEqual(r.stats_objects_deleted, 0) self.assertEqual(r.stats_objects_remaining, 0) self.assertEqual(r.stats_objects_possibly_remaining, 0) def test_reap_object(self): conf = { 'mount_check': 'false', } r = reaper.AccountReaper(conf, logger=debug_logger()) mock_path = 'swift.account.reaper.direct_delete_object' for policy in POLICIES: r.reset_stats() with patch(mock_path) as fake_direct_delete: with patch('swift.common.utils.Timestamp.now') as mock_now: mock_now.return_value = Timestamp(1429117638.86767) r.reap_object('a', 'c', 'partition', cont_nodes, 'o', policy.idx) mock_now.assert_called_once_with() for i, call_args in enumerate( fake_direct_delete.call_args_list): cnode = cont_nodes[i % len(cont_nodes)] host = '%(ip)s:%(port)s' % cnode device = cnode['device'] headers = { 'X-Container-Host': host, 'X-Container-Partition': 'partition', 'X-Container-Device': device, 'X-Backend-Storage-Policy-Index': policy.idx, 'X-Timestamp': '1429117638.86767', 'x-backend-use-replication-network': 'true', } ring = r.get_object_ring(policy.idx) expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c', 'o', headers=headers, conn_timeout=0.5, response_timeout=10) self.assertEqual(call_args, expected) self.assertEqual(policy.object_ring.replicas - 1, i) self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas) def test_reap_object_fail(self): r = self.init_reaper({}, fakelogger=True) self.amount_fail = 0 self.max_fail = 1 self.reap_obj_timeout = False policy = random.choice(list(POLICIES)) with patch('swift.account.reaper.direct_delete_object', self.fake_direct_delete_object): r.reap_object('a', 'c', 'partition', cont_nodes, 'o', policy.idx) # IMHO, the stat handling in the node loop of reap object is # over indented, but no one has complained, so I'm not inclined # to move it. However it's worth noting we're currently keeping # stats on deletes per *replica* - which is rather obvious from # these tests, but this results is surprising because of some # funny logic to *skip* increments on successful deletes of # replicas until we have more successful responses than # failures. 
This means that while the first replica doesn't # increment deleted because of the failure, the second one # *does* get successfully deleted, but *also does not* increment # the counter (!?). # # In the three replica case this leaves only the last deleted # object incrementing the counter - in the four replica case # this leaves the last two. # # Basically this test will always result in: # deleted == num_replicas - 2 self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas - 2) self.assertEqual(r.stats_objects_remaining, 1) self.assertEqual(r.stats_objects_possibly_remaining, 1) self.assertEqual(r.stats_return_codes[2], policy.object_ring.replicas - 1) self.assertEqual(r.stats_return_codes[4], 1) def test_reap_object_timeout(self): r = self.init_reaper({}, fakelogger=True) self.amount_fail = 1 self.max_fail = 0 self.reap_obj_timeout = True with patch('swift.account.reaper.direct_delete_object', self.fake_direct_delete_object): r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 1) self.assertEqual(r.stats_objects_deleted, 0) self.assertEqual(r.stats_objects_remaining, 4) self.assertEqual(r.stats_objects_possibly_remaining, 0) self.assertTrue(r.logger.get_lines_for_level( 'error')[-1].startswith('Timeout Exception')) def test_reap_object_non_exist_policy_index(self): r = self.init_reaper({}, fakelogger=True) r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2) self.assertEqual(r.stats_objects_deleted, 0) self.assertEqual(r.stats_objects_remaining, 1) self.assertEqual(r.stats_objects_possibly_remaining, 0) @patch('swift.account.reaper.Ring', lambda *args, **kwargs: unit.FakeRing()) def test_reap_container(self): policy = random.choice(list(POLICIES)) r = self.init_reaper({}, fakelogger=True) with patch.multiple('swift.account.reaper', direct_get_container=DEFAULT, direct_delete_object=DEFAULT, direct_delete_container=DEFAULT) as mocks: headers = {'X-Backend-Storage-Policy-Index': policy.idx} obj_listing = [{'name': 'o'}] def fake_get_container(*args, **kwargs): try: obj = obj_listing.pop(0) except IndexError: obj_list = [] else: obj_list = [obj] return headers, obj_list mocks['direct_get_container'].side_effect = fake_get_container with patch('swift.common.utils.Timestamp.now') as mock_now: mock_now.side_effect = [Timestamp(1429117638.86767), Timestamp(1429117639.67676)] r.reap_container('a', 'partition', acc_nodes, 'c') # verify calls to direct_delete_object mock_calls = mocks['direct_delete_object'].call_args_list self.assertEqual(policy.object_ring.replicas, len(mock_calls)) for call_args in mock_calls: _args, kwargs = call_args self.assertEqual(kwargs['headers'] ['X-Backend-Storage-Policy-Index'], policy.idx) self.assertEqual(kwargs['headers'] ['X-Timestamp'], '1429117638.86767') # verify calls to direct_delete_container self.assertEqual(mocks['direct_delete_container'].call_count, 3) for i, call_args in enumerate( mocks['direct_delete_container'].call_args_list): anode = acc_nodes[i % len(acc_nodes)] host = '%(ip)s:%(port)s' % anode device = anode['device'] headers = { 'X-Account-Host': host, 'X-Account-Partition': 'partition', 'X-Account-Device': device, 'X-Account-Override-Deleted': 'yes', 'X-Timestamp': '1429117639.67676', 'x-backend-use-replication-network': 'true', } ring = r.get_object_ring(policy.idx) expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c', headers=headers, conn_timeout=0.5, response_timeout=10) self.assertEqual(call_args, expected) self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas) def 
test_reap_container_get_object_fail(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = True self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 0 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', self.fake_direct_delete_container), \ patch('swift.account.reaper.AccountReaper.get_container_ring', self.fake_container_ring), \ patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 1) self.assertEqual(r.stats_containers_deleted, 1) def test_reap_container_partial_fail(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = False self.timeout = False self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 4 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', self.fake_direct_delete_container), \ patch('swift.account.reaper.AccountReaper.get_container_ring', self.fake_container_ring), \ patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 4) self.assertEqual(r.stats_containers_possibly_remaining, 1) def test_reap_container_full_fail(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = False self.timeout = False self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 5 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', self.fake_direct_delete_container), \ patch('swift.account.reaper.AccountReaper.get_container_ring', self.fake_container_ring), \ patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 5) self.assertEqual(r.stats_containers_remaining, 1) def test_reap_container_get_object_timeout(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = False self.timeout = True self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 0 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', self.fake_direct_delete_container), \ patch('swift.account.reaper.AccountReaper.get_container_ring', self.fake_container_ring), \ patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') self.assertTrue(r.logger.get_lines_for_level( 'error')[-1].startswith('Timeout Exception')) @patch('swift.account.reaper.Ring', lambda *args, **kwargs: unit.FakeRing()) def test_reap_container_non_exist_policy_index(self): r = self.init_reaper({}, fakelogger=True) with patch.multiple('swift.account.reaper', direct_get_container=DEFAULT, direct_delete_object=DEFAULT, direct_delete_container=DEFAULT) as mocks: headers = {'X-Backend-Storage-Policy-Index': 2} obj_listing = [{'name': 'o'}] def fake_get_container(*args, **kwargs): try: obj = obj_listing.pop(0) except IndexError: obj_list = [] else: obj_list = [obj] return headers, obj_list mocks['direct_get_container'].side_effect = fake_get_container r.reap_container('a', 'partition', 
acc_nodes, 'c') self.assertEqual(r.logger.get_lines_for_level('error'), [ 'ERROR: invalid storage policy index: 2']) def fake_reap_container(self, *args, **kwargs): self.called_amount += 1 self.r.stats_containers_deleted = 1 self.r.stats_objects_deleted = 1 self.r.stats_containers_remaining = 1 self.r.stats_objects_remaining = 1 self.r.stats_containers_possibly_remaining = 1 self.r.stats_objects_possibly_remaining = 1 self.r.stats_return_codes[2] = \ self.r.stats_return_codes.get(2, 0) + 1 def test_reap_account(self): containers = ('c1', 'c2', 'c3', 'c4') broker = FakeAccountBroker(containers, debug_logger()) self.called_amount = 0 self.r = r = self.init_reaper({}, fakelogger=True) r.start_time = time.time() with patch('swift.account.reaper.AccountReaper.reap_container', self.fake_reap_container), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring): nodes = r.get_account_ring().get_part_nodes() for container_shard, node in enumerate(nodes): self.assertTrue( r.reap_account(broker, 'partition', nodes, container_shard=container_shard)) self.assertEqual(self.called_amount, 4) info_lines = r.logger.get_lines_for_level('info') self.assertEqual(len(info_lines), 10) for start_line, stat_line in zip(*[iter(info_lines)] * 2): self.assertEqual(start_line, 'Beginning pass on account a') self.assertTrue(stat_line.find('1 containers deleted')) self.assertTrue(stat_line.find('1 objects deleted')) self.assertTrue(stat_line.find('1 containers remaining')) self.assertTrue(stat_line.find('1 objects remaining')) self.assertTrue(stat_line.find('1 containers possibly remaining')) self.assertTrue(stat_line.find('1 objects possibly remaining')) self.assertTrue(stat_line.find('return codes: 2 2xxs')) @patch('swift.account.reaper.Ring', lambda *args, **kwargs: unit.FakeRing()) def test_basic_reap_account(self): self.r = reaper.AccountReaper({}) self.r.account_ring = None self.r.get_account_ring() self.assertEqual(self.r.account_ring.replica_count, 3) self.assertEqual(len(self.r.account_ring.devs), 3) def test_reap_account_no_container(self): broker = FakeAccountBroker(tuple(), debug_logger()) self.r = r = self.init_reaper({}, fakelogger=True) self.called_amount = 0 r.start_time = time.time() with patch('swift.account.reaper.AccountReaper.reap_container', self.fake_reap_container), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring): nodes = r.get_account_ring().get_part_nodes() self.assertTrue(r.reap_account(broker, 'partition', nodes)) self.assertTrue(r.logger.get_lines_for_level( 'info')[-1].startswith('Completed pass')) self.assertEqual(self.called_amount, 0) def test_reap_device(self): devices = self.prepare_data_dir() self.called_amount = 0 conf = {'devices': devices} r = self.init_reaper(conf) with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', self.fake_reap_account): r.reap_device('sda1') self.assertEqual(self.called_amount, 1) def test_reap_device_with_ts(self): devices = self.prepare_data_dir(ts=True) self.called_amount = 0 conf = {'devices': devices} r = self.init_reaper(conf=conf) with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', self.fake_reap_account): r.reap_device('sda1') 
self.assertEqual(self.called_amount, 0) def test_reap_device_with_not_my_ip(self): devices = self.prepare_data_dir() self.called_amount = 0 conf = {'devices': devices} r = self.init_reaper(conf, myips=['10.10.1.2']) with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', self.fake_reap_account): r.reap_device('sda1') self.assertEqual(self.called_amount, 0) def test_reap_device_with_sharding(self): devices = self.prepare_data_dir() conf = {'devices': devices} r = self.init_reaper(conf, myips=['10.10.10.2']) container_shard_used = [-1] def fake_reap_account(*args, **kwargs): container_shard_used[0] = kwargs.get('container_shard') with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', fake_reap_account): r.reap_device('sda1') # 10.10.10.2 is second node from ring self.assertEqual(container_shard_used[0], 1) def test_reap_device_with_sharding_and_various_devices(self): devices = self.prepare_data_dir(device='sda2') conf = {'devices': devices} r = self.init_reaper(conf) container_shard_used = [-1] def fake_reap_account(*args, **kwargs): container_shard_used[0] = kwargs.get('container_shard') with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', fake_reap_account): r.reap_device('sda2') # 10.10.10.2 is second node from ring self.assertEqual(container_shard_used[0], 3) devices = self.prepare_data_dir(device='sda3') conf = {'devices': devices} r = self.init_reaper(conf) container_shard_used = [-1] with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch('swift.account.reaper.AccountReaper.get_account_ring', self.fake_account_ring), \ patch('swift.account.reaper.AccountReaper.reap_account', fake_reap_account): r.reap_device('sda3') # 10.10.10.2 is second node from ring self.assertEqual(container_shard_used[0], 4) def test_reap_account_with_sharding(self): devices = self.prepare_data_dir() self.called_amount = 0 conf = {'devices': devices} r = self.init_reaper(conf, myips=['10.10.10.2'], fakelogger=True) container_reaped = [0] def fake_list_containers_iter(self, *args, **kwargs): if not kwargs.pop('allow_reserved'): raise RuntimeError('Expected allow_reserved to be True!') if kwargs: raise RuntimeError('Got unexpected keyword arguments: %r' % ( kwargs, )) for container in self.containers: if container in self.containers_yielded: continue yield container, None, None, None, None self.containers_yielded.append(container) def fake_reap_container(self, account, account_partition, account_nodes, container): container_reaped[0] += 1 fake_ring = FakeRing() fake_logger = debug_logger() with patch('swift.account.reaper.AccountBroker', FakeAccountBroker), \ patch( 'swift.account.reaper.AccountBroker.list_containers_iter', fake_list_containers_iter), \ patch('swift.account.reaper.AccountReaper.reap_container', fake_reap_container): fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'], fake_logger) r.reap_account(fake_broker, 10, fake_ring.nodes, 0) self.assertEqual(container_reaped[0], 0) fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'], fake_logger) container_reaped[0] = 0 r.reap_account(fake_broker, 
10, fake_ring.nodes, 1) self.assertEqual(container_reaped[0], 1) container_reaped[0] = 0 fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'], fake_logger) r.reap_account(fake_broker, 10, fake_ring.nodes, 2) self.assertEqual(container_reaped[0], 0) container_reaped[0] = 0 fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'], fake_logger) r.reap_account(fake_broker, 10, fake_ring.nodes, 3) self.assertEqual(container_reaped[0], 3) container_reaped[0] = 0 fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'], fake_logger) r.reap_account(fake_broker, 10, fake_ring.nodes, 4) self.assertEqual(container_reaped[0], 1) def test_run_once(self): def prepare_data_dir(): devices_path = tempfile.mkdtemp() # will be deleted by teardown self.to_delete.append(devices_path) path = os.path.join(devices_path, 'sda1', DATADIR) os.makedirs(path) return devices_path def init_reaper(devices): r = reaper.AccountReaper({'devices': devices}) return r devices = prepare_data_dir() r = init_reaper(devices) with patch('swift.account.reaper.AccountReaper.reap_device') as foo, \ unit.mock_check_drive(ismount=True): r.run_once() self.assertEqual(foo.called, 1) with patch('swift.account.reaper.AccountReaper.reap_device') as foo, \ unit.mock_check_drive(ismount=False): r.run_once() self.assertFalse(foo.called) with patch('swift.account.reaper.AccountReaper.reap_device') as foo: r.logger = debug_logger('test-reaper') r.devices = 'thisdeviceisbad' r.run_once() self.assertTrue(r.logger.get_lines_for_level( 'error')[-1].startswith('Exception in top-level account reaper')) def test_run_forever(self): def fake_sleep(val): self.val = val def fake_random(): return 1 def fake_run_once(): raise Exception('exit') def init_reaper(): r = reaper.AccountReaper({'interval': 1}) r.run_once = fake_run_once return r r = init_reaper() with patch('swift.account.reaper.sleep', fake_sleep): with patch('swift.account.reaper.random.random', fake_random): with self.assertRaises(Exception) as raised: r.run_forever() self.assertEqual(self.val, 1) self.assertEqual(str(raised.exception), 'exit') if __name__ == '__main__': unittest.main()
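# Self-contained sketch of the per-replica delete counting described in the
# long comment inside test_reap_object_fail above: a successful delete only
# increments the "deleted" stat once successes outnumber failures, so with one
# failing replica the final count is replicas - 2.  (Illustrative only; the
# real logic lives in swift.account.reaper.AccountReaper.reap_object.)
def simulate_delete_stat(replica_results):
    successes = failures = deleted = 0
    for ok in replica_results:
        if not ok:
            failures += 1
            continue
        successes += 1
        if successes > failures:
            deleted += 1
    return deleted

# One failure followed by successes on a 3-replica ring -> 3 - 2 == 1.
assert simulate_delete_stat([False, True, True]) == 1
# The same pattern on a 4-replica ring -> 4 - 2 == 2.
assert simulate_delete_stat([False, True, True, True]) == 2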
# -*- coding: utf-8 -*- # Copyright 2018 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Sync a git repository to a given manifest. This script is intended to define all of the ways that a source checkout can be defined for a Chrome OS builder. If a sync completes successfully, the checked out code will exactly match whatever manifest is defined, and no local git branches will remain. Extraneous files not under version management will be ignored. It is NOT safe to assume that the checkout can be further updated with a simple "repo sync", instead you should call this script again with the same options. """ from __future__ import print_function import sys from chromite.cbuildbot import manifest_version from chromite.cbuildbot import patch_series from chromite.cbuildbot import repository from chromite.lib import commandline from chromite.lib import cros_logging as logging from chromite.lib import config_lib from chromite.lib import gerrit from chromite.lib import osutils assert sys.version_info >= (3, 6), 'This module requires Python 3.6+' def GetParser(): """Creates the argparse parser.""" parser = commandline.ArgumentParser(description=__doc__) parser.add_argument('--repo-root', type='path', default='.', help='Path to the repo root to sync.') manifest_group = parser.add_argument_group( 'Manifest', description='What manifest do we sync?') manifest_ex = manifest_group.add_mutually_exclusive_group() manifest_ex.add_argument( '--branch', default='master', help='Sync to top of given branch.') manifest_ex.add_argument( '--buildspec', help='Path to manifest, relative to manifest-versions root.') manifest_ex.add_argument( '--version', help='Shorthand for an official release buildspec. e.g. 9799.0.0') manifest_ex.add_argument( '--manifest-file', type='path', help='Sync to an existing local manifest file.') manifest_group.add_argument( '--groups', help='manifest groups to sync.') manifest_url_ex = manifest_group.add_mutually_exclusive_group() manifest_url_ex.add_argument( '--external', action='store_true', default=False, help='Sync to the external version of a manifest. Switch from ' 'manifest-versions-internal to manifest-versions for buildspecs. ' 'Not usable with --manifest.') manifest_url_ex.add_argument( '--manifest-url', help='Manually set URL to fetch repo manifest from.') patch_group = parser.add_argument_group( 'Patch', description='Which patches should be included with the build?') patch_group.add_argument( '-g', '--gerrit-patches', action='split_extend', default=[], metavar='Id1 *int_Id2...IdN', help='Space-separated list of short-form Gerrit ' "Change-Id's or change numbers to patch. " "Please prepend '*' to internal Change-Id's") resources_group = parser.add_argument_group( 'Resources', description='External resources that might be needed.') resources_group.add_argument( '--manifest-versions-int', type='path', help='Directory for internal manifest versions checkout. ' 'May be refreshed.') resources_group.add_argument( '--manifest-versions-ext', type='path', help='Directory for internal manifest versions checkout. ' 'May be refreshed.') optimization_group = parser.add_argument_group( 'Optimization', description='Hints provided to possibly speed up initial sync.') optimization_group.add_argument( '--copy-repo', type='path', help='Path to an existing repo root. 
Used to preload the local ' "checkout if the local checkout doesn't exist.") optimization_group.add_argument( '--git-cache-dir', type='path', help='Git cache directory to use.') optimization_group.add_argument( '--repo-url', help='Repo repository location.') return parser def PrepareManifestVersions(options): """Select manifest-versions checkout to use, and update it. Looks at command line options to decide which manifest-versions checkout to use, and updates (or creates) it as needed. Args: options: Parsed command line options. Returns: Full path to manifest-versions directory to use for this sync. Raises: AssertionError: If the needed manifest-versions path wasn't con the command line. """ site_params = config_lib.GetSiteParams() if options.external: assert options.manifest_versions_ext, '--manifest-versions-ext required.' manifest_versions_url = site_params.MANIFEST_VERSIONS_GOB_URL manifest_versions_path = options.manifest_versions_ext else: assert options.manifest_versions_int, '--manifest-versions-int required.' manifest_versions_url = site_params.MANIFEST_VERSIONS_INT_GOB_URL manifest_versions_path = options.manifest_versions_int # Resolve buildspecs against a current manifest versions value. manifest_version.RefreshManifestCheckout( manifest_versions_path, manifest_versions_url) return manifest_versions_path def ResolveLocalManifestPath(options): """Based on command line options, decide what local manifest file to use. Args: options: Our parsed command line options. Returns: Path to local manifest file to use, or None for no file. """ if options.manifest_file: # If the user gives us an explicit local manifest file, use it. return options.manifest_file elif options.buildspec: # Buildspec builds use a manifest file from manifest_versions. We do NOT # use manifest_versions as the manifest git repo, because it's so large that # sync time would be a major performance problem. manifest_versions_path = PrepareManifestVersions(options) return manifest_version.ResolveBuildspec( manifest_versions_path, options.buildspec) elif options.version: # Versions are a short hand version of a buildspec. manifest_versions_path = PrepareManifestVersions(options) return manifest_version.ResolveBuildspecVersion( manifest_versions_path, options.version) elif options.branch: # Branch checkouts use our normal manifest repos, not a local manifest file. return None else: assert False, 'No sync options specified. Should not be possible.' def main(argv): parser = GetParser() options = parser.parse_args(argv) options.Freeze() local_manifest = ResolveLocalManifestPath(options) if local_manifest: logging.info('Using local_manifest: %s', local_manifest) if options.manifest_url: manifest_url = options.manifest_url elif options.external: manifest_url = config_lib.GetSiteParams().MANIFEST_URL else: manifest_url = config_lib.GetSiteParams().MANIFEST_INT_URL osutils.SafeMakedirs(options.repo_root) repo = repository.RepoRepository( manifest_repo_url=manifest_url, directory=options.repo_root, branch=options.branch, git_cache_dir=options.git_cache_dir, repo_url=options.repo_url, groups=options.groups) if options.copy_repo: repo.PreLoad(options.copy_repo) if repository.IsARepoRoot(options.repo_root): repo.BuildRootGitCleanup(prune_all=True) repo.Sync(local_manifest=local_manifest, detach=True) if options.gerrit_patches: patches = gerrit.GetGerritPatchInfo(options.gerrit_patches) # TODO: Extract patches from manifest synced. 
    helper_pool = patch_series.HelperPool.SimpleCreate(
        cros_internal=not options.external, cros=True)
    series = patch_series.PatchSeries(
        path=options.repo_root, helper_pool=helper_pool, forced_manifest=None)
    _, failed_tot, failed_inflight = series.Apply(patches)
    failed = failed_tot + failed_inflight
    if failed:
      logging.error('Failed to apply: %s',
                    ', '.join(str(p) for p in failed))
      return 1
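# Illustrative invocations of the sync script above (a sketch: the entry-point
# name "repo_sync_manifest" and all paths/branch names are placeholders; the
# flags come straight from GetParser()):
#
#   repo_sync_manifest --repo-root /work/chromiumos --branch release-R70-11021.B
#   repo_sync_manifest --repo-root /work/chromiumos --version 9799.0.0 \
#       --manifest-versions-int /work/manifest-versions-int
#   repo_sync_manifest --repo-root /work/chromiumos --branch master \
#       --gerrit-patches '123456 *987654'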
import effects import datetime from time import * from numpy import * from Tkinter import * import random import english, german from constants import * import fonts import util offset = -5 * 3600 def next_time(*args, **kw): clocktwo.offset += 60 clocktwo.update() def last_time(*args, **kw): clocktwo.offset -= 60 clocktwo.update() def key_press(event): c = event.char if event.char == '1': effects.wordfall_out(clocktwo) for color in COLOR_MASKS: clocktwo.set_by_key('CLOCKTWO', color) effects.scroll_msg(clocktwo, 'ClockTHREE', color, clearall=False) clocktwo.refresh() sleep(1) if event.char == '2': effects.wordfall_out(clocktwo) effects.clocktwo_rain(clocktwo, BLUE_MASK) clocktwo.update(refresh=False) effects.wordfall_in(clocktwo) if event.char == '3': effects.sweep_color(clocktwo, 0) effects.matrix(clocktwo) effects.wordfall_out(clocktwo) clocktwo.update(refresh=False) effects.wordfall_in(clocktwo) if event.char == '4': clocktwo.clearall() new_buff = zeros(N_COL, 'uint32') for o in range(ord('9'), ord('0') - 1, -1): new_buff[:8] = util.mono8x8_to_RGB(fonts.basic.get(chr(o))) new_buff[9:] = 0. effects.flip_in(clocktwo, new_buff) sleep(.2) if event.char == '5': effects.wordfall_out(clocktwo) effects.scroll_msg(clocktwo, 'But', BLUE_MASK) sleep(.2) effects.we_need_your_support(clocktwo) if event.char == '6': effects.wordfall_out(clocktwo) clocktwo.set_by_key('THANK') clocktwo.refresh() sleep(.3) clocktwo.set_by_key('YOU!') clocktwo.refresh() sleep(4) if event.char == '7': clocktwo.paused = True effects.sweep_color(clocktwo, 0) clocktwo.set_by_key('CHAI') clocktwo.refresh() sleep(.3) clocktwo.set_by_key('IN') clocktwo.set_by_key('THE') clocktwo.refresh() sleep(.3) clocktwo.set_by_key('MORNING') clocktwo.refresh() sleep(5) effects.wordfall_out(clocktwo) clocktwo.set_by_key('ITS') clocktwo.refresh() sleep(.3) clocktwo.set_by_key('BEER') clocktwo.refresh() sleep(.3) clocktwo.set_by_key('THIRTY') clocktwo.refresh() sleep(5) effects.sweep_color(clocktwo, 0) clocktwo.update(refresh=False) effects.wordfall_in(clocktwo) clocktwo.paused = False if event.char == 'X': effects.slide_in_RL(clocktwo, clocktwo.buffer) if event.char == 'x': effects.slide_in_LR(clocktwo, clocktwo.buffer) if event.char == 'W': effects.wordfall_out(clocktwo) if event.char == 'w': effects.wordfall_in(clocktwo) if event.char == 'C': effects.cascadeRL(clocktwo) if event.char == 'H': clocktwo.offset -= 3600 clocktwo.update() if event.char == 'h': clocktwo.offset += 3600 clocktwo.update() if event.char == 'M': clocktwo.offset -= 60 clocktwo.update() if event.char == 'm': clocktwo.offset += 60 clocktwo.update() if event.char == 'S': clocktwo.offset -= 1 clocktwo.update() if event.char == 's': clocktwo.offset += 1 clocktwo.update() if event.char == 'F': clocktwo.offset -= 5 * 60 clocktwo.update() if event.char == 'f': clocktwo.offset += 5 * 60 clocktwo.update() if event.char == 'u': effects.wordfall_out(clocktwo) clocktwo.update(refresh=False) effects.wordfall_in(clocktwo) tk = Tk() tk.title('ClockTWO') tk.tk_setPalette('#000000') r = Frame(tk, background='#000000') r.grid(row=0, column=0) r.bind("<Button-1>", next_time) r.bind("<Button-3>", last_time) tk.bind("<Key>", key_press) class Screen: def __init__(self, n_col=N_COL): self.buffer = zeros(n_col, 'uint32') def __getitem__(self, idx): return self.buffer[idx] def setPixel(self, row, col, color): if(row < 10): self.buffer[col] &= ~ROWS[row] self.buffer[col] |= (color & 0b00000111) << 3 * row elif ((color == OFF | color == MONO) & row < 12): if(color): self.buffer[col] |= ROWS[row] 
else: self.buffer[col] &= ~ROWS[row] def getPixel(self, row, col): out = self.buffer[col] & ROWS[row] if row < 10: out <<= 3 * row else: out <<= 30 + (row - 10) return out def clearall(self): self.buffer *= 0 def clear_rgb(self): self.buffer = self.buffer & MONO class ClockTWO: def __init__(self, language, offset): self.N_ROW = N_ROW self.N_COL = N_COL self.screen = Screen() self.macros = language.macros self.update_time = language.update_time self.text = language.text self.default_color = 0b111 self.offset = offset self.paused = False self.time_str = '' self.language = language def getBuffer(self): return self.screen.buffer buffer = property(getBuffer) def setPixel(self, row, col, color): return self.screen.setPixel(row, col, color) def getPixel(self, row, col): return self.screen.getPixel(row, col) def update(self, refresh=True): if not self.paused: self.update_time(self, refresh=refresh) def clearall(self): self.screen.clearall() def clear_rgb(self): self.screen.clear_rgb() def print_screen(self): out = '' for row in range(12): for col in range(16): if self.getPixel(row, col) > 0: out += self.language.text[row][col] else: out += ' ' out += ' ' print '%02d:%02d' % divmod(self.offset / 60, 60), ' '.join(out.split()) def set_by_key(self, key, color=None): # print key if color is None: color = self.default_color if self.macros.has_key(key): rows, cols = self.macros[key] for row, col in zip(rows, cols): self.screen.setPixel(row, col, color) def refresh(self, delay=.05): for col in range(self.N_COL): data = self.screen[col] for row in range(self.N_ROW): if row < 10: color = (data >> 3 * row) & 0b111 else: color = (data >> 20 + row) & 0b1 s = '#' if color & 1 << 2: s = s + "FF" else: s = s + "20" if color & 1 << 1: s = s + "FF" else: s = s + "20" if color & 1 << 0: s = s + "FF" else: s = s + "20" labels[row][col].config(foreground=s) if delay > 0: sleep(delay) r.update() def main(language, offset): import sys global labels, clocktwo if len(sys.argv) > 1 and sys.argv[1].lower() == 'german': clocktwo = ClockTWO(german, 0) else: clocktwo = ClockTWO(english, -5 * 3600) labels = [] l = Label(r, text=' ', font='Courier 28', background="#000000", foreground="#000000") l.grid(row=0, column=0) l = Label(r, text=' ', font='Courier 28', background="#000000", foreground="#000000") l.grid(row=13, column=17) for row in range(N_ROW): labels.append([]) for col in range(N_COL): l = Label(r, text='%s' % clocktwo.text[row][col], font='Times 28', background="#000000", foreground="#000000") l.grid(column=col+1, row=row+1, ipadx=8) labels[-1].append(l) if clocktwo.text[0].startswith('ITS'): labels[0][1].config(text="T'") labels[5][8].config(text="O'") def do_dec(): clocktwo.offset -= 60 clocktwo.update() def do_inc(): clocktwo.offset += 60 clocktwo.update() def do_mode(): clocktwo.default_color += 1 clocktwo.default_color %= 8 clocktwo.update() def do_reset(): clocktwo.offset = offset clocktwo.update() reset_b = Button(r, text='R', foreground='#ff0000', command=do_reset) mode_b = Button(r, text='M', foreground='#ff0000', command=do_mode) dec_b = Button(r, text='D', foreground='#ff0000', command=do_dec) inc_b = Button(r, text='I', foreground='#ff0000', command=do_inc) reset_b.grid(column=3, row=13) dec_b.grid(column=8, row=13) mode_b.grid(column=7, row=13) inc_b.grid(column=6, row=13) after_id = None def tick_tock(): global after_id clocktwo.update() # after_id = r.after(5 * 1000, tick_tock) clocktwo.refresh() if False: for minute in range(0, 1440, 5): clocktwo.offset = minute * 60 clocktwo.update() 
            clocktwo.print_screen()
    else:
        clocktwo.update()
        after_id = r.after(5 * 1000, tick_tock)

    def on_close(*args):
        r.after_cancel(after_id)
        tk.destroy()

    tk.protocol('WM_DELETE_WINDOW', on_close)
    tk.mainloop()


if __name__ == '__main__':
    main(english, 4 * 4600)
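# Self-contained sketch of the per-column packing implemented by
# Screen.setPixel above for rows 0-9: each row owns a 3-bit RGB field inside a
# 32-bit column word, so writing a pixel means "clear that field, then OR in
# the colour shifted by 3 * row".  (Illustrative only; the real code uses the
# ROWS masks from the project's constants module.)
def pack_pixel(column_word, row, color):
    field_mask = 0b111 << (3 * row)              # the three bits owned by this row
    column_word &= ~field_mask                   # clear the previous colour
    column_word |= (color & 0b111) << (3 * row)  # write the new colour
    return column_word

# Example: setting row 2 of an empty column to colour 0b101 lands in bits 6-8.
assert pack_pixel(0, 2, 0b101) == 0b101 << 6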
#!/usr/bin/env python # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for testing the add_network command.""" import unittest if __name__ == "__main__": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestAddNetwork(TestBrokerCommand): def testaddnetwork(self): for network in self.net: if not network.autocreate: continue command = ["add_network", "--network=%s" % network.name, "--ip=%s" % network.ip, "--netmask=%s" % network.netmask, "--" + network.loc_type, network.loc_name, "--type=%s" % network.nettype, "--side=%s" % network.side] if network.comments: command.extend(["--comments", network.comments]) self.noouttest(command) def testverifyauroranetwork(self): net = self.net["aurora2"] command = ["show", "network", "--ip", net.ip] out = self.commandtest(command) self.matchoutput(out, "Network: aurora2", command) self.matchoutput(out, "IP: %s" % net.ip, command) self.matchoutput(out, "Network Type: %s" % net.nettype, command) self.matchoutput(out, "Comments: %s" % net.comments, command) self.matchoutput(out, "Building: np", command) def testverifybunker(self): net = self.net["np06bals03_v103"] command = ["show", "network", "--ip", net.ip] out = self.commandtest(command) self.matchoutput(out, "Network: np06bals03_v103", command) self.matchoutput(out, "IP: %s" % net.ip, command) self.matchoutput(out, "Bunker: nyb10.np", command) def testaddnetworkdup(self): # Old name, new address net = self.net["unknown0"] command = ["add", "network", "--network", net.name, "--ip", "192.168.10.0", "--netmask", "255.255.255.0", "--building", "ut", "--type", net.nettype] err = self.statustest(command) self.matchoutput(err, "WARNING: Network name %s is already used for " "address %s." % (net.name, str(net)), command) def testaddsubnet(self): # Add a subnet of an existing network net = self.net["unknown0"] subnet = net.subnet()[1] command = ["add", "network", "--network", "subnet-test", "--ip", subnet.ip, "--netmask", subnet.netmask, "--building", "ut", "--type", net.nettype] out = self.badrequesttest(command) self.matchoutput(out, "IP address %s is part of existing network " "named %s with address %s." 
% (str(subnet.ip), net.name, str(net)), command) def testaddnetworkofcards(self): # An entirely fictitious network self.noouttest(["add_network", "--ip", "192.168.1.0", "--network", "cardnetwork", "--netmask", "255.255.255.0", "--building", "cards", "--side", "a", "--type", "unknown", "--comments", "Made-up network"]) def test_autherror_100(self): self.demote_current_user("operations") def test_autherror_200(self): # Another entirely fictitious network command = ["add_network", "--ip", "192.168.2.0", "--network", "cardnetwork2", "--netmask", "255.255.255.0", "--building", "cards", "--side", "a", "--type", "unknown", "--comments", "Made-up network"] out = self.unauthorizedtest(command, auth=True, msgcheck=False) allowed_roles = self.config.get("site", "change_default_netenv_roles") role_list = allowed_roles.strip().split() default_ne = self.config.get("site", "default_network_environment") self.matchoutput(out, "Only users with %s can modify networks in the %s " "network environment." % (role_list, default_ne), command) def test_autherror_300(self): # Yet another entirely fictitious network command = ["add_network", "--ip", "192.168.3.0", "--network_environment", "cardenv", "--network", "cardnetwork3", "--netmask", "255.255.255.0", "--building", "cards", "--side", "a", "--type", "unknown", "--comments", "Made-up network"] self.noouttest(command) def test_autherror_900(self): self.promote_current_user() def testaddexcx(self): net = self.net["unknown0"] subnet = net.subnet()[0] command = ["add", "network", "--network", "excx-net", "--ip", subnet.ip, "--netmask", subnet.netmask, "--building", "np", "--type", net.nettype, "--network_environment", "excx"] self.noouttest(command) def testaddnetsvcmap(self): net = self.net["netsvcmap"] subnet = net.subnet()[0] command = ["add", "network", "--network", "netsvcmap", "--ip", subnet.ip, "--netmask", subnet.netmask, "--building", "ut", "--type", net.nettype] self.noouttest(command) def testaddnetperssvcmap(self): net = self.net["netperssvcmap"] subnet = net.subnet()[0] command = ["add", "network", "--network", "netperssvcmap", "--ip", subnet.ip, "--netmask", subnet.netmask, "--building", "ut", "--type", net.nettype] self.noouttest(command) def testaddutcolo(self): net = self.net["unknown1"] command = ["add", "network", "--network", "utcolo-net", "--ip", net.ip, "--netmask", net.netmask, "--building", "ut", "--type", net.nettype, "--network_environment", "utcolo"] self.noouttest(command) def testbadip(self): command = ["add_network", "--ip", "10.0.0.1", "--network", "bad-address", "--netmask", "255.255.255.0", "--building", "ut", "--side", "a", "--type", "unknown"] out = self.badrequesttest(command) self.matchoutput(out, "IP address 10.0.0.1 is not a network address. 
" "Maybe you meant 10.0.0.0?", command) def testshownetwork(self): for network in self.net: if not network.autocreate: continue command = "show network --ip %s" % network.ip out = self.commandtest(command.split(" ")) self.matchoutput(out, "Network: %s" % network.name, command) self.matchoutput(out, "Network Environment: internal", command) self.matchoutput(out, "IP: %s" % network.ip, command) self.matchoutput(out, "Netmask: %s" % network.netmask, command) self.matchoutput(out, "%s: %s" % (network.loc_type.title(), network.loc_name), command) self.matchoutput(out, "Side: %s" % network.side, command) self.matchoutput(out, "Network Type: %s" % network.nettype, command) def testshownetworkcomments(self): command = "show network --network np06bals03_v103" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Comments: Some network comments", command) def testshownetworkbuilding(self): command = "show_network --building ut" out = self.commandtest(command.split(" ")) for network in self.net: if not network.autocreate: continue if ((network.loc_type == "building" and network.loc_name == "ut") or (network.loc_type == "bunker" and network.loc_name == "bucket2.ut")): self.matchoutput(out, str(network.ip), command) else: self.matchclean(out, str(network.ip), command) def testshownetworkcsv(self): # Use --exact_location here, so we don't have to worry about networks # mapped to child locations command = "show_network --building ut --exact_location --format csv" out = self.commandtest(command.split(" ")) for network in self.net: if not network.autocreate: continue if network.loc_type == "building" and network.loc_name == "ut": self.matchoutput(out, "%s,%s,%s,ut.ny.na,us,a,%s,%s\n" % (network.name, network.ip, network.netmask, network.nettype, network.comments or ""), command) else: self.matchclean(out, str(network.ip), command) def testshownetworkproto(self): command = "show network --building ut --format proto" self.protobuftest(command.split(" ")) def testaddlocalnet(self): command = ["add", "network", "--network", "localnet", "--ip", "127.0.0.0", "--netmask", "255.0.0.0", "--building", "ut"] self.noouttest(command) def testshownetworknoenv(self): command = "show network --building np" out = self.commandtest(command.split(" ")) self.matchclean(out, "excx-net", command) command = "show network --building ut" out = self.commandtest(command.split(" ")) self.matchclean(out, "utcolo-net", command) self.matchoutput(out, "netsvcmap", command) self.matchoutput(out, "netperssvcmap", command) def testshownetworkwithenv(self): command = "show network --building np --network_environment excx" out = self.commandtest(command.split(" ")) self.matchoutput(out, "excx-net", command) def testshowexcxnoenv(self): command = "show network --network excx-net" out = self.notfoundtest(command.split(" ")) self.matchoutput(out, "Network excx-net not found.", command) def testshowexcxwithenv(self): net = self.net["unknown0"] subnet = net.subnet()[0] command = "show network --network excx-net --network_environment excx" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Network: excx-net", command) self.matchoutput(out, "Network Environment: excx", command) self.matchoutput(out, "IP: %s" % subnet.ip, command) self.matchoutput(out, "Netmask: %s" % subnet.netmask, command) def testshowutcolowithenv(self): net = self.net["unknown1"] command = "show network --network utcolo-net --network_environment utcolo" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Network: utcolo-net", command) 
self.matchoutput(out, "Network Environment: utcolo", command) self.matchoutput(out, "IP: %s" % net.ip, command) self.matchoutput(out, "Netmask: %s" % net.netmask, command) def test_800_add_utdmz1_fail(self): network = self.net["ut_dmz1"] command = ["add_network", "--network=%s" % network.name, "--ip=%s" % network.ip, "--netmask=%s" % network.netmask, "--" + network.loc_type, network.loc_name, "--type=%s" % network.nettype, "--side=%s" % network.side, "--network_compartment=noexistant"] out = self.notfoundtest(command) self.matchoutput(out, "Network Compartment noexistant not found.", command) def test_801_add_utdmz1(self): network = self.net["ut_dmz1"] command = ["add_network", "--network=%s" % network.name, "--ip=%s" % network.ip, "--netmask=%s" % network.netmask, "--" + network.loc_type, network.loc_name, "--type=%s" % network.nettype, "--side=%s" % network.side, "--network_compartment=perimeter.ut"] self.noouttest(command) def test_802_del_utper(self): command = ["del", "network", "compartment", "--network_compartment", "perimeter.ut"] out = self.badrequesttest(command) self.matchoutput(out, "still has networks defined", command) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestAddNetwork) unittest.TextTestRunner(verbosity=2).run(suite)
''' MiniFridge is a dict-like class with optional self-destruct timers. CacheOutput is a decorator which caches outputs of function calls. ''' import datetime as dt from threading import Lock class MiniFridge(object): ''' A MiniFridge is like a dictionary, but with 2 extra rules: 1. If another thread is using the fridge, then wait your turn. 2. If you see something past its expiration date, then throw it out. Some MiniFridge methods allow users to set a countdown timer. These timers are set using keyword arguments for datetime.timedelta: weeks, days, hours, minutes, seconds, microseconds, milliseconds Examples: mf = MiniFridge(minutes=1) # Make a MiniFridge with 1-minute timer mf.put('spam',42,expire=False) # Put spam in mf. It never expires mf.put('eggs',2,seconds=2) # Put eggs in mf. It expires in 2 seconds. mf['beer'] = 100 # Put beer in mf. It expires in 1 minute. mf['spam'] # Get value for key 'spam' mf.get('eggs',default='Nope') # Get value without raising KeyError if key is bad mf.pop('beer',default=None) # Pop value without raising KeyError if key is bad mf.keys() # Delete any bad items. Return the good keys. mf.purge() # Delete any expired (key,value) pairs mf.clear() # Delete everything Caution: There are no built-in limits to size or number of elements. Caution: Not all dictionary methods have been implemented yet. Caution: Not well-tested yet, especially with multi-threading. ''' @staticmethod def is_past(t): ''' Convenience function for figuring out if something has expired ''' return ( (t is not None) and (t < dt.datetime.today()) ) def __init__(self,**kwargs): ''' Initialize an empty dictionary with a lock and optional default timer. Use timedelta kwargs to set default timer. Stored items will expire after this much time unless specified otherwise. If no **kwargs, then timer is set to roughly 100 years. ''' self._contents = dict() self.lock = Lock() if kwargs: self.default_timer = dt.timedelta(**kwargs) else: self.default_timer = None def __setitem__(self,key,value): ''' Put a (key,value) pair in the MiniFridge dictionary-style ''' with self.lock: birth = dt.datetime.today() death = birth + self.default_timer self._contents[key] = (value,birth,death) def __getitem__(self,key): ''' Get a value from the MiniFridge dictionary-style. If key is not found, this will throw a KeyError. If key is found but expired, this with throw a KeyError. ''' with self.lock: value,birth,death = self._contents[key] if self.is_past(death): del self._contents[key] raise KeyError(key) else: return value def __delitem__(self,key): ''' Delete (key,value) pair ''' with self.lock: del self._contents[key] def __contains__(self,key): ''' Magic function for answering "x in MiniFridge" questions. If key is not found, then return False. If key is found but has expired, then throw it out! Return False. If key is found and has not expired, then return True. ''' with self.lock: if not key in self._contents: return False else: value,birth,death = self._contents[key] if self.is_past(death): del self._contents[key] return False else: return True def __len__(self): ''' How many items are in the fridge? ''' with self.lock: return len(self._contents) def put(self,key,value,expire=True,**kwargs): ''' Put a (key,value) pair in the MiniFridge with optional timer. By default, it will expire after default_timer elapses. To choose a different lifetime, use timedelta kwargs. To set lifetime to infinity, use expire=False. 
''' with self.lock: # Remember creation time and calculate expiration time birth = dt.datetime.now() # Is this item invincible? If not, when does it expire? if not expire: death = None else: if kwargs: death = birth + dt.timedelta(**kwargs) else: death = birth + self.default_timer # Store value, creation time, and expiration time as a 3-tuple. self._contents[key] = (value,birth,death) def get(self,key,default=None): ''' Like __getitem__(), but does not raise a KeyError if key is bad. Returns default value instead. ''' with self.lock: try: value,birth,death = self._contents[key] if self.is_past(death): del self._contents[key] return default else: return value except KeyError: return default def set_expiration(self,key,expiration_time): ''' Instead of a countdown timer, set a specific expiration date. expiration_time should be a datetime object. Will raise a KeyError if key is not found. ''' with self.lock: value,birth,death = self._contents[key] self._contents[key] = value,birth,expiration_time def purge(self): ''' Throw out anything that has expired ''' with self.lock: contents = self._contents def is_dead(key): death = contents[key][2] return self.is_past(death) bad_keys = [ k for k in contents.keys() if is_dead(k) ] for k in bad_keys: del contents[k] self._contents = contents def keys(self): ''' Throw out anything that has expired. Return whatever keys are left. ''' self.purge() with self.lock: return self._contents.keys() def values(self): ''' Throw out anything that has expired. Return whatever values are left. ''' self.purge() with self.lock: return self._contents.values() def pop(self,key,default=None): ''' Pop a value: return it and delete it. Like get(), but deletes (key,value) whether or not it has expired. ''' with self.lock: try: value,birth,death = self._contents[key] if self.is_past(death): value = default del self._contents[key] return value except: return default def clear(self): ''' Delete all (key,value) pairs ''' with self.lock: self._contents.clear() class CacheOutput(object): ''' Class-based decorator used to avoid re-calculating a function. The first time the function is called, it initializes a MiniFridge. Each time the function is called, input arguments are hashed. The resulting hash is used as a MiniFridge key, and the outputs of calling the function are stored for a limited time. Set timer using keyword arguments for datetime.timedelta: weeks, days, hours, minutes, seconds, microseconds, milliseconds Example: @CacheOutput(hours=1) def cached_power_tower(x,N): for n in range(N): x *= x return x WARNING: @CacheOutput stores *outputs* of a function. It does not replicate *side effects* of a function! Caution: Not well-tested yet, especially with multi-threading. ''' @staticmethod def _convert_args_to_hash(args,kwargs): ''' Convert arguments to a string and hash it ''' return hash(str(args)+str(kwargs)) def __init__(self,**kwargs): ''' Create a MiniFridge with timer set by timedelta arguments ''' self.fridge = MiniFridge(**kwargs) def __call__(self,func,*args,**kwargs): def wrapper(*args,**kwargs): ''' Convert inputs to key. Is this key in MiniFridge? If so, just look up the answer. If not, call the function and store the result in MiniFridge. ''' key = self._convert_args_to_hash(args,kwargs) if key in self.fridge: result = self.fridge[key] else: result = func(*args,**kwargs) self.fridge[key] = result return result return wrapper
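# A minimal usage sketch of the two classes above, appended as a demo.
# The fridge is constructed with an explicit default timer here: with no
# timedelta kwargs, default_timer stays None and `birth + self.default_timer`
# raises TypeError in __setitem__ (and in put() without per-item kwargs),
# despite the docstring's "roughly 100 years" note.
import time

if __name__ == '__main__':
    mf = MiniFridge(minutes=1)          # default lifetime: 1 minute
    mf.put('spam', 42, expire=False)    # never expires
    mf.put('eggs', 2, seconds=1)        # expires in 1 second
    mf['beer'] = 100                    # expires in 1 minute (default timer)
    time.sleep(1.5)
    print(mf.get('eggs', default='expired'))   # -> 'expired'
    print('spam' in mf, mf['beer'])            # -> True 100

    @CacheOutput(seconds=30)
    def slow_square(x):
        time.sleep(0.5)
        return x * x

    t0 = time.time()
    slow_square(12)                     # computed (sleeps 0.5 s)
    slow_square(12)                     # served from the fridge
    print('two calls took %.2f s' % (time.time() - t0))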
from sqlobject import * from sqlobject.tests.dbtest import * class TestSO1(SQLObject): name = StringCol(length=50, dbName='name_col') name.title = 'Your Name' name.foobar = 1 passwd = StringCol(length=10) class sqlmeta: cacheValues = False def _set_passwd(self, passwd): self._SO_set_passwd(passwd.encode('rot13')) def setupGetters(cls): setupClass(cls) inserts(cls, [('bob', 'god'), ('sally', 'sordid'), ('dave', 'dremel'), ('fred', 'forgo')], 'name passwd') def test_case1(): setupGetters(TestSO1) bob = TestSO1.selectBy(name='bob')[0] assert bob.name == 'bob' assert bob.passwd == 'god'.encode('rot13') bobs = TestSO1.selectBy(name='bob')[:10] assert len(list(bobs)) == 1 def test_newline(): setupGetters(TestSO1) bob = TestSO1.selectBy(name='bob')[0] testString = 'hey\nyou\\can\'t you see me?\t' bob.name = testString bob.expire() assert bob.name == testString def test_count(): setupGetters(TestSO1) assert TestSO1.selectBy(name=None).count() == 0 assert TestSO1.selectBy(name='bob').count() == 1 assert TestSO1.select(TestSO1.q.name == 'bob').count() == 1 assert TestSO1.select().count() == len(list(TestSO1.select())) def test_getset(): setupGetters(TestSO1) bob = TestSO1.selectBy(name='bob')[0] assert bob.name == 'bob' bob.name = 'joe' assert bob.name == 'joe' bob.set(name='joebob', passwd='testtest') assert bob.name == 'joebob' def test_extra_vars(): setupGetters(TestSO1) col = TestSO1.sqlmeta.columns['name'] assert col.title == 'Your Name' assert col.foobar == 1 assert getattr(TestSO1.sqlmeta.columns['passwd'], 'title', None) is None class TestSO2(SQLObject): name = StringCol(length=50, dbName='name_col') passwd = StringCol(length=10) def _set_passwd(self, passwd): self._SO_set_passwd(passwd.encode('rot13')) def test_case2(): setupGetters(TestSO2) bob = TestSO2.selectBy(name='bob')[0] assert bob.name == 'bob' assert bob.passwd == 'god'.encode('rot13') class Student(SQLObject): is_smart = BoolCol() def test_boolCol(): setupClass(Student) student = Student(is_smart=False) assert student.is_smart == False student2 = Student(is_smart=1) assert student2.is_smart == True class TestSO3(SQLObject): name = StringCol(length=10, dbName='name_col') other = ForeignKey('TestSO4', default=None) other2 = KeyCol(foreignKey='TestSO4', default=None) class TestSO4(SQLObject): me = StringCol(length=10) def test_foreignKey(): setupClass([TestSO4, TestSO3]) test3_order = [col.name for col in TestSO3.sqlmeta.columnList] assert test3_order == ['name', 'otherID', 'other2ID'] tc3 = TestSO3(name='a') assert tc3.other is None assert tc3.other2 is None assert tc3.otherID is None assert tc3.other2ID is None tc4a = TestSO4(me='1') tc3.other = tc4a assert tc3.other == tc4a assert tc3.otherID == tc4a.id tc4b = TestSO4(me='2') tc3.other = tc4b.id assert tc3.other == tc4b assert tc3.otherID == tc4b.id tc4c = TestSO4(me='3') tc3.other2 = tc4c assert tc3.other2 == tc4c assert tc3.other2ID == tc4c.id tc4d = TestSO4(me='4') tc3.other2 = tc4d.id assert tc3.other2 == tc4d assert tc3.other2ID == tc4d.id tcc = TestSO3(name='b', other=tc4a) assert tcc.other == tc4a tcc2 = TestSO3(name='c', other=tc4a.id) assert tcc2.other == tc4a def test_selectBy(): setupClass([TestSO4, TestSO3]) tc4 = TestSO4(me='another') tc3 = TestSO3(name='sel', other=tc4) anothertc3 = TestSO3(name='not joined') assert tc3.other == tc4 assert list(TestSO3.selectBy(other=tc4)) == [tc3] assert list(TestSO3.selectBy(otherID=tc4.id)) == [tc3] assert TestSO3.selectBy(otherID=tc4.id)[0] == tc3 assert list(TestSO3.selectBy(otherID=tc4.id)[:10]) == [tc3] assert 
list(TestSO3.selectBy(other=tc4)[:10]) == [tc3] class TestSO5(SQLObject): name = StringCol(length=10, dbName='name_col') other = ForeignKey('TestSO6', default=None, cascade=True) another = ForeignKey('TestSO7', default=None, cascade=True) class TestSO6(SQLObject): name = StringCol(length=10, dbName='name_col') other = ForeignKey('TestSO7', default=None, cascade=True) class TestSO7(SQLObject): name = StringCol(length=10, dbName='name_col') def test_foreignKeyDestroySelfCascade(): setupClass([TestSO7, TestSO6, TestSO5]) tc5 = TestSO5(name='a') tc6a = TestSO6(name='1') tc5.other = tc6a tc7a = TestSO7(name='2') tc6a.other = tc7a tc5.another = tc7a assert tc5.other == tc6a assert tc5.otherID == tc6a.id assert tc6a.other == tc7a assert tc6a.otherID == tc7a.id assert tc5.other.other == tc7a assert tc5.other.otherID == tc7a.id assert tc5.another == tc7a assert tc5.anotherID == tc7a.id assert tc5.other.other == tc5.another assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 1 assert TestSO7.select().count() == 1 tc6b = TestSO6(name='3') tc6c = TestSO6(name='4') tc7b = TestSO7(name='5') tc6b.other = tc7b tc6c.other = tc7b assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 3 assert TestSO7.select().count() == 2 tc6b.destroySelf() assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 2 assert TestSO7.select().count() == 2 tc7b.destroySelf() assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 1 assert TestSO7.select().count() == 1 tc7a.destroySelf() assert TestSO5.select().count() == 0 assert TestSO6.select().count() == 0 assert TestSO7.select().count() == 0 def testForeignKeyDropTableCascade(): if not supports('dropTableCascade'): return setupClass(TestSO7) setupClass(TestSO6) setupClass(TestSO5) tc5a = TestSO5(name='a') tc6a = TestSO6(name='1') tc5a.other = tc6a tc7a = TestSO7(name='2') tc6a.other = tc7a tc5a.another = tc7a tc5b = TestSO5(name='b') tc5c = TestSO5(name='c') tc6b = TestSO6(name='3') tc5c.other = tc6b assert TestSO5.select().count() == 3 assert TestSO6.select().count() == 2 assert TestSO7.select().count() == 1 TestSO7.dropTable(cascade=True) assert TestSO5.select().count() == 3 assert TestSO6.select().count() == 2 tc6a.destroySelf() assert TestSO5.select().count() == 2 assert TestSO6.select().count() == 1 tc6b.destroySelf() assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 0 assert iter(TestSO5.select()).next() == tc5b tc6c = TestSO6(name='3') tc5b.other = tc6c assert TestSO5.select().count() == 1 assert TestSO6.select().count() == 1 tc6c.destroySelf() assert TestSO5.select().count() == 0 assert TestSO6.select().count() == 0 class TestSO8(SQLObject): name = StringCol(length=10, dbName='name_col') other = ForeignKey('TestSO9', default=None, cascade=False) class TestSO9(SQLObject): name = StringCol(length=10, dbName='name_col') def testForeignKeyDestroySelfRestrict(): setupClass([TestSO9, TestSO8]) tc8a = TestSO8(name='a') tc9a = TestSO9(name='1') tc8a.other = tc9a tc8b = TestSO8(name='b') tc9b = TestSO9(name='2') assert tc8a.other == tc9a assert tc8a.otherID == tc9a.id assert TestSO8.select().count() == 2 assert TestSO9.select().count() == 2 raises(Exception, tc9a.destroySelf) tc9b.destroySelf() assert TestSO8.select().count() == 2 assert TestSO9.select().count() == 1 tc8a.destroySelf() tc8b.destroySelf() tc9a.destroySelf() assert TestSO8.select().count() == 0 assert TestSO9.select().count() == 0 class TestSO10(SQLObject): name = StringCol() class TestSO11(SQLObject): name = StringCol() 
    other = ForeignKey('TestSO10', default=None, cascade='null')


def testForeignKeySetNull():
    setupClass([TestSO10, TestSO11])
    obj1 = TestSO10(name='foo')
    obj2 = TestSO10(name='bar')
    dep1 = TestSO11(name='xxx', other=obj1)
    dep2 = TestSO11(name='yyy', other=obj1)
    dep3 = TestSO11(name='zzz', other=obj2)
    for name in 'xxx', 'yyy', 'zzz':
        assert len(list(TestSO11.selectBy(name=name))) == 1
    obj1.destroySelf()
    for name in 'xxx', 'yyy', 'zzz':
        assert len(list(TestSO11.selectBy(name=name))) == 1
    assert dep1.other is None
    assert dep2.other is None
    assert dep3.other is obj2


def testAsDict():
    setupGetters(TestSO1)
    bob = TestSO1.selectBy(name='bob')[0]
    assert bob.sqlmeta.asDict() == {
        'passwd': 'tbq', 'name': 'bob', 'id': bob.id}


def test_nonexisting_attr():
    setupClass(Student)
    try:
        Student.select(Student.q.nonexisting)
    except AttributeError:
        pass
    else:
        assert 0, "Expected an AttributeError"


class TestSO12(SQLObject):
    name = StringCol()
    value = IntCol(defaultSQL='1')


def test_defaultSQL():
    setupClass(TestSO12)
    test = TestSO12(name="test")
    assert test.value == 1


def test_connection_override():
    sqlhub.processConnection = connectionForURI('sqlite:///db1')

    class TestSO13(SQLObject):
        _connection = connectionForURI('sqlite:///db2')

    assert TestSO13._connection.uri() == 'sqlite:///db2'
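# The cascade tests above exercise three ForeignKey behaviours: cascade=True
# removes dependent rows, cascade=False forbids deleting a referenced row,
# and cascade='null' clears the reference. The sketch below shows the same
# three semantics with plain sqlite3 ON DELETE clauses; it only illustrates
# the expected behaviour (SQLObject enforces these rules in Python) and is
# deliberately not named as a collectible test.
import sqlite3


def _sqlite_cascade_sketch():
    conn = sqlite3.connect(':memory:')
    conn.execute('PRAGMA foreign_keys = ON')
    conn.execute('CREATE TABLE parent (id INTEGER PRIMARY KEY)')
    conn.execute("""CREATE TABLE child (
        id INTEGER PRIMARY KEY,
        cascade_id INTEGER REFERENCES parent(id) ON DELETE CASCADE,
        restrict_id INTEGER REFERENCES parent(id) ON DELETE RESTRICT,
        null_id INTEGER REFERENCES parent(id) ON DELETE SET NULL)""")
    conn.executemany('INSERT INTO parent (id) VALUES (?)', [(1,), (2,), (3,)])
    conn.execute('INSERT INTO child VALUES (1, 1, NULL, NULL)')  # like cascade=True
    conn.execute('INSERT INTO child VALUES (2, NULL, 2, NULL)')  # like cascade=False
    conn.execute('INSERT INTO child VALUES (3, NULL, NULL, 3)')  # like cascade='null'

    conn.execute('DELETE FROM parent WHERE id = 1')  # dependent row is deleted too
    assert conn.execute('SELECT COUNT(*) FROM child').fetchone()[0] == 2
    try:
        conn.execute('DELETE FROM parent WHERE id = 2')  # refused while referenced
    except sqlite3.IntegrityError:
        pass
    conn.execute('DELETE FROM parent WHERE id = 3')  # reference is set to NULL
    assert conn.execute('SELECT null_id FROM child '
                        'WHERE id = 3').fetchone()[0] is None

# Run manually with: _sqlite_cascade_sketch()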
# -*- coding: utf-8 -*- """ Created on Tue Mar 10 11:28:57 2015 @author: dp11 This code implements PCA-based registration of image sequences """ #============================================================================== # Imports #============================================================================== import numpy as np import SimpleITK as sitk import time from pca.pca import PCA import os #============================================================================== # Hard-coded variables TODO - add as input arguments #============================================================================== N_DOF = 6 # Rigid registration MIN_OVERLAP = 10000 MAX_DIST = 1e15 spacing = [2, 2, 2] # resampling spacing fraction = .99 # fraction of retained variance _GAMMA = .1 _SIGMA = 1 os.environ["SITK_SHOW_COMMAND"] = "itksnap" def usage( sname ): """ Define input and output """ os.sys.stderr.write("\nPCA-based rigid registration of image sequences\n") os.sys.stderr.write("\nUsage: {0} -target_path <path_to_target_seq> "\ "-source_path <path_to_source_seq> -dofout <dof_file> -out_path"\ " <out_path> -out_time <otime> -v\n\n".format( os.path.split(sname)[-1] ) ) os.sys.stderr.write("Where:\t-data_path\t\tis the path to target and source sequences\n") os.sys.stderr.write("\t-dofout\t\tname of output file with resulting DOFs\n") os.sys.stderr.write("\t-out_path\t\tpath where registered sequence is written\n") os.sys.stderr.write("\t-out_time\t\tfile where execution time is written\n") os.sys.stderr.write("\t<opt>-v\t\tverbose mode\n\n") return True def read_inputs( argv, argc ): """ Parse input arguments """ argc = len(argv) tpath, spath, dfile, opath, otime, verbose = 0, 0, 0, 0, 0, False for a in range( argc ): if argv[a] == '-target_path': tpath = a elif argv[a] == '-source_path': spath = a elif argv[a] == '-dofout': dfile = a elif argv[a] == '-out_path': opath = a elif argv[a] == '-out_time': otime = a elif argv[a] == '-v': verbose = True if tpath==0 or spath==0 or dfile==0 or opath==0 or otime==0: os.sys.stderr.write('Can''t parse input arguments') return True return argv[tpath+1], argv[spath+1], argv[dfile+1], argv[opath+1], \ argv[otime+1], verbose def find_nearest(array,value): idx = (np.abs(array-value)).argmin() return idx def compute_info( image ): """ Compute information on resampled and original images """ old_size, old_spac, old_orig = image.GetSize(), image.GetSpacing(), \ image.GetOrigin() new_size, new_spac, new_orig = [], [], old_orig for i in range(len(old_size)): new_size.append( int(np.ceil( 1.0*(old_size[i]-1)*old_spac[i]/spacing[i] ) + 1) ) for i in range(len(new_size)): new_spac.append( 1.0*old_spac[i]*old_size[i]/new_size[i] ) #============================================================================== # Store information in two dictionaries #============================================================================== old_image_info = {'Size':old_size, 'Spacing':old_spac, 'Origin':old_orig, \ 'Direction':image.GetDirection()} new_image_info = {'Size':new_size, 'Spacing':new_spac, 'Origin':new_orig, \ 'Direction':image.GetDirection()} return new_image_info, old_image_info def get_overlapping_mask( image_1, image_2 ): """ Returns the indeces of the overlapping region between image_1 and image_2 """ ref_nda_seq_1 = sitk.GetArrayFromImage( image_1 ).ravel() ref_nda_seq_2 = sitk.GetArrayFromImage( image_2 ).ravel() mask_1, mask_2 = np.zeros(len(ref_nda_seq_1),dtype=bool),\ np.zeros(len(ref_nda_seq_2),dtype=bool) mask_1[ np.where(ref_nda_seq_1>0)[0] ] = True mask_2[ 
np.where(ref_nda_seq_2>0)[0] ] = True mask = mask_1&mask_2 if np.sum(mask)>MIN_OVERLAP: overlap = True else: overlap = False return mask, overlap def get_length_from_list( image_sequence ): """ Function to get PCs of given image sequence """ N, D = len(image_sequence), len(sitk.GetArrayFromImage( image_sequence[0] ).ravel()) X = np.zeros([D,N]) for n in range(N): X[:,n] = sitk.GetArrayFromImage( image_sequence[n] ).ravel() ipca = PCA( X.T, np.eye(N), fraction=fraction, ptype='dual' ) iZ = ipca.transform() length = 0 for n in range(N-1): length += np.linalg.norm( iZ[n,:]-iZ[n+1,:] ) del X, ipca return length def get_pca_distance( tseq, wseq, mask, perimeter, vis ): """ Function to compute the distance between PCA manifolds """ #============================================================================== # Create matrices to store the sequences #============================================================================== n_t, n_w, D = len(tseq), len(wseq), len(sitk.GetArrayFromImage( tseq[0] ).ravel()) tX, wX = np.zeros([D,n_t]), np.zeros([D,n_w]) for n in range(n_t): tX[:,n] = sitk.GetArrayFromImage( tseq[n] ).ravel() for n in range(n_w): wX[:,n] = sitk.GetArrayFromImage( wseq[n] ).ravel() #============================================================================== # Reduce dimensionality of target sequence #============================================================================== tpca = PCA( tX[mask,:].T, np.eye(n_t), fraction=fraction, ptype='dual' ) tZ = tpca.transform() U = np.dot( np.dot( tpca.psi, tpca.eigVec ), \ np.linalg.inv(np.diag(np.sqrt( tpca.eigVal )) ) ) #============================================================================== # Project warped sequence onto reduced subspace of target #============================================================================== wpca = PCA( wX[mask,:].T, np.eye(n_w), fraction=fraction, ptype='dual' ) wtZ = np.real( np.dot( U.T, wpca.A ).T ) length = 0 for n in range(n_w-1): length += np.linalg.norm( wtZ[n,:]-wtZ[n+1,:] ) #============================================================================== # Find correspondences if sequences have different number of frames #============================================================================== n_vertices = np.max( [n_t, n_w]) idx_t, idx_w = np.zeros( n_vertices, dtype = np.uint ),\ np.zeros( n_vertices, dtype = np.uint ) at, aw = np.linspace(0,1,num=n_t), np.linspace(0,1,num=n_w) if n_t>n_w: idx_t = np.arange( n_vertices ) for ni in np.arange( n_vertices ): idx_w[ni] = find_nearest( aw, at[ni] ) elif n_t<n_w: idx_w = np.arange( n_vertices ) for ni in np.arange(n_vertices): idx_t[ni] = find_nearest( at, aw[ni] ) else: idx_t = np.arange( n_vertices ) idx_w = np.arange( n_vertices ) return _GAMMA*np.sum( np.linalg.norm(wtZ[idx_w,:]-tZ[idx_t,:], axis=1) ) +\ (1-_GAMMA)*(perimeter-length) def compute_manifold_distance( tseq, sseq, dof, perimeter, visualise ): """ Function that projects the warped source sequence onto the target sequence and computes the distance between projections """ #============================================================================== # Compute centre of target image #============================================================================== centre = np.dot( np.array( tseq[0].GetDirection() ).reshape((3,3)),\ (np.array(tseq[0].GetSize())-1)*tseq[0].GetSpacing()/2.0 ) centre += tseq[0].GetOrigin() #============================================================================== # Set up the transform 
#============================================================================== etransf = sitk.Transform( 3, sitk.sitkEuler ) etransf.SetFixedParameters( centre ) etransf.SetParameters( dof ) #============================================================================== # Warp source sequence given the input dof file #============================================================================== resampler = sitk.ResampleImageFilter() resampler.SetReferenceImage( tseq[0] ) resampler.SetInterpolator( sitk.sitkLinear ) resampler.SetTransform( etransf ) wseq = [] for s in sseq: wseq.append( resampler.Execute( s ) ) #============================================================================== # Compute overlapping region #============================================================================== mask, overlap = get_overlapping_mask( tseq[0], wseq[0] ) if overlap: distance = get_pca_distance( tseq, wseq, mask, perimeter, visualise ) else: distance = MAX_DIST del etransf, resampler, wseq, mask, overlap return distance def optimise_dofs( target_sequence, source_sequence, perimeter, verbose ): """ Function to find optimal DOFs """ dof, dirn, step = np.zeros(N_DOF), 1, 10 #============================================================================== # Hill climbing optimisation #============================================================================== std_dev = np.array([np.pi/450.0, np.pi/450.0, np.pi/450.0, .4, .4, .4]) while (dirn!=-1 or step>0.1): incr_step = step*std_dev best_dist = compute_manifold_distance( target_sequence, source_sequence,\ dof, perimeter, True ) if verbose: print('Current dof estimate: {0}'.format(dof)) print('Lowest manifold distance: {0}'.format( best_dist )) dirn, pom = -1, 0 for n_dof in range( N_DOF ): dof[n_dof] += incr_step[n_dof] dist = compute_manifold_distance( target_sequence, source_sequence,\ dof, perimeter, False ) if dist<best_dist: dirn = n_dof pom = 1 dof[n_dof] -= 2*incr_step[n_dof] dist = compute_manifold_distance( target_sequence, source_sequence,\ dof, perimeter, False ) if dist<best_dist: dirn = n_dof pom = -1 dof[n_dof] += incr_step[n_dof] if dirn!=-1: dof[dirn] = dof[dirn]+pom*incr_step[dirn] else: step /= 2.0 if verbose: print('Final dof estimate: {0}'.format(dof)) print('Lowest manifold distance: {0}'.format( best_dist )) return dof def write_registered_sequence( sfiles, tref, sseq, dof, out_path ): """ Function to write registered images to folder """ if not os.path.exists( out_path ): os.mkdir( out_path ) #============================================================================== # Compute centre of target image #============================================================================== centre = np.dot( np.array( tref.GetDirection() ).reshape((3,3)),\ (np.array(tref.GetSize())-1)*tref.GetSpacing()/2.0 ) centre += tref.GetOrigin() #============================================================================== # Set up the transform #============================================================================== etransf = sitk.Transform( 3, sitk.sitkEuler ) etransf.SetFixedParameters( centre ) etransf.SetParameters( dof ) #============================================================================== # Warp source sequence given the input dof file #============================================================================== resampler = sitk.ResampleImageFilter() resampler.SetReferenceImage( tref ) resampler.SetInterpolator( sitk.sitkLinear ) # sitkBSpline resampler.SetTransform( etransf ) os.sys.stdout.write('Writing sequence 
to folder\n') for name, image in zip( sfiles, sseq ): sitk.WriteImage( resampler.Execute( image ), '{0}{1}'.format(out_path, name) ) return def write_dof_to_file( dof, doffile, verbose ): """ Function to write DOF parameters to file """ ndof = np.zeros([N_DOF,3]) ndof[3:,2] = (180/np.pi)*dof[:3] ndof[:3,2] = dof[3:] if verbose: os.sys.stdout.write('Final DOF: {0}\n'.format(ndof)) os.sys.stdout.write('Writing DOF to file\n') np.savetxt( doffile, ndof, fmt='%.2f', delimiter='\t', header='DOF: 6',\ comments='' ) return def main(argv): """ Check input arguments """ argc = len(argv) if argc < 11: return usage(argv[0]) else: target_dir, source_dir, doffile, out_path, otime, verbose = read_inputs(argv, argc) #============================================================================== # Read names of input target and source sequences #============================================================================== target_files = sorted(os.listdir( target_dir )) source_files = sorted(os.listdir( source_dir )) if verbose: os.sys.stdout.write('Reading target sequence from {0}\n'.format(target_dir)) os.sys.stdout.write('Reading source sequence from {0}\n'.format(source_dir)) #============================================================================== # If images are metadata one file is for metadata and one for raw #============================================================================== extension = target_files[0].split('.')[1] if extension == 'mhd' or extension=='mha': target_files = target_files[::2] source_files = source_files[::2] #============================================================================== # Read and resample target sequence to a 2x2x2 isotropic volume #============================================================================== target_sequence = [] resampler = sitk.ResampleImageFilter() target_reference = sitk.ReadImage('{0}{1}'.format( target_dir, target_files[0] )) resampled_info, original_info = compute_info( target_reference ) smoother = sitk.SmoothingRecursiveGaussianImageFilter() smoother.SetSigma( _SIGMA ) for n, f in enumerate( target_files ): us_image = sitk.ReadImage('{0}{1}'.format( target_dir, f )) #============================================================================== # Smooth images #============================================================================== smus_image = smoother.Execute( us_image ) res_us_image = resampler.Execute( smus_image, resampled_info['Size'],\ sitk.Transform(), sitk.sitkBSpline, resampled_info['Origin'],\ resampled_info['Spacing'], resampled_info['Direction'], int(), sitk.sitkInt16 ) target_sequence.append( res_us_image ) perimeter = get_length_from_list( target_sequence ) #============================================================================== # Read and resample source sequence to a 2x2x2 isotropic volume #============================================================================== source_sequence = [] osource_sequence = [] source_reference = sitk.ReadImage('{0}{1}'.format( source_dir, source_files[0] )) resampled_info, original_info = compute_info( source_reference ) for n, f in enumerate( source_files ): us_image = sitk.ReadImage('{0}{1}'.format( source_dir, f )) osource_sequence.append( us_image ) #============================================================================== # Smooth images #============================================================================== smus_image = smoother.Execute( us_image ) res_us_image = resampler.Execute( smus_image, resampled_info['Size'],\ sitk.Transform(), 
sitk.sitkBSpline, resampled_info['Origin'],\ resampled_info['Spacing'], resampled_info['Direction'], int(), sitk.sitkInt16 ) source_sequence.append( res_us_image ) #============================================================================== # Reduce sequence #============================================================================== start_time = time.time() dof = optimise_dofs( target_sequence, source_sequence, perimeter, verbose ) stop_time = time.time() write_registered_sequence( source_files, target_reference, osource_sequence,\ dof, out_path ) write_dof_to_file( dof, doffile, verbose ) np.savetxt( otime, [stop_time-start_time], fmt='%.1f' ) if __name__ == "__main__": os.sys.exit(main(os.sys.argv))
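#==============================================================================
# Appendix sketch: optimise_dofs() above is coordinate-wise hill climbing:
# try a step up and down along each DOF, keep an improving move, and halve
# the step once no move helps. The toy version below runs the same loop on a
# 2-parameter quadratic so the pattern can be exercised in isolation
# (illustrative only; the real objective is compute_manifold_distance).
#==============================================================================
def toy_hill_climb( objective, n_dof=2, step=10.0, min_step=0.1 ):
    """ Coordinate-wise hill climbing with step halving """
    dof = np.zeros( n_dof )
    std_dev = np.ones( n_dof )
    dirn = 1
    while (dirn!=-1 or step>min_step):
        incr_step = step*std_dev
        best_dist = objective( dof )
        dirn, pom = -1, 0
        for n in range( n_dof ):
            for sign in (1, -1):
                trial = dof.copy()
                trial[n] += sign*incr_step[n]
                dist = objective( trial )
                if dist<best_dist:
                    best_dist, dirn, pom = dist, n, sign
        if dirn!=-1:
            dof[dirn] = dof[dirn]+pom*incr_step[dirn]
        else:
            step /= 2.0
    return dof

# Usage: the minimum of this quadratic is at (3, -2); the climber should end
# up within roughly min_step of it.
#   toy_hill_climb( lambda p: (p[0]-3.0)**2 + (p[1]+2.0)**2 )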
#!/usr/bin/env python3 # Copyright 2013 The Font Bakery Authors. # Copyright 2017 The Google Font Tools Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See AUTHORS.txt for the list of Authors and LICENSE.txt for the License. # from __future__ import print_function import argparse import collections import sys from fontTools import ttLib from fontTools.ttLib import TTLibError if sys.version_info.major >= 3: from io import StringIO else: from StringIO import StringIO # TextMetricsView # This class was inherited from the old v0.0.15 codebase # It may be useful at some point, but it seems more sofisticated # than actually necessary. # Right now I'll simply print the relevant # table entries of each font pass by the user on the command line. # So this class will be left here only as a reference for a future # implementation, if something more sofisticated is actually needed. # --FSanches class TextMetricsView(object): def __init__(self): self.outstream = StringIO() self._its_metrics_header = ['Parameter '] # first column has a length of largest parameter # named OS/2.sTypoDescender self._its_metrics = collections.OrderedDict([ ('ymax', []), ('hhea.ascent', []), ('OS/2.sTypoAscender', []), ('OS/2.usWinAscent', []), ('ymin', []), ('hhea.descent', []), ('OS/2.sTypoDescender', []), ('OS/2.usWinDescent', []), ('hhea.lineGap', []), ('OS/2.sTypoLineGap', []), ('hhea total', []), ('typo total', []), ('win total', []), ('UPM:Heights', []), ('UPM:Heights %', []) ]) self._inconsistent = set() self._inconsistent_table = {} self._warnings = [] self.glyphs = collections.OrderedDict() def add_to_table(self, fontname, key, value): if self._its_metrics[key] and value not in self._its_metrics[key]: self._inconsistent.add(key) if key not in self._inconsistent_table: self._inconsistent_table[key] = [] # lookup row with value and append fontname to `fonts` key, eg.: # {'hhea.ascent': [{'value': 390, # 'fonts': ['fontname.ttf', 'fontname2.ttf']}]} # # It looks like json groupped by metrics key inconsistentRow = {} for r in self._inconsistent_table[key]: if r['value'] == value: inconsistentRow = r if not inconsistentRow: inconsistentRow = {'value': value, 'fonts': []} self._inconsistent_table[key].append(inconsistentRow) inconsistentRow['fonts'].append(fontname) self._its_metrics[key].append(value) def add_metric(self, font_name, vmet): ymin, ymax = vmet.get_bounding() self._its_metrics_header.append(font_name) self.add_to_table(font_name, 'hhea.ascent', vmet.ascents.hhea) self.add_to_table(font_name, 'OS/2.sTypoAscender', vmet.ascents.os2typo) self.add_to_table(font_name, 'OS/2.usWinAscent', vmet.ascents.os2win) self.add_to_table(font_name, 'hhea.descent', vmet.descents.hhea) self.add_to_table(font_name, 'OS/2.sTypoDescender', vmet.descents.os2typo) self.add_to_table(font_name, 'OS/2.usWinDescent', vmet.descents.os2win) self.add_to_table(font_name, 'hhea.lineGap', vmet.linegaps.hhea) self.add_to_table(font_name, 'OS/2.sTypoLineGap', vmet.linegaps.os2typo) self._its_metrics['ymax'].append(ymax) 
self._its_metrics['ymin'].append(ymin) value = abs(ymin) + ymax upm = '%s:%s' % (vmet.get_upm_height(), value) self._its_metrics['UPM:Heights'].append(upm) value = (value / float(vmet.get_upm_height())) * 100 self._its_metrics['UPM:Heights %'].append('%d %%' % value) hhea_total = vmet.ascents.hhea + abs(vmet.descents.hhea) + vmet.linegaps.hhea self._its_metrics['hhea total'].append(hhea_total) typo_total = vmet.ascents.os2typo + abs(vmet.descents.os2typo) + vmet.linegaps.os2typo self._its_metrics['typo total'].append(typo_total) win_total = vmet.ascents.os2win + abs(vmet.descents.os2win) self._its_metrics['win total'].append(win_total) if len(set([typo_total, hhea_total, win_total])) > 1: self._warnings.append('%s has NOT even heights' % font_name) self.glyphs[font_name] = vmet.get_highest_and_lowest() def print_metrics(self): self.print_warnings() self.print_metrics_table() self.print_high_glyphs() self.print_low_glyphs() self.print_inconsistent_table() def print_warnings(self): if self._inconsistent: _ = 'WARNING: Inconsistent {}' print(_.format(' '.join([str(x) for x in self._inconsistent])), end='\n\n', file=self.outstream) if self._warnings: for warn in self._warnings: print('WARNING: %s' % warn, file=self.outstream) def print_metrics_table(self): formatstring = '' for k in self._its_metrics_header: print(('{:<%s}' % (len(k) + 4)).format(k), end='', file=self.outstream) formatstring += '{:<%s}' % (len(k) + 4) print(file=self.outstream) for k, values in self._its_metrics.items(): print(formatstring.format(*([k] + values)), file=self.outstream) def print_high_glyphs(self): header_printed = False for font, glyphs in self.glyphs.items(): if glyphs[0]: if not header_printed: print(file=self.outstream) print('High Glyphs', file=self.outstream) header_printed = True print(font + ':', ' '.join(glyphs[0]), file=self.outstream) def print_low_glyphs(self): header_printed = False for font, glyphs in self.glyphs.items(): if glyphs[1]: if not header_printed: print(file=self.outstream) print('Low Glyphs', file=self.outstream) header_printed = True print(font + ':', ' '.join(glyphs[1]), file=self.outstream) def print_inconsistent_table(self): print(file=self.outstream) for metrickey, row in self._inconsistent_table.items(): value = self.find_max_occurs_from_metrics_key(row) tbl = {} for r in row: if r['value'] == value: continue if metrickey not in tbl: tbl[metrickey] = [] tbl[metrickey] += r['fonts'] for k, r in tbl.items(): print('WARNING: Inconsistent %s:' % k, ', '.join(r), file=self.outstream) def find_max_occurs_from_metrics_key(self, metricvalues): result = 0 occurs = 0 if len(metricvalues) == 2: return metricvalues[1]['value'] for v in metricvalues: if len(v['fonts']) > occurs: occurs = len(v['fonts']) result = v['value'] return result def get_contents(self): self.outstream.seek(0) return self.outstream.read() parser = argparse.ArgumentParser() # ascent parameters parser.add_argument('-a', '--ascents', type=int, help=("Set new ascents value.")) parser.add_argument('-ah', '--ascents-hhea', type=int, help=("Set new ascents value in 'Horizontal Header'" " table ('hhea'). This argument" " cancels --ascents.")) parser.add_argument('-at', '--ascents-typo', type=int, help=("Set new ascents value in 'Horizontal Header'" " table ('OS/2'). This argument" " cancels --ascents.")) parser.add_argument('-aw', '--ascents-win', type=int, help=("Set new ascents value in 'Horizontal Header'" " table ('OS/2.Win'). 
This argument" " cancels --ascents.")) # descent parameters parser.add_argument('-d', '--descents', type=int, help=("Set new descents value.")) parser.add_argument('-dh', '--descents-hhea', type=int, help=("Set new descents value in 'Horizontal Header'" " table ('hhea'). This argument" " cancels --descents.")) parser.add_argument('-dt', '--descents-typo', type=int, help=("Set new descents value in 'Horizontal Header'" " table ('OS/2'). This argument" " cancels --descents.")) parser.add_argument('-dw', '--descents-win', type=int, help=("Set new descents value in 'Horizontal Header'" " table ('OS/2.Win'). This argument" " cancels --descents.")) # linegaps parameters parser.add_argument('-l', '--linegaps', type=int, help=("Set new linegaps value.")) parser.add_argument('-lh', '--linegaps-hhea', type=int, help=("Set new linegaps value in 'Horizontal Header'" " table ('hhea')")) parser.add_argument('-lt', '--linegaps-typo', type=int, help=("Set new linegaps value in 'Horizontal Header'" " table ('OS/2')")) parser.add_argument('--autofix', action="store_true", help="Autofix font metrics") parser.add_argument('ttf_font', nargs='+', metavar='ttf_font', help="Font file in OpenType (TTF/OTF) format") def vmetrics(ttFonts): from fontbakery.utils import get_bounding_box v_metrics = {"ymin": 0, "ymax": 0} for ttFont in ttFonts: font_ymin, font_ymax = get_bounding_box(ttFont) v_metrics["ymin"] = min(font_ymin, v_metrics["ymin"]) v_metrics["ymax"] = max(font_ymax, v_metrics["ymax"]) return v_metrics def main(): options = parser.parse_args() fonts = options.ttf_font if options.ascents or \ options.descents or \ options.linegaps or \ options.linegaps == 0 or \ options.ascents_hhea or \ options.ascents_typo or \ options.ascents_win or \ options.descents_hhea or \ options.descents_typo or \ options.descents_win or \ options.linegaps_hhea or \ options.linegaps_hhea == 0 or \ options.linegaps_typo or \ options.linegaps_typo == 0: for f in fonts: try: ttfont = ttLib.TTFont(f) except TTLibError as ex: print('Error: {0}: {1}'.format(f, ex)) continue if options.ascents: ttfont['hhea'].ascent = options.ascents ttfont['OS/2'].sTypoAscender = options.ascents ttfont['OS/2'].usWinAscent = options.ascents if options.descents: ttfont['hhea'].descent = options.descents ttfont['OS/2'].sTypoDescender = options.descents ttfont['OS/2'].usWinDescent = abs(options.descents) if options.linegaps or options.linegaps == 0: ttfont['hhea'].lineGap = options.linegaps ttfont['OS/2'].sTypoLineGap = options.linegaps if options.ascents_hhea: ttfont['hhea'].ascent = options.ascents_hhea if options.ascents_typo: ttfont['OS/2'].sTypoAscender = options.ascents_typo if options.ascents_win: ttfont['OS/2'].usWinAscent = options.ascents_win if options.descents_hhea: ttfont['hhea'].descent = options.descents_hhea if options.descents_typo: ttfont['OS/2'].sTypoDescender = options.descents_typo if options.descents_win: ttfont['OS/2'].usWinDescent = abs(options.descents_win) if options.linegaps_hhea or options.linegaps_hhea == 0: ttfont['hhea'].lineGap = options.linegaps_hhea if options.linegaps_typo or options.linegaps_typo == 0: ttfont['OS/2'].sTypoLineGap = options.linegaps_typo ttfont.save(f[:-4] + '.fix.ttf') elif options.autofix: ttFonts = [] for f in fonts: try: ttFonts.append(ttLib.TTFont(f)) except TTLibError as ex: print('Error: {0}: {1}'.format(f, ex)) continue v_metrics = vmetrics(ttFonts) for ttfont in ttFonts: ttfont['hhea'].ascent = v_metrics["ymax"] ttfont['OS/2'].sTypoAscender = v_metrics["ymax"] ttfont['OS/2'].usWinAscent = 
v_metrics["ymax"] ttfont['hhea'].descent = v_metrics["ymin"] ttfont['OS/2'].sTypoDescender = v_metrics["ymin"] ttfont['OS/2'].usWinDescent = abs(v_metrics["ymin"]) ttfont.save(ttfont.reader.file.name[:-4] + '.fix.ttf') else: entries = [ ('hhea', 'ascent'), ('OS/2', 'sTypoAscender'), ('OS/2', 'usWinAscent'), ('hhea', 'descent'), ('OS/2', 'sTypoDescender'), ('OS/2', 'usWinDescent'), ('hhea', 'lineGap'), ('OS/2', 'sTypoLineGap') ] for f in fonts: ttfont = ttLib.TTFont(f) print ("## {}".format(f)) for table, field in entries: print ("{} {}: {}".format(table, field, getattr(ttfont[table], field))) print() if __name__ == '__main__': main()
import copy import re import warnings from io import BytesIO from itertools import chain from urllib.parse import quote, urlencode, urljoin, urlsplit from django.conf import settings from django.core import signing from django.core.exceptions import ( DisallowedHost, ImproperlyConfigured, RequestDataTooBig, ) from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser, MultiPartParserError from django.utils.datastructures import ImmutableList, MultiValueDict from django.utils.deprecation import RemovedInDjango30Warning from django.utils.encoding import escape_uri_path, force_bytes, iri_to_uri from django.utils.http import is_same_domain, limited_parse_qsl RAISE_ERROR = object() host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$") class UnreadablePostError(IOError): pass class RawPostDataException(Exception): """ You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc.. """ pass class HttpRequest: """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): # WARNING: The `WSGIRequest` subclass doesn't call `super`. # Any variable assignment made here should also happen in # `WSGIRequest.__init__()`. self.GET = QueryDict(mutable=True) self.POST = QueryDict(mutable=True) self.COOKIES = {} self.META = {} self.FILES = MultiValueDict() self.path = '' self.path_info = '' self.method = None self.resolver_match = None self._post_parse_error = False self.content_type = None self.content_params = None def __repr__(self): if self.method is None or not self.get_full_path(): return '<%s>' % self.__class__.__name__ return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path()) def _get_raw_host(self): """ Return the HTTP host using the environment or request headers. Skip allowed hosts protection, so may return an insecure host. """ # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = self.get_port() if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) return host def get_host(self): """Return the HTTP host using the environment or request headers.""" host = self._get_raw_host() # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True. allowed_hosts = settings.ALLOWED_HOSTS if settings.DEBUG and not allowed_hosts: allowed_hosts = ['localhost', '127.0.0.1', '[::1]'] domain, port = split_domain_port(host) if domain and validate_host(domain, allowed_hosts): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain else: msg += " The domain name provided is not valid according to RFC 1034/1035." raise DisallowedHost(msg) def get_port(self): """Return the port number for the request as a string.""" if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META: port = self.META['HTTP_X_FORWARDED_PORT'] else: port = self.META['SERVER_PORT'] return str(port) def get_full_path(self, force_append_slash=False): # RFC 3986 requires query string arguments to be in the ASCII range. 
# Rather than crash if this doesn't happen, we encode defensively. return '%s%s%s' % ( escape_uri_path(self.path), '/' if force_append_slash and not self.path.endswith('/') else '', ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '' ) def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempt to return a signed cookie. If the signature fails or the cookie has expired, raise an exception, unless the `default` argument is provided, in which case return that value. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def get_raw_uri(self): """ Return an absolute URI from variables available in this request. Skip allowed hosts protection, so may return insecure URI. """ return '{scheme}://{host}{path}'.format( scheme=self.scheme, host=self._get_raw_host(), path=self.get_full_path(), ) def build_absolute_uri(self, location=None): """ Build an absolute URI from the location and the variables available in this request. If no ``location`` is specified, bulid the absolute URI using request.get_full_path(). If the location is absolute, convert it to an RFC 3987 compliant URI and return it. If location is relative or is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base URL constructed from the request variables. """ if location is None: # Make it an absolute url (but schemeless and domainless) for the # edge case that the path starts with '//'. location = '//%s' % self.get_full_path() bits = urlsplit(location) if not (bits.scheme and bits.netloc): current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme, host=self.get_host(), path=self.path) # Join the constructed URL with the provided location, which will # allow the provided ``location`` to apply query strings to the # base path as well as override the host, if it begins with // location = urljoin(current_uri, location) return iri_to_uri(location) def _get_scheme(self): """ Hook for subclasses like WSGIRequest to implement. Return 'http' by default. """ return 'http' @property def scheme(self): if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured( 'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.' ) if self.META.get(header) == value: return 'https' return self._get_scheme() def is_secure(self): return self.scheme == 'https' def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, 'GET'): del self.GET if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. 
self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Return a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise RawPostDataException("You cannot access body after reading from request's data stream") # Limit the maximum request data size that will be handled in-memory. if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE): raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.') try: self._body = self.read() except IOError as e: raise UnreadablePostError(*e.args) from e self._stream = BytesIO(self._body) return self._body def _mark_post_parse_error(self): self._post = QueryDict() self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): """Populate self._post and self._files if the content-type is a form type""" if self.method != 'POST': self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.content_type == 'multipart/form-data': if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except MultiPartParserError: # An error occurred while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occurred. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise elif self.content_type == 'application/x-www-form-urlencoded': self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() else: self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() def close(self): if hasattr(self, '_files'): for f in chain.from_iterable(l[1] for l in self._files.lists()): f.close() # File-like and iterator interface. # # Expects self._stream to be set to an appropriate source of bytes by # a corresponding request subclass (e.g. WSGIRequest). # Also when request data has already been read by request.POST or # request.body, self._stream points to a BytesIO instance # containing that data. 
def read(self, *args, **kwargs): self._read_started = True try: return self._stream.read(*args, **kwargs) except IOError as e: raise UnreadablePostError(*e.args) from e def readline(self, *args, **kwargs): self._read_started = True try: return self._stream.readline(*args, **kwargs) except IOError as e: raise UnreadablePostError(*e.args) from e def __iter__(self): while True: buf = self.readline() if not buf: break yield buf def xreadlines(self): warnings.warn( 'HttpRequest.xreadlines() is deprecated in favor of iterating the ' 'request.', RemovedInDjango30Warning, stacklevel=2, ) yield from self def readlines(self): return list(self) class QueryDict(MultiValueDict): """ A specialized MultiValueDict which represents a query string. A QueryDict can be used to represent GET or POST data. It subclasses MultiValueDict since keys in such data can be repeated, for instance in the data from a form with a <select multiple> field. By default QueryDicts are immutable, though the copy() method will always return a mutable copy. Both keys and values set on this class are converted from the given encoding (DEFAULT_CHARSET by default) to str. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string=None, mutable=False, encoding=None): super().__init__() if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding query_string = query_string or '' parse_qsl_kwargs = { 'keep_blank_values': True, 'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS, 'encoding': encoding, } if isinstance(query_string, bytes): # query_string normally contains URL-encoded data, a subset of ASCII. try: query_string = query_string.decode(encoding) except UnicodeDecodeError: # ... but some user agents are misbehaving :-( query_string = query_string.decode('iso-8859-1') for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs): self.appendlist(key, value) self._mutable = mutable @classmethod def fromkeys(cls, iterable, value='', mutable=False, encoding=None): """ Return a new QueryDict with keys (may be repeated) from an iterable and values from value. 
""" q = cls('', mutable=True, encoding=encoding) for key in iterable: q.appendlist(key, value) if not mutable: q._mutable = False return q @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super().__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in self.lists(): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in self.lists(): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super().setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super().setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super().pop(key, *args) def popitem(self): self._assert_mutable() return super().popitem() def clear(self): self._assert_mutable() super().clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super().setdefault(key, default) def copy(self): """Return a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Return an encoded string of all query string arguments. `safe` specifies characters which don't require quoting, for example:: >>> q = QueryDict(mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = force_bytes(safe, self.encoding) def encode(k, v): return '%s=%s' % ((quote(k, safe), quote(v, safe))) else: def encode(k, v): return urlencode({k: v}) for k, list_ in self.lists(): k = force_bytes(k, self.encoding) output.extend(encode(k, force_bytes(v, self.encoding)) for v in list_) return '&'.join(output) # It's neither necessary nor appropriate to use # django.utils.encoding.force_text for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Convert bytes objects to strings, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Return any non-bytes objects without change. """ if isinstance(s, bytes): return str(s, encoding, 'replace') else: return s def split_domain_port(host): """ Return a (domain, port) tuple from a given host. Returned domain is lower-cased. If the host is invalid, the domain will be empty. """ host = host.lower() if not host_validation_re.match(host): return '', '' if host[-1] == ']': # It's an IPv6 address without a port. 
return host, '' bits = host.rsplit(':', 1) domain, port = bits if len(bits) == 2 else (bits[0], '') # Remove a trailing dot (if present) from the domain. domain = domain[:-1] if domain.endswith('.') else domain return domain, port def validate_host(host, allowed_hosts): """ Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of ``allowed_hosts``. Any pattern beginning with a period matches a domain and all its subdomains (e.g. ``.example.com`` matches ``example.com`` and any subdomain), ``*`` matches anything, and anything else must match exactly. Note: This function assumes that the given host is lower-cased and has already had the port, if any, stripped off. Return ``True`` for a valid host, ``False`` otherwise. """ for pattern in allowed_hosts: if pattern == '*' or is_same_domain(host, pattern): return True return False
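
# Illustrative usage sketch (editor's addition, not part of Django): how
# split_domain_port() and validate_host() above fit together when checking an
# incoming Host header. The host and pattern values are hypothetical, and the
# sketch assumes the host_validation_re / is_same_domain helpers imported at
# the top of this module are available.
def _example_host_validation():
    domain, port = split_domain_port('www.example.com:8000')
    # domain == 'www.example.com', port == '8000'
    allowed_hosts = ['.example.com', 'localhost']
    # A leading dot matches the domain and all of its subdomains, so this
    # returns True.
    return validate_host(domain, allowed_hosts)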
# # This source file is part of the EdgeDB open source project. # # Copyright 2020-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import annotations from typing import * import collections import functools import json import uuid import immutables from edb.common import verutils from edb.common import uuidgen from edb.schema import abc as s_abc from edb.schema import expr as s_expr from edb.schema import functions as s_func from edb.schema import name as s_name from edb.schema import objects as s_obj from edb.schema import operators as s_oper from edb.schema import schema as s_schema from . import structure as sr_struct SchemaClassLayout = Dict[Type[s_obj.Object], sr_struct.SchemaTypeLayout] def parse_into( base_schema: s_schema.Schema, schema: s_schema.FlatSchema, data: Union[str, bytes], schema_class_layout: SchemaClassLayout, ) -> s_schema.FlatSchema: """Parse JSON-encoded schema objects and populate the schema with them. Args: schema: A schema instance to use as a starting point. data: A JSON-encoded schema object data as returned by an introspection query. schema_class_layout: A mapping describing schema class layout in the reflection, as returned from :func:`schema.reflection.structure.generate_structure`. Returns: A schema instance including objects encoded in the provided JSON sequence. 
""" id_to_type = {} id_to_data = {} name_to_id = {} shortname_to_id = collections.defaultdict(set) globalname_to_id = {} dict_of_dicts: Callable[ [], Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]], ] = functools.partial(collections.defaultdict, dict) # type: ignore refs_to: Dict[ uuid.UUID, Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]] ] = collections.defaultdict(dict_of_dicts) objects: Dict[uuid.UUID, Tuple[s_obj.Object, Dict[str, Any]]] = {} for entry in json.loads(data): _, _, clsname = entry['_tname'].rpartition('::') mcls = s_obj.ObjectMeta.maybe_get_schema_class(clsname) if mcls is None: raise ValueError( f'unexpected type in schema reflection: {clsname}') objid = uuidgen.UUID(entry['id']) objects[objid] = (mcls._create_from_id(objid), entry) refdict_updates = {} for objid, (obj, entry) in objects.items(): mcls = type(obj) name = s_name.name_from_string(entry['name__internal']) layout = schema_class_layout[mcls] if isinstance(obj, s_obj.QualifiedObject): name_to_id[name] = objid else: globalname_to_id[mcls, name] = objid if isinstance(obj, (s_func.Function, s_oper.Operator)): shortname = mcls.get_shortname_static(name) shortname_to_id[mcls, shortname].add(objid) id_to_type[objid] = type(obj).__name__ all_fields = mcls.get_schema_fields() objdata: List[Any] = [None] * len(all_fields) val: Any for k, v in entry.items(): desc = layout.get(k) if desc is None: continue fn = desc.fieldname field = all_fields.get(fn) if field is None: continue findex = field.index if desc.storage is not None: if v is None: pass elif desc.storage.ptrkind == 'link': refid = uuidgen.UUID(v['id']) newobj = objects.get(refid) if newobj is not None: val = newobj[0] else: val = base_schema.get_by_id(refid) objdata[findex] = val.schema_reduce() refs_to[val.id][mcls, fn][objid] = None elif desc.storage.ptrkind == 'multi link': ftype = mcls.get_field(fn).type if issubclass(ftype, s_obj.ObjectDict): refids = ftype._container( uuidgen.UUID(e['value']) for e in v) refkeys = tuple(e['name'] for e in v) val = ftype(refids, refkeys, _private_init=True) else: refids = ftype._container( uuidgen.UUID(e['id']) for e in v) val = ftype(refids, _private_init=True) objdata[findex] = val.schema_reduce() for refid in refids: refs_to[refid][mcls, fn][objid] = None elif desc.storage.shadow_ptrkind: val = entry[f'{k}__internal'] ftype = mcls.get_field(fn).type if val is not None and type(val) is not ftype: if issubclass(ftype, s_expr.Expression): val = _parse_expression(val) for refid in val.refs.ids(schema): refs_to[refid][mcls, fn][objid] = None elif issubclass(ftype, s_expr.ExpressionList): exprs = [] for e_dict in val: e = _parse_expression(e_dict) assert e.refs is not None for refid in e.refs.ids(schema): refs_to[refid][mcls, fn][objid] = None exprs.append(e) val = ftype(exprs) elif issubclass(ftype, s_obj.Object): val = val.id elif issubclass(ftype, s_name.Name): val = s_name.name_from_string(val) else: val = ftype(val) if issubclass(ftype, s_abc.Reducible): val = val.schema_reduce() objdata[findex] = val else: ftype = mcls.get_field(fn).type if type(v) is not ftype: if issubclass(ftype, verutils.Version): objdata[findex] = _parse_version(v) else: objdata[findex] = ftype(v) else: objdata[findex] = v elif desc.is_refdict: ftype = mcls.get_field(fn).type refids = ftype._container(uuidgen.UUID(e['id']) for e in v) for refid in refids: refs_to[refid][mcls, fn][objid] = None val = ftype(refids, _private_init=True) objdata[findex] = val.schema_reduce() if desc.properties: for e_dict in v: 
refdict_updates[uuidgen.UUID(e_dict['id'])] = { p: pv for p in desc.properties if (pv := e_dict[f'@{p}']) is not None } id_to_data[objid] = tuple(objdata) for objid, updates in refdict_updates.items(): if updates: sclass = s_obj.ObjectMeta.get_schema_class(id_to_type[objid]) updated_data = list(id_to_data[objid]) for fn, v in updates.items(): field = sclass.get_schema_field(fn) updated_data[field.index] = v id_to_data[objid] = tuple(updated_data) with schema._refs_to.mutate() as mm: for referred_id, refdata in refs_to.items(): try: refs = mm[referred_id] except KeyError: refs = immutables.Map(( (k, immutables.Map(r)) for k, r in refdata.items() )) else: refs_update = {} for k, referrers in refdata.items(): try: rt = refs[k] except KeyError: rt = immutables.Map(referrers) else: rt = rt.update(referrers) refs_update[k] = rt refs = refs.update(refs_update) mm[referred_id] = refs schema = schema._replace( id_to_type=schema._id_to_type.update(id_to_type), id_to_data=schema._id_to_data.update(id_to_data), name_to_id=schema._name_to_id.update(name_to_id), shortname_to_id=schema._shortname_to_id.update( (k, frozenset(v)) for k, v in shortname_to_id.items() ), globalname_to_id=schema._globalname_to_id.update(globalname_to_id), refs_to=mm.finish(), ) return schema def _parse_expression(val: Dict[str, Any]) -> s_expr.Expression: refids = frozenset( uuidgen.UUID(r) for r in val['refs'] ) return s_expr.Expression( text=val['text'], refs=s_obj.ObjectSet( refids, _private_init=True, ) ) def _parse_version(val: Dict[str, Any]) -> verutils.Version: return verutils.Version( major=val['major'], minor=val['minor'], stage=getattr(verutils.VersionStage, val['stage'].upper()), stage_no=val['stage_no'], local=tuple(val['local']), )
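
# Illustrative sketch (editor's addition, not part of the EdgeDB sources): the
# JSON shape that _parse_version() above expects, reconstructed from the fields
# it reads. The concrete values are hypothetical, and the sketch assumes
# verutils.VersionStage defines a FINAL member.
def _example_parse_version() -> verutils.Version:
    raw = {
        'major': 1,
        'minor': 0,
        'stage': 'final',   # upper-cased and looked up on verutils.VersionStage
        'stage_no': 0,
        'local': [],
    }
    return _parse_version(raw)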
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.core import serializers
from django.test.client import Client
from transifex.languages.models import Language
from transifex.resources.models import Resource, Translation
from transifex.txcommon.tests.base import BaseTestCase
from django.utils import simplejson as json


class PermissionsTest(BaseTestCase):
    """Test view permissions"""

    def setUp(self):
        super(PermissionsTest, self).setUp()

    def test_anon(self):
        """
        Test anonymous user
        """
        # Delete Translations
        page_url = reverse('resource_translations_delete',
            args=[self.project.slug, self.resource.slug, self.language.code])
        resp = self.client['anonymous'].get(page_url)
        self.assertEqual(resp.status_code, 403)
        resp = self.client['anonymous'].post(page_url)
        self.assertEqual(resp.status_code, 403)

        # Check if resource gets deleted successfully
        page_url = reverse('resource_delete',
            args=[self.project.slug, self.resource.slug])
        resp = self.client['anonymous'].get(page_url)
        self.assertEqual(resp.status_code, 403)
        resp = self.client['anonymous'].post(page_url)
        self.assertEqual(resp.status_code, 403)

        # Check if user is able to access resource details
        resp = self.client['anonymous'].get(reverse('resource_detail',
            args=[self.project.slug, self.resource.slug]))
        self.assertEqual(resp.status_code, 200)

        # Check if user is able to access resource edit
        page_url = reverse('resource_edit',
            args=[self.project.slug, self.resource.slug])
        resp = self.client['anonymous'].get(page_url)
        self.assertEqual(resp.status_code, 403)
        resp = self.client['anonymous'].post(page_url)
        self.assertEqual(resp.status_code, 403)

        # Check the popup
        page_url = reverse('resource_actions',
            args=[self.project.slug, self.resource.slug, self.language.code])
        resp = self.client['anonymous'].get(page_url)
        self.assertEqual(resp.status_code, 200)
        resp = self.client['anonymous'].post(page_url)
        self.assertEqual(resp.status_code, 200)

        # Check the ajax view which returns more resources in project detail page.
resp = self.client['anonymous'].post(reverse('project_resources', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['anonymous'].post(reverse('project_resources_more', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) # Check that anonymous user is redirected to signin page page_url = reverse('clone_translate', args=[self.project.slug, self.resource.slug, self.language_en.code, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) # Check download file perms page_url = reverse('download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) #PRIVATE PROJECT CHECKS # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if resource gets deleted successfully page_url = reverse('resource_delete', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource details resp = self.client['anonymous'].get(reverse('resource_detail', args=[self.project_private.slug, self.resource_private.slug])) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the popup page_url = reverse('resource_actions', args=[self.project_private.slug, self.resource_private.slug, self.language_ar.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['anonymous'].post(reverse('project_resources', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 403) resp = self.client['anonymous'].post(reverse('project_resources_more', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 403) # Check that anonymous user is redirected to signin page page_url = reverse('clone_translate', args=[self.project_private.slug, self.resource_private.slug, self.language_en.code, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) # Check download file perms page_url = reverse('download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['anonymous'].get(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) resp = self.client['anonymous'].post(page_url) self.assertEqual(resp.status_code, 302) self.assertRedirects(resp, '/accounts/signin/?next=%s' % page_url) def test_registered(self): """ Test random registered user """ # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project.slug, self.resource.slug,self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if resource gets deleted successfully page_url = reverse('resource_delete', args=[self.project.slug, self.resource.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project.slug, self.resource.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 200) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project.slug, self.resource.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the popup page_url = reverse('resource_actions', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 200) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['registered'].post(reverse('project_resources', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['registered'].post(reverse('project_resources_more', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) # Check clone language perms page_url = reverse('clone_translate', args=[self.project.slug, self.resource.slug, self.language_en.code, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check 'lock and get translation file' perms page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check download file perms page_url = reverse('download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 302) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 302) # PRIVATE PROJECT CHECKS # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if resource gets deleted successfully page_url = reverse('resource_delete', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the popup page_url = reverse('resource_actions', args=[self.project_private.slug, self.resource_private.slug, self.language_ar.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['registered'].post(reverse('project_resources', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(reverse('project_resources_more', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 403) # Check clone language perms page_url = reverse('clone_translate', args=[self.project_private.slug, self.resource_private.slug, self.language_en.code, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check 'lock and get translation file' perms page_url = reverse('lock_and_download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) # Check download file perms page_url = reverse('download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['registered'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['registered'].post(page_url) self.assertEqual(resp.status_code, 403) def test_team_member(self): """ Test team_member permissions """ # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project.slug, self.resource.slug,self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if resource gets deleted page_url = reverse('resource_delete', args=[self.project.slug, self.resource.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project.slug, self.resource.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project.slug, self.resource.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the popup page_url = reverse('resource_actions', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['team_member'].post(reverse('project_resources', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(reverse('project_resources_more', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) # Check clone language perms page_url = reverse('clone_translate', args=[self.project.slug, self.resource.slug, self.language_en.code, self.language.code]) resp = self.client['team_member'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check cloning to a non team-member language page_url = reverse('clone_translate', args=[self.project.slug, self.resource.slug, self.language_en.code, self.language_ar.code]) resp = self.client['team_member'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 403) # Check lock and get translation file perms for resource not accepting # translations. self.resource.accept_translations = False self.resource.save() page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) self.resource.accept_translations = True self.resource.save() # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check download file perms page_url = reverse('download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 302) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 302) # PRIVATE PROJECT CHECKS # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if resource gets deleted page_url = reverse('resource_delete', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 403) # Check the popup page_url = reverse('resource_actions', 
args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check the ajax view which returns more resources in project detail page. resp = self.client['team_member'].post(reverse('project_resources', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(reverse('project_resources_more', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 200) # Check clone language perms page_url = reverse('clone_translate', args=[self.project_private.slug, self.resource_private.slug, self.language_en.code, self.language.code]) resp = self.client['team_member'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check cloning to a non team-member language page_url = reverse('clone_translate', args=[self.project_private.slug, self.resource_private.slug, self.language_en.code, self.language_ar.code]) resp = self.client['team_member'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 403) resp = self.client['team_member'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 403) # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 200) # Check download file perms page_url = reverse('download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['team_member'].get(page_url) self.assertEqual(resp.status_code, 302) resp = self.client['team_member'].post(page_url) self.assertEqual(resp.status_code, 302) def test_maintainer(self): """ Test maintainer permissions """ # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project.slug, self.resource.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project.slug, self.resource.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url, follow=True) self.assertEqual(resp.status_code, 200) # Check the popup page_url = reverse('resource_actions', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['maintainer'].post(reverse('project_resources', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(reverse('project_resources_more', args=[self.project.slug, 5])) self.assertEqual(resp.status_code, 200) # Check clone language perms page_url = reverse('clone_translate', args=[self.project.slug, self.resource.slug, self.language_en.code, self.language.code]) resp = self.client['maintainer'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check lock and get translation file perms for resource not accepting # translations. self.resource.accept_translations = False self.resource.save() page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 403) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 403) self.resource.accept_translations = True self.resource.save() # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check download file perms page_url = reverse('download_translation', args=[self.project.slug, self.resource.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 302) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 302) # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project.slug, self.resource.slug,self.language.code]) resp = self.client['maintainer'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check if resource gets deleted successfully page_url = reverse('resource_delete', args=[self.project.slug, self.resource.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url,follow=True) self.assertEqual(resp.status_code, 200) # PRIVATE PROJECT CHECKS # Check if user is able to access resource details page_url = reverse('resource_detail', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check if user is able to access resource edit page_url = reverse('resource_edit', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url, follow=True) self.assertEqual(resp.status_code, 200) # Check the popup page_url = reverse('resource_actions', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check the ajax view which returns more resources in project detail page. 
resp = self.client['maintainer'].post(reverse('project_resources', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(reverse('project_resources_more', args=[self.project_private.slug, 5])) self.assertEqual(resp.status_code, 200) # Check clone language perms page_url = reverse('clone_translate', args=[self.project_private.slug, self.resource_private.slug, self.language_en.code, self.language.code]) resp = self.client['maintainer'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check lock and get translation file perms page_url = reverse('lock_and_download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 200) # Check download file perms page_url = reverse('download_translation', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 302) resp = self.client['maintainer'].post(page_url) self.assertEqual(resp.status_code, 302) # Delete Translations page_url = reverse('resource_translations_delete', args=[self.project_private.slug, self.resource_private.slug, self.language.code]) resp = self.client['maintainer'].get(page_url ,follow=True) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url ,follow=True) self.assertEqual(resp.status_code, 200) # Check if resource gets deleted successfully page_url = reverse('resource_delete', args=[self.project_private.slug, self.resource_private.slug]) resp = self.client['maintainer'].get(page_url) self.assertEqual(resp.status_code, 200) resp = self.client['maintainer'].post(page_url,follow=True) self.assertEqual(resp.status_code, 200)
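
# Editor's sketch (not part of the original suite): the repeated GET/POST
# status-code assertions above could be collapsed into a small helper such as
# this hypothetical mixin, which the test classes could inherit alongside
# BaseTestCase.
class StatusAssertionsMixin(object):

    def assert_get_post_status(self, client, page_url, expected, follow=False):
        """Assert that both GET and POST on page_url return the expected status."""
        resp = client.get(page_url, follow=follow)
        self.assertEqual(resp.status_code, expected)
        resp = client.post(page_url, follow=follow)
        self.assertEqual(resp.status_code, expected)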
import pytest def _type_length(): # Not a fixture as we need to parameterize tests on this from ..loadxl import ORDER from pkg_resources import resource_stream import codecs import json utf8 = codecs.getreader("utf-8") return { name: len(json.load(utf8(resource_stream('encoded', 'tests/data/inserts/%s.json' % name)))) for name in ORDER } TYPE_LENGTH = _type_length() PUBLIC_COLLECTIONS = [ 'source', 'platform', 'treatment', 'lab', 'award', 'target', 'organism', ] def test_home(anonhtmltestapp): res = anonhtmltestapp.get('/', status=200) assert res.body.startswith(b'<!DOCTYPE html>') def test_home_json(testapp): res = testapp.get('/', status=200) assert res.json['@type'] def test_vary_html(anonhtmltestapp): res = anonhtmltestapp.get('/', status=200) assert res.vary is not None assert 'Accept' in res.vary def test_vary_json(anontestapp): res = anontestapp.get('/', status=200) assert res.vary is not None assert 'Accept' in res.vary @pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user']) def test_collections_anon(workbook, anontestapp, item_type): res = anontestapp.get('/' + item_type).follow(status=200) assert '@graph' in res.json @pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user']) def test_html_collections_anon(workbook, anonhtmltestapp, item_type): res = anonhtmltestapp.get('/' + item_type).follow(status=200) assert res.body.startswith(b'<!DOCTYPE html>') @pytest.mark.parametrize('item_type', TYPE_LENGTH) def test_html_collections(workbook, htmltestapp, item_type): res = htmltestapp.get('/' + item_type).follow(status=200) assert res.body.startswith(b'<!DOCTYPE html>') @pytest.mark.slow @pytest.mark.parametrize('item_type', TYPE_LENGTH) def test_html_pages(workbook, testapp, htmltestapp, item_type): res = testapp.get('/%s?limit=all' % item_type).follow(status=200) for item in res.json['@graph']: res = htmltestapp.get(item['@id']) assert res.body.startswith(b'<!DOCTYPE html>') @pytest.mark.slow @pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user']) def test_html_server_pages(workbook, item_type, server): from webtest import TestApp testapp = TestApp(server) res = testapp.get( '/%s?limit=all' % item_type, headers={'Accept': 'application/json'}, ).follow( status=200, headers={'Accept': 'application/json'}, ) for item in res.json['@graph']: res = testapp.get(item['@id'], status=200) assert res.body.startswith(b'<!DOCTYPE html>') assert b'Internal Server Error' not in res.body @pytest.mark.parametrize('item_type', TYPE_LENGTH) def test_json(testapp, item_type): res = testapp.get('/' + item_type).follow(status=200) assert res.json['@type'] def test_json_basic_auth(anonhtmltestapp): from base64 import b64encode from pyramid.compat import ascii_native_ url = '/' value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass')) res = anonhtmltestapp.get(url, headers={'Authorization': value}, status=401) assert res.content_type == 'application/json' def _test_antibody_approval_creation(testapp): from urllib.parse import urlparse new_antibody = {'foo': 'bar'} res = testapp.post_json('/antibodies/', new_antibody, status=201) assert res.location assert '/profiles/result' in res.json['@type']['profile'] assert res.json['@graph'] == [{'href': urlparse(res.location).path}] res = testapp.get(res.location, status=200) assert '/profiles/antibody_approval' in res.json['@type'] data = res.json for key in new_antibody: assert data[key] == new_antibody[key] res = testapp.get('/antibodies/', status=200) assert 
len(res.json['@graph']) == 1 def test_load_sample_data( analysis_step, analysis_step_run, antibody_characterization, antibody_lot, award, biosample, biosample_characterization, construct, dataset, document, experiment, file, lab, library, mouse_donor, organism, pipeline, publication, quality_metric, replicate, rnai, software, software_version, source, submitter, target, ): assert True, 'Fixtures have loaded sample data' @pytest.mark.slow @pytest.mark.parametrize(('item_type', 'length'), TYPE_LENGTH.items()) def test_load_workbook(workbook, testapp, item_type, length): # testdata must come before testapp in the funcargs list for their # savepoints to be correctly ordered. res = testapp.get('/%s/?limit=all' % item_type).maybe_follow(status=200) assert len(res.json['@graph']) == length @pytest.mark.slow def test_collection_limit(workbook, testapp): res = testapp.get('/antibodies/?limit=2', status=200) assert len(res.json['@graph']) == 2 def test_collection_post(testapp): item = { 'name': 'human', 'scientific_name': 'Homo sapiens', 'taxon_id': '9606', } return testapp.post_json('/organism', item, status=201) def test_collection_post_bad_json(testapp): item = {'foo': 'bar'} res = testapp.post_json('/organism', item, status=422) assert res.json['errors'] def test_collection_post_malformed_json(testapp): item = '{' headers = {'Content-Type': 'application/json'} res = testapp.post('/organism', item, status=400, headers=headers) assert res.json['detail'].startswith('Expecting') def test_collection_post_missing_content_type(testapp): item = '{}' testapp.post('/organism', item, status=415) def test_collection_post_bad_(anontestapp): from base64 import b64encode from pyramid.compat import ascii_native_ value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass')) anontestapp.post_json('/organism', {}, headers={'Authorization': value}, status=401) def test_collection_actions_filtered_by_permission(workbook, testapp, anontestapp): res = testapp.get('/pages/') assert any(action for action in res.json.get('actions', []) if action['name'] == 'add') res = anontestapp.get('/pages/') assert not any(action for action in res.json.get('actions', []) if action['name'] == 'add') def test_item_actions_filtered_by_permission(testapp, authenticated_testapp, source): location = source['@id'] res = testapp.get(location) assert any(action for action in res.json.get('actions', []) if action['name'] == 'edit') res = authenticated_testapp.get(location) assert not any(action for action in res.json.get('actions', []) if action['name'] == 'edit') def test_collection_put(testapp, execute_counter): initial = { 'name': 'human', 'scientific_name': 'Homo sapiens', 'taxon_id': '9606', } item_url = testapp.post_json('/organism', initial).location with execute_counter.expect(1): item = testapp.get(item_url).json for key in initial: assert item[key] == initial[key] update = { 'name': 'mouse', 'scientific_name': 'Mus musculus', 'taxon_id': '10090', } testapp.put_json(item_url, update, status=200) res = testapp.get('/' + item['uuid']).follow().json for key in update: assert res[key] == update[key] def test_post_duplicate_uuid(testapp, mouse): item = { 'uuid': mouse['uuid'], 'name': 'human', 'scientific_name': 'Homo sapiens', 'taxon_id': '9606', } testapp.post_json('/organism', item, status=409) def test_user_effective_principals(submitter, lab, anontestapp, execute_counter): email = submitter['email'] with execute_counter.expect(1): res = anontestapp.get('/@@testing-user', extra_environ={'REMOTE_USER': str(email)}) assert 
sorted(res.json['effective_principals']) == [ 'group.submitter', 'lab.%s' % lab['uuid'], 'remoteuser.%s' % email, 'submits_for.%s' % lab['uuid'], 'system.Authenticated', 'system.Everyone', 'userid.%s' % submitter['uuid'], 'viewing_group.ENCODE', ] def test_page_toplevel(workbook, anontestapp): res = anontestapp.get('/test-section/', status=200) assert res.json['@id'] == '/test-section/' res = anontestapp.get('/pages/test-section/', status=301) assert res.location == 'http://localhost/test-section/' def test_page_nested(workbook, anontestapp): res = anontestapp.get('/test-section/subpage/', status=200) assert res.json['@id'] == '/test-section/subpage/' def test_page_homepage(workbook, anontestapp): res = anontestapp.get('/pages/homepage/', status=200) assert res.json['canonical_uri'] == '/' res = anontestapp.get('/', status=200) assert 'default_page' in res.json assert res.json['default_page']['@id'] == '/pages/homepage/' def test_page_collection_default(workbook, anontestapp): res = anontestapp.get('/pages/images/', status=200) assert res.json['canonical_uri'] == '/images/' res = anontestapp.get('/images/', status=200) assert 'default_page' in res.json assert res.json['default_page']['@id'] == '/pages/images/' def test_antibody_redirect(testapp, antibody_approval): res = testapp.get('/antibodies/%s/?frame=edit' % antibody_approval['uuid'], status=200) assert 'antibody' in res.json res = testapp.get('/antibodies/%s/' % antibody_approval['uuid']).follow(status=200) assert res.json['@type'] == ['AntibodyLot', 'Item'] def test_jsonld_context(testapp): res = testapp.get('/terms/') assert res.json def test_jsonld_term(testapp): res = testapp.get('/terms/submitted_by') assert res.json @pytest.mark.slow @pytest.mark.parametrize('item_type', TYPE_LENGTH) def test_index_data_workbook(workbook, testapp, indexer_testapp, item_type): res = testapp.get('/%s?limit=all' % item_type).follow(status=200) for item in res.json['@graph']: indexer_testapp.get(item['@id'] + '@@index-data') @pytest.mark.parametrize('item_type', TYPE_LENGTH) def test_profiles(testapp, item_type): from jsonschema import Draft4Validator res = testapp.get('/profiles/%s.json' % item_type).maybe_follow(status=200) errors = Draft4Validator.check_schema(res.json) assert not errors
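
# Illustrative sketch (editor's addition, not part of the original module):
# minimal standalone use of Draft4Validator.check_schema(), mirroring
# test_profiles() above. check_schema() raises jsonschema.SchemaError on a
# malformed schema and returns None otherwise, which is why the
# "assert not errors" in test_profiles() passes for every valid schema.
# The schema literal below is hypothetical.
def _example_check_schema():
    from jsonschema import Draft4Validator
    schema = {'type': 'object', 'properties': {'name': {'type': 'string'}}}
    Draft4Validator.check_schema(schema)  # no exception: the schema is well-formed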
"""ACME AuthHandler.""" import itertools import logging import time import zope.component from acme import challenges from acme import messages from letsencrypt import achallenges from letsencrypt import constants from letsencrypt import errors from letsencrypt import interfaces logger = logging.getLogger(__name__) class AuthHandler(object): """ACME Authorization Handler for a client. :ivar dv_auth: Authenticator capable of solving :class:`~acme.challenges.DVChallenge` types :type dv_auth: :class:`letsencrypt.interfaces.IAuthenticator` :ivar cont_auth: Authenticator capable of solving :class:`~acme.challenges.ContinuityChallenge` types :type cont_auth: :class:`letsencrypt.interfaces.IAuthenticator` :ivar acme.client.Client acme: ACME client API. :ivar account: Client's Account :type account: :class:`letsencrypt.account.Account` :ivar dict authzr: ACME Authorization Resource dict where keys are domains and values are :class:`acme.messages.AuthorizationResource` :ivar list dv_c: DV challenges in the form of :class:`letsencrypt.achallenges.AnnotatedChallenge` :ivar list cont_c: Continuity challenges in the form of :class:`letsencrypt.achallenges.AnnotatedChallenge` """ def __init__(self, dv_auth, cont_auth, acme, account): self.dv_auth = dv_auth self.cont_auth = cont_auth self.acme = acme self.account = account self.authzr = dict() # List must be used to keep responses straight. self.dv_c = [] self.cont_c = [] def get_authorizations(self, domains, best_effort=False): """Retrieve all authorizations for challenges. :param set domains: Domains for authorization :param bool best_effort: Whether or not all authorizations are required (this is useful in renewal) :returns: tuple of lists of authorization resources. Takes the form of (`completed`, `failed`) :rtype: tuple :raises .AuthorizationError: If unable to retrieve all authorizations """ for domain in domains: self.authzr[domain] = self.acme.request_domain_challenges( domain, self.account.regr.new_authzr_uri) self._choose_challenges(domains) # While there are still challenges remaining... while self.dv_c or self.cont_c: cont_resp, dv_resp = self._solve_challenges() logger.info("Waiting for verification...") # Send all Responses - this modifies dv_c and cont_c self._respond(cont_resp, dv_resp, best_effort) # Just make sure all decisions are complete. self.verify_authzr_complete() # Only return valid authorizations return [authzr for authzr in self.authzr.values() if authzr.body.status == messages.STATUS_VALID] def _choose_challenges(self, domains): """Retrieve necessary challenges to satisfy server.""" logger.info("Performing the following challenges:") for dom in domains: path = gen_challenge_path( self.authzr[dom].body.challenges, self._get_chall_pref(dom), self.authzr[dom].body.combinations) dom_cont_c, dom_dv_c = self._challenge_factory( dom, path) self.dv_c.extend(dom_dv_c) self.cont_c.extend(dom_cont_c) def _solve_challenges(self): """Get Responses for challenges from authenticators.""" cont_resp = [] dv_resp = [] try: if self.cont_c: cont_resp = self.cont_auth.perform(self.cont_c) if self.dv_c: dv_resp = self.dv_auth.perform(self.dv_c) # This will catch both specific types of errors. 
except errors.AuthorizationError: logger.critical("Failure in setting up challenges.") logger.info("Attempting to clean up outstanding challenges...") self._cleanup_challenges() raise assert len(cont_resp) == len(self.cont_c) assert len(dv_resp) == len(self.dv_c) return cont_resp, dv_resp def _respond(self, cont_resp, dv_resp, best_effort): """Send/Receive confirmation of all challenges. .. note:: This method also cleans up the auth_handler state. """ # TODO: chall_update is a dirty hack to get around acme-spec #105 chall_update = dict() active_achalls = [] active_achalls.extend( self._send_responses(self.dv_c, dv_resp, chall_update)) active_achalls.extend( self._send_responses(self.cont_c, cont_resp, chall_update)) # Check for updated status... try: self._poll_challenges(chall_update, best_effort) finally: # This removes challenges from self.dv_c and self.cont_c self._cleanup_challenges(active_achalls) def _send_responses(self, achalls, resps, chall_update): """Send responses and make sure errors are handled. :param dict chall_update: parameter that is updated to hold authzr -> list of outstanding solved annotated challenges """ active_achalls = [] for achall, resp in itertools.izip(achalls, resps): # XXX: make sure that all achalls, including those # corresponding to None or False returned from # Authenticator are removed from the queue and thus avoid # infinite loop active_achalls.append(achall) # Don't send challenges for None and False authenticator responses if resp is not None and resp: self.acme.answer_challenge(achall.challb, resp) # TODO: answer_challenge returns challr, with URI, # that can be used in _find_updated_challr # comparisons... if achall.domain in chall_update: chall_update[achall.domain].append(achall) else: chall_update[achall.domain] = [achall] return active_achalls def _poll_challenges( self, chall_update, best_effort, min_sleep=3, max_rounds=15): """Wait for all challenge results to be determined.""" dom_to_check = set(chall_update.keys()) comp_domains = set() rounds = 0 while dom_to_check and rounds < max_rounds: # TODO: Use retry-after... time.sleep(min_sleep) all_failed_achalls = set() for domain in dom_to_check: comp_achalls, failed_achalls = self._handle_check( domain, chall_update[domain]) if len(comp_achalls) == len(chall_update[domain]): comp_domains.add(domain) elif not failed_achalls: for achall, _ in comp_achalls: chall_update[domain].remove(achall) # We failed some challenges... damage control else: # Right now... just assume a loss and carry on... if best_effort: comp_domains.add(domain) else: all_failed_achalls.update( updated for _, updated in failed_achalls) if all_failed_achalls: _report_failed_challs(all_failed_achalls) raise errors.FailedChallenges(all_failed_achalls) dom_to_check -= comp_domains comp_domains.clear() rounds += 1 def _handle_check(self, domain, achalls): """Returns tuple of ('completed', 'failed').""" completed = [] failed = [] self.authzr[domain], _ = self.acme.poll(self.authzr[domain]) if self.authzr[domain].body.status == messages.STATUS_VALID: return achalls, [] # Note: if the whole authorization is invalid, the individual failed # challenges will be determined here... for achall in achalls: updated_achall = achall.update(challb=self._find_updated_challb( self.authzr[domain], achall)) # This does nothing for challenges that have yet to be decided yet. 
if updated_achall.status == messages.STATUS_VALID: completed.append((achall, updated_achall)) elif updated_achall.status == messages.STATUS_INVALID: failed.append((achall, updated_achall)) return completed, failed def _find_updated_challb(self, authzr, achall): # pylint: disable=no-self-use """Find updated challenge body within Authorization Resource. .. warning:: This assumes only one instance of type of challenge in each challenge resource. :param .AuthorizationResource authzr: Authorization Resource :param .AnnotatedChallenge achall: Annotated challenge for which to get status """ for authzr_challb in authzr.body.challenges: if type(authzr_challb.chall) is type(achall.challb.chall): return authzr_challb raise errors.AuthorizationError( "Target challenge not found in authorization resource") def _get_chall_pref(self, domain): """Return list of challenge preferences. :param str domain: domain for which you are requesting preferences """ # Make sure to make a copy... chall_prefs = [] chall_prefs.extend(self.cont_auth.get_chall_pref(domain)) chall_prefs.extend(self.dv_auth.get_chall_pref(domain)) return chall_prefs def _cleanup_challenges(self, achall_list=None): """Cleanup challenges. If achall_list is not provided, cleanup all achallenges. """ logger.info("Cleaning up challenges") if achall_list is None: dv_c = self.dv_c cont_c = self.cont_c else: dv_c = [achall for achall in achall_list if isinstance(achall.chall, challenges.DVChallenge)] cont_c = [achall for achall in achall_list if isinstance( achall.chall, challenges.ContinuityChallenge)] if dv_c: self.dv_auth.cleanup(dv_c) for achall in dv_c: self.dv_c.remove(achall) if cont_c: self.cont_auth.cleanup(cont_c) for achall in cont_c: self.cont_c.remove(achall) def verify_authzr_complete(self): """Verifies that all authorizations have been decided. :returns: Whether all authzr are complete :rtype: bool """ for authzr in self.authzr.values(): if (authzr.body.status != messages.STATUS_VALID and authzr.body.status != messages.STATUS_INVALID): raise errors.AuthorizationError("Incomplete authorizations") def _challenge_factory(self, domain, path): """Construct Namedtuple Challenges :param str domain: domain of the enrollee :param list path: List of indices from `challenges`. :returns: dv_chall, list of DVChallenge type :class:`letsencrypt.achallenges.Indexed` cont_chall, list of ContinuityChallenge type :class:`letsencrypt.achallenges.Indexed` :rtype: tuple :raises .errors.Error: if challenge type is not recognized """ dv_chall = [] cont_chall = [] for index in path: challb = self.authzr[domain].body.challenges[index] chall = challb.chall achall = challb_to_achall(challb, self.account.key, domain) if isinstance(chall, challenges.ContinuityChallenge): cont_chall.append(achall) elif isinstance(chall, challenges.DVChallenge): dv_chall.append(achall) return cont_chall, dv_chall def challb_to_achall(challb, account_key, domain): """Converts a ChallengeBody object to an AnnotatedChallenge. 
:param .ChallengeBody challb: ChallengeBody :param .JWK account_key: Authorized Account Key :param str domain: Domain of the challb :returns: Appropriate AnnotatedChallenge :rtype: :class:`letsencrypt.achallenges.AnnotatedChallenge` """ chall = challb.chall logger.info("%s challenge for %s", chall.typ, domain) if isinstance(chall, challenges.DVSNI): return achallenges.DVSNI( challb=challb, domain=domain, account_key=account_key) elif isinstance(chall, challenges.SimpleHTTP): return achallenges.SimpleHTTP( challb=challb, domain=domain, account_key=account_key) elif isinstance(chall, challenges.DNS): return achallenges.DNS(challb=challb, domain=domain) elif isinstance(chall, challenges.RecoveryContact): return achallenges.RecoveryContact( challb=challb, domain=domain) elif isinstance(chall, challenges.ProofOfPossession): return achallenges.ProofOfPossession( challb=challb, domain=domain) else: raise errors.Error( "Received unsupported challenge of type: %s", chall.typ) def gen_challenge_path(challbs, preferences, combinations): """Generate a plan to get authority over the identity. .. todo:: This can be possibly be rewritten to use resolved_combinations. :param tuple challbs: A tuple of challenges (:class:`acme.messages.Challenge`) from :class:`acme.messages.AuthorizationResource` to be fulfilled by the client in order to prove possession of the identifier. :param list preferences: List of challenge preferences for domain (:class:`acme.challenges.Challenge` subclasses) :param tuple combinations: A collection of sets of challenges from :class:`acme.messages.Challenge`, each of which would be sufficient to prove possession of the identifier. :returns: tuple of indices from ``challenges``. :rtype: tuple :raises letsencrypt.errors.AuthorizationError: If a path cannot be created that satisfies the CA given the preferences and combinations. """ if combinations: return _find_smart_path(challbs, preferences, combinations) else: return _find_dumb_path(challbs, preferences) def _find_smart_path(challbs, preferences, combinations): """Find challenge path with server hints. Can be called if combinations is included. Function uses a simple ranking system to choose the combo with the lowest cost. """ chall_cost = {} max_cost = 1 for i, chall_cls in enumerate(preferences): chall_cost[chall_cls] = i max_cost += i # max_cost is now equal to sum(indices) + 1 best_combo = [] # Set above completing all of the available challenges best_combo_cost = max_cost combo_total = 0 for combo in combinations: for challenge_index in combo: combo_total += chall_cost.get(challbs[ challenge_index].chall.__class__, max_cost) if combo_total < best_combo_cost: best_combo = combo best_combo_cost = combo_total combo_total = 0 if not best_combo: msg = ("Client does not support any combination of challenges that " "will satisfy the CA.") logger.fatal(msg) raise errors.AuthorizationError(msg) return best_combo def _find_dumb_path(challbs, preferences): """Find challenge path without server hints. Should be called if the combinations hint is not included by the server. This function returns the best path that does not contain multiple mutually exclusive challenges. 
""" assert len(preferences) == len(set(preferences)) path = [] satisfied = set() for pref_c in preferences: for i, offered_challb in enumerate(challbs): if (isinstance(offered_challb.chall, pref_c) and is_preferred(offered_challb, satisfied)): path.append(i) satisfied.add(offered_challb) return path def mutually_exclusive(obj1, obj2, groups, different=False): """Are two objects mutually exclusive?""" for group in groups: obj1_present = False obj2_present = False for obj_cls in group: obj1_present |= isinstance(obj1, obj_cls) obj2_present |= isinstance(obj2, obj_cls) if obj1_present and obj2_present and ( not different or not isinstance(obj1, obj2.__class__)): return False return True def is_preferred(offered_challb, satisfied, exclusive_groups=constants.EXCLUSIVE_CHALLENGES): """Return whether or not the challenge is preferred in path.""" for challb in satisfied: if not mutually_exclusive( offered_challb.chall, challb.chall, exclusive_groups, different=True): return False return True _ERROR_HELP_COMMON = ( "To fix these errors, please make sure that your domain name was entered " "correctly and the DNS A/AAAA record(s) for that domain contains the " "right IP address.") _ERROR_HELP = { "connection" : _ERROR_HELP_COMMON + " Additionally, please check that your computer " "has publicly routable IP address and no firewalls are preventing the " "server from communicating with the client.", "dnssec" : _ERROR_HELP_COMMON + " Additionally, if you have DNSSEC enabled for " "your domain, please ensure the signature is valid.", "malformed" : "To fix these errors, please make sure that you did not provide any " "invalid information to the client and try running Let's Encrypt " "again.", "serverInternal" : "Unfortunately, an error on the ACME server prevented you from completing " "authorization. Please try again later.", "tls" : _ERROR_HELP_COMMON + " Additionally, please check that you have an up " "to date TLS configuration that allows the server to communicate with " "the Let's Encrypt client.", "unauthorized" : _ERROR_HELP_COMMON, "unknownHost" : _ERROR_HELP_COMMON,} def _report_failed_challs(failed_achalls): """Notifies the user about failed challenges. :param set failed_achalls: A set of failed :class:`letsencrypt.achallenges.AnnotatedChallenge`. """ problems = dict() for achall in failed_achalls: if achall.error: problems.setdefault(achall.error.typ, []).append(achall) reporter = zope.component.getUtility(interfaces.IReporter) for achalls in problems.itervalues(): reporter.add_message( _generate_failed_chall_msg(achalls), reporter.MEDIUM_PRIORITY, True) def _generate_failed_chall_msg(failed_achalls): """Creates a user friendly error message about failed challenges. :param list failed_achalls: A list of failed :class:`letsencrypt.achallenges.AnnotatedChallenge` with the same error type. :returns: A formatted error message for the client. :rtype: str """ typ = failed_achalls[0].error.typ msg = [ "The following '{0}' errors were reported by the server:".format(typ)] problems = dict() for achall in failed_achalls: problems.setdefault(achall.error.description, set()).add(achall.domain) for problem in problems: msg.append("\n\nDomains: ") msg.append(", ".join(sorted(problems[problem]))) msg.append("\nError: {0}".format(problem)) if typ in _ERROR_HELP: msg.append("\n\n") msg.append(_ERROR_HELP[typ]) return "".join(msg)
#!/usr/bin/python2.7 # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A lightweight and flexible web application framework. This framework is intended for highly dynamic content. There are no templates, but a pure Python markup generator. It is expected that little markup will be generated, however. Most of the application would be written in Javascript. """ from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from __future__ import division import sys import re import sre_parse from pprint import pformat import itertools import traceback import simplejson from pycopia import urlparse from pycopia.inet import httputils from pycopia.dictlib import ObjectCache from pycopia.WWW.middleware import POMadapter from pycopia.WWW import HTML5 STATUSCODES = httputils.STATUSCODES if sys.version_info.major == 3: basestring = str class Error(Exception): "Base framework error" class InvalidPath(Error): """Raised if a back-URL is requested by the handler that can't be met by any registered handlers. """ class HTTPError(Exception): code = None @property def message(self): try: return self.args[0] except IndexError: return "" def __str__(self): return b"%s %s\n%s" % (self.code, STATUSCODES.get(self.code, b'UNKNOWN STATUS CODE'), self.message) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.message) class HttpErrorNotAuthenticated(HTTPError): code = 401 class HttpErrorNotAuthorized(HTTPError): code = 403 class HttpErrorNotFound(HTTPError): code = 404 class HttpErrorMethodNotAllowed(HTTPError): code = 405 class HttpErrorMethodNotAcceptable(HTTPError): code = 406 class HttpErrorLengthRequired(HTTPError): code = 411 class HttpErrorUnsupportedMedia(HTTPError): code = 415 class HttpErrorServerError(HTTPError): code = 500 ELEMENTCACHE = ObjectCache() # supported mime types. 
SUPPORTED = [ b"application/xhtml+xml", b"text/html", b"text/plain"] RESERVED_CHARS=b"!*'();:@&=+$,/?%#[]" class HttpResponse(object): "A basic HTTP response, with content and dictionary-accessed headers" status_code = 200 def __init__(self, content=b'', mimetype=None, charset=b"utf-8"): self.headers = httputils.Headers() self.cookies = httputils.CookieJar() self._charset = charset if mimetype: self.headers.add_header(httputils.ContentType(mimetype, charset=self._charset)) if hasattr(content, '__iter__'): self._container = content self._is_string = False else: self._container = [content] self._is_string = True def get_status(self): return b"%s %s" % (self.status_code, STATUSCODES.get(self.status_code, b'UNKNOWN STATUS CODE')) def __str__(self): "Full HTTP message, including headers" return b'\r\n'.join([b'%s: %s' % (key, value) for key, value in self.headers.asWSGI()]) + b'\r\n\r\n' + self.content def __setitem__(self, header, value): self.headers.add_header(header, value) def __delitem__(self, header): try: del self.headers[header] except IndexError: pass def __getitem__(self, header): return self.headers[header] def add_header(self, header, value=None): self.headers.add_header(header, value) def set_cookie(self, key, value=b'', max_age=None, path=b'/', expires=None, domain=None, secure=False, httponly=False): self.cookies.add_cookie(key, value, domain=domain, max_age=max_age, path=path, secure=secure, expires=expires, httponly=httponly) def delete_cookie(self, key, path=b'/', domain=None): self.cookies.delete_cookie(key, path, domain) def get_response_headers(self): self.cookies.get_setcookies(self.headers) return self.headers.asWSGI() def _get_content(self): return b''.join([o.encode(self._charset) for o in self._container]) def _set_content(self, value): self._container = [value] self._is_string = True content = property(_get_content, _set_content) def __iter__(self): self._iterator = self._container.__iter__() return self def __next__(self): chunk = self._iterator.next() return chunk.encode(self._charset) next = __next__ def close(self): try: self._container.close() except AttributeError: pass def write(self, content): if not self._is_string: raise NotImplementedError("This %s instance is not writable" % self.__class__) self._container.append(content) def flush(self): pass def tell(self): if not self._is_string: raise NotImplementedError("This %s instance cannot tell its position" % self.__class__) return sum([len(chunk) for chunk in self._container]) class HttpResponseRedirect(HttpResponse): status_code = 302 def __init__(self, redirect_to, **kwargs): HttpResponse.__init__(self) if kwargs: dest = urlparse.quote(redirect_to, safe=RESERVED_CHARS) + "?" + urlparse.urlencode(kwargs) else: dest = urlparse.quote(redirect_to, safe=RESERVED_CHARS) self[b'Location'] = dest class HttpResponsePermanentRedirect(HttpResponse): status_code = 301 def __init__(self, redirect_to): HttpResponse.__init__(self) self[b'Location'] = urlparse.quote(redirect_to, safe=RESERVED_CHARS) class HttpResponseNotModified(HttpResponse): status_code = 304 class HttpResponseNotAuthenticated(HttpResponse): status_code = 401 class HttpResponsePaymentRequired(HttpResponse): status_code = 402 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods): super(HttpResponse, self). 
__init__() self[b'Allow'] = ', '.join(permitted_methods) class HttpResponseNotAcceptable(HttpResponse): status_code = 406 def __init__(self): super(HttpResponse, self). __init__() self[b'Content-Type'] = ', '.join(SUPPORTED) class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 def parse_formdata(contenttype, post_data): post = urlparse.URLQuery() files = urlparse.URLQuery() boundary = b"--" + contenttype.parameters[b"boundary"] for part in post_data.split(boundary): if not part: continue if part.startswith(b"--"): continue headers, body = httputils.get_headers_and_body(part) cd = headers[0] if "filename" in cd.parameters: files[cd.parameters['name']] = body else: post[cd.parameters['name']] = body.strip() return post, files class HTTPRequest(object): """The HTTP request that gets passed to handler methods.""" def __init__(self, environ, config, resolver, path): self.environ = environ self.config = config self.resolver = resolver self.method = environ[b'REQUEST_METHOD'] self.path = path self.session = None # possibly set by authentication module. def log_error(self, message): fo = self.environ[b"wsgi.errors"] fo.write(message.encode("ascii")) fo.flush() def __repr__(self): # Since this is called as part of error handling, we need to be very # robust against potentially malformed input. try: get = pformat(self.GET) except: get = '<could not parse>' try: post = pformat(self.POST) except: post = '<could not parse>' try: cookies = pformat(self.COOKIES) except: cookies = '<could not parse>' try: meta = pformat(self.environ) except: meta = '<could not parse>' return '<HTTPRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nenviron:%s>' % \ (get, post, cookies, meta) def get_host(self): host = self.environ.get(b'HTTP_X_FORWARDED_HOST', None) if not host: host = self.environ.get(b'HTTP_HOST', 'localhost') return host def get_domain(self): host = self.get_host() doti = host.find(".") if dot1 > 0: return host[doti:] else: return host def get_full_path(self): qs =self.environ.get(b'QUERY_STRING') if qs: return '%s?%s' % (self.path, qs) else: return self.path def is_secure(self): return self.environ.get('HTTPS', "off") == "on" def _parse_post_content(self): if self.method == b'POST': content_type = httputils.ContentType(self.environ.get(b'CONTENT_TYPE', '')) ctvalue = content_type.value.lower() if ctvalue == b'multipart/form-data': self._post, self._files = parse_formdata(content_type, self._get_raw_post_data()) elif ctvalue == b"application/x-www-form-urlencoded": self._post = urlparse.queryparse(self._get_raw_post_data()) self._files = None else: # some buggy clients don't set proper content-type, so # just preserve the raw data as a file. self.log_error("Bad content-type: {!s}".format(content_type)) data = self._get_raw_post_data() self._post = urlparse.queryparse(data) self._files = {} self._files["body"] = { b'content-type': content_type, b'content': data, } else: self._post = urlparse.URLQuery() self._files = None def __getitem__(self, key): for d in (self.POST, self.GET): try: return d[key] except KeyError: pass raise KeyError("%s not found in either POST or GET" % key) def has_key(self, key): return key in self.GET or key in self.POST def _get_get(self): try: return self._get except AttributeError: # The WSGI spec says 'QUERY_STRING' may be absent. 
self._get = urlparse.queryparse(self.environ.get(b'QUERY_STRING', b'')) return self._get def _get_post(self): try: return self._post except AttributeError: self._parse_post_content() return self._post def _get_cookies(self): try: return self._cookies except AttributeError: self._cookies = cookies = {} for cookie in httputils.parse_cookie(self.environ.get(b'HTTP_COOKIE', b'')): cookies[cookie.name] = cookie.value return cookies def _get_files(self): try: return self._files except AttributeError: self._parse_post_content() return self._files def _get_raw_post_data(self): try: content_length = int(self.environ.get(b"CONTENT_LENGTH")) except ValueError: # if CONTENT_LENGTH was empty string or not an integer raise HttpErrorLengthRequired("A Content-Length header is required.") return self.environ[b'wsgi.input'].read(content_length) def _get_headers(self): try: return self._headers except AttributeError: self._headers = hdrs = httputils.Headers() for k, v in self.environ.iteritems(): if k.startswith(b"HTTP"): hdrs.append(httputils.make_header(k[5:].replace(b"_", b"-").lower(), v)) return self._headers GET = property(_get_get) POST = property(_get_post) COOKIES = property(_get_cookies) FILES = property(_get_files) headers = property(_get_headers) class URLMap(object): """From regexp to url, and back again. Patterns must use named groups. """ def __init__(self, regexp, method): self._method = method self._regexp, self._format = _make_url_form(regexp) def __str__(self): return "%s => %s" % (self._regexp.pattern, self._method.func_name) def match(self, string): mo = self._regexp.match(string) if mo: return self._method, mo.groupdict() return None, None def get_url(self, **kwargs): path = self._format % kwargs # verify that args are allowed mo = self._regexp.match(path) if mo: return path else: raise InvalidPath("url args don't match path pattern.") def _make_url_form(regexp): # Build reverse mapping format from RE parse tree. This simplified function # only works with the type of RE used in url mappings in the fcgi # config file. cre = re.compile(regexp, re.I) indexmap = dict([(v,k) for k,v in cre.groupindex.items()]) collect = [] for op, val in sre_parse.parse(regexp, re.I): if op is sre_parse.LITERAL: collect.append(chr(val)) elif op is sre_parse.SUBPATTERN: name = indexmap[val[0]] collect.append(br'%%(%s)s' % name) return cre, "".join(collect) class URLAlias(object): """Acts as an alias for static locations.""" def __init__(self, regexp, method): self._name = method self._regexp, self._format = _make_url_form(regexp) self._method = URLRedirector(self._format) def __str__(self): return b"%s => %s" % (self._name, self._method) def match(self, string): mo = self._regexp.match(string) if mo: return self._method, mo.groupdict() return None, None def get_url(self, **kwargs): return self._format % kwargs class URLRedirector(object): def __init__(self, loc): self._loc = loc def __hash__(self): return hash(self._loc) def __repr__(self): return b"URLRedirector(%r)" % self._loc def __call__(self, request, **kwargs): return HttpResponsePermanentRedirect(self._loc % kwargs) class URLResolver(object): """Supports mapping URL paths to handler functions.""" def __init__(self, mapconfig, urlbase=""): self._reverse = {} self._aliases = {} self._patterns = [] self._urlbase = urlbase for pattern, methname in mapconfig: self.register(pattern, methname) def register(self, pattern, method): if isinstance(method, basestring): if b"." 
in method: method = get_method(method) else: self._aliases[method] = URLAlias(pattern, method) return else: assert callable(method), "Must register a callable." urlmap = URLMap(pattern, method) self._patterns.append(urlmap) self._reverse[method] = urlmap def unregister(self, method): if isinstance(method, basestring): method = get_method(method) try: m = self._reverse[method] except KeyError: return # not registered anyway else: del self._reverse[method] i = 0 for urlmap in self._patterns: if urlmap._method is m: break else: i += 1 del self._patterns[i] def match(self, uri): for mapper in self._patterns: method, kwargs = mapper.match(uri) if method: return method, kwargs return None, None def dispatch(self, request): path = request.environ[b"PATH_INFO"] for mapper in self._patterns: method, kwargs = mapper.match(path) if method: response = method(request, **kwargs) if response is None: request.log_error("Handler %r returned none.\n" % (method,)) raise HttpErrorServerError("handler returned None") return response else: raise HttpErrorNotFound(path) def get_url(self, method, **kwargs): """Reverse mapping. Answers the question: How do I reach the callable object mapped to in the LOCATIONMAP? """ if isinstance(method, basestring): if "." in method: method = get_method(method) else: try: urlmap = self._aliases[method] except KeyError: raise InvalidPath("Alias not registered") return urlmap.get_url(**kwargs) try: urlmap = self._reverse[method] except KeyError: raise InvalidPath("Method %r not registered." % (method,)) return self._urlbase + urlmap.get_url(**kwargs) def get_alias(self, name, **kwargs): try: urlmap = self._aliases[name] except KeyError: raise InvalidPath("Alias not registered") return urlmap.get_url(**kwargs) class FrameworkAdapter(object): """Adapt a WSGI server to a framework style request handler. """ def __init__(self, config): self._config = config self._urlbase = config.get(b"BASEPATH", "/" + config.SERVERNAME) self._resolver = URLResolver(config.LOCATIONMAP, self._urlbase) def __call__(self, environ, start_response): request = HTTPRequest(environ, self._config, self._resolver, self._urlbase + environ[b'PATH_INFO']) try: response = self._resolver.dispatch(request) except: ex, val, tb = sys.exc_info() if issubclass(ex, HTTPError): start_response(str(val).encode("ascii"), [(b"Content-Type", b"text/plain")], (ex, val, tb)) return [val.message] else: raise else: start_response(response.get_status(), response.get_response_headers()) return response # You can subclass this and set and instance to be called by URL mapping. class RequestHandler(object): METHODS = ["get", "head", "post", "put", "delete", "options", "trace"] def __init__(self, constructor=None, verifier=None): self._methods = {} impl = [] for name in self.METHODS: key = name.upper() if name in self.__class__.__dict__: impl.append(key) method = getattr(self, name) if verifier: method = verifier(method) self._methods[key] = method else: self._methods[key] = self._invalid self._implemented = impl self._constructor = constructor # optional subclass initializer. 
self.initialize() def initialize(self): pass def get_response(self, request, **kwargs): return ResponseDocument(request, self._constructor, **kwargs) def __call__(self, request, **kwargs): meth = self._methods.get(request.method, self._invalid) try: return meth(request, **kwargs) except NotImplementedError: return HttpResponseNotAllowed(self._implemented) def _invalid(self, request, **kwargs): request.log_error("%r: invalid method: %r\n" % (self, request.method)) return HttpResponseNotAllowed(self._implemented) # Override one or more of these in your handler subclass. Invalid # requests are automatically handled. def get(self, request, **kwargs): raise NotImplementedError() def post(self, request, **kwargs): raise NotImplementedError() def put(self, request, **kwargs): raise NotImplementedError() def delete(self, request, **kwargs): raise NotImplementedError() def head(self, request, **kwargs): raise NotImplementedError() def options(self, request, **kwargs): raise NotImplementedError() def trace(self, request, **kwargs): raise NotImplementedError() # for JSON servers. class JSONResponse(HttpResponse): """Used for asynchronous interfaces needing JSON data returned.""" def __init__(self, obj): json = simplejson.dumps(obj) HttpResponse.__init__(self, json, b"application/json") def JSONQuery(request): """Convert query term where values are JSON encoded strings.""" rv = {} for key, value in itertools.chain(request.GET.items(), request.POST.items()): rv[key] = simplejson.loads(value) return rv def JSON404(message=None): json = simplejson.dumps(message) return HttpResponseNotFound(json, mimetype=b"application/json") def JSONServerError(ex, val, tblist): json = simplejson.dumps((str(ex), str(val), tblist)) return HttpResponseServerError(json, mimetype=b"application/json") class JSONRequestHandler(RequestHandler): """Sub-dispatcher for JSON requests. catches all exceptions and returns exception on error as JSON serialized objects (since async requests are not viewable on the client side). Supply a list of functions to handle. The names of which match the "function" field in the URL mapping. Your handler functions will get keyword arguments mapped from the request query or form. You return any Python primitive objects which get sent back to the client. """ def __init__(self, flist, **kwargs): self._mapping = mapping = {} for func in flist: mapping[func.func_name] = func super(JSONRequestHandler, self).__init__(None, **kwargs) def get(self, request, function): try: handler = self._mapping[function] except KeyError: request.log_error("No JSON handler for %r.\n" % function) return JSON404() kwargs = JSONQuery(request) try: return JSONResponse(handler(request, **kwargs)) except: ex, val, tb = sys.exc_info() tblist = traceback.extract_tb(tb) del tb request.log_error("JSON handler error: %s: %s\n" % (ex, val)) return JSONServerError(ex, val, tblist) post = get # since JSONQuery also converts POST part. def get_response(self, request, **kwargs): return None # should not be used for JSON handlers. def default_doc_constructor(request, **kwargs): """Example document constructor. This callback contructs the common elements to a response, usually following some theme. It's use is optional. 
""" doc = HTML5.new_document() for name, val in kwargs.items(): setattr(doc, name, val) container = doc.add_section("container") header = container.add_section("container", id="header") wrapper = container.add_section("container", id="wrapper") content = wrapper.add_section("container", id="content") navigation = container.add_section("container", id="navigation") sidebar = container.add_section("container", id="sidebar") footer = container.add_section("container", id="footer") doc.header = header doc.content = content doc.nav = navigation doc.sidebar = sidebar doc.footer = footer return doc class ResponseDocument(object): """Wraps a text-creator document and supplies helper methods for accessing configuration. """ def __init__(self, request, _constructor=None, **kwargs): self.config = request.config self.resolver = request.resolver if _constructor is not None: self._doc = _constructor(request, **kwargs) else: self._doc = doc = HTML5.new_document() for name, val in kwargs.items(): setattr(doc, name, val) doc = property(lambda s: s._doc) def __getattr__(self, key): return getattr(self._doc, key) def get_object(self, key, ctor, **kwargs): return ELEMENTCACHE.get_object(key, ctor, **kwargs) def finalize(self): """Handlers should return the return value of this method.""" doc = self._doc self._doc = None self.config = None self.resolver = None adapter = POMadapter.WSGIAdapter(doc) doc.emit(adapter) response = HttpResponse(adapter) for headertuple in adapter.headers: response.add_header(*headertuple) response.add_header(httputils.CacheControl("no-cache")) return response def get_icon(self, name, size="large"): if size == "large": return self.get_large_icon(name) elif size == "medium": return self.get_medium_icon(name) elif size == "small": return self.get_small_icon(name) def get_large_icon(self, name): try: namepair = self.config.ICONMAP["large"][name] except KeyError: namepair = self.config.ICONMAP["large"]["default"] return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=namepair[1]), "alt":name, "width":"24", "height":"24"}) def get_medium_icon(self, name): try: filename = self.config.ICONMAP["medium"][name] except KeyError: filename = self.config.ICONMAP["medium"]["default"] return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=filename), "alt":name, "width":"16", "height":"16"}) def get_small_icon(self, name): try: filename = self.config.ICONMAP["small"][name] except KeyError: filename = self.config.ICONMAP["small"]["default"] return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=filename), "alt":name, "width":"10", "height":"10"}) def anchor2(self, path, text, **kwargs): """Adds a hyperlink to a handler.""" try: href = self.resolver.get_url(path, **kwargs) except InvalidPath: href = str(path) # use as-is as a fallback for hard-coded destinations. return self._doc.nodemaker(b"A", {"href": href}, text) # general purpose URL scheme "hole" filler. Use as a handler in the URL # map that doesn't otherwise handle anything. A little better than just # returning 404. 
def redirectup(request, **kwargs):
    return HttpResponsePermanentRedirect("..")


def get_method(name):
    """get a function from a module path."""
    dot = name.rfind(".")
    mod = _get_module(name[:dot])
    return getattr(mod, name[dot + 1:])


def _get_module(name):
    try:
        return sys.modules[name]
    except KeyError:
        pass
    mod = __import__(name)
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


if __name__ == "__main__":
    DATA = b"""------WebKitFormBoundaryLHph2NIrIQTpfNKw\r
Content-Disposition: form-data; name="name"\r
\r
myenvattr\r
------WebKitFormBoundaryLHph2NIrIQTpfNKw\r
Content-Disposition: form-data; name="description"\r
\r
Some attr test.\r
------WebKitFormBoundaryLHph2NIrIQTpfNKw\r
Content-Disposition: form-data; name="value_type"\r
\r
1\r
------WebKitFormBoundaryLHph2NIrIQTpfNKw\r
Content-Disposition: form-data; name="submit"\r
\r
submit\r
------WebKitFormBoundaryLHph2NIrIQTpfNKw--\r
"""
    content_type = b"Multipart/form-data; boundary=----WebKitFormBoundaryLHph2NIrIQTpfNKw"
    content_type = httputils.ContentType(content_type)
    print(content_type.value)
    post, files = parse_formdata(content_type, DATA)
    print(post.items())
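    # ------------------------------------------------------------------
    # Additional illustration (hypothetical handler and pattern; not part
    # of the original module): register a plain callable with URLResolver,
    # then resolve the same location in both directions.
    # ------------------------------------------------------------------
    def hello(request, name=None):
        return HttpResponse("Hello %s" % (name,), mimetype=b"text/plain")

    demo_resolver = URLResolver([(r"/hello/(?P<name>\w+)$", hello)])
    method, kwargs = demo_resolver.match("/hello/world")
    print(method is hello, kwargs)                     # True {'name': 'world'}
    print(demo_resolver.get_url(hello, name="world"))  # /hello/world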
#!/usr/bin/env python __all__ = ["TextPreprocessor", "FeatureGenerator", "ClassMapping", "Text2svmConverter", "convert_text"] import sys, os import unicodedata, re from collections import defaultdict if sys.version_info[0] >= 3: xrange = range import pickle as cPickle izip = zip def unicode(string, setting): return string else : import cPickle from itertools import izip # import porter stemmer from .stemmer import porter from ctypes import * # XXX This function must support outputing to one of the input file!! def _merge_files(svm_files, offsets, is_training, output): if not isinstance(offsets, list) or len(svm_files) != len(offsets): raise ValueError('offsets should be a list where the length is the number of merged files') util = CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'classifier', 'learner', 'util.so.1')) util.merge_problems.restype = None util.merge_problems.argtypes = [POINTER(c_char_p), c_int, POINTER(c_int64), c_char_p, c_char, POINTER(c_int64)] size = len(svm_files) c_svm_files = (c_char_p * size)() for i, f in enumerate(svm_files): c_svm_files[i] = c_char_p(f.encode()) c_offsets = (c_int64 * size)() if not is_training: for i, v in enumerate(offsets): c_offsets[i] = c_int64(v) c_is_training = c_char(chr(is_training).encode('ascii')) c_error_code = c_int64() output = c_char_p(bytes(output,'utf-8')) if sys.version_info[0] >= 3 else c_char_p(output) util.merge_problems(c_svm_files, c_int(size), c_offsets, output, c_is_training, c_error_code) error_code = c_error_code.value if error_code > 0: raise ValueError('wrong file format in line ' + str(error_code)) elif error_code == -1: raise IOError('cannot open file') elif error_code == -2: raise MemoryError("Memory Exhausted. Try to restart python.") elif error_code == -3: raise ValueError('merging svm files of different sizes') elif error_code == -4: raise ValueError('at least one file should be given to merge') if is_training: for i in range(size): offsets[i] = c_offsets[i] def _iterdict(d): if sys.version_info[0] >= 3: return d.items() else : return d.iteritems() def _dict2list(d): if len(d) == 0: return [] #XXX check if we can replace the following line to "m = len(d)" m = max(v for k,v in _iterdict(d)) ret = [''] * (m+1) for k,v in _iterdict(d): ret[v] = k return ret def _list2dict(l): return dict((v,k) for k,v in enumerate(l)) class TextPreprocessor(object): """ :class:`TextPreprocessor` is used to pre-process the raw texts to a :class:`list` of feature indices. First, each text is tokenized by the :attr:`tokenizer` into a :class:`list` of tokens. Tokens are then passed to the :attr:`stemmer` and the :attr:`stopword_remover`. Finally, each stemmed token is converted to a token index. Refer to :meth:`parse_option` for the *option* parameter. If *readonly* is set to ``True``, the feature index mapping will not be updated even if new tokens are explored. These new tokens will be ignored. *readonly* should be set as ``True`` for test, and ``False`` for training. """ def __init__(self, option='-stemming 0 -stopword 0', readonly=False): self._option = option self._readonly=readonly self.tok2idx = {'>>dummy<<':0} self.idx2tok = None opts = self.parse_option(option) #: The function used to stem tokens. #: #: Refer to :ref:`CustomizedPreprocessing`. self.stemmer = opts[0] #: The function used to remove stop words. #: #: Refer to :ref:`CustomizedPreprocessing`. self.stopword_remover = opts[1] #: The function used to tokenize texts into a :class:`list` of tokens. #: #: Refer to :ref:`CustomizedPreprocessing`. 
self.tokenizer = self.default_tokenizer def parse_option(self, option): """ Parse the given :class:`str` parameter *option* and set :attr:`stemmer` and :attr:`stopword_remover` to the desired functions. *option* is a :class:`str` instance: ================== ========================================== Options Description ================== ========================================== -stopword *method* If *method* is `1`, then :meth:`default_stoplist` is used. If *method* is `0`, then no word will be removed. Default is `0` (no stopword removal). -stemming *method* If *method* is `1`, then Porter stemmer is used. If *method* is `0`, tokens are not stemmed. Default is `0` (no stemming). ================== ========================================== The following example creates a :class:`TextPreprocessor` that applies Porter stemmer and removes stop words. >>> preprocessor = TextPreprocessor() >>> preprocessor.parse_option('-stopword 1 -stemming 1') .. note:: Redundant options are ignored quietly. Users should pay attention to the spelling of the options. """ option = option.strip().split() stoplist, tokstemmer = set(), lambda x: x i = 0 while i < len(option): if option[i][0] != '-': break if option[i] == '-stopword': if int(option[i+1]) != 0: stoplist = self.default_stoplist() elif option[i] == '-stemming': if int(option[i+1]) != 0: tokstemmer = porter.stem i+=2 stoplist = set(tokstemmer(x) for x in stoplist) stemmer = lambda text: map(tokstemmer, text) stopword_remover = lambda text: filter(lambda tok: tok not in stoplist, text) return stemmer, stopword_remover def get_idx2tok(self, idx): """ Access the index-token mapping. Given a numerical *idx*, this function returns the corresponding token. .. note:: Because the index-to-token mapping is not maintained internally, the first time to call this function takes longer time to build the reverse mapping. This function should be always called with a readonly :class:`TextPreprocessor` instance to avoid inconsistence between the token-to-index mapping and its reverse. """ if not self.idx2tok: self.idx2tok = _dict2list(self.tok2idx) return self.idx2tok[idx] def save(self, dest_file): """ Save the :class:`TextPreprocessor` to a file. .. note:: Function variables are not saved by this method. Even if :attr:`stopword_remover`, :attr:`stemmer`, or :attr:`tokenizer` are modified, they will **not** be saved accordingly. Therefore, they must be set again after being loaded. Refer to :ref:`CustomizedPreprocessing`. """ self.idx2tok = _dict2list(self.tok2idx) config = {'option':self._option,'idx2tok':self.idx2tok} cPickle.dump(config, open(dest_file,'wb'), -1) # by default, mapping file will be not updated when we load the file def load(self, src_file, readonly=True): """ Load the :class:`TextPreprocessor` instance from the *src_file* file, which is a pickle file generated by :class:`cPickle`. If *readonly* is `True`, the :class:`TextPreprocessor` instance will not be modifiable. """ config = cPickle.load(open(src_file,'rb')) self._readonly = readonly self._option = config['option'] self.idx2tok = config['idx2tok'] self.tok2idx = _list2dict(config['idx2tok']) self.stemmer, self.stopword_remover = self.parse_option(config['option']) self.tokenizer = self.default_tokenizer return self @staticmethod def default_stoplist(): """ Return a default stopword list provided by LibShortText. Note that LibShortText stems words first (if stemmer is provided). Therefore, all words on the stopword list should be stemmed first. 
The following example creates a stoplist_remover from a list. >>> from libshorttext.converter import * >>> >>> preprocessor = TextPreprocessor('-stemming 1') >>> stoplist = preprocessor.stemmer(list(TextPreprocessor.default_stoplist())) >>> preprocessor.stopword_remover = lambda text: filter( ... lambda token: token not in stoplist, text) """ # This function only parses the default stop word list file. # *src* should not be an argument. src = "" if not src: src = '{0}/stop-words/stoplist-nsp.regex'.format(os.path.dirname(os.path.abspath(__file__))) srcfile = open(src) stoplist = set(map(chr, range(ord('a'),ord('z')+1))) srcfile.readline() srcfile.readline() for line in srcfile: stoplist.add(line[5:-4].lower().replace(']','')) return stoplist # check if a better and more efficient way to tokenize text # http://stackoverflow.com/questions/9455510/remove-all-non-ascii-from-string @staticmethod def default_tokenizer(text): """ The default tokenizer provided by LibShortText. The default tokenizer is used to tokenize English documents. It splits a text to tokens by whitespace characters, and normalizes tokens using `NFD (normalization form D) <http://docs.python.org/2/library/unicodedata.html#unicodedata.normalize>`_. """ def foo(c): if ord(c)>127: return '' if c.isdigit() or c.isalpha(): return c else : return ' ' text = unicodedata.normalize('NFD', unicode(text, 'utf-8')).lower() text = ''.join(map(foo,text)) text = re.sub(r'([a-z])([0-9])', r'\1 \2', text) text = re.sub(r'([0-9])([a-z])', r'\1 \2', text) text = re.sub(r'\s+', r' ', text) return text.strip().split() def preprocess(self, text): """ Preprocess the given *text* into a :class:`list` of token indices, where *text* is a :class:`str` instance. If the preprocessor is not in the read-only mode, :meth:`preprocess` expands the internal token-index mapping for unseen tokens; otherwise, this function ignores unseen tokens. """ text = self.tokenizer(text) text = self.stemmer(text) text = self.stopword_remover(text) ret = [] for i,tok in enumerate(text): if tok not in self.tok2idx: if self._readonly: continue self.tok2idx[tok] = len(self.tok2idx) self.idx2tok = None ret += [self.tok2idx[tok]] return ret class FeatureGenerator(object): """ :class:`FeatureGenerator` is used to generate uni-gram or bi-gram features. """ def __init__(self, option='-feature 1', readonly=False): #: :attr:`option` is a :class:`str` instance, which could be #: ``-feature 0``: uni-gram features #: ``-feature 1``: bi-gram features (default) self._option = option #: :attr:`readonly` is a :class:`bool` variable used to indicate whether #: the future operations of this instance can update the internal #: ngram-index mapping. (the default value is ``False``). self._readonly = readonly self.ngram2fidx = {'>>dummy<<':0} self.fidx2ngram=None #: :attr:`feat_gen` is variable pointing to the function that conducts #: feature generation. It can be either :func:`unigram` or #: :func:`bigram`, determined by :attr:`option`. self.feat_gen = self.parse_option(option) def parse_option(self, option): """ Parse the given :class:`str` parameter *option* and set :attr:`feat_gen` to the desired function. There is only one option in this version. ================= ======================================== Option Description ================= ======================================== -feature *method* If *method* is `1`, then bigram is used. If *method* is `0`, unigram is used. Default is `1` (bigram). 
================= ======================================== For example, the following example creates a unigram feature generator. >>> feature_generator = FeatureGenerator() >>> feature_generator.parse_option('-feature 0') .. note:: Redundant options are ignored quietly. Users should pay attention to the spelling of the options. """ option = option.strip().split() feat_gen = self.bigram i = 0 while i < len(option): if option[i][0] != '-': break if option[i] == '-feature': if int(option[i+1]) == 0: feat_gen = self.unigram i+=2 return feat_gen def get_fidx2ngram(self, fidx): """ Access the index-to-ngram mapping. Given a numerical *fidx*, this function returns the corresponding ngram. .. note:: Because the index-to-ngram mapping is not maintained internally, the first time to call this function takes longer time to build the mapping. This function should be always called with a readonly :class:`FeatureGenerator` instance to avoid inconsistence between the ngram-to-index mapping and its reverse. """ if not self.fidx2ngram: self.fidx2ngram = _dict2list(self.ngram2fidx) return self.fidx2ngram[fidx] def save(self, dest_file): """ Save the :class:`FeatureGenerator` instance into the *dest_file* file, which will be a pickle file generated by :class:`cPickle`. We suggest using Python 2.7 or newer versions for faster implementation of :class:`cPickle`. """ self.fidx2ngram = _dict2list(self.ngram2fidx) config = {'option':self._option,'fidx2ngram':self.fidx2ngram} cPickle.dump(config, open(dest_file,'wb'), -1) # by default, mapping file will be not updated when we load the file def load(self, src_file, readonly=True): """ Load the :class:`FeatureGenerator` instance from the *src_file* file, which is a pickle file generated by :class:`cPickle`. We suggest using Python 2.7 or newer versions for faster implementation of :class:`cPickle`. If *readonly* is `True`, the :class:`FeatureGenerator` instance will be readonly. """ config = cPickle.load(open(src_file,'rb')) self._option = config['option'] self.fidx2ngram = config['fidx2ngram'] self.ngram2fidx = _list2dict(config['fidx2ngram']) self._readonly=readonly self.feat_gen = self.parse_option(config['option']) return self def toSVM(self, text): """ Generate a :class:`dict` instance for the given *text*, which is a :class:`list` of tokens. Each `key` of the returning dictionary is an index corresponding to an ngram feature, while the corresponding `value` is the count of the occurrence of that feature. If not in read only mode, this function expands the internal ngram-index mapping for unseen ngrams; otherwise, this function ignores unseen ngrams. """ return self.feat_gen(text) #return ''.join(' %d:%d' %(f, feat[f]) for f in sorted(feat)) def unigram(self, text): """ Generate a :class:`dict` corresponding to the sparse vector of the uni-gram representation of the given *text*, which is a :class:`list` of tokens. """ feat = defaultdict(int) NG = self.ngram2fidx for x in text: if (x,) not in NG: if self._readonly: continue NG[x,] = len(NG) self.fidx2ngram = None feat[NG[x,]]+=1 return feat def bigram(self, text): """ Generate a :class:`dict` corresponding to the sparse vector of the bi-gram representation of the given *text*, which is a :class:`list` of tokens. 
""" feat = self.unigram(text) NG = self.ngram2fidx for x,y in zip(text[:-1], text[1:]): if (x,y) not in NG: if self._readonly: continue NG[x,y] = len(NG) self.fidx2ngram = None feat[NG[x,y]]+=1 return feat class ClassMapping(object): """ :class:`ClassMapping` is used to handle the mapping between the class label and the internal class index. *option* is ignored in this version. """ def __init__(self, option='', readonly=False): # No option in this version self._option = option #::attr:`readonly` is a :class:`bool` variable used to indicate whether #:the future operations of this instance can update the internal #:label-index mapping. (the defaut value is ``False``). self._readonly = readonly self.class2idx = {} self.idx2class = None def save(self, dest_file): """ Save the :class:`ClassMapping` instance into the *dest_file* file, which will be a pickle file generated by :class:`cPickle`. """ self.idx2class = _dict2list(self.class2idx) config = {'option':self._option,'idx2class':self.idx2class} cPickle.dump(config, open(dest_file,'wb'), -1) # by default, mapping file will be not updated when we load the file def load(self, src_file, readonly=True): """ Load the :class:`ClassMapping` instance from the *src_file* file, which is a pickle file generated by :class:`cPickle`. If *readonly* is `True`, the :class:`ClassMapping` instance will be readonly. """ config = cPickle.load(open(src_file,'rb')) self._readonly = readonly self._option = config['option'] self.idx2class = config['idx2class'] self.class2idx = _list2dict(config['idx2class']) return self def toIdx(self, class_name): """ Return the internal class index for the given *class_name*. If :attr:`readonly` is `False`, :func:`toIdx` generates a new index for a unseen *class_name*; otherwise, :func:`toIdx` returns `None`. """ if class_name in self.class2idx: return self.class2idx[class_name] elif self._readonly: return None m = len(self.class2idx) self.class2idx[class_name] = m self.idx2class = None return m def toClassName(self, idx): """ Return the class label corresponding to the given class *idx*. .. note:: This method will reconstruct the mapping if :meth:`toIdx` has been called after the previous :meth:`toClassName`. Users should not call :meth:`toClassName` and :meth:`toIdx` rotatively. """ if self.idx2class is None: self.idx2class = _dict2list(self.class2idx) if idx == -1: return "**not in training**" if idx >= len(self.idx2class): raise KeyError('class idx ({0}) should be less than the number of classes ({0}).'.format(idx, len(self.idx2class))) return self.idx2class[idx] def rename(self, old_label, new_label): """ Rename the *old_label* to the *new_label*. *old_label* can be either a :class:`str` to denote the class label or an :class:`int` class to denote the class index. *new_label* should be a :class:`str` different from existing labels. """ if not isinstance(new_label, str): raise TypeError("new_label should be a str") if isinstance(old_label, int): old_label = toClassName(old_label) if isinstance(old_label, str): if old_label not in self.class2idx: raise ValueError('class {0} does not exist'.format(old_label)) else: raise TypeError("old label should be int (index) or str (name)") if new_label in self.class2idx: raise ValueError('class {0} already exists'.format(new_label)) self.class2idx[new_label] = self.class2idx.pop(old_label) self.idx2class = None class Text2svmConverter(object): """ :class:`Text2svmConverter` converts a text data to a LIBSVM-format data. (Refer to :ref:`dataset` for text data format.) 
It consists of three components: :class:`TextPreprocessor`, :class:`FeatureGenerator`, and :class:`ClassMapping`. The *option* can be any option of :class:`TextPreprocessor`, :class:`FeatureGenerator` and :class:`ClassMapping`. .. note:: Redundant options are ignored quietly. Users should pay attention to the spelling of the options. :class:`Text2svmConverter` can be read only if the flag is set. If it is not read only, the converter will be updated if new tokens or new class names are found. """ def __init__(self, option="", readonly=False): self._option = option self._readonly = readonly self._extra_nr_feats = [] self._extra_file_ids = [] text_prep_opt, feat_gen_opt, class_map_opt = self._parse_option(option) #: The :class:`TextPreprocessor` instance. self.text_prep = TextPreprocessor(text_prep_opt, readonly) #: The :class:`FeatureGenerator` instance. self.feat_gen = FeatureGenerator(feat_gen_opt, readonly) #: The :class:`ClassMapping` instance. self.class_map = ClassMapping(class_map_opt, readonly) def _parse_option(self, option): text_prep_opt, feat_gen_opt, class_map_opt = '', '', '' option = option.strip().split() i = 0 while i < len(option): if i+1 >= len(option): raise ValueError("{0} cannot be the last option.".format(option[i])) if type(option[i+1]) is not int and not option[i+1].isdigit(): raise ValueError("Invalid option {0} {1}.".format(option[i], option[i+1])) if option[i] in ['-stopword', '-stemming']: text_prep_opt = ' '.join([text_prep_opt, option[i], option[i+1]]) elif option[i] in ['-feature']: feat_gen_opt = ' '.join([feat_gen_opt, option[i], option[i+1]]) else: raise ValueError("Invalid option {0}.".format(option[i])) i+=2 return text_prep_opt, feat_gen_opt, class_map_opt def merge_svm_files(self, svm_file, extra_svm_files): """ Append extra feature files to *svm_file*. *extra_svm_files* is a class:`list` of extra feature files in LIBSVM-format. These features will be appended to *svm_file*. All files in *extra_svm_files* and *svm_file* should have the same number of instances. .. note:: The output file is *svm_file*. Therefore, the original *svm_file* will be overwritten without backup. """ if not isinstance(extra_svm_files, (tuple, list)): raise TypeError('extra_svm_files should be a tuple or a list') nr_files = len(extra_svm_files) if self._readonly: # test if len(self._extra_file_ids) != nr_files: raise ValueError('wrong number of extra svm files ({0} expected)'.format(len(self._extra_file_ids))) if nr_files == 0: return _merge_files([svm_file] + extra_svm_files, self._extra_nr_feats, False, svm_file) else: # train if nr_files == 0: return self._extra_file_ids = [os.path.basename(f) for f in extra_svm_files] self._extra_nr_feats = [0] * (nr_files + 1) _merge_files([svm_file] + extra_svm_files, self._extra_nr_feats, True, svm_file) def save(self, dest_dir): """ Save the model to a directory. 
""" config = {'text_prep':'text_prep.config.pickle', 'feat_gen':'feat_gen.config.pickle', 'class_map':'class_map.config.pickle', 'extra_nr_feats': 'extra_nr_feats.pickle', 'extra_file_ids': 'extra_file_ids.pickle'} if not os.path.exists(dest_dir): os.mkdir(dest_dir) self.text_prep.save(os.path.join(dest_dir,config['text_prep'])) self.feat_gen.save(os.path.join(dest_dir,config['feat_gen'])) self.class_map.save(os.path.join(dest_dir,config['class_map'])) cPickle.dump(self._extra_nr_feats, open(os.path.join(dest_dir, config['extra_nr_feats']), 'wb'), -1) cPickle.dump(self._extra_file_ids, open(os.path.join(dest_dir, config['extra_file_ids']), 'wb'), -1) def load(self, src_dir, readonly=True): """ Load the model from a directory. """ self._readonly = readonly config = {'text_prep':'text_prep.config.pickle', 'feat_gen':'feat_gen.config.pickle', 'class_map':'class_map.config.pickle', 'extra_nr_feats': 'extra_nr_feats.pickle', 'extra_file_ids': 'extra_file_ids.pickle'} self.text_prep.load(os.path.join(src_dir,config['text_prep']),readonly) self.feat_gen.load(os.path.join(src_dir,config['feat_gen']),readonly) self.class_map.load(os.path.join(src_dir,config['class_map']),readonly) self._extra_nr_feats = cPickle.load(open(os.path.join(src_dir, config['extra_nr_feats']), 'rb')) self._extra_file_ids = cPickle.load(open(os.path.join(src_dir, config['extra_file_ids']), 'rb')) return self def get_fidx2tok(self, fidx): """ Return the token by the corresponding feature index. """ bases = self._extra_nr_feats if len(bases) <= 0 or fidx <= bases[0]: idx2tok = self.text_prep.get_idx2tok fidx2ngram = self.feat_gen.get_fidx2ngram return [idx2tok(idx) for idx in fidx2ngram(fidx)] else : for i in range(len(self._extra_file_ids)): if fidx <= bases[i+1]: return ['{0}:{1}'.format(self._extra_file_ids[i], fidx - bases[i])] def toSVM(self, text, class_name = None, extra_svm_feats = []): """ Return an LIBSVM python interface instance by the *text*. Note that :attr:`feat_gen` will be updated if the converter is not read only and there are new tokens in the given text. *extra_svm_feats* is a list of feature sets, each of which is a 'class':`dict`. The length should be zero or the same as the extra svm files used. If the length is zero (i.e., an empty list), then the features returned as if there is no extra svm files. """ if len(extra_svm_feats) > 0 and self._readonly and len(self._extra_file_ids) != 0 and len(self._extra_file_ids) != len(extra_svm_feats): raise ValueError("wrong size of extra_svm_feats") text = self.text_prep.preprocess(text) feat = self.feat_gen.toSVM(text) bases = self._extra_nr_feats for i, extra_feat in enumerate(extra_svm_feats): for fid in extra_feat: if bases[i] + fid > bases[i+1]: continue feat[bases[i]+fid] = extra_feat[fid] if class_name is None: return feat return feat, self.getClassIdx(class_name) def getClassIdx(self, class_name): """ Return the class index by the class name. """ return self.class_map.toIdx(class_name) def getClassName(self, class_idx): """ Return the class name by the class index. """ return self.class_map.toClassName(class_idx) def __str__(self): return 'Text2svmConverter: ' + (self._option or 'default') def convert_text(text_src, converter, output=''): """ Convert a text data to a LIBSVM-format data. *text_src* is the path of the text data or a :class:`file`. (Refer to :ref:`dataset`). *output* is the output of the converted LIBSVM-format data. *output* can also be a file path or a :class:`file`. Note that if *text_src* or *output* is a :class:`file`, it will be closed. 
    *converter* is a :class:`Text2svmConverter` instance.
    """

    if output == "":
        output = text_src + '.svm'

    if isinstance(output, str):
        output = open(output, 'w')
    elif not isinstance(output, file):
        raise TypeError('output is a str or a file.')

    if isinstance(text_src, str):
        text_src = open(text_src)
    elif not isinstance(text_src, file):
        raise TypeError('text_src is a str or a file.')

    # add some error handling here!!
    for line in text_src:
        try:
            label, text = line.split('\t', 1)
        except Exception:
            label, text = '**ILL INST**', '**ILL INST**'
            # raise ValueError('cannot tokenize: ' + line)
        feat, label = converter.toSVM(text, label)
        feat = ''.join(' {0}:{1}'.format(f, feat[f]) for f in sorted(feat))
        if label is None:
            label = -1
        output.write(str(label) + ' ' + feat + '\n')
    output.close()
    text_src.close()
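
# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the file
# names are hypothetical). Input files contain one "label<TAB>text"
# instance per line, as described in :func:`convert_text`:
#
# >>> converter = Text2svmConverter('-stopword 1 -stemming 1 -feature 1')
# >>> convert_text('train.txt', converter, 'train.svm')
# >>> converter.save('train.converter')
# >>> test_converter = Text2svmConverter().load('train.converter')
# >>> convert_text('test.txt', test_converter, 'test.svm')
# ----------------------------------------------------------------------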
#!/usr/bin/env python # encoding: utf-8 from exp_tools import Trial from psychopy import event import numpy as np from scipy import stats class FlashTrial(Trial): """ Class that runs a single FlashTrial. Parent for FlashTrialSaccade and FlashTrialKeyboard, which should actually be initiadted (rather than this class). This class assumes that all visual objects (flashing circles, cues, feedback texts, target crosses, fixation cross) are attributes of the Session. This greatly improves speed, as these only have to be initiated once instead of at the start of every trial. A FlashTrial consists of the following phases: 0. Wait for scanner pulse 1. Pre-cue fixation cross (should be jittered) 2. Cue (arrow left/right) 3. Post-cue fixation cross (should be jittered) 4. Stimulus & Response (Flashing Circles are shown, participant makes eye movement / button press) 5. Feedback 6. ITI Parameters ---------- ID: int ID number of trial block_trial_ID: int Number of trial within the current block parameters: dict Dictionary containing parameters that specify what is drawn. Needs: 1. "correct_answer" (0 or 1), which specifies the direction of the stimulus (and response). 2. trial_evidence_arrays: a list of np.arrays, which contains 0 and 1s determining for every frame whether a circle needs to be shown or not. See FlashSession.prepare_trials for more details on how this works. 3. cue: str ['LEFT', 'RIGHT', 'NEUTRAL', 'SPD', 'ACC']. If left/right/neutral, an arrow is drawn. If SPD/ACC, an instruction is shown with SPD or ACC. phase_durations : list List specifying the durations of each phase of this trial. session: exp_tools.Session instance screen: psychopy.visual.Window instance tracker: pygaze.EyeTracker object Passed on to parent class Attributes ----------- response_type: int Specifies what kind of response was given: 0 = Too slow (no answer given at end of stimulus presentation) 1 = Correct 2 = Wrong 3 = Too fast 4 = Too early (early phase response) - does not exist in FlashTrial, but exists for compatibility with LocalizerPractice response_time: float Reaction time. Note that, for the child class FlashTrialSaccade, this is NOT ACCURATE! """ def __init__(self, ID, block_trial_ID=0, parameters={}, phase_durations=[], session=None, screen=None, tracker=None): super(FlashTrial, self).__init__(parameters=parameters, phase_durations=phase_durations, session=session, screen=screen, tracker=tracker) self.ID = ID self.block_trial_ID = block_trial_ID self.frame_n = -1 self.response = None self.draw_crosses = False self.response_type = 0 # 0 = no response, 1 = correct, 2 = wrong, 3 = too early self.feedback_type = 0 # 0 = too late, 1 = correct, 2 = wrong, 3 = too early self.stimulus = self.session.stimulus self.stimulus.trial_evidence_arrays = parameters['trial_evidence_arrays'] self.evidence_shown = np.repeat([0], self.session.n_flashers) self.total_increments = 0 self.cuetext = None self.late_responses = [] # keep track of number of TRs recorded. Only end trial if at least 2 TRs are recorded (3 TRs per trial). self.n_TRs = 0 # Initialize cue. 
This is a bit of a hacky workaround in order to be able to use this class for both conditions if 'cue' in parameters.keys(): self.cuetext = parameters['cue'] if self.cuetext in ['LEFT', 'RIGHT', 'NEU']: if self.cuetext == 'LEFT': self.cue = self.session.arrow_stimuli[0] elif self.cuetext == 'RIGHT': self.cue = self.session.arrow_stimuli[1] elif self.cuetext == 'NEU': self.cue = self.session.arrow_stimuli[2] else: self.cue = self.session.cue_object self.cue.text = self.cuetext else: cuetext = 'Warning! No cue passed to trial!' self.cue = self.session.cue_object self.cue.text = cuetext # Initialize times self.run_time = 0.0 self.t_time = self.fix1_time = self.cue_time = self.fix2_time = self.stimulus_time = self.post_stimulus_time \ = self.feedback_time = self.ITI_time = 0.0 self.response_time = None def draw(self): """ Draws the current frame """ if self.phase == 0: # waiting for scanner-time if self.block_trial_ID == 0: self.session.scanner_wait_screen.draw() # Only show this before the first trial else: self.session.fixation_cross.draw() if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() elif self.phase == 1: # Pre-cue fix cross self.session.fixation_cross.draw() if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() # if not os.path.isfile('screenshot_trial_fixcross.png'): # self.session.screen.flip() # self.session.screen.getMovieFrame() # self.session.screen.saveMovieFrames('screenshot_trial_fixcross.png') elif self.phase == 2: # Cue self.cue.draw() if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() # if not os.path.isfile('screenshot_trial_cue_' + self.cuetext + '.png'): # self.session.screen.flip() # self.session.screen.getMovieFrame() # self.session.screen.saveMovieFrames('screenshot_trial_cue_' + self.cuetext + '.png') elif self.phase == 3: # post-cue fix cross self.session.fixation_cross.draw() if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() elif self.phase == 4: # stimulus self.session.fixation_cross.draw() shown_opacities = self.stimulus.draw(frame_n=self.frame_n) self.evidence_shown = self.evidence_shown + shown_opacities self.total_increments += 1 if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() # if self.stimulus.trial_evidence_arrays[0][self.frame_n] == 1 and self.stimulus.trial_evidence_arrays[1][ # self.frame_n] == 1: # if not os.path.isfile('screenshot_trial_stim.png'): # self.session.screen.flip() # self.session.screen.getMovieFrame() # self.session.screen.saveMovieFrames('screenshot_trial_stim.png') elif self.phase == 5: # post-stimulus fill time self.session.fixation_cross.draw() self.stimulus.draw(frame_n=self.frame_n, continuous=False) # Continuous creates constant streams of flashes if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() elif self.phase == 6: # feedback self.session.feedback_text_objects[self.feedback_type].draw() if self.draw_crosses: self.session.crosses[0].draw() self.session.crosses[1].draw() # fb_name = self.session.feedback_text_objects[self.feedback_type].text # if not os.path.isfile('screenshot_trial_feedback_' + fb_name[0] + fb_name[-2] + '.png'): # self.session.screen.flip() # self.session.screen.getMovieFrame() # self.session.screen.saveMovieFrames('screenshot_trial_feedback_' + fb_name[0] + fb_name[-2] + '.png') elif self.phase == 7: self.session.fixation_cross.draw() if self.draw_crosses: self.session.crosses[0].draw() 
self.session.crosses[1].draw() super(FlashTrial, self).draw() def event(self): """ Event-checking is determined by the subclass (either check for keyboard responses or a saccade) """ pass def phase_forward(self): """ Call the superclass phase_forward method first, and reset the current frame number to 0 """ super(FlashTrial, self).phase_forward() self.phase_time = self.session.clock.getTime() self.frame_n = 0 def run(self): super(FlashTrial, self).run() while not self.stopped: self.frame_n += 1 self.run_time = self.session.clock.getTime() - self.start_time # Fixation cross: waits for scanner pulse! if self.phase == 0: self.t_time = self.session.clock.getTime() if self.session.scanner == 'n': self.phase_forward() # In phase 1, we show the cue if self.phase == 1: self.fix1_time = self.session.clock.getTime() if (self.fix1_time - self.t_time) > self.phase_durations[1]: self.phase_forward() # In phase 2, we show the cue if self.phase == 2: self.cue_time = self.session.clock.getTime() if (self.cue_time - self.fix1_time) > self.phase_durations[2]: self.phase_forward() # In phase 3, we show the fix cross again if self.phase == 3: self.fix2_time = self.session.clock.getTime() if (self.fix2_time - self.cue_time) > self.phase_durations[3]: self.phase_forward() # In phase 4, the stimulus is presented and the participant can respond if self.phase == 4: self.stimulus_time = self.session.clock.getTime() if (self.stimulus_time - self.fix2_time) > self.phase_durations[4]: self.phase_forward() # In phase 5, the stimulus is presented, participant has responded if self.phase == 5: self.post_stimulus_time = self.session.clock.getTime() # if self.session.scanner == 'n': # Outside the scanner we can just move on # self.phase_forward() # else: if (self.post_stimulus_time - self.fix2_time) > self.phase_durations[4]: # Use phase_durations[4]!! self.phase_forward() # Phase 6 reflects feedback if self.phase == 6: self.feedback_time = self.session.clock.getTime() if (self.feedback_time - self.post_stimulus_time) > self.phase_durations[6]: self.phase_forward() # keep track of timing # Finally, we show ITI if self.phase == 7: self.ITI_time = self.session.clock.getTime() if self.block_trial_ID == self.session.last_ID_this_block or self.session.scanner == 'n': # If this is the last trial of the block, show the FULL ITI print('Trial number %d (block trial %d)' % (self.ID, self.block_trial_ID)) print('Actively showing full ITI') if self.ITI_time - self.feedback_time > self.phase_durations[7]: self.stopped = True else: # Only allow stopping if at least 3 TRs are recorded (including the start-volume!) # The rest of the ITI is used for preparing the next trial. if self.n_TRs >= 3: self.stopped = True # events and draw if not self.stopped: self.event() self.draw() self.stop() # The next two classes handle reponses via keyboard or saccades class FlashTrialSaccade(FlashTrial): """ FlashTrial on which participants respond by eye movements Currently, can only handle TWO flashers / choice options! 
""" def __init__(self, ID, block_trial_ID=0, parameters={}, phase_durations=[], session=None, screen=None, tracker=None): super(FlashTrialSaccade, self).__init__(ID, block_trial_ID=block_trial_ID, parameters=parameters, phase_durations=phase_durations, session=session, screen=screen, tracker=tracker) self.correct_direction = parameters['correct_answer'] self.directions_verbose = ['left saccade', 'right saccade'] self.eye_movement_detected_in_phase = False self.eye_pos_start_phase = [None, None, None, None, None, None, None, None] self.draw_crosses = False def event(self): """ Checks for saccades as answers and keyboard responses for escape / scanner pulse """ # First check keyboard responses for kill signals and/or scanner pulses for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)): if len(ev) > 0: if ev in ['esc', 'escape']: self.events.append([-99, ev_time, 'escape: user killed session']) self.stopped = True self.session.stopped = True print('Session stopped!') elif ev == 'equal': self.events.append([-99, ev_time - self.start_time, 'user skipped trial']) self.stopped = True print('Trial canceled by user') elif ev == 't': # Scanner pulse self.events.append([99, ev_time, 'pulse']) self.n_TRs += 1 if self.phase == 0: self.phase_forward() # Make sure to get eye position at the start of each phase if self.eye_pos_start_phase[self.phase] is None: eyepos = self.session.eye_pos() distance_from_center = np.divide(np.sqrt((eyepos[0]-self.session.screen_pix_size[0]/2)**2 + (eyepos[1]-self.session.screen_pix_size[1]/2)**2), self.session.pixels_per_degree) if distance_from_center < 6: # If the distance from the center is less than 6 degrees, we are probably not in a blink. We can # accept the current position as the start position self.eye_pos_start_phase[self.phase] = self.session.eye_pos() else: # Distance from center > 8: subject is probably blinking. Do not accept, wait for next frame. return if not self.eye_movement_detected_in_phase: # Get eye position eyepos = self.session.eye_pos() eyepos_time = self.session.clock.getTime() # We calculate the distance travelled from the eye position at the start of this phase. center = self.eye_pos_start_phase[self.phase] distance_from_center = np.divide(np.sqrt((eyepos[0]-center[0])**2 + (eyepos[1]-center[1])**2), self.session.pixels_per_degree) if distance_from_center >= self.session.eye_travel_threshold: self.eye_movement_detected_in_phase = True # Is the final xpos left or right from initial position? 
left = 0, right = 1 saccade_direction = 0 if eyepos[0] < center[0] else 1 saccade_direction_verbose = self.directions_verbose[saccade_direction] if self.phase == 1: self.events.append([saccade_direction_verbose, eyepos_time, 'during fixation cross 1']) elif self.phase == 2: self.events.append([saccade_direction_verbose, eyepos_time, 'during cue']) elif self.phase == 3: self.events.append([saccade_direction_verbose, eyepos_time, 'during fixation cross 2']) elif self.phase == 4: self.response = saccade_direction_verbose self.response_time = eyepos_time - self.fix2_time # Check for early response if self.response_time < 0.150: # (seconds) self.feedback_type = 3 # Too fast if saccade_direction == self.correct_direction: self.response_type = 1 self.events.append([saccade_direction_verbose, eyepos_time, 'too fast response', 'correct']) else: self.response_type = 2 self.events.append([saccade_direction_verbose, eyepos_time, 'too fast response', 'incorrect']) else: # In SPEED conditions, make "too slow"-feedback probabilistic if self.cuetext == 'SPD' and np.random.binomial(n=1, p=stats.expon.cdf( self.response_time, loc=.75, scale=1 / 2.75)): self.feedback_type = 0 if saccade_direction == self.correct_direction: self.response_type = 1 self.events.append([saccade_direction_verbose, eyepos_time, 'response saccade', 'correct, too slow feedback']) else: self.response_type = 2 self.events.append([saccade_direction_verbose, eyepos_time, 'response saccade', 'incorrect, too slow feedback']) else: # If fast enough in speed condition, or in non-speed condition, normal feedback if saccade_direction == self.correct_direction: self.response_type = 1 self.feedback_type = 1 self.events.append([saccade_direction_verbose, eyepos_time, 'response saccade', 'correct']) else: self.response_type = 2 self.feedback_type = 2 self.events.append([saccade_direction_verbose, eyepos_time, 'response saccade', 'incorrect']) self.phase_forward() # End stimulus presentation when saccade is detected (this will be removed) elif self.phase == 5: self.late_responses.append((saccade_direction, eyepos_time - self.fix2_time)) self.events.append([saccade_direction_verbose, eyepos_time, 'during post-stimulus fill time']) # # This will probably always be detected: drift correction? elif self.phase == 6: self.late_responses.append((saccade_direction, eyepos_time - self.fix2_time)) self.events.append([saccade_direction_verbose, eyepos_time, 'during feedback']) # This will # probably always be detected: drift correction? elif self.phase == 7: self.late_responses.append((saccade_direction, eyepos_time - self.fix2_time)) self.events.append([saccade_direction_verbose, eyepos_time, 'during ITI']) # This will # probably always be detected: drift correction? 
def phase_forward(self): """ Do everything the superclass does, but also reset current phase eye movement detection """ super(FlashTrialSaccade, self).phase_forward() self.eye_movement_detected_in_phase = False class FlashTrialKeyboard(FlashTrial): """ FlashTrial on which participants respond with a keypress """ def __init__(self, ID, block_trial_ID=0, parameters={}, phase_durations=[], session=None, screen=None, tracker=None): super(FlashTrialKeyboard, self).__init__(ID, block_trial_ID=block_trial_ID, parameters=parameters, phase_durations=phase_durations, session=session, screen=screen, tracker=tracker) self.correct_answer = parameters['correct_answer'] self.correct_key = self.session.response_keys[self.correct_answer] def event(self): """ Checks for the keyboard responses only """ for i, (ev, ev_time) in enumerate(event.getKeys(timeStamped=self.session.clock)): # ev_time is the event timestamp relative to the Session Clock if len(ev) > 0: if ev in ['esc', 'escape']: self.events.append([-99, ev_time, 'escape: user killed session']) self.stopped = True self.session.stopped = True print('Session stopped!') elif ev == 'equal': self.events.append([-99, ev_time - self.start_time, 'user skipped trial']) self.stopped = True print('Trial canceled by user') elif ev in self.session.response_keys: if self.phase == 1: self.events.append([ev, ev_time, 'early keypress during fix cross 1']) elif self.phase == 2: self.events.append([ev, ev_time, 'early keypress during cue']) elif self.phase == 3: self.events.append([ev, ev_time, 'early keypress during fix cross 2']) elif self.phase == 4: if i == 0: # First keypress self.response = ev self.response_time = ev_time - self.fix2_time # Check for early response if self.response_time < 0.150: self.feedback_type = 3 # Too fast if ev == self.correct_key: self.response_type = 1 self.events.append([ev, ev_time, 'too fast response', 'correct', self.response_time]) else: self.response_type = 2 self.events.append([ev, ev_time, 'too fast response', 'incorrect', self.response_time]) else: # In SPEED conditions, make "too slow"-feedback probabilistic if self.cuetext == 'SPD' and np.random.binomial(n=1, p=stats.expon.cdf( self.response_time, loc=.75, scale=1/2.75)): self.feedback_type = 0 if ev == self.correct_key: self.response_type = 1 self.events.append([ev, ev_time, 'first keypress', 'correct, too slow feedback', self.response_time]) else: self.response_type = 2 self.events.append([ev, ev_time, 'first keypress', 'incorrect, ' 'too slow feedback', self.response_time]) else: if ev == self.correct_key: self.response_type = 1 self.feedback_type = 1 self.events.append([ev, ev_time, 'first keypress', 'correct', self.response_time]) else: self.response_type = 2 self.feedback_type = 2 self.events.append([ev, ev_time, 'first keypress', 'incorrect', self.response_time]) self.phase_forward() else: self.events.append([ev, ev_time, 'late keypress (during stimulus)']) elif self.phase == 5: self.late_responses.append((ev, ev_time-self.fix2_time)) self.events.append([ev, ev_time, 'late keypress (during post-stimulus fill time)']) elif self.phase == 6: self.late_responses.append((ev, ev_time-self.fix2_time)) self.events.append([ev, ev_time, 'late keypress (during feedback)']) elif self.phase == 7: self.late_responses.append((ev, ev_time-self.fix2_time)) self.events.append([ev, ev_time, 'late keypress (during ITI)']) elif ev == 't': # Scanner pulse self.events.append([99, ev_time, 'pulse']) self.n_TRs += 1 if self.phase == 0: self.phase_forward()
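# --------------------------------------------------------------------------
# Hedged illustration (not part of the original experiment code). Both
# FlashTrialSaccade and FlashTrialKeyboard make the "too slow" feedback in
# SPD (speed) cue conditions probabilistic via
# np.random.binomial(n=1, p=stats.expon.cdf(rt, loc=.75, scale=1/2.75)).
# The helper below simply exposes that probability for a given reaction time
# so the shape of the rule is easy to inspect; it is a sketch and is not
# used by the trial classes.

def p_too_slow_feedback(rt, loc=0.75, scale=1 / 2.75):
    """Probability of showing 'too slow' feedback for reaction time rt (seconds)."""
    return stats.expon.cdf(rt, loc=loc, scale=scale)

# The probability is ~0 for rt <= 0.75 s and rises towards 1 for slower
# responses, e.g.:
# for rt in (0.5, 0.8, 1.0, 1.5):
#     print(rt, p_too_slow_feedback(rt))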
##################################################################################### # # Copyright (c) Crossbar.io Technologies GmbH # # Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g. # you have purchased a commercial license), the license terms below apply. # # Should you enter into a separate license agreement after having received a copy of # this software, then the terms of such license agreement replace the terms below at # the time at which such license agreement becomes effective. # # In case a separate license agreement ends, and such agreement ends without being # replaced by another separate license agreement, the license terms below apply # from the time at which said agreement ends. # # LICENSE TERMS # # This program is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License, version 3, as published by the # Free Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <https://www.gnu.org/licenses/gpl-3.0.en.html>. # ##################################################################################### import sys import os import socket import json import yaml import asyncio import click import pygments from pygments import highlight, lexers, formatters from prompt_toolkit.history import FileHistory from prompt_toolkit.styles import style_from_dict from prompt_toolkit.token import Token import txaio from autobahn.websocket.util import parse_url from autobahn.wamp.types import ComponentConfig from autobahn.wamp.exception import ApplicationError from autobahn.asyncio.wamp import ApplicationRunner from cbsh.util import (style_crossbar, style_finished_line, style_error, style_ok, localnow) from cbsh import client, repl, config, key, __version__ _DEFAULT_CFC_URL = u'wss://fabric.crossbario.com/ws' class WebSocketURL(click.ParamType): """ WebSocket URL validator. 
""" name = 'WebSocket URL' def __init__(self): click.ParamType.__init__(self) def convert(self, value, param, ctx): try: parse_url(value) except Exception as e: self.fail(style_error(str(e))) else: return value def _prompt_for_url(): """ Prompt user for CFC URL to create a new ~/.cbf/config.ini file """ value = click.prompt( 'Crossbar.io Fabric Center URL', type=WebSocketURL(), default=_DEFAULT_CFC_URL) return value # default configuration stored in $HOME/.cbf/config.ini _DEFAULT_CONFIG = """[default] url={url} privkey=default.priv pubkey=default.pub """ class Application(object): OUTPUT_FORMAT_PLAIN = 'plain' OUTPUT_FORMAT_JSON = 'json' OUTPUT_FORMAT_JSON_COLORED = 'json-color' OUTPUT_FORMAT_YAML = 'yaml' OUTPUT_FORMAT_YAML_COLORED = 'yaml-color' OUTPUT_FORMAT = [ OUTPUT_FORMAT_PLAIN, OUTPUT_FORMAT_JSON, OUTPUT_FORMAT_JSON_COLORED, OUTPUT_FORMAT_YAML, OUTPUT_FORMAT_YAML_COLORED ] OUTPUT_VERBOSITY_SILENT = 'silent' OUTPUT_VERBOSITY_RESULT_ONLY = 'result-only' OUTPUT_VERBOSITY_NORMAL = 'normal' OUTPUT_VERBOSITY_EXTENDED = 'extended' OUTPUT_VERBOSITY = [ OUTPUT_VERBOSITY_SILENT, OUTPUT_VERBOSITY_RESULT_ONLY, OUTPUT_VERBOSITY_NORMAL, OUTPUT_VERBOSITY_EXTENDED ] # list of all available Pygments styles (including ones loaded from plugins) # https://www.complang.tuwien.ac.at/doc/python-pygments/styles.html OUTPUT_STYLE = list(pygments.styles.get_all_styles()) WELCOME = """ Welcome to {title} v{version} Press Ctrl-C to cancel the current command, and Ctrl-D to exit the shell. Type "help" to get help. Try TAB for auto-completion. """.format( title=style_crossbar('Crossbar.io Shell'), version=__version__) CONNECTED = """ Connection: url : {url} authmethod : {authmethod} realm : {realm} authid : {authid} authrole : {authrole} session : {session} """ def __init__(self): self.current_resource_type = None # type: str self.current_resource = None self.session = None self._history = FileHistory('.cbsh-history') self._output_format = Application.OUTPUT_FORMAT_JSON_COLORED self._output_verbosity = Application.OUTPUT_VERBOSITY_NORMAL self._style = style_from_dict({ Token.Toolbar: '#fce94f bg:#333333', # User input. # Token: '#ff0066', # Prompt. # Token.Username: '#884444', # Token.At: '#00aa00', # Token.Colon: '#00aa00', # Token.Pound: '#00aa00', # Token.Host: '#000088 bg:#aaaaff', # Token.Path: '#884444 underline', }) self._output_style = 'fruity' def _load_profile(self, dotdir=None, profile=None): dotdir = dotdir or u'~/.cbf' profile = profile or u'default' cbf_dir = os.path.expanduser(dotdir) if not os.path.isdir(cbf_dir): os.mkdir(cbf_dir) click.echo(u'Created new local user directory: {}'.format( style_ok(cbf_dir))) config_path = os.path.join(cbf_dir, 'config.ini') if not os.path.isfile(config_path): with open(config_path, 'w') as f: url = _prompt_for_url() f.write(_DEFAULT_CONFIG.format(url=url)) click.echo(u'Created new local user configuration: {}'.format( style_ok(config_path))) config_obj = config.UserConfig(config_path) profile_obj = config_obj.profiles.get(profile, None) if not profile_obj: raise click.ClickException('no such profile: "{}"'.format(profile)) else: click.echo('Active user profile: {}'.format(style_ok(profile))) privkey_path = os.path.join( cbf_dir, profile_obj.privkey or u'{}.priv'.format(profile)) # noqa: W503 pubkey_path = os.path.join(cbf_dir, profile_obj.pubkey or u'default.pub') # noqa: W503 key_obj = key.UserKey(privkey_path, pubkey_path) return key_obj, profile_obj def set_output_format(self, output_format): """ Set command output format. 
:param output_format: The verbosity to use. :type output_format: str """ if output_format in Application.OUTPUT_FORMAT: self._output_format = output_format else: raise Exception( 'invalid value {} for output_format (not in {})'.format( output_format, Application.OUTPUT_FORMAT)) def set_output_verbosity(self, output_verbosity): """ Set command output verbosity. :param output_verbosity: The verbosity to use. :type output_verbosity: str """ if output_verbosity in Application.OUTPUT_VERBOSITY: self._output_verbosity = output_verbosity else: raise Exception( 'invalid value {} for output_verbosity (not in {})'.format( output_verbosity, Application.OUTPUT_VERBOSITY)) def set_output_style(self, output_style): """ Set pygments syntax highlighting style ("theme") to be used for command result output. :param output_style: The style to use. :type output_style: str """ if output_style in Application.OUTPUT_STYLE: self._output_style = output_style else: raise Exception( 'invalid value {} for output_style (not in {})'.format( output_style, Application.OUTPUT_STYLE)) def error(self, msg): click.echo() def format_selected(self): return u'{} -> {}.\n'.format(self.current_resource_type, self.current_resource) def print_selected(self): click.echo(self.format_selected()) def selected(self): return self.current_resource_type, self.current_resource def __str__(self): return u'Application(current_resource_type={}, current_resource={})'.format( self.current_resource_type, self.current_resource) async def run_command(self, cmd): try: result = await cmd.run(self.session) except Exception as e: print(e) if self._output_format in [ Application.OUTPUT_FORMAT_JSON, Application.OUTPUT_FORMAT_JSON_COLORED ]: json_str = json.dumps( result.result, separators=(', ', ': '), sort_keys=True, indent=4, ensure_ascii=False) if self._output_format == Application.OUTPUT_FORMAT_JSON_COLORED: console_str = highlight( json_str, lexers.JsonLexer(), formatters.Terminal256Formatter(style=self._output_style)) else: console_str = json_str elif self._output_format in [ Application.OUTPUT_FORMAT_YAML, Application.OUTPUT_FORMAT_YAML_COLORED ]: yaml_str = yaml.safe_dump(result.result) if self._output_format == Application.OUTPUT_FORMAT_YAML_COLORED: console_str = highlight( yaml_str, lexers.YamlLexer(), formatters.Terminal256Formatter(style=self._output_style)) else: console_str = yaml_str elif self._output_format == Application.OUTPUT_FORMAT_PLAIN: console_str = u'{}'.format(result) else: # should not arrive here raise Exception( 'internal error: unprocessed value "{}" for output format'. 
format(self._output_format)) # output command metadata (such as runtime) if self._output_verbosity == Application.OUTPUT_VERBOSITY_SILENT: pass else: # output result of command click.echo(console_str) if self._output_verbosity == Application.OUTPUT_VERBOSITY_RESULT_ONLY or self._output_format == Application.OUTPUT_FORMAT_PLAIN: pass elif self._output_verbosity == Application.OUTPUT_VERBOSITY_NORMAL: if result.duration: click.echo( style_finished_line(u'Finished in {} ms.'.format( result.duration))) else: click.echo(style_finished_line(u'Finished successfully.')) elif self._output_verbosity == Application.OUTPUT_VERBOSITY_EXTENDED: if result.duration: click.echo( style_finished_line(u'Finished in {} ms on {}.'.format( result.duration, localnow()))) else: click.echo( style_finished_line( u'Finished successfully on {}.'.format( localnow()))) else: # should not arrive here raise Exception('internal error') def _get_bottom_toolbar_tokens(self, cli): toolbar_str = ' Current resource path: {}'.format( self.format_selected()) return [ (Token.Toolbar, toolbar_str), ] def _get_prompt_tokens(self, cli): return [ (Token.Username, 'john'), (Token.At, '@'), (Token.Host, 'localhost'), (Token.Colon, ':'), (Token.Path, '/user/john'), (Token.Pound, '# '), ] def run_context(self, ctx): if False: click.echo('Logging started ..') txaio.start_logging(level='debug', out=sys.stdout) # cfg contains the command lines options and arguments that # click collected for us cfg = ctx.obj cmd = ctx.command.name if cmd not in [u'auth', u'shell']: raise click.ClickException( '"{}" command can only be run in shell'.format(cmd)) click.echo('Crossbar.io Shell: {}'.format( style_ok('v{}'.format(__version__)))) # load user profile and key for given profile name key, profile = self._load_profile(profile=cfg.profile) # set the Fabric URL to connect to from the profile or default url = profile.url or u'wss://fabric.crossbario.com' # users always authenticate with the user_id from the key, which # filled from the email the user provided authid = key.user_id # the realm can be set from command line, env var, the profile # or can be None, which means the user will be joined to the global # Crossbar.io Fabric users realm (u'com.crossbario.fabric') realm = cfg.realm or profile.realm or None # the authrole can be set from command line, env var, the profile # or can be None, in which case the role is chosen automatically # from the list of roles the user us authorized for authrole = cfg.role or profile.role or None # this will be fired when the ShellClient below actually has joined # the respective realm on Crossbar.io Fabric (either the global users # realm, or a management realm the user has a role on) ready = asyncio.Future() # type: ignore extra = { # these are forward on the actual client connection u'authid': authid, u'authrole': authrole, # these are native Python object and only used client-side u'key': key.key, u'done': ready } # for the "auth" command, forward additional command line options if ctx.command.name == u'auth': # user provides authentication code to verify extra[u'activation_code'] = cfg.code # user requests sending of a new authentication code (while an old one is still pending) extra[u'request_new_activation_code'] = cfg.new_code # this is the WAMP ApplicationSession that connects the CLI to Crossbar.io Fabric self.session = client.ShellClient(ComponentConfig(realm, extra)) loop = asyncio.get_event_loop() runner = ApplicationRunner(url, realm) # this might fail eg when the transport connection cannot be established 
try: click.echo('Connecting to {} ..'.format(url)) _res = runner.run(self.session, start_loop=False) except socket.gaierror as e: click.echo( style_error('Could not connect to {}: {}'.format(url, e))) loop.close() sys.exit(1) exit_code = 0 try: # "connected" will complete when the WAMP session to Fabric # has been established and is ready click.echo('Entering event loop ..') transport, protocol = loop.run_until_complete(_res) # click.echo('transport, protocol: {} {}'.format(transport, protocol)) # loop.run_forever() session_details = loop.run_until_complete(ready) # click.echo('SessionDetails: {}'.format(session_details)) except ApplicationError as e: # some ApplicationErrors are actually signaling progress # in the authentication flow, some are real errors if e.error.startswith(u'fabric.auth-failed.'): error = e.error.split(u'.')[2] message = e.args[0] if error == u'new-user-auth-code-sent': click.echo('\nThanks for registering! {}'.format(message)) click.echo( style_ok( 'Please check your inbox and run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>.\n' )) elif error == u'registered-user-auth-code-sent': click.echo('\nWelcome back! {}'.format(message)) click.echo( style_ok( 'Please check your inbox and run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>.\n' )) elif error == u'pending-activation': click.echo() click.echo(style_ok(message)) click.echo() click.echo( 'Tip: to activate, run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>"' ) click.echo( 'Tip: you can request sending a new code with "cbsh auth --new-code"' ) click.echo() elif error == u'no-pending-activation': exit_code = 1 click.echo() click.echo(style_error('{} [{}]'.format(message, e.error))) click.echo() elif error == u'email-failure': exit_code = 1 click.echo() click.echo(style_error('{} [{}]'.format(message, e.error))) click.echo() elif error == u'invalid-activation-code': exit_code = 1 click.echo() click.echo(style_error('{} [{}]'.format(message, e.error))) click.echo() else: # we should not arrive here! otherwise, add a new clause above and handle the situation exit_code = 1 click.echo( style_error( 'Internal error: unprocessed error type {}:'. format(error))) click.echo(style_error(message)) elif e.error.startswith(u'crossbar.error.'): error = e.error.split(u'.')[2] message = e.args[0] if error == u'invalid_configuration': click.echo() click.echo(style_error('{} [{}]'.format(message, e.error))) click.echo() else: # we should not arrive here! otherwise, add a new clause above and handle the situation exit_code = 1 click.echo( style_error( 'Internal error: unprocessed error type {}:'. format(error))) click.echo(style_error(message)) else: click.echo(style_error('{}'.format(e))) exit_code = 1 raise else: if cmd == u'auth': self._print_welcome(url, session_details) elif cmd == 'shell': click.clear() try: self._print_welcome(url, session_details) except Exception as e: click.echo('err: {}'.format(e)) prompt_kwargs = { 'history': self._history, } shell_task = loop.create_task( repl.repl( ctx, get_bottom_toolbar_tokens=self. 
_get_bottom_toolbar_tokens, # get_prompt_tokens=self._get_prompt_tokens, style=self._style, prompt_kwargs=prompt_kwargs)) loop.run_until_complete(shell_task) else: # should not arrive here, as we checked cmd in the beginning raise Exception('logic error') finally: loop.close() sys.exit(exit_code) def _print_welcome(self, url, session_details): click.echo(self.WELCOME) click.echo( self.CONNECTED.format( url=url, realm=style_crossbar(session_details.realm) if session_details else None, authmethod=session_details.authmethod if session_details else None, authid=style_crossbar(session_details.authid) if session_details else None, authrole=style_crossbar(session_details.authrole) if session_details else None, session=session_details.session if session_details else None))
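# --------------------------------------------------------------------------
# Hedged sketch (not part of the original cbsh module): a minimal standalone
# version of the JSON colorizing done in Application.run_command, using the
# same pygments calls imported at the top of this file. 'fruity' matches the
# default output style set in Application.__init__; any other pygments style
# name would work as well.

def _colorize_json_sketch(value, style='fruity'):
    json_str = json.dumps(
        value,
        separators=(', ', ': '),
        sort_keys=True,
        indent=4,
        ensure_ascii=False)
    return highlight(
        json_str,
        lexers.JsonLexer(),
        formatters.Terminal256Formatter(style=style))

# Example (commented out so importing this module stays side-effect free):
# print(_colorize_json_sketch({'realm': 'com.crossbario.fabric', 'session': 12345}))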
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Routines to list, select, and load masters and builders in master.cfg. These routines help to load up master.cfgs in all directories, then locate masters and builders among those loaded. This is intended to simplify master selection and processing in frontend and build analysis tools, especially the buildrunner. When run standalone, the script acts as example usage which lists masters and builders of a selected master. """ # pylint: disable=C0323 import contextlib import os import optparse import sys import traceback BASE_DIR = os.path.abspath(os.path.join( os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)) # Install the common Infra environment (main only). if __name__ == '__main__': sys.path.insert(0, os.path.join(BASE_DIR, 'scripts')) print sys.path[0] import common.env common.env.Install() from common import chromium_utils @contextlib.contextmanager def TemporaryMasterPasswords(): all_paths = [os.path.join(BASE_DIR, 'site_config', '.bot_password')] all_paths.extend(os.path.join(path, '.apply_issue_password') for path in chromium_utils.ListMasters()) created_paths = [] for path in all_paths: if not os.path.exists(path): try: with open(path, 'w') as f: f.write('reindeer flotilla\n') created_paths.append(path) except OSError: pass try: yield finally: for path in created_paths: try: os.remove(path) except OSError: print 'WARNING: Could not remove %s!' % path def ExecuteConfig(canonical_config): """Execute a master.cfg file and return its dictionary. WARNING: executing a master.cfg loads modules into the python process. Attempting to load another master.cfg with similar module names will cause subtle (and not-so-subtle) errors. It is recommended to only call this once per process. """ localDict = {'basedir': os.path.dirname(canonical_config), '__file__': canonical_config} f = open(canonical_config, 'r') mycwd = os.getcwd() os.chdir(localDict['basedir']) beforepath = list(sys.path) # make a 'backup' of it sys.path.append(localDict['basedir']) try: exec f in localDict return localDict finally: sys.path = beforepath os.chdir(mycwd) f.close() def LoadConfig(basedir, config_file='master.cfg', suppress=False): """Load and execute a master.cfg file from a directory. This is a nicer wrapper around ExecuteConfig which will trap IO or execution errors and provide an informative message if one occurs. WARNING: executing a master.cfg loads modules into the python process. Attempting to load another master.cfg with similar module names will cause subtle (and not-so-subtle) errors. It is recommended to only call this once per process. 
""" canonical_basedir = os.path.abspath(os.path.expanduser(basedir)) canonical_config = os.path.join(canonical_basedir, config_file) with TemporaryMasterPasswords(): try: localdict = ExecuteConfig(canonical_config) except IOError as err: errno, strerror = err filename = err.filename print >>sys.stderr, 'error %d executing %s: %s: %s' % (errno, canonical_config, strerror, filename) print >>sys.stderr, traceback.format_exc() return None except Exception: if not suppress: print >>sys.stderr, ('error while parsing %s: ' % canonical_config) print >>sys.stderr, traceback.format_exc() return None return localdict def PrettyPrintInternal(items, columns, title, notfound, spacing=4): """Display column-based information from an array of hashes.""" if not items: print print notfound return itemdata = {} for col in columns: itemdata[col] = [s[col] if col in s else 'n/a' for s in items] lengths = {} for col in columns: datalen = max([len(x) for x in itemdata[col]]) lengths[col] = max(len(col), datalen) maxwidth = sum([lengths[col] for col in columns]) + ( spacing * (len(columns) - 1)) spac = ' ' * spacing print print title print print spac.join([col.rjust(lengths[col]) for col in columns]) print '-' * maxwidth for i in range(len(items)): print spac.join([itemdata[col][i].rjust(lengths[col]) for col in columns]) def PrettyPrintBuilders(builders, master): """Pretty-print a list of builders from a master.""" columns = ['name', 'slavename', 'category'] title = 'outputting builders for: %s' % master notfound = 'no builders found.' builders = Denormalize(builders, 'slavenames', 'slavename', columns) PrettyPrintInternal(builders, columns, title, notfound) def PrettyPrintMasters(masterpairs): masters = [] for mastername, path in masterpairs: abspath = os.path.abspath(path) relpath = os.path.relpath(path) shortpath = abspath if len(abspath) < len(relpath) else relpath master = {} master['mastername'] = mastername master['path'] = shortpath masters.append(master) columns = ['mastername', 'path'] title = 'listing available masters:' notfound = 'no masters found.' PrettyPrintInternal(masters, columns, title, notfound) def Denormalize(items, over, newcol, wanted): """Splits a one-to-many hash into many one-to-ones. PrettyPrintInternal needs a list of many builders with one slave, this will properly format the data as such. items: a list of dictionaries to be denormalized over: the column (key) over which to separate items newcol: the new name of 'over' in the new item wanted: the desired keys in the new item Example: take some diners with different meals: [{'name': 'diner1', 'toasts': ['rye', 'wheat'], eggs:['scrambled']}, {'name': 'diner2', 'toasts': ['rye', 'white'], eggs:['fried']}] Let's say you only cared about your diner/toast options. If you denormalized with over=toasts, newcol=toast, wanted=['name', toast'], you'd get: [{'name': 'diner1', 'toast': 'rye'}, {'name': 'diner1', 'toast': 'wheat'}, {'name': 'diner2', 'toast': 'rye'}, {'name': 'diner2', 'toast': 'white'}] """ def arrayify(possible_array): """Convert 'string' into ['string']. Leave actual arrays alone.""" if isinstance(possible_array, basestring): return [possible_array] return possible_array wanted_cols = set(wanted) wanted_cols.discard(newcol) result = [] for row in items: for element in arrayify(row[over]): newitem = {} # Only bring over the requested columns, instead of all. 
for col in wanted_cols: if col in row: newitem[col] = row[col] newitem[newcol] = element result.append(newitem) return result def OnlyGetOne(seq, key, source): """Confirm a sequence only contains one unique value and return it. This is used when searching for a specific builder. If a match turns up multiple results that all share the same builder, then select that builder. """ def uniquify(seq): return list(frozenset(seq)) res = uniquify([s[key] for s in seq]) if len(res) > 1: print >>sys.stderr, 'error: %s too many %ss:' % (source, key) for r in res: print ' ', r return None elif not res: print 'error: %s zero %ss' % (source, key) return None else: return res[0] def GetMasters(include_public=True, include_internal=True): """Return a pair of (mastername, path) for all masters found.""" # note: ListMasters uses master.cfg hardcoded as part of its search path def parse_master_name(masterpath): """Returns a mastername from a pathname to a master.""" _, tail = os.path.split(masterpath) sep = '.' hdr = 'master' chunks = tail.split(sep) if not chunks or chunks[0] != hdr or len(chunks) < 2: raise ValueError('unable to parse mastername from path! (%s)' % tail) return sep.join(chunks[1:]) return [(parse_master_name(m), m) for m in chromium_utils.ListMasters(include_public=include_public, include_internal=include_internal)] def ChooseMaster(searchname): """Given a string, find all masters and pick the master that matches.""" masters = GetMasters() masternames = [] master_lookup = {} for mn, path in masters: master = {} master['mastername'] = mn master_lookup[mn] = path masternames.append(master) candidates = [mn for mn in masternames if mn['mastername'] == searchname] errstring = 'string \'%s\' matches' % searchname master = OnlyGetOne(candidates, 'mastername', errstring) if not master: return None return master_lookup[master] def SearchBuilders(builders, spec): """Return a list of builders which match what is specified in 'spec'. 'spec' can be a hash with a key of either 'name', 'slavename', or 'either'. This allows for flexibility in how a frontend gets information from the user. """ if 'builder' in spec: return [b for b in builders if b['name'] == spec['builder']] elif 'hostname' in spec: return [b for b in builders if b['slavename'] == spec['hostname']] else: return [b for b in builders if (b['name'] == spec['either']) or (b['slavename'] == spec['either'])] def GetBuilderName(builders, keyval): """Return unique builder name from a list of builders.""" errstring = 'string \'%s\' matches' % keyval return OnlyGetOne(builders, 'name', errstring) def ChooseBuilder(builders, spec): """Search through builders matching 'spec' and return it.""" denormedbuilders = Denormalize(builders, 'slavenames', 'slavename', ['name']) candidates = SearchBuilders(denormedbuilders, spec) buildername = GetBuilderName(candidates, spec.values()[0]) if not buildername: return None builder = [b for b in builders if b['name'] == buildername][0] if 'hostname' in spec: builder['slavename'] = spec['hostname'] elif 'either' in spec and spec['either'] in builder['slavenames']: builder['slavename'] = spec['either'] else: # User selected builder instead of slavename, so just pick the first # slave the builder has. builder['slavename'] = builder['slavenames'][0] return builder def main(): prog_desc = 'List all masters or builders within a master.' 
  usage = '%prog [master] [builder or slave]'
  parser = optparse.OptionParser(usage=(usage + '\n\n' + prog_desc))
  (_, args) = parser.parse_args()

  if len(args) > 2:
    parser.error("Too many arguments specified!")

  masterpairs = GetMasters()

  if len(args) < 1:
    PrettyPrintMasters(masterpairs)
    return 0

  master_path = ChooseMaster(args[0])
  if not master_path:
    return 2

  config = LoadConfig(master_path)
  if not config:
    return 2
  mastername = config['BuildmasterConfig']['properties']['mastername']
  builders = config['BuildmasterConfig']['builders']

  if len(args) < 2:
    PrettyPrintBuilders(builders, mastername)
    return 0

  my_builder = ChooseBuilder(builders, {'either': args[1]})
  if not my_builder:
    return 2

  print "Matched %s/%s." % (mastername, my_builder['name'])
  return 0


if __name__ == '__main__':
  sys.exit(main())
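# --------------------------------------------------------------------------
# Hedged example (not part of the original script): the diner illustration
# from the Denormalize docstring, wrapped in a helper so it can be run from
# an interactive session after importing this module. The data below is the
# hypothetical example from that docstring, not real master/builder data.

def _denormalize_diner_example():
  diners = [
      {'name': 'diner1', 'toasts': ['rye', 'wheat'], 'eggs': ['scrambled']},
      {'name': 'diner2', 'toasts': ['rye', 'white'], 'eggs': ['fried']},
  ]
  # Expected result:
  # [{'name': 'diner1', 'toast': 'rye'}, {'name': 'diner1', 'toast': 'wheat'},
  #  {'name': 'diner2', 'toast': 'rye'}, {'name': 'diner2', 'toast': 'white'}]
  return Denormalize(diners, over='toasts', newcol='toast',
                     wanted=['name', 'toast'])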
#!/usr/bin/env python3 # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import yaml import os import sys from html.parser import HTMLParser import argparse class DependencyReportParser(HTMLParser): # This class parses the given html file to find all dependency reports under "Project dependencies" # and "Projection transparent dependencies" sections. # The parser works based on the state machine and its state is updated whenever it reads a new tag. # The state changes as below: # # none -> h2_start -> project_dependencies_start -> h3_start -> compile_start -> table_start -> row_start -> th_start / td_start -> th_end / td_end -> row_end -> table_end -> compile_end -> h3_end -> project_dependencies_end -> h2_end -> none attr_index = 0 group_id = None artifact_id = None version = None classifier = None dep_type = None license = None state = "none" dep_to_license = None compatible_license_names = None include_classifier = False druid_module_name = None def __init__(self, druid_module_name, compatible_license_names): HTMLParser.__init__(self) self.state = "none" self.druid_module_name = druid_module_name self.compatible_license_names = compatible_license_names def parse(self, f): self.dep_to_license = {} self.feed(f.read()) return self.dep_to_license def handle_starttag(self, tag, attrs): # print("current: {}, start tag: {}, attrs:{} ".format(self.state, tag, attrs)) if self.state == "none": if tag == "h2": self.state = "h2_start" if self.state == "h2_start": if tag == "a": for attr in attrs: if attr[0] == "name" and (attr[1] == "Project_Dependencies" or attr[1] == "Project_Transitive_Dependencies"): self.state = "project_dependencies_start" self.include_classifier = False if self.state == "h2_end": if tag == "h3": self.state = "h3_start" if self.state == "h3_start": if tag == "a": for attr in attrs: if attr[0] == "name" and attr[1] == "compile": self.state = "compile_start" if self.state == "h3_end": if tag == "table": self.state = "table_start" if self.state == "table_start": if tag == "tr": self.state = "row_start" self.clear_attr() if self.state == "row_end": if tag == "tr": self.state = "row_start" self.clear_attr() if self.state == "row_start": if tag == "td": self.state = "td_start" elif tag == "th": self.state = "th_start" if self.state == "th_end": if tag == "th": self.state = "th_start" if self.state == "td_end": if tag == "td": self.state = "td_start" def handle_endtag(self, tag): # print("current: {}, end tag: {}".format(self.state, tag)) if self.state == "project_dependencies_start": if tag == "a": self.state = "project_dependencies_end" if self.state == "h2_start": if tag == "h2": self.state = "h2_end" if self.state == "project_dependencies_end": if tag == "h2": self.state = "h2_end" if self.state == "compile_start": if tag == "a": self.state = "compile_end" if 
self.state == "compile_end": if tag == "h3": self.state = "h3_end" if self.state == "table_start": if tag == "table": self.state = "none" if self.state == "td_start": if tag == "td": self.state = "td_end" self.attr_index = self.attr_index + 1 if self.state == "th_start": if tag == "th": self.state = "th_end" if self.state == "row_start": if tag == "tr": self.state = "row_end" if self.state == "th_end": if tag == "tr": self.state = "row_end" if self.state == "td_end": if tag == "tr": self.state = "row_end" # print(json.dumps({"groupId": self.group_id, "artifactId": self.artifact_id, "version": self.version, "classifier": self.classifier, "type": self.dep_type, "license": self.license})) if self.group_id.find("org.apache.druid") < 0: self.dep_to_license[get_dep_key(self.group_id, self.artifact_id, self.version)] = (self.license, self.druid_module_name) if self.state == "row_end": if tag == "table": self.state = "none" def handle_data(self, data): if self.state == "td_start": self.set_attr(data) elif self.state == "th_start": if data.lower() == "classifier": self.include_classifier = True def clear_attr(self): self.group_id = None self.artifact_id = None self.version = None self.classifier = None self.dep_type = None self.license = None self.attr_index = 0 def set_attr(self, data): #print("set data: {}".format(data)) if self.attr_index == 0: self.group_id = data elif self.attr_index == 1: self.artifact_id = data elif self.attr_index == 2: self.version = get_version_string(data) elif self.attr_index == 3: if self.include_classifier: self.classifier = data else: self.dep_type = data elif self.attr_index == 4: if self.include_classifier: self.dep_type = data else: self.set_license(data) elif self.attr_index == 5: if self.include_classifier: self.set_license(data) else: raise Exception("Unknown attr_index [{}]".format(self.attr_index)) else: raise Exception("Unknown attr_index [{}]".format(self.attr_index)) def set_license(self, data): if data.upper().find("GPL") < 0: if self.license != 'Apache License version 2.0': self.license = self.compatible_license_names[data] def print_log_to_stderr(string): print(string, file=sys.stderr) def build_compatible_license_names(): compatible_licenses = {} compatible_licenses['Apache License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['The Apache Software License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache 2'] = 'Apache License version 2.0' compatible_licenses['Apache License 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache Software License - Version 2.0'] = 'Apache License version 2.0' compatible_licenses['The Apache License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License Version 2'] = 'Apache License version 2.0' compatible_licenses['Apache License v2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License, version 2.0'] = 'Apache License version 2.0' compatible_licenses['Public Domain'] = 'Public Domain' compatible_licenses['BSD-2-Clause License'] = 'BSD-2-Clause License' compatible_licenses['BSD-2-Clause'] = 'BSD-2-Clause License' compatible_licenses['BSD-3-Clause License'] = 'BSD-3-Clause License' compatible_licenses['New BSD license'] = 'BSD-3-Clause License' compatible_licenses['BSD'] = 'BSD-3-Clause License' 
compatible_licenses['The BSD License'] = 'BSD-3-Clause License' compatible_licenses['BSD licence'] = 'BSD-3-Clause License' compatible_licenses['BSD License'] = 'BSD-3-Clause License' compatible_licenses['BSD-like'] = 'BSD-3-Clause License' compatible_licenses['The BSD 3-Clause License'] = 'BSD-3-Clause License' compatible_licenses['Revised BSD'] = 'BSD-3-Clause License' compatible_licenses['New BSD License'] = 'BSD-3-Clause License' compatible_licenses['3-Clause BSD License'] = 'BSD-3-Clause License' compatible_licenses['ICU License'] = 'ICU License' compatible_licenses['SIL Open Font License 1.1'] = 'SIL Open Font License 1.1' compatible_licenses['CDDL 1.1'] = 'CDDL 1.1' compatible_licenses['CDDL/GPLv2+CE'] = 'CDDL 1.1' compatible_licenses['CDDL + GPLv2 with classpath exception'] = 'CDDL 1.1' compatible_licenses['CDDL License'] = 'CDDL 1.1' compatible_licenses['Eclipse Public License 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['The Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Eclipse Public License - Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Eclipse Distribution License 1.0'] = 'Eclipse Distribution License 1.0' compatible_licenses['Eclipse Distribution License - v 1.0'] = 'Eclipse Distribution License 1.0' compatible_licenses['EDL 1.0'] = 'Eclipse Distribution License 1.0' compatible_licenses['Mozilla Public License Version 2.0'] = 'Mozilla Public License Version 2.0' compatible_licenses['Mozilla Public License, Version 2.0'] = 'Mozilla Public License Version 2.0' compatible_licenses['Creative Commons Attribution 2.5'] = 'Creative Commons Attribution 2.5' compatible_licenses['Creative Commons CC0'] = 'Creative Commons CC0' compatible_licenses['CC0'] = 'Creative Commons CC0' compatible_licenses['The MIT License'] = 'MIT License' compatible_licenses['MIT License'] = 'MIT License' compatible_licenses['-'] = '-' return compatible_licenses def get_dep_key(group_id, artifact_id, version): return (group_id, artifact_id, version) def get_version_string(version): if type(version) == str: return version else: return str(version) def find_druid_module_name(dirpath): ext_start = dirpath.find("/ext/") if ext_start > 0: # Found an extension subpath = dirpath[(len("/ext/") + ext_start):] ext_name_end = subpath.find("/") if ext_name_end < 0: raise Exception("Can't determine extension name from [{}]".format(dirpath)) else: return subpath[0:ext_name_end] else: # Druid core return "core" def check_licenses(license_yaml, dependency_reports_root): # Build a dictionary to facilitate comparing reported licenses and registered ones. # These dictionaries are the mapping of (group_id, artifact_id, version) to license_name. # Build reported license dictionary. 
reported_dep_to_licenses = {} compatible_license_names = build_compatible_license_names() for dirpath, dirnames, filenames in os.walk(dependency_reports_root): for filename in filenames: if filename == "dependencies.html": full_path = os.path.join(dirpath, filename) # Determine if it's druid core or an extension druid_module_name = find_druid_module_name(dirpath) print_log_to_stderr("Parsing {}".format(full_path)) with open(full_path) as report_file: parser = DependencyReportParser(druid_module_name, compatible_license_names) reported_dep_to_licenses.update(parser.parse(report_file)) if len(reported_dep_to_licenses) == 0: raise Exception("No dependency reports are found") print_log_to_stderr("Found {} reported licenses\n".format(len(reported_dep_to_licenses))) # Build registered license dictionary. registered_dep_to_licenses = {} skipping_licenses = {} with open(license_yaml) as registry_file: licenses_list = list(yaml.load_all(registry_file)) for license in licenses_list: if 'libraries' in license: for library in license['libraries']: if type(library) is not dict: raise Exception("Expected dict but got {}[{}]".format(type(library), library)) if len(library) > 1: raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library)) for group_id, artifact_id in library.items(): if 'version' not in license: raise Exception("version is missing in {}".format(license)) if 'license_name' not in license: raise Exception("name is missing in {}".format(license)) if 'skip_dependency_report_check' in license and license['skip_dependency_report_check']: if 'version' not in license: version = "-" else: version = get_version_string(license['version']) skipping_licenses[get_dep_key(group_id, artifact_id, version)] = license else: registered_dep_to_licenses[get_dep_key(group_id, artifact_id, get_version_string(license['version']))] = compatible_license_names[license['license_name']] if len(registered_dep_to_licenses) == 0: raise Exception("No registered licenses are found") # Compare licenses in registry and those in dependency reports. mismatched_licenses = [] missing_licenses = [] unchecked_licenses = [] # Iterate through registered licenses and check if its license is same with the reported one. for key, registered_license in registered_dep_to_licenses.items(): if key in reported_dep_to_licenses: # key is (group_id, artifact_id, version) reported_license_druid_module = reported_dep_to_licenses[key] reported_license = reported_license_druid_module[0] druid_module = reported_license_druid_module[1] if reported_license is not None and reported_license != "-" and reported_license != registered_license: group_id = key[0] artifact_id = key[1] version = key[2] mismatched_licenses.append((druid_module, group_id, artifact_id, version, reported_license, registered_license)) # If we find any mismatched license, stop immediately. if len(mismatched_licenses) > 0: print_log_to_stderr("Error: found {} mismatches between reported licenses and registered licenses".format(len(mismatched_licenses))) for mismatched_license in mismatched_licenses: print_log_to_stderr("druid_module: {}, groupId: {}, artifactId: {}, version: {}, reported_license: {}, registered_license: {}".format(mismatched_license[0], mismatched_license[1], mismatched_license[2], mismatched_license[3], mismatched_license[4], mismatched_license[5])) print_log_to_stderr("") # Let's find missing licenses, which are reported but missing in the registry. 
    for key, reported_license_druid_module in reported_dep_to_licenses.items():
        if reported_license_druid_module[0] != "-" and key not in registered_dep_to_licenses and key not in skipping_licenses:
            missing_licenses.append((reported_license_druid_module[1], key[0], key[1], key[2], reported_license_druid_module[0]))

    if len(missing_licenses) > 0:
        print_log_to_stderr("Error: found {} missing licenses. These licenses are reported, but missing in the registry".format(len(missing_licenses)))
        for missing_license in missing_licenses:
            print_log_to_stderr("druid_module: {}, groupId: {}, artifactId: {}, version: {}, license: {}".format(missing_license[0], missing_license[1], missing_license[2], missing_license[3], missing_license[4]))
        print_log_to_stderr("")

    # Let's find unchecked licenses, which are registered but missing in the report.
    # These licenses should be checked manually.
    for key, registered_license in registered_dep_to_licenses.items():
        if key not in reported_dep_to_licenses:
            unchecked_licenses.append((key[0], key[1], key[2], registered_license))
        elif reported_dep_to_licenses[key][0] == "-":
            unchecked_licenses.append((key[0], key[1], key[2], registered_license))

    if len(unchecked_licenses) > 0:
        print_log_to_stderr("Warn: found {} unchecked licenses. These licenses are registered, but not found in dependency reports.".format(len(unchecked_licenses)))
        print_log_to_stderr("These licenses must be checked manually.")
        for unchecked_license in unchecked_licenses:
            print_log_to_stderr("groupId: {}, artifactId: {}, version: {}, reported_license: {}".format(unchecked_license[0], unchecked_license[1], unchecked_license[2], unchecked_license[3]))
        print_log_to_stderr("")

    if len(mismatched_licenses) > 0 or len(missing_licenses) > 0:
        sys.exit(1)


if __name__ == "__main__":
    try:
        parser = argparse.ArgumentParser(description='Check and generate license file.')
        parser.add_argument('license_yaml', metavar='<path to license.yaml>', type=str)
        parser.add_argument('dependency_reports_root', metavar='<root to maven dependency reports>', type=str)

        args = parser.parse_args()
        license_yaml = args.license_yaml
        dependency_reports_root = args.dependency_reports_root

        check_licenses(license_yaml, dependency_reports_root)
    except KeyboardInterrupt:
        print('Interrupted, closing.')
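# --------------------------------------------------------------------------
# Hedged example (not part of the original script): how the compatible
# license-name table above is used to normalize the free-form license
# strings found in Maven dependency reports. The sample strings below are
# illustrative only.

def _normalize_license_example():
    names = build_compatible_license_names()
    # Different spellings map onto one canonical name...
    assert names['Apache 2.0'] == names['The Apache License, Version 2.0']
    # ...and an unknown name raises KeyError, which is how unexpected or
    # incompatible license strings surface during the check.
    try:
        names['Some Unknown License 9.9']
    except KeyError:
        return 'unknown license name rejected'
    return 'unexpectedly accepted'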
"""Command-line arguments for tests. This enables some common command-line arguments: ``--runslow`` Run tests flagged with @pytest.fixture.slow ``--runperf`` Run tests flagged with @pytest.fixture.performance ``--ingest-v2`` Run tests flagged with @pytest.fixture.ingest_v2 ``--redis-address`` hostname:portnumber for a Redis instance ``--profile outfile`` log profiling data per test to outfile ``--profile-truncate`` clobber any old profiling out file at start of run ``--elastic-address`` hostname:portnumber for an Elasticsearch instance ----- This software is released under an MIT/X11 open source license. Copyright 2012-2014 Diffeo, Inc. """ from __future__ import absolute_import import logging import os.path import pstats import time try: import cProfile as profile except: import profile import pytest from six import StringIO def pytest_addoption(parser): group = parser.getgroup('test selection') group.addoption('--runslow', action='store_true', help='run known-slow tests') group.addoption('--runperf', action='store_true', help='run performance tests') group.addoption('--runload', action='store_true', help='run load tests') group.addoption('--run-integration', action='store_true', help='run integration tests') group = parser.getgroup('external systems') group.addoption('--ingest-v2', metavar='URL', help='URL for Streamcorpus v2 ingest service') group.addoption('--elastic-address', metavar='HOST:PORT', help='location of an ElasticSearch database server') group.addoption('--redis-address', metavar='HOST:PORT', help='location of a Redis database server') group.addoption('--third-dir', metavar='THIRD-DIR', help='location of a third party software') group.addoption('--external-data', metavar='EXTERNAL_DATA', help='location of a external data resources') group = parser.getgroup('general') group.addoption('--profile', metavar='path', help='run tests with profiling, write results to file') group.addoption('--profile-truncate', action='store_true', default=False, help='when profiling, truncate output file at start') group.addoption('--log-level', metavar='DEBUG|INFO|WARNING|ERROR|FATAL', default='DEBUG', help='Control logging level of tests.') def pytest_configure(config): # Declare our markers config.addinivalue_line('markers', 'slow: mark tests as taking longer than ' 'your average unit test') config.addinivalue_line('markers', 'performance: mark tests as performance tests') config.addinivalue_line('markers', 'load: mark tests as load tests') config.addinivalue_line('markers', 'integration: mark tests as integration tests') if config.getoption('profile_truncate'): profile_outpath = config.getoption('profile') if profile_outpath: fout = open(profile_outpath, 'w') fout.truncate(0) fout.close() def pytest_runtest_setup(item): pairs = [ ('slow', 'slow'), ('perf', 'performance'), ('load', 'load'), ('-integration', 'integration'), ] for option, marker in pairs: run = '--run{0}'.format(option) if marker in item.keywords and not item.config.getoption(run): pytest.skip('need {0} option to run'.format(run)) profile_outpath = item.config.getoption('profile') if profile_outpath: prof = profile.Profile() prof.enable() item.profiler = prof logger = logging.getLogger() level = getattr(logging, item.config.getoption('log_level')) assert level is not None logger.setLevel(level) def pytest_runtest_teardown(item, nextitem): profile_outpath = item.config.getoption('profile') if profile_outpath: prof = getattr(item, 'profiler', None) if prof: prof.disable() # build blob to write one-shot to beat thread interleaving. 
fout = StringIO() fout.write( '\n{0} {1}\n'.format(time.strftime('%Y%m%d_%H%M%S'), item)) ps = pstats.Stats(prof, stream=fout) ps.sort_stats('cumulative', 'calls') ps.print_stats() fout.write('\n\tfunction callers\n') ps.print_callers() fout.write('\n\tfunction callees\n') ps.print_callees() ff = open(profile_outpath, 'a') ff.write(fout.getvalue()) ff.close() @pytest.fixture(scope='session') def redis_address(request): '''network address for a redis server to be used by tests ''' addr = request.config.getoption('--redis-address') if addr is None: host = os.environ.get('REDIS_PORT_6379_TCP_ADDR', None) port = os.environ.get('REDIS_PORT_6379_TCP_PORT', None) if host and port: addr = host + ':' + port assert addr is not None, \ "this test requires --redis-address on the command line" return addr @pytest.fixture(scope='session') def elastic_address(request): 'network address for an ElasticSearch server to be used by tests' addr = request.config.getoption('--elastic-address') if addr is None: host = os.environ.get('ELASTICSEARCH_PORT_9200_TCP_ADDR', None) port = os.environ.get('ELASTICSEARCH_PORT_9200_TCP_PORT', None) if host and port: addr = host + ':' + port assert addr is not None, \ "this test requires --elastic-address on the command line" return addr @pytest.fixture(scope='session') def ingest_v2(request): 'URL for Streamcorpus v2 ingest service' url = request.config.getoption('--ingest-v2') if url is None: url = os.environ.get('STREAMCORPUS_INGEST_URL', None) # returning None means tests marked with this will not run. --jrf # Uhh, not, it doesn't. Skip it explicitly to get desired behavior. ---AG if url is None: pytest.skip('set --ingest-v2 or env var STREAMCORPUS_INGEST_URL') return url @pytest.fixture(scope='session') def third_dir(request): '''directory containing third-party software, such as NLP taggers ''' third_dir = request.config.getoption('--third-dir') assert third_dir is not None, \ "this test requires --third_dir on the command line" assert os.path.exists(third_dir), "Directory must exist" return third_dir @pytest.fixture(scope='session') def external_data(request): '''directory containing external data for tests ''' external_data = request.config.getoption('--external-data') if external_data is None: external_data = os.environ.get('EXTERNAL_DATA', None) if external_data is None: pytest.skip('set --external-data or env var EXTERNAL_DATA') else: assert os.path.exists(external_data), \ 'Could not find external_data=%r' % external_data return external_data
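# A small, hypothetical example (not shipped with this conftest) of how the
# markers and fixtures above are meant to be used from a test module: tests
# marked 'slow' are skipped unless --runslow is given, and redis_address is
# resolved from --redis-address or the REDIS_PORT_6379_TCP_* variables.
@pytest.mark.slow
def test_example_slow_roundtrip(redis_address):
    host, port = redis_address.split(':')
    assert host and int(port) > 0


@pytest.mark.integration
def test_example_elastic_available(elastic_address):
    assert ':' in elastic_address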
""" logger/message.py: Python logger base for expfactory Copyright (c) 2016-2022 Vanessa Sochat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import sys from .spinner import Spinner ABORT = -5 CRITICAL = -4 ERROR = -3 WARNING = -2 LOG = -1 INFO = 1 CUSTOM = 1 QUIET = 0 VERBOSE = VERBOSE1 = 2 VERBOSE2 = 3 VERBOSE3 = 4 DEBUG = 5 TEST = 5 PURPLE = "\033[95m" YELLOW = "\033[93m" RED = "\033[91m" DARKRED = "\033[31m" CYAN = "\033[36m" class ExpfactoryMessage: def __init__(self, MESSAGELEVEL=None): self.level = get_logging_level() self.history = [] self.errorStream = sys.stderr self.outputStream = sys.stdout self.colorize = self.useColor() self.colors = { ABORT: DARKRED, CRITICAL: RED, ERROR: RED, WARNING: YELLOW, LOG: PURPLE, CUSTOM: PURPLE, DEBUG: CYAN, TEST: PURPLE, "OFF": "\033[0m", # end sequence "CYAN": CYAN, "PURPLE": PURPLE, "RED": RED, "DARKRED": DARKRED, "YELLOW": YELLOW, } # Colors -------------------------------------------- def useColor(self): """useColor will determine if color should be added to a print. 
Will check if being run in a terminal, and if has support for asci""" COLORIZE = get_user_color_preference() if COLORIZE is not None: return COLORIZE streams = [self.errorStream, self.outputStream] for stream in streams: if not hasattr(stream, "isatty"): return False if not stream.isatty(): return False return True def addColor(self, level, text): """addColor to the prompt (usually prefix) if terminal supports, and specified to do so""" if self.colorize: if level in self.colors: text = "%s%s%s" % (self.colors[level], text, self.colors["OFF"]) return text def emitError(self, level): """determine if a level should print to stderr, includes all levels but INFO and QUIET""" if level in [ ABORT, ERROR, WARNING, VERBOSE, VERBOSE1, VERBOSE2, VERBOSE3, DEBUG, ]: return True return False def emitOutput(self, level): """determine if a level should print to stdout only includes INFO""" if level in [LOG, INFO]: return True return False def isEnabledFor(self, messageLevel): """check if a messageLevel is enabled to emit a level""" if messageLevel <= self.level: return True return False def emit(self, level, message, prefix=None, color=None): """emit is the main function to print the message optionally with a prefix :param level: the level of the message :param message: the message to print :param prefix: a prefix for the message """ if color is None: color = level if prefix is not None: prefix = self.addColor(color, "%s " % (prefix)) else: prefix = "" message = self.addColor(color, message) # Add the prefix message = "%s%s" % (prefix, message) if not message.endswith("\n"): message = "%s\n" % message # If the level is quiet, only print to error if self.level == QUIET: pass # Otherwise if in range print to stdout and stderr elif self.isEnabledFor(level): if self.emitError(level): self.write(self.errorStream, message) else: self.write(self.outputStream, message) # Add all log messages to history self.history.append(message) def write(self, stream, message): """write will write a message to a stream, first checking the encoding """ if isinstance(message, bytes): message = message.decode("utf-8") stream.write(message) def get_logs(self, join_newline=True): """'get_logs will return the complete history, joined by newline (default) or as is. 
""" if join_newline: return "\n".join(self.history) return self.history def show_progress( self, iteration, total, length=40, min_level=0, prefix=None, carriage_return=True, suffix=None, symbol=None, ): """create a terminal progress bar, default bar shows for verbose+ :param iteration: current iteration (Int) :param total: total iterations (Int) :param length: character length of bar (Int) """ percent = 100 * (iteration / float(total)) progress = int(length * iteration // total) if suffix is None: suffix = "" if prefix is None: prefix = "Progress" # Download sizes can be imperfect, setting carriage_return to False # and writing newline with caller cleans up the UI if percent >= 100: percent = 100 progress = length if symbol is None: symbol = "=" if progress < length: bar = symbol * progress + "|" + "-" * (length - progress - 1) else: bar = symbol * progress + "-" * (length - progress) # Only show progress bar for level > min_level if self.level > min_level: percent = "%5s" % ("{0:.1f}").format(percent) output = "\r" + prefix + " |%s| %s%s %s" % (bar, percent, "%", suffix) sys.stdout.write(output), if iteration == total and carriage_return: sys.stdout.write("\n") sys.stdout.flush() # Logging ------------------------------------------ def abort(self, message): self.emit(ABORT, message, "ABORT") def critical(self, message): self.emit(CRITICAL, message, "CRITICAL") def error(self, message): self.emit(ERROR, message, "ERROR") def warning(self, message): self.emit(WARNING, message, "WARNING") def log(self, message): self.emit(LOG, message, "LOG") def custom(self, prefix, message, color=PURPLE): self.emit(CUSTOM, message, prefix, color) def info(self, message): self.emit(INFO, message) def newline(self): return self.info("") def verbose(self, message): self.emit(VERBOSE, message, "VERBOSE") def verbose1(self, message): self.emit(VERBOSE, message, "VERBOSE1") def verbose2(self, message): self.emit(VERBOSE2, message, "VERBOSE2") def verbose3(self, message): self.emit(VERBOSE3, message, "VERBOSE3") def debug(self, message): self.emit(DEBUG, message, "DEBUG") def test(self, message): self.emit(TEST, message, "TEST") def is_quiet(self): """is_quiet returns true if the level is under 1""" if self.level < 1: return False return True # Terminal ------------------------------------------ def table(self, rows, col_width=2): """table will print a table of entries. If the rows is a dictionary, the keys are interpreted as column names. if not, a numbered list is used. """ labels = [str(x) for x in range(1, len(rows) + 1)] if isinstance(rows, dict): labels = list(rows.keys()) rows = list(rows.values()) for row in rows: label = labels.pop(0) label = label.ljust(col_width) message = "\t".join(row) self.custom(prefix=label, message=message) def get_logging_level(): """get_logging_level will configure a logging to standard out based on the user's selected level, which should be in an environment variable called MESSAGELEVEL. if MESSAGELEVEL is not set, the maximum level (5) is assumed (all messages). 
""" try: level = int(os.environ.get("MESSAGELEVEL", INFO)) except ValueError: level = os.environ.get("MESSAGELEVEL", INFO) if level == "CRITICAL": return CRITICAL elif level == "ABORT": return ABORT elif level == "ERROR": return ERROR elif level == "WARNING": return WARNING elif level == "LOG": return LOG elif level == "INFO": return INFO elif level == "QUIET": return QUIET elif level == "TEST": return TEST elif level.startswith("VERBOSE"): return VERBOSE3 elif level == "LOG": return LOG elif level == "DEBUG": return DEBUG return level def get_user_color_preference(): COLORIZE = os.environ.get("EXPFACTORY_COLORIZE", None) if COLORIZE is not None: COLORIZE = convert2boolean(COLORIZE) return COLORIZE def convert2boolean(arg): """convert2boolean is used for environmental variables that must be returned as boolean""" if not isinstance(arg, bool): return arg.lower() in ("yes", "true", "t", "1", "y") return arg ExpfactoryMessage.spinner = Spinner() bot = ExpfactoryMessage()
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union import pkg_resources import google.auth # type: ignore import google.api_core from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.debugger_v2.types import controller try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-debugger-client", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() class Controller2Transport(abc.ABC): """Abstract transport class for Controller2.""" AUTH_SCOPES = ( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger", ) DEFAULT_HOST: str = "clouddebugger.googleapis.com" def __init__( self, *, host: str = DEFAULT_HOST, credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, **kwargs, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" self._host = host scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: raise core_exceptions.DuplicateCredentialArgs( "'credentials_file' and 'credentials' are mutually exclusive" ) if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) # If the credentials are service account credentials, then always try to use self signed JWT. if ( always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access") ): credentials = credentials.with_always_use_jwt_access(True) # Save the credentials. self._credentials = credentials def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.register_debuggee: gapic_v1.method.wrap_method( self.register_debuggee, default_timeout=600.0, client_info=client_info, ), self.list_active_breakpoints: gapic_v1.method.wrap_method( self.list_active_breakpoints, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=client_info, ), self.update_active_breakpoint: gapic_v1.method.wrap_method( self.update_active_breakpoint, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=client_info, ), } def close(self): """Closes resources associated with the transport. .. warning:: Only call this method if the transport is NOT shared with other clients - this may cause errors in other clients! """ raise NotImplementedError() @property def register_debuggee( self, ) -> Callable[ [controller.RegisterDebuggeeRequest], Union[ controller.RegisterDebuggeeResponse, Awaitable[controller.RegisterDebuggeeResponse], ], ]: raise NotImplementedError() @property def list_active_breakpoints( self, ) -> Callable[ [controller.ListActiveBreakpointsRequest], Union[ controller.ListActiveBreakpointsResponse, Awaitable[controller.ListActiveBreakpointsResponse], ], ]: raise NotImplementedError() @property def update_active_breakpoint( self, ) -> Callable[ [controller.UpdateActiveBreakpointRequest], Union[ controller.UpdateActiveBreakpointResponse, Awaitable[controller.UpdateActiveBreakpointResponse], ], ]: raise NotImplementedError() __all__ = ("Controller2Transport",)
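# A hedged sketch (not part of the generated client library) of a minimal
# in-memory double built on the abstract base above, handy for unit tests that
# only need the transport's property surface. The returned responses are empty
# placeholder messages, not real Cloud Debugger behaviour.
class _InMemoryController2Transport(Controller2Transport):
    def __init__(self, **kwargs):
        # Anonymous credentials keep the base constructor from calling
        # google.auth.default() in environments with no ambient credentials.
        super().__init__(
            credentials=ga_credentials.AnonymousCredentials(), **kwargs
        )

    def close(self):
        pass  # nothing to release for the in-memory double

    @property
    def register_debuggee(self):
        return lambda request, **kw: controller.RegisterDebuggeeResponse()

    @property
    def list_active_breakpoints(self):
        return lambda request, **kw: controller.ListActiveBreakpointsResponse()

    @property
    def update_active_breakpoint(self):
        return lambda request, **kw: controller.UpdateActiveBreakpointResponse()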
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests of the links interface of RPC Framework.""" # unittest is referenced from specification in this module. import abc import unittest # pylint: disable=unused-import from grpc.framework.common import test_constants from grpc.framework.interfaces.links import links from grpc.framework.interfaces.links import test_utilities def at_least_n_payloads_received_predicate(n): def predicate(ticket_sequence): payload_count = 0 for ticket in ticket_sequence: if ticket.payload is not None: payload_count += 1 if n <= payload_count: return True else: return False return predicate def terminated(ticket_sequence): return ticket_sequence and ticket_sequence[-1].termination is not None _TRANSMISSION_GROUP = 'test.Group' _TRANSMISSION_METHOD = 'TestMethod' class TransmissionTest(object): """Tests ticket transmission between two connected links. This class must be mixed into a unittest.TestCase that implements the abstract methods it provides. """ __metaclass__ = abc.ABCMeta # This is a unittest.TestCase mix-in. # pylint: disable=invalid-name @abc.abstractmethod def create_transmitting_links(self): """Creates two connected links for use in this test. Returns: Two links.Links, the first of which will be used on the invocation side of RPCs and the second of which will be used on the service side of RPCs. """ raise NotImplementedError() @abc.abstractmethod def destroy_transmitting_links(self, invocation_side_link, service_side_link): """Destroys the two connected links created for this test. Args: invocation_side_link: The link used on the invocation side of RPCs in this test. service_side_link: The link used on the service side of RPCs in this test. """ raise NotImplementedError() @abc.abstractmethod def create_invocation_initial_metadata(self): """Creates a value for use as invocation-side initial metadata. Returns: A metadata value appropriate for use as invocation-side initial metadata or None if invocation-side initial metadata transmission is not supported by the links under test. 
""" raise NotImplementedError() @abc.abstractmethod def create_invocation_terminal_metadata(self): """Creates a value for use as invocation-side terminal metadata. Returns: A metadata value appropriate for use as invocation-side terminal metadata or None if invocation-side terminal metadata transmission is not supported by the links under test. """ raise NotImplementedError() @abc.abstractmethod def create_service_initial_metadata(self): """Creates a value for use as service-side initial metadata. Returns: A metadata value appropriate for use as service-side initial metadata or None if service-side initial metadata transmission is not supported by the links under test. """ raise NotImplementedError() @abc.abstractmethod def create_service_terminal_metadata(self): """Creates a value for use as service-side terminal metadata. Returns: A metadata value appropriate for use as service-side terminal metadata or None if service-side terminal metadata transmission is not supported by the links under test. """ raise NotImplementedError() @abc.abstractmethod def create_invocation_completion(self): """Creates values for use as invocation-side code and message. Returns: An invocation-side code value and an invocation-side message value. Either or both may be None if invocation-side code and/or invocation-side message transmission is not supported by the links under test. """ raise NotImplementedError() @abc.abstractmethod def create_service_completion(self): """Creates values for use as service-side code and message. Returns: A service-side code value and a service-side message value. Either or both may be None if service-side code and/or service-side message transmission is not supported by the links under test. """ raise NotImplementedError() @abc.abstractmethod def assertMetadataTransmitted(self, original_metadata, transmitted_metadata): """Asserts that transmitted_metadata contains original_metadata. Args: original_metadata: A metadata object used in this test. transmitted_metadata: A metadata object obtained after transmission through the system under test. Raises: AssertionError: if the transmitted_metadata object does not contain original_metadata. """ raise NotImplementedError() def group_and_method(self): """Returns the group and method used in this test case. Returns: A pair of the group and method used in this test case. """ return _TRANSMISSION_GROUP, _TRANSMISSION_METHOD def serialize_request(self, request): """Serializes a request value used in this test case. Args: request: A request value created by this test case. Returns: A bytestring that is the serialization of the given request. """ return request def deserialize_request(self, serialized_request): """Deserializes a request value used in this test case. Args: serialized_request: A bytestring that is the serialization of some request used in this test case. Returns: The request value encoded by the given bytestring. """ return serialized_request def serialize_response(self, response): """Serializes a response value used in this test case. Args: response: A response value created by this test case. Returns: A bytestring that is the serialization of the given response. """ return response def deserialize_response(self, serialized_response): """Deserializes a response value used in this test case. Args: serialized_response: A bytestring that is the serialization of some response used in this test case. Returns: The response value encoded by the given bytestring. 
""" return serialized_response def _assert_is_valid_metadata_payload_sequence( self, ticket_sequence, payloads, initial_metadata, terminal_metadata): initial_metadata_seen = False seen_payloads = [] terminal_metadata_seen = False for ticket in ticket_sequence: if ticket.initial_metadata is not None: self.assertFalse(initial_metadata_seen) self.assertFalse(seen_payloads) self.assertFalse(terminal_metadata_seen) self.assertMetadataTransmitted(initial_metadata, ticket.initial_metadata) initial_metadata_seen = True if ticket.payload is not None: self.assertFalse(terminal_metadata_seen) seen_payloads.append(ticket.payload) if ticket.terminal_metadata is not None: self.assertFalse(terminal_metadata_seen) self.assertMetadataTransmitted(terminal_metadata, ticket.terminal_metadata) terminal_metadata_seen = True self.assertSequenceEqual(payloads, seen_payloads) def _assert_is_valid_invocation_sequence( self, ticket_sequence, group, method, payloads, initial_metadata, terminal_metadata, termination): self.assertLess(0, len(ticket_sequence)) self.assertEqual(group, ticket_sequence[0].group) self.assertEqual(method, ticket_sequence[0].method) self._assert_is_valid_metadata_payload_sequence( ticket_sequence, payloads, initial_metadata, terminal_metadata) self.assertIs(termination, ticket_sequence[-1].termination) def _assert_is_valid_service_sequence( self, ticket_sequence, payloads, initial_metadata, terminal_metadata, code, message, termination): self.assertLess(0, len(ticket_sequence)) self._assert_is_valid_metadata_payload_sequence( ticket_sequence, payloads, initial_metadata, terminal_metadata) self.assertEqual(code, ticket_sequence[-1].code) self.assertEqual(message, ticket_sequence[-1].message) self.assertIs(termination, ticket_sequence[-1].termination) def setUp(self): self._invocation_link, self._service_link = self.create_transmitting_links() self._invocation_mate = test_utilities.RecordingLink() self._service_mate = test_utilities.RecordingLink() self._invocation_link.join_link(self._invocation_mate) self._service_link.join_link(self._service_mate) def tearDown(self): self.destroy_transmitting_links(self._invocation_link, self._service_link) def testSimplestRoundTrip(self): """Tests transmission of one ticket in each direction.""" invocation_operation_id = object() invocation_payload = b'\x07' * 1023 timeout = test_constants.LONG_TIMEOUT invocation_initial_metadata = self.create_invocation_initial_metadata() invocation_terminal_metadata = self.create_invocation_terminal_metadata() invocation_code, invocation_message = self.create_invocation_completion() service_payload = b'\x08' * 1025 service_initial_metadata = self.create_service_initial_metadata() service_terminal_metadata = self.create_service_terminal_metadata() service_code, service_message = self.create_service_completion() original_invocation_ticket = links.Ticket( invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata, invocation_payload, invocation_terminal_metadata, invocation_code, invocation_message, links.Ticket.Termination.COMPLETION) self._invocation_link.accept_ticket(original_invocation_ticket) # TODO(nathaniel): This shouldn't be necessary. Detecting the end of the # invocation-side ticket sequence shouldn't require granting allowance for # another payload. 
self._service_mate.block_until_tickets_satisfy( at_least_n_payloads_received_predicate(1)) service_operation_id = self._service_mate.tickets()[0].operation_id self._service_link.accept_ticket( links.Ticket( service_operation_id, 0, None, None, links.Ticket.Subscription.FULL, None, 1, None, None, None, None, None, None)) self._service_mate.block_until_tickets_satisfy(terminated) self._assert_is_valid_invocation_sequence( self._service_mate.tickets(), _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, (invocation_payload,), invocation_initial_metadata, invocation_terminal_metadata, links.Ticket.Termination.COMPLETION) original_service_ticket = links.Ticket( service_operation_id, 1, None, None, links.Ticket.Subscription.FULL, timeout, 0, service_initial_metadata, service_payload, service_terminal_metadata, service_code, service_message, links.Ticket.Termination.COMPLETION) self._service_link.accept_ticket(original_service_ticket) self._invocation_mate.block_until_tickets_satisfy(terminated) self._assert_is_valid_service_sequence( self._invocation_mate.tickets(), (service_payload,), service_initial_metadata, service_terminal_metadata, service_code, service_message, links.Ticket.Termination.COMPLETION)
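# A hedged, self-contained illustration of the two sequence predicates defined
# at the top of this module, using a throwaway namedtuple in place of real
# links.Ticket objects (only the 'payload' and 'termination' attributes are
# inspected by the predicates).
def _demo_ticket_predicates():
    import collections
    FakeTicket = collections.namedtuple('FakeTicket', ('payload', 'termination'))
    sequence = (
        FakeTicket(payload=None, termination=None),
        FakeTicket(payload=b'\x07', termination=None),
        FakeTicket(payload=b'\x07', termination='COMPLETION'),
    )
    # Two tickets carry payloads, and the last ticket carries a termination.
    assert at_least_n_payloads_received_predicate(2)(sequence)
    assert not at_least_n_payloads_received_predicate(3)(sequence)
    assert terminated(sequence)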
"""Support to select a date and/or a time.""" import logging import datetime import voluptuous as vol from homeassistant.const import ATTR_ENTITY_ID, CONF_ICON, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.util import dt as dt_util _LOGGER = logging.getLogger(__name__) DOMAIN = 'input_datetime' ENTITY_ID_FORMAT = DOMAIN + '.{}' CONF_HAS_DATE = 'has_date' CONF_HAS_TIME = 'has_time' CONF_INITIAL = 'initial' DEFAULT_VALUE = '1970-01-01 00:00:00' ATTR_DATE = 'date' ATTR_TIME = 'time' SERVICE_SET_DATETIME = 'set_datetime' SERVICE_SET_DATETIME_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_DATE): cv.date, vol.Optional(ATTR_TIME): cv.time, }) def has_date_or_time(conf): """Check at least date or time is true.""" if conf[CONF_HAS_DATE] or conf[CONF_HAS_TIME]: return conf raise vol.Invalid('Entity needs at least a date or a time') CONFIG_SCHEMA = vol.Schema({ DOMAIN: cv.schema_with_slug_keys( vol.All({ vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_HAS_DATE, default=False): cv.boolean, vol.Optional(CONF_HAS_TIME, default=False): cv.boolean, vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_INITIAL): cv.string, }, has_date_or_time) ) }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up an input datetime.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] for object_id, cfg in config[DOMAIN].items(): name = cfg.get(CONF_NAME) has_time = cfg.get(CONF_HAS_TIME) has_date = cfg.get(CONF_HAS_DATE) icon = cfg.get(CONF_ICON) initial = cfg.get(CONF_INITIAL) entities.append(InputDatetime(object_id, name, has_date, has_time, icon, initial)) if not entities: return False async def async_set_datetime_service(entity, call): """Handle a call to the input datetime 'set datetime' service.""" time = call.data.get(ATTR_TIME) date = call.data.get(ATTR_DATE) if (entity.has_date and not date) or (entity.has_time and not time): _LOGGER.error("Invalid service data for %s " "input_datetime.set_datetime: %s", entity.entity_id, str(call.data)) return entity.async_set_datetime(date, time) component.async_register_entity_service( SERVICE_SET_DATETIME, SERVICE_SET_DATETIME_SCHEMA, async_set_datetime_service ) await component.async_add_entities(entities) return True class InputDatetime(RestoreEntity): """Representation of a datetime input.""" def __init__(self, object_id, name, has_date, has_time, icon, initial): """Initialize a select input.""" self.entity_id = ENTITY_ID_FORMAT.format(object_id) self._name = name self.has_date = has_date self.has_time = has_time self._icon = icon self._initial = initial self._current_datetime = None async def async_added_to_hass(self): """Run when entity about to be added.""" await super().async_added_to_hass() restore_val = None # Priority 1: Initial State if self._initial is not None: restore_val = self._initial # Priority 2: Old state if restore_val is None: old_state = await self.async_get_last_state() if old_state is not None: restore_val = old_state.state if not self.has_date: if not restore_val: restore_val = DEFAULT_VALUE.split()[1] self._current_datetime = dt_util.parse_time(restore_val) elif not self.has_time: if not restore_val: restore_val = DEFAULT_VALUE.split()[0] self._current_datetime = dt_util.parse_date(restore_val) else: if not restore_val: restore_val = DEFAULT_VALUE self._current_datetime = dt_util.parse_datetime(restore_val) 
@property def should_poll(self): """If entity should be polled.""" return False @property def name(self): """Return the name of the select input.""" return self._name @property def icon(self): """Return the icon to be used for this entity.""" return self._icon @property def state(self): """Return the state of the component.""" return self._current_datetime @property def state_attributes(self): """Return the state attributes.""" attrs = { 'has_date': self.has_date, 'has_time': self.has_time, } if self._current_datetime is None: return attrs if self.has_date and self._current_datetime is not None: attrs['year'] = self._current_datetime.year attrs['month'] = self._current_datetime.month attrs['day'] = self._current_datetime.day if self.has_time and self._current_datetime is not None: attrs['hour'] = self._current_datetime.hour attrs['minute'] = self._current_datetime.minute attrs['second'] = self._current_datetime.second if not self.has_date: attrs['timestamp'] = self._current_datetime.hour * 3600 + \ self._current_datetime.minute * 60 + \ self._current_datetime.second elif not self.has_time: extended = datetime.datetime.combine(self._current_datetime, datetime.time(0, 0)) attrs['timestamp'] = extended.timestamp() else: attrs['timestamp'] = self._current_datetime.timestamp() return attrs def async_set_datetime(self, date_val, time_val): """Set a new date / time.""" if self.has_date and self.has_time and date_val and time_val: self._current_datetime = datetime.datetime.combine(date_val, time_val) elif self.has_date and not self.has_time and date_val: self._current_datetime = date_val if self.has_time and not self.has_date and time_val: self._current_datetime = time_val self.async_schedule_update_ha_state()
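# A hedged illustration of a configuration block CONFIG_SCHEMA above accepts:
# every entry needs at least one of has_date/has_time (enforced by
# has_date_or_time), and 'initial' follows the DEFAULT_VALUE layout. The
# object_ids and values below are invented for the example.
_EXAMPLE_CONFIG = {
    DOMAIN: {
        'next_backup': {
            CONF_NAME: 'Next scheduled backup',
            CONF_HAS_DATE: True,
            CONF_HAS_TIME: True,
            CONF_INITIAL: '2019-01-01 03:00:00',
        },
        'quiet_hours_start': {
            CONF_HAS_TIME: True,
        },
    }
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) fills in the defaults and would reject an
# entry that sets neither has_date nor has_time.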
#!/usr/bin/env python # Python 2.7 backward compatibility from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import paramiko import paramiko.py3compat import os import os.path import sys import errno from stat import S_ISDIR, S_ISLNK, S_ISREG, S_IMODE, S_IFMT import argparse import logging from getpass import getuser, getpass import glob import socket """SFTPClone: sync local and remote directories.""" logger = None try: # Not available in Python 2.x FileNotFoundError except NameError: FileNotFoundError = IOError def configure_logging(level=logging.DEBUG): """Configure the module logging engine.""" if level == logging.DEBUG: # For debugging purposes, log from everyone! logging.basicConfig( level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s' ) return logging logger = logging.getLogger(__name__) logger.setLevel(level) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') ch = logging.StreamHandler() ch.setLevel(level) ch.setFormatter(formatter) logger.addHandler(ch) return logger def path_join(*args): """ Wrapper around `os.path.join`. Makes sure to join paths of the same type (bytes). """ args = (paramiko.py3compat.u(arg) for arg in args) return os.path.join(*args) class SFTPClone(object): """The SFTPClone class.""" def __init__(self, local_path, remote_url, identity_files=None, port=None, fix_symlinks=False, ssh_config_path=None, ssh_agent=False, exclude_file=None, known_hosts_path=None, delete=True, allow_unknown=False ): """Init the needed parameters and the SFTPClient.""" self.local_path = os.path.realpath(os.path.expanduser(local_path)) self.logger = logger or configure_logging() if not os.path.exists(self.local_path): self.logger.error("Local path MUST exist. Exiting.") sys.exit(1) if exclude_file: with open(exclude_file) as f: # As in rsync's exclude from, ignore lines with leading ; and # # and treat each path as relative (thus by removing the leading # /) exclude_list = [ line.rstrip().lstrip("/") for line in f if not line.startswith((";", "#")) ] # actually, is a set of excluded files self.exclude_list = { g for pattern in exclude_list for g in glob.glob(path_join(self.local_path, pattern)) } else: self.exclude_list = set() if '@' in remote_url: username, hostname = remote_url.split('@', 1) else: username, hostname = None, remote_url hostname, self.remote_path = hostname.split(':', 1) password = None if username and ':' in username: username, password = username.split(':', 1) identity_files = identity_files or [] if ssh_config_path: try: with open(os.path.expanduser(ssh_config_path)) as c_file: ssh_config = paramiko.SSHConfig() ssh_config.parse(c_file) c = ssh_config.lookup(hostname) hostname = c.get("hostname", hostname) username = c.get("user", username) port = int(c.get("port", port)) identity_files = c.get("identityfile", identity_files) except Exception as e: # it could be safe to continue anyway, # because parameters could have been manually specified self.logger.error( "Error while parsing ssh_config file: %s. 
Trying to continue anyway...", e ) # Set default values if not username: username = getuser() # defaults to current user port = port or 22 allow_unknown = allow_unknown or False self.chown = False self.fix_symlinks = fix_symlinks or False self.delete = delete if delete is not None else True agent_keys = list() agent = None if ssh_agent: try: agent = paramiko.agent.Agent() _agent_keys = agent.get_keys() if not _agent_keys: agent.close() self.logger.error( "SSH agent didn't provide any valid key. Trying to continue..." ) else: agent_keys.append(*_agent_keys) except paramiko.SSHException: if agent: agent.close() self.logger.error( "SSH agent speaks a non-compatible protocol. Ignoring it.") if not identity_files and not password and not agent_keys: self.logger.error( "You need to specify either a password, an identity or to enable the ssh-agent support." ) sys.exit(1) # only root can change file owner if username == 'root': self.chown = True try: transport = paramiko.Transport((hostname, port)) except socket.gaierror: self.logger.error( "Hostname not known. Are you sure you inserted it correctly?") sys.exit(1) try: ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port) known_hosts = None """ Before starting the transport session, we have to configure it. Specifically, we need to configure the preferred PK algorithm. If the system already knows a public key of a specific kind for a remote host, we have to peek its type as the preferred one. """ if known_hosts_path: known_hosts = paramiko.HostKeys() known_hosts_path = os.path.realpath( os.path.expanduser(known_hosts_path)) try: known_hosts.load(known_hosts_path) except IOError: self.logger.error( "Error while loading known hosts file at {}. Exiting...".format( known_hosts_path) ) sys.exit(1) known_keys = known_hosts.lookup(ssh_host) if known_keys is not None: # one or more keys are already known # set their type as preferred transport.get_security_options().key_types = \ tuple(known_keys.keys()) transport.start_client() if not known_hosts: self.logger.warning("Security warning: skipping known hosts check...") else: pubk = transport.get_remote_server_key() if ssh_host in known_hosts.keys(): if not known_hosts.check(ssh_host, pubk): self.logger.error( "Security warning: " "remote key fingerprint {} for hostname " "{} didn't match the one in known_hosts {}. " "Exiting...".format( pubk.get_base64(), ssh_host, known_hosts.lookup(hostname), ) ) sys.exit(1) elif not allow_unknown: prompt = ("The authenticity of host '{}' can't be established.\n" "{} key is {}.\n" "Are you sure you want to continue connecting? [y/n] ").format( ssh_host, pubk.get_name(), pubk.get_base64()) try: # Renamed to `input` in Python 3.x response = raw_input(prompt) except NameError: response = input(prompt) # Note: we do not modify the user's known_hosts file if not (response == "y" or response == "yes"): self.logger.error( "Host authentication failed." ) sys.exit(1) def perform_key_auth(pkey): try: transport.auth_publickey( username=username, key=pkey ) return True except paramiko.SSHException: self.logger.warning( "Authentication with identity {}... failed".format(pkey.get_base64()[:10]) ) return False if password: # Password auth, if specified. transport.auth_password( username=username, password=password ) elif agent_keys: # SSH agent keys have higher priority for pkey in agent_keys: if perform_key_auth(pkey): break # Authentication worked. else: # None of the keys worked. 
raise paramiko.SSHException elif identity_files: # Then follow identity file (specified from CL or ssh_config) # Try identity files one by one, until one works for key_path in identity_files: key_path = os.path.expanduser(key_path) try: key = paramiko.RSAKey.from_private_key_file(key_path) except paramiko.PasswordRequiredException: pk_password = getpass( "It seems that your identity from '{}' is encrypted. " "Please enter your password: ".format(key_path) ) try: key = paramiko.RSAKey.from_private_key_file(key_path, pk_password) except paramiko.SSHException: self.logger.error( "Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path) ) continue except IOError or paramiko.SSHException: self.logger.error( "Something went wrong while opening '{}'. Skipping it.".format(key_path) ) continue if perform_key_auth(key): break # Authentication worked. else: # None of the keys worked. raise paramiko.SSHException else: # No authentication method specified, we shouldn't arrive here. assert False except paramiko.SSHException: self.logger.error( "None of the provided authentication methods worked. Exiting." ) transport.close() sys.exit(1) finally: if agent: agent.close() self.sftp = paramiko.SFTPClient.from_transport(transport) if self.remote_path.startswith("~"): # nasty hack to let getcwd work without changing dir! self.sftp.chdir('.') self.remote_path = self.remote_path.replace( "~", self.sftp.getcwd()) # home is the initial sftp dir @staticmethod def _file_need_upload(l_st, r_st): return True if \ l_st.st_size != r_st.st_size or int(l_st.st_mtime) != r_st.st_mtime \ else False @staticmethod def _must_be_deleted(local_path, r_st): """Return True if the remote correspondent of local_path has to be deleted. i.e. if it doesn't exists locally or if it has a different type from the remote one.""" # if the file doesn't exists if not os.path.lexists(local_path): return True # or if the file type is different l_st = os.lstat(local_path) if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode): return True return False def _match_modes(self, remote_path, l_st): """Match mod, utime and uid/gid with locals one.""" self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode)) self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime)) if self.chown: self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid) def file_upload(self, local_path, remote_path, l_st): """Upload local_path to remote_path and set permission and mtime.""" self.sftp.put(local_path, remote_path) self._match_modes(remote_path, l_st) def remote_delete(self, remote_path, r_st): """Remove the remote directory node.""" # If it's a directory, then delete content and directory if S_ISDIR(r_st.st_mode): for item in self.sftp.listdir_attr(remote_path): full_path = path_join(remote_path, item.filename) self.remote_delete(full_path, item) self.sftp.rmdir(remote_path) # Or simply delete files else: try: self.sftp.remove(remote_path) except FileNotFoundError as e: self.logger.error( "error while removing {}. trace: {}".format(remote_path, e) ) def check_for_deletion(self, relative_path=None): """Traverse the entire remote_path tree. Find files/directories that need to be deleted, not being present in the local folder. 
""" if not relative_path: relative_path = str() # root of shared directory tree remote_path = path_join(self.remote_path, relative_path) local_path = path_join(self.local_path, relative_path) for remote_st in self.sftp.listdir_attr(remote_path): r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename)) inner_remote_path = path_join(remote_path, remote_st.filename) inner_local_path = path_join(local_path, remote_st.filename) # check if remote_st is a symlink # otherwise could delete file outside shared directory if S_ISLNK(r_lstat.st_mode): if self._must_be_deleted(inner_local_path, r_lstat): self.remote_delete(inner_remote_path, r_lstat) continue if self._must_be_deleted(inner_local_path, remote_st): self.remote_delete(inner_remote_path, remote_st) elif S_ISDIR(remote_st.st_mode): self.check_for_deletion( path_join(relative_path, remote_st.filename) ) def create_update_symlink(self, link_destination, remote_path): """Create a new link pointing to link_destination in remote_path position.""" try: # if there's anything, delete it self.sftp.remove(remote_path) except IOError: # that's fine, nothing exists there! pass finally: # and recreate the link try: self.sftp.symlink(link_destination, remote_path) except OSError as e: # Sometimes, if links are "too" different, symlink fails. # Sadly, nothing we can do about it. self.logger.error("error while symlinking {} to {}: {}".format( remote_path, link_destination, e)) def node_check_for_upload_create(self, relative_path, f): """Check if the given directory tree node has to be uploaded/created on the remote folder.""" if not relative_path: # we're at the root of the shared directory tree relative_path = str() # the (absolute) local address of f. local_path = path_join(self.local_path, relative_path, f) try: l_st = os.lstat(local_path) except OSError as e: """A little background here. Sometimes, in big clusters configurations (mail, etc.), files could disappear or be moved, suddenly. There's nothing to do about it, system should be stopped before doing backups. Anyway, we log it, and skip it. """ self.logger.error("error while checking {}: {}".format(relative_path, e)) return if local_path in self.exclude_list: self.logger.info("Skipping excluded file %s.", local_path) return # the (absolute) remote address of f. remote_path = path_join(self.remote_path, relative_path, f) # First case: f is a directory if S_ISDIR(l_st.st_mode): # we check if the folder exists on the remote side # it has to be a folder, otherwise it would have already been # deleted try: self.sftp.stat(remote_path) except IOError: # it doesn't exist yet on remote side self.sftp.mkdir(remote_path) self._match_modes(remote_path, l_st) # now, we should traverse f too (recursion magic!) self.check_for_upload_create(path_join(relative_path, f)) # Second case: f is a symbolic link elif S_ISLNK(l_st.st_mode): # read the local link local_link = os.readlink(local_path) absolute_local_link = os.path.realpath(local_link) # is it absolute? is_absolute = local_link.startswith("/") # and does it point inside the shared directory? # add trailing slash (security) trailing_local_path = path_join(self.local_path, '') relpath = os.path.commonprefix( [absolute_local_link, trailing_local_path] ) == trailing_local_path if relpath: relative_link = absolute_local_link[len(trailing_local_path):] else: relative_link = None """ # Refactor them all, be efficient! 
# Case A: absolute link pointing outside shared directory # (we can only update the remote part) if is_absolute and not relpath: self.create_update_symlink(local_link, remote_path) # Case B: absolute link pointing inside shared directory # (we can leave it as it is or fix the prefix to match the one of the remote server) elif is_absolute and relpath: if self.fix_symlinks: self.create_update_symlink( join( self.remote_path, relative_link, ), remote_path ) else: self.create_update_symlink(local_link, remote_path) # Case C: relative link pointing outside shared directory # (all we can do is try to make the link anyway) elif not is_absolute and not relpath: self.create_update_symlink(local_link, remote_path) # Case D: relative link pointing inside shared directory # (we preserve the relativity and link it!) elif not is_absolute and relpath: self.create_update_symlink(local_link, remote_path) """ if is_absolute and relpath: if self.fix_symlinks: self.create_update_symlink( path_join( self.remote_path, relative_link, ), remote_path ) else: self.create_update_symlink(local_link, remote_path) # Third case: regular file elif S_ISREG(l_st.st_mode): try: r_st = self.sftp.lstat(remote_path) if self._file_need_upload(l_st, r_st): self.file_upload(local_path, remote_path, l_st) except IOError as e: if e.errno == errno.ENOENT: self.file_upload(local_path, remote_path, l_st) # Anything else. else: self.logger.warning("Skipping unsupported file %s.", local_path) def check_for_upload_create(self, relative_path=None): """Traverse the relative_path tree and check for files that need to be uploaded/created. Relativity here refers to the shared directory tree.""" for f in os.listdir( path_join( self.local_path, relative_path) if relative_path else self.local_path ): self.node_check_for_upload_create(relative_path, f) def run(self): """Run the sync. Confront the local and the remote directories and perform the needed changes.""" try: if self.delete: # First check for items to be removed self.check_for_deletion() # Now scan local for items to upload/create self.check_for_upload_create() except FileNotFoundError: # If this happens, probably the remote folder doesn't exist. self.logger.error( "Error while opening remote folder. Are you sure it does exist?") sys.exit(1) def create_parser(): """Create the CLI argument parser.""" parser = argparse.ArgumentParser( description='Sync a local and a remote folder through SFTP.' ) parser.add_argument( "path", type=str, metavar="local-path", help="the path of the local folder", ) parser.add_argument( "remote", type=str, metavar="user[:password]@hostname:remote-path", help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. " "The hostname can be specified as a ssh_config's hostname too. 
" "Every missing information will be gathered from there", ) parser.add_argument( "-k", "--key", metavar="identity-path", action="append", help="private key identity path (defaults to ~/.ssh/id_rsa)" ) parser.add_argument( "-l", "--logging", choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'], default='ERROR', help="set logging level" ) parser.add_argument( "-p", "--port", default=22, type=int, help="SSH remote port (defaults to 22)" ) parser.add_argument( "-f", "--fix-symlinks", action="store_true", help="fix symbolic links on remote side" ) parser.add_argument( "-a", "--ssh-agent", action="store_true", help="enable ssh-agent support" ) parser.add_argument( "-c", "--ssh-config", metavar="ssh_config path", default="~/.ssh/config", type=str, help="path to the ssh-configuration file (default to ~/.ssh/config)" ) parser.add_argument( "-n", "--known-hosts", metavar="known_hosts path", default="~/.ssh/known_hosts", type=str, help="path to the openSSH known_hosts file" ) parser.add_argument( "-d", "--disable-known-hosts", action="store_true", help="disable known_hosts fingerprint checking (security warning!)" ) parser.add_argument( "-e", "--exclude-from", metavar="exclude-from-file-path", type=str, help="exclude files matching pattern in exclude-from-file-path" ) parser.add_argument( "-t", "--do-not-delete", action="store_true", help="do not delete remote files missing from local folder" ) parser.add_argument( "-o", "--allow-unknown", action="store_true", help="allow connection to unknown hosts" ) return parser def main(args=None): """The main.""" parser = create_parser() args = vars(parser.parse_args(args)) log_mapping = { 'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'NOTSET': logging.NOTSET, } log_level = log_mapping[args['logging']] del(args['logging']) global logger logger = configure_logging(log_level) args_mapping = { "path": "local_path", "remote": "remote_url", "ssh_config": "ssh_config_path", "exclude_from": "exclude_file", "known_hosts": "known_hosts_path", "do_not_delete": "delete", "key": "identity_files", } kwargs = { # convert the argument names to class constructor parameters args_mapping[k]: v for k, v in args.items() if v and k in args_mapping } kwargs.update({ k: v for k, v in args.items() if v and k not in args_mapping }) # Special case: disable known_hosts check if args['disable_known_hosts']: kwargs['known_hosts_path'] = None del(kwargs['disable_known_hosts']) # Toggle `do_not_delete` flag if "delete" in kwargs: kwargs["delete"] = not kwargs["delete"] # Manually set the default identity file. kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"] sync = SFTPClone( **kwargs ) sync.run() if __name__ == '__main__': main()
#!/usr/bin/env python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Generates the appropriate build.json data for all the end2end tests.""" import yaml import collections import hashlib FixtureOptions = collections.namedtuple( 'FixtureOptions', 'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes') default_unsecure_fixture_options = FixtureOptions( True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True) socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False) default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True) uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv']) fd_unsecure_fixture_options = default_unsecure_fixture_options._replace( dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv']) # maps fixture name to whether it requires the security library END2END_FIXTURES = { 'h2_compress': default_unsecure_fixture_options, 'h2_census': default_unsecure_fixture_options, 'h2_load_reporting': default_unsecure_fixture_options, 'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False), 'h2_fd': fd_unsecure_fixture_options, 'h2_full': default_unsecure_fixture_options, 'h2_full+pipe': default_unsecure_fixture_options._replace( platforms=['linux'], exclude_iomgrs=['uv']), 'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True), 'h2_http_proxy': default_unsecure_fixture_options._replace( ci_mac=False, exclude_iomgrs=['uv']), 'h2_oauth2': default_secure_fixture_options._replace( ci_mac=False, exclude_iomgrs=['uv']), 'h2_proxy': default_unsecure_fixture_options._replace( includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']), 'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace( ci_mac=False, exclude_configs=['msan'], large_writes=False, exclude_iomgrs=['uv']), 'h2_sockpair': 
socketpair_unsecure_fixture_options._replace( ci_mac=False, exclude_iomgrs=['uv']), 'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace( ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']), 'h2_ssl': default_secure_fixture_options, 'h2_ssl_cert': default_secure_fixture_options, 'h2_ssl_proxy': default_secure_fixture_options._replace( includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']), 'h2_uds': uds_fixture_options, } TestOptions = collections.namedtuple( 'TestOptions', 'needs_fullstack needs_dns proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky') default_test_options = TestOptions(False, False, True, False, True, 1.0, [], False, False) connectivity_test_options = default_test_options._replace(needs_fullstack=True) LOWCPU = 0.1 # maps test names to options END2END_TESTS = { 'authority_not_supported': default_test_options, 'bad_hostname': default_test_options, 'bad_ping': connectivity_test_options._replace(proxyable=False), 'binary_metadata': default_test_options, 'resource_quota_server': default_test_options._replace(large_writes=True, proxyable=False), 'call_creds': default_test_options._replace(secure=True), 'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_after_client_done': default_test_options, 'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU), 'compressed_payload': default_test_options._replace(proxyable=False), 'connectivity': connectivity_test_options._replace( proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']), 'default_host': default_test_options._replace(needs_fullstack=True, needs_dns=True), 'disappearing_server': connectivity_test_options._replace(flaky=True), 'empty_batch': default_test_options, 'filter_causes_close': default_test_options, 'filter_call_init_fails': default_test_options, 'filter_latency': default_test_options, 'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU), 'hpack_size': default_test_options._replace(proxyable=False, traceable=False), 'high_initial_seqno': default_test_options, 'idempotent_request': default_test_options, 'invoke_large_request': default_test_options, 'keepalive_timeout': default_test_options._replace(proxyable=False), 'large_metadata': default_test_options, 'max_concurrent_streams': default_test_options._replace(proxyable=False), 'max_connection_age': default_test_options, 'max_connection_idle': connectivity_test_options._replace( proxyable=False, exclude_iomgrs=['uv']), 'max_message_length': default_test_options, 'negative_deadline': default_test_options, 'network_status_change': default_test_options, 'no_logging': default_test_options._replace(traceable=False), 'no_op': default_test_options, 'payload': default_test_options, 'load_reporting_hook': default_test_options, 'ping_pong_streaming': default_test_options, 'ping': connectivity_test_options._replace(proxyable=False), 'registered_call': default_test_options, 'request_with_flags': default_test_options._replace( proxyable=False, cpu_cost=LOWCPU), 'request_with_payload': default_test_options, 'server_finishes_request': default_test_options, 'shutdown_finishes_calls': default_test_options, 'shutdown_finishes_tags': default_test_options, 'simple_cacheable_request': default_test_options, 'simple_delayed_request': connectivity_test_options, 
'simple_metadata': default_test_options, 'simple_request': default_test_options, 'streaming_error_response': default_test_options, 'trailing_metadata': default_test_options, 'write_buffering': default_test_options, 'write_buffering_at_end': default_test_options, } def compatible(f, t): if END2END_TESTS[t].needs_fullstack: if not END2END_FIXTURES[f].fullstack: return False if END2END_TESTS[t].needs_dns: if not END2END_FIXTURES[f].dns_resolver: return False if not END2END_TESTS[t].proxyable: if END2END_FIXTURES[f].includes_proxy: return False if not END2END_TESTS[t].traceable: if END2END_FIXTURES[f].tracing: return False if END2END_TESTS[t].large_writes: if not END2END_FIXTURES[f].large_writes: return False return True def without(l, e): l = l[:] l.remove(e) return l def main(): sec_deps = [ 'grpc_test_util', 'grpc', 'gpr_test_util', 'gpr' ] unsec_deps = [ 'grpc_test_util_unsecure', 'grpc_unsecure', 'gpr_test_util', 'gpr' ] json = { '#': 'generated with test/end2end/gen_build_json.py', 'libs': [ { 'name': 'end2end_tests', 'build': 'private', 'language': 'c', 'secure': True, 'src': ['test/core/end2end/end2end_tests.c', 'test/core/end2end/end2end_test_utils.c'] + [ 'test/core/end2end/tests/%s.c' % t for t in sorted(END2END_TESTS.keys())], 'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 'test/core/end2end/end2end_tests.h'], 'deps': sec_deps, 'vs_proj_dir': 'test/end2end/tests', } ] + [ { 'name': 'end2end_nosec_tests', 'build': 'private', 'language': 'c', 'secure': False, 'src': ['test/core/end2end/end2end_nosec_tests.c', 'test/core/end2end/end2end_test_utils.c'] + [ 'test/core/end2end/tests/%s.c' % t for t in sorted(END2END_TESTS.keys()) if not END2END_TESTS[t].secure], 'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 'test/core/end2end/end2end_tests.h'], 'deps': unsec_deps, 'vs_proj_dir': 'test/end2end/tests', } ], 'targets': [ { 'name': '%s_test' % f, 'build': 'test', 'language': 'c', 'run': False, 'src': ['test/core/end2end/fixtures/%s.c' % f], 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'deps': [ 'end2end_tests' ] + sec_deps, 'vs_proj_dir': 'test/end2end/fixtures', } for f in sorted(END2END_FIXTURES.keys()) ] + [ { 'name': '%s_nosec_test' % f, 'build': 'test', 'language': 'c', 'secure': False, 'src': ['test/core/end2end/fixtures/%s.c' % f], 'run': False, 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'deps': [ 'end2end_nosec_tests' ] + unsec_deps, 'vs_proj_dir': 'test/end2end/fixtures', } for f in sorted(END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure ], 'tests': [ { 'name': '%s_test' % f, 'args': [t], 'exclude_configs': END2END_FIXTURES[f].exclude_configs, 'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) | set(END2END_TESTS[t].exclude_iomgrs)), 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'flaky': END2END_TESTS[t].flaky, 'language': 'c', 'cpu_cost': END2END_TESTS[t].cpu_cost, } for f in sorted(END2END_FIXTURES.keys()) for t in sorted(END2END_TESTS.keys()) if compatible(f, t) ] + [ { 'name': '%s_nosec_test' % f, 'args': [t], 'exclude_configs': END2END_FIXTURES[f].exclude_configs, 'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) | 
set(END2END_TESTS[t].exclude_iomgrs)), 'platforms': END2END_FIXTURES[f].platforms, 'ci_platforms': (END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac else without( END2END_FIXTURES[f].platforms, 'mac')), 'flaky': END2END_TESTS[t].flaky, 'language': 'c', 'cpu_cost': END2END_TESTS[t].cpu_cost, } for f in sorted(END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure for t in sorted(END2END_TESTS.keys()) if compatible(f, t) and not END2END_TESTS[t].secure ], 'core_end2end_tests': dict( (t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys() ) } print yaml.dump(json) if __name__ == '__main__': main()
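# --- Illustrative sketch (not part of the generator above) ---
# The generator builds its build.json entries from one namedtuple of default
# options, per-entry overrides via _replace(), and a fixture x test cross
# product filtered by a compatibility predicate. Below is a tiny
# self-contained version of that pattern; the fixture and test names are
# made up for illustration and it runs under both Python 2 and 3.
import collections

Options = collections.namedtuple('Options', 'fullstack secure cpu_cost')
DEFAULT = Options(fullstack=True, secure=False, cpu_cost=1.0)

FIXTURES = {
    'full': DEFAULT,
    'full_secure': DEFAULT._replace(secure=True),
    'sockpair': DEFAULT._replace(fullstack=False),
}
TESTS = {
    'ping': DEFAULT._replace(cpu_cost=0.1),      # needs a fullstack fixture
    'no_op': DEFAULT._replace(fullstack=False),
    'call_creds': DEFAULT._replace(secure=True),
}


def compatible(fixture, test):
    # A test that needs the full stack cannot run on a socketpair fixture.
    if TESTS[test].fullstack and not FIXTURES[fixture].fullstack:
        return False
    # A secure-only test needs a fixture built with the security library.
    if TESTS[test].secure and not FIXTURES[fixture].secure:
        return False
    return True


matrix = [(f, t) for f in sorted(FIXTURES) for t in sorted(TESTS)
          if compatible(f, t)]
print('%d compatible fixture/test pairs' % len(matrix))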
""" Tests which exist to pinpoint specific bugs and verify that they are fixed. """ import os import signal import string import sys import tempfile import time import traceback import unittest from jobmon import config, launcher, protocol, service, transport # Not in /etc/services, probably safe TEST_CMD_PORT = 12321 TEST_EVENT_PORT = TEST_CMD_PORT + 1 # This allows us to format things inside of test config strings without # having to double up on { and } def expand_vars(template, **variables): """ Expands a configuration template which uses $-style substitutions. """ template = string.Template(template) return template.safe_substitute(**variables) class TestTimeoutException(Exception): """ Exception raise when a test takes longer than it should to execute. """ class TimeoutManager: """ A context manager which handles setting timeouts via SIGALRM. """ def __init__(self, timeout): self.timeout = timeout self.executing_timeout = False def __enter__(self): def handler(sig_number, frame): raise TestTimeoutException('Timed out in TimeoutManager') self.old_handler = signal.signal(signal.SIGALRM, handler) signal.alarm(self.timeout) return self def __exit__(self, *excinfo): signal.alarm(0) signal.signal(signal.SIGALRM, self.old_handler) def double_restart_bug(log_filename, timeout=120): """ The 'double restart' bug occurs when a user tries to start a process that died, right before the auto-restart processes kick in and try to start it also. This leads to an uncaught ValueError which takes down the service module and renders jobmon alive but non-receptive to commands. """ server_pid = None event_stream = None try: with TimeoutManager(timeout): # What is desired here is a job that will predictably die and trigger the # auto-restart mechanism, but which will allow us to interrupt the restart # before it occurs. with tempfile.TemporaryDirectory() as temp_dir: print(" >>> Expanding configuration") DOUBLE_RESTART_TEST_CONFIG = expand_vars(''' { "supervisor": { "control-port": $CMDPORT, "event-port": $EVENTPORT, "log-level": "DEBUG", "log-file": "$LOGFILE" }, "jobs": { "test": { "command": "sleep 5; /bin/false", "restart": true } } } ''', DIR=temp_dir, CMDPORT=str(TEST_CMD_PORT), EVENTPORT=str(TEST_EVENT_PORT), LOGFILE=log_filename) print(" <<< Expanding configuration") print(" >>> Writing configuration") with open(temp_dir + '/config.json', 'w') as config_file: config_file.write(DOUBLE_RESTART_TEST_CONFIG) print(" <<< Writing configuration") print(" >>> Loading configuration") config_handler = config.ConfigHandler() config_handler.load(temp_dir + '/config.json') print(" <<< Loading configuration") print(" >>> Launching server") server_pid = launcher.run_fork(config_handler) print(" <<< Launching server [PID", server_pid, "]") # Give the server some time to set up before we start shoving # things at it print(" >>> Starting server") while True: try: event_stream = transport.EventStream(TEST_EVENT_PORT) break except OSError: time.sleep(0.5) # Give the server some time to set up parts other than the event handler time.sleep(5) print(" <<< Starting server") # Now, start the child - it should die a and induce a restart # with no delay, since it's the first restart. We'll give it # 1s grace period cmd_pipe = transport.CommandPipe(TEST_CMD_PORT) print(" >>> Starting the child") cmd_pipe.start_job('test') print(" <<< Starting the child") # At this point, the process will take die again. On death, it will # have died and been restarted, which we want to wait for so that # we can intercept its next death. 
print(" >>> Waiting for restart") while True: evt = event_stream.next_event() if evt == protocol.Event('test', protocol.EVENT_RESTARTJOB): break print(" <<< Waiting for restart") # Now, we can induce the bug by sending a start request. If the # service hits the bug, it'll raise a ValueError after the # backoff and the terminate will fail, tripping the timeout on this # test. print(" >>> Starting job again") cmd_pipe.start_job('test') print(" <<< Starting job again") # The moment of truth - the restart should happen. If it doesn't, then # the timeout will trip eventually and we'll die. print(" >>> Job being restarted") while True: evt = event_stream.next_event() if evt == protocol.Event('test', protocol.EVENT_RESTARTJOB): break print(" <<< Job being restarted") print(" >>> Terminating server") cmd_pipe.terminate() while True: evt = event_stream.next_event() if evt == protocol.Event('', protocol.EVENT_TERMINATE): break print(" <<< Terminating server") # It might take some time between delivery of the event and the # server shutting itself down completely. In this case, give it a # little while. time.sleep(5) print(" >>> Doing final check for server") os.kill(server_pid, signal.SIGKILL) os.waitpid(server_pid, 0) server_pid = None finally: if server_pid is not None: os.kill(server_pid, signal.SIGKILL) os.waitpid(server_pid, 0) if event_stream is not None: event_stream.destroy() def not_stopping_child_processes(log_filename, timeout=120): """ This is designed to test for the presence of a bug, where the service manager doesn't shut down all the children fully before it terminates; this leaves them (in most real cases) to be picked up by the init process. """ server_pid = None event_stream = None terminated_events = set() all_jobs = {'test{}'.format(x) for x in range(1, 11)} try: with TimeoutManager(timeout): # Here, we want several jobs that will be running when we decide to # terminate the server with tempfile.TemporaryDirectory() as temp_dir: print(" >>> Expanding configuration") DOUBLE_RESTART_TEST_CONFIG = expand_vars(''' { "supervisor": { "control-port": $CMDPORT, "event-port": $EVENTPORT, "log-level": "DEBUG", "log-file": "$LOGFILE" }, "jobs": { "test1": { "command": "sleep 300", "autostart": true }, "test2": { "command": "sleep 300", "autostart": true }, "test3": { "command": "sleep 300", "autostart": true }, "test4": { "command": "sleep 300", "autostart": true }, "test5": { "command": "sleep 300", "autostart": true }, "test6": { "command": "sleep 300", "autostart": true }, "test7": { "command": "sleep 300", "autostart": true }, "test8": { "command": "sleep 300", "autostart": true }, "test9": { "command": "sleep 300", "autostart": true }, "test10": { "command": "sleep 300", "autostart": true } } } ''', DIR=temp_dir, CMDPORT=str(TEST_CMD_PORT), EVENTPORT=str(TEST_EVENT_PORT), LOGFILE=log_filename) print(" <<< Expanding configuration") print(" >>> Writing configuration") with open(temp_dir + '/config.json', 'w') as config_file: config_file.write(DOUBLE_RESTART_TEST_CONFIG) print(" <<< Writing configuration") print(" >>> Loading configuration") config_handler = config.ConfigHandler() config_handler.load(temp_dir + '/config.json') print(" <<< Loading configuration") print(" >>> Launching server") server_pid = launcher.run_fork(config_handler) print(" <<< Launching server [PID", server_pid, "]") # Give the server some time to set up before we start shoving # things at it print(" >>> Starting server") while True: try: event_stream = transport.EventStream(TEST_EVENT_PORT) break except OSError: 
time.sleep(0.5) # We won't need this until later event_stream.destroy() print(" <<< Starting server") # Give the server time to autostart everything. Events are bit # finnicky here, since we might not connect in time to get them. time.sleep(5) # Wait for all jobs to autostart print(" >>> Waiting on autostart") cmd_pipe = transport.CommandPipe(TEST_CMD_PORT) for job in all_jobs: while not cmd_pipe.is_running(job): time.sleep(1) print(" --- Running:", job, "@") print(" <<< Waiting on autostart") # Terminate the server, and see how many events are actually # reported as stopping print(" >>> Terminating the server") cmd_pipe.terminate() print(" <<< Terminating the server") print(" >>> Waiting on termination") event_stream = transport.EventStream(TEST_EVENT_PORT) while True: print(" ~~~ Awaiting @") evt = event_stream.next_event() if evt.event_code == protocol.EVENT_TERMINATE: print(" --- Terminated @") break elif evt.event_code == protocol.EVENT_STOPJOB: print(" --- Stopped", evt.job_name, "@") terminated_events.add(evt.job_name) else: print(" --- Unknown", evt, "@") print(" <<< Waiting on termination") except Exception as ex: # Finally will eat the exception, so make sure it gets logged for diagnostics print(" *** ERROR", ex) traceback.print_exc() finally: if server_pid is not None: os.kill(server_pid, signal.SIGKILL) os.waitpid(server_pid, 0) if event_stream is not None: event_stream.destroy() return all_jobs, terminated_events class TestBugfixes(unittest.TestCase): def test_double_restart_bug(self): """ Tests the double restart bug. """ with tempfile.NamedTemporaryFile(mode='r') as log_file: try: double_restart_bug(log_file.name) finally: print('=====') log_file.seek(0) print(log_file.read()) print('-----') def test_terminate_(self): """ Tests the double restart bug. """ with tempfile.NamedTemporaryFile(mode='r') as log_file: try: all_jobs, stopped_jobs = not_stopping_child_processes( log_file.name) self.assertEqual(all_jobs, stopped_jobs) finally: print('=====') log_file.seek(0) print(log_file.read()) print('-----')
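# --- Illustrative sketch (not part of the jobmon test suite above) ---
# Both regression tests rely on TimeoutManager, which arms SIGALRM so a hung
# server cannot stall the test run forever. Below is a minimal standalone
# version of that idea (Unix only); it raises the built-in TimeoutError
# rather than jobmon's TestTimeoutException.
import signal
import time


class Timeout:
    """Raise TimeoutError if the body of the `with` block runs too long."""

    def __init__(self, seconds):
        self.seconds = seconds

    def __enter__(self):
        def handler(signum, frame):
            raise TimeoutError('timed out after %d seconds' % self.seconds)
        self._old_handler = signal.signal(signal.SIGALRM, handler)
        signal.alarm(self.seconds)
        return self

    def __exit__(self, *excinfo):
        signal.alarm(0)                            # cancel any pending alarm
        signal.signal(signal.SIGALRM, self._old_handler)
        return False                               # never swallow exceptions


if __name__ == '__main__':
    try:
        with Timeout(1):
            time.sleep(5)
    except TimeoutError:
        print('timed out as expected')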
from sympy import (Lambda, Symbol, Function, Derivative, Subs, sqrt, log, exp, Rational, Float, sin, cos, acos, diff, I, re, im, E, expand, pi, O, Sum, S, polygamma, loggamma, expint, Tuple, Dummy, Eq, Expr, symbols, nfloat) from sympy.utilities.pytest import XFAIL, raises from sympy.abc import t, w, x, y, z from sympy.core.function import PoleError from sympy.solvers import solve from sympy.utilities.iterables import subsets, variations f, g, h = symbols('f g h', cls=Function) def test_f_expand_complex(): x = Symbol('x', real=True) assert f(x).expand(complex=True) == I*im(f(x)) + re(f(x)) assert exp(x).expand(complex=True) == exp(x) assert exp(I*x).expand(complex=True) == cos(x) + I*sin(x) assert exp(z).expand(complex=True) == cos(im(z))*exp(re(z)) + \ I*sin(im(z))*exp(re(z)) def test_bug1(): e = sqrt(-log(w)) assert e.subs(log(w), -x) == sqrt(x) e = sqrt(-5*log(w)) assert e.subs(log(w), -x) == sqrt(5*x) def test_general_function(): nu = Function('nu') e = nu(x) edx = e.diff(x) edy = e.diff(y) edxdx = e.diff(x).diff(x) edxdy = e.diff(x).diff(y) assert e == nu(x) assert edx != nu(x) assert edx == diff(nu(x), x) assert edy == 0 assert edxdx == diff(diff(nu(x), x), x) assert edxdy == 0 def test_derivative_subs_bug(): e = diff(g(x), x) assert e.subs(g(x), f(x)) != e assert e.subs(g(x), f(x)) == Derivative(f(x), x) assert e.subs(g(x), -f(x)) == Derivative(-f(x), x) assert e.subs(x, y) == Derivative(g(y), y) def test_derivative_subs_self_bug(): d = diff(f(x), x) assert d.subs(d, y) == y def test_derivative_linearity(): assert diff(-f(x), x) == -diff(f(x), x) assert diff(8*f(x), x) == 8*diff(f(x), x) assert diff(8*f(x), x) != 7*diff(f(x), x) assert diff(8*f(x)*x, x) == 8*f(x) + 8*x*diff(f(x), x) assert diff(8*f(x)*y*x, x) == 8*y*f(x) + 8*y*x*diff(f(x), x) def test_derivative_evaluate(): assert Derivative(sin(x), x) != diff(sin(x), x) assert Derivative(sin(x), x).doit() == diff(sin(x), x) assert Derivative(Derivative(f(x), x), x) == diff(f(x), x, x) assert Derivative(sin(x), x, 0) == sin(x) def test_diff_symbols(): assert diff(f(x, y, z), x, y, z) == Derivative(f(x, y, z), x, y, z) assert diff(f(x, y, z), x, x, x) == Derivative(f(x, y, z), x, x, x) assert diff(f(x, y, z), x, 3) == Derivative(f(x, y, z), x, 3) # issue 1929 assert [diff(-z + x/y, sym) for sym in (z, x, y)] == [-1, 1/y, -x/y**2] assert diff(f(x, y, z), x, y, z, 2) == Derivative(f(x, y, z), x, y, z, z) assert diff(f(x, y, z), x, y, z, 2, evaluate=False) == \ Derivative(f(x, y, z), x, y, z, z) assert Derivative(f(x, y, z), x, y, z)._eval_derivative(z) == \ Derivative(f(x, y, z), x, y, z, z) assert Derivative(Derivative(f(x, y, z), x), y)._eval_derivative(z) == \ Derivative(f(x, y, z), x, y, z) def test_Lambda(): e = Lambda(x, x**2) assert e(4) == 16 assert e(x) == x**2 assert e(y) == y**2 assert Lambda(x, x**2) == Lambda(x, x**2) assert Lambda(x, x**2) == Lambda(y, y**2) assert Lambda(x, x**2) != Lambda(y, y**2 + 1) assert Lambda((x, y), x**y) == Lambda((y, x), y**x) assert Lambda((x, y), x**y) != Lambda((x, y), y**x) assert Lambda((x, y), x**y)(x, y) == x**y assert Lambda((x, y), x**y)(3, 3) == 3**3 assert Lambda((x, y), x**y)(x, 3) == x**3 assert Lambda((x, y), x**y)(3, y) == 3**y assert Lambda(x, f(x))(x) == f(x) assert Lambda(x, x**2)(e(x)) == x**4 assert e(e(x)) == x**4 assert Lambda((x, y), x + y).nargs == 2 p = x, y, z, t assert Lambda(p, t*(x + y + z))(*p) == t * (x + y + z) assert Lambda(x, 2*x) + Lambda(y, 2*y) == 2*Lambda(x, 2*x) assert Lambda(x, 2*x) not in [ Lambda(x, x) ] def test_IdentityFunction(): assert Lambda(x, x) 
is Lambda(y, y) is S.IdentityFunction assert Lambda(x, 2*x) is not S.IdentityFunction assert Lambda((x, y), x) is not S.IdentityFunction def test_Lambda_symbols(): assert Lambda(x, 2*x).free_symbols == set() assert Lambda(x, x*y).free_symbols == set([y]) def test_Lambda_arguments(): raises(TypeError, lambda: Lambda(x, 2*x)(x, y)) raises(TypeError, lambda: Lambda((x, y), x + y)(x)) def test_Lambda_equality(): assert Lambda(x, 2*x) != Lambda((x, y), 2*x) assert (Lambda(x, 2*x) == Lambda((x, y), 2*x)) is False assert Lambda((x, y), 2*x) == Lambda((x, y), 2*x) assert (Lambda((x, y), 2*x) != Lambda((x, y), 2*x)) is False assert Lambda(x, 2*x) != 2*x assert (Lambda(x, 2*x) == 2*x) is False def test_Subs(): assert Subs(x, x, 0).subs(x, 1) == Subs(x, x, 1) assert Subs(y, x, 0).subs(y, 1) == Subs(1, x, 0) assert Subs(f(x), x, 0).doit() == f(0) assert Subs(f(x**2), x**2, 0).doit() == f(0) assert Subs(f(x, y, z), (x, y, z), (0, 1, 1)) != \ Subs(f(x, y, z), (x, y, z), (0, 0, 1)) assert Subs(f(x, y), (x, y, z), (0, 1, 1)) == \ Subs(f(x, y), (x, y, z), (0, 1, 2)) assert Subs(f(x, y), (x, y, z), (0, 1, 1)) != \ Subs(f(x, y) + z, (x, y, z), (0, 1, 0)) assert Subs(f(x, y), (x, y), (0, 1)).doit() == f(0, 1) assert Subs(Subs(f(x, y), x, 0), y, 1).doit() == f(0, 1) raises(ValueError, lambda: Subs(f(x, y), (x, y), (0, 0, 1))) raises(ValueError, lambda: Subs(f(x, y), (x, x, y), (0, 0, 1))) assert len(Subs(f(x, y), (x, y), (0, 1)).variables) == 2 assert Subs(f(x, y), (x, y), (0, 1)).point == Tuple(0, 1) assert Subs(f(x), x, 0) == Subs(f(y), y, 0) assert Subs(f(x, y), (x, y), (0, 1)) == Subs(f(x, y), (y, x), (1, 0)) assert Subs(f(x)*y, (x, y), (0, 1)) == Subs(f(y)*x, (y, x), (0, 1)) assert Subs(f(x)*y, (x, y), (1, 1)) == Subs(f(y)*x, (x, y), (1, 1)) assert Subs(f(x), x, 0).subs(x, 1).doit() == f(1) assert Subs(f(x), x, y).subs(y, 0) == Subs(f(x), x, 0) assert Subs(y*f(x), x, y).subs(y, 2) == Subs(2*f(x), x, 2) assert (2 * Subs(f(x), x, 0)).subs(Subs(f(x), x, 0), y) == 2*y assert Subs(f(x), x, 0).free_symbols == set([]) assert Subs(f(x, y), x, z).free_symbols == set([y, z]) assert Subs(f(x).diff(x), x, 0).doit(), Subs(f(x).diff(x), x, 0) assert Subs(1 + f(x).diff(x), x, 0).doit(), 1 + Subs(f(x).diff(x), x, 0) assert Subs(y*f(x, y).diff(x), (x, y), (0, 2)).doit() == \ 2*Subs(Derivative(f(x, 2), x), x, 0) assert Subs(y**2*f(x), x, 0).diff(y) == 2*y*f(0) e = Subs(y**2*f(x), x, y) assert e.diff(y) == e.doit().diff(y) == y**2*Derivative(f(y), y) + 2*y*f(y) assert Subs(f(x), x, 0) + Subs(f(x), x, 0) == 2*Subs(f(x), x, 0) e1 = Subs(z*f(x), x, 1) e2 = Subs(z*f(y), y, 1) assert e1 + e2 == 2*e1 assert e1.__hash__() == e2.__hash__() assert Subs(z*f(x + 1), x, 1) not in [ e1, e2 ] assert Derivative( f(x), x).subs(x, g(x)) == Subs(Derivative(f(x), x), (x,), (g(x),)) assert Subs(f(x)*cos(y) + z, (x, y), (0, pi/3)).n(2) == \ Subs(f(x)*cos(y) + z, (x, y), (0, pi/3)).evalf(2) == \ z + Rational('1/2').n(2)*f(0) @XFAIL def test_Subs2(): # this reflects a limitation of subs(), probably won't fix assert Subs(f(x), x**2, x).doit() == f(sqrt(x)) def test_expand_function(): assert expand(x + y) == x + y assert expand(x + y, complex=True) == I*im(x) + I*im(y) + re(x) + re(y) assert expand((x + y)**11, modulus=11) == x**11 + y**11 def test_function_comparable(): assert sin(x).is_comparable is False assert cos(x).is_comparable is False assert sin(Float('0.1')).is_comparable is True assert cos(Float('0.1')).is_comparable is True assert sin(E).is_comparable is True assert cos(E).is_comparable is True assert sin(Rational(1, 3)).is_comparable 
is True assert cos(Rational(1, 3)).is_comparable is True @XFAIL def test_function_comparable_infinities(): assert sin(oo).is_comparable is False assert sin(-oo).is_comparable is False assert sin(zoo).is_comparable is False assert sin(nan).is_comparable is False def test_deriv1(): # These all requre derivatives evaluated at a point (issue 1620) to work. # See issue 1525 assert f(2*x).diff(x) == 2*Subs(Derivative(f(x), x), Tuple(x), Tuple(2*x)) assert (f(x)**3).diff(x) == 3*f(x)**2*f(x).diff(x) assert ( f(2*x)**3).diff(x) == 6*f(2*x)**2*Subs(Derivative(f(x), x), Tuple(x), Tuple(2*x)) assert f(2 + x).diff(x) == Subs(Derivative(f(x), x), Tuple(x), Tuple(x + 2)) assert f(2 + 3*x).diff(x) == 3*Subs(Derivative(f(x), x), Tuple(x), Tuple(3*x + 2)) assert f(3*sin(x)).diff(x) == 3*cos(x)*Subs(Derivative(f(x), x), Tuple(x), Tuple(3*sin(x))) def test_deriv2(): assert (x**3).diff(x) == 3*x**2 assert (x**3).diff(x, evaluate=False) != 3*x**2 assert (x**3).diff(x, evaluate=False) == Derivative(x**3, x) assert diff(x**3, x) == 3*x**2 assert diff(x**3, x, evaluate=False) != 3*x**2 assert diff(x**3, x, evaluate=False) == Derivative(x**3, x) def test_func_deriv(): assert f(x).diff(x) == Derivative(f(x), x) # issue 1435 assert f(x, y).diff(x, y) - f(x, y).diff(y, x) == 0 assert Derivative(f(x, y), x, y).args[1:] == (x, y) assert Derivative(f(x, y), y, x).args[1:] == (y, x) assert (Derivative(f(x, y), x, y) - Derivative(f(x, y), y, x)).doit() == 0 def test_suppressed_evaluation(): a = sin(0, evaluate=False) assert a != 0 assert a.func is sin assert a.args == (0,) def test_function_evalf(): def eq(a, b, eps): return abs(a - b) < eps assert eq(sin(1).evalf(15), Float("0.841470984807897"), 1e-13) assert eq( sin(2).evalf(25), Float("0.9092974268256816953960199", 25), 1e-23) assert eq(sin(1 + I).evalf( 15), Float("1.29845758141598") + Float("0.634963914784736")*I, 1e-13) assert eq(exp(1 + I).evalf(15), Float( "1.46869393991588") + Float("2.28735528717884239")*I, 1e-13) assert eq(exp(-0.5 + 1.5*I).evalf(15), Float( "0.0429042815937374") + Float("0.605011292285002")*I, 1e-13) assert eq(log(pi + sqrt(2)*I).evalf( 15), Float("1.23699044022052") + Float("0.422985442737893")*I, 1e-13) assert eq(cos(100).evalf(15), Float("0.86231887228768"), 1e-13) def test_extensibility_eval(): class MyFunc(Function): @classmethod def eval(cls, *args): return (0, 0, 0) assert MyFunc(0) == (0, 0, 0) def test_function_non_commutative(): x = Symbol('x', commutative=False) assert f(x).is_commutative is False assert sin(x).is_commutative is False assert exp(x).is_commutative is False assert log(x).is_commutative is False def test_function__eval_nseries(): n = Symbol('n') assert sin(x)._eval_nseries(x, 2, None) == x + O(x**2) assert sin(x + 1)._eval_nseries(x, 2, None) == x*cos(1) + sin(1) + O(x**2) assert sin(pi*(1 - x))._eval_nseries(x, 2, None) == pi*x + O(x**2) assert acos(1 - x**2)._eval_nseries(x, 2, None) == sqrt(2)*x + O(x**2) assert polygamma(n, x + 1)._eval_nseries(x, 2, None) == \ polygamma(n, 1) + polygamma(n + 1, 1)*x + O(x**2) raises(PoleError, lambda: sin(1/x)._eval_nseries(x, 2, None)) raises(PoleError, lambda: acos(1 - x)._eval_nseries(x, 2, None)) raises(PoleError, lambda: acos(1 + x)._eval_nseries(x, 2, None)) assert loggamma(1/x)._eval_nseries(x, 0, None) == \ log(x)/2 - log(x)/x - 1/x + O(1, x) assert loggamma(log(1/x)).nseries(x, n=1, logx=y) == loggamma(-y) # issue 3626: assert expint(S(3)/2, -x)._eval_nseries(x, 5, None) == \ 2 - 2*sqrt(pi)*sqrt(-x) - 2*x - x**2/3 - x**3/15 - x**4/84 + O(x**5) assert 
sin(sqrt(x))._eval_nseries(x, 3, None) == \ sqrt(x) - x**(S(3)/2)/6 + x**(S(5)/2)/120 + O(x**3) def test_doit(): n = Symbol('n', integer=True) f = Sum(2 * n * x, (n, 1, 3)) d = Derivative(f, x) assert d.doit() == 12 assert d.doit(deep=False) == Sum(2*n, (n, 1, 3)) def test_evalf_default(): from sympy.functions.special.gamma_functions import polygamma assert type(sin(4.0)) == Float assert type(re(sin(I + 1.0))) == Float assert type(im(sin(I + 1.0))) == Float assert type(sin(4)) == sin assert type(polygamma(2.0, 4.0)) == Float assert type(sin(Rational(1, 4))) == sin def test_issue2300(): args = [x, y, S(2), S.Half] def ok(a): """Return True if the input args for diff are ok""" if not a: return False if a[0].is_Symbol is False: return False s_at = [i for i in range(len(a)) if a[i].is_Symbol] n_at = [i for i in range(len(a)) if not a[i].is_Symbol] # every symbol is followed by symbol or int # every number is followed by a symbol return (all(a[i + 1].is_Symbol or a[i + 1].is_Integer for i in s_at if i + 1 < len(a)) and all(a[i + 1].is_Symbol for i in n_at if i + 1 < len(a))) eq = x**10*y**8 for a in subsets(args): for v in variations(a, len(a)): if ok(v): noraise = eq.diff(*v) else: raises(ValueError, lambda: eq.diff(*v)) def test_derivative_numerically(): from random import random z0 = random() + I*random() assert abs(Derivative(sin(x), x).doit_numerically(z0) - cos(z0)) < 1e-15 def test_fdiff_argument_index_error(): from sympy.core.function import ArgumentIndexError class myfunc(Function): nargs = 1 def fdiff(self, idx): raise ArgumentIndexError mf = myfunc(x) assert mf.diff(x) == Derivative(mf, x) raises(TypeError, lambda: myfunc(x, x)) def test_deriv_wrt_function(): x = f(t) xd = diff(x, t) xdd = diff(xd, t) y = g(t) yd = diff(y, t) assert diff(x, t) == xd assert diff(2 * x + 4, t) == 2 * xd assert diff(2 * x + 4 + y, t) == 2 * xd + yd assert diff(2 * x + 4 + y * x, t) == 2 * xd + x * yd + xd * y assert diff(2 * x + 4 + y * x, x) == 2 + y assert (diff(4 * x**2 + 3 * x + x * y, t) == 3 * xd + x * yd + xd * y + 8 * x * xd) assert (diff(4 * x**2 + 3 * xd + x * y, t) == 3 * xdd + x * yd + xd * y + 8 * x * xd) assert diff(4 * x**2 + 3 * xd + x * y, xd) == 3 assert diff(4 * x**2 + 3 * xd + x * y, xdd) == 0 assert diff(sin(x), t) == xd * cos(x) assert diff(exp(x), t) == xd * exp(x) assert diff(sqrt(x), t) == xd / (2 * sqrt(x)) def test_diff_wrt_value(): assert Expr()._diff_wrt is False assert x._diff_wrt is True assert f(x)._diff_wrt is True assert Derivative(f(x), x)._diff_wrt is True assert Derivative(x**2, x)._diff_wrt is False def test_diff_wrt(): fx = f(x) dfx = diff(f(x), x) ddfx = diff(f(x), x, x) assert diff(sin(fx) + fx**2, fx) == cos(fx) + 2*fx assert diff(sin(dfx) + dfx**2, dfx) == cos(dfx) + 2*dfx assert diff(sin(ddfx) + ddfx**2, ddfx) == cos(ddfx) + 2*ddfx assert diff(fx**2, dfx) == 0 assert diff(fx**2, ddfx) == 0 assert diff(dfx**2, fx) == 0 assert diff(dfx**2, ddfx) == 0 assert diff(ddfx**2, dfx) == 0 assert diff(fx*dfx*ddfx, fx) == dfx*ddfx assert diff(fx*dfx*ddfx, dfx) == fx*ddfx assert diff(fx*dfx*ddfx, ddfx) == fx*dfx assert diff(f(x), x).diff(f(x)) == 0 assert (sin(f(x)) - cos(diff(f(x), x))).diff(f(x)) == cos(f(x)) assert diff(sin(fx), fx, x) == diff(sin(fx), x, fx) # Chain rule cases assert f(g(x)).diff(x) == \ Subs(Derivative(f(x), x), (x,), (g(x),))*Derivative(g(x), x) assert diff(f(g(x), h(x)), x) == \ Subs(Derivative(f(y, h(x)), y), (y,), (g(x),))*Derivative(g(x), x) + \ Subs(Derivative(f(g(x), y), y), (y,), (h(x),))*Derivative(h(x), x) assert f( sin(x)).diff(x) == 
Subs(Derivative(f(x), x), (x,), (sin(x),))*cos(x) assert diff(f(g(x)), g(x)) == Subs(Derivative(f(x), x), (x,), (g(x),)) def test_diff_wrt_func_subs(): assert f(g(x)).diff(x).subs(g, Lambda(x, 2*x)).doit() == f(2*x).diff(x) def test_diff_wrt_not_allowed(): raises(ValueError, lambda: diff(sin(x**2), x**2)) raises(ValueError, lambda: diff(exp(x*y), x*y)) raises(ValueError, lambda: diff(1 + x, 1 + x)) def test_klein_gordon_lagrangian(): m = Symbol('m') phi = f(x, t) L = -(diff(phi, t)**2 - diff(phi, x)**2 - m**2*phi**2)/2 eqna = Eq( diff(L, phi) - diff(L, diff(phi, x), x) - diff(L, diff(phi, t), t), 0) eqnb = Eq(diff(phi, t, t) - diff(phi, x, x) + m**2*phi, 0) assert eqna == eqnb def test_sho_lagrangian(): m = Symbol('m') k = Symbol('k') x = f(t) L = m*diff(x, t)**2/2 - k*x**2/2 eqna = Eq(diff(L, x), diff(L, diff(x, t), t)) eqnb = Eq(-k*x, m*diff(x, t, t)) assert eqna == eqnb assert diff(L, x, t) == diff(L, t, x) assert diff(L, diff(x, t), t) == m*diff(x, t, 2) assert diff(L, t, diff(x, t)) == -k*x + m*diff(x, t, 2) def test_straight_line(): F = f(x) Fd = F.diff(x) L = sqrt(1 + Fd**2) assert diff(L, F) == 0 assert diff(L, Fd) == Fd/sqrt(1 + Fd**2) def test_sort_variable(): vsort = Derivative._sort_variables assert vsort((x, y, z)) == [x, y, z] assert vsort((h(x), g(x), f(x))) == [f(x), g(x), h(x)] assert vsort((z, y, x, h(x), g(x), f(x))) == [x, y, z, f(x), g(x), h(x)] assert vsort((x, f(x), y, f(y))) == [x, f(x), y, f(y)] assert vsort((y, x, g(x), f(x), z, h(x), y, x)) == \ [x, y, f(x), g(x), z, h(x), x, y] assert vsort((z, y, f(x), x, f(x), g(x))) == [y, z, f(x), x, f(x), g(x)] assert vsort((z, y, f(x), x, f(x), g(x), z, z, y, x)) == \ [y, z, f(x), x, f(x), g(x), x, y, z, z] def test_unhandled(): class MyExpr(Expr): def _eval_derivative(self, s): if not s.name.startswith('xi'): return self else: return None expr = MyExpr(x, y, z) assert diff(expr, x, y, f(x), z) == Derivative(expr, f(x), z) assert diff(expr, f(x), x) == Derivative(expr, f(x), x) @XFAIL def test_issue_1612(): x = Symbol("x") assert Symbol('f')(x) == f(x) def test_nfloat(): from sympy.core.basic import _aresame x = Symbol("x") eq = x**(S(4)/3) + 4*x**(S(1)/3)/3 assert _aresame(nfloat(eq), x**(S(4)/3) + (4.0/3)*x**(S(1)/3)) assert _aresame(nfloat(eq, exponent=True), x**(4.0/3) + (4.0/3)*x**(1.0/3)) eq = x**(S(4)/3) + 4*x**(x/3)/3 assert _aresame(nfloat(eq), x**(S(4)/3) + (4.0/3)*x**(x/3)) big = 12345678901234567890 Float_big = Float(big) assert _aresame(nfloat(x**big, exponent=True), x**Float_big) assert _aresame(nfloat(big), Float_big) assert nfloat({x: sqrt(2)}) == {x: nfloat(sqrt(2))} assert nfloat({sqrt(2): x}) == {sqrt(2): x} assert nfloat(cos(x + sqrt(2))) == cos(x + nfloat(sqrt(2))) # issues 3243 f = S('x*lamda + lamda**3*(x/2 + 1/2) + lamda**2 + 1/4') assert not any(a.free_symbols for a in solve(f.subs(x, -0.139))) # issue 3533 assert nfloat(-100000*sqrt(2500000001) + 5000000001) == \ 9.99999999800000e-11
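# --- Illustrative sketch (not part of the SymPy test module above) ---
# Several of the tests above hinge on how SymPy represents "a derivative
# evaluated at a point" with Subs, especially in chain-rule results. A short
# standalone illustration of that behaviour follows; the exact printed form
# of the Subs expression varies between SymPy versions.
from sympy import Function, Subs, diff, sin, cos, symbols

x = symbols('x')
f = Function('f')

# Subs is "expression evaluated at a point"; doit() performs the substitution.
assert Subs(x**2 + 1, x, 3).doit() == 10

# Differentiating through an unknown inner function leaves the outer
# derivative unevaluated, wrapped in Subs at the inner argument.
print(diff(f(sin(x)), x))

# With a concrete inner function the chain rule evaluates completely.
assert diff(sin(x)**2, x) == 2*sin(x)*cos(x)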
# -*- coding: utf-8 -*- import contextlib import errno import importlib import itertools import json import os import queue import sys from atomicwrites import atomic_write import click import click_threading from . import cli_logger from .. import BUGTRACKER_HOME, DOCS_HOME, exceptions from ..sync import IdentConflict, PartialSync, StorageEmpty, SyncConflict from ..utils import expand_path, get_storage_init_args STATUS_PERMISSIONS = 0o600 STATUS_DIR_PERMISSIONS = 0o700 class _StorageIndex(object): def __init__(self): self._storages = dict( caldav='vdirsyncer.storage.dav.CalDAVStorage', carddav='vdirsyncer.storage.dav.CardDAVStorage', filesystem='vdirsyncer.storage.filesystem.FilesystemStorage', http='vdirsyncer.storage.http.HttpStorage', singlefile='vdirsyncer.storage.singlefile.SingleFileStorage', remotestorage_contacts=( 'vdirsyncer.storage.remotestorage.RemoteStorageContacts'), remotestorage_calendars=( 'vdirsyncer.storage.remotestorage.RemoteStorageCalendars'), google_calendar='vdirsyncer.storage.google.GoogleCalendarStorage', google_contacts='vdirsyncer.storage.google.GoogleContactsStorage' ) def __getitem__(self, name): item = self._storages[name] if not isinstance(item, str): return item modname, clsname = item.rsplit('.', 1) mod = importlib.import_module(modname) self._storages[name] = rv = getattr(mod, clsname) assert rv.storage_name == name return rv storage_names = _StorageIndex() del _StorageIndex class JobFailed(RuntimeError): pass def handle_cli_error(status_name=None, e=None): ''' Print a useful error message for the current exception. This is supposed to catch all exceptions, and should never raise any exceptions itself. ''' try: if e is not None: raise e else: raise except exceptions.UserError as e: cli_logger.critical(e) except StorageEmpty as e: cli_logger.error( '{status_name}: Storage "{name}" was completely emptied. If you ' 'want to delete ALL entries on BOTH sides, then use ' '`vdirsyncer sync --force-delete {status_name}`. ' 'Otherwise delete the files for {status_name} in your status ' 'directory.'.format( name=e.empty_storage.instance_name, status_name=status_name ) ) except PartialSync as e: cli_logger.error( '{status_name}: Attempted change on {storage}, which is read-only' '. Set `partial_sync` in your pair section to `ignore` to ignore ' 'those changes, or `revert` to revert them on the other side.' .format(status_name=status_name, storage=e.storage) ) except SyncConflict as e: cli_logger.error( '{status_name}: One item changed on both sides. Resolve this ' 'conflict manually, or by setting the `conflict_resolution` ' 'parameter in your config file.\n' 'See also {docs}/config.html#pair-section\n' 'Item ID: {e.ident}\n' 'Item href on side A: {e.href_a}\n' 'Item href on side B: {e.href_b}\n' .format(status_name=status_name, e=e, docs=DOCS_HOME) ) except IdentConflict as e: cli_logger.error( '{status_name}: Storage "{storage.instance_name}" contains ' 'multiple items with the same UID or even content. Vdirsyncer ' 'will now abort the synchronization of this collection, because ' 'the fix for this is not clear; It could be the result of a badly ' 'behaving server. You can try running:\n\n' ' vdirsyncer repair {storage.instance_name}\n\n' 'But make sure to have a backup of your data in some form. 
The ' 'offending hrefs are:\n\n{href_list}\n' .format(status_name=status_name, storage=e.storage, href_list='\n'.join(map(repr, e.hrefs))) ) except (click.Abort, KeyboardInterrupt, JobFailed): pass except exceptions.PairNotFound as e: cli_logger.error( 'Pair {pair_name} does not exist. Please check your ' 'configuration file and make sure you\'ve typed the pair name ' 'correctly'.format(pair_name=e.pair_name) ) except exceptions.InvalidResponse as e: cli_logger.error( 'The server returned something vdirsyncer doesn\'t understand. ' 'Error message: {!r}\n' 'While this is most likely a serverside problem, the vdirsyncer ' 'devs are generally interested in such bugs. Please report it in ' 'the issue tracker at {}' .format(e, BUGTRACKER_HOME) ) except exceptions.CollectionRequired as e: cli_logger.error( 'One or more storages don\'t support `collections = null`. ' 'You probably want to set `collections = ["from a", "from b"]`.' ) except Exception as e: tb = sys.exc_info()[2] import traceback tb = traceback.format_tb(tb) if status_name: msg = 'Unknown error occured for {}'.format(status_name) else: msg = 'Unknown error occured' msg += ': {}\nUse `-vdebug` to see the full traceback.'.format(e) cli_logger.error(msg) cli_logger.debug(''.join(tb)) def get_status_name(pair, collection): if collection is None: return pair return pair + '/' + collection def load_status(base_path, pair, collection=None, data_type=None): assert data_type is not None status_name = get_status_name(pair, collection) path = expand_path(os.path.join(base_path, status_name)) if os.path.isfile(path) and data_type == 'items': new_path = path + '.items' # XXX: Legacy migration cli_logger.warning('Migrating statuses: Renaming {} to {}' .format(path, new_path)) os.rename(path, new_path) path += '.' + data_type if not os.path.exists(path): return None assert_permissions(path, STATUS_PERMISSIONS) with open(path) as f: try: return dict(json.load(f)) except ValueError: pass return {} def save_status(base_path, pair, collection=None, data_type=None, data=None): assert data_type is not None assert data is not None status_name = get_status_name(pair, collection) path = expand_path(os.path.join(base_path, status_name)) + '.' 
+ data_type dirname = os.path.dirname(path) try: os.makedirs(dirname, STATUS_DIR_PERMISSIONS) except OSError as e: if e.errno != errno.EEXIST: raise with atomic_write(path, mode='w', overwrite=True) as f: json.dump(data, f) os.chmod(path, STATUS_PERMISSIONS) def storage_class_from_config(config): config = dict(config) storage_name = config.pop('type') try: cls = storage_names[storage_name] except KeyError: raise exceptions.UserError( 'Unknown storage type: {}'.format(storage_name)) return cls, config def storage_instance_from_config(config, create=True): ''' :param config: A configuration dictionary to pass as kwargs to the class corresponding to config['type'] ''' cls, new_config = storage_class_from_config(config) try: return cls(**new_config) except exceptions.CollectionNotFound as e: if create: config = handle_collection_not_found( config, config.get('collection', None), e=str(e)) return storage_instance_from_config(config, create=False) else: raise except Exception: return handle_storage_init_error(cls, new_config) def handle_storage_init_error(cls, config): e = sys.exc_info()[1] if not isinstance(e, TypeError) or '__init__' not in repr(e): raise all, required = get_storage_init_args(cls) given = set(config) missing = required - given invalid = given - all problems = [] if missing: problems.append( u'{} storage requires the parameters: {}' .format(cls.storage_name, u', '.join(missing))) if invalid: problems.append( u'{} storage doesn\'t take the parameters: {}' .format(cls.storage_name, u', '.join(invalid))) if not problems: raise e raise exceptions.UserError( u'Failed to initialize {}'.format(config['instance_name']), problems=problems ) class WorkerQueue(object): ''' A simple worker-queue setup. Note that workers quit if queue is empty. That means you have to first put things into the queue before spawning the worker! ''' def __init__(self, max_workers): self._queue = queue.Queue() self._workers = [] self._max_workers = max_workers self._shutdown_handlers = [] # According to http://stackoverflow.com/a/27062830, those are # threadsafe compared to increasing a simple integer variable. self.num_done_tasks = itertools.count() self.num_failed_tasks = itertools.count() def shutdown(self): while self._shutdown_handlers: try: self._shutdown_handlers.pop()() except Exception: pass def _worker(self): while True: try: func = self._queue.get(False) except queue.Empty: break try: func(wq=self) except Exception: handle_cli_error() next(self.num_failed_tasks) finally: self._queue.task_done() next(self.num_done_tasks) if not self._queue.unfinished_tasks: self.shutdown() def spawn_worker(self): if self._max_workers and len(self._workers) >= self._max_workers: return t = click_threading.Thread(target=self._worker) t.start() self._workers.append(t) @contextlib.contextmanager def join(self): assert self._workers or not self._queue.unfinished_tasks ui_worker = click_threading.UiWorker() self._shutdown_handlers.append(ui_worker.shutdown) _echo = click.echo with ui_worker.patch_click(): yield if not self._workers: # Ugly hack, needed because ui_worker is not running. click.echo = _echo cli_logger.critical('Nothing to do.') sys.exit(5) ui_worker.run() self._queue.join() for worker in self._workers: worker.join() tasks_failed = next(self.num_failed_tasks) tasks_done = next(self.num_done_tasks) if tasks_failed > 0: cli_logger.error('{} out of {} tasks failed.' 
                             .format(tasks_failed, tasks_done))
            sys.exit(1)

    def put(self, f):
        return self._queue.put(f)


def assert_permissions(path, wanted):
    permissions = os.stat(path).st_mode & 0o777
    if permissions > wanted:
        cli_logger.warning('Correcting permissions of {} from {:o} to {:o}'
                           .format(path, permissions, wanted))
        os.chmod(path, wanted)


def handle_collection_not_found(config, collection, e=None):
    storage_name = config.get('instance_name', None)
    cli_logger.warning('{}No collection {} found for storage {}.'
                       .format('{}\n'.format(e) if e else '',
                               json.dumps(collection), storage_name))
    if click.confirm('Should vdirsyncer attempt to create it?'):
        storage_type = config['type']
        cls, config = storage_class_from_config(config)
        config['collection'] = collection
        try:
            args = cls.create_collection(**config)
            args['type'] = storage_type
            return args
        except NotImplementedError as e:
            cli_logger.error(e)

    raise exceptions.UserError(
        'Unable to find or create collection "{collection}" for '
        'storage "{storage}". Please create the collection '
        'yourself.'.format(collection=collection, storage=storage_name))
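# --- Illustrative sketch (not part of the vdirsyncer module above) ---
# save_status() writes status files atomically (via atomic_write) and then
# tightens their permissions, so a crash can never leave a half-written
# status file behind. Below is a stdlib-only version of the same
# write-then-rename pattern, without the atomicwrites dependency; the demo
# path at the bottom is made up.
import json
import os
import tempfile

DEMO_STATUS_PERMISSIONS = 0o600
DEMO_STATUS_DIR_PERMISSIONS = 0o700


def save_json_atomically(path, data):
    """Write JSON to a temp file in the target directory, then rename it
    over the destination so readers never observe a partial file."""
    dirname = os.path.dirname(path) or '.'
    os.makedirs(dirname, mode=DEMO_STATUS_DIR_PERMISSIONS, exist_ok=True)
    fd, tmp_path = tempfile.mkstemp(dir=dirname, suffix='.tmp')
    try:
        with os.fdopen(fd, 'w') as f:
            json.dump(data, f)
        os.chmod(tmp_path, DEMO_STATUS_PERMISSIONS)  # restrict before exposing
        os.replace(tmp_path, path)                   # atomic rename
    except BaseException:
        os.unlink(tmp_path)
        raise


if __name__ == '__main__':
    save_json_atomically('/tmp/vdirsyncer_status_demo.items', {'href': 'etag'})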
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit import hmac import os from time import time from hashlib import sha1 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlencode try: import simplejson as json except ImportError: import json # type: ignore from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import b from libcloud.utils.py3 import urlquote if PY3: from io import FileIO as file from libcloud.utils.files import read_in_chunks from libcloud.common.types import MalformedResponseError, LibcloudError from libcloud.common.base import Response, RawResponse from libcloud.storage.providers import Provider from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerAlreadyExistsError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError from libcloud.storage.types import InvalidContainerNameError from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin from libcloud.common.rackspace import AUTH_URL CDN_HOST = "cdn.clouddrive.com" API_VERSION = "v1.0" # Keys which are used to select a correct endpoint from the service catalog. INTERNAL_ENDPOINT_KEY = "internalURL" PUBLIC_ENDPOINT_KEY = "publicURL" class CloudFilesResponse(Response): valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT] def success(self): i = int(self.status) return 200 <= i <= 299 or i in self.valid_response_codes def parse_body(self): if not self.body: return None if "content-type" in self.headers: key = "content-type" elif "Content-Type" in self.headers: key = "Content-Type" else: raise LibcloudError("Missing content-type header") content_type = self.headers[key] if content_type.find(";") != -1: content_type = content_type.split(";")[0] if content_type == "application/json": try: data = json.loads(self.body) except Exception: raise MalformedResponseError( "Failed to parse JSON", body=self.body, driver=CloudFilesStorageDriver, ) elif content_type == "text/plain": data = self.body else: data = self.body return data class CloudFilesRawResponse(CloudFilesResponse, RawResponse): pass class OpenStackSwiftConnection(OpenStackBaseConnection): """ Connection class for the OpenStack Swift endpoint. 
""" responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse auth_url = AUTH_URL _auth_version = "1.0" # TODO: Reverse the relationship - Swift -> CloudFiles def __init__(self, user_id, key, secure=True, **kwargs): # Ignore this for now kwargs.pop("use_internal_url", None) super(OpenStackSwiftConnection, self).__init__( user_id, key, secure=secure, **kwargs ) self.api_version = API_VERSION self.accept_format = "application/json" self._service_type = self._ex_force_service_type or "object-store" self._service_name = self._ex_force_service_name or "swift" if self._ex_force_service_region: self._service_region = self._ex_force_service_region else: self._service_region = None def get_endpoint(self, *args, **kwargs): if ("2.0" in self._auth_version) or ("3.x" in self._auth_version): endpoint = self.service_catalog.get_endpoint( service_type=self._service_type, name=self._service_name, region=self._service_region, ) elif ("1.1" in self._auth_version) or ("1.0" in self._auth_version): endpoint = self.service_catalog.get_endpoint( name=self._service_name, region=self._service_region ) else: endpoint = None if endpoint: return endpoint.url else: raise LibcloudError("Could not find specified endpoint") def request( self, action, params=None, data="", headers=None, method="GET", raw=False, cdn_request=False, ): if not headers: headers = {} if not params: params = {} self.cdn_request = cdn_request params["format"] = "json" if method in ["POST", "PUT"] and "Content-Type" not in headers: headers.update({"Content-Type": "application/json; charset=UTF-8"}) return super(OpenStackSwiftConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, raw=raw, ) class CloudFilesConnection(OpenStackSwiftConnection): """ Base connection class for the Cloudfiles driver. 
""" responseCls = CloudFilesResponse rawResponseCls = CloudFilesRawResponse auth_url = AUTH_URL _auth_version = "2.0" def __init__(self, user_id, key, secure=True, use_internal_url=False, **kwargs): super(CloudFilesConnection, self).__init__( user_id, key, secure=secure, **kwargs ) self.api_version = API_VERSION self.accept_format = "application/json" self.cdn_request = False self.use_internal_url = use_internal_url def get_endpoint(self): region = self._ex_force_service_region.upper() if self.use_internal_url: endpoint_type = "internal" else: endpoint_type = "external" if "2.0" in self._auth_version: ep = self.service_catalog.get_endpoint( service_type="object-store", name="cloudFiles", region=region, endpoint_type=endpoint_type, ) cdn_ep = self.service_catalog.get_endpoint( service_type="rax:object-cdn", name="cloudFilesCDN", region=region, endpoint_type=endpoint_type, ) else: raise LibcloudError( 'Auth version "%s" not supported' % (self._auth_version) ) # if this is a CDN request, return the cdn url instead if self.cdn_request: ep = cdn_ep if not ep or not ep.url: raise LibcloudError("Could not find specified endpoint") return ep.url def request( self, action, params=None, data="", headers=None, method="GET", raw=False, cdn_request=False, ): if not headers: headers = {} if not params: params = {} self.cdn_request = cdn_request params["format"] = "json" if method in ["POST", "PUT"] and "Content-Type" not in headers: headers.update({"Content-Type": "application/json; charset=UTF-8"}) return super(CloudFilesConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, raw=raw, cdn_request=cdn_request, ) class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): """ CloudFiles driver. """ name = "CloudFiles" website = "http://www.rackspace.com/" connectionCls = CloudFilesConnection hash_type = "md5" supports_chunked_encoding = True def __init__( self, key, secret=None, secure=True, host=None, port=None, region="ord", use_internal_url=False, **kwargs, ): """ @inherits: :class:`StorageDriver.__init__` :param region: ID of the region which should be used. 
:type region: ``str`` """ # This is here for backard compatibility if "ex_force_service_region" in kwargs: region = kwargs["ex_force_service_region"] self.use_internal_url = use_internal_url OpenStackDriverMixin.__init__(self, **kwargs) super(CloudFilesStorageDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs, ) @classmethod def list_regions(cls): return ["ord", "dfw", "iad", "lon", "hkg", "syd"] def iterate_containers(self): response = self.connection.request("") if response.status == httplib.NO_CONTENT: return [] elif response.status == httplib.OK: return self._to_container_list(json.loads(response.body)) raise LibcloudError("Unexpected status code: %s" % (response.status)) def get_container(self, container_name): container_name_encoded = self._encode_container_name(container_name) response = self.connection.request( "/%s" % (container_name_encoded), method="HEAD" ) if response.status == httplib.NO_CONTENT: container = self._headers_to_container(container_name, response.headers) return container elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(None, self, container_name) raise LibcloudError("Unexpected status code: %s" % (response.status)) def get_object(self, container_name, object_name): container = self.get_container(container_name) container_name_encoded = self._encode_container_name(container_name) object_name_encoded = self._encode_object_name(object_name) response = self.connection.request( "/%s/%s" % (container_name_encoded, object_name_encoded), method="HEAD" ) if response.status in [httplib.OK, httplib.NO_CONTENT]: obj = self._headers_to_object(object_name, container, response.headers) return obj elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(None, self, object_name) raise LibcloudError("Unexpected status code: %s" % (response.status)) def get_container_cdn_url(self, container, ex_ssl_uri=False): # pylint: disable=unexpected-keyword-arg container_name_encoded = self._encode_container_name(container.name) response = self.connection.request( "/%s" % (container_name_encoded), method="HEAD", cdn_request=True ) if response.status == httplib.NO_CONTENT: if ex_ssl_uri: cdn_url = response.headers["x-cdn-ssl-uri"] else: cdn_url = response.headers["x-cdn-uri"] return cdn_url elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError( value="", container_name=container.name, driver=self ) raise LibcloudError("Unexpected status code: %s" % (response.status)) def get_object_cdn_url(self, obj): container_cdn_url = self.get_container_cdn_url(container=obj.container) return "%s/%s" % (container_cdn_url, obj.name) def enable_container_cdn(self, container, ex_ttl=None): """ @inherits: :class:`StorageDriver.enable_container_cdn` :param ex_ttl: cache time to live :type ex_ttl: ``int`` """ container_name = self._encode_container_name(container.name) headers = {"X-CDN-Enabled": "True"} if ex_ttl: headers["X-TTL"] = ex_ttl # pylint: disable=unexpected-keyword-arg response = self.connection.request( "/%s" % (container_name), method="PUT", headers=headers, cdn_request=True ) return response.status in [httplib.CREATED, httplib.ACCEPTED] def create_container(self, container_name): container_name_encoded = self._encode_container_name(container_name) response = self.connection.request( "/%s" % (container_name_encoded), method="PUT" ) if response.status == httplib.CREATED: # Accepted mean that container is not yet created but it will be # eventually extra = {"object_count": 0} 
container = Container(name=container_name, extra=extra, driver=self) return container elif response.status == httplib.ACCEPTED: error = ContainerAlreadyExistsError(None, self, container_name) raise error raise LibcloudError("Unexpected status code: %s" % (response.status)) def delete_container(self, container): name = self._encode_container_name(container.name) # Only empty container can be deleted response = self.connection.request("/%s" % (name), method="DELETE") if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value="", container_name=name, driver=self) elif response.status == httplib.CONFLICT: # @TODO: Add "delete_all_objects" parameter? raise ContainerIsNotEmptyError(value="", container_name=name, driver=self) def download_object( self, obj, destination_path, overwrite_existing=False, delete_on_failure=True ): container_name = obj.container.name object_name = obj.name response = self.connection.request( "/%s/%s" % (container_name, object_name), method="GET", raw=True ) return self._get_object( obj=obj, callback=self._save_object, response=response, callback_kwargs={ "obj": obj, "response": response.response, "destination_path": destination_path, "overwrite_existing": overwrite_existing, "delete_on_failure": delete_on_failure, }, success_status_code=httplib.OK, ) def download_object_as_stream(self, obj, chunk_size=None): container_name = obj.container.name object_name = obj.name response = self.connection.request( "/%s/%s" % (container_name, object_name), method="GET", raw=True ) return self._get_object( obj=obj, callback=read_in_chunks, response=response, callback_kwargs={ "iterator": response.iter_content(chunk_size), "chunk_size": chunk_size, }, success_status_code=httplib.OK, ) def download_object_range( self, obj, destination_path, start_bytes, end_bytes=None, overwrite_existing=False, delete_on_failure=True, ): self._validate_start_and_end_bytes(start_bytes=start_bytes, end_bytes=end_bytes) container_name = obj.container.name object_name = obj.name headers = {"Range": self._get_standard_range_str(start_bytes, end_bytes)} response = self.connection.request( "/%s/%s" % (container_name, object_name), method="GET", headers=headers, raw=True, ) return self._get_object( obj=obj, callback=self._save_object, response=response, callback_kwargs={ "obj": obj, "response": response.response, "destination_path": destination_path, "overwrite_existing": overwrite_existing, "delete_on_failure": delete_on_failure, "partial_download": True, }, success_status_code=httplib.PARTIAL_CONTENT, ) def download_object_range_as_stream( self, obj, start_bytes, end_bytes=None, chunk_size=None ): self._validate_start_and_end_bytes(start_bytes=start_bytes, end_bytes=end_bytes) container_name = obj.container.name object_name = obj.name headers = {"Range": self._get_standard_range_str(start_bytes, end_bytes)} response = self.connection.request( "/%s/%s" % (container_name, object_name), headers=headers, method="GET", raw=True, ) return self._get_object( obj=obj, callback=read_in_chunks, response=response, callback_kwargs={ "iterator": response.iter_content(chunk_size), "chunk_size": chunk_size, }, success_status_code=httplib.PARTIAL_CONTENT, ) def upload_object( self, file_path, container, object_name, extra=None, verify_hash=True, headers=None, ): """ Upload an object. Note: This will override file with a same name if it already exists. 
""" return self._put_object( container=container, object_name=object_name, extra=extra, file_path=file_path, verify_hash=verify_hash, headers=headers, ) def upload_object_via_stream( self, iterator, container, object_name, extra=None, headers=None ): if isinstance(iterator, file): iterator = iter(iterator) return self._put_object( container=container, object_name=object_name, extra=extra, stream=iterator, headers=headers, ) def delete_object(self, obj): container_name = self._encode_container_name(obj.container.name) object_name = self._encode_object_name(obj.name) response = self.connection.request( "/%s/%s" % (container_name, object_name), method="DELETE" ) if response.status == httplib.NO_CONTENT: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError( value="", object_name=object_name, driver=self ) raise LibcloudError("Unexpected status code: %s" % (response.status)) def ex_purge_object_from_cdn(self, obj, email=None): """ Purge edge cache for the specified object. :param email: Email where a notification will be sent when the job completes. (optional) :type email: ``str`` """ container_name = self._encode_container_name(obj.container.name) object_name = self._encode_object_name(obj.name) headers = {"X-Purge-Email": email} if email else {} # pylint: disable=unexpected-keyword-arg response = self.connection.request( "/%s/%s" % (container_name, object_name), method="DELETE", headers=headers, cdn_request=True, ) return response.status == httplib.NO_CONTENT def ex_get_meta_data(self): """ Get meta data :rtype: ``dict`` """ response = self.connection.request("", method="HEAD") if response.status == httplib.NO_CONTENT: container_count = response.headers.get( "x-account-container-count", "unknown" ) object_count = response.headers.get("x-account-object-count", "unknown") bytes_used = response.headers.get("x-account-bytes-used", "unknown") temp_url_key = response.headers.get("x-account-meta-temp-url-key", None) return { "container_count": int(container_count), "object_count": int(object_count), "bytes_used": int(bytes_used), "temp_url_key": temp_url_key, } raise LibcloudError("Unexpected status code: %s" % (response.status)) def ex_multipart_upload_object( self, file_path, container, object_name, chunk_size=33554432, extra=None, verify_hash=True, ): object_size = os.path.getsize(file_path) if object_size < chunk_size: return self.upload_object( file_path, container, object_name, extra=extra, verify_hash=verify_hash ) iter_chunk_reader = FileChunkReader(file_path, chunk_size) for index, iterator in enumerate(iter_chunk_reader): self._upload_object_part( container=container, object_name=object_name, part_number=index, iterator=iterator, verify_hash=verify_hash, ) return self._upload_object_manifest( container=container, object_name=object_name, extra=extra, verify_hash=verify_hash, ) def ex_enable_static_website(self, container, index_file="index.html"): """ Enable serving a static website. :param container: Container instance :type container: :class:`Container` :param index_file: Name of the object which becomes an index page for every sub-directory in this container. 
:type index_file: ``str`` :rtype: ``bool`` """ container_name = container.name headers = {"X-Container-Meta-Web-Index": index_file} # pylint: disable=unexpected-keyword-arg response = self.connection.request( "/%s" % (container_name), method="POST", headers=headers, cdn_request=False ) return response.status in [httplib.CREATED, httplib.ACCEPTED] def ex_set_error_page(self, container, file_name="error.html"): """ Set a custom error page which is displayed if file is not found and serving of a static website is enabled. :param container: Container instance :type container: :class:`Container` :param file_name: Name of the object which becomes the error page. :type file_name: ``str`` :rtype: ``bool`` """ container_name = container.name headers = {"X-Container-Meta-Web-Error": file_name} # pylint: disable=unexpected-keyword-arg response = self.connection.request( "/%s" % (container_name), method="POST", headers=headers, cdn_request=False ) return response.status in [httplib.CREATED, httplib.ACCEPTED] def ex_set_account_metadata_temp_url_key(self, key): """ Set the metadata header X-Account-Meta-Temp-URL-Key on your Cloud Files account. :param key: X-Account-Meta-Temp-URL-Key :type key: ``str`` :rtype: ``bool`` """ headers = {"X-Account-Meta-Temp-URL-Key": key} # pylint: disable=unexpected-keyword-arg response = self.connection.request( "", method="POST", headers=headers, cdn_request=False ) return response.status in [ httplib.OK, httplib.NO_CONTENT, httplib.CREATED, httplib.ACCEPTED, ] def ex_get_object_temp_url(self, obj, method="GET", timeout=60): """ Create a temporary URL to allow others to retrieve or put objects in your Cloud Files account for as long or as short a time as you wish. This method is specifically for allowing users to retrieve or update an object. :param obj: The object that you wish to make temporarily public :type obj: :class:`Object` :param method: Which method you would like to allow, 'PUT' or 'GET' :type method: ``str`` :param timeout: Time (in seconds) after which you want the TempURL to expire. :type timeout: ``int`` :rtype: ``bool`` """ # pylint: disable=no-member self.connection._populate_hosts_and_request_paths() expires = int(time() + timeout) path = "%s/%s/%s" % (self.connection.request_path, obj.container.name, obj.name) try: key = self.ex_get_meta_data()["temp_url_key"] assert key is not None except Exception: raise KeyError( "You must first set the " + "X-Account-Meta-Temp-URL-Key header on your " + "Cloud Files account using " + "ex_set_account_metadata_temp_url_key before " + "you can use this method." 
) hmac_body = "%s\n%s\n%s" % (method, expires, path) sig = hmac.new(b(key), b(hmac_body), sha1).hexdigest() params = urlencode({"temp_url_sig": sig, "temp_url_expires": expires}) temp_url = "https://%s/%s/%s?%s" % ( self.connection.host + self.connection.request_path, obj.container.name, obj.name, params, ) return temp_url def _upload_object_part( self, container, object_name, part_number, iterator, verify_hash=True ): part_name = object_name + "/%08d" % part_number extra = {"content_type": "application/octet-stream"} self._put_object( container=container, object_name=part_name, extra=extra, stream=iterator, verify_hash=verify_hash, ) def _upload_object_manifest( self, container, object_name, extra=None, verify_hash=True ): extra = extra or {} meta_data = extra.get("meta_data") container_name_encoded = self._encode_container_name(container.name) object_name_encoded = self._encode_object_name(object_name) request_path = "/%s/%s" % (container_name_encoded, object_name_encoded) # pylint: disable=no-member headers = { "X-Auth-Token": self.connection.auth_token, "X-Object-Manifest": "%s/%s/" % (container_name_encoded, object_name_encoded), } data = "" response = self.connection.request( request_path, method="PUT", data=data, headers=headers, raw=True ) object_hash = None if verify_hash: hash_function = self._get_hash_function() hash_function.update(b(data)) data_hash = hash_function.hexdigest() object_hash = response.headers.get("etag") if object_hash != data_hash: raise ObjectHashMismatchError( value=( "MD5 hash checksum does not match (expected=%s, " + "actual=%s)" ) % (data_hash, object_hash), object_name=object_name, driver=self, ) obj = Object( name=object_name, size=0, hash=object_hash, extra=None, meta_data=meta_data, container=container, driver=self, ) return obj def iterate_container_objects(self, container, prefix=None, ex_prefix=None): """ Return a generator of objects for the given container. :param container: Container instance :type container: :class:`Container` :param prefix: Only get objects with names starting with prefix :type prefix: ``str`` :param ex_prefix: (Deprecated.) Only get objects with names starting with ex_prefix :type ex_prefix: ``str`` :return: A generator of Object instances. 
:rtype: ``generator`` of :class:`Object` """ prefix = self._normalize_prefix_argument(prefix, ex_prefix) params = {} if prefix: params["prefix"] = prefix while True: container_name_encoded = self._encode_container_name(container.name) response = self.connection.request( "/%s" % (container_name_encoded), params=params ) if response.status == httplib.NO_CONTENT: # Empty or non-existent container break elif response.status == httplib.OK: objects = self._to_object_list(json.loads(response.body), container) if len(objects) == 0: break for obj in objects: yield obj params["marker"] = obj.name else: raise LibcloudError("Unexpected status code: %s" % (response.status)) def _put_object( self, container, object_name, extra=None, file_path=None, stream=None, verify_hash=True, headers=None, ): extra = extra or {} container_name_encoded = self._encode_container_name(container.name) object_name_encoded = self._encode_object_name(object_name) content_type = extra.get("content_type", None) meta_data = extra.get("meta_data", None) content_disposition = extra.get("content_disposition", None) headers = headers or {} if meta_data: for key, value in list(meta_data.items()): key = "X-Object-Meta-%s" % (key) headers[key] = value if content_disposition is not None: headers["Content-Disposition"] = content_disposition request_path = "/%s/%s" % (container_name_encoded, object_name_encoded) result_dict = self._upload_object( object_name=object_name, content_type=content_type, request_path=request_path, request_method="PUT", headers=headers, file_path=file_path, stream=stream, ) response = result_dict["response"] bytes_transferred = result_dict["bytes_transferred"] server_hash = result_dict["response"].headers.get("etag", None) if response.status == httplib.EXPECTATION_FAILED: raise LibcloudError(value="Missing content-type header", driver=self) elif verify_hash and not server_hash: raise LibcloudError(value="Server didn't return etag", driver=self) elif verify_hash and result_dict["data_hash"] != server_hash: raise ObjectHashMismatchError( value=("MD5 hash checksum does not match (expected=%s, " + "actual=%s)") % (result_dict["data_hash"], server_hash), object_name=object_name, driver=self, ) elif response.status == httplib.CREATED: obj = Object( name=object_name, size=bytes_transferred, hash=server_hash, extra=None, meta_data=meta_data, container=container, driver=self, ) return obj else: # @TODO: Add test case for this condition (probably 411) raise LibcloudError("status_code=%s" % (response.status), driver=self) def _encode_container_name(self, name): """ Encode container name so it can be used as part of the HTTP request. """ if name.startswith("/"): name = name[1:] name = urlquote(name) if name.find("/") != -1: raise InvalidContainerNameError( value="Container name cannot" " contain slashes", container_name=name, driver=self, ) if len(name) > 256: raise InvalidContainerNameError( value="Container name cannot be longer than 256 bytes", container_name=name, driver=self, ) return name def _encode_object_name(self, name): name = urlquote(name) return name def _to_container_list(self, response): # @TODO: Handle more than 10k containers - use "lazy list"? 
for container in response: extra = { "object_count": int(container["count"]), "size": int(container["bytes"]), } yield Container(name=container["name"], extra=extra, driver=self) def _to_object_list(self, response, container): objects = [] for obj in response: name = obj["name"] size = int(obj["bytes"]) hash = obj["hash"] extra = { "content_type": obj["content_type"], "last_modified": obj["last_modified"], } objects.append( Object( name=name, size=size, hash=hash, extra=extra, meta_data=None, container=container, driver=self, ) ) return objects def _headers_to_container(self, name, headers): size = int(headers.get("x-container-bytes-used", 0)) object_count = int(headers.get("x-container-object-count", 0)) extra = {"object_count": object_count, "size": size} container = Container(name=name, extra=extra, driver=self) return container def _headers_to_object(self, name, container, headers): size = int(headers.pop("content-length", 0)) last_modified = headers.pop("last-modified", None) etag = headers.pop("etag", None) content_type = headers.pop("content-type", None) meta_data = {} for key, value in list(headers.items()): if key.find("x-object-meta-") != -1: key = key.replace("x-object-meta-", "") meta_data[key] = value extra = {"content_type": content_type, "last_modified": last_modified} obj = Object( name=name, size=size, hash=etag, extra=extra, meta_data=meta_data, container=container, driver=self, ) return obj def _ex_connection_class_kwargs(self): kwargs = self.openstack_connection_kwargs() kwargs["ex_force_service_region"] = self.region kwargs["use_internal_url"] = self.use_internal_url return kwargs class OpenStackSwiftStorageDriver(CloudFilesStorageDriver): """ Storage driver for the OpenStack Swift. """ type = Provider.CLOUDFILES_SWIFT name = "OpenStack Swift" connectionCls = OpenStackSwiftConnection # TODO: Reverse the relationship - Swift -> CloudFiles def __init__( self, key, secret=None, secure=True, host=None, port=None, region=None, **kwargs ): super(OpenStackSwiftStorageDriver, self).__init__( key=key, secret=secret, secure=secure, host=host, port=port, region=region, **kwargs, ) class FileChunkReader(object): def __init__(self, file_path, chunk_size): self.file_path = file_path self.total = os.path.getsize(file_path) self.chunk_size = chunk_size self.bytes_read = 0 self.stop_iteration = False def __iter__(self): return self def next(self): if self.stop_iteration: raise StopIteration start_block = self.bytes_read end_block = start_block + self.chunk_size if end_block >= self.total: end_block = self.total self.stop_iteration = True self.bytes_read += end_block - start_block return ChunkStreamReader( file_path=self.file_path, start_block=start_block, end_block=end_block, chunk_size=8192, ) def __next__(self): return self.next() class ChunkStreamReader(object): def __init__(self, file_path, start_block, end_block, chunk_size): self.fd = open(file_path, "rb") self.fd.seek(start_block) self.start_block = start_block self.end_block = end_block self.chunk_size = chunk_size self.bytes_read = 0 self.stop_iteration = False # Work around to make sure file description is closed even if the # iterator is never read from or if it's not fully exhausted def close_file(fd): try: fd.close() except Exception: pass atexit.register(close_file, self.fd) def __iter__(self): return self def next(self): if self.stop_iteration: self.fd.close() raise StopIteration block_size = self.chunk_size if self.bytes_read + block_size > self.end_block - self.start_block: block_size = self.end_block - 
self.start_block - self.bytes_read self.stop_iteration = True block = self.fd.read(block_size) self.bytes_read += block_size return block def __next__(self): return self.next()
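
# A minimal usage sketch for the CloudFilesStorageDriver above, assuming
# Apache Libcloud is installed. The credentials, region, container name, and
# file path below are placeholders, not real values.
if __name__ == "__main__":
    from libcloud.storage.providers import get_driver
    from libcloud.storage.types import Provider

    cls = get_driver(Provider.CLOUDFILES)
    driver = cls("username", "api key", region="ord")  # hypothetical credentials

    # Create a container and upload a local file into it.
    container = driver.create_container(container_name="backups")
    obj = driver.upload_object(
        file_path="/tmp/report.csv",  # hypothetical local file
        container=container,
        object_name="report.csv",
    )

    # Time-limited download URL; requires the account-wide temp URL key.
    driver.ex_set_account_metadata_temp_url_key("my-secret-key")
    print(driver.ex_get_object_temp_url(obj, method="GET", timeout=3600))

    # Enumerate the containers visible to the account.
    for c in driver.iterate_containers():
        print(c.name, c.extra.get("object_count"))
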
# -*- coding: utf-8 -*- """ Created on Sat Jan 16 13:04:06 2016 @author: lilian """ #from onvif import ONVIFCamera from cameraUtils import IPCamera import urllib import logging import time class PTZCamera(IPCamera): def __init__(self, host, port ,user, passwd): IPCamera.__init__(self, host, port, user, passwd) self.ptzService = self.create_ptz_service() self.profile = self.mediaService.GetProfiles()[0] self.initializePanTiltBoundaries() def initializePanTiltBoundaries(self): # Get PTZ configuration options for getting continuous move range request = self.ptzService.create_type('GetConfigurationOptions') request.ConfigurationToken = self.profile.PTZConfiguration._token ptz_configuration_options = self.ptzService.GetConfigurationOptions(request) self.XMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max self.XMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min self.YMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max self.YMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min self.ZMAX = ptz_configuration_options.Spaces.ContinuousZoomVelocitySpace[0].XRange.Max self.ZMIN = ptz_configuration_options.Spaces.ContinuousZoomVelocitySpace[0].XRange.Min def getStreamUri(self): # return self.mediaService.GetStreamUri()[0] return 'rtsp://192.168.1.49:554/Streaming/Channels/1?transportmode=unicast&profile=Profile_1' def getStatus(self): media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('GetStatus') request.ProfileToken = media_profile._token ptzStatus = self.ptzService.GetStatus(request) pan = ptzStatus.Position.PanTilt._x tilt = ptzStatus.Position.PanTilt._y zoom = ptzStatus.Position.Zoom._x return (pan, tilt, zoom) def continuousToRight(self): panVelocityFactor = self.XMAX tiltVelocityFactor = 0 zoomVelocityFactor = 0 self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousToLeft(self): panVelocityFactor = self.XMIN tiltVelocityFactor = 0 zoomVelocityFactor = 0 self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousToUp(self): panVelocityFactor = 0 tiltVelocityFactor = self.YMAX zoomVelocityFactor = 0 self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousToDown(self): panVelocityFactor = 0 tiltVelocityFactor = self.YMIN zoomVelocityFactor = 0 self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousZoomIn(self): panVelocityFactor = 0 tiltVelocityFactor = 0 zoomVelocityFactor = self.ZMAX self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousZoomOut(self): panVelocityFactor = 0 tiltVelocityFactor = 0 zoomVelocityFactor = self.ZMIN self.continuousMove(panVelocityFactor, tiltVelocityFactor, zoomVelocityFactor) def continuousMove(self, panFactor, tiltFactor, zoomFactor): request = self.ptzService.create_type('ContinuousMove') request.ProfileToken = self.profile._token request.Velocity.PanTilt._x = panFactor request.Velocity.PanTilt._y = tiltFactor request.Velocity.Zoom._x = zoomFactor self.ptzService.ContinuousMove(request) # Wait a certain time timeout = 1 time.sleep(timeout) # Stop continuous move self.ptzService.Stop({'ProfileToken': request.ProfileToken}) def oneStepRight(self): status = self.getStatus() logging.info("Movimiento hacia derecha desde " + str(status)) actualPan = status[0] actualTilt = status[1] media_profile = 
self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token pan = actualPan - float(2)/360 if pan <= -1: pan = 1 request.Position.PanTilt._x = pan request.Position.PanTilt._y = actualTilt absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def oneStepLeft(self): status = self.getStatus() print "Movimiento hacia izquierda desde " + str(status) actualPan = status[0] actualTilt = status[1] media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token pan = round(actualPan + float(2)/360 , 6) if pan >= 1: pan = -1 print pan request.Position.PanTilt._x = pan request.Position.PanTilt._y = actualTilt absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def oneStepUp(self): status = self.getStatus() print "Movimiento hacia arriba desde " + str(status) actualPan = status[0] actualTilt = status[1] media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token tilt = round(actualTilt - float(2)/90, 6) pan = actualPan if tilt <= -1: tilt = -1 pan = actualPan elif tilt >= 1: tilt = 1 pan = actualPan + 180*float(2)/360 request.Position.PanTilt._x = pan request.Position.PanTilt._y = tilt absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def oneStepDown(self): status = self.getStatus() print "Movimiento hacia abajo desde " + str(status) actualPan = status[0] actualTilt = status[1] media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token tilt = round(actualTilt + float(2)/90, 6) pan = actualPan if tilt <= -1: tilt = -1 pan = actualPan elif tilt >= 1: tilt = 1 pan = actualPan + 180*float(2)/360 request.Position.PanTilt._x = pan request.Position.PanTilt._y = tilt absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def oneStepZoomIn(self): status = self.getStatus() print "Zoom in desde " + str(status) media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token if status[2] < 0.05: paso = 0.07 else: paso = 0.035 pZoom = status[2] + paso if pZoom > 1: pZoom = 1 request.Position.Zoom._x = pZoom absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def oneStepZoomOut(self): status = self.getStatus() print "Zoom out desde " + str(status) media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token pZoom = status[2] - 0.01 # Con este paso anda bien if pZoom < 0: pZoom = 0 request.Position.Zoom._x = pZoom absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def continuousRight(self): logging.info("Movimiento continuo hacia derecha") media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token pan = actualPan - float(2)/360 if pan <= -1: pan = 1 request.Position.PanTilt._x = pan request.Position.PanTilt._y = actualTilt absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def moveAbsolute(self, pan, tilt, zoom = 0): media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token # pPan = round(1 - float(pan)/180, 6) # pTilt = round(1 - float(tilt)/45, 6) # pZoom = 
round(float(zoom/100), 6) # request.Position.PanTilt._x = pan request.Position.PanTilt._y = tilt request.Position.Zoom._x = zoom absoluteMoveResponse = self.ptzService.AbsoluteMove(request) def setHomePosition(self): media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('SetHomePosition') request.ProfileToken = media_profile._token self.ptzService.SetHomePosition(request) def gotoHomePosition(self): media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('GotoHomePosition') request.ProfileToken = media_profile._token self.ptzService.GotoHomePosition(request) def getSnapshotUri(self): media_profile = self.mediaService.GetProfiles()[0] request = self.mediaService.create_type('GetSnapshotUri') request.ProfileToken = media_profile._token response = self.mediaService.GetSnapshotUri(request) logging.info(response.Uri) # urllib.urlretrieve("http://10.2.1.49/onvif-http/snapshot", "local-filename.jpeg") ''' Metodo para probar capturas en la PTZ ''' def testAbsolute(self, pan, tilt, zoom = 0): media_profile = self.mediaService.GetProfiles()[0] request = self.ptzService.create_type('AbsoluteMove') request.ProfileToken = media_profile._token request.Position.PanTilt._x = pan request.Position.PanTilt._y = tilt request.Position.Zoom._x = zoom testAbsoluteResponse = self.ptzService.AbsoluteMove(request)
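
# A minimal usage sketch for the PTZCamera class above. The address and
# credentials are placeholders, and the sketch assumes the ONVIF stack that
# cameraUtils.IPCamera wraps is installed and the camera is reachable.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    camera = PTZCamera('192.168.1.49', 80, 'admin', 'admin-password')  # hypothetical credentials

    # Current pan/tilt/zoom as reported by the PTZ service.
    pan, tilt, zoom = camera.getStatus()
    print('pan=%s tilt=%s zoom=%s' % (pan, tilt, zoom))

    # One small absolute step to the right, then a one-second continuous
    # pan to the left (continuousMove sleeps and stops internally).
    camera.oneStepRight()
    camera.continuousToLeft()

    # Zoom in one step and return to the stored home position.
    camera.oneStepZoomIn()
    camera.gotoHomePosition()
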
from ev import Object, Character, utils, create_object, create_channel, search_object_tag from game.gamesrc.commands.world.character_commands import CharacterCmdSet import random from prettytable import PrettyTable class Hero(Character): """ Main player character class This class will be a very large part of just about everything related to the game. TODO: Allow for auxillary stats to be updated when core attributes change (i.e. buffs and debuffs). Effects manager - Write a management function to add and remove effects from the character model. Crafting manager - Need something to facilitate crafting. Most of this will likely end up here. """ def at_object_creation(self): self.db.attributes = { 'name': self.key, 'strength': 10, 'constitution': 10, 'intelligence': 10, 'dexterity': 10, 'luck': 10, 'health': 0, 'mana': 0, 'stamina': 0, 'temp_health': 0, 'temp_mana': 0, 'temp_stamina': 0, 'level': 1, 'exp_needed': 300, 'exp': 0, 'experience_currency': 0, 'total_exp': 0, 'race': None, 'deity': None, 'gender': None, } self.db.combat_attributes = {'attack_rating': 0, 'armor_rating': 0, 'defense_rating': 0 } self.db.currency = { 'gold': 0, 'silver': 0, 'copper': 0 } self.db.skills = { 'listen': { 'rating': 0, 'desc': 'Your ability to listen to your surroundings.'}, 'search': { 'rating': 0, 'desc': 'Your ability to search your surroundings visually'}, 'bladed weapons': { 'rating': 0, 'desc': 'Your innate ability to wield bladed weaponry'}, 'blunt weapons': {'rating': 0, 'desc': 'Your innate ability to wield blunt weaponry.'}, 'hide': { 'rating': 0, 'desc': 'Your innate ability to hide in the shadows and become unseen.'}, } self.db.archtypes = { 'soldier': { 'level': 1, 'exp_to_level': 100, 'exp': 0, 'total_exp': 0 }, 'mage': { 'level': 1, 'exp_to_level': 100, 'exp': 0, 'total_exp': 0 }, 'rogue': { 'level': 1, 'exp_to_level': 100, 'exp': 0, 'total_exp': 0 }, 'leader': { 'level': 1, 'exp_to_level': 100, 'exp': 0, 'total_exp': 0} } self.db.equipment = { 'armor': None, 'main_hand_weapon': None, 'offhand_weapon': None, 'shield': None, 'right_hand_ring': None, 'left_hand_ring': None} #Object creation questlog = create_object('game.gamesrc.objects.world.quests.QuestManager', location=self, key="Questlog") self.db.questlog = questlog self.tags.add('character_runner') self.at_post_creation() def at_post_creation(self): """ Hook used to set auxillary stats that are based off of core attributes. 
Called at the end of at_object_creation """ a = self.db.attributes c = self.db.combat_attributes a['health'] = a['constitution'] * 4 a['temp_health'] = a['health'] a['mana'] = a['intelligence'] * 4 a['temp_mana'] = a['mana'] a['stamina'] = a['constitution'] * 2 a['temp_stamina'] = a['stamina'] c['attack_rating'] = a['dexterity'] / 10 c['defense_rating'] = (a['dexterity'] / 10) + 10 c['damage_threshold'] = a['constitution'] / 10 self.db.attributes = a self.db.combat_attributes = c def at_disconnect(self): self.prelogout_location = self.location def at_post_puppet(self): self.cmdset.add(CharacterCmdSet) self.location = self.db.prelogout_location def accept_quest(self, quest): print "In accept_quest" manager = self.db.questlog quest_object = search_object_tag(quest.lower())[0] print quest_object exclusions = quest_object.db.exclusions print exclusions attributes = self.db.attributes try: split_list = exclusions.split(":") except: split_list = [] print len(split_list) if len(split_list) > 1: print "in deity logic" attribute = split_list[0] exclude = split_list[1] if 'deity' in attributes: if attributes['deity'] in exclude: self.msg("{rYou are a devout follower of %s and therefore have moral and religious objections to what this person asks of you.{n" % attributes['deity']) return print "past deity checks" if quest_object.db.prereq is not None: if ';' in quest_object.db.prereq: found = 0 split_list = quest_object.prereq.split(';') for item in split_list: item = item.strip() if item.title() in [key.title() for key in manager.db.completed_quests.keys()]: found = 1 if found != 1: self.msg("{RPre req not met.{n") return else: if quest_object.prereq.title() in [key.title() for key in manager.db.completed_quests.keys()]: pass else: self.msg("{RPre requisite not met.{n") return character_quest = quest_object.copy() character_quest.name = quest_object.name character_quest.add_help_entry() manager.add_quest(character_quest) character_quest.move_to(manager, quiet=True) self.db.quest_log = manager self.msg("{yYou have accepted: %s" % character_quest.name) return def display_character_sheet(self): a_table = PrettyTable() ca_table = PrettyTable() a_table._set_field_names(["Attributes", "Value"]) ca_table._set_field_names(["Attributes", "Value"]) for k in self.db.attributes: a_table.add_row(["%s:" % k, self.db.attributes[k]]) for k in self.db.combat_attributes: ca_table.add_row(["%s:" % k , self.db.combat_attributes[k]]) a_string = a_table.get_string() self.msg(a_string) ca_string = ca_table.get_string() self.msg(ca_string) def award_currency(self, amount, type='copper'): """ award the passed amount of currency to the characters money levels. """ c = self.db.currency c[type] += amount self.msg("{CYou have received %s %s.{n" % (amount, type)) self.db.currency = c def award_exp(self, exp, archtype=None): """ Award passed amount of experience to character experience levels and archtype experience levels. 
""" attributes = self.db.attributes archtypes = self.db.archtypes if archtype is None: self.msg("{CYou have earned %s experience.{n" % exp) else: self.msg("{CYou have earned %s experience in archtype: %s" % (exp, archtype)) if archtype is not None: archtypes[archtype]['exp'] += exp archtypes[archtype]['total_exp'] += exp self.db.archtypes = archtypes if archtypes[archtype]['exp'] == archtypes[archtype]['exp_to_level']: self.level_up_archtype(archtype='%s' % archtype) elif archtypes[archtype]['exp'] > archtypes[archtype]['exp_to_level']: offset = archtypes[archtype]['exp'] - archtypes[archtype]['exp_to_level'] self.level_up_archtype(archtype='%s' % archtype, offset=offset) attributes['exp'] += exp attributes['total_exp'] += exp self.db.attributes = attributes attributes['total_exp'] = int(attributes['total_exp']) + int(exp) attributes['experience_currency'] += int(exp) difference = int(attributes['exp_needed']) - exp if difference == 0: self.level_up(zero_out_exp=True) return elif difference < 0: #self.msg("Added %s to %s" %(attributes['experience_needed'], difference)) attributes['exp_needed'] = int(attributes['exp_needed']) + difference #get a positive number for the amount made into the next level positive_difference = difference * -1 exp_made = positive_difference attributes['exp_made'] = exp_made attributes['exp_needed'] = attributes['exp_needed'] - exp_made self.db.attributes = attributes self.level_up(difference=positive_difference) return attributes['exp_made'] = (int(attributes['exp_made']) + exp) attributes['exp_needed'] = (int(attributes['exp_needed']) - exp) self.db.attributes = attributes self.msg("{gYou have been awarded %s experience.{n" % exp_to_award) return def level_up_archtype(self, archtype, offset=None): archtypes = self.db.archtypes if archtype is not None: if offset is not None: archtypes[archtype]['exp'] = offset archtypes[archtype]['exp_to_level'] = archtypes[archtype]['total_exp'] * 1.5 archtypes[archtype]['level'] += 1 else: archtypes[archtype]['exp'] = 0 archtypes[archtype]['exp_to_level'] = archtypes[archtype]['total_exp'] * 1.5 archtypes[archtype]['level'] += 1 self.msg("{cYou have gained an archtype level in: {C%s.{n" % archtype) self.db.archtypes = archtypes else: return def level_up(self, zero_out_exp=False, difference=0): attributes = self.db.attributes attributes['level'] = int(attributes['level']) + 1 if zero_out_exp is True: attributes['experience_made'] = 0 attributes['experience_needed'] = int((int(attributes['total_exp_made']) * .50) + attributes['total_exp_made']) attributes['experience_to_next_level'] = attributes['experience_needed'] attributes['experience_needed'] = attributes['experience_needed'] - attributes['experience_made'] attributes['attribute_points'] = attributes['attribute_points'] + (int(attributes['intelligence'] / 2)) self.db.attributes = attributes self.msg("{CYou have gained a level of experience! You are now level %s! {n" % attributes['level']) def equip_item(self, ite=None, slot=None): """ Equip items, either specified or not. If no item is given, then we simply try to equip the first item we find in our inventory for each slot type respectively. """ equipment = self.db.equipment if equipment[slot] is not None: self.msg("You must unequip %s before you may equip %s." 
% (equipment[slot].name, ite.name)) return if ite is None: wep_equipped = 0 armor_equipped = 0 lring_equipped = 0 rring_equipped = 0 back_equipped = 0 trinket_equipped = 0 shield_equipped = 0 for item in self.contents: if item.db.item_type is not None: if 'weapon' in item.db.slot and wep_equipped == 0: equipment['weapon'] = item wep_equipped = 1 item.on_equip() elif 'armor' in item.db.slot and armor_equipped == 0: equipment['armor'] = item armor_equipped = 1 item.on_equip() elif 'left finger' in item.db.slot and lring_equipped == 0: equipment['left finger'] = item lring_equipped = 1 item.on_equip() elif 'right finger' in item.db.slot and rring_equipped == 0: equipment['right finger'] = item rring_equipped = 1 item.on_equip() elif 'back' in item.db.slot and back_equipped == 0: equipment['back'] = item back_equipped = 1 item.on_equip() elif 'trinket' in item.db.slot and trinket_equipped == 0: equipment['trinket'] = item trinket_equipped = 1 item.on_equip() elif 'shield' in item.db.slot and shield_equipped == 0: equipment['shield'] = item shield_equipped = 1 item.on_equip() if wep_equipped != 1: self.msg("You had no weapons to equip.") else: self.db.equipment = equipment self.msg("You now wield %s in your main hand." % self.db.equipment['weapon']) if armor_equipped != 1: self.msg("You had no armor to equip") else: self.db.equipment = equipment self.msg("You are now wearing %s for armor." % self.db.equipment['armor']) return if 'main_hand_weapon' in slot: equipment[slot] = ite self.db.equipment = equipment self.msg("You now wield %s in your main hand." % self.db.equipment['main_hand_weapon']) elif 'armor' in slot: equipment['armor'] = ite self.db.equipment = equipment self.msg("You are now wearing %s for armor." % self.db.equipment['armor']) elif 'left finger' in slot: equipment['left finger'] = ite self.db.equipment = equipment self.msg("You are now wearing %s on your left finger." % ite.name) elif 'right finger' in slot: equipment['right finger'] = ite self.db.equipment = equipment self.msg("You are now wearing %s on your right finger." % ite.name) elif 'back' in slot: equipment['back'] = ite self.db.euqipment = equipment self.msg("You are now wearing %s on your back." % ite.name) elif 'shield' in slot: equipment['shield'] = ite self.db.equipment = equipment self.msg("You are now using %s as a shield" % ite.name) elif 'trinket' in slot: equipment['trinket'] = ite self.db.equipment = equipment self.msg("You are now using %s as your trinket." % ite.name) else: self.msg("{r%s is not equippable in any slot!{n" % ite) def tick(self): """ Main function for all things needing to be done/checked every time the mob tick script fires itself (health and mana regen, kos checks etc etc) """ a = self.db.attributes if a['temp_health'] < a['health'] and not self.db.in_combat: pth = int(a['health'] * .02) + 1 a['temp_health'] = a['temp_health'] + pth if a['temp_health'] > a['health']: a['temp_health'] = a['health'] self.db.attributes = a def take_damage(self, damage): """ remove health when damage is taken """ a = self.db.attributes a['temp_health'] -= damage self.db.attributes = a ########################### #COMBAT RELATED FUNCTIONS## ########################### def begin_combat(self, target): """ begins combat sequence """ self.db.target = target target.db.target = self self.scripts.add("game.gamesrc.scripts.world_scripts.combat.CombatController") def unconcious(self): """ put a character unconcious, which adds a script that checks to see if they have woken up yet from their dirt nap. 
""" attributes = self.db.attributes attributes['temp_health'] = attributes['health'] self.db.attributes = attributes self.db.in_combat = False # self.db.unconcious = True def get_initiative(self): """ roll for attack initiative """ idice = (1, 20) roll = random.randrange(idice[0], idice[1]) return roll def do_attack_phase(self): """ run through attack logic and apply it to self.db.target, return gracefully upon None target. """ t = self.db.target e = self.db.equipment w = e['main_hand_weapon'] attack_roll = self.attack_roll() print "attack roll" if attack_roll >= t.db.combat_attributes['defense_rating']: damage = self.get_damage() unarmed_hit_texts = [ 'You punch %s unrelenlessly for %s damage' % (t.name, damage), 'You pummel the daylights out of %s for %s damage.' % (t.name, damage), 'As %s attempts to grab you, you dodge and uppercut them for %s damage.' % (t.name, damage), 'You punch %s hard in the mouth for %s damage' % (t.name, damage), 'As you land a hard blow against %s, you feel bones breaking under your fist. You deal %s damage.' % (t.name, damage) ] sword_hit_texts = [ 'You swing your blade deftly at %s for %s damage.' % (t.name, damage) ] print "unarmed hit texts" if w is None: ht = random.choice(unarmed_hit_texts) else: if w.db.attributes['weapon_type'] == 'sword': ht = random.choice(sword_hit_texts) self.msg(ht) t.take_damage(damage) else: #miss pass def do_skill_phase(self): #placeholder pass def get_damage(self): e = self.db.equipment w = e['main_hand_weapon'] if w is None: damagedice = (1, 4) damage = random.randrange(damagedice[0], damagedice[1]) return damage else: damagedice = w.db.attributes['damage_dice'] damage = random.randrange(damagedice[0], damagedice[1]) return damage def attack_roll(self): dice = (1, 20) roll = random.randrange(dice[0], dice[1]) return roll ################################# # SETTERS ################################# def set_deity(self, deity): attributes = self.db.attributes attributes['deity'] = deity self.db.attributes = attributes def set_race(self, race): attributes = self.db.attributes attributes['race'] = race self.db.attributes = attributes def set_gender(self, gender): attributes = self.db.attributes attributes['gender'] = gender self.db.attributes = attributes ################################## # BOOLEAN CHECKS ################################## def on_quest(self, quest, completed=False): """ return true if on said quest, false otherwise. """ manager = self.db.questlog print "objects.world.character.CharacterClass on_quest check => %s " % quest if completed: print "in completed" quest = manager.find_quest(quest, completed=True) else: print "non completed" quest = manager.find_quest(quest) if quest is None: return False else: return True
#!/usr/bin/python3 import fontforge # import os.path import re import collections import contextlib import logging from dvilike import OpcodeCommandsMachine, VFProcessor def parse_map(map_file): """Extracts font names, encodings, and Type1 glyph files from a map file. This parser was built using the format description here: https://www.tug.org/texinfohtml/dvips.html#psfonts_002emap Briefly, each line of a map file that doesn't start with one of the comment characters contains the TeX name of a font, a Type1 font glyph file (pf[ab] file), and optionally Postscript code enclosed in double quotes (spaces are allowed in the double quotes), a Postscript name for the font, and/or an encoding file. Args: map: A readable file object pointing to a map file. Returns: A list of named tuples each containing the TeX font name (.tfm file name), the Postscript name, any Postscript code, the encoding file name, and the pf[ab] file name for each font in the map file. """ font = collections.namedtuple('font', 'tex_name ps_name ps_code enc_name type1_name') fonts = [] # These regexes match special words in the format # Matches anything enclosed in double quotes, Postscript code ps_regex = re.compile('"([^"]*)"') # Matches words starting with '<[' or starting with '<' and ending # with '.enc', encoding files enc_regex = re.compile('<(\[\S+|\S+\.enc)') # Matches words starting with '<<' or starting with '<' and ending # with '.pf[ab]', Type 1 glyph files type1_regex = re.compile('<(<\S+|\S+\.pf[ab])') for line in map_file: tex_name = None ps_name = None ps_code = None enc_name = None type1_name = None # Skip lines starting with comment characters if not line.startswith((' ', '%', '*', ';', '#')): # Extract Postscript code in double quotes if ps_regex.search(line): ps_code = ps_regex.search(line).group(1) line = ps_regex.sub('', line) # Break the rest of the line into words for word in line.split(): if enc_regex.match(word): enc_name = enc_regex.match(word).group(1).lstrip('[') elif type1_regex.match(word): type1_name = type1_regex.match(word).group(1).lstrip('<') # tex_name will be None for the first non-file word elif not tex_name: tex_name = word ps_name = word # Because of the previous block, tex_name will be # the same as ps_name if and only if it's reading # the second non-file word elif tex_name == ps_name: ps_name = word fonts.append(font(tex_name, ps_name, ps_code, enc_name, type1_name)) return fonts def test_file(package, tex_names): """Generates a LaTeX file to test the .htfs for a font using fonttable. Args: package: The name of a font package. tex_names: A list of the TeX names of the fonts in the package. """ with open(package + '-test.tex', 'w') as test: test.write('\\documentclass{article}\n\n') test.write('\\usepackage{' + package + '}\n') test.write('\\usepackage{fonttable}\n\n') test.write('\\begin{document}\n\n') for tex_name in sorted(tex_names): test.write('\\section{' + tex_name + '}\n') test.write('\\fonttable{' + tex_name + '}\n\n') test.write('\n\\end{document}\n') def write_htf(chars, tex_name, htf): """ Writes a list of positions and characters to a file in .htf format. The format description for .htf files is here: https://www.tug.org/applications/tex4ht/mn-htf.html .htf files contain strings that are either ASCII characters, HTML entities for reserved characters, or HTML entities referring to Unicode code points. 
    This function writes the correct character or HTML entity to the output
    file, assigns characters to non-pictorial or pictorial classes based on
    whether they do or don't have a Unicode code point, and adds a comment
    including the character's name (if any) and its position in the file.

    Args:
        chars: A dictionary with character positions as keys (255 should be
            the highest position) and two-element named tuples as values,
            where 'code_point' is either a one-character string, a positive
            int representing a Unicode code point, or -1 for characters
            without code points, and 'name' is an optional string giving the
            character's name.
        tex_name: The TeX name of the font, written on the first and last
            lines of the .htf file.
        htf: A writeable file object for the output .htf file.
    """
    htf.write(tex_name + " " + str(min(chars)) + " " + str(max(chars)) + "\n")
    for i in range(min(chars), max(chars) + 1):
        # Read this as "if there is a glyph at this position"
        if i in chars:
            if chars[i].code_point == -1:
                # I request a pictorial character for glyphs without
                # Unicode code points by setting the class, the second
                # field in the .htf file, to '1'.
                htf.write("'' '1' " + str(i) + " " + chars[i].name + "\n")
            elif isinstance(chars[i].code_point, str) and len(chars[i].code_point) == 1:
                htf.write(chars[i].code_point + " '' " + str(i) + " " + chars[i].name + "\n")
            elif isinstance(chars[i].code_point, int) and chars[i].code_point > 0:
                htf.write("'&#" + hex(chars[i].code_point).lstrip("0") + ";' '' " + str(i) + " " + chars[i].name + "\n")
            else:
                logging.error('The output routine write_htf encountered a bad character, probably because of malformed input.')
        else:
            # No character here, write a blank line.
            htf.write("'' '' " + str(i) + "\n")
    htf.write(tex_name + " " + str(min(chars)) + " " + str(max(chars)) + "\n")


def get_characters(font_file, enc_file=None):
    """ Gets a list of characters from a font's glyph file in encoding order.

    This function uses FontForge to get the Unicode code points for each
    glyph from a font file. Theoretically, it can handle any font file that
    FontForge can read, but tex4ht can only handle Type1 font files.

    Args:
        font_file: The name of a font file to open.
        enc_file: The name of an encoding file to open.

    Returns:
        chars: A dictionary with character positions as keys (255 should be
            the highest position) and two-element named tuples as values,
            where 'code_point' is a positive int representing the glyph's
            Unicode code point (or -1 for glyphs without one) and 'name' is
            the glyph's name.
    """
    chars = {}
    character = collections.namedtuple('character', 'code_point name')
    with contextlib.closing(fontforge.open(font_file)) as font:
        # Change the encoding, if necessary.
        if enc_file:
            font.encoding = fontforge.loadEncodingFile(enc_file)
        for glyph in font.glyphs('encoding'):
            # When operating on font files with glyphs at positions
            # 256 or higher, the iterator will happily return them but
            # they're outside the positions that TeX cares about, so I
            # have to break.
if glyph.encoding > 255: break chars[glyph.encoding] = character(glyph.unicode, glyph.glyphname) return chars # T1 /usr/share/texmf/tex4ht/ht-fonts/unicode/lm/lm-ec.htf # TS1 /usr/share/texmf/tex4ht/ht-fonts/unicode/jknappen/tc/tcrm.htf # OMS /usr/share/texmf/tex4ht/ht-fonts/unicode/cm/cmsy.htf # OML /usr/share/texmf/tex4ht/ht-fonts/unicode/cm/cmmi.htf # OMX /usr/share/texmf/tex4ht/ht-fonts/unicode/cm/cmex.htf # OT1 /usr/share/texmf/tex4ht/ht-fonts/unicode/lm/lm-rep-cmrm.htf # OT2 /usr/share/texmf/tex4ht/ht-fonts/unicode/ams/cyrillic/wncyr.htf # LGR /usr/share/texmf/tex4ht/ht-fonts/unicode/cbgreek/grmn.htf # T2A, T2B, T2C: /usr/share/texmf/tex4ht/ht-fonts/unicode/lh/lh-t2a/l[abc]rm.htf # I don't understand why tex4ht has aliases for lbrm and lcrm that # link to larm, since the symbols are different. I can't find a # prototype for X2 at all, but there's a ldrm.htf in the same # directory as the t2 fonts so I guessed it might count. known_encodings = {'t1': 'lm-ec', 'ts1': 'tcrm', 'ot1': 'lm-rep-cmrm', 'ot2' : 'wncyr', 'oms' : 'cmsy', 'oml' : 'cmmi', 'omx' : 'cmex', 't2a' : 'larm', 't2b' : 'lbrm', 't2c' : 'lcrm', 'x2' : 'ldrm', 'lgr' : 'grmn'} def external_alias(tex_name, htf): """Writes an alias to an .htf file included with tex4ht. tex4ht accepts the name of an existing .htf file in lieu of a list of characters. This function uses a font's TeX name to select one of the .htf files packaged with tex4ht corresponding to one of the standard TeX encodings. Args: tex_name: The TeX name of a font. htf: A writeable file object for the output .htf file. """ for encoding in known_encodings: if encoding in tex_name.lower(): htf.write("." + known_encodings[encoding] + "\n") # def internal_alias(tex_name, htf): # htf.write("." + tex_name + "\n") def variant_aliases(tex_names, font_css, htf): """Adds aliases and appropriate CSS code to an .htf file. Rather than requiring a separate .htf file for each font, for fonts with the same character set, tex4ht will also accept a list of characters or an alias to another .htf file followed by a list of font names and CSS font properties. Note that for fonts named after the initial alias or list of characters, tex4ht will *only* process fonts whose TeX names can be truncated to the TeX name of the font the .htf file itself is named after. In the output HTML, the CSS font properties are added wherever the font appears in the original TeX/LaTeX code. Note that CSS font properties can also be applied to the font the .htf file is named after. This function writes font names and CSS font properties to an .htf file. Args: tex_names: The TeX names of all variants of a font. font_css: A function that uses the TeX name of a font to assign it appropripate CSS font properties. htf: A writeable file object for the output .htf file. """ for tex_name in tex_names: css = font_css(tex_name) if css: htf.write("htfcss: " + tex_name + " " + css + "\n") class VFtoHTF(OpcodeCommandsMachine): """Builds a dictionary describing the characters in a virtual font from a parsed VF file. This class is designed to operate on the output of VFParser, a class that contains methods to transform a virtual font file (VF file) into lists of containers corresponding to the tree structure of the VF file itself. Containers are a dictionary subclass whose keys can be accessed using attributes, e.g. container.name. 
The class may be thought of as a state machine: using the __call__ function it inherits from OpcodeCommandsMachine, feed it containers representing individual commands one at a time and it will record the TeX names of the fonts the virtual font refers to and then which character in which real font is typeset at a given position in the virtual font. Attributes: chars: A dictionary with character positions in the virtual font as keys and lists of two-element named tuples as values, each of which has has a 'char_code' attribute holding the character to look up in the real font file as an int and the TeX name of the real font as a string. This attribute serves as the effective return value of the class, access it after the class is finished processing the VF file. In most cases, these will be one-element lists, but sometimes a virtual font will typeset a single character by typesetting two or more characters from the real fonts, in which case the list will contain more than one element. """ def __init__(self): """Initializes the _commands dictionary and several state variables. Calls OpcodeCommandsMachine's __init__ to build _commands, which maps VF and DVI commands to functions, assigns the command 'put' to the function 'set' because the differences between the commands don't matter in this context (they both typeset characters but change the physical position on the page in different ways), and initializes chars; fonts, a dictionary that maps the VF file's internal numbers to TeX font names; vf_char, the named tuple to hold the position and TeX name for a character in a real font; and _default_font, the name of the font that appears first in the VF file. """ super().__init__() self._commands['put'] = self.set self.fonts = {} self.chars = collections.defaultdict(list) self.vf_char = collections.namedtuple('vf_char', 'char_code tex_name') self._default_font = None def _vf_char(self, char_code): """Returns a vf_char with char_code as the position and _current_font as the font. """ return self.vf_char(char_code, self._current_font) def fnt_def(self, container): """Sets the default font to the first font and defines all the fonts. This function assigns the TeX name of the font that appears first in the VF file to _default_font and internal font numbers to font TeX names in fonts. """ if not self._default_font: self._default_font = container.tex_name self.fonts[container.font_num] = container.tex_name def char(self, container): """Defines which VF character is being typeset and recurses on the DVI code. Each time short_char or long_char is called in a VF file, the font that will be used to typeset characters in the DVI code is reset to the default. _current_char holds the position in the virtual font of the character to be typeset. It then calls itself on each command in the DVI code. The subsequent three functions will change the current font (the 'fnt' command) and add characters to chars as appropriate (the 'set' and 'set_char' commands) when the appropriate commands occur in the DVI code. 
""" self._current_font = self._default_font self._current_char = container.char_code for dvi_container in container.dvi_code: self(dvi_container) def fnt(self, container): self._current_font = self.fonts[container.font_num] def set_char(self, container): self.chars[self._current_char].append(self._vf_char(container.opcode)) def set(self, container): self.chars[self._current_char].append(self._vf_char(container.char_code)) # None of these VF or DVI commands need to be processed to extract # the characters from a virtual font file. def pre(self, container): pass def post(self, container): pass def set_rule(self, container): pass def put_rule(self, container): pass def push(self, container): pass def pop(self, container): pass def right(self, container): pass def w(self, container): pass def x(self, container): pass def down(self, container): pass def y(self, container): pass def z(self, container): pass def xxx(self, container): pass if __name__ == '__main__': file = 'DroidSerif-Regular-ot1.vf' # file = 'extending_hardys_proof.dvi' with open(file, 'rb') as f: machine = VFtoHTF() for x in VFProcessor(f): print(x) machine(x) print(machine.chars) # # Command line handler # if __name__ == '__main__': # import argparse # parser = argparse.ArgumentParser(description = 'This script generates virtual hypertext font files for use with TeX4ht from PostScript Font Binary glyph files, font encoding files, and virtual font files. When called with only font files or only font and encoding files, it outputs as many .htf files as given font files. It assigns encoding files and output file names to font files in order, so any font files without encoding files must come after font files with encoding files. When called with a virtual font file, it will attempt to construct one .htf file for the virtual font. If also supplied with a map file, it will search the map file for the font names and files in the virtual font. Otherwise, it will assume that fonts are in the same order as they are in the virtual font file.') # # Some of these I can't open as file objects. The glyph and # # encoding files have to be passed to FontForge methods as file # # names, I want the default name of the .htf file to depend on the # # glyph or encoding file which I won't know until after parsing # # the arguments. I prefer to pass file names to parse_vf # # parse_map because it makes them easier to use in other scripts. # parser.add_argument('pfb_file', nargs = '+', help = 'The name(s) of PostScript Font Binary file(s).') # parser.add_argument('-e', '--encoding_file', nargs = '+', help = 'The name(s) of font encoding file(s).') # parser.add_argument('-vf', '--virtual_font_file', help = 'The name of a virtual font file.') # parser.add_argument('-m', '--map_file', help = 'The name of a map file.') # parser.add_argument('-o', '--output_file', nargs = '+', help = 'The name(s) of output virtual hypertext font file(s). 
The default is the name of the virtual font file, the encoding file, and then the name of the pfb file, in order, with .vf, .enc, or .pfb respectively replaced with .htf.') # parser.add_argument('-q', '--quiet', action = 'store_true', help = "Don't print non-error messages.") # parser.add_argument('-f', '--force', action = 'store_true', help ='Overwrite existing files.') # parser.add_argument('-V', '--version', action = 'version', version = '%(prog) 1.0.0', help = 'Print version information and exit.') # args = parser.parse_args() # Should probably replace these complicated conditionals with some # kind of OO class structure that has different behavior. # if args.virtual_font_file: # font_char_lists = [] # out = vf.replace('.vf','.htf') # # Use the map file to line up the encoding and font files # if args.map_file: # # TODO: this is severely broken # fonts = [] # for file in pfb: # fonts.append(fontforge.open(file)) # if enc: # encodings = [] # for file in enc: # encodings.append(fontforge.loadEncodingFile(file)) # else: # pass # else: # # TODO: This is broken by 3.3 because map now truncates like zip does in 2.7 # for font in map(None, args.pfb_file, args.encoding_file, args.output_file): # # Set the output file's name. # if font[2]: # out = font[2] # elif font[1]: # out = font[1].replace('.enc','.htf') # else: # out = font[0].replace('.pfb','.htf') # if not args.quiet: # print('Generating ' + out + ' using characters from ' + pfb) # if font[1]: # print(' with the encoding from ' + font[1], end=' ') # print() # # Write to file # write_htf(out, get_characters(font[0], font[1]), args.force) # # Don't overwrite an existing file unless specifically requested # if not overwrite and os.path.exists(name): # print("Didn't overwrite " + name) # return
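
# A minimal usage sketch tying the helpers above together: parse a dvips map
# file, then write one .htf file per font that has a Type1 glyph file. The
# function, file, and package names below are hypothetical, and the sketch
# assumes the fontforge Python module used by get_characters() is importable.
def build_package_htfs(map_path, package):
    """Hypothetical driver: emit .htf files and a fonttable test document."""
    with open(map_path) as map_file:
        fonts = parse_map(map_file)

    for font in fonts:
        if not font.type1_name:
            continue
        chars = get_characters(font.type1_name, font.enc_name)
        with open(font.tex_name + '.htf', 'w') as htf:
            if chars:
                write_htf(chars, font.tex_name, htf)
            else:
                # Fall back to an alias to one of the .htf files that ship
                # with tex4ht, keyed off the encoding in the TeX name.
                external_alias(font.tex_name, htf)

    # A LaTeX document that renders a font table for each font, for checking
    # the generated .htf files by eye.
    test_file(package, [font.tex_name for font in fonts])


# build_package_htfs('droidserif.map', 'droidserif')  # hypothetical inputs
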
# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for S3 or Storage Servers that follow the S3 Protocol""" import hashlib import httplib import math import re import tempfile import boto.exception import eventlet from oslo.config import cfg import six import six.moves.urllib.parse as urlparse from glance.common import exception from glance.common import utils from glance.openstack.common import gettextutils import glance.openstack.common.log as logging from glance.openstack.common import units import glance.store import glance.store.base import glance.store.location LOG = logging.getLogger(__name__) _LE = gettextutils._LE _LI = gettextutils._LI DEFAULT_LARGE_OBJECT_SIZE = 100 # 100M DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 10 # 10M DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE = 5 # 5M DEFAULT_THREAD_POOLS = 10 # 10 pools s3_opts = [ cfg.StrOpt('s3_store_host', help=_('The host where the S3 server is listening.')), cfg.StrOpt('s3_store_access_key', secret=True, help=_('The S3 query token access key.')), cfg.StrOpt('s3_store_secret_key', secret=True, help=_('The S3 query token secret key.')), cfg.StrOpt('s3_store_bucket', help=_('The S3 bucket to be used to store the Glance data.')), cfg.StrOpt('s3_store_object_buffer_dir', help=_('The local directory where uploads will be staged ' 'before they are transferred into S3.')), cfg.BoolOpt('s3_store_create_bucket_on_put', default=False, help=_('A boolean to determine if the S3 bucket should be ' 'created on upload if it does not exist or if ' 'an error should be returned to the user.')), cfg.StrOpt('s3_store_bucket_url_format', default='subdomain', help=_('The S3 calling format used to determine the bucket. ' 'Either subdomain or path can be used.')), cfg.IntOpt('s3_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE, help=_('What size, in MB, should S3 start chunking image files ' 'and do a multipart upload in S3.')), cfg.IntOpt('s3_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE, help=_('What multipart upload part size, in MB, should S3 use ' 'when uploading parts. The size must be greater than or ' 'equal to 5M.')), cfg.IntOpt('s3_store_thread_pools', default=DEFAULT_THREAD_POOLS, help=_('The number of thread pools to perform a multipart ' 'upload in S3.')), ] CONF = cfg.CONF CONF.register_opts(s3_opts) class UploadPart: """ The class for the upload part """ def __init__(self, mpu, fp, partnum, chunks): self.mpu = mpu self.partnum = partnum self.fp = fp self.size = 0 self.chunks = chunks self.etag = {} # partnum -> etag self.success = True def run_upload(part): """ Upload the upload part into S3 and set returned etag and size to its part info. 
""" pnum = part.partnum bsize = part.chunks LOG.info(_LI("Uploading upload part in S3 partnum=%(pnum)d, " "size=%(bsize)d, key=%(key)s, UploadId=%(UploadId)s") % {'pnum': pnum, 'bsize': bsize, 'key': part.mpu.key_name, 'UploadId': part.mpu.id}) try: key = part.mpu.upload_part_from_file(part.fp, part_num=part.partnum, size=bsize) part.etag[part.partnum] = key.etag part.size = key.size except boto.exception.BotoServerError as e: status = e.status reason = e.reason LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, " "size=%(bsize)d, status=%(status)d, " "reason=%(reason)s") % {'pnum': pnum, 'bsize': bsize, 'status': status, 'reason': reason}) part.success = False except Exception as e: LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, " "size=%(bsize)d due to internal error: %(err)s") % {'pnum': pnum, 'bsize': bsize, 'err': utils.exception_to_str(e)}) part.success = False finally: part.fp.close() class StoreLocation(glance.store.location.StoreLocation): """ Class describing an S3 URI. An S3 URI can look like any of the following: s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id The s3+https:// URIs indicate there is an HTTPS s3service URL """ def process_specs(self): self.scheme = self.specs.get('scheme', 's3') self.accesskey = self.specs.get('accesskey') self.secretkey = self.specs.get('secretkey') s3_host = self.specs.get('s3serviceurl') self.bucket = self.specs.get('bucket') self.key = self.specs.get('key') if s3_host.startswith('https://'): self.scheme = 's3+https' s3_host = s3_host[8:].strip('/') elif s3_host.startswith('http://'): s3_host = s3_host[7:].strip('/') self.s3serviceurl = s3_host.strip('/') def _get_credstring(self): if self.accesskey: return '%s:%s@' % (self.accesskey, self.secretkey) return '' def get_uri(self): return "%s://%s%s/%s/%s" % ( self.scheme, self._get_credstring(), self.s3serviceurl, self.bucket, self.key) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. Note that an Amazon AWS secret key can contain the forward slash, which is entirely retarded, and breaks urlparse miserably. This function works around that issue. """ # Make sure that URIs that contain multiple schemes, such as: # s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence " "of a scheme. If you have specified a URI like " "s3://accesskey:secretkey@" "https://s3.amazonaws.com/bucket/key-id" ", you need to change it to use the " "s3+https:// scheme, like so: " "s3+https://accesskey:secretkey@" "s3.amazonaws.com/bucket/key-id") LOG.info(_LI("Invalid store uri: %s") % reason) raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) assert pieces.scheme in ('s3', 's3+http', 's3+https') self.scheme = pieces.scheme path = pieces.path.strip('/') netloc = pieces.netloc.strip('/') entire_path = (netloc + '/' + path).strip('/') if '@' in uri: creds, path = entire_path.split('@') cred_parts = creds.split(':') try: access_key = cred_parts[0] secret_key = cred_parts[1] # NOTE(jaypipes): Need to encode to UTF-8 here because of a # bug in the HMAC library that boto uses. 
# See: http://bugs.python.org/issue5285 # See: http://trac.edgewall.org/ticket/8083 access_key = access_key.encode('utf-8') secret_key = secret_key.encode('utf-8') self.accesskey = access_key self.secretkey = secret_key except IndexError: reason = _("Badly formed S3 credentials") LOG.info(reason) raise exception.BadStoreUri(message=reason) else: self.accesskey = None path = entire_path try: path_parts = path.split('/') self.key = path_parts.pop() self.bucket = path_parts.pop() if path_parts: self.s3serviceurl = '/'.join(path_parts).strip('/') else: reason = _("Badly formed S3 URI. Missing s3 service URL.") raise exception.BadStoreUri(message=reason) except IndexError: reason = _("Badly formed S3 URI") LOG.info(reason) raise exception.BadStoreUri(message=reason) class ChunkedFile(object): """ We send this back to the Glance API server as something that can iterate over a ``boto.s3.key.Key`` """ CHUNKSIZE = 65536 def __init__(self, fp): self.fp = fp def __iter__(self): """Return an iterator over the image file""" try: if self.fp: while True: chunk = self.fp.read(ChunkedFile.CHUNKSIZE) if chunk: yield chunk else: break finally: self.close() def getvalue(self): """Return entire string value... used in testing.""" data = "" self.len = 0 for chunk in self: read_bytes = len(chunk) data = data + chunk self.len = self.len + read_bytes return data def close(self): """Close the internal file pointer.""" if self.fp: self.fp.close() self.fp = None class Store(glance.store.base.Store): """An implementation of the s3 adapter.""" EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>" def get_schemes(self): return ('s3', 's3+http', 's3+https') def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.s3_host = self._option_get('s3_store_host') access_key = self._option_get('s3_store_access_key') secret_key = self._option_get('s3_store_secret_key') # NOTE(jaypipes): Need to encode to UTF-8 here because of a # bug in the HMAC library that boto uses. # See: http://bugs.python.org/issue5285 # See: http://trac.edgewall.org/ticket/8083 self.access_key = access_key.encode('utf-8') self.secret_key = secret_key.encode('utf-8') self.bucket = self._option_get('s3_store_bucket') self.scheme = 's3' if self.s3_host.startswith('https://'): self.scheme = 's3+https' self.full_s3_host = self.s3_host elif self.s3_host.startswith('http://'): self.full_s3_host = self.s3_host else: # Defaults http self.full_s3_host = 'http://' + self.s3_host self.s3_store_object_buffer_dir = CONF.s3_store_object_buffer_dir _s3_obj_size = CONF.s3_store_large_object_size self.s3_store_large_object_size = _s3_obj_size * units.Mi _s3_ck_size = CONF.s3_store_large_object_chunk_size _s3_ck_min = DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE if _s3_ck_size < _s3_ck_min: reason = (_("s3_store_large_object_chunk_size must be at " "least %(_s3_ck_min)d MB. " "You configured it as %(_s3_ck_size)d MB") % {'_s3_ck_min': _s3_ck_min, '_s3_ck_size': _s3_ck_size}) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="s3", reason=reason) self.s3_store_large_object_chunk_size = _s3_ck_size * units.Mi if CONF.s3_store_thread_pools <= 0: reason = (_("s3_store_thread_pools must be a positive " "integer. 
%s") % CONF.s3_store_thread_pools) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="s3", reason=reason) def _option_get(self, param): result = getattr(CONF, param) if not result: reason = ("Could not find %(param)s in configuration " "options." % {'param': param}) LOG.debug(reason) raise exception.BadStoreConfiguration(store_name="s3", reason=reason) return result def get(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises `glance.exception.NotFound` if image does not exist """ key = self._retrieve_key(location) key.BufferSize = self.CHUNKSIZE class ChunkedIndexable(glance.store.Indexable): def another(self): return (self.wrapped.fp.read(ChunkedFile.CHUNKSIZE) if self.wrapped.fp else None) return (ChunkedIndexable(ChunkedFile(key), key.size), key.size) def get_size(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns the image_size (or 0 if unavailable) :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() """ try: key = self._retrieve_key(location) return key.size except Exception: return 0 def _retrieve_key(self, location): loc = location.store_location from boto.s3.connection import S3Connection s3_conn = S3Connection(loc.accesskey, loc.secretkey, host=loc.s3serviceurl, is_secure=(loc.scheme == 's3+https'), calling_format=get_calling_format()) bucket_obj = get_bucket(s3_conn, loc.bucket) key = get_key(bucket_obj, loc.key) msg = ("Retrieved image object from S3 using (s3_host=%(s3_host)s, " "access_key=%(accesskey)s, bucket=%(bucket)s, " "key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl, 'accesskey': loc.accesskey, 'bucket': loc.bucket, 'obj_name': loc.key})) LOG.debug(msg) return key def add(self, image_id, image_file, image_size): """ Stores an image file with supplied identifier to the backend storage system and returns a tuple containing information about the stored image. 
        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        create_bucket_if_missing(self.bucket, s3_conn)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_access_key:s3_store_secret_key@',
                          uri)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") %
                                      _sanitize(loc.get_uri()))

        msg = ("Adding image object to S3 using (s3_host=%(s3_host)s, "
               "access_key=%(access_key)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({'s3_host': self.s3_host,
                                       'access_key': self.access_key,
                                       'bucket': self.bucket,
                                       'obj_name': obj_name}))
        LOG.debug(msg)
        LOG.debug("Uploading an image file to S3 for %s" %
                  _sanitize(loc.get_uri()))

        if image_size < self.s3_store_large_object_size:
            key = bucket_obj.new_key(obj_name)

            # We need to wrap image_file, which is a reference to the
            # webob.Request.body_file, with a seekable file-like object,
            # otherwise the call to set_contents_from_file() will die
            # with an error about Input object has no method 'seek'. We
            # might want to call webob.Request.make_body_seekable(), but
            # unfortunately, that method copies the entire image into
            # memory and results in LP Bug #818292 occurring. So, here
            # we write the temporary file in as memory-efficient a manner
            # as possible and then supply the temporary file to S3.
We also # take this opportunity to calculate the image checksum while # writing the tempfile, so we don't need to call key.compute_md5() msg = ("Writing request body file to temporary file " "for %s") % _sanitize(loc.get_uri()) LOG.debug(msg) tmpdir = self.s3_store_object_buffer_dir temp_file = tempfile.NamedTemporaryFile(dir=tmpdir) checksum = hashlib.md5() for chunk in utils.chunkreadable(image_file, self.CHUNKSIZE): checksum.update(chunk) temp_file.write(chunk) temp_file.flush() msg = ("Uploading temporary file to S3 " "for %s") % _sanitize(loc.get_uri()) LOG.debug(msg) # OK, now upload the data into the key key.set_contents_from_file(open(temp_file.name, 'rb'), replace=False) size = key.size checksum_hex = checksum.hexdigest() LOG.debug("Wrote %(size)d bytes to S3 key named %(obj_name)s " "with checksum %(checksum_hex)s" % {'size': size, 'obj_name': obj_name, 'checksum_hex': checksum_hex}) return (loc.get_uri(), size, checksum_hex, {}) else: checksum = hashlib.md5() parts = int(math.ceil(float(image_size) / float(self.s3_store_large_object_chunk_size))) threads = parts pool_size = CONF.s3_store_thread_pools pool = eventlet.greenpool.GreenPool(size=pool_size) mpu = bucket_obj.initiate_multipart_upload(obj_name) LOG.debug("Multipart initiate key=%(obj_name)s, " "UploadId=%(UploadId)s" % {'obj_name': obj_name, 'UploadId': mpu.id}) cstart = 0 plist = [] it = utils.chunkreadable(image_file, self.s3_store_large_object_chunk_size) for p in range(threads): chunk = next(it) clen = len(chunk) checksum.update(chunk) fp = six.BytesIO(chunk) fp.seek(0) part = UploadPart(mpu, fp, cstart + 1, clen) pool.spawn_n(run_upload, part) plist.append(part) cstart += 1 pedict = {} total_size = 0 pool.waitall() for part in plist: pedict.update(part.etag) total_size += part.size success = True for part in plist: if not part.success: success = False if success: # Complete xml = get_mpu_xml(pedict) bucket_obj.complete_multipart_upload(obj_name, mpu.id, xml) checksum_hex = checksum.hexdigest() LOG.info(_LI("Multipart complete key=%(obj_name)s " "UploadId=%(UploadId)s " "Wrote %(total_size)d bytes to S3 key" "named %(obj_name)s " "with checksum %(checksum_hex)s") % {'obj_name': obj_name, 'UploadId': mpu.id, 'total_size': total_size, 'obj_name': obj_name, 'checksum_hex': checksum_hex}) return (loc.get_uri(), total_size, checksum_hex, {}) else: # Abort bucket_obj.cancel_multipart_upload(obj_name, mpu.id) LOG.error(_LE("Some parts failed to upload to S3. " "Aborted the object key=%(obj_name)s") % {'obj_name': obj_name}) msg = (_("Failed to add image object to S3. " "key=%(obj_name)s") % {'obj_name': obj_name}) raise glance.store.BackendException(msg) def delete(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file to delete :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises NotFound if image does not exist """ loc = location.store_location from boto.s3.connection import S3Connection s3_conn = S3Connection(loc.accesskey, loc.secretkey, host=loc.s3serviceurl, is_secure=(loc.scheme == 's3+https'), calling_format=get_calling_format()) bucket_obj = get_bucket(s3_conn, loc.bucket) # Close the key when we're through. 
key = get_key(bucket_obj, loc.key) msg = ("Deleting image object from S3 using (s3_host=%(s3_host)s, " "access_key=%(accesskey)s, bucket=%(bucket)s, " "key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl, 'accesskey': loc.accesskey, 'bucket': loc.bucket, 'obj_name': loc.key})) LOG.debug(msg) return key.delete() def get_bucket(conn, bucket_id): """ Get a bucket from an s3 connection :param conn: The ``boto.s3.connection.S3Connection`` :param bucket_id: ID of the bucket to fetch :raises ``glance.exception.NotFound`` if bucket is not found. """ bucket = conn.get_bucket(bucket_id) if not bucket: msg = "Could not find bucket with ID %s" % bucket_id LOG.debug(msg) raise exception.NotFound(msg) return bucket def get_s3_location(s3_host): from boto.s3.connection import Location locations = { 's3.amazonaws.com': Location.DEFAULT, 's3-eu-west-1.amazonaws.com': Location.EU, 's3-us-west-1.amazonaws.com': Location.USWest, 's3-ap-southeast-1.amazonaws.com': Location.APSoutheast, 's3-ap-northeast-1.amazonaws.com': Location.APNortheast, } # strip off scheme and port if present key = re.sub('^(https?://)?(?P<host>[^:]+)(:[0-9]+)?$', '\g<host>', s3_host) return locations.get(key, Location.DEFAULT) def create_bucket_if_missing(bucket, s3_conn): """ Creates a missing bucket in S3 if the ``s3_store_create_bucket_on_put`` option is set. :param bucket: Name of bucket to create :param s3_conn: Connection to S3 """ from boto.exception import S3ResponseError try: s3_conn.get_bucket(bucket) except S3ResponseError as e: if e.status == httplib.NOT_FOUND: if CONF.s3_store_create_bucket_on_put: location = get_s3_location(CONF.s3_store_host) try: s3_conn.create_bucket(bucket, location=location) except S3ResponseError as e: msg = (_("Failed to add bucket to S3.\n" "Got error from S3: %(e)s") % {'e': e}) raise glance.store.BackendException(msg) else: msg = (_("The bucket %(bucket)s does not exist in " "S3. Please set the " "s3_store_create_bucket_on_put option " "to add bucket to S3 automatically.") % {'bucket': bucket}) raise glance.store.BackendException(msg) def get_key(bucket, obj): """ Get a key from a bucket :param bucket: The ``boto.s3.Bucket`` :param obj: Object to get the key for :raises ``glance.exception.NotFound`` if key is not found. """ key = bucket.get_key(obj) if not key or not key.exists(): msg = ("Could not find key %(obj)s in bucket %(bucket)s" % {'obj': obj, 'bucket': bucket}) LOG.debug(msg) raise exception.NotFound(msg) return key def get_calling_format(bucket_format=None): import boto.s3.connection if bucket_format is None: bucket_format = CONF.s3_store_bucket_url_format if bucket_format.lower() == 'path': return boto.s3.connection.OrdinaryCallingFormat() else: return boto.s3.connection.SubdomainCallingFormat() def get_mpu_xml(pedict): xml = '<CompleteMultipartUpload>\n' for pnum, etag in pedict.iteritems(): xml += ' <Part>\n' xml += ' <PartNumber>%d</PartNumber>\n' % pnum xml += ' <ETag>%s</ETag>\n' % etag xml += ' </Part>\n' xml += '</CompleteMultipartUpload>' return xml
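# Illustrative sketch only (not part of this module's API surface): how
# StoreLocation.parse_uri() above splits an S3 URI into its components.
# The credentials, bucket, and key below are placeholder values, and the
# sketch assumes the glance.store.location.StoreLocation base class accepts
# an empty specs dict without calling process_specs().
def _demo_parse_s3_uri():
    loc = StoreLocation({})
    loc.parse_uri('s3+https://AKEY:SKEY@s3.amazonaws.com/glance-bucket/1234')
    # Expected result, per parse_uri() above:
    #   loc.scheme       == 's3+https'
    #   loc.accesskey    == 'AKEY'
    #   loc.secretkey    == 'SKEY'
    #   loc.s3serviceurl == 's3.amazonaws.com'
    #   loc.bucket       == 'glance-bucket'
    #   loc.key          == '1234'
    return loc.get_uri()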
""" This module converts requested URLs to callback view functions. RegexURLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a tuple in this format: (view_function, function_args, function_kwargs) """ from __future__ import unicode_literals import functools import re import warnings from importlib import import_module from threading import local from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.http import Http404 from django.utils import lru_cache, six from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango20Warning from django.utils.encoding import force_str, force_text, iri_to_uri from django.utils.functional import cached_property, lazy from django.utils.http import RFC3986_SUBDELIMS, urlquote from django.utils.module_loading import module_has_submodule from django.utils.regex_helper import normalize from django.utils.translation import get_language # SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for # the current thread (which is the only one we ever access), it is assumed to # be empty. _prefixes = local() # Overridden URLconfs for each thread are stored here. _urlconfs = local() class ResolverMatch(object): def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.app_name = app_name if namespaces: self.namespaces = [x for x in namespaces if x] else: self.namespaces = [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__]) else: # A function-based view self._func_path = '.'.join([func.__module__, func.__name__]) view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_name=%s, namespaces=%s)" % ( self._func_path, self.args, self.kwargs, self.url_name, self.app_name, self.namespaces) class Resolver404(Http404): pass class NoReverseMatch(Exception): pass @lru_cache.lru_cache(maxsize=None) def get_callable(lookup_view, can_fail=False): """ Return a callable corresponding to lookup_view. This function is used by both resolve() and reverse(), so can_fail allows the caller to choose between returning the input as is and raising an exception when the input string can't be interpreted as an import path. If lookup_view is already a callable, return it. If lookup_view is a string import path that can be resolved to a callable, import that callable and return it. If lookup_view is some other kind of string and can_fail is True, the string is returned as is. If can_fail is False, an exception is raised (either ImportError or ViewDoesNotExist). """ if callable(lookup_view): return lookup_view mod_name, func_name = get_mod_func(lookup_view) if not func_name: # No '.' in lookup_view if can_fail: return lookup_view else: raise ImportError( "Could not import '%s'. The path must be fully qualified." % lookup_view) try: mod = import_module(mod_name) except ImportError: if can_fail: return lookup_view else: parentmod, submod = get_mod_func(mod_name) if submod and not module_has_submodule(import_module(parentmod), submod): raise ViewDoesNotExist( "Could not import '%s'. Parent module %s does not exist." 
% (lookup_view, mod_name)) else: raise else: try: view_func = getattr(mod, func_name) except AttributeError: if can_fail: return lookup_view else: raise ViewDoesNotExist( "Could not import '%s'. View does not exist in module %s." % (lookup_view, mod_name)) else: if not callable(view_func): # For backwards compatibility this is raised regardless of can_fail raise ViewDoesNotExist( "Could not import '%s.%s'. View is not callable." % (mod_name, func_name)) return view_func @lru_cache.lru_cache(maxsize=None) def get_resolver(urlconf): if urlconf is None: from django.conf import settings urlconf = settings.ROOT_URLCONF return RegexURLResolver(r'^/', urlconf) @lru_cache.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver): # Build a namespaced resolver for the given parent urlconf pattern. # This makes it possible to have captured parameters in the parent # urlconf pattern. ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns) return RegexURLResolver(r'^/', [ns_resolver]) def get_mod_func(callback): # Converts 'django.views.news.stories.story_detail' to # ['django.views.news.stories', 'story_detail'] try: dot = callback.rindex('.') except ValueError: return callback, '' return callback[:dot], callback[dot + 1:] class LocaleRegexProvider(object): """ A mixin to provide a default regex property which can vary by active language. """ def __init__(self, regex): # regex is either a string representing a regular expression, or a # translatable string (using ugettext_lazy) representing a regular # expression. self._regex = regex self._regex_dict = {} @property def regex(self): """ Returns a compiled regular expression, depending upon the activated language-code. """ language_code = get_language() if language_code not in self._regex_dict: if isinstance(self._regex, six.string_types): regex = self._regex else: regex = force_text(self._regex) try: compiled_regex = re.compile(regex, re.UNICODE) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, six.text_type(e))) self._regex_dict[language_code] = compiled_regex return self._regex_dict[language_code] class RegexURLPattern(LocaleRegexProvider): def __init__(self, regex, callback, default_args=None, name=None): LocaleRegexProvider.__init__(self, regex) # callback is either a string like 'foo.views.news.stories.story_detail' # which represents the path to a module and a view function name, or a # callable object (view). if callable(callback): self._callback = callback else: self._callback = None self._callback_str = callback self.default_args = default_args or {} self.name = name def __repr__(self): return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)) def add_prefix(self, prefix): """ Adds the prefix string to a string-based callback. """ if not prefix or not hasattr(self, '_callback_str'): return self._callback_str = prefix + '.' + self._callback_str def resolve(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() if kwargs: args = () else: args = match.groups() # In both cases, pass any extra_kwargs as **kwargs. 
kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.name) @property def callback(self): if self._callback is not None: return self._callback self._callback = get_callable(self._callback_str) return self._callback class RegexURLResolver(LocaleRegexProvider): def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None): LocaleRegexProvider.__init__(self, regex) # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False def __repr__(self): if isinstance(self.urlconf_name, list) and len(self.urlconf_name): # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return str('<%s %s (%s:%s) %s>') % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.regex.pattern) def _populate(self): lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for pattern in reversed(self.url_patterns): if hasattr(pattern, '_callback_str'): self._callback_strs.add(pattern._callback_str) elif hasattr(pattern, '_callback'): callback = pattern._callback if isinstance(callback, functools.partial): callback = callback.func if not hasattr(callback, '__name__'): lookup_str = callback.__module__ + "." + callback.__class__.__name__ else: lookup_str = callback.__module__ + "." 
+ callback.__name__ self._callback_strs.add(lookup_str) p_pattern = pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(pattern, RegexURLResolver): if pattern.namespace: namespaces[pattern.namespace] = (p_pattern, pattern) if pattern.app_name: apps.setdefault(pattern.app_name, []).append(pattern.namespace) else: parent_pat = pattern.regex.pattern for name in pattern.reverse_dict: for matches, pat, defaults in pattern.reverse_dict.getlist(name): new_matches = normalize(parent_pat + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs), ) ) for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items(): namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(pattern._callback_strs) else: bits = normalize(p_pattern) lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args)) if pattern.name is not None: lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args)) self._reverse_dict[language_code] = lookups self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._populated = True @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = force_text(path) # path may be a reverse_lazy object tried = [] match = self.regex.search(path) if match: new_path = path[match.end():] for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = dict(match.groupdict(), **self.default_kwargs) sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = match.groups() + sub_match.args return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, six.string_types): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError: msg = ( "The included urlconf '{name}' does not appear to have any " "patterns in it. 
If you see valid patterns in the file then " "the issue is probably caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use default # Lazy import, since django.urls imports this file from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback), {} def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") text_args = [force_text(v) for v in args] text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()} if not self._populated: self._populate() original_lookup = lookup_view try: if self._is_callback(lookup_view): lookup_view = get_callable(lookup_view, True) except (ImportError, AttributeError) as e: raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e)) else: if not callable(original_lookup) and callable(lookup_view): warnings.warn( 'Reversing by dotted path is deprecated (%s).' % original_lookup, RemovedInDjango20Warning, stacklevel=3 ) possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, text_args)) else: if (set(kwargs.keys()) | set(defaults.keys()) != set(params) | set(defaults.keys())): continue matches = True for k, v in defaults.items(): if kwargs.get(k, v) != v: matches = False break if not matches: continue candidate_subs = text_kwargs # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace('%', '%%') + result if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE): # safe characters from `pchar` definition of RFC 3986 url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@')) # Don't allow construction of scheme relative urls. if url.startswith('//'): url = '/%%2F%s' % url[2:] return url # lookup_view can be URL label, or dotted path, or callable, Any of # these can be passed in at the top, but callables are not friendly in # error messages. m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (possibility, pattern, defaults) in possibilities] raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword " "arguments '%s' not found. %d pattern(s) tried: %s" % (lookup_view_s, args, kwargs, len(patterns), patterns)) class LocaleRegexURLResolver(RegexURLResolver): """ A URL resolver that always matches the active language code as URL prefix. Rather than taking a regex argument, we just override the ``regex`` function to always return the active language-code as regex. 
""" def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None): super(LocaleRegexURLResolver, self).__init__( None, urlconf_name, default_kwargs, app_name, namespace) @property def regex(self): language_code = get_language() if language_code not in self._regex_dict: regex_compiled = re.compile('^%s/' % language_code, re.UNICODE) self._regex_dict[language_code] = regex_compiled return self._regex_dict[language_code] def resolve(path, urlconf=None): if urlconf is None: urlconf = get_urlconf() return get_resolver(urlconf).resolve(path) def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None): if urlconf is None: urlconf = get_urlconf() resolver = get_resolver(urlconf) args = args or [] kwargs = kwargs or {} if prefix is None: prefix = get_script_prefix() if not isinstance(viewname, six.string_types): view = viewname else: parts = viewname.split(':') parts.reverse() view = parts[0] path = parts[1:] resolved_path = [] ns_pattern = '' while path: ns = path.pop() # Lookup the name to see if it could be an app identifier try: app_list = resolver.app_dict[ns] # Yes! Path part matches an app in the current Resolver if current_app and current_app in app_list: # If we are reversing for a particular app, # use that namespace ns = current_app elif ns not in app_list: # The name isn't shared by one of the instances # (i.e., the default) so just pick the first instance # as the default. ns = app_list[0] except KeyError: pass try: extra, resolver = resolver.namespace_dict[ns] resolved_path.append(ns) ns_pattern = ns_pattern + extra except KeyError as key: if resolved_path: raise NoReverseMatch( "%s is not a registered namespace inside '%s'" % (key, ':'.join(resolved_path))) else: raise NoReverseMatch("%s is not a registered namespace" % key) if ns_pattern: resolver = get_ns_resolver(ns_pattern, resolver) return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs))) reverse_lazy = lazy(reverse, six.text_type) def clear_url_caches(): get_callable.cache_clear() get_resolver.cache_clear() get_ns_resolver.cache_clear() def set_script_prefix(prefix): """ Sets the script prefix for the current thread. """ if not prefix.endswith('/'): prefix += '/' _prefixes.value = prefix def get_script_prefix(): """ Returns the currently active script prefix. Useful for client code that wishes to construct their own URLs manually (although accessing the request instance is normally going to be a lot cleaner). """ return getattr(_prefixes, "value", '/') def clear_script_prefix(): """ Unsets the script prefix for the current thread. """ try: del _prefixes.value except AttributeError: pass def set_urlconf(urlconf_name): """ Sets the URLconf for the current thread (overriding the default one in settings). Set to None to revert back to the default. """ if urlconf_name: _urlconfs.value = urlconf_name else: if hasattr(_urlconfs, "value"): del _urlconfs.value def get_urlconf(default=None): """ Returns the root URLconf to use for the current thread if it has been changed from the default one. """ return getattr(_urlconfs, "value", default) def is_valid_path(path, urlconf=None): """ Returns True if the given path resolves against the default URL resolver, False otherwise. This is a convenience method to make working with "is this a match?" cases easier, avoiding unnecessarily indented try...except blocks. """ try: resolve(path, urlconf) return True except Resolver404: return False
# # Renderer class # # A glue layer between SimObject and UI from pose import Pose from math import tan, sqrt, atan2 class Renderer: """ The Renderer class is an abstract class describing a generalized drawing engine. It has to be subclassed to implement the drawing in a way specific to the UI that the program is using. The base class does not impose any restrictions on the type of the *canvas* parameter. It is up to a specific implementation to interpret this parameter correctly. """ def __init__(self, canvas): """Create a Renderer on canvas of size _size_. The default pen and brush are transparent """ self._defpose = Pose() # The pose in the bottom-left corner self._zoom = 1.0 # The zooming factor self._zoom_c = False # Whether the scaling is done from center self._show_grid = False # Show the grid self._grid_spacing = 10.0 # default for unscaled self.__grid_subdiv = 1 # Current subdivision step self.__view_rect = None # The rect to keep in view self.size = None self.set_canvas(canvas) def __delete__(self): self.pop_state() self.pop_state() def show_grid(self, show=True): """Draw the grid on the canvas background by default. The grid is adaptive, with minimum interline distance of 40 px, and a maximum of 80 px. In the case the interline distance has to be smaller or larger, it is scaled. The interval is divided either in half, in five parts or in ten parts, to keep the grid decimal. This method will clear the canvas """ self._show_grid = show self.clear_screen() def set_canvas(self, canvas): """Tell the renderer to draw on *canvas*. The type of canvas is implementation-dependent """ self.set_pen(None) self.set_brush(None) self.push_state() # The first pushed state is the default blank self.push_state() # The second pushed state is the scaled one (zoom=1) with default pose self.reset_canvas_size(self._get_canvas_size(canvas)) self._update_default_state() def reset_canvas_size(self,size): """Change canvas size On canvas rescale the zoom factor will be recalculated: If the view rect was set, the view will be rescaled to fit the rect. If the view rect was not set, the zoom factor and default pose will be kept. """ self.size = size if self.__view_rect is not None: self.set_view_rect(*self.__view_rect) def _get_canvas_size(self,canvas): """Return the canvas size tuple (width,height) To be implemented in subclasses """ raise NotImplementedError("Renderer._get_canvas_size") def push_state(self): """Store the current state on the stack. Current state includes default pose, pen and brush. To be implemented in subclasses. """ raise NotImplementedError("Renderer.push_state") def pop_state(self): """Restore the last saved state from the stack The state includes default pose, pen and brush. To be implemented in subclasses. """ raise NotImplementedError("Renderer.pop_state") def scale(self,factor): """Scale all drawing operations by *factor* To be implemented in subclasses. """ raise NotImplementedError("Renderer.scale") def rotate(self, angle): """Rotate canvas by *angle* (in radians) To be implemented in subclasses. """ raise NotImplementedError("Renderer.rotate") def translate(self, dx, dy): """Translate canvas by *dx*, *dy* To be implemented in subclasses. """ raise NotImplementedError("Renderer.translate") def _calculate_bounds(self): """Store the bounds of the smallest rectangle containing the view \ in ``self._bounds``. To be implemented in subclasses. """ raise NotImplementedError("Renderer._calculate_bounds") def _draw_grid(self): """Draw the grid on screen To be implemented in subclasses. 
""" raise NotImplementedError("Renderer._draw_grid") def set_screen_pose(self, pose): """ Set the pose of the lower-left corner of the canvas. The zoom center will switch to that corner. :param pose: The new pose of the lower-left corner. :type pose: :class:`~pose.Pose` """ self._zoom_c = False self.__view_rect = None self._defpose = pose self._update_default_state() def set_screen_center_pose(self, pose): """ Set the pose of center of the canvas The zoom center will switch to canvas center. :param pose: The new pose of the lower-left corner. :type pose: :class:`~pose.Pose` """ self._zoom_c = True self.__view_rect = None self._defpose = pose self._update_default_state() def _adjust_grid(self, zoom_level): """Calculate the right interline distance for *zoom_level* """ self._grid_spacing *= zoom_level*self.__grid_subdiv while self._grid_spacing < 40: self._grid_spacing *= 10 while self._grid_spacing >= 400: self._grid_spacing /= 10 for self.__grid_subdiv in [1,2,5]: if self._grid_spacing/self.__grid_subdiv < 80: break self._grid_spacing /= zoom_level*self.__grid_subdiv def set_zoom_level(self, zoom_level): """Zoom up the drawing by a factor of *zoom_level* The zoom center is at the last set screen pose. This method will clear the canvas. """ self._adjust_grid(zoom_level) self.__view_rect = None self._zoom = float(zoom_level) self._update_default_state() def _update_default_state(self): """Calculate the default state with the current zoom level and pose This method will clear the canvas. """ self.pop_state() # Reset state self.pop_state() # Set zoom to 1 self.push_state() # Re-save the zoom-1 #print(self._zoom_c, self._defpose) if self._zoom_c: self.translate(self.size[0]/2,self.size[1]/2) self.scale(self._zoom) self.rotate(-self._defpose.theta) self.translate(-self._defpose.x, -self._defpose.y) self.push_state() # Save the zoomed state self._calculate_bounds() self.clear_screen() def scale_zoom_level(self, factor): """Zoom up the drawing by an additional *factor* Equivalent to ``set_zoom_level(zoom_level*factor)`` The zoom center is at the last set screen pose. This method will clear the canvas. """ self.set_zoom_level(self._zoom*factor) def set_view_rect(self, x, y, width, height): """Zoom on the rectangle to fit it into the view """ self.__view_rect = (x,y,width,height) zoom = min(self.size[0]/float(width), self.size[1]/float(height)) xtra_width = self.size[0]/zoom - float(width) xtra_height = self.size[1]/zoom - float(height) self._defpose = Pose(x - xtra_width/2, y - xtra_height/2, 0) self._zoom = zoom self._zoom_c = False self._adjust_grid(zoom) self._update_default_state() def reset_pose(self): """Resets the renderer to default pose and zoom level """ self.pop_state() self.push_state() def set_pose(self, pose): """Set a coordinate transformation based on *pose* """ self.reset_pose() self.add_pose(pose) def add_pose(self, pose): """Add a pose transformation to the current transformation """ self.translate(pose.x, pose.y) self.rotate(pose.theta) def set_pen(self, color = 0, thickness = 1): """Sets the line color anf thickness. Color is interpreted as `0xAARRGGBB`. In case `AA == 0` the color is considered fully opaque. Use None to unset a pen. """ raise NotImplementedError("Renderer.set_pen") def set_brush(self, color): """Sets the fill color. The color is an integer, interpreted as `0xAARRGGBB`. In the case `AA == 0` the color is considered fully opaque. Use `None` to unset a brush. 
""" raise NotImplementedError("Renderer.set_brush") def clear_screen(self): """Clears the canvas and draws the grid if necessary To be implemented in subclasses. """ if self._show_grid: self._draw_grid() def draw_point(self,x,y): """Draw a single point using the current pen at (x,y)""" raise NotImplementedError("Renderer.draw_point") def draw_points(self,points): """Draw a set of points, given as [(x,y)], using the current pen""" for x,y in points: self.draw_point(x,y) def draw_line(self, x1, y1, x2, y2): """Draw a line using the current pen from (x1,y1) to (x2, y2) """ raise NotImplementedError("Renderer.draw_line") def draw_arrow(self, x1, y1, x2, y2, angle=0.3, ratio=0.1, close=False): """Draw an arrow from (x1, y1) to (x2, y2). You can also specify the arrowhead angle (in radians), the ratio between arrowhead and arrow length and the triangular (close=True) or linear (close=False) arrowhead shape. """ self.push_state() self.translate(x1,y1) self.rotate(atan2(y2-y1,x2-x1)) self.scale(sqrt((x1-x2)**2 + (y1-y2)**2)) xe = 1-ratio ye = tan(angle)*ratio self.draw_line(0,0,1,0) self.draw_line(1,0,xe,-ye) self.draw_line(1,0,xe,ye) if close: self.draw_line(xe,-ye,xe,ye) self.pop_state() def draw_ellipse(self, cx, cy, ra, rb=None): """Draws an ellipse with current pen and fills it with current brush. The center of the ellipse is at (*cx*, *cy*), the half-axes are *ra* and *rb*. In the case *rb* is not specified, the method draws a circle of radius *ra*. """ raise NotImplementedError("Renderer.draw_ellipse") def draw_rectangle(self, x, y, width, height): """Draws a rectangle with current pen and fills it with current brush The bottom-left corner of the rectangle is at (*x*, *y*), if the width and height are positive. """ raise NotImplementedError("Renderer.draw_rectangle") def draw_polygon(self, points): """Draws a polygon with current pen and fills it with current brush Expects a list of points as a list of tuples or as a numpy array. """ raise NotImplementedError("Renderer.draw_polygon") #def draw_text(self, text, x, y, bgcolor = 0): #"""Draws a text string at the defined position using the current brush #""" #raise NotImplementedError("Renderer.draw_text")
""" HTML based serializers. """ import urllib from tiddlyweb.serializations import SerializationInterface from tiddlyweb.web.util import encode_name, escape_attribute_value, tiddler_url from tiddlyweb.wikitext import render_wikitext class Serialization(SerializationInterface): """ Serialize entities and collections to and from HTML representations. This is primarily used to create browser based presentations. """ def __init__(self, environ=None): SerializationInterface.__init__(self, environ) self.environ['tiddlyweb.title'] = '' self.environ['tiddlyweb.links'] = [] def list_recipes(self, recipes): """ List the recipes on the system as html. """ self.environ['tiddlyweb.title'] = 'Recipes' def wrap_list(): yield '<ul id="recipes" class="listing">\n' for recipe in recipes: yield '<li><a href="recipes/%s">%s</a></li>\n' % ( encode_name(recipe.name), recipe.name) yield '\n</ul>' return wrap_list() def list_bags(self, bags): """ List the bags on the system as html. """ self.environ['tiddlyweb.title'] = 'Bags' def wrap_list(): yield '<ul id="bags" class="listing">\n' for bag in bags: yield '<li><a href="bags/%s/tiddlers">%s</a></li>\n' % ( encode_name(bag.name), bag.name) yield '\n</ul>' return wrap_list() def list_tiddlers(self, tiddlers): """ List the tiddlers as html. """ tiddlers.store = None title = tiddlers.title server_prefix = self._server_prefix() lines = [] representation_link = tiddlers.link bag_link = '' for tiddler in tiddlers: if not representation_link: representation_link = self._tiddler_list_info(tiddler) if tiddlers.is_revisions: line = self._tiddler_revision_info(tiddler) else: line = self._tiddler_in_container_info(tiddler) lines.append(line) if tiddlers.is_search: representation_link = '%s/search' % server_prefix try: routing_args = self.environ.get('wsgiorg.routing_args')[1] except (TypeError, IndexError, KeyError): routing_args = {} if 'bag_name' in routing_args and not 'tiddler_name' in routing_args: bag_name = routing_args['bag_name'] bag_name = urllib.unquote(bag_name) bag_name = unicode(bag_name, 'utf-8') bag_link = ('<div class="baglink"><a href="%s/bags/%s">' 'Bag %s</a></div>' % (server_prefix, encode_name(bag_name), bag_name)) output = "\n".join(lines) self.environ['tiddlyweb.title'] = title return """ %s %s <ul id="tiddlers" class="listing"> %s </ul> """ % (self._tiddler_list_header(representation_link), bag_link, output) def recipe_as(self, recipe): """ Recipe as html. """ self.environ['tiddlyweb.title'] = 'Recipe %s' % recipe.name lines = [] for bag, filter_string in recipe.get_recipe(): line = '<li><a href="' if not isinstance(bag, basestring): bag = bag.name line += '%s/bags/%s/tiddlers' % ( self._server_prefix(), encode_name(bag)) if filter_string: line += '?%s' % urllib.quote( filter_string.encode('utf-8'), safe=':=;') line += '">bag: %s filter:%s</a></li>' % (bag, filter_string) lines.append(line) output = "\n".join(lines) title = 'Bags in Recipe %s' % recipe.name tiddler_link = '%s/tiddlers' % encode_name(recipe.name) return """ <div class="tiddlerslink"><a href="%s">Tiddlers in Recipe</a></div> <div id="recipedesc" class="description">%s</div> <ul id="recipe" class="listing"> %s </ul> """ % (tiddler_link, recipe.desc, output) def bag_as(self, bag): """ Bag as html. 
""" self.environ['tiddlyweb.title'] = 'Bag %s' % bag.name tiddler_link = '%s/tiddlers' % encode_name(bag.name) return """ <div id="bagdesc" class="description">%s</div> <div class="tiddlerslink"><a href="%s">Tiddlers in Bag %s</a></div> """ % (bag.desc, tiddler_link, bag.name) def tiddler_as(self, tiddler): """ Transform the provided tiddler into an HTML representation of the tiddler packaged in a DIV. Render the content using the render_wikitext subsystem. """ if tiddler.recipe: list_link = 'recipes/%s/tiddlers' % encode_name(tiddler.recipe) list_title = 'Tiddlers in Recipe %s' % tiddler.recipe else: list_link = 'bags/%s/tiddlers' % encode_name(tiddler.bag) list_title = 'Tiddlers in Bag %s' % tiddler.bag list_html = ('<div class="tiddlerslink"><a href="%s/%s" ' % (self._server_prefix(), list_link) + 'title="tiddler list">%s</a></div>' % list_title) html = render_wikitext(tiddler, self.environ) self.environ['tiddlyweb.title'] = tiddler.title return list_html + self._tiddler_div(tiddler) + html + '</div>' def _server_prefix(self): """ Return the string that is the server prefix, for creating URLs. """ config = self.environ.get('tiddlyweb.config', {}) return config.get('server_prefix', '') def _tiddler_div(self, tiddler): """ The string that starts the div that contains a tiddler. """ return u""" <div class="tiddler" title="%s" server.page.revision="%s" modifier="%s" creator="%s" modified="%s" created="%s" tags="%s" %s> """ % (escape_attribute_value(tiddler.title), tiddler.revision, escape_attribute_value(tiddler.modifier), escape_attribute_value(tiddler.creator), tiddler.modified, tiddler.created, escape_attribute_value(self.tags_as(tiddler.tags)), self._tiddler_fields(tiddler.fields)) def _tiddler_fields(self, fields): """ Turn tiddler fields into a string suitable for _tiddler_div. """ output = [] for key, val in fields.items(): output.append('%s="%s"' % (key, escape_attribute_value(val))) return ' '.join(output) def _tiddler_in_container_info(self, tiddler): """ Get the info for a non-revision tiddler in a list. """ if tiddler.recipe: base = 'recipes' else: base = 'bags' return '<li><a href="%s">%s</a></li>' % ( tiddler_url(self.environ, tiddler, container=base, full=False), tiddler.title.replace(' ', '&nbsp;', 1)) def _tiddler_list_header(self, representation_link): """ The string we present at the top of a list of tiddlers. """ if representation_link: extension_types = self.environ.get('tiddlyweb.config', {}).get('extension_types', {}).keys() links = [] query_string = self.environ.get('QUERY_STRING', '') if query_string: query_string = '?%s' % query_string for extension in extension_types: link = '<a href="%s.%s%s">%s</a>' % (representation_link, extension, query_string, extension) links.append(link) link_info = ' '.join(links) return """ <div id="tiddlersheader">This list of tiddlers as: %s</div> """ % (link_info) return '' def _tiddler_list_info(self, tiddler): """ Get the basic link info needed for listing tiddlers. """ if tiddler.recipe: representation_link = '%s/recipes/%s/tiddlers' % ( self._server_prefix(), encode_name(tiddler.recipe)) else: representation_link = '%s/bags/%s/tiddlers' % ( self._server_prefix(), encode_name(tiddler.bag)) return representation_link def _tiddler_revision_info(self, tiddler): """ Get the individual revision info for listing revisions. 
""" if tiddler.recipe: base = 'recipes' else: base = 'bags' return ('<li><a href="%s/revisions/%s">%s:%s</a></li>' % ( tiddler_url(self.environ, tiddler, container=base, full=False), tiddler.revision, tiddler.title, tiddler.revision))
"""Z-Wave discovery schemas.""" from . import const DEFAULT_VALUES_SCHEMA = { "power": { const.DISC_SCHEMAS: [ { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_POWER], }, { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_METER], const.DISC_INDEX: [const.INDEX_METER_POWER], }, ], const.DISC_OPTIONAL: True, } } DISCOVERY_SCHEMAS = [ { const.DISC_COMPONENT: "binary_sensor", const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_ENTRY_CONTROL, const.GENERIC_TYPE_SENSOR_ALARM, const.GENERIC_TYPE_SENSOR_BINARY, const.GENERIC_TYPE_SWITCH_BINARY, const.GENERIC_TYPE_METER, const.GENERIC_TYPE_SENSOR_MULTILEVEL, const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_SENSOR_NOTIFICATION, const.GENERIC_TYPE_THERMOSTAT, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_BINARY], const.DISC_TYPE: const.TYPE_BOOL, const.DISC_GENRE: const.GENRE_USER, }, "off_delay": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION], const.DISC_INDEX: [9], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "climate", # thermostat without COMMAND_CLASS_THERMOSTAT_MODE const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_THERMOSTAT, const.GENERIC_TYPE_SENSOR_MULTILEVEL, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_THERMOSTAT_HEATING, const.SPECIFIC_TYPE_SETPOINT_THERMOSTAT, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT] }, "temperature": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_TEMPERATURE], const.DISC_OPTIONAL: True, }, "fan_mode": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_FAN_MODE], const.DISC_OPTIONAL: True, }, "operating_state": { const.DISC_COMMAND_CLASS: [ const.COMMAND_CLASS_THERMOSTAT_OPERATING_STATE ], const.DISC_OPTIONAL: True, }, "fan_action": { const.DISC_COMMAND_CLASS: [ const.COMMAND_CLASS_THERMOSTAT_FAN_ACTION ], const.DISC_OPTIONAL: True, }, "mode": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_MODE], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "climate", # thermostat with COMMAND_CLASS_THERMOSTAT_MODE const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_THERMOSTAT, const.GENERIC_TYPE_SENSOR_MULTILEVEL, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_THERMOSTAT_GENERAL, const.SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2, const.SPECIFIC_TYPE_SETBACK_THERMOSTAT, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_MODE] }, "setpoint_heating": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [1], const.DISC_OPTIONAL: True, }, "setpoint_cooling": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [2], const.DISC_OPTIONAL: True, }, "setpoint_furnace": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [7], const.DISC_OPTIONAL: True, }, "setpoint_dry_air": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [8], const.DISC_OPTIONAL: True, }, "setpoint_moist_air": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [9], const.DISC_OPTIONAL: True, }, "setpoint_auto_changeover": { const.DISC_COMMAND_CLASS: 
[const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [10], const.DISC_OPTIONAL: True, }, "setpoint_eco_heating": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [11], const.DISC_OPTIONAL: True, }, "setpoint_eco_cooling": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [12], const.DISC_OPTIONAL: True, }, "setpoint_away_heating": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [13], const.DISC_OPTIONAL: True, }, "setpoint_away_cooling": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [14], const.DISC_OPTIONAL: True, }, "setpoint_full_power": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT], const.DISC_INDEX: [15], const.DISC_OPTIONAL: True, }, "temperature": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_TEMPERATURE], const.DISC_OPTIONAL: True, }, "fan_mode": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_FAN_MODE], const.DISC_OPTIONAL: True, }, "operating_state": { const.DISC_COMMAND_CLASS: [ const.COMMAND_CLASS_THERMOSTAT_OPERATING_STATE ], const.DISC_OPTIONAL: True, }, "fan_action": { const.DISC_COMMAND_CLASS: [ const.COMMAND_CLASS_THERMOSTAT_FAN_ACTION ], const.DISC_OPTIONAL: True, }, "zxt_120_swing_mode": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION], const.DISC_INDEX: [33], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "cover", # Rollershutter const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_ENTRY_CONTROL, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL, const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION, const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON, const.SPECIFIC_TYPE_SECURE_DOOR, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_GENRE: const.GENRE_USER, }, "open": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_BRIGHT], const.DISC_OPTIONAL: True, }, "close": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DIM], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "cover", # Garage Door Switch const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_ENTRY_CONTROL, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL, const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION, const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON, const.SPECIFIC_TYPE_SECURE_DOOR, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY], const.DISC_GENRE: const.GENRE_USER, } }, ), }, { const.DISC_COMPONENT: "cover", # Garage Door Barrier const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_ENTRY_CONTROL, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL, const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL, const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION, const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON, 
const.SPECIFIC_TYPE_SECURE_DOOR, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_BARRIER_OPERATOR], const.DISC_INDEX: [const.INDEX_BARRIER_OPERATOR_LABEL], } }, ), }, { const.DISC_COMPONENT: "fan", const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_SWITCH_MULTILEVEL], const.DISC_SPECIFIC_DEVICE_CLASS: [const.SPECIFIC_TYPE_FAN_SWITCH], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL], const.DISC_TYPE: const.TYPE_BYTE, } }, ), }, { const.DISC_COMPONENT: "light", const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_SWITCH_REMOTE, ], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL, const.SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL, const.SPECIFIC_TYPE_NOT_USED, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL], const.DISC_TYPE: const.TYPE_BYTE, }, "dimming_duration": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL], const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DURATION], const.DISC_OPTIONAL: True, }, "color": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR], const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_COLOR], const.DISC_OPTIONAL: True, }, "color_channels": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR], const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_CHANNELS], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "lock", const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_ENTRY_CONTROL], const.DISC_SPECIFIC_DEVICE_CLASS: [ const.SPECIFIC_TYPE_DOOR_LOCK, const.SPECIFIC_TYPE_ADVANCED_DOOR_LOCK, const.SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK, const.SPECIFIC_TYPE_SECURE_LOCKBOX, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_DOOR_LOCK], const.DISC_INDEX: [const.INDEX_DOOR_LOCK_LOCK], }, "access_control": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM], const.DISC_INDEX: [const.INDEX_ALARM_ACCESS_CONTROL], const.DISC_OPTIONAL: True, }, "alarm_type": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM], const.DISC_INDEX: [const.INDEX_ALARM_TYPE], const.DISC_OPTIONAL: True, }, "alarm_level": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM], const.DISC_INDEX: [const.INDEX_ALARM_LEVEL], const.DISC_OPTIONAL: True, }, "v2btze_advanced": { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION], const.DISC_INDEX: [12], const.DISC_OPTIONAL: True, }, }, ), }, { const.DISC_COMPONENT: "sensor", const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [ const.COMMAND_CLASS_SENSOR_MULTILEVEL, const.COMMAND_CLASS_METER, const.COMMAND_CLASS_ALARM, const.COMMAND_CLASS_SENSOR_ALARM, const.COMMAND_CLASS_INDICATOR, const.COMMAND_CLASS_BATTERY, ], const.DISC_GENRE: const.GENRE_USER, } }, ), }, { const.DISC_COMPONENT: "switch", const.DISC_GENERIC_DEVICE_CLASS: [ const.GENERIC_TYPE_METER, const.GENERIC_TYPE_SENSOR_ALARM, const.GENERIC_TYPE_SENSOR_BINARY, const.GENERIC_TYPE_SWITCH_BINARY, const.GENERIC_TYPE_ENTRY_CONTROL, const.GENERIC_TYPE_SENSOR_MULTILEVEL, const.GENERIC_TYPE_SWITCH_MULTILEVEL, const.GENERIC_TYPE_SENSOR_NOTIFICATION, const.GENERIC_TYPE_GENERIC_CONTROLLER, 
const.GENERIC_TYPE_SWITCH_REMOTE, const.GENERIC_TYPE_REPEATER_SLAVE, const.GENERIC_TYPE_THERMOSTAT, const.GENERIC_TYPE_WALL_CONTROLLER, ], const.DISC_VALUES: dict( DEFAULT_VALUES_SCHEMA, **{ const.DISC_PRIMARY: { const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY], const.DISC_TYPE: const.TYPE_BOOL, const.DISC_GENRE: const.GENRE_USER, } }, ), }, ]
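# A minimal, hedged sketch (separate from the schemas above) of how a single
# entry under DISC_VALUES could be matched against a reported Z-Wave value.
# Plain strings and dicts stand in for the real `const` attributes and
# openzwave value objects; the actual discovery logic in Home Assistant is
# more involved than this.
def value_matches(schema_value, value):
    """Return True if `value` satisfies one schema value entry.

    Criteria missing from the schema act as wildcards; list-valued criteria
    (command class, index) match if the reported value is in the list.
    """
    for key in ("command_class", "index"):
        if key in schema_value and value.get(key) not in schema_value[key]:
            return False
    for key in ("type", "genre"):
        if key in schema_value and value.get(key) != schema_value[key]:
            return False
    return True


if __name__ == "__main__":
    # Illustrative stand-in values only.
    primary = {"command_class": [0x25], "type": "Bool", "genre": "User"}
    reported = {"command_class": 0x25, "index": 0, "type": "Bool", "genre": "User"}
    assert value_matches(primary, reported)
    assert not value_matches(primary, dict(reported, command_class=0x26))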
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Pickler for values, functions, and classes. For internal use only. No backwards compatibility guarantees. Pickles created by the pickling library contain non-ASCII characters, so we base64-encode the results so that we can put them in a JSON objects. The pickler is used to embed FlatMap callable objects into the workflow JSON description. The pickler module should be used to pickle functions and modules; for values, the coders.*PickleCoder classes should be used instead. """ import base64 import logging import sys import traceback import types import zlib import dill def _is_nested_class(cls): """Returns true if argument is a class object that appears to be nested.""" return (isinstance(cls, type) and cls.__module__ != '__builtin__' and cls.__name__ not in sys.modules[cls.__module__].__dict__) def _find_containing_class(nested_class): """Finds containing class of a nestec class passed as argument.""" def _find_containing_class_inner(outer): for k, v in outer.__dict__.items(): if v is nested_class: return outer, k elif isinstance(v, type) and hasattr(v, '__dict__'): res = _find_containing_class_inner(v) if res: return res return _find_containing_class_inner(sys.modules[nested_class.__module__]) def _nested_type_wrapper(fun): """A wrapper for the standard pickler handler for class objects. Args: fun: Original pickler handler for type objects. Returns: A wrapper for type objects that handles nested classes. The wrapper detects if an object being pickled is a nested class object. For nested class object only it will save the containing class object so the nested structure is recreated during unpickle. """ def wrapper(pickler, obj): # When the nested class is defined in the __main__ module we do not have to # do anything special because the pickler itself will save the constituent # parts of the type (i.e., name, base classes, dictionary) and then # recreate it during unpickling. if _is_nested_class(obj) and obj.__module__ != '__main__': containing_class_and_name = _find_containing_class(obj) if containing_class_and_name is not None: return pickler.save_reduce( getattr, containing_class_and_name, obj=obj) try: return fun(pickler, obj) except dill.dill.PicklingError: # pylint: disable=protected-access return pickler.save_reduce( dill.dill._create_type, (type(obj), obj.__name__, obj.__bases__, dill.dill._dict_from_dictproxy(obj.__dict__)), obj=obj) # pylint: enable=protected-access return wrapper # Monkey patch the standard pickler dispatch table entry for type objects. # Dill, for certain types, defers to the standard pickler (including type # objects). We wrap the standard handler using type_wrapper() because # for nested class we want to pickle the actual enclosing class object so we # can recreate it during unpickling. 
# TODO(silviuc): Make sure we submit the fix upstream to GitHub dill project. dill.dill.Pickler.dispatch[type] = _nested_type_wrapper( dill.dill.Pickler.dispatch[type]) # Dill pickles generators objects without complaint, but unpickling produces # TypeError: object.__new__(generator) is not safe, use generator.__new__() # on some versions of Python. def _reject_generators(unused_pickler, unused_obj): raise TypeError("can't (safely) pickle generator objects") dill.dill.Pickler.dispatch[types.GeneratorType] = _reject_generators # This if guards against dill not being full initialized when generating docs. if 'save_module' in dir(dill.dill): # Always pickle non-main modules by name. old_save_module = dill.dill.save_module @dill.dill.register(dill.dill.ModuleType) def save_module(pickler, obj): if dill.dill.is_dill(pickler) and obj is pickler._main: return old_save_module(pickler, obj) else: dill.dill.log.info('M2: %s' % obj) # pylint: disable=protected-access pickler.save_reduce(dill.dill._import_module, (obj.__name__,), obj=obj) # pylint: enable=protected-access dill.dill.log.info('# M2') # Pickle module dictionaries (commonly found in lambda's globals) # by referencing their module. old_save_module_dict = dill.dill.save_module_dict known_module_dicts = {} @dill.dill.register(dict) def new_save_module_dict(pickler, obj): obj_id = id(obj) if not known_module_dicts or '__file__' in obj or '__package__' in obj: if obj_id not in known_module_dicts: for m in sys.modules.values(): try: if m and m.__name__ != '__main__': d = m.__dict__ known_module_dicts[id(d)] = m, d except AttributeError: # Skip modules that do not have the __name__ attribute. pass if obj_id in known_module_dicts and dill.dill.is_dill(pickler): m = known_module_dicts[obj_id][0] try: # pylint: disable=protected-access dill.dill._import_module(m.__name__) return pickler.save_reduce( getattr, (known_module_dicts[obj_id][0], '__dict__'), obj=obj) except (ImportError, AttributeError): return old_save_module_dict(pickler, obj) else: return old_save_module_dict(pickler, obj) dill.dill.save_module_dict = new_save_module_dict def _nest_dill_logging(): """Prefix all dill logging with its depth in the callstack. Useful for debugging pickling of deeply nested structures. """ old_log_info = dill.dill.log.info def new_log_info(msg, *args, **kwargs): old_log_info( ('1 2 3 4 5 6 7 8 9 0 ' * 10)[:len(traceback.extract_stack())] + msg, *args, **kwargs) dill.dill.log.info = new_log_info # Turn off verbose logging from the dill pickler. logging.getLogger('dill').setLevel(logging.WARN) # TODO(ccy): Currently, there are still instances of pickler.dumps() and # pickler.loads() being used for data, which results in an unnecessary base64 # encoding. This should be cleaned up. def dumps(o, enable_trace=True): """For internal use only; no backwards-compatibility guarantees.""" try: s = dill.dumps(o) except Exception: # pylint: disable=broad-except if enable_trace: dill.dill._trace(True) # pylint: disable=protected-access s = dill.dumps(o) else: raise finally: dill.dill._trace(False) # pylint: disable=protected-access # Compress as compactly as possible to decrease peak memory usage (of multiple # in-memory copies) and free up some possibly large and no-longer-needed # memory. c = zlib.compress(s, 9) del s return base64.b64encode(c) def loads(encoded, enable_trace=True): """For internal use only; no backwards-compatibility guarantees.""" c = base64.b64decode(encoded) s = zlib.decompress(c) del c # Free up some possibly large and no-longer-needed memory. 
  try:
    return dill.loads(s)
  except Exception:  # pylint: disable=broad-except
    if enable_trace:
      dill.dill._trace(True)  # pylint: disable=protected-access
      return dill.loads(s)
    else:
      raise
  finally:
    dill.dill._trace(False)  # pylint: disable=protected-access


def dump_session(file_path):
  """For internal use only; no backwards-compatibility guarantees.

  Pickle the current python session to be used in the worker.

  Note: Due to the inconsistency in the first dump of dill dump_session we
  create and load the dump twice to have consistent results in the worker and
  the running session. Check: https://github.com/uqfoundation/dill/issues/195
  """
  dill.dump_session(file_path)
  dill.load_session(file_path)
  return dill.dump_session(file_path)


def load_session(file_path):
  return dill.load_session(file_path)
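# Usage sketch (not part of the original module): round-trip a callable
# through the dumps()/loads() helpers defined above. A hedged example that
# assumes the `dill` package is installed and that this file is run directly.
if __name__ == '__main__':
  encoded = dumps(lambda x: x * 2)  # base64 text, safe to embed in JSON
  assert loads(encoded)(21) == 42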
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Matti Hamalainen <msh@nmr.mgh.harvard.edu> # Martin Luessi <mluessi@nmr.mgh.harvard.edu> # Denis Engemann <denis.engemann@gmail.com> # Teon Brooks <teon.brooks@gmail.com> # # License: BSD (3-clause) import copy import warnings import os import os.path as op import numpy as np from ..constants import FIFF from ..open import fiff_open, _fiff_get_fid, _get_next_fname from ..meas_info import read_meas_info from ..tree import dir_tree_find from ..tag import read_tag, read_tag_info from ..proj import make_eeg_average_ref_proj, _needs_eeg_average_ref_proj from ..compensator import get_current_comp, set_current_comp, make_compensator from ..base import _BaseRaw, _RawShell, _check_raw_compatibility from ...utils import check_fname, logger, verbose class RawFIF(_BaseRaw): """Raw data Parameters ---------- fnames : list, or string A list of the raw files to treat as a Raw instance, or a single raw file. For files that have automatically been split, only the name of the first file has to be specified. Filenames should end with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif or raw_tsss.fif.gz. allow_maxshield : bool, (default False) allow_maxshield if True, allow loading of data that has been processed with Maxshield. Maxshield-processed data should generally not be loaded directly, but should be processed using SSS first. preload : bool or str (default False) Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory). If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory). proj : bool Apply the signal space projection (SSP) operators present in the file to the data. Note: Once the projectors have been applied, they can no longer be removed. It is usually not recommended to apply the projectors at this point as they are applied automatically later on (e.g. when computing inverse solutions). compensation : None | int If None the compensation in the data is not modified. If set to n, e.g. 3, apply gradient compensation of grade n as for CTF systems. add_eeg_ref : bool If True, add average EEG reference projector (if it's not already present). verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Attributes ---------- info : dict Measurement info. ch_names : list of string List of channels' names. n_times : int Total number of time points in the raw file. preload : bool Indicates whether raw data are in memory. verbose : bool, str, int, or None See above. """ @verbose def __init__(self, fnames, allow_maxshield=False, preload=False, proj=False, compensation=None, add_eeg_ref=True, verbose=None): if not isinstance(fnames, list): fnames = [fnames] fnames = [op.realpath(f) for f in fnames] split_fnames = [] raws = [] for ii, fname in enumerate(fnames): do_check_fname = fname not in split_fnames raw, next_fname = self._read_raw_file(fname, allow_maxshield, preload, compensation, do_check_fname) raws.append(raw) if next_fname is not None: if not op.exists(next_fname): logger.warning('Split raw file detected but next file %s ' 'does not exist.' % next_fname) continue if next_fname in fnames: # the user manually specified the split files logger.info('Note: %s is part of a split raw file. 
It is ' 'not necessary to manually specify the parts ' 'in this case; simply construct Raw using ' 'the name of the first file.' % next_fname) continue # process this file next fnames.insert(ii + 1, next_fname) split_fnames.append(next_fname) _check_raw_compatibility(raws) super(RawFIF, self).__init__( copy.deepcopy(raws[0].info), False, [r.first_samp for r in raws], [r.last_samp for r in raws], [r.filename for r in raws], [r._raw_extras for r in raws], copy.deepcopy(raws[0].comp), raws[0]._orig_comp_grade, raws[0].orig_format, None, verbose=verbose) # combine information from each raw file to construct self if add_eeg_ref and _needs_eeg_average_ref_proj(self.info): eeg_ref = make_eeg_average_ref_proj(self.info, activate=False) self.add_proj(eeg_ref) if preload: self._preload_data(preload) else: self.preload = False # setup the SSP projector if proj: self.apply_proj() @verbose def _read_raw_file(self, fname, allow_maxshield, preload, compensation, do_check_fname=True, verbose=None): """Read in header information from a raw file""" logger.info('Opening raw data file %s...' % fname) if do_check_fname: check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', 'raw.fif.gz', 'raw_sss.fif.gz', 'raw_tsss.fif.gz')) # Read in the whole file if preload is on and .fif.gz (saves time) ext = os.path.splitext(fname)[1].lower() whole_file = preload if '.gz' in ext else False ff, tree, _ = fiff_open(fname, preload=whole_file) with ff as fid: # Read the measurement info info, meas = read_meas_info(fid, tree) # Locate the data of interest raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA) if len(raw_node) == 0: raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA) if (len(raw_node) == 0): raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA) msg = ('This file contains raw Internal Active ' 'Shielding data. It may be distorted. Elekta ' 'recommends it be run through MaxFilter to ' 'produce reliable results. 
Consider closing ' 'the file and running MaxFilter on the data.') if (len(raw_node) == 0): raise ValueError('No raw data in %s' % fname) elif allow_maxshield: info['maxshield'] = True warnings.warn(msg) else: msg += (' Use allow_maxshield=True if you are sure you' ' want to load the data despite this warning.') raise ValueError(msg) if len(raw_node) == 1: raw_node = raw_node[0] # Set up the output structure info['filename'] = fname # Process the directory directory = raw_node['directory'] nent = raw_node['nent'] nchan = int(info['nchan']) first = 0 first_samp = 0 first_skip = 0 # Get first sample tag if it is there if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE: tag = read_tag(fid, directory[first].pos) first_samp = int(tag.data) first += 1 # Omit initial skip if directory[first].kind == FIFF.FIFF_DATA_SKIP: # This first skip can be applied only after we know the bufsize tag = read_tag(fid, directory[first].pos) first_skip = int(tag.data) first += 1 raw = _RawShell() raw.filename = fname raw.first_samp = first_samp # Go through the remaining tags in the directory raw_extras = list() nskip = 0 orig_format = None for k in range(first, nent): ent = directory[k] if ent.kind == FIFF.FIFF_DATA_SKIP: tag = read_tag(fid, ent.pos) nskip = int(tag.data) elif ent.kind == FIFF.FIFF_DATA_BUFFER: # Figure out the number of samples in this buffer if ent.type == FIFF.FIFFT_DAU_PACK16: nsamp = ent.size // (2 * nchan) elif ent.type == FIFF.FIFFT_SHORT: nsamp = ent.size // (2 * nchan) elif ent.type == FIFF.FIFFT_FLOAT: nsamp = ent.size // (4 * nchan) elif ent.type == FIFF.FIFFT_DOUBLE: nsamp = ent.size // (8 * nchan) elif ent.type == FIFF.FIFFT_INT: nsamp = ent.size // (4 * nchan) elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT: nsamp = ent.size // (8 * nchan) elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE: nsamp = ent.size // (16 * nchan) else: raise ValueError('Cannot handle data buffers of type ' '%d' % ent.type) if orig_format is None: if ent.type == FIFF.FIFFT_DAU_PACK16: orig_format = 'short' elif ent.type == FIFF.FIFFT_SHORT: orig_format = 'short' elif ent.type == FIFF.FIFFT_FLOAT: orig_format = 'single' elif ent.type == FIFF.FIFFT_DOUBLE: orig_format = 'double' elif ent.type == FIFF.FIFFT_INT: orig_format = 'int' elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT: orig_format = 'single' elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE: orig_format = 'double' # Do we have an initial skip pending? if first_skip > 0: first_samp += nsamp * first_skip raw.first_samp = first_samp first_skip = 0 # Do we have a skip pending? if nskip > 0: raw_extras.append(dict( ent=None, first=first_samp, nsamp=nskip * nsamp, last=first_samp + nskip * nsamp - 1)) first_samp += nskip * nsamp nskip = 0 # Add a data buffer raw_extras.append(dict(ent=ent, first=first_samp, last=first_samp + nsamp - 1, nsamp=nsamp)) first_samp += nsamp next_fname = _get_next_fname(fid, fname, tree) raw.last_samp = first_samp - 1 raw.orig_format = orig_format # Add the calibration factors cals = np.zeros(info['nchan']) for k in range(info['nchan']): cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] raw._cals = cals raw._raw_extras = raw_extras raw.comp = None raw._orig_comp_grade = None # Set up the CTF compensator current_comp = get_current_comp(info) if current_comp is not None: logger.info('Current compensation grade : %d' % current_comp) if compensation is not None: raw.comp = make_compensator(info, current_comp, compensation) if raw.comp is not None: logger.info('Appropriate compensator added to change to ' 'grade %d.' 
% (compensation)) raw._orig_comp_grade = current_comp set_current_comp(info, compensation) logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( raw.first_samp, raw.last_samp, float(raw.first_samp) / info['sfreq'], float(raw.last_samp) / info['sfreq'])) # store the original buffer size info['buffer_size_sec'] = (np.median([r['nsamp'] for r in raw_extras]) / info['sfreq']) raw.info = info raw.verbose = verbose logger.info('Ready.') return raw, next_fname @property def _dtype(self): """Get the dtype to use to store data from disk""" if self._dtype_ is not None: return self._dtype_ dtype = None for raw_extra, filename in zip(self._raw_extras, self._filenames): for this in raw_extra: if this['ent'] is not None: with _fiff_get_fid(filename) as fid: fid.seek(this['ent'].pos, 0) tag = read_tag_info(fid) if tag is not None: if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT, FIFF.FIFFT_COMPLEX_DOUBLE): dtype = np.complex128 else: dtype = np.float64 if dtype is not None: break if dtype is not None: break if dtype is None: raise RuntimeError('bug in reading') self._dtype_ = dtype return dtype def _read_segment_file(self, data, idx, offset, fi, start, stop, cals, mult): """Read a segment of data from a file""" with _fiff_get_fid(self._filenames[fi]) as fid: for this in self._raw_extras[fi]: # Do we need this buffer if this['last'] >= start: # The picking logic is a bit complicated if stop > this['last'] and start < this['first']: # We need the whole buffer first_pick = 0 last_pick = this['nsamp'] logger.debug('W') elif start >= this['first']: first_pick = start - this['first'] if stop <= this['last']: # Something from the middle last_pick = this['nsamp'] + stop - this['last'] logger.debug('M') else: # From the middle to the end last_pick = this['nsamp'] logger.debug('E') else: # From the beginning to the middle first_pick = 0 last_pick = stop - this['first'] + 1 logger.debug('B') # Now we are ready to pick picksamp = last_pick - first_pick if picksamp > 0: # only read data if it exists if this['ent'] is not None: one = read_tag(fid, this['ent'].pos, shape=(this['nsamp'], self.info['nchan']), rlims=(first_pick, last_pick)).data one.shape = (picksamp, self.info['nchan']) one = one.T.astype(data.dtype) data_view = data[:, offset:(offset + picksamp)] if mult is not None: data_view[:] = np.dot(mult[fi], one) else: # cals is not None if isinstance(idx, slice): data_view[:] = one[idx] else: # faster to iterate than doing # one = one[idx] for ii, ix in enumerate(idx): data_view[ii] = one[ix] data_view *= cals offset += picksamp # Done? if this['last'] >= stop: break def fix_mag_coil_types(self): """Fix Elekta magnetometer coil types Returns ------- raw : instance of Raw The raw object. Operates in place. Notes ----- This function changes magnetometer coil types 3022 (T1: SQ20483N) and 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition records in the info structure. Neuromag Vectorview systems can contain magnetometers with two different coil sizes (3022 and 3023 vs. 3024). The systems incorporating coils of type 3024 were introduced last and are used at the majority of MEG sites. At some sites with 3024 magnetometers, the data files have still defined the magnetometers to be of type 3022 to ensure compatibility with older versions of Neuromag software. In the MNE software as well as in the present version of Neuromag software coil type 3024 is fully supported. Therefore, it is now safe to upgrade the data files to use the true coil type. .. 
note:: The effect of the difference between the coil sizes on the current estimates computed by the MNE software is very small. Therefore the use of mne_fix_mag_coil_types is not mandatory. """ from ...channels import fix_mag_coil_types fix_mag_coil_types(self.info) return self def read_raw_fif(fnames, allow_maxshield=False, preload=False, proj=False, compensation=None, add_eeg_ref=True, verbose=None): """Reader function for Raw FIF data Parameters ---------- fnames : list, or string A list of the raw files to treat as a Raw instance, or a single raw file. For files that have automatically been split, only the name of the first file has to be specified. Filenames should end with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif or raw_tsss.fif.gz. allow_maxshield : bool, (default False) allow_maxshield if True, allow loading of data that has been processed with Maxshield. Maxshield-processed data should generally not be loaded directly, but should be processed using SSS first. preload : bool or str (default False) Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory). If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory). proj : bool Apply the signal space projection (SSP) operators present in the file to the data. Note: Once the projectors have been applied, they can no longer be removed. It is usually not recommended to apply the projectors at this point as they are applied automatically later on (e.g. when computing inverse solutions). compensation : None | int If None the compensation in the data is not modified. If set to n, e.g. 3, apply gradient compensation of grade n as for CTF systems. add_eeg_ref : bool If True, add average EEG reference projector (if it's not already present). verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- raw : Instance of RawFIF A Raw object containing FIF data. Notes ----- .. versionadded:: 0.9.0 """ return RawFIF(fnames=fnames, allow_maxshield=allow_maxshield, preload=preload, proj=proj, compensation=compensation, add_eeg_ref=add_eeg_ref, verbose=verbose)
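# A brief, hedged usage sketch for read_raw_fif() defined above. The file
# name is a placeholder; preload=True loads the samples into memory so the
# Raw object can be sliced like an array.
if __name__ == '__main__':
    raw = read_raw_fif('sample_audvis_raw.fif', preload=True)  # placeholder path
    print('sfreq: %s Hz, %d samples' % (raw.info['sfreq'], raw.n_times))
    data, times = raw[:10, :1000]  # first 10 channels, first 1000 samples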
""" Copyright (c) 2016, John Deutscher Description: Sample Python script for Motion Detection processor License: MIT (see LICENSE.txt file for details) Documentation : https://azure.microsoft.com/en-us/documentation/articles/media-services-motion-detection/ """ import os import json import amspy import time import sys #import pytz import urllib import logging import datetime from azure import * from azure.storage.blob import BlockBlobService from azure.storage.blob import ContentSettings ########################################################################################### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ########################################################################################### # ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR # AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR # PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS # ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE # DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED! ########################################################################################### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ########################################################################################### # Load Azure app defaults try: with open('../../config.json') as configFile: configData = json.load(configFile) except FileNotFoundError: print_phase_message("ERROR: Expecting config.json in examples folder") sys.exit() account_name = configData['accountName'] account_key = configData['accountKey'] sto_account_name = configData['sto_accountName'] sto_accountKey = configData['sto_accountKey'] log_name = configData['logName'] log_level = configData['logLevel'] purge_log = configData['purgeLog'] #Initialization... print ("\n-----------------------= AMS Py =----------------------") print ("Azure Media Analytics - Motion Detection Sample") print ("for details see: https://azure.microsoft.com/en-us/documentation/articles/media-services-motion-detection/") print ("-------------------------------------------------------\n") #Remove old log file if requested (default behavior)... if (os.path.isdir('./log') != True): os.mkdir('log') if (purge_log.lower() == "yes"): if (os.path.isfile(log_name)): os.remove(log_name) #Basic Logging... logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=log_level, filename=log_name) # Get the access token... response = amspy.get_access_token(account_name, account_key) resjson = response.json() access_token = resjson["access_token"] #Some global vars... NAME = "movie" COUNTER = 0; ENCRYPTION = "1" # 0=None, StorageEncrypted=1, CommonEncryptionProtected=2, EnvelopeEncryptionProtected=4 ENCRYPTION_SCHEME = "StorageEncryption" # StorageEncryption or CommonEncryption. VIDEO_NAME = "movie.mp4" VIDEO_PATH = "../assets/movie.mp4" ASSET_FINAL_NAME = "Python Sample-Motion Detection" PROCESSOR_NAME = "Azure Media Motion Detector" MOTION_CONFIG = "motion_config.json" # Just a simple wrapper function to print the title of each of our phases to the console... def print_phase_header(message): global COUNTER; print ("\n[" + str("%02d" % int(COUNTER)) + "] >>> " + message) COUNTER += 1; # This wrapper function prints our messages to the console with a timestamp... 
def print_phase_message(message): time_stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") print (str(time_stamp) + ": " + message) ### get ams redirected url response = amspy.get_url(access_token) if (response.status_code == 200): ams_redirected_rest_endpoint = str(response.url) else: print_phase_message("GET Status: " + str(response.status_code) + " - Getting Redirected URL ERROR." + str(response.content)) exit(1) ######################### PHASE 1: UPLOAD ######################### ### create an asset print_phase_header("Creating a Media Asset") response = amspy.create_media_asset(access_token, NAME) if (response.status_code == 201): resjson = response.json() asset_id = str(resjson['d']['Id']) print_phase_message("POST Status.............................: " + str(response.status_code)) print_phase_message("Media Asset Name........................: " + NAME) print_phase_message("Media Asset Id..........................: " + asset_id) else: print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Asset: '" + NAME + "' Creation ERROR." + str(response.content)) ### create an assetfile print_phase_header("Creating a Media Assetfile (for the video file)") response = amspy.create_media_assetfile(access_token, asset_id, VIDEO_NAME, "false", "false") if (response.status_code == 201): resjson = response.json() video_assetfile_id = str(resjson['d']['Id']) print_phase_message("POST Status.............................: " + str(response.status_code)) print_phase_message("Media Assetfile Name....................: " + str(resjson['d']['Name'])) print_phase_message("Media Assetfile Id......................: " + video_assetfile_id) print_phase_message("Media Assetfile IsPrimary...............: " + str(resjson['d']['IsPrimary'])) else: print_phase_message("POST Status: " + str(response.status_code) + " - Media Assetfile: '" + VIDEO_NAME + "' Creation ERROR." + str(response.content)) ### create an asset write access policy for uploading print_phase_header("Creating an Asset Write Access Policy") duration = "440" response = amspy.create_asset_accesspolicy(access_token, "NewUploadPolicy", duration, "2") if (response.status_code == 201): resjson = response.json() write_accesspolicy_id = str(resjson['d']['Id']) print_phase_message("POST Status.............................: " + str(response.status_code)) print_phase_message("Asset Access Policy Id..................: " + write_accesspolicy_id) print_phase_message("Asset Access Policy Duration/min........: " + str(resjson['d']['DurationInMinutes'])) else: print_phase_message("POST Status: " + str(response.status_code) + " - Asset Write Access Policy Creation ERROR." + str(response.content)) ### create a sas locator print_phase_header("Creating a write SAS Locator") ## INFO: If you need to upload your files immediately, you should set your StartTime value to five minutes before the current time. #This is because there may be clock skew between your client machine and Media Services. #Also, your StartTime value must be in the following DateTime format: YYYY-MM-DDTHH:mm:ssZ (for example, "2014-05-23T17:53:50Z"). # EDITED: Not providing starttime is the best approach to be able to upload a file immediatly... 
#starttime = datetime.datetime.now(pytz.timezone(time_zone)).strftime("%Y-%m-%dT%H:%M:%SZ") #response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id, starttime) response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id) if (response.status_code == 201): resjson = response.json() saslocator_id = str(resjson['d']['Id']) saslocator_baseuri = str(resjson['d']['BaseUri']) sto_asset_name = os.path.basename(os.path.normpath(saslocator_baseuri)) saslocator_cac = str(resjson['d']['ContentAccessComponent']) print_phase_message("POST Status.............................: " + str(response.status_code)) print_phase_message("SAS URL Locator StartTime...............: " + str(resjson['d']['StartTime'])) print_phase_message("SAS URL Locator Id......................: " + saslocator_id) print_phase_message("SAS URL Locator Base URI................: " + saslocator_baseuri) print_phase_message("SAS URL Locator Content Access Component: " + saslocator_cac) else: print_phase_message("POST Status: " + str(response.status_code) + " - SAS URL Locator Creation ERROR." + str(response.content)) ### Use the Azure Blob Blob Servic library from the Azure Storage SDK. block_blob_service = BlockBlobService(account_name=sto_account_name, sas_token=saslocator_cac[1:]) ### Define a callback method to show progress of large uploads def uploadCallback(current, total): if (current != None): print_phase_message('{0:2,f}/{1:2,.0f} MB'.format(current,total/1024/1024)) ### Start upload the video file print_phase_header("Uploading the Video File") with open(VIDEO_PATH, mode='rb') as file: video_content = file.read() video_content_length = len(video_content) response = block_blob_service.create_blob_from_path( sto_asset_name, VIDEO_NAME, VIDEO_PATH, max_connections=5, content_settings=ContentSettings(content_type='video/mp4'), progress_callback=uploadCallback, ) if (response == None): print_phase_message("PUT Status..............................: 201") print_phase_message("Video File Uploaded.....................: OK") ### update the assetfile metadata after uploading print_phase_header("Updating the Video Assetfile") response = amspy.update_media_assetfile(access_token, asset_id, video_assetfile_id, video_content_length, VIDEO_NAME) if (response.status_code == 204): print_phase_message("MERGE Status............................: " + str(response.status_code)) print_phase_message("Assetfile Content Length Updated........: " + str(video_content_length)) else: print_phase_message("MERGE Status............................: " + str(response.status_code) + " - Assetfile: '" + VIDEO_NAME + "' Update ERROR." + str(response.content)) ### delete the locator, so that it can't be used again print_phase_header("Deleting the Locator") response = amspy.delete_sas_locator(access_token, saslocator_id) if (response.status_code == 204): print_phase_message("DELETE Status...........................: " + str(response.status_code)) print_phase_message("SAS URL Locator Deleted.................: " + saslocator_id) else: print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - SAS URL Locator: '" + saslocator_id + "' Delete ERROR." 
+ str(response.content)) ### delete the asset access policy print_phase_header("Deleting the Acess Policy") response = amspy.delete_asset_accesspolicy(access_token, write_accesspolicy_id) if (response.status_code == 204): print_phase_message("DELETE Status...........................: " + str(response.status_code)) print_phase_message("Asset Access Policy Deleted.............: " + write_accesspolicy_id) else: print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - Asset Access Policy: '" + write_accesspolicy_id + "' Delete ERROR." + str(response.content)) ### get the media processor for Motion Detection print_phase_header("Getting the Media Processor for Motion Detection") response = amspy.list_media_processor(access_token) if (response.status_code == 200): resjson = response.json() print_phase_message("GET Status..............................: " + str(response.status_code)) for mp in resjson['d']['results']: if(str(mp['Name']) == PROCESSOR_NAME): processor_id = str(mp['Id']) print_phase_message("MEDIA Processor Id......................: " + processor_id) print_phase_message("MEDIA Processor Name....................: " + PROCESSOR_NAME) else: print_phase_message("GET Status: " + str(response.status_code) + " - Media Processors Listing ERROR." + str(response.content)) ## create a Video Motion Detection Job print_phase_header("Creating a Motion Detection job to process the content") with open(MOTION_CONFIG, mode='r') as file: configuration = file.read() response = amspy.encode_mezzanine_asset(access_token, processor_id, asset_id, ASSET_FINAL_NAME, configuration) if (response.status_code == 201): resjson = response.json() job_id = str(resjson['d']['Id']) print_phase_message("POST Status.............................: " + str(response.status_code)) print_phase_message("Media Job Id............................: " + job_id) else: print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Job Creation ERROR." + str(response.content)) ### list a media job print_phase_header("Getting the Media Job Status") flag = 1 while (flag): response = amspy.list_media_job(access_token, job_id) if (response.status_code == 200): resjson = response.json() job_state = str(resjson['d']['State']) if (resjson['d']['EndTime'] != None): joboutputassets_uri = resjson['d']['OutputMediaAssets']['__deferred']['uri'] flag = 0 print_phase_message("GET Status..............................: " + str(response.status_code)) print_phase_message("Media Job Status........................: " + amspy.translate_job_state(job_state)) else: print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job: '" + asset_id + "' Listing ERROR." + str(response.content)) time.sleep(5) ## getting the output Asset id print_phase_header("Getting the Motion Detection Output Asset Id") response = amspy.get_url(access_token, joboutputassets_uri, False) if (response.status_code == 200): resjson = response.json() motion_asset_id = resjson['d']['results'][0]['Id'] print_phase_message("GET Status..............................: " + str(response.status_code)) print_phase_message("Motion Detection Output Asset Id........: " + motion_asset_id) else: print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job Output Asset: '" + job_id + "' Getting ERROR." 
+ str(response.content)) # Get Asset by using the list_media_asset method and the Asset ID response = amspy.list_media_asset(access_token,motion_asset_id) if (response.status_code == 200): resjson = response.json() # Get the container name from the Uri outputAssetContainer = resjson['d']['Uri'].split('/')[3] print_phase_message("Output Asset Name.......................: " + outputAssetContainer) ### Use the Azure Blob Blob Service library from the Azure Storage SDK to download the Motion Detection JSON block_blob_service = BlockBlobService(account_name=sto_account_name,account_key=sto_accountKey) generator = block_blob_service.list_blobs(outputAssetContainer) for blob in generator: print_phase_message("Output File Name........................: " + blob.name) if (blob.name.endswith(".json")): print_phase_message("\n\n##### Output Results ######") blobText = block_blob_service.get_blob_to_text(outputAssetContainer, blob.name) print("Output File Name........................: " + blobText.content) block_blob_service.get_blob_to_path(outputAssetContainer, blob.name, "output/" + blob.name) else: block_blob_service.get_blob_to_path(outputAssetContainer, blob.name, "output/" + blob.name)
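# Hedged sketch (not part of the original sample): the job-status polling
# loop above could be wrapped in a reusable helper along these lines. It
# reuses the same amspy calls and 5-second poll interval used earlier in
# this script.
def wait_for_job(access_token, job_id, poll_seconds=5):
    """Block until the AMS job reports an EndTime, then return its final state."""
    while True:
        response = amspy.list_media_job(access_token, job_id)
        if response.status_code != 200:
            raise RuntimeError("Job status query failed: " + str(response.content))
        resjson = response.json()
        if resjson['d']['EndTime'] is not None:
            return str(resjson['d']['State'])
        time.sleep(poll_seconds)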
import dcos.marathon import json import pytest import re import shakedown import sdk_cmd as cmd import sdk_install as install import sdk_marathon as marathon import sdk_spin as spin import sdk_tasks as tasks import sdk_test_upgrade from tests.config import ( PACKAGE_NAME, DEFAULT_TASK_COUNT, configured_task_count, hello_task_count, world_task_count, check_running ) def setup_module(module): install.uninstall(PACKAGE_NAME) install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT) def teardown_module(module): install.uninstall(PACKAGE_NAME) def close_enough(val0, val1): epsilon = 0.00001 diff = abs(val0 - val1) return diff < epsilon @pytest.mark.smoke def test_install(): check_running() @pytest.mark.sanity def test_no_colocation_in_podtypes(): # check that no two 'hellos' and no two 'worlds' are colocated on the same agent all_tasks = shakedown.get_service_tasks(PACKAGE_NAME) print(all_tasks) hello_agents = [] world_agents = [] for task in all_tasks: if task['name'].startswith('hello-'): hello_agents.append(task['slave_id']) elif task['name'].startswith('world-'): world_agents.append(task['slave_id']) else: assert False, "Unknown task: " + task['name'] assert len(hello_agents) == len(set(hello_agents)) assert len(world_agents) == len(set(world_agents)) @pytest.mark.sanity @pytest.mark.smoke def test_bump_hello_cpus(): check_running() hello_ids = tasks.get_task_ids(PACKAGE_NAME, 'hello') print('hello ids: ' + str(hello_ids)) config = marathon.get_config(PACKAGE_NAME) cpus = float(config['env']['HELLO_CPUS']) updated_cpus = cpus + 0.1 config['env']['HELLO_CPUS'] = str(updated_cpus) marathon.update_app(PACKAGE_NAME, config) tasks.check_tasks_updated(PACKAGE_NAME, 'hello', hello_ids) check_running() all_tasks = shakedown.get_service_tasks(PACKAGE_NAME) running_tasks = [t for t in all_tasks if t['name'].startswith('hello') and t['state'] == "TASK_RUNNING"] assert len(running_tasks) == hello_task_count() for t in running_tasks: assert close_enough(t['resources']['cpus'], updated_cpus) @pytest.mark.sanity @pytest.mark.smoke def test_bump_world_cpus(): check_running() world_ids = tasks.get_task_ids(PACKAGE_NAME, 'world') print('world ids: ' + str(world_ids)) config = marathon.get_config(PACKAGE_NAME) cpus = float(config['env']['WORLD_CPUS']) updated_cpus = cpus + 0.1 config['env']['WORLD_CPUS'] = str(updated_cpus) marathon.update_app(PACKAGE_NAME, config) tasks.check_tasks_updated(PACKAGE_NAME, 'world', world_ids) check_running() all_tasks = shakedown.get_service_tasks(PACKAGE_NAME) running_tasks = [t for t in all_tasks if t['name'].startswith('world') and t['state'] == "TASK_RUNNING"] assert len(running_tasks) == world_task_count() for t in running_tasks: assert close_enough(t['resources']['cpus'], updated_cpus) @pytest.mark.sanity @pytest.mark.smoke def test_bump_hello_nodes(): check_running() hello_ids = tasks.get_task_ids(PACKAGE_NAME, 'hello') print('hello ids: ' + str(hello_ids)) config = marathon.get_config(PACKAGE_NAME) node_count = int(config['env']['HELLO_COUNT']) + 1 config['env']['HELLO_COUNT'] = str(node_count) marathon.update_app(PACKAGE_NAME, config) check_running() tasks.check_tasks_not_updated(PACKAGE_NAME, 'hello', hello_ids) @pytest.mark.sanity def test_pods_list(): stdout = cmd.run_cli('hello-world pods list') jsonobj = json.loads(stdout) assert len(jsonobj) == configured_task_count() # expect: X instances of 'hello-#' followed by Y instances of 'world-#', # in alphanumerical order first_world = -1 for i in range(len(jsonobj)): entry = jsonobj[i] if first_world < 0: if 
entry.startswith('world-'): first_world = i if first_world == -1: assert jsonobj[i] == 'hello-{}'.format(i) else: assert jsonobj[i] == 'world-{}'.format(i - first_world) @pytest.mark.sanity def test_pods_status_all(): stdout = cmd.run_cli('hello-world pods status') jsonobj = json.loads(stdout) assert len(jsonobj) == configured_task_count() for k, v in jsonobj.items(): assert re.match('(hello|world)-[0-9]+', k) assert len(v) == 1 task = v[0] assert len(task) == 3 assert re.match('(hello|world)-[0-9]+-server__[0-9a-f-]+', task['id']) assert re.match('(hello|world)-[0-9]+-server', task['name']) assert task['state'] == 'TASK_RUNNING' @pytest.mark.sanity def test_pods_status_one(): stdout = cmd.run_cli('hello-world pods status hello-0') jsonobj = json.loads(stdout) assert len(jsonobj) == 1 task = jsonobj[0] assert len(task) == 3 assert re.match('hello-0-server__[0-9a-f-]+', task['id']) assert task['name'] == 'hello-0-server' assert task['state'] == 'TASK_RUNNING' @pytest.mark.sanity def test_pods_info(): stdout = cmd.run_cli('hello-world pods info world-1') jsonobj = json.loads(stdout) assert len(jsonobj) == 1 task = jsonobj[0] assert len(task) == 2 assert task['info']['name'] == 'world-1-server' assert task['info']['taskId']['value'] == task['status']['taskId']['value'] assert task['status']['state'] == 'TASK_RUNNING' @pytest.mark.sanity def test_state_properties_get(): # 'suppressed' could be missing if the scheduler recently started, loop for a bit just in case: def check_for_nonempty_properties(): stdout = cmd.run_cli('hello-world state properties') return len(json.loads(stdout)) > 0 spin.time_wait_noisy(lambda: check_for_nonempty_properties(), timeout_seconds=30.) stdout = cmd.run_cli('hello-world state properties') jsonobj = json.loads(stdout) assert len(jsonobj) == 1 assert jsonobj[0] == "suppressed" stdout = cmd.run_cli('hello-world state property suppressed') assert stdout == "true\n" @pytest.mark.speedy def test_state_refresh_disable_cache(): '''Disables caching via a scheduler envvar''' check_running() task_ids = tasks.get_task_ids(PACKAGE_NAME, '') # caching enabled by default: stdout = cmd.run_cli('hello-world state refresh_cache') assert "Received cmd: refresh" in stdout config = marathon.get_config(PACKAGE_NAME) cpus = float(config['env']['HELLO_CPUS']) config['env']['DISABLE_STATE_CACHE'] = 'any-text-here' cmd.request('put', marathon.api_url('apps/' + PACKAGE_NAME), json=config) tasks.check_tasks_not_updated(PACKAGE_NAME, '', task_ids) check_running() # caching disabled, refresh_cache should fail with a 409 error (eventually, once scheduler is up): def check_cache_refresh_fails_409conflict(): try: cmd.run_cli('hello-world state refresh_cache') except Exception as e: if "failed: 409 Conflict" in e.args[0]: return True return False spin.time_wait_noisy(lambda: check_cache_refresh_fails_409conflict(), timeout_seconds=120.) config = marathon.get_config(PACKAGE_NAME) cpus = float(config['env']['HELLO_CPUS']) del config['env']['DISABLE_STATE_CACHE'] cmd.request('put', marathon.api_url('apps/' + PACKAGE_NAME), json=config) tasks.check_tasks_not_updated(PACKAGE_NAME, '', task_ids) check_running() # caching reenabled, refresh_cache should succeed (eventually, once scheduler is up): def check_cache_refresh(): return cmd.run_cli('hello-world state refresh_cache') stdout = spin.time_wait_return(lambda: check_cache_refresh(), timeout_seconds=120.) 
assert "Received cmd: refresh" in stdout @pytest.mark.sanity def test_lock(): '''This test verifies that a second scheduler fails to startup when an existing scheduler is running. Without locking, the scheduler would fail during registration, but after writing its config to ZK. So in order to verify that the scheduler fails immediately, we ensure that the ZK config state is unmodified.''' marathon_client = dcos.marathon.create_client() # Get ZK state from running framework zk_path = "dcos-service-{}/ConfigTarget".format(PACKAGE_NAME) zk_config_old = shakedown.get_zk_node_data(zk_path) # Get marathon app app_id = "/{}".format(PACKAGE_NAME) app = marathon_client.get_app(app_id) old_timestamp = app.get("lastTaskFailure", {}).get("timestamp", None) # Scale to 2 instances labels = app["labels"] labels.pop("MARATHON_SINGLE_INSTANCE_APP") marathon_client.update_app(app_id, {"labels": labels}) shakedown.deployment_wait() marathon_client.update_app(app_id, {"instances": 2}) # Wait for second scheduler to fail def fn(): timestamp = marathon_client.get_app(app_id).get("lastTaskFailure", {}).get("timestamp", None) return timestamp != old_timestamp spin.time_wait_noisy(lambda: fn()) # Verify ZK is unchanged zk_config_new = shakedown.get_zk_node_data(zk_path) assert zk_config_old == zk_config_new @pytest.mark.skip(reason="https://jira.mesosphere.com/browse/INFINITY-1114") @pytest.mark.upgrade @pytest.mark.sanity def test_upgrade_downgrade(): sdk_test_upgrade.upgrade_downgrade(PACKAGE_NAME, DEFAULT_TASK_COUNT)
#!/usr/bin/env python2 import os import sys import time import struct import signal import shutil from subprocess import call from ignition import ProgramGroup from manus_starter.privileged import run_upgrade, run_shutdown try: from manus import VERSION except ImportError: VERSION = 'N/A' LOCAL_CAMERA = os.getenv('MANUS_CAMERA', '/dev/video0') LOCAL_MANIPULATOR = os.getenv('MANUS_MANIPULATOR', '/dev/i2c-1') LOCAL_INTERFACE = os.getenv('MANUS_INTERFACE', 'eth0') kill_now = False def exit_gracefully(signum, frame): global kill_now kill_now = True print "Stopping gracefully" def get_ip_address(ifname): try: import socket import fcntl s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24]) except IOError: return "unknown" class Menu(object): import curses import curses.panel def __init__(self, items): self.position = 0 self.items = items def navigate(self, n): self.position += n if self.position < 0: self.position = 0 elif self.position >= len(self.items): self.position = len(self.items)-1 def display(self, stdscreen): global kill_now self.curses.curs_set(0) self.screen = stdscreen self.window = stdscreen.subwin(0, 0) self.window.keypad(1) self.panel = self.curses.panel.new_panel(self.window) self.panel.hide() self.curses.panel.update_panels() self.panel.top() self.panel.show() self.window.clear() self.window.timeout(1000) self.update_footer() while not kill_now: try: self.window.refresh() self.curses.doupdate() for index, item in enumerate(self.items): if index == self.position: mode = self.curses.A_REVERSE else: mode = self.curses.A_NORMAL msg = '%d. %s' % (index+1, item[0]) self.window.addstr(3+index, 3, msg, mode) key = self.window.getch() if key in [self.curses.KEY_ENTER, ord('\n')]: if self.items[self.position][1] is None: break else: self.no_curses(self.items[self.position][1]) if key == self.curses.KEY_UP: self.navigate(-1) elif key == self.curses.KEY_DOWN: self.navigate(1) elif key == -1: self.update_footer() except KeyboardInterrupt: pass self.window.clear() self.panel.hide() self.curses.panel.update_panels() self.curses.doupdate() def update_footer(self): s = self.window.getmaxyx() has_camera, has_manipulator = scan_resources() datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) metadata = "%s Manus %s" % (datetime, VERSION) camera = "ON" if has_camera else "OFF" local_address = get_ip_address(LOCAL_INTERFACE) devices = "%s Camera %s" % (local_address, camera) self.window.move(s[0]-3, 0) self.window.clrtoeol() self.window.addstr(s[0]-3, 3, devices, self.curses.A_REVERSE) self.window.addstr(s[0]-3, s[1] - 3 - len(metadata), metadata, self.curses.A_REVERSE) def no_curses(self, fun, **kwargs): self.screen.clear() self.curses.nocbreak() self.screen.keypad(0) self.screen.move(0,0) self.curses.echo() self.curses.endwin() try: fun(**kwargs) finally: self.curses.noecho() self.curses.cbreak() self.screen.keypad(1) def run_script(script, shutdown=None): try: if isinstance(script, ProgramGroup): group = script print "Running %s" % group.description else: print "Running %s" % script group = ProgramGroup(script) group.announce("Starting up ...") group.start() time.sleep(1) except ValueError, e: print "Error opening spark file %s" % e return False stop_requested = False try: while group.valid(): time.sleep(1) if not shutdown is None and shutdown(): stop_requested = True break except KeyboardInterrupt: stop_requested = True group.announce("Shutting down ...") group.stop() return 
not stop_requested def scan_resources(): has_camera = os.path.exists(LOCAL_CAMERA) has_manipulator = os.path.exists(LOCAL_MANIPULATOR) return has_camera, has_manipulator def run_interactive(launchfiles): import curses signal.signal(signal.SIGTERM, exit_gracefully) menu_items = [] def wrap_run(group): return lambda: run_script(group) for file in os.listdir(launchfiles): if not file.endswith(".spark"): continue try: group = ProgramGroup(os.path.join(launchfiles, file)) menu_items.append((group.description, wrap_run(group))) finally: pass def run_terminal(): call(["sudo", "-u", "manus", "bash"]) return False def run_upgrade_reload(): run_upgrade() os.execv(sys.executable, [sys.executable, sys.argv[0]]) return True menu_items.append(('Upgrade system', run_upgrade_reload)) menu_items.append(('Exit to terminal', run_terminal)) menu_items.append(('Shutdown', run_shutdown)) menu = Menu(menu_items) curses.wrapper(lambda scr: menu.display(scr)) def run_service(launchfile): signal.signal(signal.SIGTERM, exit_gracefully) def resources_available(): camera, manipulator = scan_resources() print camera, manipulator return camera and manipulator try: while not kill_now: if resources_available(): if not run_script(launchfile, shutdown=lambda: kill_now): break else: time.sleep(3) except KeyboardInterrupt: return
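# Hedged sketch (the real entry point is not shown in this file): one way the
# run_interactive() and run_service() helpers above might be selected from
# the command line. The argument convention here is an assumption, not the
# project's actual CLI.
if __name__ == '__main__':
    if len(sys.argv) > 2 and sys.argv[1] == '--service':
        run_service(sys.argv[2])       # a single .spark launch file
    elif len(sys.argv) > 1:
        run_interactive(sys.argv[1])   # a directory of .spark launch files
    else:
        print("Usage: %s [--service <file.spark> | <launch_dir>]" % sys.argv[0])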
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods for working with WSGI servers """ from __future__ import print_function import errno import os import socket import ssl import sys import time from xml.etree import ElementTree as etree from xml.parsers import expat import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) from oslo.config import cfg import routes.middleware import webob.dec import webob.exc from neutron.common import constants from neutron.common import exceptions as exception from neutron import context from neutron.openstack.common import gettextutils from neutron.openstack.common import jsonutils from neutron.openstack.common import log as logging socket_opts = [ cfg.IntOpt('backlog', default=4096, help=_("Number of backlog requests to configure " "the socket with")), cfg.IntOpt('tcp_keepidle', default=600, help=_("Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X.")), cfg.IntOpt('retry_until_window', default=30, help=_("Number of seconds to keep retrying to listen")), cfg.BoolOpt('use_ssl', default=False, help=_('Enable SSL on the API server')), cfg.StrOpt('ssl_ca_file', default=None, help=_("CA certificate file to use to verify " "connecting clients")), cfg.StrOpt('ssl_cert_file', default=None, help=_("Certificate file to use when starting " "the server securely")), cfg.StrOpt('ssl_key_file', default=None, help=_("Private key file to use when starting " "the server securely")), ] CONF = cfg.CONF CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) def run_server(application, port): """Run a WSGI server with the given application.""" sock = eventlet.listen(('0.0.0.0', port)) eventlet.wsgi.server(sock, application) class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, name, threads=1000): self.pool = eventlet.GreenPool(threads) self.name = name def _get_socket(self, host, port, backlog): bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: LOG.exception(_("Unable to listen on %(host)s:%(port)s"), {'host': host, 'port': port}) sys.exit(1) if CONF.use_ssl: if not os.path.exists(CONF.ssl_cert_file): raise RuntimeError(_("Unable to find ssl_cert_file " ": %s") % CONF.ssl_cert_file) if not os.path.exists(CONF.ssl_key_file): raise RuntimeError(_("Unable to find " "ssl_key_file : %s") % CONF.ssl_key_file) # ssl_ca_file is optional if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file): raise RuntimeError(_("Unable to find ssl_ca_file " ": %s") % CONF.ssl_ca_file) def wrap_ssl(sock): ssl_kwargs = { 'server_side': True, 'certfile': CONF.ssl_cert_file, 'keyfile': CONF.ssl_key_file, 'cert_reqs': ssl.CERT_NONE, } if CONF.ssl_ca_file: ssl_kwargs['ca_certs'] = CONF.ssl_ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED return ssl.wrap_socket(sock, **ssl_kwargs) sock = None retry_until = time.time() + CONF.retry_until_window while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) if CONF.use_ssl: sock = wrap_ssl(sock) except socket.error as err: if err.errno != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for %(time)d seconds") % {'host': host, 'port': port, 'time': CONF.retry_until_window}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def start(self, application, port, host='0.0.0.0'): """Run a WSGI server with the given application.""" self._host = host self._port = port backlog = CONF.backlog self._socket = self._get_socket(self._host, self._port, backlog=backlog) self._server = self.pool.spawn(self._run, application, self._socket) @property def host(self): return self._socket.getsockname()[0] if self._socket else self._host @property def port(self): return self._socket.getsockname()[1] if self._socket else self._port def stop(self): self._server.kill() def wait(self): """Wait until all servers have completed running.""" try: self.pool.waitall() except KeyboardInterrupt: pass def _run(self, application, socket): """Start a WSGI server in a new green thread.""" logger = logging.getLogger('eventlet.wsgi.server') eventlet.wsgi.server(socket, application, custom_pool=self.pool, log=logging.WritableLogger(logger)) class Middleware(object): """Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. 
A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = nova.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import nova.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Request(webob.Request): def best_match_content_type(self): """Determine the most acceptable content-type. Based on: 1) URI extension (.json/.xml) 2) Content-type header 3) Accept* headers """ # First lookup http request path parts = self.path.rsplit('.', 1) if len(parts) > 1: _format = parts[1] if _format in ['json', 'xml']: return 'application/{0}'.format(_format) #Then look up content header type_from_header = self.get_content_type() if type_from_header: return type_from_header ctypes = ['application/json', 'application/xml'] #Finally search in Accept-* headers bm = self.accept.best_match(ctypes) return bm or 'application/json' def get_content_type(self): allowed_types = ("application/xml", "application/json") if "Content-Type" not in self.headers: LOG.debug(_("Missing Content-Type")) return None _type = self.content_type if _type in allowed_types: return _type return None def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None all_languages = gettextutils.get_available_languages('neutron') return self.accept_language.best_match(all_languages) @property def context(self): if 'neutron.context' not in self.environ: self.environ['neutron.context'] = context.get_admin_context() return self.environ['neutron.context'] class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): return unicode(obj) return jsonutils.dumps(data, default=sanitizer) class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): """Object initialization. :param metadata: information needed to deserialize xml into a dictionary. 
:param xmlns: XML namespace to include with serialized xml """ super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} if not xmlns: xmlns = self.metadata.get('xmlns') if not xmlns: xmlns = constants.XML_NS_V20 self.xmlns = xmlns def default(self, data): """Return data as XML string. :param data: expect data to contain a single key as XML root, or contain another '*_links' key as atom links. Other case will use 'VIRTUAL_ROOT_KEY' as XML root. """ try: links = None has_atom = False if data is None: root_key = constants.VIRTUAL_ROOT_KEY root_value = None else: link_keys = [k for k in data.iterkeys() or [] if k.endswith('_links')] if link_keys: links = data.pop(link_keys[0], None) has_atom = True root_key = (len(data) == 1 and data.keys()[0] or constants.VIRTUAL_ROOT_KEY) root_value = data.get(root_key, data) doc = etree.Element("_temp_root") used_prefixes = [] self._to_xml_node(doc, self.metadata, root_key, root_value, used_prefixes) if links: self._create_link_nodes(list(doc)[0], links) return self.to_xml_string(list(doc)[0], used_prefixes, has_atom) except AttributeError as e: LOG.exception(str(e)) return '' def __call__(self, data): # Provides a migration path to a cleaner WSGI layer, this # "default" stuff and extreme extensibility isn't being used # like originally intended return self.default(data) def to_xml_string(self, node, used_prefixes, has_atom=False): self._add_xmlns(node, used_prefixes, has_atom) return etree.tostring(node, encoding='UTF-8') #NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking def _add_xmlns(self, node, used_prefixes, has_atom=False): node.set('xmlns', self.xmlns) node.set(constants.TYPE_XMLNS, self.xmlns) if has_atom: node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE) node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE) ext_ns = self.metadata.get(constants.EXT_NS, {}) ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {}) for prefix in used_prefixes: if prefix in ext_ns: node.set('xmlns:' + prefix, ext_ns[prefix]) if prefix in ext_ns_bc: node.set('xmlns:' + prefix, ext_ns_bc[prefix]) def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes): """Recursive method to convert data members to XML nodes.""" result = etree.SubElement(parent, nodename) if ":" in nodename: used_prefixes.append(nodename.split(":", 1)[0]) #TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): if not data: result.set( constants.TYPE_ATTR, constants.TYPE_LIST) return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if nodename.endswith('s'): singular = nodename[:-1] else: singular = 'item' for item in data: self._to_xml_node(result, metadata, singular, item, used_prefixes) #TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): if not data: result.set( constants.TYPE_ATTR, constants.TYPE_DICT) return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in data.items(): if k in attrs: result.set(k, str(v)) else: self._to_xml_node(result, metadata, k, v, used_prefixes) elif data is None: result.set(constants.XSI_ATTR, 'true') else: if isinstance(data, bool): result.set( constants.TYPE_ATTR, constants.TYPE_BOOL) elif isinstance(data, int): result.set( constants.TYPE_ATTR, constants.TYPE_INT) elif isinstance(data, long): result.set( 
constants.TYPE_ATTR, constants.TYPE_LONG) elif isinstance(data, float): result.set( constants.TYPE_ATTR, constants.TYPE_FLOAT) LOG.debug(_("Data %(data)s type is %(type)s"), {'data': data, 'type': type(data)}) if isinstance(data, str): result.text = unicode(data, 'utf-8') else: result.text = unicode(data) return result def _create_link_nodes(self, xml_doc, links): for link in links: link_node = etree.SubElement(xml_doc, 'atom:link') link_node.set('rel', link['rel']) link_node.set('href', link['href']) class ResponseHeaderSerializer(ActionDispatcher): """Default response headers serialization.""" def serialize(self, response, data, action): self.dispatch(response, data, action=action) def default(self, response, data): response.status_int = 200 class ResponseSerializer(object): """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) self.headers_serializer = (headers_serializer or ResponseHeaderSerializer()) def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. :param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body """ response = webob.Response() self.serialize_headers(response, response_data, action) self.serialize_body(response, response_data, content_type, action) return response def serialize_headers(self, response, data, action): self.headers_serializer.serialize(response, data, action) def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: return self.body_serializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("Cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class ProtectedXMLParser(etree.XMLParser): def __init__(self, *args, **kwargs): etree.XMLParser.__init__(self, *args, **kwargs) self._parser.StartDoctypeDeclHandler = self.start_doctype_decl def start_doctype_decl(self, name, sysid, pubid, internal): raise ValueError(_("Inline DTD forbidden")) def doctype(self, name, pubid, system): raise ValueError(_("Inline DTD forbidden")) class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): """Object initialization. :param metadata: information needed to deserialize xml into a dictionary. 
""" super(XMLDeserializer, self).__init__() self.metadata = metadata or {} xmlns = self.metadata.get('xmlns') if not xmlns: xmlns = constants.XML_NS_V20 self.xmlns = xmlns def _get_key(self, tag): tags = tag.split("}", 1) if len(tags) == 2: ns = tags[0][1:] bare_tag = tags[1] ext_ns = self.metadata.get(constants.EXT_NS, {}) if ns == self.xmlns: return bare_tag for prefix, _ns in ext_ns.items(): if ns == _ns: return prefix + ":" + bare_tag ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {}) for prefix, _ns in ext_ns_bc.items(): if ns == _ns: return prefix + ":" + bare_tag else: return tag def _get_links(self, root_tag, node): link_nodes = node.findall(constants.ATOM_LINK_NOTATION) root_tag = self._get_key(node.tag) link_key = "%s_links" % root_tag link_list = [] for link in link_nodes: link_list.append({'rel': link.get('rel'), 'href': link.get('href')}) # Remove link node in order to avoid link node process as # an item in _from_xml_node node.remove(link) return link_list and {link_key: link_list} or {} def _parseXML(self, text): parser = ProtectedXMLParser() parser.feed(text) return parser.close() def _from_xml(self, datastring): if datastring is None: return None plurals = set(self.metadata.get('plurals', {})) try: node = self._parseXML(datastring) root_tag = self._get_key(node.tag) # Deserialize link node was needed by unit test for verifying # the request's response links = self._get_links(root_tag, node) result = self._from_xml_node(node, plurals) # root_tag = constants.VIRTUAL_ROOT_KEY and links is not None # is not possible because of the way data are serialized. if root_tag == constants.VIRTUAL_ROOT_KEY: return result return dict({root_tag: result}, **links) except Exception as e: parseError = False # Python2.7 if (hasattr(etree, 'ParseError') and isinstance(e, getattr(etree, 'ParseError'))): parseError = True # Python2.6 elif isinstance(e, expat.ExpatError): parseError = True if parseError: msg = _("Cannot understand XML") raise exception.MalformedRequestBody(reason=msg) else: raise def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. :param listnames: list of XML node names whose subnodes should be considered list items. 
""" attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil"))) attrType = node.get(str(etree.QName( self.metadata.get('xmlns'), "type"))) if (attrNil and attrNil.lower() == 'true'): return None elif not len(node) and not node.text: if (attrType and attrType == constants.TYPE_DICT): return {} elif (attrType and attrType == constants.TYPE_LIST): return [] else: return '' elif (len(node) == 0 and node.text): converters = {constants.TYPE_BOOL: lambda x: x.lower() == 'true', constants.TYPE_INT: lambda x: int(x), constants.TYPE_LONG: lambda x: long(x), constants.TYPE_FLOAT: lambda x: float(x)} if attrType and attrType in converters: return converters[attrType](node.text) else: return node.text elif self._get_key(node.tag) in listnames: return [self._from_xml_node(n, listnames) for n in node] else: result = dict() for attr in node.keys(): if (attr == 'xmlns' or attr.startswith('xmlns:') or attr == constants.XSI_ATTR or attr == constants.TYPE_ATTR): continue result[self._get_key(attr)] = node.get(attr) children = list(node) for child in children: result[self._get_key(child.tag)] = self._from_xml_node( child, listnames) return result def default(self, datastring): return {'body': self._from_xml(datastring)} def __call__(self, datastring): # Adding a migration path to allow us to remove unncessary classes return self.default(datastring) class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer.""" def deserialize(self, request, action): return self.dispatch(request, action=action) def default(self, request): return {} class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None): self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) self.headers_deserializer = (headers_deserializer or RequestHeadersDeserializer()) def deserialize(self, request): """Extract necessary pieces of the request. 
:param request: Request object :returns tuple of expected controller action name, dictionary of keyword arguments to pass to the controller, the expected content type of the response """ action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) action_args.update(self.deserialize_headers(request, action)) action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) def deserialize_headers(self, request, action): return self.headers_deserializer.deserialize(request, action) def deserialize_body(self, request, action): try: content_type = request.best_match_content_type() except exception.InvalidContentType: LOG.debug(_("Unrecognized Content-Type provided in request")) return {} if content_type is None: LOG.debug(_("No Content-Type provided in request")) return {} if not len(request.body) > 0: LOG.debug(_("Empty body provided in request")) return {} try: deserializer = self.get_body_deserializer(content_type) except exception.InvalidContentType: LOG.debug(_("Unable to deserialize body as provided Content-Type")) raise return deserializer.deserialize(request.body, action) def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): return request.best_match_content_type() def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = nova.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import neutron.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(detail='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. 
""" raise NotImplementedError(_('You must implement __call__')) class Debug(Middleware): """Middleware for debugging. Helper class that can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify def __call__(self, req): print(("*" * 40) + " REQUEST ENVIRON") for key, value in req.environ.items(): print(key, "=", value) print resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") for (key, value) in resp.headers.iteritems(): print(key, "=", value) print resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """Print contents of a wrapper string iterator when iterated.""" print(("*" * 40) + " BODY") for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" @classmethod def factory(cls, global_config, **local_config): """Return an instance of the WSGI Router class.""" return cls() def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be a wsgi.Controller, who will route the request to the action method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, "/svrlist", controller=sc, action="list") # Actions are all implicitly defined mapper.resource("network", "networks", controller=nc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """Dispatch a Request. Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: language = req.best_match_language() msg = _('The resource could not be found.') msg = gettextutils.get_localized_message(msg, language) return webob.exc.HTTPNotFound(explanation=msg) app = match['controller'] return app class Resource(Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. """ def __init__(self, controller, fault_body_function, deserializer=None, serializer=None): """Object initialization. 
:param controller: object that implement methods created by routes lib :param deserializer: object that can serialize the output of a controller into a webob response :param serializer: object that can deserialize a webob request into necessary pieces :param fault_body_function: a function that will build the response body for HTTP errors raised by operations on this resource object """ self.controller = controller self.deserializer = deserializer or RequestDeserializer() self.serializer = serializer or ResponseSerializer() self._fault_body_function = fault_body_function # use serializer's xmlns for populating Fault generator xmlns xml_serializer = self.serializer.body_serializers['application/xml'] if hasattr(xml_serializer, 'xmlns'): self._xmlns = xml_serializer.xmlns @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info(_("%(method)s %(url)s"), {"method": request.method, "url": request.url}) try: action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") LOG.exception(_("InvalidContentType: %s"), msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg), self._xmlns) except exception.MalformedRequestBody: msg = _("Malformed request body") LOG.exception(_("MalformedRequestBody: %s"), msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg), self._xmlns) try: action_result = self.dispatch(request, action, args) except webob.exc.HTTPException as ex: LOG.info(_("HTTP exception thrown: %s"), unicode(ex)) action_result = Fault(ex, self._xmlns, self._fault_body_function) except Exception: LOG.exception(_("Internal error")) # Do not include the traceback to avoid returning it to clients. 
action_result = Fault(webob.exc.HTTPServerError(), self._xmlns, self._fault_body_function) if isinstance(action_result, dict) or action_result is None: response = self.serializer.serialize(action_result, accept, action=action) else: response = action_result try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict except AttributeError as e: msg_dict = dict(url=request.url, exception=e) msg = _("%(url)s returned a fault: %(exception)s") % msg_dict LOG.info(msg) return response def dispatch(self, request, action, action_args): """Find action-spefic method on controller and call it.""" controller_method = getattr(self.controller, action) try: #NOTE(salvatore-orlando): the controller method must have # an argument whose name is 'request' return controller_method(request=request, **action_args) except TypeError as exc: LOG.exception(exc) return Fault(webob.exc.HTTPBadRequest(), self._xmlns) def _default_body_function(wrapped_exc): code = wrapped_exc.status_int fault_data = { 'Error': { 'code': code, 'message': wrapped_exc.explanation}} # 'code' is an attribute on the fault tag itself metadata = {'attributes': {'Error': 'code'}} return fault_data, metadata class Fault(webob.exc.HTTPException): """Generates an HTTP response from a webob HTTP exception.""" def __init__(self, exception, xmlns=None, body_function=None): """Creates a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = self.wrapped_exc.status_int self._xmlns = xmlns self._body_function = body_function or _default_body_function @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. fault_data, metadata = self._body_function(self.wrapped_exc) xml_serializer = XMLDictSerializer(metadata, self._xmlns) content_type = req.best_match_content_type() serializer = { 'application/xml': xml_serializer, 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type return self.wrapped_exc # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Controller(object): """WSGI app that dispatched to methods. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. 
""" @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Call the method specified in req.environ by RoutesMiddleware.""" arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] method = getattr(self, action) del arg_dict['controller'] del arg_dict['action'] if 'format' in arg_dict: del arg_dict['format'] arg_dict['request'] = req result = method(**arg_dict) if isinstance(result, dict) or result is None: if result is None: status = 204 content_type = '' body = None else: status = 200 content_type = req.best_match_content_type() default_xmlns = self.get_default_xmlns(req) body = self._serialize(result, content_type, default_xmlns) response = webob.Response(status=status, content_type=content_type, body=body) msg_dict = dict(url=req.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict LOG.debug(msg) return response else: return result def _serialize(self, data, content_type, default_xmlns): """Serialize the given dict to the provided content_type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata, default_xmlns) try: return serializer.serialize(data, content_type) except exception.InvalidContentType: msg = _('The requested content type %s is invalid.') % content_type raise webob.exc.HTTPNotAcceptable(msg) def _deserialize(self, data, content_type): """Deserialize the request body to the specefied content type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) return serializer.deserialize(data, content_type)['body'] def get_default_xmlns(self, req): """Provide the XML namespace to use if none is otherwise specified.""" return None # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Serializer(object): """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None, default_xmlns=None): """Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. """ self.metadata = metadata or {} self.default_xmlns = default_xmlns def _get_serialize_handler(self, content_type): handlers = { 'application/json': JSONDictSerializer(), 'application/xml': XMLDictSerializer(self.metadata), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type) def serialize(self, data, content_type): """Serialize a dictionary into the specified content type.""" return self._get_serialize_handler(content_type).serialize(data) def deserialize(self, datastring, content_type): """Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. """ try: return self.get_deserialize_handler(content_type).deserialize( datastring) except Exception: raise webob.exc.HTTPBadRequest(_("Could not deserialize data")) def get_deserialize_handler(self, content_type): handlers = { 'application/json': JSONDeserializer(), 'application/xml': XMLDeserializer(self.metadata), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type)
import numpy as np from rllab.misc import tensor_utils from rllab.misc import special2 as special from rllab.algos import util import rllab.misc.logger as logger class Sampler(object): def start_worker(self): """ Initialize the sampler, e.g. launching parallel workers if necessary. """ raise NotImplementedError def obtain_samples(self, itr): """ Collect samples for the given iteration number. :param itr: Iteration number. :return: A list of paths. """ raise NotImplementedError def process_samples(self, itr, paths): """ Return processed sample data (typically a dictionary of concatenated tensors) based on the collected paths. :param itr: Iteration number. :param paths: A list of collected paths. :return: Processed sample data. """ raise NotImplementedError def shutdown_worker(self): """ Terminate workers if necessary. """ raise NotImplementedError class BaseSampler(Sampler): def __init__(self, algo): """ :type algo: BatchPolopt """ self.algo = algo def process_advantages(self, advantages): if self.algo.center_adv: advantages = util.center_advantages(advantages) if self.algo.positive_adv: advantages = util.shift_advantages_to_positive(advantages) return advantages def process_samples(self, itr, paths): baselines = [] returns = [] if hasattr(self.algo.baseline, "predict_n"): all_path_baselines = self.algo.baseline.predict_n(paths) else: all_path_baselines = [self.algo.baseline.predict(path) for path in paths] for idx, path in enumerate(paths): path_baselines = np.append(all_path_baselines[idx], 0) deltas = path["rewards"] + \ self.algo.discount * path_baselines[1:] - \ path_baselines[:-1] path["advantages"] = special.discount_cumsum( deltas, self.algo.discount * self.algo.gae_lambda) path["qvalues"] = path["advantages"] + path_baselines[:-1] path["returns"] = special.discount_cumsum(path["rewards"], self.algo.discount) baselines.append(path_baselines[:-1]) returns.append(path["returns"]) ev = special.explained_variance_1d( np.concatenate(baselines), np.concatenate(returns) ) if not self.algo.policy.recurrent: observations = tensor_utils.concat_tensor_list([path["observations"] for path in paths]) actions = tensor_utils.concat_tensor_list([path["actions"] for path in paths]) rewards = tensor_utils.concat_tensor_list([path["rewards"] for path in paths]) returns = tensor_utils.concat_tensor_list([path["returns"] for path in paths]) advantages = tensor_utils.concat_tensor_list([path["advantages"] for path in paths]) qvalues = tensor_utils.concat_tensor_list([path["qvalues"] for path in paths]) baselines_tensor = tensor_utils.concat_tensor_list(baselines) env_infos = tensor_utils.concat_tensor_dict_list([path["env_infos"] for path in paths]) agent_infos = tensor_utils.concat_tensor_dict_list([path["agent_infos"] for path in paths]) etas = None if hasattr(self.algo, 'qprop') and self.algo.qprop: old_advantages = np.copy(advantages) old_advantages = self.process_advantages(old_advantages) old_advantages_scale = np.abs(old_advantages).mean() logger.record_tabular("AbsLearnSignalOld", old_advantages_scale) logger.log("Qprop, subtracting control variate") advantages_bar = self.algo.get_control_variate(observations=observations, actions=actions) if hasattr(self.algo, 'mqprop') and self.algo.mqprop: logger.log("M-Qprop, subtracting values") advantages_bar -= baselines_tensor if self.algo.qprop_eta_option == 'ones': etas = np.ones_like(advantages) elif self.algo.qprop_eta_option == 'adapt1': # conservative etas = (advantages * advantages_bar) > 0 etas = etas.astype(advantages.dtype) logger.log("Qprop, 
etas: %d 1s, %d 0s"%((etas == 1).sum(), (etas == 0).sum())) elif self.algo.qprop_eta_option == 'adapt2': # aggressive etas = np.sign(advantages * advantages_bar) etas = etas.astype(advantages.dtype) logger.log("Qprop, etas: %d 1s, %d -1s"%((etas == 1).sum(), (etas == -1).sum())) else: raise NotImplementedError(self.algo.qprop_eta_option) advantages -= etas * advantages_bar advantages = self.process_advantages(advantages) advantages_scale = np.abs(advantages).mean() logger.record_tabular("AbsLearnSignalNew", advantages_scale) logger.record_tabular("AbsLearnSignal", advantages_scale) else: advantages = self.process_advantages(advantages) advantages_scale = np.abs(advantages).mean() logger.record_tabular("AbsLearnSignal", advantages_scale) average_discounted_return = \ np.mean([path["returns"][0] for path in paths]) undiscounted_returns = [sum(path["rewards"]) for path in paths] ent = np.mean(self.algo.policy.distribution.entropy(agent_infos)) samples_data = dict( observations=observations, actions=actions, rewards=rewards, returns=returns, advantages=advantages, qvalues=qvalues, env_infos=env_infos, agent_infos=agent_infos, paths=paths, baselines=baselines_tensor, etas=etas, ) else: max_path_length = max([len(path["advantages"]) for path in paths]) # make all paths the same length (pad extra advantages with 0) obs = [path["observations"] for path in paths] obs = tensor_utils.pad_tensor_n(obs, max_path_length) if self.algo.center_adv: raw_adv = np.concatenate([path["advantages"] for path in paths]) adv_mean = np.mean(raw_adv) adv_std = np.std(raw_adv) + 1e-8 adv = [(path["advantages"] - adv_mean) / adv_std for path in paths] else: adv = [path["advantages"] for path in paths] adv = np.asarray([tensor_utils.pad_tensor(a, max_path_length) for a in adv]) actions = [path["actions"] for path in paths] actions = tensor_utils.pad_tensor_n(actions, max_path_length) rewards = [path["rewards"] for path in paths] rewards = tensor_utils.pad_tensor_n(rewards, max_path_length) returns = [path["returns"] for path in paths] returns = tensor_utils.pad_tensor_n(returns, max_path_length) agent_infos = [path["agent_infos"] for path in paths] agent_infos = tensor_utils.stack_tensor_dict_list( [tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos] ) env_infos = [path["env_infos"] for path in paths] env_infos = tensor_utils.stack_tensor_dict_list( [tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos] ) valids = [np.ones_like(path["returns"]) for path in paths] valids = tensor_utils.pad_tensor_n(valids, max_path_length) baselines_tensor = tensor_utils.pad_tensor_n(baselines, max_path_length) average_discounted_return = \ np.mean([path["returns"][0] for path in paths]) undiscounted_returns = [sum(path["rewards"]) for path in paths] ent = np.sum(self.algo.policy.distribution.entropy(agent_infos) * valids) / np.sum(valids) samples_data = dict( observations=obs, actions=actions, advantages=adv, rewards=rewards, returns=returns, valids=valids, agent_infos=agent_infos, env_infos=env_infos, paths=paths, baselines=baselines_tensor, ) logger.log("fitting baseline...") if hasattr(self.algo.baseline, 'fit_with_samples'): self.algo.baseline.fit_with_samples(paths, samples_data) else: self.algo.baseline.fit(paths) logger.log("fitted") logger.record_tabular('Iteration', itr) logger.record_tabular('AverageDiscountedReturn', average_discounted_return) logger.record_tabular('AverageReturn', np.mean(undiscounted_returns)) logger.record_tabular('ExplainedVariance', ev) logger.record_tabular('NumTrajs', 
                              len(paths))
        logger.record_tabular('Entropy', ent)
        logger.record_tabular('Perplexity', np.exp(ent))
        logger.record_tabular('StdReturn', np.std(undiscounted_returns))
        logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
        logger.record_tabular('MinReturn', np.min(undiscounted_returns))

        return samples_data
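# --- Hedged illustration (not part of the rllab sampler above) ---
# Standalone numpy sketch of the advantage computation done in
# process_samples(): deltas = r_t + gamma * V(s_{t+1}) - V(s_t), followed by a
# discounted cumulative sum with factor gamma * lambda (GAE). The helper below
# re-implements discount_cumsum just for the example; the sampler itself uses
# rllab's special.discount_cumsum. Reward and baseline values are made up.
import numpy as np


def discount_cumsum(x, discount):
    """Reverse cumulative sum of x, scaling each later step by `discount`."""
    out = np.zeros(len(x))
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + discount * running
        out[t] = running
    return out


rewards = np.array([1.0, 0.0, 1.0])
baselines = np.array([0.5, 0.4, 0.3, 0.0])  # V(s_t) with a terminal 0 appended
gamma, gae_lambda = 0.99, 0.95

deltas = rewards + gamma * baselines[1:] - baselines[:-1]
advantages = discount_cumsum(deltas, gamma * gae_lambda)
returns = discount_cumsum(rewards, gamma)
print(advantages)
print(returns)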
from common_fixtures import * # NOQA from cattle import ApiError def _create_stack(client): env = client.create_stack(name=random_str()) env = client.wait_success(env) assert env.state == "active" return env def test_service_add_remove_service_link(client, context): env = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service2 = client.wait_success(service2) # link service2 to service1 service_link = {"serviceId": service2.id} service1 = service1.addservicelink(serviceLink=service_link) _validate_add_service_link(service1, service2, client) # remove service link service1 = service1.removeservicelink(serviceLink=service_link) _validate_remove_service_link(service1, service2, client) # validate adding link with the name service_link = {"serviceId": service2.id, "name": 'myLink'} service1 = service1.addservicelink(serviceLink=service_link) service_maps = client. \ list_serviceConsumeMap(serviceId=service1.id, consumedServiceId=service2.id, name='myLink') assert len(service_maps) == 1 def test_links_after_service_remove(client, context): env = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service2 = client.wait_success(service2) # link servic2 to service1 service_link = {"serviceId": service2.id} service1 = service1.addservicelink(serviceLink=service_link) _validate_add_service_link(service1, service2, client) # link service1 to service2 service_link = {"serviceId": service1.id} service2 = service2.addservicelink(serviceLink=service_link) _validate_add_service_link(service2, service1, client) # remove service1 service1 = client.wait_success(service1.remove()) _validate_remove_service_link(service1, service2, client) _validate_remove_service_link(service2, service1, client) def test_link_services_from_diff_env(client, context): env1 = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service1 = client.wait_success(service1) env2 = _create_stack(client) service2 = client.create_service(name=random_str(), stackId=env2.id, launchConfig=launch_config) service2 = client.wait_success(service2) # try to link - should work service_link = {"serviceId": service2.id} service1.addservicelink(serviceLink=service_link) _validate_add_service_link(service1, service2, client) def test_set_service_links(client, context): env1 = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service2 = client.wait_success(service2) service3 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service3 = client.wait_success(service3) # set service2, service3 links for service1 service_link1 = {"serviceId": service2.id, 
"name": "link1"} service_link2 = {"serviceId": service3.id, "name": "link2"} service1 = service1. \ setservicelinks(serviceLinks=[service_link1, service_link2]) _validate_add_service_link(service1, service2, client, "link1") _validate_add_service_link(service1, service3, client, "link2") # update the link with new name service_link1 = {"serviceId": service2.id, "name": "link3"} service_link2 = {"serviceId": service3.id, "name": "link4"} service1 = service1. \ setservicelinks(serviceLinks=[service_link1, service_link2]) _validate_remove_service_link(service1, service2, client, "link1") _validate_remove_service_link(service1, service3, client, "link2") _validate_add_service_link(service1, service2, client, "link3") _validate_add_service_link(service1, service3, client, "link4") # set service2 links for service1 service_link = {"serviceId": service2.id} service1 = service1. \ setservicelinks(serviceLinks=[service_link]) _validate_remove_service_link(service1, service3, client, "link4") # set empty service link set service1 = service1.setservicelinks(serviceLinks=[]) _validate_remove_service_link(service1, service2, client, "link3") # try to link to the service from diff stack - should work env2 = _create_stack(client) service4 = client.create_service(name=random_str(), stackId=env2.id, launchConfig=launch_config) service4 = client.wait_success(service4) service_link = {"serviceId": service4.id} service1.setservicelinks(serviceLinks=[service_link]) env1.remove() env2.remove() def test_link_service_twice(client, context): env = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service2 = client.wait_success(service2) # link service2 to service1 service_link = {"serviceId": service2.id} service1 = service1.addservicelink(serviceLink=service_link) _validate_add_service_link(service1, service2, client) # try to link again with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'NotUnique' assert e.value.error.fieldName == 'serviceId' def test_dns_service(client, context): env = _create_stack(client) # create 1 app service, 1 dns service and 2 web services # app service would link to dns, and dns to the web services image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} web1 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) web1 = client.wait_success(web1) web2 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) web2 = client.wait_success(web2) app = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) app = client.wait_success(app) dns = client.create_dnsService(name='tata', stackId=env.id) dns = client.wait_success(dns) env.activateservices() web1 = client.wait_success(web1, 120) web2 = client.wait_success(web2) app = client.wait_success(app) dns = client.wait_success(dns) assert web1.state == 'active' assert web2.state == 'active' assert app.state == 'active' assert dns.state == 'active' service_link = {"serviceId": web1.id} dns = app.addservicelink(serviceLink=service_link) _validate_add_service_link(dns, web1, client) service_link = {"serviceId": web2.id} dns = app.addservicelink(serviceLink=service_link) 
_validate_add_service_link(dns, web2, client) service_link = {"serviceId": dns.id} app = app.addservicelink(serviceLink=service_link) _validate_add_service_link(app, dns, client) def test_service_link_emu_docker_link(super_client, client, context): env = _create_stack(client) dns = client.create_dns_service(name='dns', stackId=env.id) server = client.create_service(name='server', launchConfig={ 'imageUuid': context.image_uuid }, stackId=env.id) server2 = client.create_service(name='server2', launchConfig={ 'imageUuid': context.image_uuid }, stackId=env.id) service = client.create_service(name='client', launchConfig={ 'imageUuid': context.image_uuid }, stackId=env.id) server3 = client.create_service(name='server3', launchConfig={ 'imageUuid': context.image_uuid }, stackId=env.id) server4 = client.create_service(name='server4', launchConfig={ 'imageUuid': context.image_uuid }, stackId=env.id) service_link1 = {"serviceId": dns.id, "name": "dns"} service_link2 = {"serviceId": server.id, "name": "other"} service_link3 = {"serviceId": server2.id, "name": "server2"} service_link4 = {"serviceId": server3.id} service_link5 = {"serviceId": server4.id, "name": ""} service. \ setservicelinks(serviceLinks=[service_link1, service_link2, service_link3, service_link4, service_link5]) dns = client.wait_success(dns) assert dns.state == 'inactive' server = client.wait_success(server) assert server.state == 'inactive' server2 = client.wait_success(server2) assert server2.state == 'inactive' service = client.wait_success(service) assert service.state == 'inactive' server3 = client.wait_success(server3) assert server3.state == 'inactive' server4 = client.wait_success(server4) assert server4.state == 'inactive' dns = client.wait_success(dns.activate()) assert dns.state == 'active' server = client.wait_success(server.activate()) assert server.state == 'active' server2 = client.wait_success(server2.activate()) assert server2.state == 'active' server3 = client.wait_success(server3.activate()) assert server3.state == 'active' server4 = client.wait_success(server4.activate()) assert server4.state == 'active' service = client.wait_success(service.activate()) assert service.state == 'active' instance = find_one(service.instances) instance = super_client.reload(instance) links = instance.instanceLinks() assert len(links) == 4 for link in links: map = link.serviceConsumeMap() assert map.consumedServiceId in {server.id, server2.id, server3.id, server4.id} assert link.instanceId is not None expose_map = link.targetInstance().serviceExposeMaps()[0] if map.consumedServiceId == server.id: assert link.linkName == 'other' assert expose_map.serviceId == server.id assert expose_map.managed == 1 elif map.consumedServiceId == server2.id: assert link.linkName == 'server2' assert expose_map.serviceId == server2.id elif map.consumedServiceId == server3.id: assert link.linkName == 'server3' assert expose_map.serviceId == server3.id elif map.consumedServiceId == server4.id: assert link.linkName == 'server4' assert expose_map.serviceId == server4.id def test_set_service_links_duplicated_service(client, context): env1 = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env1.id, launchConfig=launch_config) service2 = client.wait_success(service2) # set service links having same service id, diff name 
service_link1 = {"serviceId": service2.id, "name": "link1"} service_link2 = {"serviceId": service2.id, "name": "link2"} service1 = service1. \ setservicelinks(serviceLinks=[service_link1, service_link2]) _validate_add_service_link(service1, service2, client, "link1") _validate_add_service_link(service1, service2, client, "link2") with pytest.raises(ApiError) as e: service1. \ setservicelinks(serviceLinks=[service_link1, service_link1]) assert e.value.error.status == 422 assert e.value.error.code == 'NotUnique' def _validate_add_service_link(service, consumedService, client, link_name=None): if link_name is None: service_maps = client. \ list_serviceConsumeMap(serviceId=service.id, consumedServiceId=consumedService.id) else: service_maps = client. \ list_serviceConsumeMap(serviceId=service.id, consumedServiceId=consumedService.id, name=link_name) assert len(service_maps) == 1 if link_name is not None: assert service_maps[0].name is not None service_map = service_maps[0] wait_for_condition( client, service_map, _resource_is_active, lambda x: 'State is: ' + x.state) def _validate_remove_service_link(service, consumedService, client, link_name=None): def check(): if link_name is None: service_maps = client. \ list_serviceConsumeMap(serviceId=service.id, consumedServiceId=consumedService.id) else: service_maps = client. \ list_serviceConsumeMap(serviceId=service.id, consumedServiceId=consumedService.id, name=link_name) return len(service_maps) == 0 wait_for(check) def _resource_is_active(resource): return resource.state == 'active' def _resource_is_removed(resource): return resource.state == 'removed' def test_validate_svc_link_name(client, context): env = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} service1 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service1 = client.wait_success(service1) service2 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service2 = client.wait_success(service2) # single invalid char # cannot contain special chars other than ".", "-", "_", "/" service_link = {"serviceId": service2.id, "name": '+'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # multiple invalid chars # cannot contain special chars other than ".", "-", "_", "/" service_link = {"serviceId": service2.id, "name": '$&()#@'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot start with - service_link = {"serviceId": service2.id, "name": '-myLink'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot end with - service_link = {"serviceId": service2.id, "name": 'myLink-'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot contain -- service_link = {"serviceId": service2.id, "name": 'my--Link'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot start with . 
service_link = {"serviceId": service2.id, "name": '.myLink'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot end with . service_link = {"serviceId": service2.id, "name": 'myLink.'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # cannot contain .. service_link = {"serviceId": service2.id, "name": 'myL..ink'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'InvalidCharacters' # link with no dots longer that 63 service_link = {"serviceId": service2.id, "name": 'myLinkTOOLONGtoolongtoolongtoo' 'longmyLinkTOOLONGtoolongtoolongtoo'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'MaxLengthExceeded' # link with a . with single part longer that 63 service_link = {"serviceId": service2.id, "name": 'myLinkTOOLONGtoolongtoolongtoo' 'longmyLinkTOOLONGtoolongtoolongtoo.secondpart'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'MaxLengthExceeded' # link with . with total length longer that 253 service_link = {"serviceId": service2.id, "name": 'myLinkTOOLONGtoolongtoolongtoo.' 'longmyLinkTOOLONGtoolongtoolongtoo.secondpart.' 'myLinkTOOLONGtoolongtoolongtoo.' 'longmyLinkTOOLONGtoolongtoolongtoo.secondpart.' 'myLinkTOOLONGtoolongtoolongtoo.' 'longmyLinkTOOLONGtoolongtoolongtoo.secondpart.' 'myLinkTOOLONGtoolongtoolongtoo.' 'longmyLinkTOOLONGtoolongtoolongtoo.secondpart'} with pytest.raises(ApiError) as e: service1.addservicelink(serviceLink=service_link) assert e.value.error.status == 422 assert e.value.error.code == 'MaxLengthExceeded' # link service2 to service1 with single valid char link service_link = {"serviceId": service2.id, "name": 'm'} service1 = service1.addservicelink(serviceLink=service_link) _validate_add_service_link(service1, service2, client) service3 = client.create_service(name=random_str(), stackId=env.id, launchConfig=launch_config) service3 = client.wait_success(service3) # link service3 to service1 with multiple valid chars service_link2 = {"serviceId": service3.id, "name": 'm.gh_kl.a-b'} service1 = service1.addservicelink(serviceLink=service_link2) _validate_add_service_link(service1, service3, client) def test_dependencies_start(client, context): stack = _create_stack(client) image_uuid = context.image_uuid launch_config = {"imageUuid": image_uuid} svc1 = client.create_service(name=random_str(), stackId=stack.id, launchConfig=launch_config) svc1 = client.wait_success(svc1) svc2 = client.create_service(name=random_str(), stackId=stack.id, launchConfig=launch_config) svc2 = client.wait_success(svc2) svc3 = client.create_service(name=random_str(), stackId=stack.id, launchConfig=launch_config) svc3 = client.wait_success(svc3) # set service2, service3 links for service1 service_link1 = {"serviceId": svc2.id, "name": "link1"} service_link2 = {"serviceId": svc3.id, "name": "link2"} svc1 = svc1. 
\
        setservicelinks(serviceLinks=[service_link1, service_link2])
    _validate_add_service_link(svc1, svc2, client, "link1")
    _validate_add_service_link(svc1, svc3, client, "link2")

    # activate services all at once
    stack.activateservices()

    wait_for(lambda: client.reload(svc1).state == 'active')
    # TODO: replace with depends_on
    wait_for(lambda: client.reload(svc2).state == 'active')
    wait_for(lambda: client.reload(svc3).state == 'active')
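

# ---------------------------------------------------------------------------
# Illustrative sketch only (this is not Cattle's server-side validation): the
# naming rules exercised by test_validate_svc_link_name above can be
# approximated client-side.  The helper below mirrors just the behaviour the
# assertions check: alphanumerics plus ".", "-", "_" and "/" are allowed, a
# name may not begin or end with "-" or ".", may not contain "--" or "..",
# each dot-separated label is capped at 63 characters and the full name at
# 253.
# ---------------------------------------------------------------------------
def _demo_link_name_rules():
    import re

    def looks_valid(name):
        if not name or len(name) > 253:
            return False
        if re.search(r'[^A-Za-z0-9._/-]', name):
            return False
        if name[0] in '-.' or name[-1] in '-.':
            return False
        if '--' in name or '..' in name:
            return False
        return all(len(label) <= 63 for label in name.split('.'))

    assert looks_valid('m.gh_kl.a-b')
    assert not looks_valid('-myLink')
    assert not looks_valid('my--Link')
    assert not looks_valid('x' * 64)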
from __future__ import absolute_import from __future__ import unicode_literals import datetime import logging import operator from functools import reduce import enum from docker.errors import APIError from . import parallel from .config import ConfigurationError from .config.config import V1 from .config.sort_services import get_container_name_from_network_mode from .config.sort_services import get_service_name_from_network_mode from .const import IMAGE_EVENTS from .const import LABEL_ONE_OFF from .const import LABEL_PROJECT from .const import LABEL_SERVICE from .container import Container from .network import build_networks from .network import get_networks from .network import ProjectNetworks from .service import BuildAction from .service import ContainerNetworkMode from .service import ContainerPidMode from .service import ConvergenceStrategy from .service import NetworkMode from .service import PidMode from .service import Service from .service import ServiceNetworkMode from .service import ServicePidMode from .utils import microseconds_from_time_nano from .volume import ProjectVolumes log = logging.getLogger(__name__) @enum.unique class OneOffFilter(enum.Enum): include = 0 exclude = 1 only = 2 @classmethod def update_labels(cls, value, labels): if value == cls.only: labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True")) elif value == cls.exclude: labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False")) elif value == cls.include: pass else: raise ValueError("Invalid value for one_off: {}".format(repr(value))) class Project(object): """ A collection of services. """ def __init__(self, name, services, client, networks=None, volumes=None, config_version=None): self.name = name self.services = services self.client = client self.volumes = volumes or ProjectVolumes({}) self.networks = networks or ProjectNetworks({}, False) self.config_version = config_version def labels(self, one_off=OneOffFilter.exclude): labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)] OneOffFilter.update_labels(one_off, labels) return labels @classmethod def from_config(cls, name, config_data, client): """ Construct a Project from a config.Config object. 
""" use_networking = (config_data.version and config_data.version != V1) networks = build_networks(name, config_data, client) project_networks = ProjectNetworks.from_services( config_data.services, networks, use_networking) volumes = ProjectVolumes.from_config(name, config_data, client) project = cls(name, [], client, project_networks, volumes, config_data.version) for service_dict in config_data.services: service_dict = dict(service_dict) if use_networking: service_networks = get_networks(service_dict, networks) else: service_networks = {} service_dict.pop('networks', None) links = project.get_links(service_dict) network_mode = project.get_network_mode( service_dict, list(service_networks.keys()) ) pid_mode = project.get_pid_mode(service_dict) volumes_from = get_volumes_from(project, service_dict) if config_data.version != V1: service_dict['volumes'] = [ volumes.namespace_spec(volume_spec) for volume_spec in service_dict.get('volumes', []) ] secrets = get_secrets( service_dict['name'], service_dict.pop('secrets', None) or [], config_data.secrets) project.services.append( Service( service_dict.pop('name'), client=client, project=name, use_networking=use_networking, networks=service_networks, links=links, network_mode=network_mode, volumes_from=volumes_from, secrets=secrets, pid_mode=pid_mode, **service_dict) ) return project @property def service_names(self): return [service.name for service in self.services] def get_service(self, name): """ Retrieve a service by name. Raises NoSuchService if the named service does not exist. """ for service in self.services: if service.name == name: return service raise NoSuchService(name) def validate_service_names(self, service_names): """ Validate that the given list of service names only contains valid services. Raises NoSuchService if one of the names is invalid. """ valid_names = self.service_names for name in service_names: if name not in valid_names: raise NoSuchService(name) def get_services(self, service_names=None, include_deps=False): """ Returns a list of this project's services filtered by the provided list of names, or all services if service_names is None or []. If include_deps is specified, returns a list including the dependencies for service_names, in order of dependency. Preserves the original order of self.services where possible, reordering as needed to resolve dependencies. Raises NoSuchService if any of the named services do not exist. """ if service_names is None or len(service_names) == 0: service_names = self.service_names unsorted = [self.get_service(name) for name in service_names] services = [s for s in self.services if s in unsorted] if include_deps: services = reduce(self._inject_deps, services, []) uniques = [] [uniques.append(s) for s in services if s not in uniques] return uniques def get_services_without_duplicate(self, service_names=None, include_deps=False): services = self.get_services(service_names, include_deps) for service in services: service.remove_duplicate_containers() return services def get_links(self, service_dict): links = [] if 'links' in service_dict: for link in service_dict.get('links', []): if ':' in link: service_name, link_name = link.split(':', 1) else: service_name, link_name = link, None try: links.append((self.get_service(service_name), link_name)) except NoSuchService: raise ConfigurationError( 'Service "%s" has a link to service "%s" which does not ' 'exist.' 
% (service_dict['name'], service_name)) del service_dict['links'] return links def get_network_mode(self, service_dict, networks): network_mode = service_dict.pop('network_mode', None) if not network_mode: if self.networks.use_networking: return NetworkMode(networks[0]) if networks else NetworkMode('none') return NetworkMode(None) service_name = get_service_name_from_network_mode(network_mode) if service_name: return ServiceNetworkMode(self.get_service(service_name)) container_name = get_container_name_from_network_mode(network_mode) if container_name: try: return ContainerNetworkMode(Container.from_id(self.client, container_name)) except APIError: raise ConfigurationError( "Service '{name}' uses the network stack of container '{dep}' which " "does not exist.".format(name=service_dict['name'], dep=container_name)) return NetworkMode(network_mode) def get_pid_mode(self, service_dict): pid_mode = service_dict.pop('pid', None) if not pid_mode: return PidMode(None) service_name = get_service_name_from_network_mode(pid_mode) if service_name: return ServicePidMode(self.get_service(service_name)) container_name = get_container_name_from_network_mode(pid_mode) if container_name: try: return ContainerPidMode(Container.from_id(self.client, container_name)) except APIError: raise ConfigurationError( "Service '{name}' uses the PID namespace of container '{dep}' which " "does not exist.".format(name=service_dict['name'], dep=container_name) ) return PidMode(pid_mode) def start(self, service_names=None, **options): containers = [] def start_service(service): service_containers = service.start(quiet=True, **options) containers.extend(service_containers) services = self.get_services(service_names) def get_deps(service): return { (self.get_service(dep), config) for dep, config in service.get_dependency_configs().items() } parallel.parallel_execute( services, start_service, operator.attrgetter('name'), 'Starting', get_deps, ) return containers def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options): containers = self.containers(service_names, one_off=one_off) def get_deps(container): # actually returning inversed dependencies return {(other, None) for other in containers if container.service in self.get_service(other.service).get_dependency_names()} parallel.parallel_execute( containers, self.build_container_operation_with_timeout_func('stop', options), operator.attrgetter('name'), 'Stopping', get_deps, ) def pause(self, service_names=None, **options): containers = self.containers(service_names) parallel.parallel_pause(reversed(containers), options) return containers def unpause(self, service_names=None, **options): containers = self.containers(service_names) parallel.parallel_unpause(containers, options) return containers def kill(self, service_names=None, **options): parallel.parallel_kill(self.containers(service_names), options) def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options): parallel.parallel_remove(self.containers( service_names, stopped=True, one_off=one_off ), options) def down(self, remove_image_type, include_volumes, remove_orphans=False): self.stop(one_off=OneOffFilter.include) self.find_orphan_containers(remove_orphans) self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include) self.networks.remove() if include_volumes: self.volumes.remove() self.remove_images(remove_image_type) def remove_images(self, remove_image_type): for service in self.get_services(): service.remove_image(remove_image_type) def restart(self, 
service_names=None, **options): containers = self.containers(service_names, stopped=True) parallel.parallel_execute( containers, self.build_container_operation_with_timeout_func('restart', options), operator.attrgetter('name'), 'Restarting', ) return containers def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None): for service in self.get_services(service_names): if service.can_be_built(): service.build(no_cache, pull, force_rm, build_args) else: log.info('%s uses an image, skipping' % service.name) def create( self, service_names=None, strategy=ConvergenceStrategy.changed, do_build=BuildAction.none, ): services = self.get_services_without_duplicate(service_names, include_deps=True) for svc in services: svc.ensure_image_exists(do_build=do_build) plans = self._get_convergence_plans(services, strategy) for service in services: service.execute_convergence_plan( plans[service.name], detached=True, start=False) def events(self, service_names=None): def build_container_event(event, container): time = datetime.datetime.fromtimestamp(event['time']) time = time.replace( microsecond=microseconds_from_time_nano(event['timeNano'])) return { 'time': time, 'type': 'container', 'action': event['status'], 'id': container.id, 'service': container.service, 'attributes': { 'name': container.name, 'image': event['from'], }, 'container': container, } service_names = set(service_names or self.service_names) for event in self.client.events( filters={'label': self.labels()}, decode=True ): # The first part of this condition is a guard against some events # broadcasted by swarm that don't have a status field. # See https://github.com/docker/compose/issues/3316 if 'status' not in event or event['status'] in IMAGE_EVENTS: # We don't receive any image events because labels aren't applied # to images continue # TODO: get labels from the API v1.22 , see github issue 2618 try: # this can fail if the container has been removed container = Container.from_id(self.client, event['id']) except APIError: continue if container.service not in service_names: continue yield build_container_event(event, container) def up(self, service_names=None, start_deps=True, strategy=ConvergenceStrategy.changed, do_build=BuildAction.none, timeout=None, detached=False, remove_orphans=False, scale_override=None, rescale=True): warn_for_swarm_mode(self.client) self.initialize() self.find_orphan_containers(remove_orphans) if scale_override is None: scale_override = {} services = self.get_services_without_duplicate( service_names, include_deps=start_deps) for svc in services: svc.ensure_image_exists(do_build=do_build) plans = self._get_convergence_plans(services, strategy) def do(service): return service.execute_convergence_plan( plans[service.name], timeout=timeout, detached=detached, scale_override=scale_override.get(service.name), rescale=rescale ) def get_deps(service): return { (self.get_service(dep), config) for dep, config in service.get_dependency_configs().items() } results, errors = parallel.parallel_execute( services, do, operator.attrgetter('name'), None, get_deps, ) if errors: raise ProjectError( 'Encountered errors while bringing up the project.' 
) return [ container for svc_containers in results if svc_containers is not None for container in svc_containers ] def initialize(self): self.networks.initialize() self.volumes.initialize() def _get_convergence_plans(self, services, strategy): plans = {} for service in services: updated_dependencies = [ name for name in service.get_dependency_names() if name in plans and plans[name].action in ('recreate', 'create') ] if updated_dependencies and strategy.allows_recreate: log.debug('%s has upstream changes (%s)', service.name, ", ".join(updated_dependencies)) plan = service.convergence_plan(ConvergenceStrategy.always) else: plan = service.convergence_plan(strategy) plans[service.name] = plan return plans def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False): services = self.get_services(service_names, include_deps=False) if parallel_pull: def pull_service(service): service.pull(ignore_pull_failures, True) _, errors = parallel.parallel_execute( services, pull_service, operator.attrgetter('name'), 'Pulling', limit=5, ) if len(errors): raise ProjectError(b"\n".join(errors.values())) else: for service in services: service.pull(ignore_pull_failures, silent=silent) def push(self, service_names=None, ignore_push_failures=False): for service in self.get_services(service_names, include_deps=False): service.push(ignore_push_failures) def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude): return list(filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, filters={'label': self.labels(one_off=one_off)})]) ) def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude): if service_names: self.validate_service_names(service_names) else: service_names = self.service_names containers = self._labeled_containers(stopped, one_off) def matches_service_names(container): return container.labels.get(LABEL_SERVICE) in service_names return [c for c in containers if matches_service_names(c)] def find_orphan_containers(self, remove_orphans): def _find(): containers = self._labeled_containers() for ctnr in containers: service_name = ctnr.labels.get(LABEL_SERVICE) if service_name not in self.service_names: yield ctnr orphans = list(_find()) if not orphans: return if remove_orphans: for ctnr in orphans: log.info('Removing orphan container "{0}"'.format(ctnr.name)) ctnr.kill() ctnr.remove(force=True) else: log.warning( 'Found orphan containers ({0}) for this project. 
If ' 'you removed or renamed this service in your compose ' 'file, you can run this command with the ' '--remove-orphans flag to clean it up.'.format( ', '.join(["{}".format(ctnr.name) for ctnr in orphans]) ) ) def _inject_deps(self, acc, service): dep_names = service.get_dependency_names() if len(dep_names) > 0: dep_services = self.get_services( service_names=list(set(dep_names)), include_deps=True ) else: dep_services = [] dep_services.append(service) return acc + dep_services def build_container_operation_with_timeout_func(self, operation, options): def container_operation_with_timeout(container): if options.get('timeout') is None: service = self.get_service(container.service) options['timeout'] = service.stop_timeout(None) return getattr(container, operation)(**options) return container_operation_with_timeout def get_volumes_from(project, service_dict): volumes_from = service_dict.pop('volumes_from', None) if not volumes_from: return [] def build_volume_from(spec): if spec.type == 'service': try: return spec._replace(source=project.get_service(spec.source)) except NoSuchService: pass if spec.type == 'container': try: container = Container.from_id(project.client, spec.source) return spec._replace(source=container) except APIError: pass raise ConfigurationError( "Service \"{}\" mounts volumes from \"{}\", which is not the name " "of a service or container.".format( service_dict['name'], spec.source)) return [build_volume_from(vf) for vf in volumes_from] def get_secrets(service, service_secrets, secret_defs): secrets = [] for secret in service_secrets: secret_def = secret_defs.get(secret.source) if not secret_def: raise ConfigurationError( "Service \"{service}\" uses an undefined secret \"{secret}\" " .format(service=service, secret=secret.source)) if secret_def.get('external_name'): log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. " "External secrets are not available to containers created by " "docker-compose.".format(service=service, secret=secret.source)) continue if secret.uid or secret.gid or secret.mode: log.warn( "Service \"{service}\" uses secret \"{secret}\" with uid, " "gid, or mode. These fields are not supported by this " "implementation of the Compose file".format( service=service, secret=secret.source ) ) secrets.append({'secret': secret, 'file': secret_def.get('file')}) return secrets def warn_for_swarm_mode(client): info = client.info() if info.get('Swarm', {}).get('LocalNodeState') == 'active': if info.get('ServerVersion', '').startswith('ucp'): # UCP does multi-node scheduling with traditional Compose files. return log.warn( "The Docker Engine you're using is running in swarm mode.\n\n" "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. " "All containers will be scheduled on the current node.\n\n" "To deploy your application across the swarm, " "use `docker stack deploy`.\n" ) class NoSuchService(Exception): def __init__(self, name): self.name = name self.msg = "No such service: %s" % self.name def __str__(self): return self.msg class ProjectError(Exception): def __init__(self, msg): self.msg = msg
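

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of Compose itself): Project.labels() is what
# containers()/_labeled_containers() hand to the Docker API as a label filter,
# and OneOffFilter decides whether one-off (`docker-compose run`) containers
# are included.  The concrete label strings depend on the LABEL_* constants in
# const.py; the values shown in the comments are only illustrative.
# ---------------------------------------------------------------------------
def _demo_label_filters():
    project = Project(name='demo', services=[], client=None)

    # Default filter excludes one-off containers, e.g.
    # ['com.docker.compose.project=demo', 'com.docker.compose.oneoff=False']
    regular = project.labels()

    # Restrict the filter to one-off containers only, e.g.
    # ['com.docker.compose.project=demo', 'com.docker.compose.oneoff=True']
    one_off_only = project.labels(one_off=OneOffFilter.only)
    return regular, one_off_only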
from __future__ import unicode_literals import keyword import re from optparse import make_option from django.core.management.base import NoArgsCommand, CommandError from django.db import connections, DEFAULT_DB_ALIAS from django.utils import six class Command(NoArgsCommand): help = "Introspects the database tables in the given database and outputs a Django model module." option_list = NoArgsCommand.option_list + ( make_option('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help='Nominates a database to ' 'introspect. Defaults to using the "default" database.'), ) requires_model_validation = False db_module = 'django.db' def handle_noargs(self, **options): try: for line in self.handle_inspection(options): self.stdout.write("%s\n" % line) except NotImplementedError: raise CommandError("Database inspection isn't supported for the currently selected database backend.") def handle_inspection(self, options): connection = connections[options.get('database')] # 'table_name_filter' is a stealth option table_name_filter = options.get('table_name_filter') table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '') strip_prefix = lambda s: s.startswith("u'") and s[1:] or s cursor = connection.cursor() yield "# This is an auto-generated Django model module." yield "# You'll have to do the following manually to clean this up:" yield "# * Rearrange models' order" yield "# * Make sure each model has one field with primary_key=True" yield "# Feel free to rename the models, but don't rename db_table values or field names." yield "#" yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'" yield "# into your database." yield "from __future__ import unicode_literals" yield '' yield 'from %s import models' % self.db_module yield '' known_models = [] for table_name in connection.introspection.table_names(cursor): if table_name_filter is not None and callable(table_name_filter): if not table_name_filter(table_name): continue yield 'class %s(models.Model):' % table2model(table_name) known_models.append(table2model(table_name)) try: relations = connection.introspection.get_relations(cursor, table_name) except NotImplementedError: relations = {} try: indexes = connection.introspection.get_indexes(cursor, table_name) except NotImplementedError: indexes = {} used_column_names = [] # Holds column names used in the table so far for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)): comment_notes = [] # Holds Field notes, to be displayed in a Python comment. extra_params = {} # Holds Field parameters such as 'db_column'. column_name = row[0] is_relation = i in relations att_name, params, notes = self.normalize_col_name( column_name, used_column_names, is_relation) extra_params.update(params) comment_notes.extend(notes) used_column_names.append(att_name) # Add primary_key and unique, if necessary. if column_name in indexes: if indexes[column_name]['primary_key']: extra_params['primary_key'] = True elif indexes[column_name]['unique']: extra_params['unique'] = True if is_relation: rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1]) if rel_to in known_models: field_type = 'ForeignKey(%s' % rel_to else: field_type = "ForeignKey('%s'" % rel_to else: # Calling `get_field_type` to get the field type string and any # additional paramters and notes. 
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row) extra_params.update(field_params) comment_notes.extend(field_notes) field_type += '(' # Don't output 'id = meta.AutoField(primary_key=True)', because # that's assumed if it doesn't exist. if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}: continue # Add 'null' and 'blank', if the 'null_ok' flag was present in the # table description. if row[6]: # If it's NULL... extra_params['blank'] = True if not field_type in ('TextField(', 'CharField('): extra_params['null'] = True field_desc = '%s = models.%s' % (att_name, field_type) if extra_params: if not field_desc.endswith('('): field_desc += ', ' field_desc += ', '.join([ '%s=%s' % (k, strip_prefix(repr(v))) for k, v in extra_params.items()]) field_desc += ')' if comment_notes: field_desc += ' # ' + ' '.join(comment_notes) yield ' %s' % field_desc for meta_line in self.get_meta(table_name): yield meta_line def normalize_col_name(self, col_name, used_column_names, is_relation): """ Modify the column name to make it Python-compatible as a field name """ field_params = {} field_notes = [] new_name = col_name.lower() if new_name != col_name: field_notes.append('Field name made lowercase.') if is_relation: if new_name.endswith('_id'): new_name = new_name[:-3] else: field_params['db_column'] = col_name new_name, num_repl = re.subn(r'\W', '_', new_name) if num_repl > 0: field_notes.append('Field renamed to remove unsuitable characters.') if new_name.find('__') >= 0: while new_name.find('__') >= 0: new_name = new_name.replace('__', '_') if col_name.lower().find('__') >= 0: # Only add the comment if the double underscore was in the original name field_notes.append("Field renamed because it contained more than one '_' in a row.") if new_name.startswith('_'): new_name = 'field%s' % new_name field_notes.append("Field renamed because it started with '_'.") if new_name.endswith('_'): new_name = '%sfield' % new_name field_notes.append("Field renamed because it ended with '_'.") if keyword.iskeyword(new_name): new_name += '_field' field_notes.append('Field renamed because it was a Python reserved word.') if new_name[0].isdigit(): new_name = 'number_%s' % new_name field_notes.append("Field renamed because it wasn't a valid Python identifier.") if new_name in used_column_names: num = 0 while '%s_%d' % (new_name, num) in used_column_names: num += 1 new_name = '%s_%d' % (new_name, num) field_notes.append('Field renamed because of name conflict.') if col_name != new_name and field_notes: field_params['db_column'] = col_name return new_name, field_params, field_notes def get_field_type(self, connection, table_name, row): """ Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. """ field_params = {} field_notes = [] try: field_type = connection.introspection.get_field_type(row[1], row) except KeyError: field_type = 'TextField' field_notes.append('This field type is a guess.') # This is a hook for DATA_TYPES_REVERSE to return a tuple of # (field_type, field_params_dict). if type(field_type) is tuple: field_type, new_params = field_type field_params.update(new_params) # Add max_length for all CharFields. 
        if field_type == 'CharField' and row[3]:
            field_params['max_length'] = row[3]

        if field_type == 'DecimalField':
            field_params['max_digits'] = row[4]
            field_params['decimal_places'] = row[5]

        return field_type, field_params, field_notes

    def get_meta(self, table_name):
        """
        Return a sequence comprising the lines of code necessary
        to construct the inner Meta class for the model corresponding
        to the given database table name.
        """
        return ["    class Meta:",
                "        db_table = '%s'" % table_name,
                ""]
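

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the command): normalize_col_name() is where
# most of the cleanup happens, and its behaviour follows directly from the
# rules above -- lowercasing, dropping a trailing "_id" on relations, and
# renaming reserved words and invalid identifiers.  The expected values below
# are implied by that method; instantiating Command here is purely for
# demonstration.
# ---------------------------------------------------------------------------
def _demo_normalize_col_name():
    cmd = Command()

    name, params, notes = cmd.normalize_col_name('Email', [], False)
    assert name == 'email' and params == {'db_column': 'Email'}

    name, params, notes = cmd.normalize_col_name('class', [], False)
    assert name == 'class_field'          # Python reserved word

    name, params, notes = cmd.normalize_col_name('user_id', [], True)
    assert name == 'user'                 # trailing "_id" dropped for relations

    name, params, notes = cmd.normalize_col_name('2nd_col', [], False)
    assert name == 'number_2nd_col'       # not a valid identifier otherwise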
#!/usr/bin/env python # # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Writes a build_config file. The build_config file for a target is a json file containing information about how to build that target based on the target's dependencies. This includes things like: the javac classpath, the list of android resources dependencies, etc. It also includes the information needed to create the build_config for other targets that depend on that one. Android build scripts should not refer to the build_config directly, and the build specification should instead pass information in using the special file-arg syntax (see build_utils.py:ExpandFileArgs). That syntax allows passing of values in a json dict in a file and looks like this: --python-arg=@FileArg(build_config_path:javac:classpath) Note: If paths to input files are passed in this way, it is important that: 1. inputs/deps of the action ensure that the files are available the first time the action runs. 2. Either (a) or (b) a. inputs/deps ensure that the action runs whenever one of the files changes b. the files are added to the action's depfile """ import itertools import optparse import os import sys import xml.dom.minidom from util import build_utils from util import md5_check # Types that should never be used as a dependency of another build config. _ROOT_TYPES = ('android_apk', 'deps_dex', 'java_binary', 'resource_rewriter') # Types that should not allow code deps to pass through. _RESOURCE_TYPES = ('android_assets', 'android_resources') class AndroidManifest(object): def __init__(self, path): self.path = path dom = xml.dom.minidom.parse(path) manifests = dom.getElementsByTagName('manifest') assert len(manifests) == 1 self.manifest = manifests[0] def GetInstrumentationElements(self): instrumentation_els = self.manifest.getElementsByTagName('instrumentation') if len(instrumentation_els) == 0: return None return instrumentation_els def CheckInstrumentationElements(self, expected_package): instrs = self.GetInstrumentationElements() if not instrs: raise Exception('No <instrumentation> elements found in %s' % self.path) for instr in instrs: instrumented_package = instr.getAttributeNS( 'http://schemas.android.com/apk/res/android', 'targetPackage') if instrumented_package != expected_package: raise Exception( 'Wrong instrumented package. 
Expected %s, got %s' %
            (expected_package, instrumented_package))

  def GetPackageName(self):
    return self.manifest.getAttribute('package')


dep_config_cache = {}

def GetDepConfig(path):
  if path not in dep_config_cache:
    dep_config_cache[path] = build_utils.ReadJson(path)['deps_info']
  return dep_config_cache[path]


def DepsOfType(wanted_type, configs):
  return [c for c in configs if c['type'] == wanted_type]


def GetAllDepsConfigsInOrder(deps_config_paths):
  def GetDeps(path):
    return set(GetDepConfig(path)['deps_configs'])
  return build_utils.GetSortedTransitiveDependencies(deps_config_paths,
                                                     GetDeps)


class Deps(object):
  def __init__(self, direct_deps_config_paths):
    self.all_deps_config_paths = GetAllDepsConfigsInOrder(
        direct_deps_config_paths)
    self.direct_deps_configs = [
        GetDepConfig(p) for p in direct_deps_config_paths]
    self.all_deps_configs = [
        GetDepConfig(p) for p in self.all_deps_config_paths]
    self.direct_deps_config_paths = direct_deps_config_paths

  def All(self, wanted_type=None):
    if wanted_type is None:
      return self.all_deps_configs
    return DepsOfType(wanted_type, self.all_deps_configs)

  def Direct(self, wanted_type=None):
    if wanted_type is None:
      return self.direct_deps_configs
    return DepsOfType(wanted_type, self.direct_deps_configs)

  def AllConfigPaths(self):
    return self.all_deps_config_paths

  def RemoveNonDirectDep(self, path):
    if path in self.direct_deps_config_paths:
      raise Exception('Cannot remove direct dep.')
    self.all_deps_config_paths.remove(path)
    self.all_deps_configs.remove(GetDepConfig(path))

  def GradlePrebuiltJarPaths(self):
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt'] or config['gradle_treat_as_prebuilt']:
          if config['jar_path'] not in ret:
            ret.append(config['jar_path'])

    helper(self)
    return ret

  def GradleLibraryProjectDeps(self):
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt']:
          pass
        elif config['gradle_treat_as_prebuilt']:
          helper(Deps(config['deps_configs']))
        elif config not in ret:
          ret.append(config)

    helper(self)
    return ret


def _MergeAssets(all_assets):
  """Merges all assets from the given deps.

  Returns:
    A tuple of lists: (compressed, uncompressed)
    Each tuple entry is a list of "srcPath:zipPath". srcPath is the path of
    the asset to add, and zipPath is the location within the zip (excluding
    assets/ prefix).
  """
  compressed = {}
  uncompressed = {}
  for asset_dep in all_assets:
    entry = asset_dep['assets']
    disable_compression = entry.get('disable_compression', False)
    dest_map = uncompressed if disable_compression else compressed
    other_map = compressed if disable_compression else uncompressed
    outputs = entry.get('outputs', [])
    for src, dest in itertools.izip_longest(entry['sources'], outputs):
      if not dest:
        dest = os.path.basename(src)
      # Merge so that each path shows up in only one of the lists, and that
      # deps of the same target override previous ones.
      other_map.pop(dest, 0)
      dest_map[dest] = src

  def create_list(asset_map):
    ret = ['%s:%s' % (src, dest) for dest, src in asset_map.iteritems()]
    # Sort to ensure deterministic ordering.
ret.sort() return ret return create_list(compressed), create_list(uncompressed) def _ResolveGroups(configs): """Returns a list of configs with all groups inlined.""" ret = list(configs) while True: groups = DepsOfType('group', ret) if not groups: return ret for config in groups: index = ret.index(config) expanded_configs = [GetDepConfig(p) for p in config['deps_configs']] ret[index:index + 1] = expanded_configs def _FilterDepsPaths(dep_paths, target_type): """Resolves all groups and trims dependency branches that we never want. E.g. When a resource or asset depends on an apk target, the intent is to include the .apk as a resource/asset, not to have the apk's classpath added. """ configs = [GetDepConfig(p) for p in dep_paths] configs = _ResolveGroups(configs) # Don't allow root targets to be considered as a dep. configs = [c for c in configs if c['type'] not in _ROOT_TYPES] # Don't allow java libraries to cross through assets/resources. if target_type in _RESOURCE_TYPES: configs = [c for c in configs if c['type'] in _RESOURCE_TYPES] return [c['path'] for c in configs] def _AsInterfaceJar(jar_path): return jar_path[:-3] + 'interface.jar' def _ExtractSharedLibsFromRuntimeDeps(runtime_deps_files): ret = [] for path in runtime_deps_files: with open(path) as f: for line in f: line = line.rstrip() if not line.endswith('.so'): continue # Only unstripped .so files are listed in runtime deps. # Convert to the stripped .so by going up one directory. ret.append(os.path.normpath(line.replace('lib.unstripped/', ''))) ret.reverse() return ret def _CreateJavaLibrariesList(library_paths): """Returns a java literal array with the "base" library names: e.g. libfoo.so -> foo """ return ('{%s}' % ','.join(['"%s"' % s[3:-3] for s in library_paths])) def _CreateLocalePaksAssetJavaList(assets): """Returns a java literal array from a list of assets in the form src:dst.""" names_only = (a.split(':')[1][:-4] for a in assets if a.endswith('.pak')) locales_only = (a for a in names_only if '-' in a or len(a) == 2) return '{%s}' % ','.join(sorted('"%s"' % a for a in locales_only)) def main(argv): parser = optparse.OptionParser() build_utils.AddDepfileOption(parser) parser.add_option('--build-config', help='Path to build_config output.') parser.add_option( '--type', help='Type of this target (e.g. android_library).') parser.add_option( '--deps-configs', help='List of paths for dependency\'s build_config files. 
') # android_resources options parser.add_option('--srcjar', help='Path to target\'s resources srcjar.') parser.add_option('--resources-zip', help='Path to target\'s resources zip.') parser.add_option('--r-text', help='Path to target\'s R.txt file.') parser.add_option('--package-name', help='Java package name for these resources.') parser.add_option('--android-manifest', help='Path to android manifest.') parser.add_option('--resource-dirs', action='append', default=[], help='GYP-list of resource dirs') # android_assets options parser.add_option('--asset-sources', help='List of asset sources.') parser.add_option('--asset-renaming-sources', help='List of asset sources with custom destinations.') parser.add_option('--asset-renaming-destinations', help='List of asset custom destinations.') parser.add_option('--disable-asset-compression', action='store_true', help='Whether to disable asset compression.') # java library options parser.add_option('--jar-path', help='Path to target\'s jar output.') parser.add_option('--java-sources-file', help='Path to .sources file') parser.add_option('--bundled-srcjars', help='GYP-list of .srcjars that have been included in this java_library.') parser.add_option('--supports-android', action='store_true', help='Whether this library supports running on the Android platform.') parser.add_option('--requires-android', action='store_true', help='Whether this library requires running on the Android platform.') parser.add_option('--bypass-platform-checks', action='store_true', help='Bypass checks for support/require Android platform.') parser.add_option('--extra-classpath-jars', help='GYP-list of .jar files to include on the classpath when compiling, ' 'but not to include in the final binary.') parser.add_option('--gradle-treat-as-prebuilt', action='store_true', help='Whether this library should be treated as a prebuilt library by ' 'generate_gradle.py.') parser.add_option('--main-class', help='Java class for java_binary targets.') parser.add_option('--java-resources-jar-path', help='Path to JAR that contains java resources. 
Everything ' 'from this JAR except meta-inf/ content and .class files ' 'will be added to the final APK.') parser.add_option('--bootclasspath', help='Path to custom android.jar/rt.jar') # android library options parser.add_option('--dex-path', help='Path to target\'s dex output.') # native library options parser.add_option('--shared-libraries-runtime-deps', help='Path to file containing runtime deps for shared ' 'libraries.') parser.add_option('--secondary-abi-shared-libraries-runtime-deps', help='Path to file containing runtime deps for secondary ' 'abi shared libraries.') parser.add_option('--enable-relocation-packing', help='Whether relocation packing is enabled.') # apk options parser.add_option('--apk-path', help='Path to the target\'s apk output.') parser.add_option('--incremental-apk-path', help="Path to the target's incremental apk output.") parser.add_option('--incremental-install-json-path', help="Path to the target's generated incremental install " "json.") parser.add_option('--tested-apk-config', help='Path to the build config of the tested apk (for an instrumentation ' 'test apk).') parser.add_option('--proguard-enabled', action='store_true', help='Whether proguard is enabled for this apk.') parser.add_option('--proguard-configs', help='GYP-list of proguard flag files to use in final apk.') parser.add_option('--proguard-info', help='Path to the proguard .info output for this apk.') parser.add_option('--fail', help='GYP-list of error message lines to fail with.') options, args = parser.parse_args(argv) if args: parser.error('No positional arguments should be given.') if options.fail: parser.error('\n'.join(build_utils.ParseGnList(options.fail))) required_options_map = { 'java_binary': ['build_config', 'jar_path'], 'java_library': ['build_config', 'jar_path'], 'java_prebuilt': ['build_config', 'jar_path'], 'android_assets': ['build_config'], 'android_resources': ['build_config', 'resources_zip'], 'android_apk': ['build_config', 'jar_path', 'dex_path', 'resources_zip'], 'deps_dex': ['build_config', 'dex_path'], 'dist_jar': ['build_config'], 'resource_rewriter': ['build_config'], 'group': ['build_config'], 'junit_binary': ['build_config'], } required_options = required_options_map.get(options.type) if not required_options: raise Exception('Unknown type: <%s>' % options.type) build_utils.CheckOptions(options, parser, required_options) # Java prebuilts are the same as libraries except for in gradle files. is_java_prebuilt = options.type == 'java_prebuilt' if is_java_prebuilt: options.type = 'java_library' if options.type == 'java_library': if options.supports_android and not options.dex_path: raise Exception('java_library that supports Android requires a dex path.') if options.requires_android and not options.supports_android: raise Exception( '--supports-android is required when using --requires-android') direct_deps_config_paths = build_utils.ParseGnList(options.deps_configs) direct_deps_config_paths = _FilterDepsPaths(direct_deps_config_paths, options.type) deps = Deps(direct_deps_config_paths) all_inputs = deps.AllConfigPaths() direct_library_deps = deps.Direct('java_library') all_library_deps = deps.All('java_library') direct_resources_deps = deps.Direct('android_resources') all_resources_deps = deps.All('android_resources') # Resources should be ordered with the highest-level dependency first so that # overrides are done correctly. all_resources_deps.reverse() # Initialize some common config. # Any value that needs to be queryable by dependents must go within deps_info. 
config = { 'deps_info': { 'name': os.path.basename(options.build_config), 'path': options.build_config, 'type': options.type, 'deps_configs': direct_deps_config_paths }, # Info needed only by generate_gradle.py. 'gradle': {} } deps_info = config['deps_info'] gradle = config['gradle'] if options.type == 'android_apk' and options.tested_apk_config: tested_apk_deps = Deps([options.tested_apk_config]) tested_apk_name = tested_apk_deps.Direct()[0]['name'] tested_apk_resources_deps = tested_apk_deps.All('android_resources') gradle['apk_under_test'] = tested_apk_name all_resources_deps = [ d for d in all_resources_deps if not d in tested_apk_resources_deps] # Required for generating gradle files. if options.type == 'java_library': deps_info['is_prebuilt'] = is_java_prebuilt deps_info['gradle_treat_as_prebuilt'] = options.gradle_treat_as_prebuilt if options.android_manifest: deps_info['android_manifest'] = options.android_manifest if options.type in ('java_binary', 'java_library', 'android_apk'): if options.java_sources_file: deps_info['java_sources_file'] = options.java_sources_file if options.bundled_srcjars: gradle['bundled_srcjars'] = ( build_utils.ParseGnList(options.bundled_srcjars)) else: gradle['bundled_srcjars'] = [] gradle['dependent_android_projects'] = [] gradle['dependent_java_projects'] = [] gradle['dependent_prebuilt_jars'] = deps.GradlePrebuiltJarPaths() if options.bootclasspath: gradle['bootclasspath'] = options.bootclasspath if options.main_class: gradle['main_class'] = options.main_class for c in deps.GradleLibraryProjectDeps(): if c['requires_android']: gradle['dependent_android_projects'].append(c['path']) else: gradle['dependent_java_projects'].append(c['path']) if options.type == 'android_apk': config['jni'] = {} all_java_sources = [c['java_sources_file'] for c in all_library_deps if 'java_sources_file' in c] if options.java_sources_file: all_java_sources.append(options.java_sources_file) config['jni']['all_source'] = all_java_sources if (options.type in ('java_binary', 'java_library')): deps_info['requires_android'] = options.requires_android deps_info['supports_android'] = options.supports_android if not options.bypass_platform_checks: deps_require_android = (all_resources_deps + [d['name'] for d in all_library_deps if d['requires_android']]) deps_not_support_android = ( [d['name'] for d in all_library_deps if not d['supports_android']]) if deps_require_android and not options.requires_android: raise Exception('Some deps require building for the Android platform: ' + str(deps_require_android)) if deps_not_support_android and options.supports_android: raise Exception('Not all deps support the Android platform: ' + str(deps_not_support_android)) if options.type in ('java_binary', 'java_library', 'android_apk'): deps_info['jar_path'] = options.jar_path if options.type == 'android_apk' or options.supports_android: deps_info['dex_path'] = options.dex_path if options.type == 'android_apk': deps_info['apk_path'] = options.apk_path deps_info['incremental_apk_path'] = options.incremental_apk_path deps_info['incremental_install_json_path'] = ( options.incremental_install_json_path) deps_info['enable_relocation_packing'] = options.enable_relocation_packing requires_javac_classpath = options.type in ( 'java_binary', 'java_library', 'android_apk', 'dist_jar') requires_full_classpath = ( options.type == 'java_prebuilt' or requires_javac_classpath) if requires_javac_classpath: # Classpath values filled in below (after applying tested_apk_config). 
config['javac'] = {} if options.type in ('java_binary', 'java_library'): # Only resources might have srcjars (normal srcjar targets are listed in # srcjar_deps). A resource's srcjar contains the R.java file for those # resources, and (like Android's default build system) we allow a library to # refer to the resources in any of its dependents. config['javac']['srcjars'] = [ c['srcjar'] for c in all_resources_deps if 'srcjar' in c] # Used to strip out R.class for android_prebuilt()s. if options.type == 'java_library': config['javac']['resource_packages'] = [ c['package_name'] for c in all_resources_deps if 'package_name' in c] if options.type == 'android_apk': # Apks will get their resources srcjar explicitly passed to the java step config['javac']['srcjars'] = [] # Gradle may need to generate resources for some apks. gradle['srcjars'] = [ c['srcjar'] for c in direct_resources_deps if 'srcjar' in c] if options.type == 'android_assets': all_asset_sources = [] if options.asset_renaming_sources: all_asset_sources.extend( build_utils.ParseGnList(options.asset_renaming_sources)) if options.asset_sources: all_asset_sources.extend(build_utils.ParseGnList(options.asset_sources)) deps_info['assets'] = { 'sources': all_asset_sources } if options.asset_renaming_destinations: deps_info['assets']['outputs'] = ( build_utils.ParseGnList(options.asset_renaming_destinations)) if options.disable_asset_compression: deps_info['assets']['disable_compression'] = True if options.type == 'android_resources': deps_info['resources_zip'] = options.resources_zip if options.srcjar: deps_info['srcjar'] = options.srcjar if options.android_manifest: manifest = AndroidManifest(options.android_manifest) deps_info['package_name'] = manifest.GetPackageName() if options.package_name: deps_info['package_name'] = options.package_name if options.r_text: deps_info['r_text'] = options.r_text deps_info['resources_dirs'] = [] if options.resource_dirs: for gyp_list in options.resource_dirs: deps_info['resources_dirs'].extend(build_utils.ParseGnList(gyp_list)) if options.supports_android and options.type in ('android_apk', 'java_library'): # Lint all resources that are not already linted by a dependent library. owned_resource_dirs = set() owned_resource_zips = set() for c in all_resources_deps: # Always use resources_dirs in favour of resources_zips so that lint error # messages have paths that are closer to reality (and to avoid needing to # extract during lint). 
if c['resources_dirs']: owned_resource_dirs.update(c['resources_dirs']) else: owned_resource_zips.add(c['resources_zip']) for c in all_library_deps: if c['supports_android']: owned_resource_dirs.difference_update(c['owned_resources_dirs']) owned_resource_zips.difference_update(c['owned_resources_zips']) deps_info['owned_resources_dirs'] = list(owned_resource_dirs) deps_info['owned_resources_zips'] = list(owned_resource_zips) if options.type in ( 'android_resources', 'android_apk', 'junit_binary', 'resource_rewriter'): config['resources'] = {} config['resources']['dependency_zips'] = [ c['resources_zip'] for c in all_resources_deps] config['resources']['extra_package_names'] = [] config['resources']['extra_r_text_files'] = [] if options.type == 'android_apk' or options.type == 'resource_rewriter': config['resources']['extra_package_names'] = [ c['package_name'] for c in all_resources_deps if 'package_name' in c] config['resources']['extra_r_text_files'] = [ c['r_text'] for c in all_resources_deps if 'r_text' in c] if options.type in ['android_apk', 'deps_dex']: deps_dex_files = [c['dex_path'] for c in all_library_deps] if requires_javac_classpath: javac_classpath = [c['jar_path'] for c in direct_library_deps] if requires_full_classpath: java_full_classpath = [c['jar_path'] for c in all_library_deps] if options.extra_classpath_jars: extra_jars = build_utils.ParseGnList(options.extra_classpath_jars) deps_info['extra_classpath_jars'] = extra_jars javac_classpath += extra_jars java_full_classpath += extra_jars # The java code for an instrumentation test apk is assembled differently for # ProGuard vs. non-ProGuard. # # Without ProGuard: Each library's jar is dexed separately and then combined # into a single classes.dex. A test apk will include all dex files not already # present in the apk-under-test. At runtime all test code lives in the test # apk, and the program code lives in the apk-under-test. # # With ProGuard: Each library's .jar file is fed into ProGuard, which outputs # a single .jar, which is then dexed into a classes.dex. A test apk includes # all jar files from the program and the tests because having them separate # doesn't work with ProGuard's whole-program optimizations. Although the # apk-under-test still has all of its code in its classes.dex, none of it is # used at runtime because the copy of it within the test apk takes precidence. if options.type == 'android_apk' and options.tested_apk_config: tested_apk_config = GetDepConfig(options.tested_apk_config) expected_tested_package = tested_apk_config['package_name'] AndroidManifest(options.android_manifest).CheckInstrumentationElements( expected_tested_package) if options.proguard_enabled: # Add all tested classes to the test's classpath to ensure that the test's # java code is a superset of the tested apk's java code java_full_classpath += [ jar for jar in tested_apk_config['java']['full_classpath'] if jar not in java_full_classpath] if tested_apk_config['proguard_enabled']: assert options.proguard_enabled, ('proguard must be enabled for ' 'instrumentation apks if it\'s enabled for the tested apk.') # Include in the classpath classes that are added directly to the apk under # test (those that are not a part of a java_library). javac_classpath.append(tested_apk_config['jar_path']) java_full_classpath.append(tested_apk_config['jar_path']) # Exclude dex files from the test apk that exist within the apk under test. # TODO(agrieve): When proguard is enabled, this filtering logic happens # within proguard_util.py. 
Move the logic for the proguard case into # here as well. tested_apk_library_deps = tested_apk_deps.All('java_library') tested_apk_deps_dex_files = [c['dex_path'] for c in tested_apk_library_deps] deps_dex_files = [ p for p in deps_dex_files if not p in tested_apk_deps_dex_files] if options.proguard_configs: assert options.type == 'java_library' deps_info['proguard_configs'] = ( build_utils.ParseGnList(options.proguard_configs)) if options.type == 'android_apk': deps_info['proguard_enabled'] = options.proguard_enabled deps_info['proguard_info'] = options.proguard_info config['proguard'] = {} proguard_config = config['proguard'] proguard_config['input_paths'] = [options.jar_path] + java_full_classpath extra_jars = set() lib_configs = set() for c in all_library_deps: extra_jars.update(c.get('extra_classpath_jars', ())) lib_configs.update(c.get('proguard_configs', ())) proguard_config['lib_paths'] = list(extra_jars) proguard_config['lib_configs'] = list(lib_configs) # Dependencies for the final dex file of an apk or a 'deps_dex'. if options.type in ['android_apk', 'deps_dex']: config['final_dex'] = {} dex_config = config['final_dex'] dex_config['dependency_dex_files'] = deps_dex_files if requires_javac_classpath: config['javac']['classpath'] = javac_classpath javac_interface_classpath = [ _AsInterfaceJar(p) for p in javac_classpath if p not in deps_info.get('extra_classpath_jars', [])] javac_interface_classpath += deps_info.get('extra_classpath_jars', []) config['javac']['interface_classpath'] = javac_interface_classpath if requires_full_classpath: deps_info['java'] = { 'full_classpath': java_full_classpath, } if options.type in ('android_apk', 'dist_jar'): dependency_jars = [c['jar_path'] for c in all_library_deps] all_interface_jars = [_AsInterfaceJar(p) for p in dependency_jars] if options.type == 'android_apk': all_interface_jars.append(_AsInterfaceJar(options.jar_path)) config['dist_jar'] = { 'dependency_jars': dependency_jars, 'all_interface_jars': all_interface_jars, } if options.type == 'android_apk': manifest = AndroidManifest(options.android_manifest) deps_info['package_name'] = manifest.GetPackageName() if not options.tested_apk_config and manifest.GetInstrumentationElements(): # This must then have instrumentation only for itself. 
manifest.CheckInstrumentationElements(manifest.GetPackageName()) library_paths = [] java_libraries_list = None runtime_deps_files = build_utils.ParseGnList( options.shared_libraries_runtime_deps or '[]') if runtime_deps_files: library_paths = _ExtractSharedLibsFromRuntimeDeps(runtime_deps_files) java_libraries_list = _CreateJavaLibrariesList(library_paths) secondary_abi_library_paths = [] secondary_abi_java_libraries_list = None secondary_abi_runtime_deps_files = build_utils.ParseGnList( options.secondary_abi_shared_libraries_runtime_deps or '[]') if secondary_abi_runtime_deps_files: secondary_abi_library_paths = _ExtractSharedLibsFromRuntimeDeps( secondary_abi_runtime_deps_files) secondary_abi_java_libraries_list = _CreateJavaLibrariesList( secondary_abi_library_paths) all_inputs.extend(runtime_deps_files) config['native'] = { 'libraries': library_paths, 'secondary_abi_libraries': secondary_abi_library_paths, 'java_libraries_list': java_libraries_list, 'secondary_abi_java_libraries_list': secondary_abi_java_libraries_list, } config['assets'], config['uncompressed_assets'] = ( _MergeAssets(deps.All('android_assets'))) config['compressed_locales_java_list'] = ( _CreateLocalePaksAssetJavaList(config['assets'])) config['uncompressed_locales_java_list'] = ( _CreateLocalePaksAssetJavaList(config['uncompressed_assets'])) config['extra_android_manifests'] = filter(None, ( d.get('android_manifest') for d in all_resources_deps)) # Collect java resources java_resources_jars = [d['java_resources_jar'] for d in all_library_deps if 'java_resources_jar' in d] if options.tested_apk_config: tested_apk_resource_jars = [d['java_resources_jar'] for d in tested_apk_library_deps if 'java_resources_jar' in d] java_resources_jars = [jar for jar in java_resources_jars if jar not in tested_apk_resource_jars] config['java_resources_jars'] = java_resources_jars if options.type == 'java_library' and options.java_resources_jar_path: deps_info['java_resources_jar'] = options.java_resources_jar_path build_utils.WriteJson(config, options.build_config, only_if_changed=True) if options.depfile: build_utils.WriteDepfile(options.depfile, options.build_config, all_inputs) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
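

# ---------------------------------------------------------------------------
# Simplified sketch of how the @FileArg() syntax from the module docstring is
# consumed downstream.  The real implementation is build_utils.ExpandFileArgs;
# this stand-alone helper only illustrates the lookup rule:
# "path:key1:key2" means "read the JSON at path and walk the keys key1, key2
# inside it".
# ---------------------------------------------------------------------------
def _resolve_file_arg(file_arg):
  """Resolves e.g. '@FileArg(out/foo.build_config:javac:classpath)'."""
  import json
  assert file_arg.startswith('@FileArg(') and file_arg.endswith(')')
  path_and_keys = file_arg[len('@FileArg('):-1].split(':')
  with open(path_and_keys[0]) as f:
    value = json.load(f)
  for key in path_and_keys[1:]:
    value = value[key]
  return value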
#!/usr/bin/env python __author__ = 'Michael Meisinger' from mock import Mock from nose.plugins.attrib import attr from pyon.util.int_test import IonIntegrationTestCase from pyon.util.unit_test import IonUnitTestCase from pyon.public import RT, PRED, OT, log, IonObject from ion.core.ooiref import OOIReferenceDesignator from ion.processes.bootstrap.ion_loader import TESTED_DOC, IONLoader, OOI_MAPPING_DOC from ion.processes.bootstrap.ooi_loader import OOILoader from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient class TestLoaderAlgo(IonUnitTestCase): @attr('UNIT', group='loader') def test_parse_alert_ranges(self): loader = IONLoader() out = loader._parse_alert_range('5<temp<10') self.assertEqual('<', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual('<', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) out = loader._parse_alert_range('5<=temp<10') self.assertEqual('<=', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual('<', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) out = loader._parse_alert_range('5<temp<=10') self.assertEqual('<', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual('<=', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) out = loader._parse_alert_range('5<=temp<=10') self.assertEqual('<=', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual('<=', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) out = loader._parse_alert_range('5<temp') self.assertEqual('<', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual(3, len(out), msg='value: %r'%out) self.assertEqual('temp', out['value_id']) out = loader._parse_alert_range('5<=temp') self.assertEqual('<=', out['lower_rel_op']) self.assertEqual(5, out['lower_bound']) self.assertEqual('temp', out['value_id']) self.assertEqual(3, len(out)) out = loader._parse_alert_range('temp<10') self.assertEqual('<', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) self.assertEqual(3, len(out)) out = loader._parse_alert_range('temp<=10') self.assertEqual('<=', out['upper_rel_op']) self.assertEqual(10, out['upper_bound']) self.assertEqual('temp', out['value_id']) self.assertEqual(3, len(out)) def test_get_agent_definition(self): loader = IONLoader() ooi_loader = OOILoader(None, asset_path='res/preload/r2_ioc/ooi_assets', mapping_path=OOI_MAPPING_DOC) loader.ooi_loader = ooi_loader loader.ooi_loader.extract_ooi_assets() inst_objs = ooi_loader.get_type_assets("instrument") node_objs = ooi_loader.get_type_assets("node") loader._get_resource_obj = Mock(return_value=IonObject(RT.ExternalDatasetAgent)) # for ooi_id in sorted(inst_objs): # ooi_rd = OOIReferenceDesignator(ooi_id) # agent_id, agent_obj = loader._get_agent_definition(ooi_rd) # log.info("RD: %s, agent_id: %s", ooi_id, agent_id) checks = [ # Check some mapping override cases ("CP01CNSM-MF004-03-DOSTAD999", "DART_DOSTA_D_CSTL"), ("CP01CNSM-RI003-05-FLORTD999", "DART_FLORT_D_CSTL"), ("CP02PMUO-RI001-01-ADCPSL999", "DART_ADCPS_L_CSTL"), # Check some default cases ("GA03FLMB-RI001-03-DOSTAD999", "DART_DOSTA_D"), ("GA03FLMB-RI001-01-FLORTD999", "DART_FLORT_D"), ("GA03FLMB-RI001-04-ADCPSL999", "DART_ADCPS_L"), # Check some cases without mapping (using 
default) ("GI05MOAS-GL001-01-FLORDM999", "DART_FLORD_M"), ] for ooi_id, expected_agent_id in checks: ooi_rd = OOIReferenceDesignator(ooi_id) agent_id, agent_obj = loader._get_agent_definition(ooi_rd) self.assertEquals(agent_id, expected_agent_id) TEST_PATH = TESTED_DOC class TestLoader(IonIntegrationTestCase): def setUp(self): # Start container self._start_container() self.container.start_rel_from_url('res/deploy/r2deploy.yml') self.ingestion_management = IngestionManagementServiceClient() self.rr = self.container.resource_registry def _perform_preload(self, load_cfg): #load_cfg["ui_path"] = "res/preload/r2_ioc/ui_assets" #load_cfg["path"] = "R2PreloadedResources.xlsx" #load_cfg["assetmappings"] = "OOIPreload.xlsx" self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=load_cfg) def _preload_instrument(self, inst_scenario): load_cfg = dict(op="load", scenario=inst_scenario, attachments="res/preload/r2_ioc/attachments", assets='res/preload/r2_ioc/ooi_assets', ) self._perform_preload(load_cfg) def _preload_ui(self, ui_path="default"): load_cfg = dict(op="load", loadui=True, ui_path=ui_path, ) self._perform_preload(load_cfg) def _preload_cfg(self, cfg, path=TEST_PATH): load_cfg = dict(cfg=cfg, path=path) self._perform_preload(load_cfg) def _preload_scenario(self, scenario, path=TEST_PATH, idmap=False, **kwargs): load_cfg = dict(op="load", scenario=scenario, attachments="res/preload/r2_ioc/attachments", path=path, idmap=idmap) load_cfg.update(kwargs) self._perform_preload(load_cfg) def _preload_ooi(self, path=TEST_PATH): load_cfg = dict(op="load", loadooi=True, assets="res/preload/r2_ioc/ooi_assets", path=path, ooiuntil="12/31/2013", ) self._perform_preload(load_cfg) # ------------------------------------------------------------------------- @attr('PRELOAD') def test_ui_valid(self): """ make sure UI assets are valid using DEFAULT_UI_ASSETS = 'http://userexperience.oceanobservatories.org/database-exports/Stable' """ self._preload_ui(ui_path='default') obj_list,_ = self.rr.find_resources(restype=RT.UISpec, name="ION UI Specs", id_only=False) self.assertEquals(len(obj_list), 1) @attr('PRELOAD') def test_ui_candidates_valid(self): """ make sure UI assets are valid using DEFAULT_UI_ASSETS = 'http://userexperience.oceanobservatories.org/database-exports/Candidates' """ self._preload_ui(ui_path='candidate') obj_list,_ = self.rr.find_resources(restype=RT.UISpec, name="ION UI Specs", id_only=False) self.assertEquals(len(obj_list), 1) @attr('PRELOAD') def test_betademo_valid(self): """ make sure can load asset DB """ self._preload_scenario("BETA,R2_DEMO,RSN_OMS", path=TEST_PATH) self._preload_ooi(path=TEST_PATH) # check that deployment port assignments subobject created correctly #collect a set of deployments deploy_list = [] #DEP3 of PDEV3 obj_list,_ = self.rr.find_resources(restype=RT.Deployment, name="Platform Deployment", id_only=False) deploy_list.extend(obj_list) log.debug('test_betademo_valid DEP3: %s ', obj_list) #DEP4 of PDEV4 obj_list,_ = self.rr.find_resources(restype=RT.Deployment, name="dep4", id_only=False) log.debug('test_betademo_valid DEP4: %s ', obj_list) deploy_list.extend(obj_list) self.assertEquals(len(deploy_list), 2) for dply_obj in deploy_list: for dev_id, platform_port in dply_obj.port_assignments.iteritems(): # all values in the port assignments dict should be PlatformPort objects self.assertEquals(platform_port.type_, OT.PlatformPort) @attr('PRELOAD') def test_incremental(self): """ make sure R2_DEMO scenario in master google doc is 
valid and self-contained (doesn't rely on rows from other scenarios except BETA) NOTE: test will pass/fail based on current google doc, not just code changes. """ self._preload_cfg("res/preload/r2_ioc/config/ooi_load_config.yml", path=TEST_PATH) self._preload_scenario("OOIR2_DEMO", path=TEST_PATH, idmap=True) dp_list1,_ = self.rr.find_resources(restype=RT.DataProduct, id_only=True) ia_list1,_ = self.rr.find_resources(restype=RT.InstrumentAgent, id_only=True) self._preload_cfg("res/preload/r2_ioc/config/ooi_instruments.yml", path=TEST_PATH) ia_list2,_ = self.rr.find_resources(restype=RT.InstrumentAgent, id_only=True) self.assertGreater(len(ia_list2), len(ia_list1)) dp_list2,_ = self.rr.find_resources(restype=RT.DataProduct, id_only=True) self.assertGreater(len(dp_list2), len(dp_list1)) id_list2,_ = self.rr.find_resources(restype=RT.InstrumentDevice, id_only=True) self._preload_ooi(path=TEST_PATH) dp_list3,_ = self.rr.find_resources(restype=RT.DataProduct, id_only=True) self.assertGreater(len(dp_list3), len(dp_list2)) id_list3,_ = self.rr.find_resources(restype=RT.InstrumentDevice, id_only=True) self.assertEquals(len(id_list3), len(id_list2)) self._preload_ooi(path=TEST_PATH) dp_list4,_ = self.rr.find_resources(restype=RT.DataProduct, id_only=True) self.assertEquals(len(dp_list4), len(dp_list3)) id_list4,_ = self.rr.find_resources(restype=RT.InstrumentDevice, id_only=True) self.assertEquals(len(id_list4), len(id_list3)) def find_object_by_name(self, name, resource_type): objects,_ = self.container.resource_registry.find_resources(resource_type, name=name, id_only=False) self.assertEquals(len(objects), 1) return objects[0] @attr('INT', group='loader') @attr('SMOKE', group='loader') def test_row_values(self): """ use only rows from NOSE scenario for specific names and details included in this test. rows in NOSE may rely on entries in BETA scenarios, but should not specifically test values from those scenarios. 
""" # first make sure this scenario loads successfully self._preload_scenario("BETA,NOSE") # check for ExternalDataset eds = self.find_object_by_name('Test External CTD Dataset', RT.ExternalDataset) edm1 = self.find_object_by_name('Test External CTD Dataset Model', RT.ExternalDatasetModel) edm2,_ = self.container.resource_registry.find_objects(eds._id, PRED.hasModel, RT.ExternalDatasetModel, True) self.assertEquals(edm1._id, edm2[0]) inst = self.find_object_by_name('Test External CTD Agent Instance', RT.ExternalDatasetAgentInstance) self.assertEquals('value1', inst.driver_config['key1'], msg='driver_config[key1] is not value1:\n%r' % inst.driver_config) # check for an Org org = self.find_object_by_name('CASPER', RT.Org) self.assertFalse(org.contacts is None) self.assertEquals('Userbrough', org.contacts[0].individual_name_family) self.assertEquals('primary', org.contacts[0].roles[0]) # check data product dp = self.find_object_by_name('Test DP L0 CTD', RT.DataProduct) # should be persisted streams, _ = self.container.resource_registry.find_objects(dp._id, PRED.hasStream, RT.Stream, True) self.assertTrue(streams) self.assertEquals(1, len(streams)) self.assertTrue(self.ingestion_management.is_persisted(streams[0])) self.assertAlmostEqual(32.88237, dp.geospatial_bounds.geospatial_latitude_limit_north,places=3) # but L1 data product should not be persisted dp = self.find_object_by_name('Test DP L1 conductivity', RT.DataProduct) streams, _ = self.container.resource_registry.find_objects(dp._id, PRED.hasStream, RT.Stream, True) self.assertEquals(1, len(streams)) self.assertTrue(streams) self.assertFalse(self.ingestion_management.is_persisted(streams[0])) site = self.find_object_by_name('Test Instrument Site', RT.InstrumentSite) self.assertFalse(site.constraint_list is None) self.assertEquals(2, len(site.constraint_list)) con = site.constraint_list[0] self.assertAlmostEqual( 32.88237, con.geospatial_latitude_limit_north, places=3) self.assertAlmostEqual(-117.23214, con.geospatial_longitude_limit_east, places=3) con = site.constraint_list[1] self.assertEquals('TemporalBounds', con.type_) # check that coordinate system was loaded self.assertFalse(site.coordinate_reference_system is None) # check that InstrumentDevice contacts are loaded dev = self.find_object_by_name('Unit Test SMB37', RT.InstrumentDevice) self.assertTrue(len(dev.contacts)==2) self.assertEquals('Userbrough', dev.contacts[0].individual_name_family) # check has attachments attachments = self.container.resource_registry.find_attachments(dev._id) self.assertTrue(len(attachments)>0) # check for platform agents agent = self.find_object_by_name('Unit Test Platform Agent', RT.PlatformAgent) self.assertEquals(2, len(agent.stream_configurations)) parsed = agent.stream_configurations[1] # self.assertEquals('platform_eng_parsed', parsed.parameter_dictionary_name) self.assertEquals('ctd_parsed_param_dict', parsed.parameter_dictionary_name) # OBSOLETE: check that alarm was added to StreamConfig # self.assertEquals(1, len(parsed.alarms), msg='alarms: %r'%parsed.alarms) # self.assertEquals('temp', parsed.alarms[0]['kwargs']['value_id']) # check for platform agents self.find_object_by_name('Unit Test Platform Agent Instance', RT.PlatformAgentInstance) # check for platform model boolean values model = self.find_object_by_name('Nose Testing Platform Model', RT.PlatformModel) self.assertEquals(True, model.shore_networked) self.assertNotEqual('str', model.shore_networked.__class__.__name__) iai = self.find_object_by_name("Test InstrumentAgentInstance", 
RT.InstrumentAgentInstance) self.assertEqual({'SCHEDULER': {'VERSION': {'number': 3.0}, 'CLOCK_SYNC': 48.2, 'ACQUIRE_STATUS': {}}, 'PARAMETERS': {"TXWAVESTATS": False, 'TXWAVEBURST': 'false', 'TXREALTIME': True}}, iai.startup_config) self.assertEqual(2, len(iai.alerts)) pai = self.find_object_by_name("Unit Test Platform Agent Instance", RT.PlatformAgentInstance) self.assertEqual(1, len(pai.alerts)) self.assertTrue(pai.agent_config.has_key('platform_config')) log.debug('test_row_values PlatformAgentInstance driver_config: %s ', pai.driver_config) self.assertTrue(pai.driver_config.has_key('oms_uri')) oms_uri = pai.driver_config['oms_uri'] log.debug('test_row_values PlatformAgentInstance oms_uri: %s ', oms_uri) self.assertEquals('http://alice:1234@10.180.80.10:9021/', oms_uri) orgs, _ = self.container.resource_registry.find_subjects(RT.Org, PRED.hasResource, iai._id, True) self.assertEqual(1, len(orgs)) self.assertEqual(org._id, orgs[0]) entries ,_ = self.container.resource_registry.find_resources(RT.SchedulerEntry, id_only=False) self.assertGreaterEqual(len(entries), 1) @attr('PRELOAD') def test_alpha_valid(self): """ make sure R2_DEMO scenario in master google doc is valid and self-contained (doesn't rely on rows from other scenarios except BETA) NOTE: test will pass/fail based on current google doc, not just code changes. """ self._preload_cfg("res/preload/r2_ioc/config/ooi_alpha.yml", path=TEST_PATH) @attr('PRELOAD') def test_beta_valid(self): """ make sure R2_DEMO scenario in master google doc is valid and self-contained (doesn't rely on rows from other scenarios except BETA) NOTE: test will pass/fail based on current google doc, not just code changes. """ self._preload_cfg("res/preload/r2_ioc/config/ooi_beta.yml", path=TEST_PATH) failure_list = [] def add_failure(res_obj, msg): fail_msg = "%s[%s/%s]: %s" % (res_obj.type_, res_obj._id, res_obj.name, msg) failure_list.append(fail_msg) log.warn("Starting preload assertions now") res_objs, res_keys = self.rr.find_resources_ext(alt_id_ns="PRE", id_only=False) log.info("Found %s preloaded resources", len(res_objs)) dp_objs = [res for res in res_objs if res.type_ == RT.DataProduct] log.info("Checking %s DataProducts", len(dp_objs)) for dp in dp_objs: pass # Reenable this when we have geospatial coordinates for PNs #if not all([dp.geospatial_bounds.geospatial_latitude_limit_north, # dp.geospatial_bounds.geospatial_latitude_limit_south, # dp.geospatial_bounds.geospatial_longitude_limit_east, # dp.geospatial_bounds.geospatial_longitude_limit_west]): # add_failure(dp, "geospatial_bounds location invalid: %s" % dp.geospatial_bounds) #if not all([dp.geospatial_bounds.geospatial_vertical_min, # dp.geospatial_bounds.geospatial_vertical_max]): # add_failure(dp, "geospatial_bounds vertical invalid: %s" % dp.geospatial_bounds) if failure_list: fail_msg = "Preload assertions violated:\n" + "\n".join(f for f in failure_list) self.fail(fail_msg)
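# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above): test_parse_alert_ranges
# exercises IONLoader._parse_alert_range, which turns expressions such as
# "5<temp<10" into a dict of bounds and relational operators.  The helper below
# is a hypothetical standalone re-implementation showing the mapping the tests
# assert; the name parse_alert_range_sketch, the regex, and the float coercion
# are assumptions and may differ from the real loader code.
import re

_ALERT_RANGE_RE = re.compile(
    r'^(?:(?P<lower_bound>[0-9.]+)\s*(?P<lower_rel_op><=|<)\s*)?'
    r'(?P<value_id>[A-Za-z_]\w*)'
    r'(?:\s*(?P<upper_rel_op><=|<)\s*(?P<upper_bound>[0-9.]+))?$')

def parse_alert_range_sketch(expr):
    """Return a dict shaped like the one asserted in test_parse_alert_ranges."""
    match = _ALERT_RANGE_RE.match(expr.strip())
    if not match:
        raise ValueError("unparseable alert range: %r" % expr)
    out = {'value_id': match.group('value_id')}
    if match.group('lower_bound') is not None:
        out['lower_bound'] = float(match.group('lower_bound'))
        out['lower_rel_op'] = match.group('lower_rel_op')
    if match.group('upper_bound') is not None:
        out['upper_bound'] = float(match.group('upper_bound'))
        out['upper_rel_op'] = match.group('upper_rel_op')
    return out

# Example: parse_alert_range_sketch('5<=temp<10') ->
# {'value_id': 'temp', 'lower_bound': 5.0, 'lower_rel_op': '<=',
#  'upper_bound': 10.0, 'upper_rel_op': '<'}
# One-sided expressions such as '5<temp' yield only three keys, matching the
# len(out) == 3 assertions above.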
#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (c) 2010-2011 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ TOKEN-BASED AUTH MIDDLEWARE FOR SWIFT Authentication on incoming request * grab token from X-Auth-Token header * TODO: grab the memcache servers from the request env * TODOcheck for auth information in memcache * check for auth information from keystone * return if unauthorized * decorate the request for authorization in swift * forward to the swift proxy app Authorization via callback * check the path and extract the tenant * get the auth information stored in keystone.identity during authentication * TODO: check if the user is an account admin or a reseller admin * determine what object-type to authorize (account, container, object) * use knowledge of tenant, admin status, and container acls to authorize """ import json from urlparse import urlparse from webob.exc import HTTPUnauthorized, HTTPNotFound, HTTPExpectationFailed from keystone.common.bufferedhttp import http_connect_raw as http_connect from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed from swift.common.utils import cache_from_env, get_logger, split_path PROTOCOL_NAME = "Swift Token Authentication" class AuthProtocol(object): """Handles authenticating and aurothrizing client calls. Add to your pipeline in paste config like: [pipeline:main] pipeline = catch_errors healthcheck cache keystone proxy-server [filter:keystone] use = egg:keystone#swiftauth keystone_url = http://127.0.0.1:8080 keystone_admin_token = 999888777666 """ def __init__(self, app, conf): """Store valuable bits from the conf and set up logging.""" self.app = app self.keystone_url = urlparse(conf.get('keystone_url')) self.admin_token = conf.get('keystone_admin_token') self.reseller_prefix = conf.get('reseller_prefix', 'AUTH') self.log = get_logger(conf, log_route='keystone') self.log.info('Keystone middleware started') def __call__(self, env, start_response): """Authenticate the incoming request. If authentication fails return an appropriate http status here, otherwise forward through the rest of the app. """ self.log.debug('Keystone middleware called') token = self._get_claims(env) self.log.debug('token: %s', token) if token: identity = self._validate_claims(token) if identity: self.log.debug('request authenticated: %r', identity) return self.perform_authenticated_request(identity, env, start_response) else: self.log.debug('anonymous request') return self.unauthorized_request(env, start_response) self.log.debug('no auth token in request headers') return self.perform_unidentified_request(env, start_response) def unauthorized_request(self, env, start_response): """Clinet provided a token that wasn't acceptable, error out.""" return HTTPUnauthorized()(env, start_response) def unauthorized(self, req): """Return unauthorized given a webob Request object. This can be stuffed into the evironment for swift.authorize or called from the authoriztion callback when authorization fails. 
""" return HTTPUnauthorized(request=req) def perform_authenticated_request(self, identity, env, start_response): """Client provieded a valid identity, so use it for authorization.""" env['keystone.identity'] = identity env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl self.log.debug('calling app: %s // %r', start_response, env) rv = self.app(env, start_response) self.log.debug('return from app: %r', rv) return rv def perform_unidentified_request(self, env, start_response): """Withouth authentication data, use acls for access control.""" env['swift.authorize'] = self.authorize_via_acl env['swift.clean_acl'] = self.authorize_via_acl return self.app(env, start_response) def authorize(self, req): """Used when we have a valid identity from keystone.""" self.log.debug('keystone middleware authorization begin') env = req.environ tenant = env.get('keystone.identity', {}).get('tenant') if not tenant: self.log.warn('identity info not present in authorize request') return HTTPExpectationFailed('Unable to locate auth claim', request=req) # TODO(todd): everyone under a tenant can do anything to that tenant. # more realistic would be role/group checking to do things # like deleting the account or creating/deleting containers # esp. when owned by other users in the same tenant. if req.path.startswith('/v1/%s_%s' % (self.reseller_prefix, tenant)): self.log.debug('AUTHORIZED OKAY') return None self.log.debug('tenant mismatch: %r', tenant) return self.unauthorized(req) def authorize_via_acl(self, req): """Anon request handling. For now this only allows anon read of objects. Container and account actions are prohibited. """ self.log.debug('authorizing anonymous request') try: version, account, container, obj = split_path(req.path, 1, 4, True) except ValueError: return HTTPNotFound(request=req) if obj: return self._authorize_anon_object(req, account, container, obj) if container: return self._authorize_anon_container(req, account, container) if account: return self._authorize_anon_account(req, account) return self._authorize_anon_toplevel(req) def _authorize_anon_object(self, req, account, container, obj): referrers, groups = parse_acl(getattr(req, 'acl', None)) if referrer_allowed(req.referer, referrers): self.log.debug('anonymous request AUTHORIZED OKAY') return None return self.unauthorized(req) def _authorize_anon_container(self, req, account, container): return self.unauthorized(req) def _authorize_anon_account(self, req, account): return self.unauthorized(req) def _authorize_anon_toplevel(self, req): return self.unauthorized(req) def _get_claims(self, env): claims = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN')) return claims def _validate_claims(self, claims): """Ask keystone (as keystone admin) for information for this user.""" # TODO(todd): cache self.log.debug('Asking keystone to validate token') headers = {"Content-type": "application/json", "Accept": "text/json", "X-Auth-Token": self.admin_token} self.log.debug('headers: %r', headers) self.log.debug('url: %s', self.keystone_url) conn = http_connect(self.keystone_url.hostname, self.keystone_url.port, 'GET', '/v2.0/tokens/%s' % claims, headers=headers) resp = conn.getresponse() data = resp.read() conn.close() # Check http status code for the "OK" family of responses if not str(resp.status).startswith('20'): return False identity_info = json.loads(data) roles = [] role_refs = identity_info["access"]["user"]["roleRefs"] if role_refs is not None: for role_ref in role_refs: roles.append(role_ref["roleId"]) try: 
            tenant = identity_info['access']['token']['tenantId']
        except (KeyError, TypeError):
            tenant = None
        if not tenant:
            tenant = identity_info['access']['user']['tenantId']
        # TODO(Ziad): add groups back in
        identity = {'user': identity_info['access']['user']['username'],
                    'tenant': tenant,
                    'roles': roles}
        return identity


def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_filter(app):
        return AuthProtocol(app, conf)
    return auth_filter
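# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not shipped with the middleware above): it shows
# the identity dict that _validate_claims() derives from a Keystone v2.0-style
# token response, using only the fields the middleware actually reads
# (roleRefs, token.tenantId with a fallback to user.tenantId, username).  The
# sample payload and the helper name are assumptions made for this example.
import json

SAMPLE_KEYSTONE_RESPONSE = json.dumps({
    "access": {
        "user": {"username": "alice", "tenantId": "tenant1",
                 "roleRefs": [{"roleId": "Member"}]},
        "token": {"tenantId": "tenant1"},
    }
})

def identity_from_response_sketch(raw_body):
    """Mirror the parsing done at the end of _validate_claims()."""
    info = json.loads(raw_body)
    roles = [ref["roleId"] for ref in info["access"]["user"].get("roleRefs") or []]
    token = info["access"].get("token") or {}
    tenant = token.get("tenantId") or info["access"]["user"].get("tenantId")
    return {"user": info["access"]["user"]["username"],
            "tenant": tenant,
            "roles": roles}

# identity_from_response_sketch(SAMPLE_KEYSTONE_RESPONSE) ->
# {'user': 'alice', 'tenant': 'tenant1', 'roles': ['Member']}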
import sys import nose from nose.tools import assert_raises from mpl_toolkits.mplot3d import Axes3D, axes3d from matplotlib import cm from matplotlib.testing.decorators import image_comparison, cleanup import matplotlib.pyplot as plt import numpy as np @image_comparison(baseline_images=['bar3d'], remove_text=True) def test_bar3d(): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]): xs = np.arange(20) ys = np.arange(20) cs = [c] * len(xs) cs[0] = 'c' ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8) @image_comparison(baseline_images=['contour3d'], remove_text=True) def test_contour3d(): fig = plt.figure() ax = fig.gca(projection='3d') X, Y, Z = axes3d.get_test_data(0.05) cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm) ax.set_xlim(-40, 40) ax.set_ylim(-40, 40) ax.set_zlim(-100, 100) @image_comparison(baseline_images=['contourf3d'], remove_text=True) def test_contourf3d(): fig = plt.figure() ax = fig.gca(projection='3d') X, Y, Z = axes3d.get_test_data(0.05) cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm) cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm) cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm) ax.set_xlim(-40, 40) ax.set_ylim(-40, 40) ax.set_zlim(-100, 100) @image_comparison(baseline_images=['contourf3d_fill'], remove_text=True) def test_contourf3d_fill(): fig = plt.figure() ax = fig.gca(projection='3d') X, Y = np.meshgrid(np.arange(-2, 2, 0.25), np.arange(-2, 2, 0.25)) Z = X.clip(0, 0) # This produces holes in the z=0 surface that causes rendering errors if # the Poly3DCollection is not aware of path code information (issue #4784) Z[::5, ::5] = 0.1 cset = ax.contourf(X, Y, Z, offset=0, levels=[-0.1, 0], cmap=cm.coolwarm) ax.set_xlim(-2, 2) ax.set_ylim(-2, 2) ax.set_zlim(-1, 1) @image_comparison(baseline_images=['lines3d'], remove_text=True) def test_lines3d(): fig = plt.figure() ax = fig.gca(projection='3d') theta = np.linspace(-4 * np.pi, 4 * np.pi, 100) z = np.linspace(-2, 2, 100) r = z ** 2 + 1 x = r * np.sin(theta) y = r * np.cos(theta) ax.plot(x, y, z) @image_comparison(baseline_images=['mixedsubplot'], remove_text=True) def test_mixedsubplots(): def f(t): s1 = np.cos(2*np.pi*t) e1 = np.exp(-t) return np.multiply(s1, e1) t1 = np.arange(0.0, 5.0, 0.1) t2 = np.arange(0.0, 5.0, 0.02) fig = plt.figure(figsize=plt.figaspect(2.)) ax = fig.add_subplot(2, 1, 1) l = ax.plot(t1, f(t1), 'bo', t2, f(t2), 'k--', markerfacecolor='green') ax.grid(True) ax = fig.add_subplot(2, 1, 2, projection='3d') X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25)) R = np.sqrt(X ** 2 + Y ** 2) Z = np.sin(R) surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False) ax.set_zlim3d(-1, 1) @image_comparison(baseline_images=['scatter3d'], remove_text=True) def test_scatter3d(): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(np.arange(10), np.arange(10), np.arange(10), c='r', marker='o') ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20), c='b', marker='^') @image_comparison(baseline_images=['surface3d'], remove_text=True) def test_surface3d(): fig = plt.figure() ax = fig.gca(projection='3d') X = np.arange(-5, 5, 0.25) Y = np.arange(-5, 5, 0.25) X, Y = np.meshgrid(X, Y) R = np.sqrt(X ** 2 + Y ** 2) Z = np.sin(R) surf = ax.plot_surface(X, Y, Z, rstride=1, 
cstride=1, cmap=cm.coolwarm, lw=0, antialiased=False) ax.set_zlim(-1.01, 1.01) fig.colorbar(surf, shrink=0.5, aspect=5) @image_comparison(baseline_images=['text3d']) def test_text3d(): fig = plt.figure() ax = fig.gca(projection='3d') zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1)) xs = (2, 6, 4, 9, 7, 2) ys = (6, 4, 8, 7, 2, 2) zs = (4, 2, 5, 6, 1, 7) for zdir, x, y, z in zip(zdirs, xs, ys, zs): label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir) ax.text(x, y, z, label, zdir) ax.text(1, 1, 1, "red", color='red') ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes) ax.set_xlim3d(0, 10) ax.set_ylim3d(0, 10) ax.set_zlim3d(0, 10) ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.set_zlabel('Z axis') @image_comparison(baseline_images=['trisurf3d'], remove_text=True) def test_trisurf3d(): n_angles = 36 n_radii = 8 radii = np.linspace(0.125, 1.0, n_radii) angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False) angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) angles[:, 1::2] += np.pi/n_angles x = np.append(0, (radii*np.cos(angles)).flatten()) y = np.append(0, (radii*np.sin(angles)).flatten()) z = np.sin(-x*y) fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2) @image_comparison(baseline_images=['wireframe3d'], remove_text=True) def test_wireframe3d(): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') X, Y, Z = axes3d.get_test_data(0.05) ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) @image_comparison(baseline_images=['wireframe3dzerocstride'], remove_text=True, extensions=['png']) def test_wireframe3dzerocstride(): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') X, Y, Z = axes3d.get_test_data(0.05) ax.plot_wireframe(X, Y, Z, rstride=10, cstride=0) @image_comparison(baseline_images=['wireframe3dzerorstride'], remove_text=True, extensions=['png']) def test_wireframe3dzerorstride(): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') X, Y, Z = axes3d.get_test_data(0.05) ax.plot_wireframe(X, Y, Z, rstride=0, cstride=10) @cleanup def test_wireframe3dzerostrideraises(): if sys.version_info[:2] < (2, 7): raise nose.SkipTest("assert_raises as context manager " "not supported with Python < 2.7") fig = plt.figure() ax = fig.add_subplot(111, projection='3d') X, Y, Z = axes3d.get_test_data(0.05) with assert_raises(ValueError): ax.plot_wireframe(X, Y, Z, rstride=0, cstride=0) @image_comparison(baseline_images=['quiver3d'], remove_text=True) def test_quiver3d(): fig = plt.figure() ax = fig.gca(projection='3d') x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j] u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z) v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z) w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)) ax.quiver(x, y, z, u, v, w, length=0.1) @image_comparison(baseline_images=['quiver3d_empty'], remove_text=True) def test_quiver3d_empty(): fig = plt.figure() ax = fig.gca(projection='3d') x, y, z = np.ogrid[-1:0.8:0j, -1:0.8:0j, -1:0.6:0j] u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z) v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z) w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)) ax.quiver(x, y, z, u, v, w, length=0.1) @image_comparison(baseline_images=['quiver3d_masked'], remove_text=True) def test_quiver3d_masked(): fig = plt.figure() ax = fig.gca(projection='3d') # Using mgrid here instead of ogrid because masked_where doesn't # seem to like broadcasting very much... 
x, y, z = np.mgrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j] u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z) v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z) w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)) u = np.ma.masked_where((-0.4 < x) & (x < 0.1), u, copy=False) v = np.ma.masked_where((0.1 < y) & (y < 0.7), v, copy=False) ax.quiver(x, y, z, u, v, w, length=0.1) @image_comparison(baseline_images=['quiver3d_pivot_middle'], remove_text=True, extensions=['png']) def test_quiver3d_pivot_middle(): fig = plt.figure() ax = fig.gca(projection='3d') x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j] u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z) v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z) w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)) ax.quiver(x, y, z, u, v, w, length=0.1, pivot='middle') @image_comparison(baseline_images=['quiver3d_pivot_tail'], remove_text=True, extensions=['png']) def test_quiver3d_pivot_tail(): fig = plt.figure() ax = fig.gca(projection='3d') x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j] u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z) v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z) w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)) ax.quiver(x, y, z, u, v, w, length=0.1, pivot='tail') @image_comparison(baseline_images=['axes3d_labelpad'], extensions=['png']) def test_axes3d_labelpad(): from nose.tools import assert_equal from matplotlib import rcParams fig = plt.figure() ax = Axes3D(fig) # labelpad respects rcParams assert_equal(ax.xaxis.labelpad, rcParams['axes.labelpad']) # labelpad can be set in set_label ax.set_xlabel('X LABEL', labelpad=10) assert_equal(ax.xaxis.labelpad, 10) ax.set_ylabel('Y LABEL') ax.set_zlabel('Z LABEL') # or manually ax.yaxis.labelpad = 20 ax.zaxis.labelpad = -40 # Tick labels also respect tick.pad (also from rcParams) for i, tick in enumerate(ax.yaxis.get_major_ticks()): tick.set_pad(tick.get_pad() - i * 5) @image_comparison(baseline_images=['axes3d_cla'], extensions=['png']) def test_axes3d_cla(): # fixed in pull request 4553 fig = plt.figure() ax = fig.add_subplot(1,1,1, projection='3d') ax.set_axis_off() ax.cla() # make sure the axis displayed is 3D (not 2D) if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
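# ---------------------------------------------------------------------------
# Standalone sketch (not one of the image-comparison tests above): the same
# Axes3D surface-plot pattern the tests exercise, written so it can run
# outside the nose/baseline-image harness and save a file instead of being
# compared against a baseline image.  The function name and output filename
# are assumptions made for this example only.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

def demo_surface3d_png(out_path='surface3d_demo.png'):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25))
    Z = np.sin(np.sqrt(X ** 2 + Y ** 2))
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                           cmap=cm.coolwarm, linewidth=0, antialiased=False)
    ax.set_zlim(-1.01, 1.01)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    fig.savefig(out_path)
    return out_path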
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import glance.common.exception as glance_exception from nova.tests.api.openstack import fakes from nova import context from nova import exception from nova.image import glance from nova import test from nova.tests.glance import stubs as glance_stubs class NullWriter(object): """Used to test ImageService.get which takes a writer object""" def write(self, *arg, **kwargs): pass class TestGlanceSerializer(test.TestCase): def test_serialize(self): metadata = {'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': [ {'virtual': 'aaa', 'device': 'bbb'}, {'virtual': 'xxx', 'device': 'yyy'}], 'block_device_mapping': [ {'virtual_device': 'fake', 'device_name': '/dev/fake'}, {'virtual_device': 'ephemeral0', 'device_name': '/dev/fake0'}]}} converted_expected = { 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': '[{"device": "bbb", "virtual": "aaa"}, ' '{"device": "yyy", "virtual": "xxx"}]', 'block_device_mapping': '[{"virtual_device": "fake", "device_name": "/dev/fake"}, ' '{"virtual_device": "ephemeral0", ' '"device_name": "/dev/fake0"}]'}} converted = glance._convert_to_string(metadata) self.assertEqual(converted, converted_expected) self.assertEqual(glance._convert_from_string(converted), metadata) class TestGlanceImageService(test.TestCase): """ Tests the Glance image service. At a high level, the translations involved are: 1. Glance -> ImageService - This is needed so we can support multple ImageServices (Glance, Local, etc) 2. 
ImageService -> API - This is needed so we can support multple APIs (OpenStack, EC2) """ NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22) def setUp(self): super(TestGlanceImageService, self).setUp() fakes.stub_out_compute_api_snapshot(self.stubs) client = glance_stubs.StubGlanceClient() self.service = glance.GlanceImageService(client=client) self.context = context.RequestContext('fake', 'fake', auth_token=True) self.service.delete_all() @staticmethod def _make_fixture(**kwargs): fixture = {'name': None, 'properties': {}, 'status': None, 'is_public': None} fixture.update(kwargs) return fixture def _make_datetime_fixture(self): return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, updated_at=self.NOW_GLANCE_FORMAT, deleted_at=self.NOW_GLANCE_FORMAT) def test_create_with_instance_id(self): """Ensure instance_id is persisted as an image-property""" fixture = {'name': 'test image', 'is_public': False, 'properties': {'instance_id': '42', 'user_id': 'fake'}} image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'test image', 'is_public': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {'instance_id': '42', 'user_id': 'fake'}, } self.assertDictMatch(image_meta, expected) image_metas = self.service.detail(self.context) self.assertDictMatch(image_metas[0], expected) def test_create_without_instance_id(self): """ Ensure we can create an image without having to specify an instance_id. Public images are an example of an image not tied to an instance. 
""" fixture = {'name': 'test image', 'is_public': False} image_id = self.service.create(self.context, fixture)['id'] expected = { 'id': image_id, 'name': 'test image', 'is_public': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, } actual = self.service.show(self.context, image_id) self.assertDictMatch(actual, expected) def test_create(self): fixture = self._make_fixture(name='test image') num_images = len(self.service.index(self.context)) image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) self.assertEquals(num_images + 1, len(self.service.index(self.context))) def test_create_and_show_non_existing_image(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) self.assertRaises(exception.ImageNotFound, self.service.show, self.context, 'bad image id') def test_create_and_show_non_existing_image_by_name(self): self.assertRaises(exception.ImageNotFound, self.service.show_by_name, self.context, 'bad image id') def test_index(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.index(self.context) expected = [{'id': image_id, 'name': 'test image'}] self.assertDictListMatch(image_metas, expected) def test_index_default_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.index(self.context) i = 0 for meta in image_metas: expected = {'id': 'DONTCARE', 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 def test_index_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.index(self.context, marker=ids[1]) self.assertEquals(len(image_metas), 8) i = 2 for meta in image_metas: expected = {'id': 'DONTCARE', 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 def test_index_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.index(self.context, limit=5) self.assertEquals(len(image_metas), 5) def test_index_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.index(self.context, marker=ids[3], limit=1) self.assertEquals(len(image_metas), 1) i = 4 for meta in image_metas: expected = {'id': ids[i], 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 def test_index_invalid_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) self.assertRaises(exception.Invalid, self.service.index, self.context, marker='invalidmarker') def test_index_private_image(self): fixture = 
self._make_fixture(name='test image') fixture['is_public'] = False properties = {'owner_id': 'proj1'} fixture['properties'] = properties image_id = self.service.create(self.context, fixture)['id'] proj = self.context.project_id self.context.project_id = 'proj1' image_metas = self.service.index(self.context) self.context.project_id = proj expected = [{'id': 'DONTCARE', 'name': 'test image'}] self.assertDictListMatch(image_metas, expected) def test_detail_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[1]) self.assertEquals(len(image_metas), 8) i = 2 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None } self.assertDictMatch(meta, expected) i = i + 1 def test_detail_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, limit=5) self.assertEquals(len(image_metas), 5) def test_detail_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[3], limit=5) self.assertEquals(len(image_metas), 5) i = 4 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None } self.assertDictMatch(meta, expected) i = i + 1 def test_detail_invalid_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) self.assertRaises(exception.Invalid, self.service.detail, self.context, marker='invalidmarker') def test_update(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) self.assertEquals('new image name', new_image_data['name']) def test_delete(self): fixture1 = self._make_fixture(name='test image 1') fixture2 = self._make_fixture(name='test image 2') fixtures = [fixture1, fixture2] num_images = len(self.service.index(self.context)) self.assertEquals(0, num_images, str(self.service.index(self.context))) ids = [] for fixture in fixtures: new_id = self.service.create(self.context, fixture)['id'] ids.append(new_id) num_images = len(self.service.index(self.context)) self.assertEquals(2, num_images, str(self.service.index(self.context))) self.service.delete(self.context, ids[0]) num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) def test_delete_not_by_owner(self): # 
this test is only relevant for deprecated auth mode self.flags(auth_strategy='deprecated') fixture = self._make_fixture(name='test image') properties = {'project_id': 'proj1'} fixture['properties'] = properties num_images = len(self.service.index(self.context)) self.assertEquals(0, num_images) image_id = self.service.create(self.context, fixture)['id'] num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) proj_id = self.context.project_id self.context.project_id = 'proj2' self.assertRaises(exception.NotAuthorized, self.service.delete, self.context, image_id) self.context.project_id = proj_id num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) def test_show_passes_through_to_client(self): fixture = self._make_fixture(name='image1', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image1', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, } self.assertEqual(image_meta, expected) def test_show_raises_when_no_authtoken_in_the_context(self): fixture = self._make_fixture(name='image1', is_public=False, properties={'one': 'two'}) image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_show_raises_on_missing_credential(self): def raise_missing_credentials(*args, **kwargs): raise glance_exception.MissingCredentialError() self.stubs.Set(glance_stubs.StubGlanceClient, 'get_image_meta', raise_missing_credentials) self.assertRaises(exception.ImageNotAuthorized, self.service.show, self.context, 'test-image-id') def test_detail_passes_through_to_client(self): fixture = self._make_fixture(name='image10', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.detail(self.context) expected = [ { 'id': image_id, 'name': 'image10', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, }, ] self.assertEqual(image_metas, expected) def test_show_makes_datetimes(self): fixture = self._make_datetime_fixture() image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) def test_detail_makes_datetimes(self): fixture = self._make_datetime_fixture() self.service.create(self.context, fixture) image_meta = self.service.detail(self.context)[0] self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) def test_get_makes_datetimes(self): fixture = self._make_datetime_fixture() image_id = self.service.create(self.context, fixture)['id'] writer = NullWriter() image_meta = self.service.get(self.context, image_id, writer) self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) def test_get_with_retries(self): tries = [0] class 
GlanceBusyException(Exception): pass class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get_image(self, image_id): if tries[0] == 0: tries[0] = 1 raise GlanceBusyException() else: return {}, [] client = MyGlanceStubClient() service = glance.GlanceImageService(client=client) image_id = 1 # doesn't matter writer = NullWriter() # When retries are disabled, we should get an exception self.flags(glance_num_retries=0) self.assertRaises(GlanceBusyException, service.get, self.context, image_id, writer) # Now lets enable retries. No exception should happen now. self.flags(glance_num_retries=1) service.get(self.context, image_id, writer) def test_client_raises_forbidden(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get_image(self, image_id): raise glance_exception.Forbidden() client = MyGlanceStubClient() service = glance.GlanceImageService(client=client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.get, self.context, image_id, writer) def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] client, same_id = glance.get_glance_client(self.context, image_id) self.assertEquals(same_id, image_id) def test_glance_client_image_ref(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_url = 'http://foo/%s' % image_id client, same_id = glance.get_glance_client(self.context, image_url) self.assertEquals(same_id, image_id)
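# ---------------------------------------------------------------------------
# Illustrative sketch (not nova code): the retry behaviour exercised by
# test_get_with_retries above, reduced to a standalone helper.  The names
# get_with_retries_sketch and TransientGlanceError are assumptions made for
# this example only.
class TransientGlanceError(Exception):
    pass

def get_with_retries_sketch(fetch, num_retries=0):
    """Call fetch(); re-raise only after num_retries additional attempts."""
    attempts = num_retries + 1
    for attempt in range(attempts):
        try:
            return fetch()
        except TransientGlanceError:
            if attempt == attempts - 1:
                raise

# With num_retries=0 a single failure propagates, mirroring the
# self.flags(glance_num_retries=0) assertion; with num_retries=1 a client
# that fails once and then succeeds returns normally, mirroring
# self.flags(glance_num_retries=1).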
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2014, DataCanvasIO # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are # permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of # conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list # of conditions and the following disclaimer in the documentation and/or other # materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be # used to endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT # SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. """ A minimum spec.json parser. """ __version__ = "0.2.9" __author__ = "xiaolin" import json from collections import namedtuple import types import os import sys import time import itertools import subprocess import re def gettype(name): type_map = { "string" : "str", "integer" : "int", "float" : "float", "enum" : "str", "file" : "str" } if name not in type_map: raise ValueError(name) t = __builtins__.get(type_map[name], types.StringType) if isinstance(t, type): return t raise ValueError(name) def read_whole_file(filename): with open(filename, "r") as f: return f.read() class Input(str): def __new__(self, x, _types): return str.__new__(self, x) def __init__(self, x, _types): self.x = x self._types = _types def __repr__(self): return str(self.x) def __str__(self): return str(self.x) def as_first_line(self): with open(self.x, "r") as f: return f.readline().rstrip() def as_whole(self): with open(self.x, "r") as f: return f.read() def as_file(self, mode="r"): return open(self.x, mode) def as_datasource(self, mode="r"): ds = json.loads(open(self.x, mode).read()) return ds @property def val(self): # TODO: fix types handling if any([re.match(r"datasource.*", t) for t in self._types]): print self.as_datasource() return os.path.dirname(self.as_datasource()['URL']) else: return self.as_first_line() @property def types(self): return self._types class Output(str): def __new__(self, x, _types): return str.__new__(self, x) def __init__(self, x, _types): self.x = x self._types = _types def __repr__(self): return str(self.x) def __str__(self): return str(self.x) def as_first_line(self): with open(self.x, "r") as f: return f.readline().rstrip() def as_whole(self): with open(self.x, "r") as f: return f.read() def as_file(self, mode="r"): return open(self.x, mode) @property def val(self): return self.as_first_line() @val.setter def val(self, value): with open(self.x, "w+") as f: f.write(value) @property def types(self): return self._types class Param(str): def 
__new__(self, x, typeinfo): return str.__new__(self, x) def __init__(self, x, typeinfo): self._x = x self._typeinfo = typeinfo def __repr__(self): return str(self._x) def __str__(self): return str(self._x) @property def val(self): type_handler = { "string" : lambda x: x, "float" : lambda x: float(x), "integer" : lambda x: int(x), "enum" : lambda x: x, "file" : read_whole_file } param_type = self._typeinfo['Type'] if param_type in type_handler: return type_handler[param_type](self._x) else: return self._x def input_output_builder(spec_input, spec_output): import sys params = dict(arg.split("=") for arg in sys.argv[1:]) if not all(k in params for k in spec_input.keys()): raise ValueError("Missing input parameters") if not all(k in params for k in spec_output.keys()): raise ValueError("Missing output parameters") InputSettings = namedtuple('InputSettings', spec_input.keys()) in_params = {in_k:Input(params[in_k], in_type) for in_k, in_type in spec_input.items()} input_settings = InputSettings(**in_params) OutputSettings = namedtuple('OutputSettings', spec_output.keys()) out_params = {out_k:Output(params[out_k], out_type) for out_k,out_type in spec_output.items()} output_settings = OutputSettings(**out_params) return input_settings, output_settings def param_builder(spec_param, param_json): def get_param(k): if k in param_json: return param_json[k]['Val'] else: return spec_param[k]['Default'] ParamSettings = namedtuple('ParamSettings', spec_param.keys()) param_dict = {k:Param(get_param(k), v) for k, v in spec_param.items()} env_settings = ParamSettings(**param_dict) return env_settings def global_param_builder(param_json): return {k:v['Val'] for k, v in param_json.items()} def get_settings(spec_json): moderate_keys = ['Name', 'Param', 'Input', 'Output', 'Cmd', 'Description'] if not all(k in spec_json for k in moderate_keys): raise ValueError("One of param from %s may not exist in 'spec.json'" % str(moderate_keys)) # TODO: condition for appending 'GlobalParam' moderate_keys.append('GlobalParam') ModuleSetting = namedtuple('ModuleSetting', moderate_keys) # Load parameters param_json = get_json_file(os.getenv("ZETRT")) param = param_builder(spec_json['Param'], param_json['PARAM']) json_input, json_output = input_output_builder(spec_json['Input'], spec_json['Output']) # TODO: global_param = global_param_builder(param_json['GLOBAL_PARAM']) settings = ModuleSetting(Name=spec_json['Name'], Description=spec_json['Description'], Param=param, Input=json_input, Output=json_output, Cmd=spec_json['Cmd'], GlobalParam=global_param) return settings def get_json_file(filename): with open(filename, "r") as f: return json.load(f) def get_settings_from_file(filename): with open(filename, "r") as f: return get_settings(json.load(f)) def get_settings_from_string(spec_json_str): print(json.loads(spec_json_str)) return get_settings(json.loads(spec_json_str)) # Various Runtime: Hive, Hadoop, Pig class ZetRuntime(object): def __init__(self, spec_filename="spec.json"): self.settings = get_settings_from_file(spec_filename) def __repr__(self): return str(self.settings) class HadoopRuntime(ZetRuntime): def __init__(self, spec_filename="spec.json"): super(HadoopRuntime, self).__init__(spec_filename=spec_filename) @property def hdfs_root(self): ps = self.settings if 'hdfs_root' in ps.Param._asdict(): return ps.Param.hdfs_root.val else: return '/' def get_hdfs_working_dir(self, path=""): ps = self.settings glb_vars = ps.GlobalParam # return os.path.join(self.hdfs_root, 'tmp/zetjob', glb_vars['userName'], "job%s" % 
glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path) remote_path = os.path.normpath(os.path.join('tmp/zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path)) return os.path.join(self.hdfs_root, remote_path) def get_hive_namespace(self): ps = self.settings glb_vars = ps.GlobalParam return "zetjobns_%s_job%s_blk%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId']) def hdfs_upload_dir(self, local_dir): if local_dir == "": return if not os.path.isdir(local_dir): return hdfs_upload_dir = self.get_hdfs_working_dir(local_dir) ext_files = [f for f in sorted(os.listdir(local_dir)) if os.path.isfile(os.path.join(local_dir,f))] for f in ext_files: # f_remote = os.path.join(hdfs_upload_dir, local_dir, f) # f_remote_dir = os.path.dirname(f_remote) f_local = os.path.join(local_dir, f) f_remote_dir = self.get_hdfs_working_dir(f_local) if cmd("hadoop fs -mkdir -p %s" % f_remote_dir) != 0: raise Exception("Failed to create dir %s" % f_remote_dir) print("HDFS Upload :: %s ====> %s" % (f, f_remote_dir)) print("hadoop fs -copyFromLocal %s %s" % (os.path.join(local_dir, f), os.path.join(f_remote_dir))) if cmd("hadoop fs -copyFromLocal %s %s" % (os.path.join(local_dir, f), f_remote_dir)) == 0: yield os.path.join(f_remote_dir) else: raise Exception("Failed to upload file %s to %s" % (f_local, f_remote_dir)) def hdfs_clean_working_dir(self): hdfs_working_dir = self.get_hdfs_working_dir() if not clean_hdfs_path(hdfs_working_dir): # TODO : refactor to 'HiveException' print self raise Exception("Can not clean hdfs path : %s" % hdfs_working_dir) def clean_working_dir(self): self.hdfs_clean_working_dir() class EmrRuntime(HadoopRuntime): def __init__(self, spec_filename="spec.json"): import boto from boto.emr.connection import EmrConnection, RegionInfo super(EmrRuntime, self).__init__(spec_filename) p = self.settings.Param self.s3_conn = boto.connect_s3(p.AWS_ACCESS_KEY_ID, p.AWS_ACCESS_KEY_SECRET) self.s3_bucket = self.s3_conn.get_bucket(p.S3_BUCKET) self.region = p.AWS_Region self.emr_conn = EmrConnection(p.AWS_ACCESS_KEY_ID, p.AWS_ACCESS_KEY_SECRET, region = RegionInfo(name = self.region, endpoint = self.region + '.elasticmapreduce.amazonaws.com')) self.job_flow_id = p.EMR_jobFlowId def get_s3_working_dir(self, path=""): ps = self.settings glb_vars = ps.GlobalParam remote_path = os.path.normpath(os.path.join(self.s3_bucket.name, 'zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path)) return os.path.join("s3://", remote_path) def get_emr_job_name(self): ps = self.settings glb_vars = ps.GlobalParam return os.path.join('zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId']) def s3_upload_dir(self, local_dir): print("EmrHiveRuntime.s3_uploader()") print("s3_upload_dir :::: %s" % local_dir) if local_dir == "": return if not os.path.isdir(local_dir): return s3_upload_dir = self.get_s3_working_dir(local_dir) ext_files = [f for f in sorted(os.listdir(local_dir)) if os.path.isfile(os.path.join(local_dir,f))] for f in ext_files: f_local = os.path.join(local_dir, f) f_remote_full = self.get_s3_working_dir(os.path.join(local_dir, f)) print("S3 Upload :: %s ====> %s" % (f_local, s3_upload_dir)) print("S3 remote_full :: %s" % f_remote_full) yield s3_upload(self.s3_bucket, f_local, f_remote_full) def s3_clean_working_dir(self): s3_working_dir = self.get_s3_working_dir() if not s3_delete(self.s3_bucket, s3_working_dir): # TODO : refactor to 'HiveException' raise Exception("Can not clean s3 path 
: %s" % s3_working_dir) def s3_upload(self, filename): from urlparse import urlparse parse_ret = urlparse(filename) if parse_ret.scheme == '': s3_working_dir = self.get_s3_working_dir() file_remote = os.path.join(s3_working_dir, os.path.normpath(os.path.basename(filename))) file_remote_full = s3_upload(self.s3_bucket, filename, file_remote) return file_remote_full elif parse_ret.scheme == 's3': return filename else: raise ValueError("Invalid filename to upload to s3: %s" % filename) def clean_working_dir(self): self.s3_clean_working_dir() class HiveRuntime(HadoopRuntime): def files_uploader(self, local_dir): return self.hdfs_upload_dir(local_dir) def hive_output_builder(self, output_name, output_obj): # TODO: refactor this method ps = self.settings glb_vars = ps.GlobalParam out_type = output_obj.types[0] if out_type.startswith("hive.table"): return "zetjob_%s_job%s_blk%s_OUTPUT_%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId'], output_name) elif out_type.startswith("hive.hdfs"): return self.get_hdfs_working_dir("OUTPUT_%s" % output_name) else: raise ValueError("Invalid type for hive, type must start with 'hive.table' or 'hive.hdfs'") def header_builder(self, hive_ns, uploaded_files, uploaded_jars): # Build Output Tables for output_name,output_obj in self.settings.Output._asdict().items(): output_obj.val = self.hive_output_builder(output_name, output_obj) return "\n".join( itertools.chain( ["ADD FILE %s;" % f for f in uploaded_files], ["ADD JAR %s;" % f for f in uploaded_jars], ["set hivevar:MYNS = %s;" % hive_ns], ["set hivevar:PARAM_%s = %s;" % (k,v) for k,v in self.settings.Param._asdict().items()], ["set hivevar:INPUT_%s = %s;" % (k,v.val) for k,v in self.settings.Input._asdict().items()], ["set hivevar:OUTPUT_%s = %s;" % (k,v.val) for k,v in self.settings.Output._asdict().items()])) def clean_working_dir(self): self.hdfs_clean_working_dir() def generate_script(self, hive_script, target_filename=None): hive_ns = self.get_hive_namespace() # Upload files and UDF jars if 'FILE_DIR' in self.settings.Param._asdict(): file_dir = self.settings.Param.FILE_DIR uploaded_files = self.files_uploader(file_dir.val) else: uploaded_files = [] if 'UDF_DIR' in self.settings.Param._asdict(): jar_dir = self.settings.Param.UDF_DIR uploaded_jars = self.files_uploader(jar_dir.val) else: uploaded_jars = [] # Build Input, Output and Param header = self.header_builder(hive_ns, uploaded_files, uploaded_jars) if target_filename == None: import tempfile tmp_file = tempfile.NamedTemporaryFile(prefix="hive_generated_", suffix=".hql", delete=False) tmp_file.close() target_filename = tmp_file.name with open(hive_script, "r") as f, open(target_filename, "w+") as out_f: out_f.write("--------------------------\n") out_f.write("-- Header\n") out_f.write("--------------------------\n") out_f.write(header) out_f.write("\n") out_f.write("--------------------------\n") out_f.write("-- Main\n") out_f.write("--------------------------\n") out_f.write("\n") out_f.write(f.read()) return target_filename def execute(self, hive_script, generated_hive_script=None): self.clean_working_dir() generated_hive_script = self.generate_script(hive_script, generated_hive_script) if cmd("beeline -u jdbc:hive2://%s:%s -n hive -p tiger -d org.apache.hive.jdbc.HiveDriver -f '%s' --verbose=true " % (self.settings.Param.HiveServer2_Host, self.settings.Param.HiveServer2_Port, generated_hive_script)) != 0: raise Exception("Failed to execute hive script : %s" % generated_hive_script) class EmrHiveRuntime(EmrRuntime, HiveRuntime): def 
__init__(self, spec_filename="spec.json"): super(EmrHiveRuntime, self).__init__(spec_filename) def hive_output_builder(self, output_name, output_obj): # TODO : should refactor this function to base class ps = self.settings glb_vars = ps.GlobalParam out_type = output_obj.types[0] if out_type.startswith("hive.table"): return "zetjob_%s_job%s_blk%s_OUTPUT_%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId'], output_name) elif out_type.startswith("hive.hdfs"): return self.get_hdfs_working_dir("OUTPUT_%s" % output_name) elif out_type.startswith("hive.s3"): return self.get_s3_working_dir("OUTPUT_%s" % output_name) else: raise ValueError("Invalid type for hive, type must start with 'hive.table' or 'hive.hdfs' or 'hive.s3'") def files_uploader(self, local_dir): return self.s3_upload_dir(local_dir) def emr_execute_hive(self, s3_hive_script): from boto.emr.step import HiveStep hive_step = HiveStep(name=self.get_emr_job_name(), hive_file=s3_hive_script) self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[hive_step]) emr_wait_job(self.emr_conn, self.job_flow_id) def execute(self, main_hive_script, generated_hive_script=None): self.clean_working_dir() hive_script_local = self.generate_script(main_hive_script, generated_hive_script) s3_working_dir = self.get_s3_working_dir() hive_script_remote = os.path.join(s3_working_dir, os.path.basename(hive_script_local)) hive_script_remote_full = s3_upload(self.s3_bucket, hive_script_local, hive_script_remote) print("========= Generated Hive Script =========") print(open(hive_script_local).read()) print("=========================================") print("EmrHiveRuntime.execute()") self.emr_execute_hive(hive_script_remote_full) class EmrJarRuntime(EmrRuntime): def __init__(self, spec_filename="spec.json"): super(EmrJarRuntime, self).__init__(spec_filename) def execute(self, jar_path, args): from boto.emr.step import JarStep s3_jar_path = s3_upload(self.s3_bucket, jar_path, self.get_s3_working_dir(jar_path)) # s3_jar_path = "s3://run-jars/jar/mahout-core-1.0-SNAPSHOT-job.jar" print("Uploading jar to s3 : %s -> %s" % (jar_path, s3_jar_path)) print("Add jobflow step") step = JarStep(name='cl_filter', jar=s3_jar_path, step_args=args) self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[step]) print("Waiting jobflow step done") emr_wait_job(self.emr_conn, self.job_flow_id) class PigRuntime(HadoopRuntime): def __init__(self, spec_filename="spec.json"): super(PigRuntime, self).__init__(spec_filename) def files_uploader(self, local_dir): return self.hdfs_upload_dir(local_dir) def pig_output_builder(self, output_name, output_obj): # TODO: refactor this method ps = self.settings glb_vars = ps.GlobalParam out_type = output_obj.types[0] if out_type.startswith("pig.hdfs"): return self.get_hdfs_working_dir("OUTPUT_%s" % output_name) else: raise ValueError("Invalid type for pig, type must start with 'pig.hdfs'") def header_builder(self, uploaded_jars): # Build Output Tables for output_name,output_obj in self.settings.Output._asdict().items(): output_obj.val = self.pig_output_builder(output_name, output_obj) return "\n".join( itertools.chain( ["%%declare PARAM_%s '%s'" % (k,v) for k,v in self.settings.Param._asdict().items()], ["%%declare INPUT_%s '%s'" % (k,v.val) for k,v in self.settings.Input._asdict().items()], ["%%declare OUTPUT_%s '%s'" % (k,v.val) for k,v in self.settings.Output._asdict().items()], ["REGISTER '%s';" % f for f in uploaded_jars] )) def generate_script(self, pig_script, target_filename=None): if 'UDF_DIR' in 
self.settings.Param._asdict(): jar_dir = self.settings.Param.UDF_DIR uploaded_jars = self.files_uploader(jar_dir.val) else: uploaded_jars = [] # Build Input, Output and Param header = self.header_builder(uploaded_jars) if target_filename == None: import tempfile tmp_file = tempfile.NamedTemporaryFile(prefix="pig_generated_", suffix=".hql", delete=False) tmp_file.close() target_filename = tmp_file.name with open(pig_script, "r") as f, open(target_filename, "w+") as out_f: out_f.write("/*************************\n") out_f.write(" * Header\n") out_f.write(" *************************/\n") out_f.write(header) out_f.write("\n") out_f.write("/*************************\n") out_f.write(" * Main\n") out_f.write(" *************************/\n") out_f.write("\n") out_f.write(f.read()) return target_filename def generate_pig_conf(self): ps = self.settings glb_vars = ps.GlobalParam with open("/home/run/pig.properties", "a") as pf: pf.write("fs.default.name=%s\n" % ps.Param.hdfs_root) pf.write("yarn.resourcemanager.address=%s\n" % ps.Param.yarn_address) pf.write("yarn.resourcemanager.scheduler.address=%s\n" % ps.Param.yarn_scheduler_address) cmd("cat /home/run/pig.properties") def execute(self, pig_script): self.clean_working_dir() self.generate_pig_conf() generated_pig_script = self.generate_script(pig_script) print("========= Generated Pig Script =========") print(open(generated_pig_script).read()) print("=========================================") print("EmrHiveRuntime.execute()") cmd("pig -x mapreduce -P /home/run/pig.properties %s" % generated_pig_script) class EmrPigRuntime(EmrRuntime, PigRuntime): def __init__(self, spec_filename="spec.json"): super(EmrPigRuntime, self).__init__(spec_filename) def files_uploader(self, local_dir): return self.s3_upload_dir(local_dir) def pig_output_builder(self, output_name, output_obj): # TODO : should refactor this function to base class ps = self.settings glb_vars = ps.GlobalParam out_type = output_obj.types[0] if out_type.startswith("pig.hdfs"): return self.get_hdfs_working_dir("OUTPUT_%s" % output_name) elif out_type.startswith("pig.s3"): return self.get_s3_working_dir("OUTPUT_%s" % output_name) else: raise ValueError("Invalid type for pig, type must start with 'pig.hdfs' or 'pig.s3'") def emr_execute_pig(self, pig_filename): from boto.emr.step import PigStep s3_pig_script = self.s3_upload(pig_filename) pig_step = PigStep(name=self.get_emr_job_name(), pig_file=s3_pig_script) self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[pig_step]) emr_wait_job(self.emr_conn, self.job_flow_id) def execute(self, pig_script): self.clean_working_dir() # TODO: upload S3 additional files generated_pig_script = self.generate_script(pig_script) print("========= Generated Pig Script =========") print(open(generated_pig_script).read()) print("=========================================") print("EmrHiveRuntime.execute()") self.emr_execute_pig(generated_pig_script) # Utility Functions def clean_hdfs_path(p): if cmd("hadoop fs -rm -r -f %s && hadoop fs -mkdir -p %s" % (p, p)) == 0: return True else: return False def percent_cb(complete, total): sys.stdout.write('.') sys.stdout.flush() def s3_delete(bucket, s3_path): import boto from urlparse import urlparse print("s3_delete %s" % s3_path) prefix_path = urlparse(s3_path).path[1:] for key in bucket.list(prefix=prefix_path): key.delete() return True def s3_upload(bucket, local_filename, remote_filename): import boto from urlparse import urlparse # max size in bytes before uploading in parts. 
# between 1 and 5 GB recommended MAX_SIZE = 40 * 1000 * 1000 # size of parts when uploading in parts PART_SIZE = 6 * 1000 * 1000 fn_local = os.path.normpath(local_filename) fn_remote = urlparse(remote_filename).path fn_remote_full = remote_filename filesize = os.path.getsize(local_filename) print("filesize = %d, maxsize = %d" % (filesize, MAX_SIZE)) if filesize > MAX_SIZE: print("Multi-part uploading...") print("From : %s" % fn_local) print("To : %s" % fn_remote_full) mp = bucket.initiate_multipart_upload(fn_local) fp = open(sourcepath,'rb') fp_num = 0 while (fp.tell() < filesize): fp_num += 1 print "uploading part %i" % fp_num mp.upload_part_from_file(fp, fp_num, cb=percent_cb, num_cb=10, size=PART_SIZE) mp.complete_upload() print("") else: print("Single-part upload...") print("From : %s" % fn_local) print("To : %s" % fn_remote_full) k = boto.s3.key.Key(bucket) k.key = fn_remote k.set_contents_from_filename(fn_local, cb=percent_cb, num_cb=10) print("") return fn_remote_full def emr_wait_job(emr_conn, job_flow_id): blocking_states = ['STARTING', 'BOOTSTRAPPING', 'RUNNING'] cnt = 60 * 60 * 1 # 1 hour time.sleep(10) while cnt > 0: jf_state = emr_conn.describe_jobflow(job_flow_id).state print("jobflow_state = %s" % jf_state) if jf_state not in blocking_states: if jf_state == 'WAITING': print("Job done, continue...") return True else: print("Job may failed.") return False cnt = cnt - 1 time.sleep(10) return False def cmd(cmd_str): print("Execute External Command : '%s'" % cmd_str) ret = subprocess.call(cmd_str, shell=True) print("Exit with exit code = %d" % ret) return ret if __name__ == "__main__": # settings = get_settings_from_file("spec.json") # print(settings) # print(settings.Input) # print(settings.Output) # print("-----------------") # i = Input("test.param") # print(i) # print(i.as_one_line()) # print(i.as_all_line()) # t = MyTest(4) # print(t.val) # t.val = 5 # print(t.val) # o = Output("out.param") # print(o) # print(o.val) # o.val = "cacaca" # settings = get_settings_from_file("spec.json") # hive_runtime = HiveRuntime() # print(hive_runtime) emr_hive_runtime = EmrHiveRuntime() emr_hive_runtime.execute()
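# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a dependency-free
# rendition of the "header + main" Hive script generation performed above by
# HiveRuntime.header_builder / generate_script.  The parameter, input and
# output values below are made-up placeholders, not anything read from
# spec.json.
# ---------------------------------------------------------------------------
import itertools
import tempfile


def build_hive_header(hive_ns, params, inputs, outputs, files=(), jars=()):
    # ADD FILE/JAR statements first, then hivevar definitions for the job
    # namespace, Param, Input and Output entries -- same shape as
    # HiveRuntime.header_builder.
    return "\n".join(itertools.chain(
        ("ADD FILE %s;" % f for f in files),
        ("ADD JAR %s;" % j for j in jars),
        ("set hivevar:MYNS = %s;" % hive_ns,),
        ("set hivevar:PARAM_%s = %s;" % (k, v) for k, v in params.items()),
        ("set hivevar:INPUT_%s = %s;" % (k, v) for k, v in inputs.items()),
        ("set hivevar:OUTPUT_%s = %s;" % (k, v) for k, v in outputs.items()),
    ))


def build_hive_script(main_hql, header):
    # Write header + user script to a temporary .hql file and return its path,
    # the same layout generate_script hands to beeline or to an EMR HiveStep.
    with tempfile.NamedTemporaryFile(prefix="hive_generated_", suffix=".hql",
                                     mode="w", delete=False) as out_f:
        out_f.write("-- Header\n%s\n\n-- Main\n%s\n" % (header, main_hql))
        return out_f.name


if __name__ == "__main__":
    demo_header = build_hive_header(
        "zetjob_demo",
        params={"DAYS": 7},
        inputs={"LOG": "/user/demo/input"},
        outputs={"RESULT": "/user/demo/OUTPUT_RESULT"},
    )
    print(build_hive_script(
        "SELECT * FROM ${INPUT_LOG} LIMIT ${PARAM_DAYS};", demo_header))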
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals __author__ = "Bharat Medasani" __copyright__ = "Copyright 2013, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "bkmedasani@lbl.gov" __date__ = "Aug 2, 2013" import unittest import os import re from pymatgen.core.periodic_table import Specie from pymatgen.core.structure import Structure, Molecule from pymatgen.io.cif import CifParser from pymatgen.io.zeopp import ZeoCssr, ZeoVoronoiXYZ, get_voronoi_nodes, \ get_high_accuracy_voronoi_nodes, get_void_volume_surfarea, \ get_free_sphere_params from pymatgen.io.vasp.inputs import Poscar from pymatgen.analysis.bond_valence import BVAnalyzer try: import zeo except ImportError: zeo = None test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files') @unittest.skipIf(not zeo, "zeo not present.") class ZeoCssrTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'POSCAR') p = Poscar.from_file(filepath) self.zeocssr = ZeoCssr(p.structure) def test_str(self): expected_string = """4.7595 10.4118 6.0672 90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1 24 0 0 Fe4 P4 O16 1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000 2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000 3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000 4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000 5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000 6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000 7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000 8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000 9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000 10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000 11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000 12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000 13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000 14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000 15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000 16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000 17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000 18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000 19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000 20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000 21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000 22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000 23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000 24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000""" self.assertEqual(str(self.zeocssr), expected_string) def test_from_file(self): filename = os.path.join(test_dir, "EDI.cssr") zeocssr = ZeoCssr.from_file(filename) self.assertIsInstance(zeocssr.structure, Structure) #@unittest.skipIf(not zeo, "zeo not present.") class ZeoCssrOxiTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'POSCAR') p = Poscar.from_file(filepath) structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure) self.zeocssr = ZeoCssr(structure) def test_str(self): expected_string = """4.7595 10.4118 6.0672 90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1 24 0 0 Fe4 P4 O16 1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000 2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000 3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000 4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000 5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000 6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000 7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000 8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000 9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000 10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000 
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000 12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000 13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000 14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000 15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000 16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000 17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000 18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000 19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000 20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000 21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000 22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000 23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000 24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000""" self.assertEqual(str(self.zeocssr), expected_string) def test_from_file(self): filename = os.path.join(test_dir, "EDI_oxistate_decorated.cssr") zeocssr = ZeoCssr.from_file(filename) self.assertIsInstance(zeocssr.structure, Structure) @unittest.skipIf(not zeo, "zeo not present.") class ZeoVoronoiXYZTest(unittest.TestCase): def setUp(self): coords = [ [0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.089000], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000]] prop = [0.4, 0.2, 0.2, 0.2, 0.2] self.mol = Molecule( ["C", "H", "H", "H", "H"], coords, site_properties={"voronoi_radius": prop}) self.xyz = ZeoVoronoiXYZ(self.mol) def test_str(self): ans = """5 H4 C1 C 0.000000 0.000000 0.000000 0.400000 H 1.089000 0.000000 0.000000 0.200000 H -0.363000 1.026719 0.000000 0.200000 H -0.363000 -0.513360 -0.889165 0.200000 H -0.363000 -0.513360 0.889165 0.200000""" self.assertEqual(str(self.xyz), ans) self.assertEqual(str(self.xyz), ans) def test_from_file(self): filename = os.path.join(test_dir, "EDI_voro.xyz") vor = ZeoVoronoiXYZ.from_file(filename) self.assertIsInstance(vor.molecule, Molecule) @unittest.skipIf(not zeo, "zeo not present.") class GetVoronoiNodesTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'POSCAR') p = Poscar.from_file(filepath) self.structure = p.structure bv = BVAnalyzer() valences = bv.get_valences(self.structure) el = [site.species_string for site in self.structure.sites] valence_dict = dict(zip(el, valences)) self.rad_dict = {} for k, v in valence_dict.items(): self.rad_dict[k] = float(Specie(k, v).ionic_radius) assert len(self.rad_dict) == len(self.structure.composition) def test_get_voronoi_nodes(self): vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \ get_voronoi_nodes(self.structure, self.rad_dict) self.assertIsInstance(vor_node_struct, Structure) self.assertIsInstance(vor_edge_center_struct, Structure) self.assertIsInstance(vor_face_center_struct, Structure) print (len(vor_node_struct.sites)) print (len(vor_face_center_struct.sites)) @unittest.skipIf(not zeo, "zeo not present.") class GetFreeSphereParamsTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'free_sph.cif') self.structure = Structure.from_file(filepath) self.rad_dict = {'Ge':0.67,'P':0.52,'S':1.7, 'La':1.17,'Zr':0.86,'O':1.26} def test_get_free_sphere_params(self): free_sph_params = get_free_sphere_params(self.structure, rad_dict=self.rad_dict) # Zeo results can change in future. 
Hence loose comparison self.assertAlmostEqual( free_sph_params['inc_sph_max_dia'], 2.58251, places=1) self.assertAlmostEqual( free_sph_params['free_sph_max_dia'], 1.29452, places=1) self.assertAlmostEqual( free_sph_params['inc_sph_along_free_sph_path_max_dia'], 2.58251, places=1) @unittest.skipIf(not zeo, "zeo not present.") class GetHighAccuracyVoronoiNodesTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'POSCAR') p = Poscar.from_file(filepath) self.structure = p.structure bv = BVAnalyzer() valences = bv.get_valences(self.structure) el = [site.species_string for site in self.structure.sites] valence_dict = dict(zip(el, valences)) self.rad_dict = {} for k, v in valence_dict.items(): self.rad_dict[k] = float(Specie(k, v).ionic_radius) assert len(self.rad_dict) == len(self.structure.composition) def test_get_voronoi_nodes(self): #vor_node_struct, vor_ec_struct, vor_fc_struct = \ # get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict) vor_node_struct = \ get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict) self.assertIsInstance(vor_node_struct, Structure) #self.assertIsInstance(vor_ec_struct, Structure) #self.assertIsInstance(vor_fc_struct, Structure) print(len(vor_node_struct.sites)) #print(len(vor_fc_struct.sites)) @unittest.skipIf(not zeo, "zeo not present.") class GetVoronoiNodesMultiOxiTest(unittest.TestCase): def setUp(self): filepath = os.path.join(test_dir, 'POSCAR') p = Poscar.from_file(filepath) self.structure = p.structure bv = BVAnalyzer() self.structure = bv.get_oxi_state_decorated_structure(self.structure) valences = bv.get_valences(self.structure) radii = [] for i in range(len(valences)): el = self.structure.sites[i].specie.symbol radius = Specie(el, valences[i]).ionic_radius radii.append(radius) el = [site.species_string for site in self.structure.sites] self.rad_dict = dict(zip(el, radii)) for el in self.rad_dict.keys(): print((el, self.rad_dict[el].real)) def test_get_voronoi_nodes(self): vor_node_struct, vor_edge_center_struct, vor_face_center_struct =\ get_voronoi_nodes(self.structure, self.rad_dict) self.assertIsInstance(vor_node_struct, Structure) self.assertIsInstance(vor_edge_center_struct, Structure) self.assertIsInstance(vor_face_center_struct, Structure) @unittest.skip("The function is deprecated") class GetVoidVolumeSurfaceTest(unittest.TestCase): def setUp(self): filepath1 = os.path.join(test_dir, 'Li2O.cif') p = CifParser(filepath1).get_structures(False)[0] bv = BVAnalyzer() valences = bv.get_valences(p) el = [site.species_string for site in p.sites] val_dict = dict(zip(el, valences)) self._radii = {} for k, v in val_dict.items(): k1 = re.sub(r'[1-9,+,\-]', '', k) self._radii[k1] = float(Specie(k1, v).ionic_radius) p.remove(0) self._vac_struct = p def test_void_volume_surface_area(self): pass vol, sa = get_void_volume_surfarea(self._vac_struct, self._radii) #print "vol: ", vol, "sa: ", sa self.assertIsInstance(vol, float) self.assertIsInstance(sa, float) if __name__ == "__main__": unittest.main()
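# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the radii-dict
# construction these tests repeat in setUp(), used outside of unittest.  It
# assumes pymatgen plus the optional zeo bindings are installed and that a
# POSCAR file exists at the path passed in (the "POSCAR" path below is an
# assumption).
# ---------------------------------------------------------------------------
from pymatgen.core.periodic_table import Specie
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.zeopp import get_voronoi_nodes
from pymatgen.analysis.bond_valence import BVAnalyzer


def voronoi_nodes_for_poscar(poscar_path):
    # Derive bond-valence valences, map each species string to its ionic
    # radius, then hand structure + radii to get_voronoi_nodes, which returns
    # node, edge-center and face-center structures.
    structure = Poscar.from_file(poscar_path).structure
    valences = BVAnalyzer().get_valences(structure)
    species = [site.species_string for site in structure.sites]
    rad_dict = {el: float(Specie(el, v).ionic_radius)
                for el, v in zip(species, valences)}
    return get_voronoi_nodes(structure, rad_dict)


if __name__ == "__main__":
    nodes, edge_centers, face_centers = voronoi_nodes_for_poscar("POSCAR")
    print(len(nodes.sites), len(edge_centers.sites), len(face_centers.sites))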
# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common import cfg CONF = cfg.CONF url_ref = { "type": "string", "minLength": 8, "pattern": 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]' '|(?:%[0-9a-fA-F][0-9a-fA-F]))+' } boolean_string = { "type": "integer", "minimum": 0, "maximum": 1 } non_empty_string = { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^.*[0-9a-zA-Z]+.*$" } configuration_data_types = { "type": "string", "minLength": 1, "pattern": "integer|string" } configuration_integer_size = { "type": "string", "maxLength": 40, "pattern": "[0-9]+" } configuration_positive_integer = { "type": "string", "maxLength": 40, "minLength": 1, "pattern": "^[0-9]+$" } configuration_non_empty_string = { "type": "string", "minLength": 1, "maxLength": 128, "pattern": "^.*[0-9a-zA-Z]+.*$" } flavorref = { 'oneOf': [ non_empty_string, { "type": "integer" }] } volume_size = { "oneOf": [ { "type": "integer", "minimum": 0 }, configuration_positive_integer] } host_string = { "type": "string", "minLength": 1, "pattern": "^[%]?[\w(-).]*[%]?$" } name_string = { "type": "string", "minLength": 1, "pattern": "^.*[0-9a-zA-Z]+.*$" } uuid = { "type": "string", "minLength": 1, "maxLength": 64, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}" "-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$" } volume = { "type": "object", "required": ["size"], "properties": { "size": volume_size, "required": True } } nics = { "type": "array", "items": { "type": "object", } } databases_ref_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref_list_required = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref = { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_ref_list_required } } databases_def = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string, "character_set": non_empty_string, "collate": non_empty_string } } } user_attributes = { "type": "object", "additionalProperties": True, "minProperties": 1, "properties": { "name": name_string, "password": non_empty_string, "host": host_string } } users_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name", "password"], "additionalProperties": True, "properties": { "name": name_string, "password": non_empty_string, "host": host_string, "databases": databases_ref_list } } } null_configuration_id = { "type": "null" } configuration_id = { 'oneOf': [ uuid, null_configuration_id ] } module_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["id"], "additionalProperties": 
True, "properties": { "id": uuid, } } } cluster = { "create": { "type": "object", "required": ["cluster"], "additionalProperties": True, "properties": { "cluster": { "type": "object", "required": ["name", "datastore", "instances"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore": { "type": "object", "required": ["type", "version"], "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "instances": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "modules": module_list, } } } } } } }, "add_shard": { "type": "object", "required": ["add_shard"], "additionalProperties": True, "properties": { "add_shard": { "type": "object" } } }, "grow": { "type": "object", "required": ["grow"], "additionalProperties": True, "properties": { "grow": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "flavorRef": flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "related_to": non_empty_string, "type": non_empty_string } } } } }, "shrink": { "type": "object", "required": ["shrink"], "additionalProperties": True, "properties": { "shrink": { "type": "array", "items": { "type": "object", "required": ["id"], "additionalProperties": True, "properties": { "id": uuid } } } } } } instance = { "create": { "type": "object", "required": ["instance"], "additionalProperties": True, "properties": { "instance": { "type": "object", "required": ["name", "flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "configuration_id": configuration_id, "flavorRef": flavorref, "volume": volume, "databases": databases_def, "users": users_list, "restorePoint": { "type": "object", "required": ["backupRef"], "additionalProperties": True, "properties": { "backupRef": uuid } }, "availability_zone": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "nics": nics, "modules": module_list } } } }, "edit": { "name": "instance:edit", "type": "object", "required": ["instance"], "properties": { "instance": { "type": "object", "required": [], "additionalProperties": False, "properties": { "slave_of": {}, "replica_of": {}, "name": non_empty_string, "configuration": configuration_id, } } } }, "action": { "resize": { "volume": { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["volume"], "additionalProperties": True, "properties": { "volume": volume } } } }, 'flavorRef': { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": flavorref } } } } }, "restart": { "type": "object", "required": ["restart"], "additionalProperties": True, "properties": { "restart": { "type": "object" } } } } } mgmt_cluster = { "action": { 'reset-task': { "type": "object", "required": ["reset-task"], "additionalProperties": True, "properties": { "reset-task": { "type": "object" } } } } } mgmt_instance = { "action": { 'migrate': { "type": "object", "required": ["migrate"], "additionalProperties": True, "properties": { "migrate": { "type": "object" 
} } }, "reboot": { "type": "object", "required": ["reboot"], "additionalProperties": True, "properties": { "reboot": { "type": "object" } } }, "stop": { "type": "object", "required": ["stop"], "additionalProperties": True, "properties": { "stop": { "type": "object" } } } } } user = { "create": { "name": "users:create", "type": "object", "required": ["users"], "properties": { "users": users_list } }, "update_all": { "users": { "type": "object", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } }, "databases": databases_ref }, "update": { "type": "object", "required": ["user"], "additionalProperties": True, "properties": { "user": user_attributes } } } dbschema = { "create": { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_def } } } backup = { "create": { "name": "backup:create", "type": "object", "required": ["backup"], "properties": { "backup": { "type": "object", "required": ["instance", "name"], "properties": { "description": non_empty_string, "instance": uuid, "name": non_empty_string, "parent_id": uuid } } } } } guest_log = { "action": { "name": "guest_log:action", "type": "object", "required": ["name"], "properties": { "name": non_empty_string, "enable": boolean_string, "disable": boolean_string, "publish": boolean_string, "discard": boolean_string } } } module_contents = { "type": "string", "minLength": 1, "maxLength": 16777215, "pattern": "^.*.+.*$" } module = { "create": { "name": "module:create", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": ["name", "module_type", "contents"], "additionalProperties": True, "properties": { "name": non_empty_string, "module_type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "visible": boolean_string, "live_update": boolean_string, } } } }, "update": { "name": "module:update", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": [], "additionalProperties": True, "properties": { "name": non_empty_string, "type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "visible": boolean_string, "live_update": boolean_string, } } } }, "apply": { "name": "module:apply", "type": "object", "required": ["modules"], "properties": { "modules": module_list, } }, "list": { "name": "module:list", "type": "object", "required": [], "properties": { "module": uuid, "from_guest": boolean_string, "include_contents": boolean_string } }, } configuration = { "create": { "name": "configuration:create", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": ["values", "name"], "properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } } } } } }, "update": { "name": "configuration:update", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], 
"properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string } } } }, "edit": { "name": "configuration:edit", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], "properties": { "values": { "type": "object", } } } } } } mgmt_configuration = { "create": { "name": "configuration_parameter:create", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, "update": { "name": "configuration_parameter:update", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, } account = { 'create': { "type": "object", "name": "users", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } } } upgrade = { "create": { "type": "object", "required": ["upgrade"], "additionalProperties": True, "properties": { "upgrade": { "type": "object", "required": [], "additionalProperties": True, "properties": { "instance_version": non_empty_string, "location": non_empty_string, "metadata": {} } } } } } package_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^.*[0-9a-zA-Z]+.*$" } } mgmt_datastore_version = { "create": { "name": "mgmt_datastore_version:create", "type": "object", "required": ["version"], "properties": { "version": { "type": "object", "required": ["name", "datastore_name", "image", "active"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore_name": non_empty_string, "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]} } } } }, "edit": { "name": "mgmt_datastore_version:edit", "type": "object", "required": [], "additionalProperties": True, "properties": { "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]}, } } }
""" Examples: >>> table1(''' ... James 10 2 ... Mark 7 3 ... Lila 74 1 ... ''') >>> sql("select * from table1") a | b | c -------------- James | 10 | 2 Mark | 7 | 3 Lila | 74 | 1 >>> sql("coutput '../../tests/output' split:2 mode:rcfile select hashmodarchdep(rank,2),* from (select a as name , b as age, c as rank from table1)") return_value ------------ 1 >>> sql("unionallrcfiles file:../../tests/output") name | age | rank ------------------ Mark | 7 | 3 Lila | 74 | 1 James | 10 | 2 >>> sql("coutput '../../tests/emptyoutput' split:2 mode:rcfile select hashmodarchdep(rank,2),* from (select a as name , b as age, c as rank from table1 limit 0)") return_value ------------ 1 >>> sql("unionallrcfiles file:../../tests/emptyoutput") >>> sql("coutput '../../tests/outputsp8' split:8 mode:rcfile select hashmodarchdep(rank,8),* from (select a as name , b as age, c as rank from table1)") return_value ------------ 1 >>> sql("unionallrcfiles file:../../tests/outputsp8") name | age | rank ------------------ James | 10 | 2 Mark | 7 | 3 Lila | 74 | 1 """ import os.path from vtout import SourceNtoOne import functions import lib.inoutparsing import os import cPickle as cPickle import struct import gc import StringIO as StringIO import cStringIO as cStringIO import zlib from array import array BLOCK_SIZE = 65536000 ZLIB = "zlib" BZ2 = "bzip" RCFILE = 1 SDC = 2 SPAC = 3 registered = True def getSize(v): t = type(v) if t == unicode: return 52 + 4 * len(v) if t in (int, float, None): return 24 return 37 + len(v) def outputData(diter, schema, connection, *args, **formatArgs): import bz2 import msgpack serializer = msgpack ### Parameter handling ### where = None mode = 'sdc' compression = 'zlib' level = 2 split = 0 if 'split' in formatArgs: split = 1 if len(args) > 0: where = args[0] elif 'file' in formatArgs: where = formatArgs['file'] else: raise functions.OperatorError(__name__.rsplit('.')[-1], "No destination provided") if 'file' in formatArgs: del formatArgs['file'] if 'mode' in formatArgs: mode = formatArgs['mode'] if 'compr' in formatArgs: if formatArgs['compr'] == "zlib": compression = ZLIB elif formatArgs['compr'] == "bz2": compression = BZ2 else: raise functions.OperatorError(__name__.rsplit('.')[-1], "Wrong compression algorithm provided. 
Choose between zlib or bz2") if 'level' in formatArgs: l = formatArgs['level'] try: if int(l) >= 0 and int(l) <= 9: level = int(l) else: raise functions.OperatorError(__name__.rsplit('.')[-1], "Compression level should range from 0 to 9") except: raise functions.OperatorError(__name__.rsplit('.')[-1], "Compression level should range from 0 to 9") filename, ext = os.path.splitext(os.path.basename(where)) fullpath = os.path.split(where)[0] if split == 0: fileIter = open(where, "w+b") fastPickler = cPickle.Pickler(fileIter, 1) fastPickler.fast = 1 else: fileIter = 1 def spac(fileObject, lencols): colnum = len(schema) - 1 serializer.dump(schema[1:], fileObject) setcol = [set([]) for _ in xrange(colnum)] dictsize = 65536 paxcols = [] indextype = 'H' index_init = [0 for _ in xrange(3)] coldicts = [{} for _ in xrange(colnum)] prevsets = [[] for _ in xrange(colnum)] count = 0 blocknum = 0 compress = bz2.compress while True: maxlen = 0 exitGen = False rows = [] try: for i in xrange(lencols): rows.append((yield)) except GeneratorExit: exitGen = True listofvals = zip(*rows) if listofvals != []: for i, col in enumerate(listofvals): if i not in paxcols: setcol[i].update(col) prev = fileObject.tell() + 8 * (colnum + 2) output = cStringIO.StringIO() headindex = [0 for _ in xrange(colnum + 2)] if blocknum == 0: for i in xrange(colnum): headindex[i] = output.tell() + prev if (len(setcol[i]) * 1.0 / lencols > 0.67): paxcols.append(i) l = index_init[:] t = output.tell() output.write(struct.pack('L' * len(l), *l)) output.write(compress(serializer.dumps(listofvals[i]), 2)) l[0] = output.tell() output.seek(t) output.write(struct.pack('L' * len(l), *l)) output.seek(l[0]) else: prevsets[i] = list(set(setcol[i]).copy()) coldicts[i] = dict(((x, y) for y, x in enumerate(prevsets[i]))) coldict = coldicts[i] if len(prevsets[i]) < 256: indextype = 'B' else: indextype = 'H' l = index_init[:] t = output.tell() output.write(struct.pack('L' * len(l), *l)) output.write(compress(serializer.dumps(prevsets[i]), 2)) l[0] = output.tell() output.write(compress(array(indextype, [coldict[val] for val in listofvals[i]]).tostring())) l[1] = output.tell() output.seek(t) output.write(struct.pack('L' * len(l), *l)) output.seek(l[1]) else: for i in xrange(colnum): headindex[i] = output.tell() + prev if i in paxcols: l = index_init[:] t = output.tell() output.write(struct.pack('L' * len(l), *l)) output.write(compress(serializer.dumps(listofvals[i]), 2)) l[0] = output.tell() output.seek(t) output.write(struct.pack('L' * len(l), *l)) output.seek(l[0]) else: pset = set(prevsets[i]) difnew = list(setcol[i] - pset) s = prevsets[i] + difnew d = 0 if len(s) > dictsize: difold = list(pset - setcol[i]) while len(s) > dictsize: s.remove(difold[d]) d += 1 prevsets[i] = s coldicts[i] = dict(((x, y) for y, x in enumerate(s))) coldict = coldicts[i] towritevalues = (x for x in xrange(len(coldict) - d, len(coldict))) l = index_init[:] t = output.tell() output.write(struct.pack('L' * len(l), *l)) if len(prevsets[i]) != 0: if len(prevsets[i]) < 256: indextype = 'B' else: indextype = 'H' output.write(compress(serializer.dumps(difnew), 2)) l[0] = output.tell() output.write(compress(array(indextype, towritevalues).tostring())) l[1] = output.tell() output.write(compress(array(indextype, [coldict[val] for val in listofvals[i]]).tostring())) l[2] = output.tell() output.seek(t) output.write(struct.pack('L' * len(l), *l)) output.seek(l[2]) headindex[i + 1] = output.tell() + prev headindex[i + 2] = count count = 0 fileObject.write(struct.pack('L' * len(headindex), 
*headindex)) fileObject.write(output.getvalue()) for s in setcol: s.clear() gc.collect() blocknum += 1 if exitGen: fileObject.close() break def sorteddictpercol(fileIter, lencols, compression, level): output = StringIO.StringIO() if split: output.write(struct.pack('!B', 0)) cPickle.dump(schema[1:], output, 1) colnum = len(schema) - 1 cz = output.getvalue() fileIter.write(struct.pack('!i', len(cz))) fileIter.write(cz) else: colnum = len(schema) fileIter.write(struct.pack('!B', 0)) cPickle.dump(schema, fileIter, 1) if hasattr(sys, 'pypy_version_info'): from __pypy__ import newlist_hint else: newlist_hint = lambda size: [] paxcols = [] blocknum = 0 # tempio = cStringIO.StringIO() # fastPickler = cPickle.Pickler(tempio, 2) # fastPickler.fast = 1 exitGen = False compress = zlib.compress if compression == BZ2: compress = bz2.compress if lencols == 0: (yield) while not exitGen: output.truncate(0) mrows = newlist_hint(lencols) try: for i in xrange(lencols): mrows.append((yield)) except GeneratorExit: exitGen = True count = len(mrows) output.write(struct.pack('!B', 1)) if compression == BZ2: output.write(struct.pack('!B', 0)) else: output.write(struct.pack('!B', 1)) headindex = [0 for _ in xrange((colnum * 2) + 1)] type = '!' + 'i' * len(headindex) output.write(struct.pack(type, *headindex)) if mrows != []: for i, col in enumerate(([x[c] for x in mrows] for c in xrange(colnum))): if blocknum == 0: s = sorted(set(col)) lens = len(s) if lens > 50 * 1.0 * count / 100: paxcols.append(i) l = output.tell() # tempio.truncate(0) # fastPickler.dump(col) output.write(compress(serializer.dumps(col), level)) headindex[i * 2] = output.tell() - l else: coldict = dict(((x, y) for y, x in enumerate(s))) l = output.tell() # tempio.truncate(0) # fastPickler.dump(s) output.write(compress(serializer.dumps(s), level)) headindex[i * 2] = output.tell() - l if lens > 1: if lens < 256: output.write(compress(array('B', [coldict[y] for y in col]).tostring(), level)) else: output.write(compress(array('H', [coldict[y] for y in col]).tostring(), level)) headindex[i * 2 + 1] = output.tell() - l - headindex[i * 2] else: if i in paxcols: l = output.tell() # tempio.truncate(0) # fastPickler.dump(col) output.write(compress(serializer.dumps(col), level)) headindex[i * 2] = output.tell() - l else: s = sorted(set(col)) lens = len(s) coldict = dict(((x, y) for y, x in enumerate(s))) l = output.tell() # tempio.truncate(0) # fastPickler.dump(s) output.write(compress(serializer.dumps(s), level)) headindex[i * 2] = output.tell() - l if lens > 1: if lens < 256: output.write(compress(array('B', [coldict[y] for y in col]).tostring(), level)) else: output.write(compress(array('H', [coldict[y] for y in col]).tostring(), level)) headindex[i * 2 + 1] = output.tell() - l - headindex[i * 2] blocknum = 1 headindex[colnum * 2] = count output.seek(0) type = '!' + 'i' * len(headindex) output.write(struct.pack('!B', 1)) if compression == BZ2: output.write(struct.pack('!B', 0)) else: output.write(struct.pack('!B', 1)) output.write(struct.pack(type, *headindex)) cz = output.getvalue() fileIter.write(struct.pack('!i', len(cz))) fileIter.write(cz) fileIter.close() def rcfile(fileObject, lencols, compression, level): colnum = len(schema) - 1 structHeader = '!' 
+ 'i' * colnum indexinit = [0 for _ in xrange(colnum)] fileObject.write(struct.pack('!B', 0)) cPickle.dump(schema[1:], fileObject, 1) # l = cStringIO.StringIO() # fastPickler = cPickle.Pickler(l, 2) # fastPickler.fast = 1 exitGen = False compress = zlib.compress if compression == BZ2: compress = bz2.compress if lencols == 0: (yield) while not exitGen: rows = [] try: for i in xrange(lencols): rows.append((yield)) except GeneratorExit: exitGen = True index = indexinit[:] output = cStringIO.StringIO() output.write(struct.pack('!B', 1)) output.write(struct.pack(structHeader, *index)) if rows != []: for i, col in enumerate(([x[c] for x in rows] for c in xrange(colnum))): # l.truncate(0) # fastPickler.dump(col) cz = zlib.compress(serializer.dumps(col), 5) output.write(cz) index[i] = len(cz) output.seek(1) output.write(struct.pack(structHeader, *index)) fileObject.write(output.getvalue()) fileObject.close() def calclencols(mode): if mode == RCFILE: count = 0 bsize = 0 rows = [] try: while bsize < BLOCK_SIZE: row = diter.next() rows.append(row) count += 1 if split: bsize += sum((getSize(v) for v in row[1:])) else: bsize += sum((getSize(v) for v in row)) except StopIteration: pass return count + 10 * count / 100, rows if mode == SDC or mode == SPAC: count = 0 bsize = 0 rows = [] try: while bsize < BLOCK_SIZE and count < 65535: row = diter.next() rows.append(row) count += 1 if split: bsize += sum((getSize(v) for v in row[1:])) else: bsize += sum((getSize(v) for v in row)) except StopIteration: pass return count, rows if mode == 'spac': if 'split' in formatArgs: filesNum = int(formatArgs['split']) filesList = [None] * filesNum lencols, rows = calclencols(SPAC) for key in xrange(int(formatArgs['split'])): filesList[key] = open(os.path.join(fullpath, filename + '.' + str(key)), 'a') spacgen = [spac(x, lencols) for x in filesList] spacgensend = [x.send for x in spacgen] for j in spacgensend: j(None) for row in rows: spacgensend[row[0]](row[1:]) del (rows) for row in diter: spacgensend[row[0]](row[1:]) for j in spacgen: j.close() elif mode == 'sdc': if 'split' in formatArgs: filesNum = int(formatArgs['split']) filesList = [None] * filesNum lencols, rows = calclencols(SDC) for key in xrange(int(formatArgs['split'])): filesList[key] = open(os.path.join(fullpath, filename + '.' + str(key)), 'wb') sdcgen = [sorteddictpercol(x, lencols, compression, level) for x in filesList] sdcgensend = [x.send for x in sdcgen] for j in sdcgensend: j(None) for row in rows: sdcgensend[row[0]](row[1:]) del (rows) for row in diter: sdcgensend[row[0]](row[1:]) for j in sdcgen: j.close() else: lencols, rows = calclencols(SDC) sdcgen = sorteddictpercol(fileIter, lencols, compression, level) sdcgensend = sdcgen.send sdcgensend(None) for row in rows: sdcgensend(row) del (rows) for row in diter: sdcgensend(row) sdcgen.close() elif mode == 'rcfile': if 'split' in formatArgs: filesNum = int(formatArgs['split']) filesList = [None] * filesNum lencols, rows = calclencols(RCFILE) for key in xrange(int(formatArgs['split'])): filesList[key] = open(os.path.join(fullpath, filename + '.' 
+ str(key)), 'wb') rcgen = [rcfile(x, lencols, compression, level) for x in filesList] rcgensend = [x.send for x in rcgen] for j in rcgensend: j(None) for row in rows: rcgensend[row[0]](row[1:]) del (rows) for row in diter: rcgensend[row[0]](row[1:]) for j in rcgen: j.close() elif mode: raise functions.OperatorError(__name__.rsplit('.')[-1], "Wrong compression format provided.Choose between sdc,rcfile or spac") try: if 'split' not in formatArgs: fileIter.close() except NameError: pass boolargs = lib.inoutparsing.boolargs + ['compression'] def Source(): global boolargs, nonstringargs return SourceNtoOne(outputData, boolargs, lib.inoutparsing.nonstringargs, lib.inoutparsing.needsescape, connectionhandler=True) if not ('.' in __name__): """ This is needed to be able to test the function, put it at the end of every new function you create """ import sys import setpath from functions import * testfunction() if __name__ == "__main__": reload(sys) sys.setdefaultencoding('utf-8') import doctest doctest.testmod()
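# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original operator, written in portable
# Python 3): the column-block layout the sdc/rcfile writers above build.
# Rows are buffered, transposed into columns, low-cardinality columns are
# dictionary-encoded, every chunk is zlib-compressed, and the per-column
# compressed sizes go into a fixed header in front of the block.  The byte
# layout is simplified and is not the exact on-disk format coutput writes.
# ---------------------------------------------------------------------------
import pickle
import struct
import zlib
from array import array


def write_block(fileobj, rows, level=2):
    cols = list(zip(*rows))                  # row-major -> column-major
    # Header holds (values_len, index_len) per column; index_len == 0 means
    # the column was stored directly instead of dictionary-encoded.
    header = [0] * (2 * len(cols))
    body = bytearray()
    for i, col in enumerate(cols):           # columns assumed homogeneous
        distinct = sorted(set(col))
        if len(distinct) > 0.67 * len(col):  # high cardinality: store values
            chunk = zlib.compress(pickle.dumps(list(col)), level)
            header[2 * i] = len(chunk)
            body += chunk
        else:                                # low cardinality: dict + indexes
            lookup = {v: n for n, v in enumerate(distinct)}
            idx_type = 'B' if len(distinct) < 256 else 'H'
            dict_chunk = zlib.compress(pickle.dumps(distinct), level)
            idx_chunk = zlib.compress(
                array(idx_type, (lookup[v] for v in col)).tobytes(), level)
            header[2 * i] = len(dict_chunk)
            header[2 * i + 1] = len(idx_chunk)
            body += dict_chunk + idx_chunk
    fileobj.write(struct.pack('!%di' % len(header), *header))
    fileobj.write(body)


if __name__ == "__main__":
    import io
    buf = io.BytesIO()
    # Same toy rows as the table1 doctest at the top of the module.
    write_block(buf, [("James", 10, 2), ("Mark", 7, 3), ("Lila", 74, 1)])
    print("%d bytes written" % len(buf.getvalue()))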
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import errno import os import re import signal import subprocess from datetime import datetime from functools import reduce import psutil from jinja2 import Template from airflow.configuration import conf from airflow.exceptions import AirflowException # When killing processes, time to wait after issuing a SIGTERM before issuing a # SIGKILL. DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM = conf.getint( 'core', 'KILLED_TASK_CLEANUP_TIME' ) KEY_REGEX = re.compile(r'^[\w.-]+$') def validate_key(k, max_length=250): """ Validates value used as a key. """ if not isinstance(k, str): raise TypeError("The key has to be a string") elif len(k) > max_length: raise AirflowException( "The key has to be less than {0} characters".format(max_length)) elif not KEY_REGEX.match(k): raise AirflowException( "The key ({k}) has to be made of alphanumeric characters, dashes, " "dots and underscores exclusively".format(k=k)) else: return True def alchemy_to_dict(obj): """ Transforms a SQLAlchemy model instance into a dictionary """ if not obj: return None output = {} for col in obj.__table__.columns: value = getattr(obj, col.name) if isinstance(value, datetime): value = value.isoformat() output[col.name] = value return output def ask_yesno(question): """ Helper to get yes / no answer from user. """ yes = {'yes', 'y'} no = {'no', 'n'} # pylint: disable=invalid-name done = False print(question) while not done: choice = input().lower() if choice in yes: return True elif choice in no: return False else: print("Please respond by yes or no.") def is_container(obj): """ Test if an object is a container (iterable) but not a string """ return hasattr(obj, '__iter__') and not isinstance(obj, str) def as_tuple(obj): """ If obj is a container, returns obj as a tuple. Otherwise, returns a tuple containing obj. 
""" if is_container(obj): return tuple(obj) else: return tuple([obj]) def chunks(items, chunk_size): """ Yield successive chunks of a given size from a list of items """ if chunk_size <= 0: raise ValueError('Chunk size must be a positive integer') for i in range(0, len(items), chunk_size): yield items[i:i + chunk_size] def reduce_in_chunks(fn, iterable, initializer, chunk_size=0): """ Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer """ if len(iterable) == 0: return initializer if chunk_size == 0: chunk_size = len(iterable) return reduce(fn, chunks(iterable, chunk_size), initializer) def as_flattened_list(iterable): """ Return an iterable with one level flattened >>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink'))) ['blue', 'red', 'green', 'yellow', 'pink'] """ return [e for i in iterable for e in i] def pprinttable(rows): """Returns a pretty ascii table from tuples If namedtuple are used, the table will have headers """ if not rows: return None if hasattr(rows[0], '_fields'): # if namedtuple headers = rows[0]._fields else: headers = ["col{}".format(i) for i in range(len(rows[0]))] lens = [len(s) for s in headers] for row in rows: for i in range(len(rows[0])): slenght = len("{}".format(row[i])) if slenght > lens[i]: lens[i] = slenght formats = [] hformats = [] for i in range(len(rows[0])): if isinstance(rows[0][i], int): formats.append("%%%dd" % lens[i]) else: formats.append("%%-%ds" % lens[i]) hformats.append("%%-%ds" % lens[i]) pattern = " | ".join(formats) hpattern = " | ".join(hformats) separator = "-+-".join(['-' * n for n in lens]) tab = "" tab += separator + '\n' tab += (hpattern % tuple(headers)) + '\n' tab += separator + '\n' def _format(t): return "{}".format(t) if isinstance(t, str) else t for line in rows: tab += pattern % tuple(_format(t) for t in line) + '\n' tab += separator + '\n' return tab def reap_process_group(pgid, log, sig=signal.SIGTERM, timeout=DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM): """ Tries really hard to terminate all processes in the group (including grandchildren). Will send sig (SIGTERM) to the process group of pid. If any process is alive after timeout a SIGKILL will be send. :param log: log handler :param pgid: process group id to kill :param sig: signal type :param timeout: how much time a process has to terminate """ returncodes = {} def on_terminate(p): log.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode) returncodes[p.pid] = p.returncode def signal_procs(sig): try: os.killpg(pgid, sig) except OSError as err: # If operation not permitted error is thrown due to run_as_user, # use sudo -n(--non-interactive) to kill the process if err.errno == errno.EPERM: subprocess.check_call( ["sudo", "-n", "kill", "-" + str(sig)] + [str(p.pid) for p in children] ) else: raise if pgid == os.getpgid(0): raise RuntimeError("I refuse to kill myself") try: parent = psutil.Process(pgid) children = parent.children(recursive=True) children.append(parent) except psutil.NoSuchProcess: # The process already exited, but maybe it's children haven't. 
children = [] for proc in psutil.process_iter(): try: if os.getpgid(proc.pid) == pgid and proc.pid != 0: children.append(proc) except OSError: pass log.info("Sending %s to GPID %s", sig, pgid) try: signal_procs(sig) except OSError as err: # No such process, which means there is no such process group - our job # is done if err.errno == errno.ESRCH: return returncodes _, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate) if alive: for proc in alive: log.warning("process %s did not respond to SIGTERM. Trying SIGKILL", proc) try: signal_procs(signal.SIGKILL) except OSError as err: if err.errno != errno.ESRCH: raise _, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate) if alive: for proc in alive: log.error("Process %s (%s) could not be killed. Giving up.", proc, proc.pid) return returncodes def parse_template_string(template_string): """ Parses Jinja template string. """ if "{{" in template_string: # jinja mode return None, Template(template_string) else: return template_string, None def render_log_filename(ti, try_number, filename_template): """ Given task instance, try_number, filename_template, return the rendered log filename :param ti: task instance :param try_number: try_number of the task :param filename_template: filename template, which can be jinja template or python string template """ filename_template, filename_jinja_template = parse_template_string(filename_template) if filename_jinja_template: jinja_context = ti.get_template_context() jinja_context['try_number'] = try_number return filename_jinja_template.render(**jinja_context) return filename_template.format(dag_id=ti.dag_id, task_id=ti.task_id, execution_date=ti.execution_date.isoformat(), try_number=try_number) def convert_camel_to_snake(camel_str): """ Converts CamelCase to snake_case. """ return re.sub('(?!^)([A-Z]+)', r'_\1', camel_str).lower() def merge_dicts(dict1, dict2): """ Merge two dicts recursively, returning new dict (input dict is not mutated). Lists are not concatenated. Items in dict2 overwrite those also found in dict1. """ merged = dict1.copy() for k, v in dict2.items(): if k in merged and isinstance(v, dict): merged[k] = merge_dicts(merged.get(k, {}), v) else: merged[k] = v return merged
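# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the template-dispatch
# pattern parse_template_string / render_log_filename implement -- a template
# containing "{{" is rendered with Jinja, anything else with str.format().
# The context values below are made up for demonstration.
# ---------------------------------------------------------------------------
from datetime import datetime
from jinja2 import Template


def render_filename(filename_template, **context):
    if "{{" in filename_template:                   # Jinja mode
        return Template(filename_template).render(**context)
    return filename_template.format(**context)      # plain str.format() mode


if __name__ == "__main__":
    ctx = dict(dag_id="example_dag", task_id="extract",
               execution_date=datetime(2020, 1, 1).isoformat(), try_number=1)
    print(render_filename(
        "{dag_id}/{task_id}/{execution_date}/{try_number}.log", **ctx))
    print(render_filename(
        "{{ dag_id }}/{{ task_id }}/attempt-{{ try_number }}.log", **ctx))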
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from quantum.agent.linux import ip_lib from quantum.common import exceptions from quantum.tests import base NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] LINK_SAMPLE = [ '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\' 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP ' 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' '\ alias openvswitch', '3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN ' '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff', '4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop ' 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff'] ADDR_SAMPLE = (""" 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE2 = (""" 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = 
("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") class TestSubProcessBase(base.BaseTestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('quantum.agent.linux.utils.execute') self.execute = self.execute_p.start() self.addCleanup(self.execute_p.stop) def test_execute_wrapper(self): ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo') self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'], root_helper='sudo') def test_execute_wrapper_int_options(self): ip_lib.SubProcessBase._execute([4], 'link', ('list',)) self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'], root_helper=None) def test_execute_wrapper_no_options(self): ip_lib.SubProcessBase._execute([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], root_helper=None) def test_run_no_namespace(self): base = ip_lib.SubProcessBase('sudo') base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], root_helper=None) def test_run_namespace(self): base = ip_lib.SubProcessBase('sudo', 'ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], root_helper='sudo') def test_as_root_namespace(self): base = ip_lib.SubProcessBase('sudo', 'ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], root_helper='sudo') def test_as_root_no_root_helper(self): base = ip_lib.SubProcessBase() self.assertRaises(exceptions.SudoRequired, base._as_root, [], 'link', ('list',)) class TestIpWrapper(base.BaseTestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() self.addCleanup(self.execute_p.stop) def test_get_devices(self): self.execute.return_value = '\n'.join(LINK_SAMPLE) retval = ip_lib.IPWrapper('sudo').get_devices() self.assertEqual(retval, [ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49')]) self.execute.assert_called_once_with('o', 'link', ('list',), 'sudo', None) def test_get_devices_malformed_line(self): self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) retval = ip_lib.IPWrapper('sudo').get_devices() self.assertEqual(retval, [ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49')]) self.execute.assert_called_once_with('o', 'link', ('list',), 'sudo', None) def test_get_namespaces(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces('sudo') self.assertEqual(retval, ['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.execute.assert_called_once_with('', 'netns', ('list',), root_helper='sudo') def test_add_tuntap(self): ip_lib.IPWrapper('sudo').add_tuntap('tap0') self.execute.assert_called_once_with('', 'tuntap', ('add', 'tap0', 'mode', 'tap'), 'sudo', None) def test_add_veth(self): ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1') self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1'), 'sudo', None) def test_get_device(self): dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0') self.assertEqual(dev.root_helper, 'sudo') self.assertEqual(dev.namespace, 'ns') self.assertEqual(dev.name, 'eth0') def 
test_ensure_namespace(self): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper('sudo') with mock.patch.object(ip.netns, 'exists') as ns_exists: ns_exists.return_value = False ip.ensure_namespace('ns') self.execute.assert_has_calls( [mock.call([], 'netns', ('add', 'ns'), 'sudo', None)]) ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual(ns.namespace, 'ns') def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper('sudo', 'ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper('sudo', 'ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = ip_lib.IPWrapper('sudo', 'ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual(mock_is_empty.mock_calls, []) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('sudo', 'ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('sudo', 'ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(ip_ns_cmd_cls.mock_calls, expected) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() ip_lib.IPWrapper('sudo').add_device_to_namespace(dev) self.assertEqual(dev.mock_calls, []) class TestIPDevice(base.BaseTestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def 
test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') self.assertNotEqual(dev1, None) def test_str(self): self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0') class TestIPCommandBase(base.BaseTestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.root_helper = 'sudo' self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run('link', 'show') self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run('link', options='o') self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) def test_as_root(self): self.ip_cmd._as_root('link') self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link', ), False)]) def test_as_root_with_options(self): self.ip_cmd._as_root('link', options='o') self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link', ), False)]) class TestIPDeviceCommandBase(base.BaseTestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev.root_helper = 'sudo' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual(self.ip_cmd.name, 'eth0') class TestIPCmdBase(base.BaseTestCase): def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' self.parent.root_helper = 'sudo' def _assert_call(self, options, args): self.parent.assert_has_calls([ mock.call._run(options, self.command, args)]) def _assert_sudo(self, options, args, force_root_namespace=False): self.parent.assert_has_calls( [mock.call._as_root(options, self.command, args, force_root_namespace)]) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.parent._run.return_value = LINK_SAMPLE[1] self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) def test_set_address(self): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) def test_set_mtu(self): self.link_cmd.set_mtu(1500) self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) def test_set_up(self): self.link_cmd.set_up() self._assert_sudo([], ('set', 'eth0', 'up')) def test_set_down(self): self.link_cmd.set_down() self._assert_sudo([], ('set', 'eth0', 'down')) def test_set_netns(self): self.link_cmd.set_netns('foo') self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) self.assertEqual(self.parent.namespace, 'foo') def test_set_name(self): self.link_cmd.set_name('tap1') self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) self.assertEqual(self.parent.name, 'tap1') def test_set_alias(self): self.link_cmd.set_alias('openvswitch') self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) def test_delete(self): self.link_cmd.delete() self._assert_sudo([], ('delete', 'eth0')) def test_address_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd') def test_mtu_property(self): self.parent._execute = 
mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.mtu, 1500) def test_qdisc_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qdisc, 'mq') def test_qlen_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qlen, 1000) def test_alias_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.alias, 'openvswitch') def test_state_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.state, 'UP') def test_settings_property(self): expected = {'mtu': 1500, 'qlen': 1000, 'state': 'UP', 'qdisc': 'mq', 'brd': 'ff:ff:ff:ff:ff:ff', 'link/ether': 'cc:dd:ee:ff:ab:cd', 'alias': 'openvswitch'} self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.attributes, expected) self._assert_call('o', ('show', 'eth0')) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) def test_add_address(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'global', 'dev', 'tap0')) def test_add_address_scoped(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', scope='link') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'link', 'dev', 'tap0')) def test_del_address(self): self.addr_cmd.delete(4, '192.168.45.100/24') self._assert_sudo([4], ('del', '192.168.45.100/24', 'dev', 'tap0')) def test_flush(self): self.addr_cmd.flush() self._assert_sudo([], ('flush', 'tap0')) def test_list(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', broadcast='::'), dict(ip_version=6, scope='link', dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', broadcast='::')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case) self.assertEqual(self.addr_cmd.list(), expected) self._assert_call([], ('show', 'tap0')) def test_list_filtered(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: output = '\n'.join(test_case.split('\n')[0:4]) self.parent._run.return_value = output self.assertEqual(self.addr_cmd.list('global', filters=['permanent']), expected) self._assert_call([], ('show', 'tap0', 'permanent', 'scope', 'global')) class TestIpRouteCommand(TestIPCmdBase): def setUp(self): super(TestIpRouteCommand, self).setUp() self.parent.name = 'eth0' self.command = 'route' self.route_cmd = ip_lib.IpRouteCommand(self.parent) def test_add_gateway(self): gateway = '192.168.45.100' metric = 100 self.route_cmd.add_gateway(gateway, metric) self._assert_sudo([], ('replace', 'default', 'via', gateway, 
'metric', metric, 'dev', self.parent.name)) def test_del_gateway(self): gateway = '192.168.45.100' self.route_cmd.delete_gateway(gateway) self._assert_sudo([], ('del', 'default', 'via', gateway, 'dev', self.parent.name)) def test_get_gateway(self): test_cases = [{'sample': GATEWAY_SAMPLE1, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE2, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE3, 'expected': None}, {'sample': GATEWAY_SAMPLE4, 'expected': {'gateway': '10.35.19.254'}}] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case['sample']) self.assertEqual(self.route_cmd.get_gateway(), test_case['expected']) def test_pullup_route(self): # interface is not the first in the list - requires # deleting and creating existing entries output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) def test_pullup_route_first(self): # interface is first in the list - no changes output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') # Check two calls - device get and subnet get self.assertEqual(len(self.parent._run.mock_calls), 2) class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) def test_add_namespace(self): ns = self.netns_cmd.add('ns') self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) self.assertEqual(ns.namespace, 'ns') def test_delete_namespace(self): with mock.patch('quantum.agent.linux.utils.execute'): self.netns_cmd.delete('ns') self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) def test_namespace_exists(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertTrue( self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_namespace_doest_not_exist(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertFalse( self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_execute(self): self.parent.namespace = 'ns' with mock.patch('quantum.agent.linux.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], root_helper='sudo', check_exit_code=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('quantum.agent.linux.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( ['FOO=1', 'BAR=2', 'ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], root_helper='sudo', check_exit_code=True) class TestDeviceExists(base.BaseTestCase): def test_device_exists(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = LINK_SAMPLE[1] self.assertTrue(ip_lib.device_exists('eth0')) 
            _execute.assert_called_once_with('o', 'link', ('show', 'eth0'))

    def test_device_does_not_exist(self):
        with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
            _execute.return_value = ''
            _execute.side_effect = RuntimeError
            self.assertFalse(ip_lib.device_exists('eth0'))
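# Note: the assertions above all check the argument list handed to
# quantum.agent.linux.utils.execute. As a minimal illustrative sketch of the
# command-building convention those assertions encode (a hypothetical helper,
# not the real ip_lib implementation), options become '-<opt>' flags and a
# namespace wraps the call in 'ip netns exec <ns>':


def build_ip_command(options, command, args, namespace=None):
    """Assemble an ip(8) argument list the way the tests above expect."""
    cmd = ['ip'] + ['-%s' % opt for opt in options] + [command] + list(args)
    if namespace:
        # Namespaced calls are re-wrapped; running them needs a root helper.
        cmd = ['ip', 'netns', 'exec', namespace] + cmd
    return cmd

# build_ip_command([], 'link', ('list',))        -> ['ip', 'link', 'list']
# build_ip_command([4], 'link', ('list',))       -> ['ip', '-4', 'link', 'list']
# build_ip_command([], 'link', ('list',), 'ns')  -> ['ip', 'netns', 'exec', 'ns',
#                                                    'ip', 'link', 'list']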
#County Road Inventory Mapbooks #Adam Breznicky. January 2014 #requires installation of ReportLab and PyPDF22 #T:\DATAMGT\MAPPING\Programs\Arc10\Arc10_Tools\Modules # # # # #imports import arcpy, os, string, zipfile, shutil, PyPDF2 from arcpy import mapping from arcpy import env from PyPDF2 import PdfFileMerger, PdfFileReader from reportlab.pdfgen import canvas from reportlab.lib.pagesizes import letter from reportlab.lib.units import inch from reportlab.platypus import BaseDocTemplate, Paragraph, frames, Table, TableStyle, Frame, flowables, Flowable, PageTemplate import datetime now = datetime.datetime.now() month = now.strftime("%B") suffixDate = now.strftime("%Y%m%d") # # #Variables: # # #INPUT VARIABLES: #variables listed and commented out for optional use as an arcgis tool # cycle = arcpy.GetParameterAsText(0) # dataYear = arcpy.GetParameterAsText(1) # dbaseNAME = arcpy.GetParameterAsText(2) # output = arcpy.GetParameterAsText(3) cycle = "South" dataYear = "2014" dbaseNAME = "Connection to Comanche.sde" output = "C:\\TxDOT\\County Road Inventory Mapbooks\\QC_" + suffixDate #DEPENDENT VARIABLES if not os.path.exists(output + os.sep + dataYear): os.makedirs(output + os.sep + dataYear) outputDir = output + os.sep + dataYear #create working geodatabase arcpy.CreateFileGDB_management(outputDir, "Working_Data.gdb") workspace = outputDir + os.sep + "Working_Data.gdb" #SHAPEFILE/FC VARIABLES if cycle == "North": CountyLayer = "C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\South and North County Shapefiles\\NorthCounties.shp" elif cycle == "South": CountyLayer = "C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\South and North County Shapefiles\\SouthCounties.shp" else: arcpy.AddError("You must use ether 'North' or 'South' for your cycle option") inventory2 = workspace + os.sep + "Roadway_Events_Dissolved" projectedRoads = workspace + os.sep + "Roadway_Events_Projected" dissRoads = workspace + os.sep + "RoadLog_Dissolved" #DIRECTORY VARIABLES CoversFolder = outputDir + os.sep + "Covers" LegendFolder = outputDir + os.sep + "Legend" IndexFolder = outputDir + os.sep + "GridIndexes" ShapefileFolder = outputDir + os.sep + "Shapefiles" MapPagesFolder = outputDir + os.sep + "MapPages" MapBooksFolder = outputDir + os.sep + "Combined_PDF" RoadLogFolder = outputDir + os.sep + "RoadLog" PDFLogFolder = RoadLogFolder + os.sep + "PDF" completedPackets = outputDir + os.sep + "_Completed_Packets" descriptiveDirectory = completedPackets + os.sep + "_Descriptive_PDFs" # dataGDB = outputDir + os.sep + "Data_Copy.gdb" GridLayer = dataGDB + os.sep + "County_Grids_22K" subfiles = dataGDB + os.sep + "SUBFILES" txdotRoadways = dataGDB + os.sep + "TXDOT_Roadways" #compile global county name and number lists lowerNames = [] upperNames = [] countyNumbers = [] cursor = arcpy.SearchCursor(CountyLayer) for row in cursor: lowerNames.append(str(row.CNTY_NM)) upperNames.append(str(row.CNTY_NM).upper()) countyNumbers.append(str(row.CNTY_NBR).replace('.0',"")) # adjusted to fix float problem del cursor arcpy.AddMessage("County names and numbers lists compiled.") # # # #Define the functions of the process # # # #copy data local def copyDataLocal(): #create a file GDB and copy Comanche data local arcpy.CreateFileGDB_management(outputDir, "Data_Copy.gdb") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Map_Index_Grids\\TPP_GIS.MCHAMB1.County_Grids_22K", dataGDB + os.sep + "County_Grids_22K") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.SUBFILES", dataGDB + 
os.sep + "SUBFILES") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Roadways\\TPP_GIS.MCHAMB1.TXDOT_Roadways", dataGDB + os.sep + "TXDOT_Roadways") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.County\\TPP_GIS.MCHAMB1.County", dataGDB + os.sep + "Counties") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.City\\TPP_GIS.MCHAMB1.City", dataGDB + os.sep + "Cities") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Roadways\\TPP_GIS.MCHAMB1.City_Streets", dataGDB + os.sep + "Streets") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.City\\TPP_GIS.MCHAMB1.City_Points", dataGDB + os.sep + "City_Points") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Water\\TPP_GIS.MCHAMB1.Dams", dataGDB + os.sep + "Dam") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Water\\TPP_GIS.MCHAMB1.Water_Bodies", dataGDB + os.sep + "Water_Bodies") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Water\\TPP_GIS.MCHAMB1.Streams", dataGDB + os.sep + "Streams") arcpy.Copy_management("Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Park\\TPP_GIS.MCHAMB1.Public_Lands", dataGDB + os.sep + "Public_Lands") arcpy.AddMessage("Comanche data copied local.") # #1 #COVER # # def createCovers(): arcpy.AddMessage("Generating Covers...") #make directory os.makedirs(outputDir + os.sep + "Covers") map = arcpy.mapping.MapDocument("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\MXD\\CRI_Covers.mxd") dataFrame = arcpy.mapping.ListDataFrames(map)[0] for i in lowerNames: for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"): if textElement.name == "CountyName": textElement.text = i + " - " + dataYear if textElement.name == "Year": textElement.text = "Copyright " + dataYear + " TxDOT" arcpy.AddMessage(i + " Cover Complete.") arcpy.mapping.ExportToPDF(map,CoversFolder + os.sep + i) del map del dataFrame arcpy.AddMessage("Covers Complete.") # #2 #LEGEND # # def createLegend(): arcpy.AddMessage("Generating Legend...") #make directory os.makedirs(outputDir + os.sep + "Legend") lastYear = int(dataYear)-1 map = arcpy.mapping.MapDocument("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\MXD\\Legend.mxd") dataFrame = arcpy.mapping.ListDataFrames(map)[0] for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"): if textElement.name == "Title": textElement.text = "County Road Inventory " + str(dataYear) if textElement.name == "Copyright": textElement.text = "Copyright " + str(dataYear) + " Texas Department of Transportation " if textElement.name == "Disclaimer1": textElement.text = str(lastYear) + "." 
arcpy.RefreshActiveView() arcpy.mapping.ExportToPDF(map, LegendFolder + os.sep + "Legend_" + str(dataYear) + ".pdf") del map del dataFrame arcpy.AddMessage("Legend Complete.") # #3 #GRID INDEX # # def createGridIndex(): arcpy.AddMessage("Updating the Grid Indexes...") #make directory os.makedirs(outputDir + os.sep + "GridIndexes") map = arcpy.mapping.MapDocument("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\MXD\\GridIndexUpdate.mxd") dataFrame = arcpy.mapping.ListDataFrames(map)[0] for lyr in arcpy.mapping.ListLayers(map): if lyr.name == "TPP_GIS.MCHAMB1.County_Grids_22K": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "County_Grids_22K") if lyr.name == "TPP_GIS.MCHAMB1.County": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Counties") if lyr.name == "TXDOT_Roadways": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "TXDOT_Roadways") if lyr.name == "City": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Cities") arcpy.RefreshActiveView() for i in lowerNames: county = i for lyr in arcpy.mapping.ListLayers(map): if lyr.name == "TPP_GIS.MCHAMB1.County_Grids_22K": lyr.definitionQuery = "CNTY_NM = '" + county + "'" arcpy.RefreshActiveView() extent = lyr.getSelectedExtent() for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"): if textElement.name == "topYEAR": textElement.text = dataYear if textElement.name == "nayme": textElement.text = county + " County - Mapbook Index" if textElement.name == "bottomDate": textElement.text = now.strftime("%B") + " " + now.strftime("%d") + ", " + dataYear if textElement.name == "copyright": textElement.text = "Copyright " + dataYear if textElement.name == "finalDate": lastYear = int(dataYear) - 1 textElement.text = str(lastYear) + "." dataFrame.extent = extent dataFrame.scale *= 1.05 arcpy.RefreshActiveView() arcpy.mapping.ExportToPDF(map, IndexFolder + os.sep + county + " County Mapbook Index.pdf") arcpy.AddMessage(county + " County Mapbook Index.pdf") del map del dataFrame arcpy.AddMessage("Map Indexes Complete.") # #4 #FORMAT DATA #prep data for county shapefile and road log creation # def formatData(): arcpy.AddMessage("Formating Data...") #grab data from comanche to route the roadway events #outboundSHP = "Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.Outbound\\" # lastYear = int(dataYear) - 1 # if arcpy.Exists(outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q4"): # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q4" # elif arcpy.Exists(outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q3"): # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q3" # elif arcpy.Exists(outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q2"): # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q2" # elif arcpy.Exists(outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q1"): # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + dataYear + "_Q1" # elif arcpy.Exists(outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + str(lastYear) + "_Q4"): # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + str(lastYear) + "_Q4" # else: # pubRDS = outboundSHP + "TPP_GIS.MCHAMB1.TXDOT_RTE_RDBD_LN_" + str(lastYear) + "_Q3" #this a variable added for QC purposes to utilize the current txdot roadways instead of the recent quarterly published version. it is a temporary placeholder pubRDS = dataGDB + os.sep + "TXDOT_Roadways" arcpy.AddMessage("Database connection established. 
Routing roadways event layer.") #route subfile events onto txdot roadways and create a shapefile for creating a shapefile for each county tempRTEevents = "tempRTEevents" arcpy.MakeRouteEventLayer_lr(pubRDS, "RTE_ID", subfiles, "RTE_ID LINE BMP EMP", tempRTEevents) eventlayer = mapping.Layer(tempRTEevents) eventlayer.definitionQuery = """ "SUBFILE" = 2 AND "HIGHWAY_STATUS" = 4 AND "ADMIN_SYSTEM" = 3 AND "CONTROLSEC" NOT LIKE 'TOL%' AND "CONTROLSEC" NOT LIKE 'C%' AND "CONTROLSEC" NOT LIKE 'M%' """ arcpy.AddMessage("Event layer created.") arcpy.FeatureClassToFeatureClass_conversion(eventlayer, workspace, "Roadway_Events") inventory = workspace + os.sep + "Roadway_Events" arcpy.AddMessage("Event layer saved to the workspace database.") #pull the full street names from txdot roadways arcpy.AddMessage("Starting street name update") #define a dictionary to use to compile the roadway names dictNM = {} #use the search cursor to collect the names and put them in the dictionary cursor = arcpy.SearchCursor(txdotRoadways, """RTE_CLASS = '2'""") for row in cursor: ID = row.getValue("RTE_ID") name = row.getValue("FULL_ST_NM") if ID not in dictNM.keys(): dictNM[str(ID)] = str(name) del cursor arcpy.AddMessage("Names collected from TxDOT_Roadways") #create a field in the inventory roads and apply the collected names from the dictionary arcpy.AddField_management(inventory, "ST_NAME", "TEXT", "", "", 50) arcpy.AddMessage("Field created") cursor = arcpy.UpdateCursor(inventory) for row in cursor: ID = row.getValue("RTE_ID") CS = str(row.getValue("CONTROLSEC")).split("A")[-1] if str(ID) in dictNM.keys(): if str(dictNM[ID]) == None or str(dictNM[ID]) == " ": row.setValue("ST_NAME", "County Road " + CS) cursor.updateRow(row) else: row.setValue("ST_NAME", str(dictNM[ID])) cursor.updateRow(row) del cursor arcpy.AddMessage("Street names applied.") #make a copy of the routed roadways in the statewide projection for the road log process later spatialRef = arcpy.Describe(CountyLayer).spatialReference arcpy.Project_management(inventory, projectedRoads, spatialRef) arcpy.AddMessage("Roadway events re-projected for the road log.") #the next 4 groups of code have been added in a recent version of this script after successful runs revealed a need to dissolve rows in the shapefile for each county. this section seems clunky to me, maybe another workflow is in order? #add a unique flag field and populate it based on the attributes its row arcpy.AddField_management(inventory, "unique", "TEXT", "", "", 250) cursor = arcpy.UpdateCursor(inventory) for row in cursor: county = row.getValue("COUNTY") CS = row.getValue("CONTROLSEC") design = row.getValue("HIGHWAY_DESIGN") surface = row.getValue("SURFACE_TYPE") lanes = row.getValue("NUMBER_OF_LANES") row.setValue("unique", str(county) + str(CS) + str(design) + str(surface) + str(lanes)) cursor.updateRow(row) del cursor arcpy.AddMessage("Unique flag field created and populated.") #use the unique field to dissolve the roads. this removes multiple features within the final county shapefiles that have all the same attributes. 
This problem exists because of subfiles records with the same attributes amongst the fields used here but different attributes in fields not used (example being different FC, MPA or UAN) #inventory2 = workspace + os.sep + "Roadway_Events_Dissolved" arcpy.Dissolve_management(inventory, inventory2, ["unique"], [["LEN_OF_SECTION","SUM"],["ST_NAME","FIRST"],["CONTROLSEC","FIRST"],["HIGHWAY_DESIGN","FIRST"],["SURFACE_TYPE","FIRST"],["COUNTY", "FIRST"],["NUMBER_OF_LANES","FIRST"]], "SINGLE_PART") arcpy.AddMessage("The routed events have been 'uniquely' dissolved.") #add new fields to ensure the delivered shapefiles have clean/proper field names since esri's tools screwed them up and won't let you just change a field name arcpy.AddField_management(inventory2, "COUNTY", "LONG") arcpy.AddField_management(inventory2, "ROUTE", "TEXT", "", "", 10) arcpy.AddField_management(inventory2, "ST_NAME", "TEXT", "", "", 50) arcpy.AddField_management(inventory2, "LENGTH", "DOUBLE") arcpy.AddField_management(inventory2, "SURFACE", "LONG") arcpy.AddField_management(inventory2, "DESIGN", "LONG") arcpy.AddField_management(inventory2, "LANES", "LONG") arcpy.AddField_management(inventory2, "COMMENTS", "TEXT", "", "", 100) arcpy.AddMessage("Replacement fields have been created.") #populate the new fields with the data from the dissolved fields with the ugly names cursor = arcpy.UpdateCursor(inventory2) for row in cursor: CS = row.getValue("FIRST_CONTROLSEC") length = row.getValue("SUM_LEN_OF_SECTION") surface = row.getValue("FIRST_SURFACE_TYPE") design = row.getValue("FIRST_HIGHWAY_DESIGN") lanes = row.getValue("FIRST_NUMBER_OF_LANES") name = row.getValue("FIRST_ST_NAME") county = row.getValue("FIRST_COUNTY") row.setValue("ROUTE", CS) row.setValue("LENGTH", length) row.setValue("SURFACE", surface) row.setValue("DESIGN", design) row.setValue("LANES", lanes) row.setValue("ST_NAME", name) row.setValue("COUNTY", county) cursor.updateRow(row) del cursor arcpy.AddMessage("New Fields have been populated.") #continue with the formatting data process. remove unwanted fields. deleteFields = ["unique", "SUM_LEN_OF_SECTION", "FIRST_ST_NAME", "FIRST_CONTROLSEC", "FIRST_HIGHWAY_DESIGN", "FIRST_SURFACE_TYPE", "FIRST_COUNTY", "FIRST_NUMBER_OF_LANES"] arcpy.DeleteField_management(inventory2, deleteFields) arcpy.AddMessage("Fields reconfigured to match data dictionary.") arcpy.AddMessage("Data Formatted.") # #5 #SHAPEFILES # # #this function was copied from previous the years' map book script. def createShapefiles(): arcpy.AddMessage("Creating Shapefiles...") #make directory os.makedirs(outputDir + os.sep + "Shapefiles") #reference the dissolved roadway events feature class from the workspace #inventory2 = workspace + os.sep + "Roadway_Events_Dissolved" #iterate through the county names list and create a shapefile for each one countRange = range(0,len(upperNames)) arcpy.AddMessage("Found %s counties..." 
% len(upperNames)) for i in countRange: COUNTY_NAME = upperNames[i] shapeFileName = COUNTY_NAME + "_INVENTORY_" + dataYear +".shp" shapeFilePath = ShapefileFolder + os.sep + shapeFileName shapeFileDefQ = "\"COUNTY\" = "+ str(countyNumbers[i]) arcpy.Select_analysis(inventory2, shapeFilePath, shapeFileDefQ) arcpy.AddMessage("%s definition query: %s" % (shapeFileName, shapeFileDefQ)) arcpy.AddMessage("%s of %s Exporting County %s" % (i+1 , len(countRange),shapeFileName)) arcpy.AddMessage("Shapefiles Complete.") # #6 #MAP PAGES # # def createMapPages(): arcpy.AddMessage("Generating Map Pages...") #make directory os.makedirs(outputDir + os.sep + "MapPages") map = arcpy.mapping.MapDocument("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\MXD\\CountyRoadInventoryMaps.mxd") dataFrame = arcpy.mapping.ListDataFrames(map)[0] for lyr in arcpy.mapping.ListLayers(map): if lyr.name == "Centerline": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "TXDOT_Roadways") if lyr.name == "CountyRoadsRouted": lyr.replaceDataSource(workspace, "FILEGDB_WORKSPACE", "Roadway_Events") lyr.visible = True arcpy.AddMessage("Routed Roadways layer replaced.") if lyr.name == "Streets": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Streets") if lyr.name == "TPP_GIS.MCHAMB1.County": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Counties") if lyr.name == "TPP_GIS.MCHAMB1.City_Points": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "City_Points") if lyr.name == "Dam": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Dam") if lyr.name == "TPP_GIS.MCHAMB1.Water_Bodies": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Water_Bodies") if lyr.name == "TPP_GIS.MCHAMB1.Streams": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Streams") if lyr.name == "TPP_GIS.MCHAMB1.Public_Lands": lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Public_Lands") for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"): if textElement.name == "Year": textElement.text = dataYear arcpy.RefreshActiveView() cursor = arcpy.SearchCursor(GridLayer, "CYCLE = '" + cycle + "' ", "", "CNTY_NM; MAP_ID; WEST; NORTH; EAST; SOUTH; SHAPE", "CNTY_NM A") for row in cursor: dataFrame.extent = row.shape.extent MapID = str(row.Map_ID) for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"): if textElement.name == "PageNumber": textElement.text = " Page-" + MapID if textElement.name == "CountyName": textElement.text = row.CNTY_NM + " County" if textElement.name == "East": textElement.text = row.East if row.East == 0: textElement.text = " " if textElement.name == "West": textElement.text = row.West if row.West == 0: textElement.text = " " if textElement.name == "North": textElement.text = row.North if row.North == 0: textElement.text = " " if textElement.name == "South": textElement.text = row.South if row.South == 0: textElement.text = " " arcpy.RefreshActiveView() arcpy.mapping.ExportToPDF(map, MapPagesFolder + os.sep + row.CNTY_NM + " " + MapID + ".pdf") arcpy.AddMessage(MapPagesFolder + os.sep + row.CNTY_NM + " " + MapID + ".pdf") del cursor del map del dataFrame arcpy.AddMessage("Map Pages Complete.") # #7 #COMBINE MAPBOOKS # # def combineMapbooks(): arcpy.AddMessage("Compiling Mapbooks for each county...") #make directory os.makedirs(outputDir + os.sep + "Combined_PDF") #compile a dictionary of the number of pages for each county pageDICT = {} cursor = arcpy.SearchCursor(CountyLayer) for row in cursor: currentCNTY = row.getValue("CNTY_U") numPages = row.getValue("Max_Pages") 
pageDICT[str(currentCNTY)] = str(numPages) del cursor arcpy.AddMessage("Number-of-pages dictionary compiled.") #iterate through the counties within the dictionary and compile all the page numbers up until its maximum number of pages from the dictionary value for eachCO in pageDICT.keys(): #announce the current county being compiled and the number of pages being compiled for that county arcpy.AddMessage(str(eachCO) + " has " + str(pageDICT[eachCO]) + " pages.") theGoal = pageDICT[eachCO] #use the PyPDF2 module to merge the PDFs merger = PdfFileMerger() theCover = CoversFolder + os.sep + str(eachCO) + ".pdf" theLegend = LegendFolder + os.sep + "Legend_" + dataYear + ".pdf" theIndex = IndexFolder + os.sep + str(eachCO) + " County Mapbook Index.pdf" merger.append(PdfFileReader(file(theCover, 'rb'))) merger.append(PdfFileReader(file(theLegend, 'rb'))) merger.append(PdfFileReader(file(theIndex, 'rb'))) x = 1 while x <= int(theGoal): currentpage = x pagevalue = str(currentpage) thePage = MapPagesFolder + os.sep + str(eachCO) + " " + pagevalue + ".pdf" merger.append(PdfFileReader(file(thePage, 'rb'))) arcpy.AddMessage(str(eachCO) + " page " + pagevalue + " of " + str(theGoal)) x += 1 theOutput = open(MapBooksFolder + os.sep + str(eachCO) + "_MAPBOOK_" + dataYear + ".pdf", "wb") merger.write(theOutput) theOutput.close() arcpy.AddMessage(str(eachCO) + " complete.") arcpy.AddMessage("Mapbooks Compiled.") # #8 #Road Logs/Report data prep # # # #C:\TxDOT\Scripts\CountyRoadInventoryMapBook\ROAD_LOG_INSTRUCTION\New folder\FINAL_How to create the County Road Update Summary_1.doc #replicated the process described in the process above. # #Report prep: here we go... # def formatRoadLog(): arcpy.AddMessage("Generating Road Log...") #make directory os.makedirs(outputDir + os.sep + "RoadLog") #projectedRoads = workspace + os.sep + "Roadway_Events_Projected" #intersect the county boundaries, county grids, and routed roads logRoads = workspace + os.sep + "RoadLog_Intersect" arcpy.Intersect_analysis([projectedRoads,CountyLayer,GridLayer], logRoads) arcpy.AddMessage("Intersect Complete.") #clean up the intersect of grids which overlap neighbor counties. recalculate the new segment lengths since the intersect broken the linework at the county and grid boundaries. 
cursor = arcpy.UpdateCursor(logRoads) desc = arcpy.Describe(logRoads) shapefieldname = desc.ShapeFieldName for row in cursor: #recalculate the length for the cut up linework feat = row.getValue(shapefieldname) row.setValue("LEN_OF_SECTION", feat.length*.000621371) cursor.updateRow(row) #remove overlapping neighbor grids #get the linework county number: frst = row.getValue("COUNTY") #get the county boundary county number: scnd = row.getValue("CNTY_NBR") #get the grid layer county value: thrd = row.getValue("CNTY_NM_12") #get the county boundary county name: frth = row.getValue("CNTY_NM") #deletes the row if the linework county doesn't match the county boundary number if int(frst) != int(scnd): cursor.deleteRow(row) #deletes the row if the grid county is overlapping the county boundary if thrd != frth: cursor.deleteRow(row) del cursor arcpy.AddMessage("Intersected roadways have been cleaned up.") # #this section is the VB script replacement arcpy.AddMessage("Compiling page numbers") #sort table properly and collect the numbers via a cursor cursor = arcpy.SearchCursor(logRoads,"","","RTE_ID; MAP_ID","RTE_ID A; MAP_ID A") current = "" previous = "" counter = 0 endAll = int(arcpy.GetCount_management(logRoads).getOutput(0)) beAll = endAll - 1 thesePages = [] dictionary = {} #use the searchCursor to compile all the page numbers for each route ID into a list, and then use that list as the value with the route ID as the key in the dictionary for row in cursor: current = row.getValue("RTE_ID") if counter == 0: previous = current thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',"")) counter += 1 elif previous == current and counter != 0 and counter != beAll: if "," + str(row.getValue("MAP_ID")).replace('.0',"") not in thesePages: thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',"")) counter += 1 else: counter += 1 elif previous == current and counter == beAll: if "," + str(row.getValue("MAP_ID")).replace('.0',"") not in thesePages: thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',"")) thesePages[0] = str(thesePages[0]).replace(",", "") concatPGS = ''.join(thesePages) dictionary[str(previous)] = concatPGS counter += 1 elif previous != current and counter == beAll: thesePages[0] = str(thesePages[0]).replace(",", "") concatPGS = ''.join(thesePages) dictionary[str(previous)] = concatPGS thesePages = [] previous = current dictionary[str(previous)] = str(row.getValue("MAP_ID")).replace('.0',"") counter += 1 else: thesePages[0] = str(thesePages[0]).replace(",", "") concatPGS = ''.join(thesePages) dictionary[str(previous)] = concatPGS thesePages = [] previous = current thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',"")) counter += 1 del cursor arcpy.AddMessage("The page numbers have been compiled into the dictionary.") #summarize the attributes in to remove multiple subfiles with the same attributes of the fields used in the report arcpy.AddField_management(logRoads, "unique", "TEXT", "", "", 250) cursor = arcpy.UpdateCursor(logRoads) for row in cursor: NAM = row.getValue("CNTY_NM") CS = row.getValue("CONTROLSEC") HD = row.getValue("HIGHWAY_DESIGN") ST = row.getValue("SURFACE_TYPE") NL = row.getValue("NUMBER_OF_LANES") row.setValue("unique", str(NAM) + str(CS) + str(HD) + str(ST) + str(NL)) cursor.updateRow(row) del cursor arcpy.AddMessage("Unique flag identifier has been created and populated.") #Dissolve the road log lines and apply the page numbers arcpy.Dissolve_management(logRoads, dissRoads, ["unique"], 
[["LEN_OF_SECTION","SUM"],["RTE_ID","FIRST"],["ST_NAME","FIRST"],["CNTY_NM","FIRST"],["CONTROLSEC","FIRST"],["HIGHWAY_DESIGN","FIRST"],["SURFACE_TYPE","FIRST"],["NUMBER_OF_LANES","FIRST"]]) arcpy.AddMessage("Road Log Linework dissolved.") #add the page numbers to the summarized routes so that we have all the road log data ready for the report arcpy.AddField_management(dissRoads, "MAP_ID", "TEXT", "", "", 150) cursor = arcpy.UpdateCursor(dissRoads) for row in cursor: rteID = row.getValue("FIRST_RTE_ID") if rteID in dictionary.keys(): row.setValue("MAP_ID", str(dictionary[rteID])) else: arcpy.AddError(str(rteID) + " has no page numbers in the dictionary!") cursor.updateRow(row) del cursor arcpy.AddMessage("Page numbers applied into the new MAP_ID field.") arcpy.AddMessage("Road Log Completed.") # # #9 #Report generation # # # def createRoadLogReport(): arcpy.AddMessage("Starting PDF generation...") #make directory os.makedirs(RoadLogFolder + os.sep + "PDF") #iterate through the list of county names to create a report for each county for Y in lowerNames: #import the dimensions for the report and create variable to determine the maximum measurements for that page size from reportlab.lib.pagesizes import letter width, height = letter #create a variable for the 'flowable' data drawing area on the report. this variable draws the location where the road summary data is inserted into the report template f = frames.Frame(.5*inch,inch, width-inch, 8.65*inch) #create the document doc = BaseDocTemplate(PDFLogFolder + os.sep + str(Y).upper() + "_ROAD_SUMMARY_" + str(dataYear) + ".pdf", pagesize=letter) #drawn the canvas/template of the report def thecanvas(c, doc): from reportlab.lib.pagesizes import letter width, height = letter #the template/canvas is object oriented. 
this is a list of all the objects, where they are to be drawn, and all the defining information which draws them #the objects are listed from the top of the page down to the bottom c.setFont("Helvetica-Bold",18) c.drawCentredString(width/2,height - .5*inch, str(Y)) c.setFont("Helvetica",14) c.drawCentredString(width/2,height - .75*inch, "COUNTY ROAD SUMMARY") c.setFont("Times-Roman",12) c.drawCentredString(width/2,height - .93*inch, "Texas Department of Transportation") c.setFont("Times-Roman",8) c.drawCentredString(width/2,height - 1.07*inch, "Transportation Planning and Programming Division") c.setFont("Times-Bold",9) c.drawString(.57*inch,9.7*inch, "ROUTE") c.drawString(1.55*inch,9.7*inch, "ROAD NAME") c.drawString(3.25*inch,9.7*inch, "LENGTH") c.drawString(3.95*inch,9.7*inch, "DESIGN") c.drawString(4.7*inch,9.7*inch, "SURFACE") c.drawString(5.48*inch,9.7*inch, "LANES") c.drawString(6.25*inch,9.7*inch, "PAGE(S)") c.line(.5*inch,9.65*inch,width-.5*inch,9.65*inch) #the frame which contains the table and data will be here c.line(.5*inch,inch,width-.5*inch,inch) c.setFont("Times-Bold",8) c.drawString(.5*inch,.88*inch, month + " " + str(dataYear)) c.drawString(2.5*inch,.85*inch, "Key:") c.drawString(3*inch,.85*inch, "Design:") c.drawString(3*inch,.7*inch, "1 = One Way") c.drawString(3*inch,.55*inch, "2 = Two Way") c.drawString(3*inch,.4*inch, "3 = Blvd.") c.drawString(4.5*inch,.85*inch, "Surface Type:") c.drawString(4.5*inch,.7*inch, "10 = Natural") c.drawString(4.5*inch,.55*inch, "32 = All Weather") c.drawString(5.8*inch,.7*inch, "51 = Paved") c.drawString(5.8*inch,.55*inch, "61 = Concrete") pageNUM = c.getPageNumber() c.drawRightString(width-.5*inch,.88*inch, "Page " + str(pageNUM)) #apply the canvas/template and the frame for the flowable road data to the document doc.addPageTemplates([PageTemplate(frames=[f],onPage=thecanvas)]) #search the formatted road log feature class via a cursor. 
query the feature class for the county being reported, sort, create a list of the attributes from each row, and apply the list to a list of all the data rows cursor = arcpy.SearchCursor(dissRoads, "FIRST_CNTY_NM = '" + Y + "'", "", "", "FIRST_CONTROLSEC A; FIRST_ST_NAME A; SUM_LEN_OF_SECTION D; FIRST_HIGHWAY_DESIGN D; FIRST_SURFACE_TYPE D; FIRST_NUMBER_OF_LANES D") elements = [] data = [] for row in cursor: CS = str(row.getValue("FIRST_CONTROLSEC")) SN = str(row.getValue("FIRST_ST_NAME")) rounded = round(row.getValue("SUM_LEN_OF_SECTION"), 3) LN = str(rounded) HD = str(row.getValue("FIRST_HIGHWAY_DESIGN")) ST = str(row.getValue("FIRST_SURFACE_TYPE")) LA = str(row.getValue("FIRST_NUMBER_OF_LANES")) PG = str(row.getValue("MAP_ID")) eachLine = [CS, SN, LN, HD, ST, LA, PG] data.append(eachLine) #draw the table, apply the data list, and format/stylize it t = Table(data, colWidths=[inch,1.75*inch,.8*inch,.75*inch,.75*inch,.65*inch,1.8*inch],rowHeights=[.2*inch]*len(data)) t.setStyle(TableStyle([('FONTSIZE',(0,0),(6,(len(data)-1)),8),('ALIGN',(0,0),(6,(len(data)-1)),'LEFT'),])) #add the data object (in this case: the populated table of roads) to a list of 'flowable' objects elements.append(t) #use the 'flowable' objects list and build the document doc.build(elements) del cursor arcpy.AddMessage(str(Y) + " completed.") arcpy.AddMessage("PDF generation complete.") # #10 #Put together all the documents #ZIP it UP # # def compilePackets(): arcpy.AddMessage("Zipping up the packets for each county...") os.makedirs(outputDir + os.sep + "_Completed_Packets") os.makedirs(completedPackets + os.sep + "_Descriptive_PDFs") #copy the annually updated documents which accompany all packets shutil.copyfile("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\Documents\\COUNTY_ROAD_CRITERIA.pdf", descriptiveDirectory + os.sep + "COUNTY_ROAD_CRITERIA.pdf") shutil.copyfile("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\Documents\\\\INSTRUCTIONS.pdf", descriptiveDirectory + os.sep + "INSTRUCTIONS.pdf") shutil.copyfile("C:\\TxDOT\\County Road Inventory Mapbooks\\Resources\\Documents\\\\Documents\\README_1ST.pdf", descriptiveDirectory + os.sep + "README_1ST.pdf") arcpy.AddMessage("Annual descriptive documents copied.") #define file extensions list to collect the pieces of the shapefiles theFileExtension = [".dbf",".prj",".sbn",".sbx",".shp",".shx"] arcpy.AddMessage("County and extension lists compiled.") #iterate through the list of county names and compile the packet for each countRange = len(upperNames) arcpy.AddMessage("Found %s counties..." % countRange) i = 1 for theCounty in upperNames: theOutputZip = completedPackets + os.sep + theCounty + "_" + dataYear + ".zip" zippedFile = zipfile.ZipFile(theOutputZip,"a", zipfile.ZIP_DEFLATED) arcpy.AddMessage("%s of %s - Zipping files for %s..." % (i,countRange,theCounty)) # Add the County Road Criteria PDF to the Zip File... zippedFile.write(descriptiveDirectory + os.sep + "COUNTY_ROAD_CRITERIA.pdf", "COUNTY_ROAD_CRITERIA.pdf") # Add the Instructions PDF to the Zip File... zippedFile.write(descriptiveDirectory + os.sep + "INSTRUCTIONS.pdf", "INSTRUCTIONS.pdf") # Add the ReadME PDF to the Zip File... zippedFile.write(descriptiveDirectory + os.sep + "README_1ST.pdf", "README_1ST.pdf") # Add the Road Summary PDF to the Zip File... 
roadLogsFile = theCounty + "_ROAD_SUMMARY_" + dataYear + ".pdf" zippedFile.write(PDFLogFolder + os.sep + roadLogsFile,roadLogsFile) # Add the Mapbook Page to the Zip file countyMapbookFile = theCounty + "_MAPBOOK_" + dataYear + ".pdf" zippedFile.write(MapBooksFolder + os.sep + countyMapbookFile,countyMapbookFile) # Make a list of Geometry File Names... theGeometryFiles = [] for extentionType in theFileExtension: theGeometryFiles.append(theCounty +"_INVENTORY_" + dataYear + extentionType) # Add the Geometry to the Zip file... for eachFile in theGeometryFiles: theTargetFile = ShapefileFolder + os.sep + eachFile zippedFile.write(theTargetFile,eachFile) arcpy.AddMessage("%s complete." % theCounty) # Close the Zip file... zippedFile.close() i += 1 arcpy.AddMessage("County packets zipped up and completed.") # # # # # # arcpy.AddMessage("And away we go...") copyDataLocal() createCovers() createLegend() createGridIndex() formatData() createShapefiles() createMapPages() combineMapbooks() formatRoadLog() createRoadLogReport() compilePackets() arcpy.AddMessage("Phew...finally finished.")
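# Note on the page-number compilation in formatRoadLog() above: the sorted
# searchCursor is walked with previous/current bookkeeping and separate
# special cases for the first and last rows. A simpler equivalent sketch
# (a hypothetical helper, not part of this script) groups MAP_ID values per
# RTE_ID as it goes and joins them at the end:


def compile_page_numbers(rows):
    """rows: iterable of (rte_id, map_id) pairs -> {rte_id: 'p1,p2,...'}."""
    pages = {}
    for rte_id, map_id in rows:
        page = str(map_id).replace('.0', '')
        bucket = pages.setdefault(str(rte_id), [])
        if page not in bucket:
            bucket.append(page)
    return dict((rte, ','.join(nums)) for rte, nums in pages.items())

# Example: compile_page_numbers([('A1', 1.0), ('A1', 2.0), ('B2', 3.0)])
# returns {'A1': '1,2', 'B2': '3'}, the same comma-separated strings that get
# written into the MAP_ID field of the dissolved road log.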
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf import settings from django.core.urlresolvers import NoReverseMatch # noqa from django.core.urlresolvers import reverse from django.http import HttpResponse # noqa from django.template import defaultfilters as filters from django.utils import html from django.utils.http import urlencode from django.utils import safestring from django.utils.translation import pgettext_lazy from django.utils.translation import string_concat # noqa from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import exceptions from horizon import messages from horizon import tables from openstack_dashboard import api from openstack_dashboard.api import cinder from openstack_dashboard import policy DELETABLE_STATES = ("available", "error", "error_extending") class VolumePolicyTargetMixin(policy.PolicyTargetMixin): policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),) class LaunchVolume(tables.LinkAction): name = "launch_volume" verbose_name = _("Launch as Instance") url = "horizon:project:instances:launch" classes = ("ajax-modal", "btn-launch") icon = "cloud-upload" policy_rules = (("compute", "compute:create"),) def get_link_url(self, datum): base_url = reverse(self.url) vol_id = "%s:vol" % self.table.get_object_id(datum) params = urlencode({"source_type": "volume_id", "source_id": vol_id}) return "?".join([base_url, params]) def allowed(self, request, volume=None): if getattr(volume, 'bootable', '') == 'true': return volume.status == "available" return False class LaunchVolumeNG(LaunchVolume): name = "launch_volume_ng" verbose_name = _("Launch as Instance") url = "horizon:project:volumes:index" classes = ("btn-launch", ) ajax = False def __init__(self, attrs=None, **kwargs): kwargs['preempt'] = True super(LaunchVolume, self).__init__(attrs, **kwargs) def get_link_url(self, datum): url = reverse(self.url) vol_id = "%s:vol" % self.table.get_object_id(datum) ngclick = "modal.openLaunchInstanceWizard(" \ "{successUrl: '%s', volumeId: '%s'})" \ % (url, vol_id.split(":vol")[0]) self.attrs.update({ "ng-controller": "LaunchInstanceModalController as modal", "ng-click": ngclick }) return "javascript:void(0);" class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u"Delete Volume", u"Delete Volumes", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Scheduled deletion of Volume", u"Scheduled deletion of Volumes", count ) policy_rules = (("volume", "volume:delete"),) def delete(self, request, obj_id): cinder.volume_delete(request, obj_id) def allowed(self, request, volume=None): if volume: return (volume.status in DELETABLE_STATES and not getattr(volume, 'has_snapshot', False)) return True class CreateVolume(tables.LinkAction): name = "create" verbose_name = _("Create Volume") url = "horizon:project:volumes:volumes:create" classes = ("ajax-modal", "btn-create") icon = 
"plus" policy_rules = (("volume", "volume:create"),) ajax = True def __init__(self, attrs=None, **kwargs): kwargs['preempt'] = True super(CreateVolume, self).__init__(attrs, **kwargs) def allowed(self, request, volume=None): limits = api.cinder.tenant_absolute_limits(request) gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf")) - limits.get('totalGigabytesUsed', 0)) volumes_available = (limits.get('maxTotalVolumes', float("inf")) - limits.get('totalVolumesUsed', 0)) if gb_available <= 0 or volumes_available <= 0: if "disabled" not in self.classes: self.classes = [c for c in self.classes] + ['disabled'] self.verbose_name = string_concat(self.verbose_name, ' ', _("(Quota exceeded)")) else: self.verbose_name = _("Create Volume") classes = [c for c in self.classes if c != "disabled"] self.classes = classes return True def single(self, table, request, object_id=None): self.allowed(request, None) return HttpResponse(self.render(is_table_action=True)) class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction): name = "extend" verbose_name = _("Extend Volume") url = "horizon:project:volumes:volumes:extend" classes = ("ajax-modal", "btn-extend") policy_rules = (("volume", "volume:extend"),) def allowed(self, request, volume=None): return volume.status == "available" class EditAttachments(tables.LinkAction): name = "attachments" verbose_name = _("Manage Attachments") url = "horizon:project:volumes:volumes:attach" classes = ("ajax-modal",) icon = "pencil" def allowed(self, request, volume=None): if volume: project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None) attach_allowed = \ policy.check((("compute", "compute:attach_volume"),), request, {"project_id": project_id}) detach_allowed = \ policy.check((("compute", "compute:detach_volume"),), request, {"project_id": project_id}) if attach_allowed or detach_allowed: return volume.status in ("available", "in-use") return False class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction): name = "snapshots" verbose_name = _("Create Snapshot") url = "horizon:project:volumes:volumes:create_snapshot" classes = ("ajax-modal",) icon = "camera" policy_rules = (("volume", "volume:create_snapshot"),) def allowed(self, request, volume=None): try: limits = api.cinder.tenant_absolute_limits(request) except Exception: exceptions.handle(request, _('Unable to retrieve tenant limits.')) limits = {} snapshots_available = (limits.get('maxTotalSnapshots', float("inf")) - limits.get('totalSnapshotsUsed', 0)) if snapshots_available <= 0 and "disabled" not in self.classes: self.classes = [c for c in self.classes] + ['disabled'] self.verbose_name = string_concat(self.verbose_name, ' ', _("(Quota exceeded)")) return volume.status in ("available", "in-use") class CreateTransfer(VolumePolicyTargetMixin, tables.LinkAction): name = "create_transfer" verbose_name = _("Create Transfer") url = "horizon:project:volumes:volumes:create_transfer" classes = ("ajax-modal",) policy_rules = (("volume", "volume:create_transfer"),) def allowed(self, request, volume=None): return volume.status == "available" class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction): name = "backups" verbose_name = _("Create Backup") url = "horizon:project:volumes:volumes:create_backup" classes = ("ajax-modal",) policy_rules = (("volume", "backup:create"),) def allowed(self, request, volume=None): return (cinder.volume_backup_supported(request) and volume.status == "available") class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction): name = "upload_to_image" 
verbose_name = _("Upload to Image") url = "horizon:project:volumes:volumes:upload_to_image" classes = ("ajax-modal",) icon = "cloud-upload" policy_rules = (("volume", "volume:upload_to_image"),) def allowed(self, request, volume=None): has_image_service_perm = \ request.user.has_perm('openstack.services.image') return (volume.status in ("available", "in-use") and has_image_service_perm) class EditVolume(VolumePolicyTargetMixin, tables.LinkAction): name = "edit" verbose_name = _("Edit Volume") url = "horizon:project:volumes:volumes:update" classes = ("ajax-modal",) icon = "pencil" policy_rules = (("volume", "volume:update"),) def allowed(self, request, volume=None): return volume.status in ("available", "in-use") class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction): name = "retype" verbose_name = _("Change Volume Type") url = "horizon:project:volumes:volumes:retype" classes = ("ajax-modal",) icon = "pencil" policy_rules = (("volume", "volume:retype"),) def allowed(self, request, volume=None): return volume.status in ("available", "in-use") class AcceptTransfer(tables.LinkAction): name = "accept_transfer" verbose_name = _("Accept Transfer") url = "horizon:project:volumes:volumes:accept_transfer" classes = ("ajax-modal",) icon = "exchange" policy_rules = (("volume", "volume:accept_transfer"),) ajax = True def single(self, table, request, object_id=None): return HttpResponse(self.render()) class DeleteTransfer(VolumePolicyTargetMixin, tables.Action): # This class inherits from tables.Action instead of the more obvious # tables.DeleteAction due to the confirmation message. When the delete # is successful, DeleteAction automatically appends the name of the # volume to the message, e.g. "Deleted volume transfer 'volume'". But # we are deleting the volume *transfer*, whose name is different. name = "delete_transfer" verbose_name = _("Cancel Transfer") policy_rules = (("volume", "volume:delete_transfer"),) help_text = _("This action cannot be undone.") action_type = "danger" def allowed(self, request, volume): return (volume.status == "awaiting-transfer" and getattr(volume, 'transfer', None)) def single(self, table, request, volume_id): volume = table.get_object_by_id(volume_id) try: cinder.transfer_delete(request, volume.transfer.id) if volume.transfer.name: msg = _('Successfully deleted volume transfer "%s"' ) % volume.transfer.name else: msg = _("Successfully deleted volume transfer") messages.success(request, msg) except Exception: exceptions.handle(request, _("Unable to delete volume transfer.")) class UpdateRow(tables.Row): ajax = True def get_data(self, request, volume_id): volume = cinder.volume_get(request, volume_id) return volume def get_size(volume): return _("%sGiB") % volume.size def get_attachment_name(request, attachment): server_id = attachment.get("server_id", None) if "instance" in attachment and attachment['instance']: name = attachment["instance"].name else: try: server = api.nova.server_get(request, server_id) name = server.name except Exception: name = None exceptions.handle(request, _("Unable to retrieve " "attachment information.")) try: url = reverse("horizon:project:instances:detail", args=(server_id,)) instance = '<a href="%s">%s</a>' % (url, html.escape(name)) except NoReverseMatch: instance = html.escape(name) return instance class AttachmentColumn(tables.Column): """Customized column class. So it that does complex processing on the attachments for a volume instance. 
""" def get_raw_data(self, volume): request = self.table.request link = _('Attached to %(instance)s on %(dev)s') attachments = [] # Filter out "empty" attachments which the client returns... for attachment in [att for att in volume.attachments if att]: # When a volume is attached it may return the server_id # without the server name... instance = get_attachment_name(request, attachment) vals = {"instance": instance, "dev": html.escape(attachment.get("device", ""))} attachments.append(link % vals) return safestring.mark_safe(", ".join(attachments)) def get_volume_type(volume): return volume.volume_type if volume.volume_type != "None" else None def get_encrypted_value(volume): if not hasattr(volume, 'encrypted') or volume.encrypted is None: return _("-") elif volume.encrypted is False: return _("No") else: return _("Yes") class VolumesTableBase(tables.DataTable): STATUS_CHOICES = ( ("in-use", True), ("available", True), ("creating", None), ("error", False), ("error_extending", False), ("maintenance", False), ) STATUS_DISPLAY_CHOICES = ( ("available", pgettext_lazy("Current status of a Volume", u"Available")), ("in-use", pgettext_lazy("Current status of a Volume", u"In-use")), ("error", pgettext_lazy("Current status of a Volume", u"Error")), ("creating", pgettext_lazy("Current status of a Volume", u"Creating")), ("error_extending", pgettext_lazy("Current status of a Volume", u"Error Extending")), ("extending", pgettext_lazy("Current status of a Volume", u"Extending")), ("attaching", pgettext_lazy("Current status of a Volume", u"Attaching")), ("detaching", pgettext_lazy("Current status of a Volume", u"Detaching")), ("deleting", pgettext_lazy("Current status of a Volume", u"Deleting")), ("error_deleting", pgettext_lazy("Current status of a Volume", u"Error deleting")), ("backing-up", pgettext_lazy("Current status of a Volume", u"Backing Up")), ("restoring-backup", pgettext_lazy("Current status of a Volume", u"Restoring Backup")), ("error_restoring", pgettext_lazy("Current status of a Volume", u"Error Restoring")), ("maintenance", pgettext_lazy("Current status of a Volume", u"Maintenance")), ) name = tables.Column("name", verbose_name=_("Name"), link="horizon:project:volumes:volumes:detail") description = tables.Column("description", verbose_name=_("Description"), truncate=40) size = tables.Column(get_size, verbose_name=_("Size"), attrs={'data-type': 'size'}) status = tables.Column("status", verbose_name=_("Status"), status=True, status_choices=STATUS_CHOICES, display_choices=STATUS_DISPLAY_CHOICES) def get_object_display(self, obj): return obj.name class VolumesFilterAction(tables.FilterAction): def filter(self, table, volumes, filter_string): """Naive case-insensitive search.""" q = filter_string.lower() return [volume for volume in volumes if q in volume.name.lower()] class VolumesTable(VolumesTableBase): name = tables.Column("name", verbose_name=_("Name"), link="horizon:project:volumes:volumes:detail") volume_type = tables.Column(get_volume_type, verbose_name=_("Type")) attachments = AttachmentColumn("attachments", verbose_name=_("Attached To")) availability_zone = tables.Column("availability_zone", verbose_name=_("Availability Zone")) bootable = tables.Column('is_bootable', verbose_name=_("Bootable"), filters=(filters.yesno, filters.capfirst)) encryption = tables.Column(get_encrypted_value, verbose_name=_("Encrypted"), link="horizon:project:volumes:" "volumes:encryption_detail") class Meta(object): name = "volumes" verbose_name = _("Volumes") status_columns = ["status"] row_class = 
UpdateRow table_actions = (CreateVolume, AcceptTransfer, DeleteVolume, VolumesFilterAction) launch_actions = () if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False): launch_actions = (LaunchVolume,) + launch_actions if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True): launch_actions = (LaunchVolumeNG,) + launch_actions row_actions = ((EditVolume, ExtendVolume,) + launch_actions + (EditAttachments, CreateSnapshot, CreateBackup, RetypeVolume, UploadToImage, CreateTransfer, DeleteTransfer, DeleteVolume)) class DetachVolume(tables.BatchAction): name = "detach" classes = ('btn-detach',) policy_rules = (("compute", "compute:detach_volume"),) help_text = _("The data will remain in the volume and another instance" " will be able to access the data if you attach" " this volume to it.") action_type = "danger" @staticmethod def action_present(count): return ungettext_lazy( u"Detach Volume", u"Detach Volumes", count ) # This action is asynchronous. @staticmethod def action_past(count): return ungettext_lazy( u"Detaching Volume", u"Detaching Volumes", count ) def action(self, request, obj_id): attachment = self.table.get_object_by_id(obj_id) api.nova.instance_volume_detach(request, attachment.get('server_id', None), obj_id) def get_success_url(self, request): return reverse('horizon:project:volumes:index') class AttachedInstanceColumn(tables.Column): """Customized column class that does complex processing on the attachments for a volume instance. """ def get_raw_data(self, attachment): request = self.table.request return safestring.mark_safe(get_attachment_name(request, attachment)) class AttachmentsTable(tables.DataTable): instance = AttachedInstanceColumn(get_attachment_name, verbose_name=_("Instance")) device = tables.Column("device", verbose_name=_("Device")) def get_object_id(self, obj): return obj['id'] def get_object_display(self, attachment): instance_name = get_attachment_name(self.request, attachment) vals = {"volume_name": attachment['volume_name'], "instance_name": html.strip_tags(instance_name)} return _("Volume %(volume_name)s on instance %(instance_name)s") % vals def get_object_by_id(self, obj_id): for obj in self.data: if self.get_object_id(obj) == obj_id: return obj raise ValueError('No match found for the id "%s".' % obj_id) class Meta(object): name = "attachments" verbose_name = _("Attachments") table_actions = (DetachVolume,) row_actions = (DetachVolume,)
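# --- Illustrative sketch (not part of the Horizon module above) ---
# CreateVolume.allowed() and CreateSnapshot.allowed() both derive the
# remaining quota by subtracting a "used" counter from its "max" counter in
# the dict returned by api.cinder.tenant_absolute_limits(), treating a
# missing maximum as unlimited.  The helper below reproduces only that
# arithmetic on a plain dict so the disable-on-quota behaviour can be traced
# without an OpenStack deployment; remaining_quota is a made-up name used for
# this example only.


def remaining_quota(limits, max_key, used_key):
    """Return how many more resources the tenant may create (inf = unlimited)."""
    return limits.get(max_key, float("inf")) - limits.get(used_key, 0)


# With these sample numbers the volume count is exhausted, so the action
# above would add the "disabled" CSS class and the "(Quota exceeded)" suffix.
_sample_limits = {"maxTotalVolumeGigabytes": 1000, "totalGigabytesUsed": 990,
                  "maxTotalVolumes": 10, "totalVolumesUsed": 10}
assert remaining_quota(_sample_limits, "maxTotalVolumeGigabytes",
                       "totalGigabytesUsed") == 10
assert remaining_quota(_sample_limits, "maxTotalVolumes",
                       "totalVolumesUsed") == 0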
import os import pprint import sys import textwrap import py import pytest from _pytest.config import ExitCode from _pytest.main import _in_venv from _pytest.main import Session from _pytest.pytester import Testdir class TestCollector: def test_collect_versus_item(self): from pytest import Collector, Item assert not issubclass(Collector, Item) assert not issubclass(Item, Collector) def test_check_equality(self, testdir: Testdir) -> None: modcol = testdir.getmodulecol( """ def test_pass(): pass def test_fail(): assert 0 """ ) fn1 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn1, pytest.Function) fn2 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn2, pytest.Function) assert fn1 == fn2 assert fn1 != modcol assert hash(fn1) == hash(fn2) fn3 = testdir.collect_by_name(modcol, "test_fail") assert isinstance(fn3, pytest.Function) assert not (fn1 == fn3) assert fn1 != fn3 for fn in fn1, fn2, fn3: assert isinstance(fn, pytest.Function) assert fn != 3 # type: ignore[comparison-overlap] # noqa: F821 assert fn != modcol assert fn != [1, 2, 3] # type: ignore[comparison-overlap] # noqa: F821 assert [1, 2, 3] != fn # type: ignore[comparison-overlap] # noqa: F821 assert modcol != fn assert testdir.collect_by_name(modcol, "doesnotexist") is None def test_getparent(self, testdir): modcol = testdir.getmodulecol( """ class TestClass(object): def test_foo(): pass """ ) cls = testdir.collect_by_name(modcol, "TestClass") fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo") parent = fn.getparent(pytest.Module) assert parent is modcol parent = fn.getparent(pytest.Function) assert parent is fn parent = fn.getparent(pytest.Class) assert parent is cls def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") testdir.makepyfile( conftest=""" import pytest class CustomFile(pytest.File): pass def pytest_collect_file(path, parent): if path.ext == ".xxx": return CustomFile.from_parent(fspath=path, parent=parent) """ ) node = testdir.getpathnode(hello) assert isinstance(node, pytest.File) assert node.name == "hello.xxx" nodes = node.session.perform_collect([node.nodeid], genitems=False) assert len(nodes) == 1 assert isinstance(nodes[0], pytest.File) def test_can_skip_class_with_test_attr(self, testdir): """Assure test class is skipped when using `__test__=False` (See #2007).""" testdir.makepyfile( """ class TestFoo(object): __test__ = False def __init__(self): pass def test_foo(): assert True """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"]) class TestCollectFS: def test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir tmpdir.ensure("build", "test_notfound.py") tmpdir.ensure("dist", "test_notfound.py") tmpdir.ensure("_darcs", "test_notfound.py") tmpdir.ensure("CVS", "test_notfound.py") tmpdir.ensure("{arch}", "test_notfound.py") tmpdir.ensure(".whatever", "test_notfound.py") tmpdir.ensure(".bzr", "test_notfound.py") tmpdir.ensure("normal", "test_found.py") for x in tmpdir.visit("test_*.py"): x.write("def test_hello(): pass") result = testdir.runpytest("--collect-only") s = result.stdout.str() assert "test_notfound" not in s assert "test_found" in s @pytest.mark.parametrize( "fname", ( "activate", "activate.csh", "activate.fish", "Activate", "Activate.bat", "Activate.ps1", ), ) def test_ignored_virtualenvs(self, testdir, fname): bindir = "Scripts" if sys.platform.startswith("win") else "bin" testdir.tmpdir.ensure("virtual", bindir, fname) testfile = 
testdir.tmpdir.ensure("virtual", "test_invenv.py") testfile.write("def test_hello(): pass") # by default, ignore tests inside a virtualenv result = testdir.runpytest() result.stdout.no_fnmatch_line("*test_invenv*") # allow test collection if user insists result = testdir.runpytest("--collect-in-virtualenv") assert "test_invenv" in result.stdout.str() # allow test collection if user directly passes in the directory result = testdir.runpytest("virtual") assert "test_invenv" in result.stdout.str() @pytest.mark.parametrize( "fname", ( "activate", "activate.csh", "activate.fish", "Activate", "Activate.bat", "Activate.ps1", ), ) def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname): bindir = "Scripts" if sys.platform.startswith("win") else "bin" # norecursedirs takes priority testdir.tmpdir.ensure(".virtual", bindir, fname) testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py") testfile.write("def test_hello(): pass") result = testdir.runpytest("--collect-in-virtualenv") result.stdout.no_fnmatch_line("*test_invenv*") # ...unless the virtualenv is explicitly given on the CLI result = testdir.runpytest("--collect-in-virtualenv", ".virtual") assert "test_invenv" in result.stdout.str() @pytest.mark.parametrize( "fname", ( "activate", "activate.csh", "activate.fish", "Activate", "Activate.bat", "Activate.ps1", ), ) def test__in_venv(self, testdir, fname): """Directly test the virtual env detection function""" bindir = "Scripts" if sys.platform.startswith("win") else "bin" # no bin/activate, not a virtualenv base_path = testdir.tmpdir.mkdir("venv") assert _in_venv(base_path) is False # with bin/activate, totally a virtualenv base_path.ensure(bindir, fname) assert _in_venv(base_path) is True def test_custom_norecursedirs(self, testdir): testdir.makeini( """ [pytest] norecursedirs = mydir xyz* """ ) tmpdir = testdir.tmpdir tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass") tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0") tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass") rec = testdir.inline_run() rec.assertoutcome(passed=1) rec = testdir.inline_run("xyz123/test_2.py") rec.assertoutcome(failed=1) def test_testpaths_ini(self, testdir, monkeypatch): testdir.makeini( """ [pytest] testpaths = gui uts """ ) tmpdir = testdir.tmpdir tmpdir.ensure("env", "test_1.py").write("def test_env(): pass") tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass") tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass") # executing from rootdir only tests from `testpaths` directories # are collected items, reprec = testdir.inline_genitems("-v") assert [x.name for x in items] == ["test_gui", "test_uts"] # check that explicitly passing directories in the command-line # collects the tests for dirname in ("env", "gui", "uts"): items, reprec = testdir.inline_genitems(tmpdir.join(dirname)) assert [x.name for x in items] == ["test_%s" % dirname] # changing cwd to each subdirectory and running pytest without # arguments collects the tests in that directory normally for dirname in ("env", "gui", "uts"): monkeypatch.chdir(testdir.tmpdir.join(dirname)) items, reprec = testdir.inline_genitems() assert [x.name for x in items] == ["test_%s" % dirname] class TestCollectPluginHookRelay: def test_pytest_collect_file(self, testdir): wascalled = [] class Plugin: def pytest_collect_file(self, path): if not path.basename.startswith("."): # Ignore hidden files, e.g. .testmondata. 
wascalled.append(path) testdir.makefile(".abc", "xyz") pytest.main([testdir.tmpdir], plugins=[Plugin()]) assert len(wascalled) == 1 assert wascalled[0].ext == ".abc" @pytest.mark.filterwarnings("ignore:.*pytest_collect_directory.*") def test_pytest_collect_directory(self, testdir): wascalled = [] class Plugin: def pytest_collect_directory(self, path): wascalled.append(path.basename) testdir.mkdir("hello") testdir.mkdir("world") pytest.main(testdir.tmpdir, plugins=[Plugin()]) assert "hello" in wascalled assert "world" in wascalled class TestPrunetraceback: def test_custom_repr_failure(self, testdir): p = testdir.makepyfile( """ import not_exists """ ) testdir.makeconftest( """ import pytest def pytest_collect_file(path, parent): return MyFile(path, parent) class MyError(Exception): pass class MyFile(pytest.File): def collect(self): raise MyError() def repr_failure(self, excinfo): if excinfo.errisinstance(MyError): return "hello world" return pytest.File.repr_failure(self, excinfo) """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"]) @pytest.mark.xfail(reason="other mechanism for adding to reporting needed") def test_collect_report_postprocessing(self, testdir): p = testdir.makepyfile( """ import not_exists """ ) testdir.makeconftest( """ import pytest @pytest.hookimpl(hookwrapper=True) def pytest_make_collect_report(): outcome = yield rep = outcome.get_result() rep.headerlines += ["header1"] outcome.force_result(rep) """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"]) class TestCustomConftests: def test_ignore_collect_path(self, testdir): testdir.makeconftest( """ def pytest_ignore_collect(path, config): return path.basename.startswith("x") or \ path.basename == "test_one.py" """ ) sub = testdir.mkdir("xy123") sub.ensure("test_hello.py").write("syntax error") sub.join("conftest.py").write("syntax error") testdir.makepyfile("def test_hello(): pass") testdir.makepyfile(test_one="syntax error") result = testdir.runpytest("--fulltrace") assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_ignore_collect_not_called_on_argument(self, testdir): testdir.makeconftest( """ def pytest_ignore_collect(path, config): return True """ ) p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p) assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED result.stdout.fnmatch_lines(["*collected 0 items*"]) def test_collectignore_exclude_on_option(self, testdir): testdir.makeconftest( """ collect_ignore = ['hello', 'test_world.py'] def pytest_addoption(parser): parser.addoption("--XX", action="store_true", default=False) def pytest_configure(config): if config.getvalue("XX"): collect_ignore[:] = [] """ ) testdir.mkdir("hello") testdir.makepyfile(test_world="def test_hello(): pass") result = testdir.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED result.stdout.no_fnmatch_line("*passed*") result = testdir.runpytest("--XX") assert result.ret == 0 assert "passed" in result.stdout.str() def test_collectignoreglob_exclude_on_option(self, testdir): testdir.makeconftest( """ collect_ignore_glob = ['*w*l[dt]*'] def pytest_addoption(parser): parser.addoption("--XX", action="store_true", default=False) def pytest_configure(config): if config.getvalue("XX"): collect_ignore_glob[:] = [] """ ) testdir.makepyfile(test_world="def test_hello(): pass") testdir.makepyfile(test_welt="def 
test_hallo(): pass") result = testdir.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED result.stdout.fnmatch_lines(["*collected 0 items*"]) result = testdir.runpytest("--XX") assert result.ret == 0 result.stdout.fnmatch_lines(["*2 passed*"]) def test_pytest_fs_collect_hooks_are_seen(self, testdir): testdir.makeconftest( """ import pytest class MyModule(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule(path, parent) """ ) testdir.mkdir("sub") testdir.makepyfile("def test_x(): pass") result = testdir.runpytest("--co") result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"]) def test_pytest_collect_file_from_sister_dir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") conf1 = testdir.makeconftest( """ import pytest class MyModule1(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule1(path, parent) """ ) conf1.move(sub1.join(conf1.basename)) conf2 = testdir.makeconftest( """ import pytest class MyModule2(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule2(path, parent) """ ) conf2.move(sub2.join(conf2.basename)) p = testdir.makepyfile("def test_x(): pass") p.copy(sub1.join(p.basename)) p.copy(sub2.join(p.basename)) result = testdir.runpytest("--co") result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"]) class TestSession: def test_parsearg(self, testdir) -> None: p = testdir.makepyfile("def test_func(): pass") subdir = testdir.mkdir("sub") subdir.ensure("__init__.py") target = subdir.join(p.basename) p.move(target) subdir.chdir() config = testdir.parseconfig(p.basename) rcol = Session.from_config(config) assert rcol.fspath == subdir fspath, parts = rcol._parsearg(p.basename) assert fspath == target assert len(parts) == 0 fspath, parts = rcol._parsearg(p.basename + "::test_func") assert fspath == target assert parts[0] == "test_func" assert len(parts) == 1 def test_collect_topdir(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) # XXX migrate to collectonly? 
(see below) config = testdir.parseconfig(id) topdir = testdir.tmpdir rcol = Session.from_config(config) assert topdir == rcol.fspath # rootid = rcol.nodeid # root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] # assert root2 == rcol, rootid colitems = rcol.perform_collect([rcol.nodeid], genitems=False) assert len(colitems) == 1 assert colitems[0].fspath == p def get_reported_items(self, hookrec): """Return pytest.Item instances reported by the pytest_collectreport hook""" calls = hookrec.getcalls("pytest_collectreport") return [ x for call in calls for x in call.report.result if isinstance(x, pytest.Item) ] def test_collect_protocol_single_function(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) items, hookrec = testdir.inline_genitems(id) (item,) = items assert item.name == "test_func" newid = item.nodeid assert newid == id pprint.pprint(hookrec.calls) topdir = testdir.tmpdir # noqa hookrec.assert_contains( [ ("pytest_collectstart", "collector.fspath == topdir"), ("pytest_make_collect_report", "collector.fspath == topdir"), ("pytest_collectstart", "collector.fspath == p"), ("pytest_make_collect_report", "collector.fspath == p"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.result[0].name == 'test_func'"), ] ) # ensure we are reporting the collection of the single test item (#2464) assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"] def test_collect_protocol_method(self, testdir): p = testdir.makepyfile( """ class TestClass(object): def test_method(self): pass """ ) normid = p.basename + "::TestClass::test_method" for id in [p.basename, p.basename + "::TestClass", normid]: items, hookrec = testdir.inline_genitems(id) assert len(items) == 1 assert items[0].name == "test_method" newid = items[0].nodeid assert newid == normid # ensure we are reporting the collection of the single test item (#2464) assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"] def test_collect_custom_nodes_multi_id(self, testdir): p = testdir.makepyfile("def test_func(): pass") testdir.makeconftest( """ import pytest class SpecialItem(pytest.Item): def runtest(self): return # ok class SpecialFile(pytest.File): def collect(self): return [SpecialItem(name="check", parent=self)] def pytest_collect_file(path, parent): if path.basename == %r: return SpecialFile(fspath=path, parent=parent) """ % p.basename ) id = p.basename items, hookrec = testdir.inline_genitems(id) pprint.pprint(hookrec.calls) assert len(items) == 2 hookrec.assert_contains( [ ("pytest_collectstart", "collector.fspath == collector.session.fspath"), ( "pytest_collectstart", "collector.__class__.__name__ == 'SpecialFile'", ), ("pytest_collectstart", "collector.__class__.__name__ == 'Module'"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), ] ) assert len(self.get_reported_items(hookrec)) == 2 def test_collect_subdir_event_ordering(self, testdir): p = testdir.makepyfile("def test_func(): pass") aaa = testdir.mkpydir("aaa") test_aaa = aaa.join("test_aaa.py") p.move(test_aaa) items, hookrec = testdir.inline_genitems() assert len(items) == 1 pprint.pprint(hookrec.calls) hookrec.assert_contains( [ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"), ] ) def test_collect_two_commandline_args(self, testdir): p 
= testdir.makepyfile("def test_func(): pass") aaa = testdir.mkpydir("aaa") bbb = testdir.mkpydir("bbb") test_aaa = aaa.join("test_aaa.py") p.copy(test_aaa) test_bbb = bbb.join("test_bbb.py") p.move(test_bbb) id = "." items, hookrec = testdir.inline_genitems(id) assert len(items) == 2 pprint.pprint(hookrec.calls) hookrec.assert_contains( [ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"), ("pytest_collectstart", "collector.fspath == test_bbb"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"), ] ) def test_serialization_byid(self, testdir): testdir.makepyfile("def test_func(): pass") items, hookrec = testdir.inline_genitems() assert len(items) == 1 (item,) = items items2, hookrec = testdir.inline_genitems(item.nodeid) (item2,) = items2 assert item2.name == item.name assert item2.fspath == item.fspath def test_find_byid_without_instance_parents(self, testdir): p = testdir.makepyfile( """ class TestClass(object): def test_method(self): pass """ ) arg = p.basename + "::TestClass::test_method" items, hookrec = testdir.inline_genitems(arg) assert len(items) == 1 (item,) = items assert item.nodeid.endswith("TestClass::test_method") # ensure we are reporting the collection of the single test item (#2464) assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"] class Test_getinitialnodes: def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") with tmpdir.as_cwd(): config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == "x.py" assert col.parent.parent is None for col in col.listchain(): assert col.config is config def test_pkgfile(self, testdir): """Verify nesting when a module is within a package. The parent chain should match: Module<x.py> -> Package<subdir> -> Session. Session's parent should always be None. 
""" tmpdir = testdir.tmpdir subdir = tmpdir.join("subdir") x = subdir.ensure("x.py") subdir.ensure("__init__.py") with subdir.as_cwd(): config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert col.name == "x.py" assert isinstance(col, pytest.Module) assert isinstance(col.parent, pytest.Package) assert isinstance(col.parent.parent, pytest.Session) # session is batman (has no parents) assert col.parent.parent.parent is None for col in col.listchain(): assert col.config is config class Test_genitems: def test_check_collect_hashes(self, testdir): p = testdir.makepyfile( """ def test_1(): pass def test_2(): pass """ ) p.copy(p.dirpath(p.purebasename + "2" + ".py")) items, reprec = testdir.inline_genitems(p.dirpath()) assert len(items) == 4 for numi, i in enumerate(items): for numj, j in enumerate(items): if numj != numi: assert hash(i) != hash(j) assert i != j def test_example_items1(self, testdir): p = testdir.makepyfile( """ import pytest def testone(): pass class TestX(object): def testmethod_one(self): pass class TestY(TestX): @pytest.mark.parametrize("arg0", [".["]) def testmethod_two(self, arg0): pass """ ) items, reprec = testdir.inline_genitems(p) assert len(items) == 4 assert items[0].name == "testone" assert items[1].name == "testmethod_one" assert items[2].name == "testmethod_one" assert items[3].name == "testmethod_two[.[]" # let's also test getmodpath here assert items[0].getmodpath() == "testone" assert items[1].getmodpath() == "TestX.testmethod_one" assert items[2].getmodpath() == "TestY.testmethod_one" # PR #6202: Fix incorrect result of getmodpath method. (Resolves issue #6189) assert items[3].getmodpath() == "TestY.testmethod_two[.[]" s = items[0].getmodpath(stopatmodule=False) assert s.endswith("test_example_items1.testone") print(s) def test_class_and_functions_discovery_using_glob(self, testdir): """ tests that python_classes and python_functions config options work as prefixes and glob-like patterns (issue #600). 
""" testdir.makeini( """ [pytest] python_classes = *Suite Test python_functions = *_test test """ ) p = testdir.makepyfile( """ class MyTestSuite(object): def x_test(self): pass class TestCase(object): def test_y(self): pass """ ) items, reprec = testdir.inline_genitems(p) ids = [x.getmodpath() for x in items] assert ids == ["MyTestSuite.x_test", "TestCase.test_y"] def test_matchnodes_two_collections_same_file(testdir): testdir.makeconftest( """ import pytest def pytest_configure(config): config.pluginmanager.register(Plugin2()) class Plugin2(object): def pytest_collect_file(self, path, parent): if path.ext == ".abc": return MyFile2(path, parent) def pytest_collect_file(path, parent): if path.ext == ".abc": return MyFile1(path, parent) class MyFile1(pytest.Item, pytest.File): def runtest(self): pass class MyFile2(pytest.File): def collect(self): return [Item2("hello", parent=self)] class Item2(pytest.Item): def runtest(self): pass """ ) p = testdir.makefile(".abc", "") result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines(["*2 passed*"]) res = testdir.runpytest("%s::hello" % p.basename) res.stdout.fnmatch_lines(["*1 passed*"]) class TestNodekeywords: def test_no_under(self, testdir): modcol = testdir.getmodulecol( """ def test_pass(): pass def test_fail(): assert 0 """ ) values = list(modcol.keywords) assert modcol.name in values for x in values: assert not x.startswith("_") assert modcol.name in repr(modcol.keywords) def test_issue345(self, testdir): testdir.makepyfile( """ def test_should_not_be_selected(): assert False, 'I should not have been selected to run' def test___repr__(): pass """ ) reprec = testdir.inline_run("-k repr") reprec.assertoutcome(passed=1, failed=0) def test_keyword_matching_is_case_insensitive_by_default(self, testdir): """Check that selection via -k EXPRESSION is case-insensitive. Since markers are also added to the node keywords, they too can be matched without having to think about case sensitivity. """ testdir.makepyfile( """ import pytest def test_sPeCiFiCToPiC_1(): assert True class TestSpecificTopic_2: def test(self): assert True @pytest.mark.sPeCiFiCToPic_3 def test(): assert True @pytest.mark.sPeCiFiCToPic_4 class Test: def test(self): assert True def test_failing_5(): assert False, "This should not match" """ ) num_matching_tests = 4 for expression in ("specifictopic", "SPECIFICTOPIC", "SpecificTopic"): reprec = testdir.inline_run("-k " + expression) reprec.assertoutcome(passed=num_matching_tests, failed=0) COLLECTION_ERROR_PY_FILES = dict( test_01_failure=""" def test_1(): assert False """, test_02_import_error=""" import asdfasdfasdf def test_2(): assert True """, test_03_import_error=""" import asdfasdfasdf def test_3(): assert True """, test_04_success=""" def test_4(): assert True """, ) def test_exit_on_collection_error(testdir): """Verify that all collection errors are collected and no tests executed""" testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest() assert res.ret == 2 res.stdout.fnmatch_lines( [ "collected 2 items / 2 errors", "*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*", "*ERROR collecting test_03_import_error.py*", "*No module named *asdfa*", ] ) def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir): """ Verify collection is aborted once maxfail errors are encountered ignoring further modules which would cause more collection errors. 
""" testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--maxfail=1") assert res.ret == 1 res.stdout.fnmatch_lines( [ "collected 1 item / 1 error", "*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*", "*! stopping after 1 failures !*", "*= 1 error in *", ] ) res.stdout.no_fnmatch_line("*test_03*") def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir): """ Verify the test run aborts due to collection errors even if maxfail count of errors was not reached. """ testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--maxfail=4") assert res.ret == 2 res.stdout.fnmatch_lines( [ "collected 2 items / 2 errors", "*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*", "*ERROR collecting test_03_import_error.py*", "*No module named *asdfa*", "*! Interrupted: 2 errors during collection !*", "*= 2 errors in *", ] ) def test_continue_on_collection_errors(testdir): """ Verify tests are executed even when collection errors occur when the --continue-on-collection-errors flag is set """ testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--continue-on-collection-errors") assert res.ret == 1 res.stdout.fnmatch_lines( ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"] ) def test_continue_on_collection_errors_maxfail(testdir): """ Verify tests are executed even when collection errors occur and that maxfail is honoured (including the collection error count). 4 tests: 2 collection errors + 1 failure + 1 success test_4 is never executed because the test run is with --maxfail=3 which means it is interrupted after the 2 collection errors + 1 failure. """ testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3") assert res.ret == 1 res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"]) def test_fixture_scope_sibling_conftests(testdir): """Regression test case for https://github.com/pytest-dev/pytest/issues/2836""" foo_path = testdir.mkdir("foo") foo_path.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def fix(): return 1 """ ) ) foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1") # Tests in `food/` should not see the conftest fixture from `foo/` food_path = testdir.mkpydir("food") food_path.join("test_food.py").write("def test_food(fix): assert fix == 1") res = testdir.runpytest() assert res.ret == 1 res.stdout.fnmatch_lines( [ "*ERROR at setup of test_food*", "E*fixture 'fix' not found", "*1 passed, 1 error*", ] ) def test_collect_init_tests(testdir): """Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)""" p = testdir.copy_example("collect/collect_init_tests") result = testdir.runpytest(p, "--collect-only") result.stdout.fnmatch_lines( [ "collected 2 items", "<Package *", " <Module __init__.py>", " <Function test_init>", " <Module test_foo.py>", " <Function test_foo>", ] ) result = testdir.runpytest("./tests", "--collect-only") result.stdout.fnmatch_lines( [ "collected 2 items", "<Package *", " <Module __init__.py>", " <Function test_init>", " <Module test_foo.py>", " <Function test_foo>", ] ) # Ignores duplicates with "." and pkginit (#4310). 
result = testdir.runpytest("./tests", ".", "--collect-only") result.stdout.fnmatch_lines( [ "collected 2 items", "<Package */tests>", " <Module __init__.py>", " <Function test_init>", " <Module test_foo.py>", " <Function test_foo>", ] ) # Same as before, but different order. result = testdir.runpytest(".", "tests", "--collect-only") result.stdout.fnmatch_lines( [ "collected 2 items", "<Package */tests>", " <Module __init__.py>", " <Function test_init>", " <Module test_foo.py>", " <Function test_foo>", ] ) result = testdir.runpytest("./tests/test_foo.py", "--collect-only") result.stdout.fnmatch_lines( ["<Package */tests>", " <Module test_foo.py>", " <Function test_foo>"] ) result.stdout.no_fnmatch_line("*test_init*") result = testdir.runpytest("./tests/__init__.py", "--collect-only") result.stdout.fnmatch_lines( ["<Package */tests>", " <Module __init__.py>", " <Function test_init>"] ) result.stdout.no_fnmatch_line("*test_foo*") def test_collect_invalid_signature_message(testdir): """Check that we issue a proper message when we can't determine the signature of a test function (#4026). """ testdir.makepyfile( """ import pytest class TestCase: @pytest.fixture def fix(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( ["Could not determine arguments of *.fix *: invalid method signature"] ) def test_collect_handles_raising_on_dunder_class(testdir): """Handle proxy classes like Django's LazySettings that might raise on ``isinstance`` (#4266). """ testdir.makepyfile( """ class ImproperlyConfigured(Exception): pass class RaisesOnGetAttr(object): def raises(self): raise ImproperlyConfigured __class__ = property(raises) raises = RaisesOnGetAttr() def test_1(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed in*"]) assert result.ret == 0 def test_collect_with_chdir_during_import(testdir): subdir = testdir.tmpdir.mkdir("sub") testdir.tmpdir.join("conftest.py").write( textwrap.dedent( """ import os os.chdir(%r) """ % (str(subdir),) ) ) testdir.makepyfile( """ def test_1(): import os assert os.getcwd() == %r """ % (str(subdir),) ) with testdir.tmpdir.as_cwd(): result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed in*"]) assert result.ret == 0 # Handles relative testpaths. testdir.makeini( """ [pytest] testpaths = . """ ) with testdir.tmpdir.as_cwd(): result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines(["collected 1 item"]) def test_collect_pyargs_with_testpaths(testdir, monkeypatch): testmod = testdir.mkdir("testmod") # NOTE: __init__.py is not collected since it does not match python_files. 
testmod.ensure("__init__.py").write("def test_func(): pass") testmod.ensure("test_file.py").write("def test_func(): pass") root = testdir.mkdir("root") root.ensure("pytest.ini").write( textwrap.dedent( """ [pytest] addopts = --pyargs testpaths = testmod """ ) ) monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir), prepend=os.pathsep) with root.as_cwd(): result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines(["*1 passed in*"]) @pytest.mark.skipif( not hasattr(py.path.local, "mksymlinkto"), reason="symlink not available on this platform", ) def test_collect_symlink_file_arg(testdir): """Test that collecting a direct symlink, where the target does not match python_files works (#4325).""" real = testdir.makepyfile( real=""" def test_nodeid(request): assert request.node.nodeid == "real.py::test_nodeid" """ ) symlink = testdir.tmpdir.join("symlink.py") symlink.mksymlinkto(real) result = testdir.runpytest("-v", symlink) result.stdout.fnmatch_lines(["real.py::test_nodeid PASSED*", "*1 passed in*"]) assert result.ret == 0 @pytest.mark.skipif( not hasattr(py.path.local, "mksymlinkto"), reason="symlink not available on this platform", ) def test_collect_symlink_out_of_tree(testdir): """Test collection of symlink via out-of-tree rootdir.""" sub = testdir.tmpdir.join("sub") real = sub.join("test_real.py") real.write( textwrap.dedent( """ def test_nodeid(request): # Should not contain sub/ prefix. assert request.node.nodeid == "test_real.py::test_nodeid" """ ), ensure=True, ) out_of_tree = testdir.tmpdir.join("out_of_tree").ensure(dir=True) symlink_to_sub = out_of_tree.join("symlink_to_sub") symlink_to_sub.mksymlinkto(sub) sub.chdir() result = testdir.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub) result.stdout.fnmatch_lines( [ # Should not contain "sub/"! "test_real.py::test_nodeid PASSED" ] ) assert result.ret == 0 def test_collectignore_via_conftest(testdir): """collect_ignore in parent conftest skips importing child (issue #4592).""" tests = testdir.mkpydir("tests") tests.ensure("conftest.py").write("collect_ignore = ['ignore_me']") ignore_me = tests.mkdir("ignore_me") ignore_me.ensure("__init__.py") ignore_me.ensure("conftest.py").write("assert 0, 'should_not_be_called'") result = testdir.runpytest() assert result.ret == ExitCode.NO_TESTS_COLLECTED def test_collect_pkg_init_and_file_in_args(testdir): subdir = testdir.mkdir("sub") init = subdir.ensure("__init__.py") init.write("def test_init(): pass") p = subdir.ensure("test_file.py") p.write("def test_file(): pass") # NOTE: without "-o python_files=*.py" this collects test_file.py twice. # This changed/broke with "Add package scoped fixtures #2283" (2b1410895) # initially (causing a RecursionError). 
result = testdir.runpytest("-v", str(init), str(p)) result.stdout.fnmatch_lines( [ "sub/test_file.py::test_file PASSED*", "sub/test_file.py::test_file PASSED*", "*2 passed in*", ] ) result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init), str(p)) result.stdout.fnmatch_lines( [ "sub/__init__.py::test_init PASSED*", "sub/test_file.py::test_file PASSED*", "*2 passed in*", ] ) def test_collect_pkg_init_only(testdir): subdir = testdir.mkdir("sub") init = subdir.ensure("__init__.py") init.write("def test_init(): pass") result = testdir.runpytest(str(init)) result.stdout.fnmatch_lines(["*no tests ran in*"]) result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init)) result.stdout.fnmatch_lines(["sub/__init__.py::test_init PASSED*", "*1 passed in*"]) @pytest.mark.skipif( not hasattr(py.path.local, "mksymlinkto"), reason="symlink not available on this platform", ) @pytest.mark.parametrize("use_pkg", (True, False)) def test_collect_sub_with_symlinks(use_pkg, testdir): sub = testdir.mkdir("sub") if use_pkg: sub.ensure("__init__.py") sub.ensure("test_file.py").write("def test_file(): pass") # Create a broken symlink. sub.join("test_broken.py").mksymlinkto("test_doesnotexist.py") # Symlink that gets collected. sub.join("test_symlink.py").mksymlinkto("test_file.py") result = testdir.runpytest("-v", str(sub)) result.stdout.fnmatch_lines( [ "sub/test_file.py::test_file PASSED*", "sub/test_symlink.py::test_file PASSED*", "*2 passed in*", ] ) def test_collector_respects_tbstyle(testdir): p1 = testdir.makepyfile("assert 0") result = testdir.runpytest(p1, "--tb=native") assert result.ret == ExitCode.INTERRUPTED result.stdout.fnmatch_lines( [ "*_ ERROR collecting test_collector_respects_tbstyle.py _*", "Traceback (most recent call last):", ' File "*/test_collector_respects_tbstyle.py", line 1, in <module>', " assert 0", "AssertionError: assert 0", "*! Interrupted: 1 error during collection !*", "*= 1 error in *", ] ) def test_does_not_eagerly_collect_packages(testdir): testdir.makepyfile("def test(): pass") pydir = testdir.mkpydir("foopkg") pydir.join("__init__.py").write("assert False") result = testdir.runpytest() assert result.ret == ExitCode.OK def test_does_not_put_src_on_path(testdir): # `src` is not on sys.path so it should not be importable testdir.tmpdir.join("src/nope/__init__.py").ensure() testdir.makepyfile( "import pytest\n" "def test():\n" " with pytest.raises(ImportError):\n" " import nope\n" ) result = testdir.runpytest() assert result.ret == ExitCode.OK
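# --- Illustrative sketch (not pytest's actual implementation) ---
# The TestCollectFS.test__in_venv cases above assert that a directory counts
# as a virtualenv exactly when its script directory ("Scripts" on Windows,
# "bin" elsewhere) contains one of the activate scripts listed in the
# parametrization.  A minimal detector with the same observable behaviour
# could look like this; looks_like_venv is a hypothetical name introduced
# only for this example.
import os
import sys

ACTIVATE_SCRIPTS = (
    "activate", "activate.csh", "activate.fish",
    "Activate", "Activate.bat", "Activate.ps1",
)


def looks_like_venv(path: str) -> bool:
    """Return True if *path* contains an <bindir>/activate-style script."""
    bindir = "Scripts" if sys.platform.startswith("win") else "bin"
    script_dir = os.path.join(path, bindir)
    return any(
        os.path.isfile(os.path.join(script_dir, name))
        for name in ACTIVATE_SCRIPTS
    )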
# Copyright (C) 2014, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from contextlib import nested import re import shlex import threading import time import six from cinder import exception from cinder.i18n import _LE, _LW from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib LOG = logging.getLogger(__name__) SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm ' 'LD_LIBRARY_PATH=/usr/stonavm/lib ' 'STONAVM_RSP_PASS=on STONAVM_ACT=on') MAX_HOSTGROUPS = 127 MAX_HOSTGROUPS_ISCSI = 254 MAX_HLUN = 2047 EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_' EXEC_TIMEOUT = 10 EXEC_INTERVAL = 1 CHAP_TIMEOUT = 5 PAIRED = 12 DUMMY_LU = -1 class HBSDSNM2(basic_lib.HBSDBasicLib): def __init__(self, conf): super(HBSDSNM2, self).__init__(conf=conf) self.unit_name = conf.hitachi_unit_name self.hsnm_lock = threading.Lock() self.hsnm_lock_file = ('%s%s' % (EXEC_LOCK_PATH_BASE, self.unit_name)) copy_speed = conf.hitachi_copy_speed if copy_speed <= 2: self.pace = 'slow' elif copy_speed == 3: self.pace = 'normal' else: self.pace = 'prior' def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start): lock = basic_lib.get_process_lock(self.hsnm_lock_file) with nested(self.hsnm_lock, lock): ret, stdout, stderr = self.exec_command('env', args=args, printflag=printflag) if not ret or noretry: raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= timeout: LOG.error(_LE("snm2 command timeout.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (re.search('DMEC002047', stderr) or re.search('DMEC002048', stderr) or re.search('DMED09000A', stderr) or re.search('DMED090026', stderr) or re.search('DMED0E002B', stderr) or re.search('DMER03006A', stderr) or re.search('DMER030080', stderr) or re.search('DMER0300B8', stderr) or re.search('DMER0800CF', stderr) or re.search('DMER0800D[0-6D]', stderr) or re.search('DMES052602', stderr)): LOG.error(_LE("Unexpected error occurs in snm2.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_hsnm(self, command, args, printflag=True, noretry=False, timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL): args = '%s %s %s' % (SNM2_ENV, command, args) loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_exec_hsnm, args, printflag, noretry, timeout, time.time()) return loop.start(interval=interval).wait() def get_comm_version(self): ret, stdout, stderr = self.exec_hsnm('auman', '-help') m = re.search('Version (\d+).(\d+)', stdout) if not m: msg = basic_lib.output_err( 600, cmd='auman', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return '%s.%s' % (m.group(1), m.group(2)) def add_used_hlun(self, command, port, gid, used_list, ldev): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm(command, '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise 
exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: line = shlex.split(line) if not line: continue if line[0] == port and int(line[1][0:3]) == gid: if int(line[2]) not in used_list: used_list.append(int(line[2])) if int(line[3]) == ldev: hlu = int(line[2]) LOG.warning(_LW('ldev(%(ldev)d) is already mapped ' '(hlun: %(hlu)d)') % {'ldev': ldev, 'hlu': hlu}) return hlu return None def get_unused_ldev(self, ldev_range): start = ldev_range[0] end = ldev_range[1] unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auluref', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) free_ldev = start lines = stdout.splitlines() found = False for line in lines[2:]: line = shlex.split(line) if not line: continue ldev_num = int(line[0]) if free_ldev > ldev_num: continue if free_ldev == ldev_num: free_ldev += 1 else: found = True break if free_ldev > end: break else: found = True if not found: msg = basic_lib.output_err(648, resource='LDEV') raise exception.HBSDError(message=msg) return free_ldev def get_hgname_gid(self, port, host_grp_name): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False for line in lines: line = shlex.split(line) if not line: continue if line[0] == 'Port' and line[1] == port: is_target_port = True continue if is_target_port: if line[0] == 'Port': break if not line[0].isdigit(): continue gid = int(line[0]) if line[1] == host_grp_name: return gid return None def get_unused_gid(self, group_range, port): start = group_range[0] end = group_range[1] unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False free_gid = start found = False for line in lines: line = shlex.split(line) if not line: continue if line[0] == 'Port' and line[1] == port: is_target_port = True continue if is_target_port: if line[0] == 'Port': found = True break if not line[0].isdigit(): continue gid = int(line[0]) if free_gid > gid: continue if free_gid == gid: free_gid += 1 else: found = True break if free_gid > end or free_gid > MAX_HOSTGROUPS: break else: found = True if not found: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) return free_gid def comm_set_target_wwns(self, target_ports): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('aufibre1', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='aufibre1', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() target_wwns = {} for line in lines[3:]: if re.match('Transfer', line): break line = shlex.split(line) if len(line) < 4: continue port = '%s%s' % (line[0], line[1]) if target_ports: if port in target_ports: target_wwns[port] = line[3] else: target_wwns[port] = line[3] LOG.debug('target wwns: %s' % target_wwns) return target_wwns def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login): for pt in wwns: for line in buf[port]['assigned']: hgname = 
shlex.split(line[38:])[1][4:] if not re.match(basic_lib.NAME_PREFIX, hgname): continue if pt.search(line[38:54]): wwn = line[38:54] gid = int(shlex.split(line[38:])[1][0:3]) is_detected = None if login: for line in buf[port]['detected']: if pt.search(line[38:54]): is_detected = True break else: is_detected = False hostgroups.append({'port': six.text_type(port), 'gid': gid, 'initiator_wwn': wwn, 'detected': is_detected}) def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auhgwwn', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) security_ports = [] patterns = [] for wwn in wwns: pt = re.compile(wwn, re.IGNORECASE) patterns.append(pt) lines = stdout.splitlines() buf = {} _buffer = [] port = None security = None for line in lines: if re.match('Port', line): port = shlex.split(line)[1] if target_ports and port not in target_ports: port = None else: security = True if shlex.split(line)[5] == 'ON' else False buf[port] = {'detected': [], 'assigned': [], 'assignable': []} if security: security_ports.append(port) continue if port and security: if re.search('Detected WWN', line): _buffer = buf[port]['detected'] continue elif re.search('Assigned WWN', line): _buffer = buf[port]['assigned'] continue elif re.search('Assignable WWN', line): _buffer = buf[port]['assignable'] continue _buffer.append(line) hostgroups = [] for port in buf.keys(): self.get_hostgroup_from_wwns( hostgroups, port, patterns, buf, login) for hostgroup in hostgroups: hgs.append(hostgroup) return security_ports def comm_delete_lun_core(self, command, hostgroups, lun): unit = self.unit_name no_lun_cnt = 0 deleted_hostgroups = [] for hostgroup in hostgroups: LOG.debug('comm_delete_lun: hostgroup is %s' % hostgroup) port = hostgroup['port'] gid = hostgroup['gid'] ctl_no = port[0] port_no = port[1] is_deleted = False for deleted in deleted_hostgroups: if port == deleted['port'] and gid == deleted['gid']: is_deleted = True if is_deleted: continue ret, stdout, stderr = self.exec_hsnm(command, '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: line = shlex.split(line) if not line: continue if (line[0] == port and int(line[1][0:3]) == gid and int(line[3]) == lun): hlu = int(line[2]) break else: no_lun_cnt += 1 if no_lun_cnt == len(hostgroups): raise exception.HBSDNotFound else: continue opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no, gid, hlu, lun) ret, stdout, stderr = self.exec_hsnm(command, opt) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) deleted_hostgroups.append({'port': port, 'gid': gid}) LOG.debug('comm_delete_lun is over (%d)' % lun) def comm_delete_lun(self, hostgroups, ldev): self.comm_delete_lun_core('auhgmap', hostgroups, ldev) def comm_delete_lun_iscsi(self, hostgroups, ldev): self.comm_delete_lun_core('autargetmap', hostgroups, ldev) def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): unit = self.unit_name if is_vvol: command = 'aureplicationvvol' opt = ('-unit %s -add -lu %d -size %dg' % (unit, ldev, capacity)) else: command = 'auluadd' opt = ('-unit %s -lu %d -dppoolno %d -size %dg' % (unit, ldev, pool_id, capacity)) 
ret, stdout, stderr = self.exec_hsnm(command, opt) if ret: if (re.search('DMEC002047', stderr) or re.search('DMES052602', stderr) or re.search('DMED09000A', stderr)): raise exception.HBSDNotFound else: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_hostgrp(self, port, gid, host_grp_name): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no, port_no, gid, host_grp_name) ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) if ret: raise exception.HBSDNotFound def comm_del_hostgrp(self, port, gid, host_grp_name): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no, host_grp_name) ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) if ret: msg = basic_lib.output_err( 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_hbawwn(self, port, gid, wwn): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no, port_no, wwn, gid) ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) if ret: opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no, port_no, wwn, gid) ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) if ret: msg = basic_lib.output_err( 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_lun(self, command, hostgroups, ldev, is_once=False): unit = self.unit_name tmp_hostgroups = hostgroups[:] used_list = [] is_ok = False hlu = None old_hlu = None for hostgroup in hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] hlu = self.add_used_hlun(command, port, gid, used_list, ldev) # When 'hlu' or 'old_hlu' is 0, it should be true. # So, it cannot remove 'is not None'. 
if hlu is not None: if old_hlu is not None and old_hlu != hlu: msg = basic_lib.output_err(648, resource='LUN (HLUN)') raise exception.HBSDError(message=msg) is_ok = True hostgroup['lun'] = hlu tmp_hostgroups.remove(hostgroup) old_hlu = hlu else: hlu = old_hlu if not used_list: hlu = 0 elif hlu is None: for i in range(MAX_HLUN + 1): if i not in used_list: hlu = i break else: raise exception.HBSDNotFound ret = 0 stdout = None stderr = None invalid_hgs_str = None for hostgroup in tmp_hostgroups: port = hostgroup['port'] gid = hostgroup['gid'] ctl_no = port[0] port_no = port[1] if not hostgroup['detected']: if invalid_hgs_str: invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, port, gid) else: invalid_hgs_str = '%s:%d' % (port, gid) continue opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no, gid, hlu, ldev) ret, stdout, stderr = self.exec_hsnm(command, opt) if ret == 0: is_ok = True hostgroup['lun'] = hlu if is_once: break else: msg = basic_lib.set_msg( 314, ldev=ldev, lun=hlu, port=port, id=gid) LOG.warning(msg) if not is_ok: if stderr: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) else: msg = basic_lib.output_err(659, gid=invalid_hgs_str) raise exception.HBSDError(message=msg) def comm_delete_ldev(self, ldev, is_vvol): unit = self.unit_name if is_vvol: command = 'aureplicationvvol' opt = '-unit %s -rm -lu %d' % (unit, ldev) else: command = 'auludel' opt = '-unit %s -lu %d -f' % (unit, ldev) ret, stdout, stderr = self.exec_hsnm(command, opt, timeout=30, interval=3) if ret: if (re.search('DMEC002048', stderr) or re.search('DMED090026', stderr)): raise exception.HBSDNotFound msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return ret def comm_extend_ldev(self, ldev, old_size, new_size): unit = self.unit_name command = 'auluchgsize' options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size) ret, stdout, stderr = self.exec_hsnm(command, options) if ret: msg = basic_lib.output_err( 600, cmd=command, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def delete_chap_user(self, port): unit = self.unit_name ctl_no = port[0] port_no = port[1] auth_username = self.conf.hitachi_auth_user opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no, auth_username) return self.exec_hsnm('auchapuser', opt) def _wait_for_add_chap_user(self, cmd, auth_username, auth_password, start): # Don't move 'import pexpect' to the beginning of the file so that # a tempest can work. 
import pexpect lock = basic_lib.get_process_lock(self.hsnm_lock_file) with nested(self.hsnm_lock, lock): try: child = pexpect.spawn(cmd) child.expect('Secret: ', timeout=CHAP_TIMEOUT) child.sendline(auth_password) child.expect('Re-enter Secret: ', timeout=CHAP_TIMEOUT) child.sendline(auth_password) child.expect('The CHAP user information has ' 'been added successfully.', timeout=CHAP_TIMEOUT) except Exception: if time.time() - start >= EXEC_TIMEOUT: msg = basic_lib.output_err(642, user=auth_username) raise exception.HBSDError(message=msg) else: raise loopingcall.LoopingCallDone(True) def set_chap_authention(self, port, gid): ctl_no = port[0] port_no = port[1] unit = self.unit_name auth_username = self.conf.hitachi_auth_user auth_password = self.conf.hitachi_auth_password add_chap_user = self.conf.hitachi_add_chap_user assign_flag = True added_flag = False opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no, auth_username) ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True) if ret: if not add_chap_user: msg = basic_lib.output_err(643, user=auth_username) raise exception.HBSDError(message=msg) root_helper = utils.get_root_helper() cmd = ('%s env %s auchapuser -unit %s -add %s %s ' '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no, port_no, gid, auth_username)) LOG.debug('Add CHAP user') loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_add_chap_user, cmd, auth_username, auth_password, time.time()) added_flag = loop.start(interval=EXEC_INTERVAL).wait() else: lines = stdout.splitlines()[4:] for line in lines: if int(shlex.split(line)[0][0:3]) == gid: assign_flag = False break if assign_flag: opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no, port_no, gid, auth_username) ret, stdout, stderr = self.exec_hsnm('auchapuser', opt) if ret: if added_flag: _ret, _stdout, _stderr = self.delete_chap_user(port) if _ret: msg = basic_lib.set_msg(303, user=auth_username) LOG.warning(msg) msg = basic_lib.output_err( 600, cmd='auchapuser', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) return added_flag def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn): auth_method = self.conf.hitachi_auth_method unit = self.unit_name ctl_no = port[0] port_no = port[1] if auth_method: auth_arg = '-authmethod %s -mutual disable' % auth_method else: auth_arg = '-authmethod None' opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid) opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn, auth_arg) ret, stdout, stderr = self.exec_hsnm('autargetdef', opt) if ret: raise exception.HBSDNotFound def delete_iscsi_target(self, port, _target_no, target_alias): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no, target_alias) return self.exec_hsnm('autargetdef', opt) def comm_set_hostgrp_reportportal(self, port, target_alias): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no, target_alias) opt = '%s -ReportFullPortalList enable' % opt ret, stdout, stderr = self.exec_hsnm('autargetopt', opt) if ret: msg = basic_lib.output_err( 600, cmd='autargetopt', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_add_initiator(self, port, gid, host_iqn): unit = self.unit_name ctl_no = port[0] port_no = port[1] opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no, port_no, gid, host_iqn) ret, stdout, 
stderr = self.exec_hsnm('autargetini', opt) if ret: msg = basic_lib.output_err( 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetini', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) security_ports = [] lines = stdout.splitlines() hostgroups = [] security = True for line in lines: if not shlex.split(line): continue if re.match('Port', line): line = shlex.split(line) port = line[1] security = True if line[4] == 'ON' else False continue if target_ports and port not in target_ports: continue if security: if (host_iqn in shlex.split(line[72:]) and re.match(basic_lib.NAME_PREFIX, shlex.split(line)[0][4:])): gid = int(shlex.split(line)[0][0:3]) hostgroups.append( {'port': port, 'gid': gid, 'detected': True}) LOG.debug('Find port=%(port)s gid=%(gid)d' % {'port': port, 'gid': gid}) if port not in security_ports: security_ports.append(port) for hostgroup in hostgroups: hgs.append(hostgroup) return security_ports def comm_get_iscsi_ip(self, port): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('auiscsi', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='auiscsi', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() is_target_port = False for line in lines: line_array = shlex.split(line) if not line_array: continue if line_array[0] == 'Port' and line_array[1] != 'Number': if line_array[1] == port: is_target_port = True else: is_target_port = False continue if is_target_port and re.search('IPv4 Address', line): ip_addr = shlex.split(line)[3] break if is_target_port and re.search('Port Number', line): ip_port = shlex.split(line)[3] else: msg = basic_lib.output_err(651) raise exception.HBSDError(message=msg) return ip_addr, ip_port def comm_get_target_iqn(self, port, gid): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) is_target_host = False tmp_port = None lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue gid_tmp = line[0][0:3] if gid_tmp.isdigit() and int(gid_tmp) == gid: is_target_host = True continue if is_target_host and line[0] == "iSCSI": target_iqn = line[3] break else: msg = basic_lib.output_err(650, resource='IQN') raise exception.HBSDError(message=msg) return target_iqn def get_unused_gid_iscsi(self, group_range, port): start = group_range[0] end = min(group_range[1], MAX_HOSTGROUPS_ISCSI) unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) used_list = [] tmp_port = None lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue if line[0][0:3].isdigit(): gid = int(line[0][0:3]) if start <= gid <= end: 
used_list.append(gid) if not used_list: return start for gid in range(start, end + 1): if gid not in used_list: break else: msg = basic_lib.output_err(648, resource='GID') raise exception.HBSDError(message=msg) return gid def get_gid_from_targetiqn(self, target_iqn, target_alias, port): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('autargetdef', '-unit %s -refer' % unit) if ret: msg = basic_lib.output_err( 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) gid = None tmp_port = None found_alias_full = False found_alias_part = False lines = stdout.splitlines() for line in lines: line = shlex.split(line) if not line: continue if line[0] == "Port": tmp_port = line[1] continue if port != tmp_port: continue if line[0][0:3].isdigit(): tmp_gid = int(line[0][0:3]) if re.match(basic_lib.NAME_PREFIX, line[0][4:]): found_alias_part = True if line[0][4:] == target_alias: found_alias_full = True continue if line[0] == "iSCSI": if line[3] == target_iqn: gid = tmp_gid break else: found_alias_part = False if found_alias_full and gid is None: msg = basic_lib.output_err(641) raise exception.HBSDError(message=msg) # When 'gid' is 0, it should be true. # So, it cannot remove 'is not None'. if not found_alias_part and gid is not None: msg = basic_lib.output_err(641) raise exception.HBSDError(message=msg) return gid def comm_get_dp_pool(self, pool_id): unit = self.unit_name ret, stdout, stderr = self.exec_hsnm('audppool', '-unit %s -refer -g' % unit, printflag=False) if ret: msg = basic_lib.output_err( 600, cmd='audppool', ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) lines = stdout.splitlines() for line in lines[2:]: tc_cc = re.search('\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line) pool_tmp = re.match('\s*\d+', line) if (pool_tmp and tc_cc and int(pool_tmp.group(0)) == pool_id): total_gb = int(float(tc_cc.group(1))) free_gb = total_gb - int(float(tc_cc.group(2))) return total_gb, free_gb msg = basic_lib.output_err(640, pool_id=pool_id) raise exception.HBSDError(message=msg) def is_detected(self, port, wwn): hgs = [] self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True) return hgs[0]['detected'] def pairoperate(self, opr, pvol, svol, is_vvol, args=None): unit = self.unit_name method = '-ss' if is_vvol else '-si' opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method, pvol, svol) if args: opt = '%s %s' % (opt, args) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt) if ret: opt = '%s %s' % ('aureplicationlocal', opt) msg = basic_lib.output_err( 600, cmd=opt, ret=ret, out=stdout, err=stderr) raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) def comm_create_pair(self, pvol, svol, is_vvol): if not is_vvol: args = '-compsplit -pace %s' % self.pace method = basic_lib.FULL else: pool = self.conf.hitachi_thin_pool_id args = ('-localrepdppoolno %d -localmngdppoolno %d ' '-compsplit -pace %s' % (pool, pool, self.pace)) method = basic_lib.THIN try: self.pairoperate('create', pvol, svol, is_vvol, args=args) except exception.HBSDCmdError as ex: if (re.search('DMER0300B8', ex.stderr) or re.search('DMER0800CF', ex.stderr) or re.search('DMER0800D[0-6D]', ex.stderr) or re.search('DMER03006A', ex.stderr) or re.search('DMER030080', ex.stderr)): msg = basic_lib.output_err(615, copy_method=method, pvol=pvol) raise exception.HBSDBusy(message=msg) else: raise def _comm_pairevtwait(self, pvol, svol, is_vvol): unit = self.unit_name if not is_vvol: pairname = 
'SI_LU%04d_LU%04d' % (pvol, svol) method = '-si' else: pairname = 'SS_LU%04d_LU%04d' % (pvol, svol) method = '-ss' opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' % (unit, method, pairname)) ret, stdout, stderr = self.exec_hsnm('aureplicationmon', opt, noretry=True) return ret def _wait_for_pair_status(self, pvol, svol, is_vvol, status, timeout, start): if self._comm_pairevtwait(pvol, svol, is_vvol) in status: raise loopingcall.LoopingCallDone() if time.time() - start >= timeout: msg = basic_lib.output_err( 637, method='_wait_for_pair_status', timeout=timeout) raise exception.HBSDError(message=msg) def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval): loop = loopingcall.FixedIntervalLoopingCall( self._wait_for_pair_status, pvol, svol, is_vvol, status, timeout, time.time()) loop.start(interval=interval).wait() def delete_pair(self, pvol, svol, is_vvol): self.pairoperate('simplex', pvol, svol, is_vvol) def trans_status_hsnm2raid(self, str): status = None obj = re.search('Split\((.*)%\)', str) if obj: status = basic_lib.PSUS obj = re.search('Paired\((.*)%\)', str) if obj: status = basic_lib.PAIR return status def get_paired_info(self, ldev, only_flag=False): opt_base = '-unit %s -refer' % self.unit_name if only_flag: opt_base = '%s -ss' % opt_base opt = '%s -pvol %d' % (opt_base, ldev) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt, noretry=True) if ret == 0: lines = stdout.splitlines() pair_info = {'pvol': ldev, 'svol': []} for line in lines[1:]: status = self.trans_status_hsnm2raid(line) if re.search('SnapShot', line[100:]): is_vvol = True else: is_vvol = False line = shlex.split(line) if not line: break svol = int(line[2]) pair_info['svol'].append({'lun': svol, 'status': status, 'is_vvol': is_vvol}) return pair_info opt = '%s -svol %d' % (opt_base, ldev) ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt, noretry=True) if ret == 1: return {'pvol': None, 'svol': []} lines = stdout.splitlines() status = self.trans_status_hsnm2raid(lines[1]) if re.search('SnapShot', lines[1][100:]): is_vvol = True else: is_vvol = False line = shlex.split(lines[1]) pvol = int(line[1]) return {'pvol': pvol, 'svol': [{'lun': ldev, 'status': status, 'is_vvol': is_vvol}]} def create_lock_file(self): basic_lib.create_empty_file(self.hsnm_lock_file) def get_hostgroup_luns(self, port, gid): list = [] self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU) return list
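# ----------------------------------------------------------------------------
# Illustration: the SNM2 helpers above repeatedly parse CLI output by
# splitting each line with shlex and reading the group ID from the first
# three characters of the first token (see get_unused_gid_iscsi). The
# standalone sketch below reproduces that parsing pattern; the sample text
# is made up for demonstration and is not real autargetdef output.
# ----------------------------------------------------------------------------
import shlex

SAMPLE_STDOUT = """Port CTL0-A
  000:HBSD-target0  iqn.example:tgt0
  001:HBSD-target1  iqn.example:tgt1
  003:other-target  iqn.example:tgt3
"""


def first_free_gid(stdout, start=0, end=127):
    """Return the first group ID in [start, end] not used in stdout."""
    used = set()
    for line in stdout.splitlines():
        tokens = shlex.split(line)
        if not tokens:
            continue
        gid_field = tokens[0][0:3]      # first three characters hold the GID
        if gid_field.isdigit():
            gid = int(gid_field)
            if start <= gid <= end:
                used.add(gid)
    for gid in range(start, end + 1):   # same range scan as the driver
        if gid not in used:
            return gid
    raise RuntimeError('no free GID in range %d-%d' % (start, end))


print(first_free_gid(SAMPLE_STDOUT))    # -> 2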
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import io from unittest import TestCase, main import numpy as np import pandas as pd import numpy.testing as npt from skbio import OrdinationResults from skbio.io import OrdinationFormatError from skbio.io.format.ordination import ( _ordination_to_ordination_results, _ordination_results_to_ordination, _ordination_sniffer) from skbio.util import get_data_path, assert_ordination_results_equal class OrdinationTestData(TestCase): def setUp(self): self.valid_fps = map( get_data_path, ['ordination_L&L_CA_data_scores', 'ordination_example3_scores', 'ordination_PCoA_sample_data_3_scores', 'ordination_example2_scores']) # Store filepath, regex for matching the error message that should be # raised when reading the file, and whether the file should be matched # by the sniffer (True) or not (False). self.invalid_fps = map(lambda e: (get_data_path(e[0]), e[1], e[2]), [ ('empty', 'end of file.*Eigvals header', False), ('whitespace_only', 'Eigvals header not found', False), ('ordination_error1', 'Eigvals header not found', False), ('ordination_error2', 'Proportion explained header not found', False), ('ordination_error3', 'Species header not found', True), ('ordination_error4', 'Site header not found', True), ('ordination_error5', 'Biplot header not found', True), ('ordination_error6', 'Site constraints header not found', True), ('ordination_error7', 'empty line', False), ('ordination_error8', '9.*Proportion explained.*8', True), ('ordination_error9', '2 values.*1 in row 1', True), ('ordination_error10', '2 values.*1 in row 1', True), ('ordination_error11', 'Site constraints ids and site ids', True), ('ordination_error12', '9.*Eigvals.*8', True), ('ordination_error13', '9.*Proportion explained.*8', True), ('ordination_error14', 'Site is 0: 9 x 0', True), ('ordination_error15', '9 values.*8 in row 1', True), ('ordination_error16', 'Biplot is 0: 3 x 0', True), ('ordination_error17', '3 values.*2 in row 1', True), ('ordination_error18', 'proportion explained.*eigvals: 8 != 9', True), ('ordination_error19', 'coordinates.*species.*eigvals: 1 != 2', True), ('ordination_error20', 'coordinates.*site.*eigvals: 1 != 2', True), ('ordination_error21', 'one eigval', False), ('ordination_error22', 'end of file.*blank line', False), ('ordination_error23', 'end of file.*Proportion explained section', True), ('ordination_error24', 'end of file.*row 2.*Species section', True) ]) class OrdinationResultsReaderWriterTests(OrdinationTestData): def setUp(self): super(OrdinationResultsReaderWriterTests, self).setUp() # define in-memory results, one for each of the valid files in # self.valid_fps # CA results axes_ids = ['CA1', 'CA2'] species_ids = ['Species1', 'Species2', 'Species3'] site_ids = ['Site1', 'Site2', 'Site3'] eigvals = pd.Series([0.0961330159181, 0.0409418140138], axes_ids) species = pd.DataFrame([[0.408869425742, 0.0695518116298], [-0.1153860437, -0.299767683538], [-0.309967102571, 0.187391917117]], index=species_ids, columns=axes_ids) site = pd.DataFrame([[-0.848956053187, 0.882764759014], [-0.220458650578, -1.34482000302], [1.66697179591, 0.470324389808]], index=site_ids, columns=axes_ids) biplot = None site_constraints = None prop_explained = None ca_scores = 
OrdinationResults( 'CA', 'Correspondence Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # CCA results axes_ids = ['CCA%d' % i for i in range(1, 10)] species_ids = ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5', 'Species6', 'Species7', 'Species8'] site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'] eigvals = pd.Series([0.366135830393, 0.186887643052, 0.0788466514249, 0.082287840501, 0.0351348475787, 0.0233265839374, 0.0099048981912, 0.00122461669234, 0.000417454724117], axes_ids) species = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_CCA_species')), index=species_ids, columns=axes_ids) site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site')), index=site_ids, columns=axes_ids) biplot = pd.DataFrame( [[-0.169746767979, 0.63069090084, 0.760769036049], [-0.994016563505, 0.0609533148724, -0.0449369418179], [0.184352565909, -0.974867543612, 0.0309865007541]], columns=axes_ids[:3]) site_constraints = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_CCA_site_constraints')), index=site_ids, columns=axes_ids) prop_explained = None cca_scores = OrdinationResults('CCA', 'Canonical Correspondence Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # PCoA results axes_ids = ['PC%d' % i for i in range(1, 10)] species_ids = None site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593', 'PC.355', 'PC.607', 'PC.634'] eigvals = pd.Series([0.512367260461, 0.300719094427, 0.267912066004, 0.208988681078, 0.19169895326, 0.16054234528, 0.15017695712, 0.122457748167, 0.0], axes_ids) species = None site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site')), index=site_ids, columns=axes_ids) biplot = None site_constraints = None prop_explained = pd.Series([0.267573832777, 0.15704469605, 0.139911863774, 0.109140272454, 0.100111048503, 0.0838401161912, 0.0784269939011, 0.0639511763509, 0.0], axes_ids) pcoa_scores = OrdinationResults('PCoA', 'Principal Coordinate Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # RDA results axes_ids = ['RDA%d' % i for i in range(1, 8)] species_ids = ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5'] site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'] eigvals = pd.Series([25.8979540892, 14.9825779819, 8.93784077262, 6.13995623072, 1.68070536498, 0.57735026919, 0.275983624351], axes_ids) species = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_RDA_species')), index=species_ids, columns=axes_ids) site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site')), index=site_ids, columns=axes_ids) biplot = pd.DataFrame( [[0.422650019179, -0.559142585857, -0.713250678211], [0.988495963777, 0.150787422017, -0.0117848614073], [-0.556516618887, 0.817599992718, 0.147714267459], [-0.404079676685, -0.9058434809, -0.127150316558]], columns=axes_ids[:3]) site_constraints = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_RDA_site_constraints')), index=site_ids, columns=axes_ids) prop_explained = None rda_scores = OrdinationResults( 'RDA', 'Redundancy Analysis', eigvals=eigvals, 
features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores, rda_scores] def test_read_valid_files(self): for fp, obj in zip(self.valid_fps, self.ordination_results_objs): obs = _ordination_to_ordination_results(fp) assert_ordination_results_equal( obs, obj, ignore_method_names=True, ignore_axis_labels=True) def test_read_invalid_files(self): for invalid_fp, error_msg_regexp, _ in self.invalid_fps: with self.assertRaisesRegex(OrdinationFormatError, error_msg_regexp): _ordination_to_ordination_results(invalid_fp) def test_write(self): for fp, obj in zip(self.valid_fps, self.ordination_results_objs): fh = io.StringIO() _ordination_results_to_ordination(obj, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() npt.assert_equal(obs, exp) def test_roundtrip_read_write(self): for fp in self.valid_fps: # Read. obj1 = _ordination_to_ordination_results(fp) # Write. fh = io.StringIO() _ordination_results_to_ordination(obj1, fh) fh.seek(0) # Read. obj2 = _ordination_to_ordination_results(fh) fh.close() assert_ordination_results_equal(obj1, obj2) class SnifferTests(OrdinationTestData): def setUp(self): super(SnifferTests, self).setUp() def test_matches_and_nonmatches(self): # Sniffer should match all valid files, and will match some invalid # ones too because it doesn't exhaustively check the entire file. for fp in self.valid_fps: self.assertEqual(_ordination_sniffer(fp), (True, {})) for fp, _, expected_sniffer_match in self.invalid_fps: self.assertEqual(_ordination_sniffer(fp), (expected_sniffer_match, {})) if __name__ == '__main__': main()
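# ----------------------------------------------------------------------------
# Illustration: the private reader/writer functions exercised above are also
# reachable through scikit-bio's public I/O registry. The sketch below is a
# standalone roundtrip through an in-memory file handle; it assumes that
# OrdinationResults.read/.write accept format='ordination', as in recent
# scikit-bio releases, and the axis labels and numbers are made up.
# ----------------------------------------------------------------------------
import io

import pandas as pd
from skbio import OrdinationResults

axes = ['PC1', 'PC2']
result = OrdinationResults(
    'PCoA', 'Principal Coordinate Analysis',
    eigvals=pd.Series([0.7, 0.3], index=axes),
    samples=pd.DataFrame([[0.1, 0.2], [0.3, 0.4]],
                         index=['S1', 'S2'], columns=axes),
    proportion_explained=pd.Series([0.7, 0.3], index=axes))

fh = io.StringIO()
result.write(fh, format='ordination')   # serialize to the text format
fh.seek(0)
roundtripped = OrdinationResults.read(fh, format='ordination')
print(roundtripped.eigvals)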
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/thumbor/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com thumbor@googlegroups.com import mimetypes import random from io import BytesIO from os.path import dirname, join, realpath from unittest import mock from urllib.parse import urlencode from PIL import Image from ssim import compute_ssim from tornado.testing import AsyncHTTPTestCase from thumbor.app import ThumborServiceApp from thumbor.config import Config from thumbor.context import Context, RequestParameters from thumbor.engines.pil import Engine as PilEngine from thumbor.importer import Importer from thumbor.transformer import Transformer def get_ssim(actual, expected): if ( actual.size[0] != expected.size[0] or actual.size[1] != expected.size[1] ): raise RuntimeError( "Can't calculate SSIM for images of different sizes (" f"one is {actual.size[0]}x{actual.size[1]}, " f"the other {expected.size[0]}x{expected.size[1]}).", ) return compute_ssim(actual, expected) def encode_multipart_formdata(fields, files): boundary = b"thumborUploadFormBoundary" crlf = b"\r\n" lines = [] for key, value in fields.items(): lines.append(b"--" + boundary) lines.append( b'Content-Disposition: form-data; name="%s"' % key.encode() ) lines.append(b"") lines.append(value) for (key, filename, value) in files: lines.append(b"--" + boundary) lines.append( b'Content-Disposition: form-data; name="%s"; filename="%s"' % (key.encode(), filename.encode()) ) lines.append( b"Content-Type: %s" % mimetypes.guess_type(filename)[0].encode() or b"application/octet-stream" ) lines.append(b"") lines.append(value) lines.append(b"") lines.append(b"") lines.append(b"--" + boundary + b"--") body = crlf.join(lines) content_type = b"multipart/form-data; boundary=%s" % boundary return content_type, body class TestCase(AsyncHTTPTestCase): _multiprocess_can_split_ = True def get_app(self): self.config = ( # This is a test case pylint: disable=attribute-defined-outside-init self.get_config() ) self.server = ( # This is a test case pylint: disable=attribute-defined-outside-init self.get_server() ) self.importer = ( # This is a test case pylint: disable=attribute-defined-outside-init self.get_importer() ) self.request_handler = ( # This is a test case pylint: disable=attribute-defined-outside-init self.get_request_handler() ) self.importer.import_modules() self.context = self.get_context() return ThumborServiceApp(self.context) def get_config(self): # Meant to be overriden pylint: disable=no-self-use return Config() def get_server(self): # Meant to be overriden pylint: disable=no-self-use return None def get_importer(self): importer = Importer(self.config) importer.import_modules() return importer def get_request_handler( self, ): # Meant to be overriden pylint: disable=no-self-use return None def get_context(self): self.importer.import_modules() return Context( self.server, self.config, self.importer, self.request_handler ) async def async_fetch(self, path, method="GET", body=None, headers=None): return await self.http_client.fetch( self.get_url(path), method=method, body=body, headers=headers, allow_nonstandard_methods=True, raise_error=False, ) async def async_get(self, path, headers=None): return await self.async_fetch( path, method="GET", body=urlencode({}, doseq=True), headers=headers ) async def async_post(self, path, headers, body): return await self.async_fetch( path, method="POST", body=body, headers=headers, ) 
async def async_put(self, path, headers, body): return await self.async_fetch( path, method="PUT", body=body, headers=headers, ) async def async_delete(self, path, headers): return await self.async_fetch( path, method="DELETE", body=urlencode({}, doseq=True), headers=headers, ) async def async_post_files(self, path, data=None, files=None): if data is None: data = {} if files is None: files = [] multipart_data = encode_multipart_formdata(data, files) return await self.async_fetch( path, method="POST", body=multipart_data[1], headers={"Content-Type": multipart_data[0]}, ) class FilterTestCase(TestCase): _multiprocess_can_split_ = True def setUp(self): super().setUp() self.context = {} def get_filter(self, filter_name, params_string="", config_context=None): config = Config( FILTERS=[filter_name], LOADER="thumbor.loaders.file_loader", FILE_LOADER_ROOT_PATH=self.get_fixture_root_path(), ) importer = Importer(config) importer.import_modules() req = RequestParameters() context = Context(config=config, importer=importer) context.request = req context.request.engine = context.modules.engine if config_context is not None: config_context(context) self.context = context fltr = importer.filters[0] fltr.pre_compile() context.transformer = Transformer(context) return fltr(params_string, context=context) @staticmethod def get_fixture_root_path(): return join(dirname(realpath(__file__)), "fixtures", "filters") def get_fixture_path(self, name): return f"{self.get_fixture_root_path()}/{name}" def get_fixture(self, name, mode="RGB"): image = Image.open(self.get_fixture_path(name)) return image.convert(mode) async def get_filtered( self, source_image, filter_name, params_string, config_context=None, mode="RGB", ): fltr = self.get_filter(filter_name, params_string, config_context) image = Image.open(self.get_fixture_path(source_image)) img_buffer = BytesIO() # Special case for the quality test, because the quality filter # doesn't really affect the image, it only sets a context value # for use on save. But here we convert the result, # we do not save it if params_string == "quality(10)": image.save(img_buffer, "JPEG", quality=10) fltr.engine.load(img_buffer.getvalue(), ".jpg") else: image.save(img_buffer, "PNG", quality=100) fltr.engine.load(img_buffer.getvalue(), ".png") fltr.context.transformer.img_operation_worker() await fltr.run() fltr.engine.image = fltr.engine.image.convert(mode) return fltr.engine.image @staticmethod def get_ssim(actual, expected): return get_ssim(actual, expected) def debug(self, image): # pylint: disable=arguments-differ image = Image.fromarray(image) path = f"/tmp/debug_image_{random.randint(1, 10000)}.jpg" image.save(path, "JPEG") print(f"The debug image was in {path}.") @staticmethod def debug_size(image): loaded = Image.fromarray(image) print( f"Image dimensions are {loaded.size[0]}x{loaded.size[1]} (shape is {image.shape})" ) class DetectorTestCase(TestCase): _multiprocess_can_split_ = True def setUp(self): super().setUp() self.context.request = mock.Mock(focal_points=[]) self.engine = PilEngine(self.context) self.context.modules.engine = self.engine
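# ----------------------------------------------------------------------------
# Illustration: how the body built by encode_multipart_formdata above is
# used. The field names and payload are placeholders; the stdlib email
# parser is only used here as a quick structural sanity check of the
# generated multipart body and is not part of the test suite above.
# ----------------------------------------------------------------------------
from email.parser import BytesParser
from email.policy import default as default_policy

content_type, body = encode_multipart_formdata(
    {"filename": b"crocodile.jpg"},
    [("media", "crocodile.jpg", b"fake image bytes")],
)

message = BytesParser(policy=default_policy).parsebytes(
    b"Content-Type: " + content_type + b"\r\n\r\n" + body
)
for part in message.iter_parts():
    # Prints the Content-Disposition of the form field and of the file part.
    print(part.get("Content-Disposition"))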
# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver utility module """ import platform import mock from oslo_concurrency import processutils as putils from oslo_log import log from oslo_utils import importutils from manila import exception from manila.share.drivers.netapp import utils as na_utils from manila import test from manila import version class NetAppDriverUtilsTestCase(test.TestCase): def setUp(self): super(NetAppDriverUtilsTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(na_utils.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(na_utils.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) na_utils.setup_tracing(None) def test_setup_tracing(self): na_utils.setup_tracing(None) self.assertFalse(na_utils.TRACE_API) self.assertFalse(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method,api') self.assertTrue(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_setup_tracing_invalid_key(self): na_utils.setup_tracing('method,fake') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(1, na_utils.LOG.warning.call_count) @na_utils.trace def _trace_test_method(*args, **kwargs): return 'OK' def test_trace_no_tracing(self): result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(0, na_utils.LOG.debug.call_count) na_utils.setup_tracing('method') def test_trace_method_tracing(self): na_utils.setup_tracing('method') result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, na_utils.LOG.debug.call_count) def test_validate_driver_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_validate_driver_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(1, na_utils.LOG.warning.call_count) def test_check_flags(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag2': 'value2'}) self.assertIsNone(na_utils.check_flags(['flag1', 'flag2'], configuration)) def test_check_flags_missing_flag(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag3': 'value3'}) self.assertRaises(exception.InvalidInput, na_utils.check_flags, ['flag1', 'flag2'], configuration) def test_convert_to_list(self): self.assertListEqual([], na_utils.convert_to_list(None)) self.assertListEqual(['test'], na_utils.convert_to_list('test')) self.assertListEqual(['a'], 
na_utils.convert_to_list(['a'])) self.assertListEqual(['a', 'b'], na_utils.convert_to_list(['a', 'b'])) self.assertListEqual([1, 2, 3], na_utils.convert_to_list((1, 2, 3))) self.assertListEqual([5], na_utils.convert_to_list(5)) self.assertListEqual( sorted(['key1', 'key2']), sorted(na_utils.convert_to_list({'key1': 'value1', 'key2': 'value2'}))) def test_check_netapp_lib(self): mock_try_import = self.mock_object(importutils, 'try_import') na_utils.check_netapp_lib() mock_try_import.assert_called_once_with('netapp_lib') def test_check_netapp_lib_not_found(self): self.mock_object(importutils, 'try_import', mock.Mock(return_value=None)) self.assertRaises(exception.NetAppException, na_utils.check_netapp_lib) class OpenstackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def setUp(self): super(OpenstackInfoTestCase, self).setUp() def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def test_exception_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_exception_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) 
self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_exception_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_exception_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_rpm(self): info = 
na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called)
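# ----------------------------------------------------------------------------
# Illustration: the rpm/dpkg tests above all follow the same shape -- patch
# the probing call to return canned output or to raise, then assert that the
# object falls back to its "unknown" defaults. The stand-in class and test
# below are hypothetical (they are not part of na_utils); they only sketch
# that pattern with the stdlib unittest.mock.
# ----------------------------------------------------------------------------
import unittest
from unittest import mock as stdlib_mock


class PackageInfoStandIn(object):
    """Stand-in for the version-probing logic exercised above."""

    UNKNOWN = 'unknown version'

    def __init__(self, prober):
        self._prober = prober
        self._version = self.UNKNOWN

    def update(self):
        try:
            self._version = self._prober()
        except Exception:
            # Mirror OpenStackInfo: probing errors never escape, the
            # default value is simply kept.
            pass


class PackageInfoStandInTestCase(unittest.TestCase):
    def test_update(self):
        info = PackageInfoStandIn(stdlib_mock.Mock(return_value='1.2.3'))
        info.update()
        self.assertEqual('1.2.3', info._version)

    def test_update_probe_failure_keeps_default(self):
        info = PackageInfoStandIn(stdlib_mock.Mock(side_effect=Exception))
        info.update()
        self.assertEqual(PackageInfoStandIn.UNKNOWN, info._version)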
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class vlan_channel_binding(base_resource) : """ Binding class showing the channel that can be bound to vlan. """ def __init__(self) : self._ifnum = "" self._tagged = False self._id = 0 self.___count = 0 @property def id(self) : ur"""Specifies the virtual LAN ID.<br/>Minimum value = 1<br/>Maximum value = 4094. """ try : return self._id except Exception as e: raise e @id.setter def id(self, id) : ur"""Specifies the virtual LAN ID.<br/>Minimum value = 1<br/>Maximum value = 4094 """ try : self._id = id except Exception as e: raise e @property def ifnum(self) : ur"""The interface to be bound to the VLAN, specified in slot/port notation (for example, 1/3).<br/>Minimum length = 1. """ try : return self._ifnum except Exception as e: raise e @ifnum.setter def ifnum(self, ifnum) : ur"""The interface to be bound to the VLAN, specified in slot/port notation (for example, 1/3).<br/>Minimum length = 1 """ try : self._ifnum = ifnum except Exception as e: raise e @property def tagged(self) : ur"""Make the interface an 802.1q tagged interface. Packets sent on this interface on this VLAN have an additional 4-byte 802.1q tag, which identifies the VLAN. To use 802.1q tagging, you must also configure the switch connected to the appliance's interfaces. """ try : return self._tagged except Exception as e: raise e @tagged.setter def tagged(self, tagged) : ur"""Make the interface an 802.1q tagged interface. Packets sent on this interface on this VLAN have an additional 4-byte 802.1q tag, which identifies the VLAN. To use 802.1q tagging, you must also configure the switch connected to the appliance's interfaces. """ try : self._tagged = tagged except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(vlan_channel_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.vlan_channel_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.id is not None : return str(self.id) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : try : if resource and type(resource) is not list : updateresource = vlan_channel_binding() updateresource.id = resource.id updateresource.ifnum = resource.ifnum updateresource.tagged = resource.tagged return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [vlan_channel_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].id = resource[i].id updateresources[i].ifnum = resource[i].ifnum updateresources[i].tagged = resource[i].tagged return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : try : if resource and type(resource) is not list : deleteresource = vlan_channel_binding() deleteresource.id = resource.id deleteresource.ifnum = resource.ifnum deleteresource.tagged = resource.tagged return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [vlan_channel_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].id = resource[i].id deleteresources[i].ifnum = resource[i].ifnum deleteresources[i].tagged = resource[i].tagged return cls.delete_bulk_request(client, deleteresources) except Exception as e : raise e @classmethod def get(cls, service, id) : ur""" Use this API to fetch vlan_channel_binding resources. """ try : obj = vlan_channel_binding() obj.id = id response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, id, filter_) : ur""" Use this API to fetch filtered set of vlan_channel_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = vlan_channel_binding() obj.id = id option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service, id) : ur""" Use this API to count vlan_channel_binding resources configued on NetScaler. """ try : obj = vlan_channel_binding() obj.id = id option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, id, filter_) : ur""" Use this API to count the filtered set of vlan_channel_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = vlan_channel_binding() obj.id = id option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class vlan_channel_binding_response(base_response) : def __init__(self, length=1) : self.vlan_channel_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.vlan_channel_binding = [vlan_channel_binding() for _ in range(length)]
""" =============================================================== Partial Dependence and Individual Conditional Expectation Plots =============================================================== Partial dependence plots show the dependence between the target function [2]_ and a set of features of interest, marginalizing over the values of all other features (the complement features). Due to the limits of human perception, the size of the set of features of interest must be small (usually, one or two) thus they are usually chosen among the most important features. Similarly, an individual conditional expectation (ICE) plot [3]_ shows the dependence between the target function and a feature of interest. However, unlike partial dependence plots, which show the average effect of the features of interest, ICE plots visualize the dependence of the prediction on a feature for each :term:`sample` separately, with one line per sample. Only one feature of interest is supported for ICE plots. This example shows how to obtain partial dependence and ICE plots from a :class:`~sklearn.neural_network.MLPRegressor` and a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the California housing dataset. The example is taken from [1]_. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [2] For classification you can think of it as the regression score before the link function. .. [3] Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E., Peeking Inside the Black Box: Visualizing Statistical Learning With Plots of Individual Conditional Expectation. (2015) Journal of Computational and Graphical Statistics, 24(1): 44-65 (https://arxiv.org/abs/1309.6392) """ print(__doc__) # %% # California Housing data preprocessing # ------------------------------------- # # Center target to avoid gradient boosting init bias: gradient boosting # with the 'recursion' method does not account for the initial estimator # (here the average target, by default). import pandas as pd from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split cal_housing = fetch_california_housing() X = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names) y = cal_housing.target y -= y.mean() X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=0 ) # %% # 1-way partial dependence with different models # ---------------------------------------------- # # In this section, we will compute 1-way partial dependence with two different # machine-learning models: (i) a multi-layer perceptron and (ii) a # gradient-boosting. With these two models, we illustrate how to compute and # interpret both partial dependence plot (PDP) and individual conditional # expectation (ICE). # # Multi-layer perceptron # ...................... # # Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute # single-variable partial dependence plots. 
from time import time from sklearn.pipeline import make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.neural_network import MLPRegressor print("Training MLPRegressor...") tic = time() est = make_pipeline(QuantileTransformer(), MLPRegressor(hidden_layer_sizes=(50, 50), learning_rate_init=0.01, early_stopping=True)) est.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {est.score(X_test, y_test):.2f}") # %% # We configured a pipeline to scale the numerical input features and tuned the # neural network size and learning rate to get a reasonable compromise between # training time and predictive performance on a test set. # # Importantly, this tabular dataset has very different dynamic ranges for its # features. Neural networks tend to be very sensitive to features with varying # scales and forgetting to preprocess the numeric feature would lead to a very # poor model. # # It would be possible to get even higher predictive performance with a larger # neural network but the training would also be significantly more expensive. # # Note that it is important to check that the model is accurate enough on a # test set before plotting the partial dependence since there would be little # use in explaining the impact of a given feature on the prediction function of # a poor model. # # We will plot the partial dependence, both individual (ICE) and averaged one # (PDP). We limit to only 50 ICE curves to not overcrowd the plot. import matplotlib.pyplot as plt from sklearn.inspection import partial_dependence from sklearn.inspection import plot_partial_dependence print('Computing partial dependence plots...') tic = time() features = ['MedInc', 'AveOccup', 'HouseAge', 'AveRooms'] display = plot_partial_dependence( est, X_train, features, kind="both", subsample=50, n_jobs=3, grid_resolution=20, random_state=0 ) print(f"done in {time() - tic:.3f}s") display.figure_.suptitle( 'Partial dependence of house value on non-location features\n' 'for the California housing dataset, with MLPRegressor' ) display.figure_.subplots_adjust(hspace=0.3) # %% # Gradient boosting # ................. # # Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and # compute the partial dependence on the same features. from sklearn.ensemble import HistGradientBoostingRegressor print("Training HistGradientBoostingRegressor...") tic = time() est = HistGradientBoostingRegressor() est.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {est.score(X_test, y_test):.2f}") # %% # Here, we used the default hyperparameters for the gradient boosting model # without any preprocessing as tree-based models are naturally robust to # monotonic transformations of numerical features. # # Note that on this tabular dataset, Gradient Boosting Machines are both # significantly faster to train and more accurate than neural networks. It is # also significantly cheaper to tune their hyperparameters (the defaults tend # to work well while this is not often the case for neural networks). # # We will plot the partial dependence, both individual (ICE) and averaged one # (PDP). We limit to only 50 ICE curves to not overcrowd the plot. 
print('Computing partial dependence plots...')
tic = time()
display = plot_partial_dependence(
    est, X_train, features, kind="both", subsample=50,
    n_jobs=3, grid_resolution=20, random_state=0
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
    'Partial dependence of house value on non-location features\n'
    'for the California housing dataset, with Gradient Boosting'
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)

# %%
# Analysis of the plots
# .....................
#
# We can clearly see on the PDPs (thick blue line) that the median house price
# shows a linear relationship with the median income (top left) and that the
# house price drops when the average occupants per household increases (top
# middle). The top right plot shows that the house age in a district does not
# have a strong influence on the (median) house price; nor does the average
# number of rooms per household.
#
# The ICE curves (light blue lines) complement the analysis: we can see that
# there are some exceptions, where the house price remains constant with
# median income and average occupancy. On the other hand, while the house age
# (top right) does not have a strong influence on the median house price on
# average, there seem to be a number of exceptions where the house price
# increases for ages between 15 and 25. Similar exceptions can be observed for
# the average number of rooms (bottom left). Therefore, ICE plots reveal some
# individual effects that are attenuated by taking the average.
#
# In all plots, the tick marks on the x-axis represent the deciles of the
# feature values in the training data.
#
# We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much
# smoother predictions than
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
#
# However, it is worth noting that we are creating potentially meaningless
# synthetic samples if features are correlated.

# %%
# 2D interaction plots
# --------------------
#
# PDPs with two features of interest enable us to visualize interactions among
# them. However, ICE curves cannot be plotted or interpreted in an easy manner
# for more than one feature. Another consideration is the cost of computing
# the PDPs: with tree-based models, when only PDPs are requested, they can be
# computed efficiently using the `'recursion'` method.

features = ['AveOccup', 'HouseAge', ('AveOccup', 'HouseAge')]
print('Computing partial dependence plots...')
tic = time()
_, ax = plt.subplots(ncols=3, figsize=(9, 4))
display = plot_partial_dependence(
    est, X_train, features, kind='average',
    n_jobs=3, grid_resolution=20, ax=ax,
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
    'Partial dependence of house value on non-location features\n'
    'for the California housing dataset, with Gradient Boosting'
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)

# %%
# The two-way partial dependence plot shows the dependence of median house
# price on joint values of house age and average occupants per household. We
# can clearly see an interaction between the two features: for an average
# occupancy greater than two, the house price is nearly independent of the
# house age, whereas for values less than two there is a strong dependence on
# age.
#
# 3D interaction plots
# --------------------
#
# Let's make the same partial dependence plot for the 2 features interaction,
# this time in 3 dimensions.
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure()

features = ('AveOccup', 'HouseAge')
pdp = partial_dependence(
    est, X_train, features=features, kind='average', grid_resolution=20
)
XX, YY = np.meshgrid(pdp["values"][0], pdp["values"][1])
Z = pdp.average[0].T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu,
                       edgecolor='k')
ax.set_xlabel(features[0])
ax.set_ylabel(features[1])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median\n'
             'age and average occupancy, with Gradient Boosting')
plt.subplots_adjust(top=0.9)
plt.show()
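# %%
# Brute-force partial dependence (illustration)
# .............................................
#
# To make the averaging behind a PDP concrete, the curve can also be computed
# by hand: for each grid value of a feature, overwrite that column for every
# training row, predict, and average the predictions. This is a minimal
# sketch that reuses ``est`` and ``X_train`` from above; it mirrors what
# :func:`~sklearn.inspection.partial_dependence` does with ``method='brute'``,
# up to the exact grid choice.

grid = np.linspace(X_train['AveOccup'].quantile(0.05),
                   X_train['AveOccup'].quantile(0.95), 20)
averaged_predictions = []
for value in grid:
    X_tmp = X_train.copy()
    X_tmp['AveOccup'] = value          # clamp the feature for every sample
    averaged_predictions.append(est.predict(X_tmp).mean())

plt.figure()
plt.plot(grid, averaged_predictions)
plt.xlabel('AveOccup')
plt.ylabel('Partial dependence')
plt.show()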
from __future__ import absolute_import, division, print_function import math import itertools import operator import pytest from datetime import datetime, date import datashape from collections import Iterator, Iterable import blaze from blaze.compute.python import (nunique, mean, rrowfunc, rowfunc, reduce_by_funcs, optimize) from blaze import dshape from blaze.compute.core import compute, compute_up, pre_compute from blaze.expr import (symbol, by, merge, join, count, distinct, Apply, sum, min, max, any, summary, count, std, head, transform) import numpy as np from blaze import cos, sin from blaze.compatibility import builtins from blaze.utils import raises t = symbol('t', 'var * {name: string, amount: int, id: int}') data = [['Alice', 100, 1], ['Bob', 200, 2], ['Alice', 50, 3]] tbig = symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}') databig = [['Alice', 'F', 100, 1], ['Alice', 'F', 100, 3], ['Drew', 'F', 100, 4], ['Drew', 'M', 100, 5], ['Drew', 'M', 200, 5]] def test_dispatched_rowfunc(): cw = optimize(t['amount'] + 100, []) assert rowfunc(t)(t) == t assert rowfunc(cw)(('Alice', 100, 1)) == 200 def test_reduce_by_funcs(): e = summary(number=t.id.max(), sum=t.amount.sum()) b = by(t, e) assert reduce_by_funcs(b)[2]([1,2,3], [4,5,6]) == (1, 7) def test_symbol(): assert compute(t, data) == data def test_projection(): assert list(compute(t['name'], data)) == [x[0] for x in data] def test_eq(): assert list(compute(t['amount'] == 100, data)) == [x[1] == 100 for x in data] def test_selection(): assert list(compute(t[t['amount'] == 0], data)) == \ [x for x in data if x[1] == 0] assert list(compute(t[t['amount'] > 150], data)) == \ [x for x in data if x[1] > 150] def test_arithmetic(): assert list(compute(t['amount'] + t['id'], data)) == \ [b + c for a, b, c, in data] assert list(compute(t['amount'] * t['id'], data)) == \ [b * c for a, b, c, in data] assert list(compute(t['amount'] % t['id'], data)) == \ [b % c for a, b, c, in data] def test_unary_ops(): for op in ('cos', 'sin', 'exp', 'ceil', 'floor', 'trunc', 'isnan'): f = getattr(blaze, op) pyf = getattr(math, op) result = list(compute(f(t['amount']), data)) assert result == [pyf(x[1]) for x in data] def test_neg(): expr = optimize(-t.amount, []) assert list(compute(expr, data)) == [-x[1] for x in data] def test_reductions(): assert compute(sum(t['amount']), data) == 100 + 200 + 50 assert compute(min(t['amount']), data) == 50 assert compute(max(t['amount']), data) == 200 assert compute(nunique(t['amount']), data) == 3 assert compute(nunique(t['name']), data) == 2 assert compute(count(t['amount']), data) == 3 assert compute(any(t['amount'] > 150), data) is True assert compute(any(t['amount'] > 250), data) is False assert compute(t.amount[0], data) == 100 assert compute(t.amount[-1], data) == 50 def test_1d_reductions_keepdims(): for r in [sum, min, max, nunique, count]: assert compute(r(t.amount, keepdims=True), data) == \ (compute(r(t.amount), data),) def test_count(): t = symbol('t', '3 * int') assert compute(t.count(), [1, None, 2]) == 2 def reduction_runner(funcs): from blaze.compatibility import builtins as bts exprs = sum, min, max for blaze_expr, py_func in itertools.product(exprs, funcs): f = getattr(operator, py_func) reduc_f = getattr(bts, blaze_expr.__name__) ground_truth = f(reduc_f([100, 200, 50]), 5) assert compute(f(blaze_expr(t['amount']), 5), data) == ground_truth def test_reduction_arithmetic(): funcs = 'add', 'mul' reduction_runner(funcs) def test_reduction_compare(): funcs = 'eq', 'ne', 'lt', 'gt', 
'le', 'ge' reduction_runner(funcs) def test_mean(): assert compute(mean(t['amount']), data) == float(100 + 200 + 50) / 3 assert 50 < compute(std(t['amount']), data) < 100 def test_std(): amt = [row[1] for row in data] assert np.allclose(compute(t.amount.std(), data), np.std(amt)) assert np.allclose(compute(t.amount.std(unbiased=True), data), np.std(amt, ddof=1)) assert np.allclose(compute(t.amount.var(), data), np.var(amt)) assert np.allclose(compute(t.amount.var(unbiased=True), data), np.var(amt, ddof=1)) def test_by_no_grouper(): names = t['name'] assert set(compute(by(names, count=names.count()), data)) == \ set([('Alice', 2), ('Bob', 1)]) def test_by_one(): print(compute(by(t['name'], total=t['amount'].sum()), data)) assert set(compute(by(t['name'], total=t['amount'].sum()), data)) == \ set([('Alice', 150), ('Bob', 200)]) def test_by_compound_apply(): print(compute(by(t['name'], total=(t['amount'] + 1).sum()), data)) assert set(compute(by(t['name'], total=(t['amount'] + 1).sum()), data)) == \ set([('Alice', 152), ('Bob', 201)]) def test_by_two(): result = compute(by(tbig[['name', 'sex']], total=tbig['amount'].sum()), databig) expected = [('Alice', 'F', 200), ('Drew', 'F', 100), ('Drew', 'M', 300)] print(set(result)) assert set(result) == set(expected) def test_by_three(): result = compute(by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum()), databig) expected = [('Alice', 'F', 204), ('Drew', 'F', 104), ('Drew', 'M', 310)] print(result) assert set(result) == set(expected) def test_works_on_generators(): assert list(compute(t['amount'], iter(data))) == \ [x[1] for x in data] assert list(compute(t['amount'], (i for i in data))) == \ [x[1] for x in data] def test_join(): left = [['Alice', 100], ['Bob', 200]] right = [['Alice', 1], ['Bob', 2]] L = symbol('L', 'var * {name: string, amount: int}') R = symbol('R', 'var * {name: string, id: int}') joined = join(L, R, 'name') assert dshape(joined.schema) == \ dshape('{name: string, amount: int, id: int}') result = list(compute(joined, {L: left, R: right})) expected = [('Alice', 100, 1), ('Bob', 200, 2)] assert result == expected def test_outer_join(): left = [(1, 'Alice', 100), (2, 'Bob', 200), (4, 'Dennis', 400)] right = [('NYC', 1), ('Boston', 1), ('LA', 3), ('Moscow', 4)] L = symbol('L', 'var * {id: int, name: string, amount: real}') R = symbol('R', 'var * {city: string, id: int}') assert set(compute(join(L, R), {L: left, R: right})) == set( [(1, 'Alice', 100, 'NYC'), (1, 'Alice', 100, 'Boston'), (4, 'Dennis', 400, 'Moscow')]) assert set(compute(join(L, R, how='left'), {L: left, R: right})) == set( [(1, 'Alice', 100, 'NYC'), (1, 'Alice', 100, 'Boston'), (2, 'Bob', 200, None), (4, 'Dennis', 400, 'Moscow')]) assert set(compute(join(L, R, how='right'), {L: left, R: right})) == set( [(1, 'Alice', 100, 'NYC'), (1, 'Alice', 100, 'Boston'), (3, None, None, 'LA'), (4, 'Dennis', 400, 'Moscow')]) assert set(compute(join(L, R, how='outer'), {L: left, R: right})) == set( [(1, 'Alice', 100, 'NYC'), (1, 'Alice', 100, 'Boston'), (2, 'Bob', 200, None), (3, None, None, 'LA'), (4, 'Dennis', 400, 'Moscow')]) def test_multi_column_join(): left = [(1, 2, 3), (2, 3, 4), (1, 3, 5)] right = [(1, 2, 30), (1, 3, 50), (1, 3, 150)] L = symbol('L', 'var * {x: int, y: int, z: int}') R = symbol('R', 'var * {x: int, y: int, w: int}') j = join(L, R, ['x', 'y']) print(list(compute(j, {L: left, R: right}))) assert list(compute(j, {L: left, R: right})) == [(1, 2, 3, 30), (1, 3, 5, 50), (1, 3, 5, 150)] @pytest.mark.xfail(reason="This doesn't necessarily make 
sense") def test_column_of_column(): assert list(compute(t['name']['name'], data)) == \ list(compute(t['name'], data)) def test_distinct(): assert set(compute(distinct(t['name']), data)) == set(['Alice', 'Bob']) assert set(compute(distinct(t), data)) == set(map(tuple, data)) e = distinct(t) assert list(compute(e, [])) == [] def test_distinct_count(): t2 = t['name'].distinct() gby = by(t2, total=t2.count()) result = set(compute(gby, data)) assert result == set([('Alice', 1), ('Bob', 1)]) def test_sort(): assert list(compute(t.sort('amount'), data)) == \ sorted(data, key=lambda x: x[1], reverse=False) assert list(compute(t.sort('amount', ascending=True), data)) == \ sorted(data, key=lambda x: x[1], reverse=False) assert list(compute(t.sort(['amount', 'id']), data)) == \ sorted(data, key=lambda x: (x[1], x[2]), reverse=False) def test_fancy_sort(): assert list(compute(t.sort(t['amount']), data)) ==\ list(compute(t.sort('amount'), data)) assert list(compute(t.sort(t[['amount', 'id']]), data)) ==\ list(compute(t.sort(['amount', 'id']), data)) assert list(compute(t.sort(0-t['amount']), data)) ==\ list(compute(t.sort('amount'), data))[::-1] def test_sort_on_column(): assert list(compute(t.name.distinct().sort('name'), data)) == \ ['Alice', 'Bob'] def test_head(): assert list(compute(t.head(1), data)) == [data[0]] e = head(t, 101) p = list(range(1000)) assert len(list(compute(e, p))) == 101 def test_graph_double_join(): idx = [['A', 1], ['B', 2], ['C', 3], ['D', 4], ['E', 5], ['F', 6]] arc = [[1, 3], [2, 3], [4, 3], [5, 3], [3, 1], [2, 1], [5, 1], [1, 6], [2, 6], [4, 6]] wanted = [['A'], ['F']] t_idx = symbol('t_idx', 'var * {name: string, b: int32}') t_arc = symbol('t_arc', 'var * {a: int32, b: int32}') t_wanted = symbol('t_wanted', 'var * {name: string}') j = join(join(t_idx, t_arc, 'b'), t_wanted, 'name')[['name', 'b', 'a']] result = compute(j, {t_idx: idx, t_arc: arc, t_wanted: wanted}) result = sorted(map(tuple, result)) expected = sorted([('A', 3, 1), ('A', 2, 1), ('A', 5, 1), ('F', 1, 6), ('F', 2, 6), ('F', 4, 6)]) assert result == expected def test_label(): assert list(compute((t['amount'] * 1).label('foo'), data)) == \ list(compute((t['amount'] * 1), data)) def test_relabel_join(): names = symbol('names', 'var * {first: string, last: string}') siblings = join(names.relabel({'first': 'left'}), names.relabel({'first': 'right'}), 'last')[['left', 'right']] data = [('Alice', 'Smith'), ('Bob', 'Jones'), ('Charlie', 'Smith')] print(set(compute(siblings, {names: data}))) assert ('Alice', 'Charlie') in set(compute(siblings, {names: data})) assert ('Alice', 'Bob') not in set(compute(siblings, {names: data})) def test_map_column(): inc = lambda x: x + 1 assert list(compute(t['amount'].map(inc, 'int'), data)) == [x[1] + 1 for x in data] def test_map(): assert (list(compute(t.map(lambda tup: tup[1] + tup[2], 'int'), data)) == [x[1] + x[2] for x in data]) def test_apply_column(): result = compute(t.amount.apply(builtins.sum, 'real'), data) expected = compute(t.amount.sum(), data) assert result == expected def test_apply(): data2 = tuple(map(tuple, data)) assert compute(t.apply(hash, 'int'), data2) == hash(data2) def test_map_datetime(): from datetime import datetime data = [['A', 0], ['B', 1]] t = symbol('t', 'var * {foo: string, datetime: int64}') result = list(compute(t['datetime'].map(datetime.utcfromtimestamp, 'datetime'), data)) expected = [datetime(1970, 1, 1, 0, 0, 0), datetime(1970, 1, 1, 0, 0, 1)] assert result == expected def test_by_multi_column_grouper(): t = symbol('t', 'var * {x: int, y: 
int, z: int}') expr = by(t[['x', 'y']], total=t['z'].count()) data = [(1, 2, 0), (1, 2, 0), (1, 1, 0)] print(set(compute(expr, data))) assert set(compute(expr, data)) == set([(1, 2, 2), (1, 1, 1)]) def test_merge(): col = (t['amount'] * 2).label('new') expr = merge(t['name'], col) assert list(compute(expr, data)) == [(row[0], row[1] * 2) for row in data] def test_transform(): expr = transform(t, x=t.amount / t.id) assert list(compute(expr, data)) == [('Alice', 100, 1, 100), ('Bob', 200, 2, 100), ('Alice', 50, 3, 50 / 3)] def test_map_columnwise(): colwise = t['amount'] * t['id'] expr = colwise.map(lambda x: x / 10, 'int64', name='mod') assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data] def test_map_columnwise_of_selection(): tsel = t[t['name'] == 'Alice'] colwise = tsel['amount'] * tsel['id'] expr = colwise.map(lambda x: x / 10, 'int64', name='mod') assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data[::2]] def test_selection_out_of_order(): expr = t['name'][t['amount'] < 100] assert list(compute(expr, data)) == ['Alice'] def test_recursive_rowfunc(): f = rrowfunc(t['name'], t) assert [f(row) for row in data] == [row[0] for row in data] expr = optimize(t['amount'] + t['id'], []) f = rrowfunc(expr, t) assert [f(row) for row in data] == [row[1] + row[2] for row in data] assert raises(Exception, lambda: rrowfunc(t[t['amount'] < 0]['name'], t)) def test_recursive_rowfunc_is_used(): expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum()) expected = [('Alice', 2*(101 + 53)), ('Bob', 2*(202))] assert set(compute(expr, data)) == set(expected) class TestFunctionExpressions(object): def test_compound(self): s = t.amount.mean() r = compute(s, data) assert isinstance(r, float) expr = cos(s) ** 2 + sin(s) ** 2 result = compute(expr, data) expected = math.cos(r) ** 2 + math.sin(r) ** 2 assert result == expected def test_user_defined_function(self): s = t.amount.count() r = compute(s, data) assert isinstance(r, int) def myfunc(x): return (cos(x) + sin(x)) ** 2 / math.pi result = compute(myfunc(s), data) expected = (math.cos(r) + math.sin(r)) ** 2 / math.pi assert result == expected def test_user_defined_calls(self): s = t.amount.count() r = compute(s, data) def myother(y): return 2 + y ** 10 def myfunc(x): return myother((cos(x) + sin(x)) ** 2 / math.pi) result = compute(myfunc(s), data) expected = myother((math.cos(r) + math.sin(r)) ** 2 / math.pi) assert result == expected def test_by_groupby_deep(): data = [(1, 2, 'Alice'), (1, 3, 'Bob'), (2, 4, 'Alice'), (2, 4, '')] schema = '{x: int, y: int, name: string}' t = symbol('t', datashape.var * schema) t2 = t[t['name'] != ''] t3 = merge(t2.x, t2.name) expr = by(t3.name, avg=t3.x.mean()) result = set(compute(expr, data)) assert result == set([('Alice', 1.5), ('Bob', 1.0)]) def test_by_then_sort_dict_items_sequence(): expr = by(tbig.name, total=tbig.amount.sum()).sort('name') assert compute(expr, databig) def test_summary(): expr = summary(count=t.id.count(), sum=t.amount.sum()) assert compute(expr, data) == (3, 350) assert compute(expr, iter(data)) == (3, 350) def test_summary_keepdims(): assert compute(summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True), data) == \ (compute(summary(count=t.id.count(), sum=t.amount.sum(), keepdims=False), data),) def test_summary_by(): expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum())) assert set(compute(expr, data)) == set([('Alice', 2, 150), ('Bob', 1, 200)]) expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum())) assert 
set(compute(expr, data)) == set([('Alice', 2, 152), ('Bob', 1, 201)]) expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1)) assert set(compute(expr, data)) == set([('Alice', 2, 151), ('Bob', 1, 201)]) def test_summary_by_first(): expr = by(t.name, amt=t.amount[0]) assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 100))) def test_summary_by_last(): expr = by(t.name, amt=t.amount[-1]) assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 50))) def test_reduction_arithmetic(): expr = t.amount.sum() + 1 assert compute(expr, data) == 351 def test_scalar_arithmetic(): x = symbol('x', 'real') y = symbol('y', 'real') assert compute(x + y, {x: 2, y: 3}) == 5 assert compute_up(x + y, 2, 3) == 5 assert compute_up(x * y, 2, 3) == 6 assert compute_up(x / y, 6, 3) == 2 assert compute_up(x % y, 4, 3) == 1 assert compute_up(x ** y, 4, 3) == 64 assert compute(x + 1, {x: 2}) == 3 assert compute(x * 2, {x: 2}) == 4 assert compute(1 + x, {x: 2}) == 3 assert compute(2 * x, {x: 2}) == 4 assert compute_up(-x, 1) == -1 assert compute_up(blaze.sin(x), 1) == math.sin(1) def test_like(): t = symbol('t', 'var * {name: string, city: string}') data = [('Alice Smith', 'New York'), ('Bob Smith', 'Chicago'), ('Alice Walker', 'LA')] assert list(compute(t.like(name='Alice*'), data)) == [data[0], data[2]] assert list(compute(t.like(name='lice*'), data)) == [] assert list(compute(t.like(name='*Smith*'), data)) == [data[0], data[1]] assert list(compute(t.like(name='*Smith*', city='New York'), data)) == [data[0]] def test_datetime_comparison(): data = [['Alice', date(2000, 1, 1)], ['Bob', date(2000, 2, 2)], ['Alice', date(2000, 3, 3)]] t = symbol('t', 'var * {name: string, when: date}') assert list(compute(t[t.when > '2000-01-01'], data)) == data[1:] def test_datetime_access(): data = [['Alice', 100, 1, datetime(2000, 1, 1, 1, 1, 1)], ['Bob', 200, 2, datetime(2000, 1, 1, 1, 1, 1)], ['Alice', 50, 3, datetime(2000, 1, 1, 1, 1, 1)]] t = symbol('t', 'var * {amount: float64, id: int64, name: string, when: datetime}') assert list(compute(t.when.year, data)) == [2000, 2000, 2000] assert list(compute(t.when.second, data)) == [1, 1, 1] assert list(compute(t.when.date, data)) == [date(2000, 1, 1)] * 3 def test_utcfromtimestamp(): t = symbol('t', '1 * int64') assert list(compute(t.utcfromtimestamp, [0])) == \ [datetime(1970, 1, 1, 0, 0)] payments = [{'name': 'Alice', 'payments': [ {'amount': 100, 'when': datetime(2000, 1, 1, 1, 1 ,1)}, {'amount': 200, 'when': datetime(2000, 2, 2, 2, 2, 2)} ]}, {'name': 'Bob', 'payments': [ {'amount': 300, 'when': datetime(2000, 3, 3, 3, 3 ,3)}, {'amount': -400, 'when': datetime(2000, 4, 4, 4, 4, 4)}, {'amount': 500, 'when': datetime(2000, 5, 5, 5, 5, 5)} ]}, ] payments_ordered = [('Alice', [( 100, datetime(2000, 1, 1, 1, 1 ,1)), ( 200, datetime(2000, 2, 2, 2, 2, 2))]), ('Bob', [( 300, datetime(2000, 3, 3, 3, 3 ,3)), (-400, datetime(2000, 4, 4, 4, 4, 4)), ( 500, datetime(2000, 5, 5, 5, 5, 5))])] payment_dshape = 'var * {name: string, payments: var * {amount: int32, when: datetime}}' @pytest.mark.xfail(reason="Can't reason about nested broadcasts yet") def test_nested(): t = symbol('t', payment_dshape) assert list(compute(t.name, payments_ordered)) == ['Alice', 'Bob'] assert list(compute(t.payments, payments_ordered)) == \ [p[1] for p in payments_ordered] assert list(compute(t.payments.amount, payments_ordered)) == \ [(100, 200), (300, -400, 500)] assert list(compute(t.payments.amount + 1, payments_ordered)) ==\ [(101, 201), (301, -399, 501)] 
@pytest.mark.xfail(reason="Can't reason about nested broadcasts yet") def test_scalar(): s = symbol('s', '{name: string, id: int32, payments: var * {amount: int32, when: datetime}}') data = ('Alice', 1, ((100, datetime(2000, 1, 1, 1, 1 ,1)), (200, datetime(2000, 2, 2, 2, 2, 2)), (300, datetime(2000, 3, 3, 3, 3, 3)))) assert compute(s.name, data) == 'Alice' assert compute(s.id + 1, data) == 2 assert tuple(compute(s.payments.amount, data)) == (100, 200, 300) assert tuple(compute(s.payments.amount + 1, data)) == (101, 201, 301) def test_slice(): assert compute(t[0], data) == data[0] assert list(compute(t[:2], data)) == list(data[:2]) assert list(compute(t.name[:2], data)) == [data[0][0], data[1][0]] def test_negative_slicing(): assert list(compute(t[-1:], data)) == data[-1:] assert list(compute(t[-1:], iter(data))) == data[-1:] assert list(compute(t[-1], data)) == data[-1] assert list(compute(t[-1], iter(data))) == data[-1] assert list(compute(t[-2], data)) == data[-2] assert list(compute(t[-2], iter(data))) == data[-2] @pytest.mark.xfail(raises=ValueError, reason="No support for stop and step having negative values") def test_negative_slicing_raises_on_stop_and_step_not_None(): assert list(compute(t[-2:-5:-1], data)) == data[-2:-5:-1] def test_multi_dataset_broadcast(): x = symbol('x', '3 * int') y = symbol('y', '3 * int') a = [1, 2, 3] b = [10, 20, 30] assert list(compute(x + y, {x: a, y: b})) == [11, 22, 33] assert list(compute(2*x + (y + 1), {x: a, y: b})) == [13, 25, 37] @pytest.mark.xfail(reason="Optimize doesn't create multi-table-broadcasts") def test_multi_dataset_broadcast_with_Record_types(): x = symbol('x', '3 * {p: int, q: int}') y = symbol('y', '3 * int') a = [(1, 1), (2, 2), (3, 3)] b = [10, 20, 30] assert list(compute(x.p + x.q + y, {x: iter(a), y: iter(b)})) == [12, 24, 36] def eq(a, b): if isinstance(a, (Iterable, Iterator)): a = list(a) if isinstance(b, (Iterable, Iterator)): b = list(b) return a == b def test_pre_compute(): s = symbol('s', 'var * {a: int, b: int}') assert pre_compute(s, [(1, 2)]) == [(1, 2)] assert list(pre_compute(s, iter([(1, 2)]))) == [(1, 2)] assert list(pre_compute(s, iter([(1, 2), (3, 4)]))) == [(1, 2), (3, 4)] assert list(pre_compute(s, iter([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))) == [(1, 2), (3, 4)] def test_dicts(): t = symbol('t', 'var * {name: string, amount: int, id: int}') L = [['Alice', 100, 1], ['Bob', 200, 2], ['Alice', 50, 3]] d = [{'name': 'Alice', 'amount': 100, 'id': 1}, {'name': 'Bob', 'amount': 200, 'id': 2}, {'name': 'Alice', 'amount': 50, 'id': 3}] assert list(pre_compute(t, d)) == list(map(tuple, L)) for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]: assert eq(compute(expr, {t: L}), compute(expr, {t: d})) for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]: assert eq(compute(expr, {t: iter(L)}), compute(expr, {t: iter(d)})) assert eq(compute(expr, {t: iter(L)}), compute(expr, {t: L})) def test_nelements_list_tuple(): assert compute(t.nelements(), data) == len(data) def test_nelements_iterator(): x = (row for row in data) assert compute(t.nelements(), x) == len(data) def test_nrows(): assert compute(t.nrows, data) == len(data) x = (row for row in data) assert compute(t.nrows, x) == len(data) @pytest.mark.xfail(raises=Exception, reason="Only 1D reductions allowed") def test_nelements_2D(): assert compute(t.nelements(axis=1), data) == len(data[0]) def test_compute_field_on_dicts(): s = symbol('s', '{x: 3 * int, y: 3 * int}') d = {'x': [1, 2, 3], 'y': [4, 5, 6]} assert compute(s.x, {s: d}) == [1, 
2, 3] def test_truncate(): s = symbol('x', 'real') assert compute(s.truncate(20), 154) == 140 assert compute(s.truncate(0.1), 3.1415) == 3.1 def test_truncate_datetime(): s = symbol('x', 'datetime') assert compute(s.truncate(2, 'days'), datetime(2002, 1, 3, 12, 30)) ==\ date(2002, 1, 2) s = symbol('x', 'var * datetime') assert list(compute(s.truncate(2, 'days'), [datetime(2002, 1, 3, 12, 30)])) ==\ [date(2002, 1, 2)] def test_compute_up_on_base(): d = datetime.now() s = symbol('s', 'datetime') assert compute(s.minute, d) == d.minute def test_notnull(): data = [('Alice', -100, None), (None, None, None), ('Bob', 300, 'New York City')] t = symbol('t', 'var * {name: ?string, amount: ?int32, city: ?string}') expr = t.name.notnull() result = compute(expr, data) assert list(result) == [True, False, True] def test_notnull_whole_collection(): t = symbol('t', 'var * {name: ?string, amount: ?int32, city: ?string}') with pytest.raises(AttributeError): t.notnull @pytest.mark.parametrize('keys', [['Alice'], ['Bob', 'Alice']]) def test_isin(keys): expr = t[t.name.isin(keys)] result = list(compute(expr, data)) expected = [el for el in data if el[0] in keys] assert result == expected
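# --- Illustrative usage sketch (not part of the original test suite) ---------
# A minimal walk-through of the list-backed compute path the tests above
# exercise, reusing only names already defined in this module (t, data, by,
# summary, compute). The helper name below is illustrative.
def _example_by_summary():
    expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
    result = set(compute(expr, data))
    # Mirrors test_summary_by: {('Alice', 2, 150), ('Bob', 1, 200)}
    assert result == set([('Alice', 2, 150), ('Bob', 1, 200)])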
from __future__ import division, print_function, absolute_import from warnings import warn import numpy as np from numpy import asarray, empty, ravel, nonzero from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix, SparseEfficiencyWarning, csc_matrix) from . import _superlu noScikit = False try: import scikits.umfpack as umfpack except ImportError: noScikit = True useUmfpack = not noScikit __all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized', 'MatrixRankWarning'] class MatrixRankWarning(UserWarning): pass def use_solver(**kwargs): """ Select default sparse direct solver to be used. Parameters ---------- useUmfpack : bool, optional Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is installed. Default: True Notes ----- The default sparse solver is umfpack when available (scikits.umfpack is installed). This can be changed by passing useUmfpack = False, which then causes the always present SuperLU based solver to be used. Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If sure that the matrix fulfills this, pass ``assumeSortedIndices=True`` to gain some speed. """ if 'useUmfpack' in kwargs: globals()['useUmfpack'] = kwargs['useUmfpack'] #TODO: pass other options to scikit def _get_umf_family(A): """Get umfpack family string given the sparse matrix dtype.""" family = {'di': 'di', 'Di': 'zi', 'dl': 'dl', 'Dl': 'zl'} dt = A.dtype.char + A.indices.dtype.char return family[dt] def spsolve(A, b, permc_spec=None, use_umfpack=True): """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. If a vector, b.shape must be (n,) or (n, 1). permc_spec : str, optional How to permute the columns of the matrix for sparsity preservation. (default: 'COLAMD') - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering use_umfpack : bool, optional if True (default) then use umfpack for the solution. This is only referenced if b is a vector and ``scikit-umfpack`` is installed. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[1] If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) Notes ----- For solving the matrix expression AX = B, this solver assumes the resulting matrix X is sparse, as is often the case for very sparse inputs. If the resulting X is dense, the construction of this sparse result will be relatively expensive. In that case, consider converting A to a dense matrix and using scipy.linalg.solve or its variants. 
""" if not (isspmatrix_csc(A) or isspmatrix_csr(A)): A = csc_matrix(A) warn('spsolve requires A be CSC or CSR matrix format', SparseEfficiencyWarning) # b is a vector only if b have shape (n,) or (n, 1) b_is_sparse = isspmatrix(b) if not b_is_sparse: b = asarray(b) b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1)) A.sort_indices() A = A.asfptype() # upcast to a floating point format result_dtype = np.promote_types(A.dtype, b.dtype) if A.dtype != result_dtype: A = A.astype(result_dtype) if b.dtype != result_dtype: b = b.astype(result_dtype) # validate input shapes M, N = A.shape if (M != N): raise ValueError("matrix must be square (has shape %s)" % ((M, N),)) if M != b.shape[0]: raise ValueError("matrix - rhs dimension mismatch (%s - %s)" % (A.shape, b.shape[0])) use_umfpack = use_umfpack and useUmfpack if b_is_vector and use_umfpack: if b_is_sparse: b_vec = b.toarray() else: b_vec = b b_vec = asarray(b_vec, dtype=A.dtype).ravel() if noScikit: raise RuntimeError('Scikits.umfpack not installed.') if A.dtype.char not in 'dD': raise ValueError("convert matrix data to double, please, using" " .astype(), or set linsolve.useUmfpack = False") umf = umfpack.UmfpackContext(_get_umf_family(A)) x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, autoTranspose=True) else: if b_is_vector and b_is_sparse: b = b.toarray() b_is_sparse = False if not b_is_sparse: if isspmatrix_csc(A): flag = 1 # CSC format else: flag = 0 # CSR format options = dict(ColPerm=permc_spec) x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr, b, flag, options=options) if info != 0: warn("Matrix is exactly singular", MatrixRankWarning) x.fill(np.nan) if b_is_vector: x = x.ravel() else: # b is sparse Afactsolve = factorized(A) if not isspmatrix_csc(b): warn('spsolve is more efficient when sparse b ' 'is in the CSC matrix format', SparseEfficiencyWarning) b = csc_matrix(b) # Create a sparse output matrix by repeatedly applying # the sparse factorization to solve columns of b. data_segs = [] row_segs = [] col_segs = [] for j in range(b.shape[1]): bj = b[:, j].A.ravel() xj = Afactsolve(bj) w = np.flatnonzero(xj) segment_length = w.shape[0] row_segs.append(w) col_segs.append(np.ones(segment_length, dtype=int)*j) data_segs.append(np.asarray(xj[w], dtype=A.dtype)) sparse_data = np.concatenate(data_segs) sparse_row = np.concatenate(row_segs) sparse_col = np.concatenate(col_segs) x = A.__class__((sparse_data, (sparse_row, sparse_col)), shape=b.shape, dtype=A.dtype) return x def splu(A, permc_spec=None, diag_pivot_thresh=None, drop_tol=None, relax=None, panel_size=None, options=dict()): """ Compute the LU decomposition of a sparse, square matrix. Parameters ---------- A : sparse matrix Sparse matrix to factorize. Should be in CSR or CSC format. permc_spec : str, optional How to permute the columns of the matrix for sparsity preservation. (default: 'COLAMD') - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering diag_pivot_thresh : float, optional Threshold used for a diagonal entry to be an acceptable pivot. See SuperLU user's guide for details [1]_ drop_tol : float, optional (deprecated) No effect. relax : int, optional Expert option for customizing the degree of relaxing supernodes. See SuperLU user's guide for details [1]_ panel_size : int, optional Expert option for customizing the panel size. 
See SuperLU user's guide for details [1]_ options : dict, optional Dictionary containing additional expert options to SuperLU. See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument) for more details. For example, you can specify ``options=dict(Equil=False, IterRefine='SINGLE'))`` to turn equilibration off and perform a single iterative refinement. Returns ------- invA : scipy.sparse.linalg.SuperLU Object, which has a ``solve`` method. See also -------- spilu : incomplete LU decomposition Notes ----- This function uses the SuperLU library. References ---------- .. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/ """ if not isspmatrix_csc(A): A = csc_matrix(A) warn('splu requires CSC matrix format', SparseEfficiencyWarning) A.sort_indices() A = A.asfptype() # upcast to a floating point format M, N = A.shape if (M != N): raise ValueError("can only factor square matrices") # is this true? _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, PanelSize=panel_size, Relax=relax) if options is not None: _options.update(options) return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr, ilu=False, options=_options) def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=None): """ Compute an incomplete LU decomposition for a sparse, square matrix. The resulting object is an approximation to the inverse of `A`. Parameters ---------- A : (N, N) array_like Sparse matrix to factorize drop_tol : float, optional Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition. (default: 1e-4) fill_factor : float, optional Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10) drop_rule : str, optional Comma-separated string of drop rules to use. Available rules: ``basic``, ``prows``, ``column``, ``area``, ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``) See SuperLU documentation for details. Remaining other options Same as for `splu` Returns ------- invA_approx : scipy.sparse.linalg.SuperLU Object, which has a ``solve`` method. See also -------- splu : complete LU decomposition Notes ----- To improve the better approximation to the inverse, you may need to increase `fill_factor` AND decrease `drop_tol`. This function uses the SuperLU library. """ if not isspmatrix_csc(A): A = csc_matrix(A) warn('splu requires CSC matrix format', SparseEfficiencyWarning) A.sort_indices() A = A.asfptype() # upcast to a floating point format M, N = A.shape if (M != N): raise ValueError("can only factor square matrices") # is this true? _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, ILU_FillFactor=fill_factor, DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, PanelSize=panel_size, Relax=relax) if options is not None: _options.update(options) return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr, ilu=True, options=_options) def factorized(A): """ Return a function for solving a sparse linear system, with A pre-factorized. Parameters ---------- A : (N, N) array_like Input. Returns ------- solve : callable To solve the linear system of equations given in `A`, the `solve` callable should be passed an ndarray of shape (N,). Examples -------- >>> from scipy.sparse.linalg import factorized >>> A = np.array([[ 3. , 2. , -1. ], ... [ 2. , -2. , 4. ], ... [-1. , 0.5, -1. ]]) >>> solve = factorized(A) # Makes LU decomposition. >>> rhs1 = np.array([1, -2, 0]) >>> solve(rhs1) # Uses the LU factors. 
array([ 1., -2., -2.]) """ if useUmfpack: if noScikit: raise RuntimeError('Scikits.umfpack not installed.') if not isspmatrix_csc(A): A = csc_matrix(A) warn('splu requires CSC matrix format', SparseEfficiencyWarning) A.sort_indices() A = A.asfptype() # upcast to a floating point format if A.dtype.char not in 'dD': raise ValueError("convert matrix data to double, please, using" " .astype(), or set linsolve.useUmfpack = False") umf = umfpack.UmfpackContext(_get_umf_family(A)) # Make LU decomposition. umf.numeric(A) def solve(b): return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True) return solve else: return splu(A).solve
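# --- Illustrative usage sketch ------------------------------------------------
# A small worked example of the two entry points documented above: a one-shot
# spsolve() call and a reusable solver returned by factorized(). The matrix
# values are illustrative only; np and csc_matrix are already imported above.
if __name__ == '__main__':
    A = csc_matrix(np.array([[3.0, 2.0, 0.0],
                             [2.0, -2.0, 4.0],
                             [0.0, 0.5, -1.0]]))
    b = np.array([1.0, -2.0, 0.0])

    x = spsolve(A, b)          # direct solve (UMFPACK if available, else SuperLU)
    solve = factorized(A)      # factor once, reuse for many right-hand sides
    assert np.allclose(A.dot(x), b)
    assert np.allclose(solve(b), x)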
import numpy as np import pandas as pd import pyflux as pf # Set up some data to use for the tests countdata = np.random.poisson(3,500) x1 = np.random.normal(0,1,500) x2 = np.random.normal(0,1,500) data = pd.DataFrame([countdata,x1,x2]).T data.columns = ['y', 'x1', 'x2'] x1_oos = np.random.normal(0,1,30) x2_oos = np.random.normal(0,1,30) countdata_oos = np.random.poisson(3,30) data_oos = pd.DataFrame([countdata_oos,x1_oos,x2_oos]).T data_oos.columns = ['y', 'x1', 'x2'] def test_normal_no_terms(): """ Tests the length of the latent variable vector for an GASReg model with no AR or MA terms, and tests that the values are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test_normal_bbvi(): """ Tests an GASReg model estimated with BBVI, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100) assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test_normal_bbvi_mini_batch(): """ Tests an ARIMA model estimated with BBVI and that the length of the latent variable list is correct, and that the estimated latent variables are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100, mini_batch=32) assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test_normal_bbvi_elbo(): """ Tests that the ELBO increases """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=200, record_elbo=True, map_start=False) assert(x.elbo_records[-1]>x.elbo_records[0]) def test_normal_bbvi_mini_batch_elbo(): """ Tests that the ELBO increases """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=200, mini_batch=32, record_elbo=True, map_start=False) assert(x.elbo_records[-1]>x.elbo_records[0]) def test_normal_mh(): """ Tests an GASReg model estimated with Metropolis-Hastings, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('M-H',nsims=300) assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test_normal_laplace(): """ Tests an GASReg model estimated with Laplace approximation, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('Laplace') assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test_normal_pml(): """ Tests an GASReg model estimated with PML, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('PML') assert(len(model.latent_variables.z_list) == 2) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def 
test_normal_predict_length(): """ Tests that the length of the predict dataframe is equal to no of steps h """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5) def test_normal_predict_is_length(): """ Tests that the length of the predict IS dataframe is equal to no of steps h """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() assert(model.predict_is(h=5).shape[0] == 5) def test_normal_predict_nans(): """ Tests that the predictions are not NaNs """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5, oos_data=data_oos).values)]) == 0) def test_normal_predict_is_nans(): """ Tests that the predictions in-sample are not NaNs """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0) def test_predict_intervals(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() predictions = model.predict(h=10, oos_data=data_oos, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test_predict_is_intervals(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit() predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test_predict_intervals_bbvi(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) predictions = model.predict(h=10, oos_data=data_oos, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test_predict_is_intervals_bbvi(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test_predict_intervals_mh(): """ Tests prediction intervals are ordered correctly """ """ model = pf.GASReg(formula="y ~ x1", data=data, 
family=pf.Poisson()) x = model.fit('M-H', nsims=400) predictions = model.predict(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values)) """ def test_predict_is_intervals_mh(): """ Tests prediction intervals are ordered correctly """ """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('M-H', nsims=400) predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values)) """ def test_sample_model(): """ Tests sampling function """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) sample = model.sample(nsims=100) assert(sample.shape[0]==100) assert(sample.shape[1]==len(data)) def test_ppc(): """ Tests PPC value """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) p_value = model.ppc() assert(0.0 <= p_value <= 1.0) ## Try more than one predictor def test2_normal_no_terms(): """ Tests the length of the latent variable vector for an GASReg model with no AR or MA terms, and two predictors, and tests that the values are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_bbvi(): """ Tests an GASReg model estimated with BBVI, with multiple predictors, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100) assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_bbvi_mini_batch(): """ Tests an ARIMA model estimated with BBVI and that the length of the latent variable list is correct, and that the estimated latent variables are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100, mini_batch=32) assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_bbvi_elbo(): """ Tests that the ELBO increases """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100, record_elbo=True, map_start=False) assert(x.elbo_records[-1]>x.elbo_records[0]) def test2_normal_bbvi_mini_batch_elbo(): """ Tests that the ELBO increases """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True, map_start=False) assert(x.elbo_records[-1]>x.elbo_records[0]) def test2_normal_mh(): """ Tests an GASReg model estimated with MEtropolis-Hastings, with multiple predictors, and tests that the latent variable vector length 
is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('M-H',nsims=300) assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_normal(): """ Tests an GASReg model estimated with Laplace, with multiple predictors, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('Laplace') assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_pml(): """ Tests an GASReg model estimated with PML, with multiple predictors, and tests that the latent variable vector length is correct, and that value are not nan """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('PML') assert(len(model.latent_variables.z_list) == 3) lvs = np.array([i.value for i in model.latent_variables.z_list]) assert(len(lvs[np.isnan(lvs)]) == 0) def test2_normal_predict_length(): """ Tests that the length of the predict dataframe is equal to no of steps h """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5) def test2_normal_predict_is_length(): """ Tests that the length of the predict IS dataframe is equal to no of steps h """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() assert(model.predict_is(h=5).shape[0] == 5) def test2_normal_predict_nans(): """ Tests that the predictions are not NaNs """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5, oos_data=data_oos).values)]) == 0) def test2_normal_predict_is_nans(): """ Tests that the predictions in-sample are not NaNs """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() x.summary() assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0) def test2_predict_intervals(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() predictions = model.predict(h=10, oos_data=data_oos, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test2_predict_is_intervals(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit() predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test2_predict_intervals_bbvi(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1 + 
x2", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) predictions = model.predict(h=10, oos_data=data_oos, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test2_predict_is_intervals_bbvi(): """ Tests prediction intervals are ordered correctly """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values)) def test2_predict_intervals_mh(): """ Tests prediction intervals are ordered correctly """ """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('M-H', nsims=400) predictions = model.predict(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values)) """ def test2_predict_is_intervals_mh(): """ Tests prediction intervals are ordered correctly """ """ model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson()) x = model.fit('M-H', nsims=400) predictions = model.predict_is(h=10, intervals=True) assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values)) assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values)) assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values)) """ def test2_sample_model(): """ Tests sampling function """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) sample = model.sample(nsims=100) assert(sample.shape[0]==100) assert(sample.shape[1]==len(data)) def test2_ppc(): """ Tests PPC value """ model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson()) x = model.fit('BBVI', iterations=100) p_value = model.ppc() assert(0.0 <= p_value <= 1.0)
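# --- Illustrative usage sketch (not part of the original test suite) ---------
# End-to-end flow covered by the tests above: fit a Poisson GASReg by the
# default method, inspect the summary, then forecast out of sample. Reuses the
# data/data_oos frames built at the top of this module; names are illustrative.
def _example_workflow():
    model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Poisson())
    results = model.fit()
    results.summary()
    forecast = model.predict(h=5, oos_data=data_oos)  # 5-step-ahead forecasts
    assert forecast.shape[0] == 5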
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
from logging import getLogger
from urllib import quote

from requests.exceptions import RequestException

from flexget import plugin, validator
from flexget.event import event
from flexget.utils import json, requests
from flexget.utils.template import RenderError
from flexget.config_schema import one_or_more

log = getLogger('pyload')


class PluginPyLoad(object):
    """
    Parse task content or url for hoster links and adds them to pyLoad.

    Example::

      pyload:
        api: http://localhost:8000/api
        queue: yes
        username: my_username
        password: my_password
        folder: desired_folder
        package: desired_package_name (jinja2 supported)
        hoster:
          - YoutubeCom
        parse_url: no
        multiple_hoster: yes
        enabled: yes

    Default values for the config elements::

      pyload:
        api: http://localhost:8000/api
        queue: no
        hoster: ALL
        parse_url: no
        multiple_hoster: yes
        enabled: yes
    """

    __author__ = 'http://pyload.org'
    __version__ = '0.4'

    DEFAULT_API = 'http://localhost:8000/api'
    DEFAULT_QUEUE = False
    DEFAULT_FOLDER = ''
    DEFAULT_HOSTER = []
    DEFAULT_PARSE_URL = False
    DEFAULT_MULTIPLE_HOSTER = True
    DEFAULT_PREFERRED_HOSTER_ONLY = False
    DEFAULT_HANDLE_NO_URL_AS_FAILURE = False

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'object',
             'properties': {
                 'api': {'type': 'string'},
                 'username': {'type': 'string'},
                 'password': {'type': 'string'},
                 'folder': {'type': 'string'},
                 'package': {'type': 'string'},
                 'queue': {'type': 'boolean'},
                 'parse_url': {'type': 'boolean'},
                 'multiple_hoster': {'type': 'boolean'},
                 'hoster': one_or_more({'type': 'string'}),
                 'preferred_hoster_only': {'type': 'boolean'},
                 'handle_no_url_as_failure': {'type': 'boolean'},
                 'enabled': {'type': 'boolean'},
             },
             'additionalProperties': False}
        ]
    }

    def on_task_output(self, task, config):
        if not config.get('enabled', True):
            return
        if not task.accepted:
            return

        self.add_entries(task, config)

    def add_entries(self, task, config):
        """Adds accepted entries"""

        try:
            session = self.get_session(config)
        except IOError:
            raise plugin.PluginError('pyLoad not reachable', log)
        except plugin.PluginError:
            raise
        except Exception as e:
            raise plugin.PluginError('Unknown error: %s' % str(e), log)

        api = config.get('api', self.DEFAULT_API)
        hoster = config.get('hoster', self.DEFAULT_HOSTER)

        for entry in task.accepted:
            # bunch of urls now going to check
            content = entry.get('description', '') + ' ' + quote(entry['url'])
            content = json.dumps(content.encode("utf8"))

            url = json.dumps(entry['url']) if config.get('parse_url', self.DEFAULT_PARSE_URL) else "''"

            log.debug("Parsing url %s" % url)

            result = query_api(api, "parseURLs", {"html": content, "url": url, "session": session})

            # parsed { plugins: [urls] }
            parsed = result.json()

            urls = []

            # check for preferred hoster
            for name in hoster:
                if name in parsed:
                    urls.extend(parsed[name])
                    if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):
                        break

            # no preferred hoster and not preferred hoster only - add all recognized plugins
            if not urls and not config.get('preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY):
                for name, purls in parsed.iteritems():
                    if name != "BasePlugin":
                        urls.extend(purls)

            if task.options.test:
                log.info('Would add `%s` to pyload' % urls)
                continue

            # no urls found
            if not urls:
                if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):
                    entry.fail("No suited urls in entry %s" % entry['title'])
                else:
                    log.info("No suited urls in entry %s" % entry['title'])
                continue

            log.debug("Add %d urls to pyLoad" % len(urls))

            try:
                dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0  # Destination.Queue = 1

                # Use the title of the entry, if no naming schema for the package is defined.
                name = config.get('package', entry['title'])

                # If name has jinja template, render it
                try:
                    name = entry.render(name)
                except RenderError as e:
                    name = entry['title']
                    log.error('Error rendering jinja event: %s' % e)

                post = {'name': "'%s'" % name.encode("ascii", "ignore"),
                        'links': str(urls),
                        'dest': dest,
                        'session': session}

                pid = query_api(api, "addPackage", post).text
                log.debug('added package pid: %s' % pid)

                # Set Folder
                folder = config.get('folder', self.DEFAULT_FOLDER)
                folder = entry.get('path', folder)
                if folder:
                    # If folder has jinja template, render it
                    try:
                        folder = entry.render(folder)
                    except RenderError as e:
                        folder = self.DEFAULT_FOLDER
                        log.error('Error rendering jinja event: %s' % e)
                    # set folder with api
                    data = json.dumps({'folder': folder})
                    query_api(api, "setPackageData", {'pid': pid, 'data': data, 'session': session})

            except Exception as e:
                entry.fail(str(e))

    def get_session(self, config):
        url = config.get('api', self.DEFAULT_API)

        # Login
        post = {'username': config['username'], 'password': config['password']}
        result = query_api(url, "login", post)
        response = result.json()
        if not response:
            raise plugin.PluginError('Login failed', log)
        return response.replace('"', '')


def query_api(url, method, post=None):
    try:
        response = requests.request(
            'post' if post is not None else 'get',
            url.rstrip("/") + "/" + method.strip("/"),
            data=post)
        response.raise_for_status()
        return response
    except RequestException as e:
        if e.response.status_code == 500:
            raise plugin.PluginError('Internal API Error: <%s> <%s> <%s>' % (method, url, post), log)
        raise


@event('plugin.register')
def register_plugin():
    plugin.register(PluginPyLoad, 'pyload', api_ver=2)
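# --- Illustrative usage sketch (host and credentials are placeholders) -------
# The raw API handshake the plugin performs via query_api(): log in to obtain a
# session token (stripping quotes as get_session() does), then ask pyLoad to
# parse a block of text for hoster links. The helper name is illustrative.
def _example_handshake():
    api = 'http://localhost:8000/api'
    session = query_api(api, 'login',
                        {'username': 'my_username',
                         'password': 'my_password'}).json().replace('"', '')
    parsed = query_api(api, 'parseURLs',
                       {'html': json.dumps('http://example.com/some/link'),
                        'url': "''",
                        'session': session}).json()
    return parsed  # mapping of {hoster plugin name: [urls]}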
""" Module to define and register Terminal IPython shortcuts with :mod:`prompt_toolkit` """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import warnings import signal import sys from typing import Callable from prompt_toolkit.application.current import get_app from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER from prompt_toolkit.filters import (has_focus, has_selection, Condition, vi_insert_mode, emacs_insert_mode, has_completions, vi_mode) from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline from prompt_toolkit.key_binding import KeyBindings from IPython.utils.decorators import undoc @undoc @Condition def cursor_in_leading_ws(): before = get_app().current_buffer.document.current_line_before_cursor return (not before) or before.isspace() def create_ipython_shortcuts(shell): """Set up the prompt_toolkit keyboard shortcuts for IPython""" kb = KeyBindings() insert_mode = vi_insert_mode | emacs_insert_mode if getattr(shell, 'handle_return', None): return_handler = shell.handle_return(shell) else: return_handler = newline_or_execute_outer(shell) kb.add('enter', filter=(has_focus(DEFAULT_BUFFER) & ~has_selection & insert_mode ))(return_handler) kb.add('c-\\')(force_exit) kb.add('c-p', filter=(vi_insert_mode & has_focus(DEFAULT_BUFFER)) )(previous_history_or_previous_completion) kb.add('c-n', filter=(vi_insert_mode & has_focus(DEFAULT_BUFFER)) )(next_history_or_next_completion) kb.add('c-g', filter=(has_focus(DEFAULT_BUFFER) & has_completions) )(dismiss_completion) kb.add('c-c', filter=has_focus(DEFAULT_BUFFER))(reset_buffer) kb.add('c-c', filter=has_focus(SEARCH_BUFFER))(reset_search_buffer) supports_suspend = Condition(lambda: hasattr(signal, 'SIGTSTP')) kb.add('c-z', filter=supports_suspend)(suspend_to_bg) # Ctrl+I == Tab kb.add('tab', filter=(has_focus(DEFAULT_BUFFER) & ~has_selection & insert_mode & cursor_in_leading_ws ))(indent_buffer) kb.add('c-o', filter=(has_focus(DEFAULT_BUFFER) & emacs_insert_mode) )(newline_autoindent_outer(shell.input_transformer_manager)) kb.add('f2', filter=has_focus(DEFAULT_BUFFER))(open_input_in_editor) if shell.display_completions == 'readlinelike': kb.add('c-i', filter=(has_focus(DEFAULT_BUFFER) & ~has_selection & insert_mode & ~cursor_in_leading_ws ))(display_completions_like_readline) if sys.platform == 'win32': kb.add('c-v', filter=(has_focus(DEFAULT_BUFFER) & ~vi_mode))(win_paste) return kb def newline_or_execute_outer(shell): def newline_or_execute(event): """When the user presses return, insert a newline or execute the code.""" b = event.current_buffer d = b.document if b.complete_state: cc = b.complete_state.current_completion if cc: b.apply_completion(cc) else: b.cancel_completion() return # If there's only one line, treat it as if the cursor is at the end. # See https://github.com/ipython/ipython/issues/10425 if d.line_count == 1: check_text = d.text else: check_text = d.text[:d.cursor_position] status, indent = shell.check_complete(check_text) if not (d.on_last_line or d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end() ): if shell.autoindent: b.insert_text('\n' + indent) else: b.insert_text('\n') return if (status != 'incomplete') and b.accept_handler: b.validate_and_handle() else: if shell.autoindent: b.insert_text('\n' + indent) else: b.insert_text('\n') return newline_or_execute def previous_history_or_previous_completion(event): """ Control-P in vi edit mode on readline is history next, unlike default prompt toolkit. 
If completer is open this still select previous completion. """ event.current_buffer.auto_up() def next_history_or_next_completion(event): """ Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit. If completer is open this still select next completion. """ event.current_buffer.auto_down() def dismiss_completion(event): b = event.current_buffer if b.complete_state: b.cancel_completion() def reset_buffer(event): b = event.current_buffer if b.complete_state: b.cancel_completion() else: b.reset() def reset_search_buffer(event): if event.current_buffer.document.text: event.current_buffer.reset() else: event.app.layout.focus(DEFAULT_BUFFER) def suspend_to_bg(event): event.app.suspend_to_background() def force_exit(event): """ Force exit (with a non-zero return value) """ sys.exit("Quit") def indent_buffer(event): event.current_buffer.insert_text(' ' * 4) @undoc def newline_with_copy_margin(event): """ DEPRECATED since IPython 6.0 See :any:`newline_autoindent_outer` for a replacement. Preserve margin and cursor position when using Control-O to insert a newline in EMACS mode """ warnings.warn("`newline_with_copy_margin(event)` is deprecated since IPython 6.0. " "see `newline_autoindent_outer(shell)(event)` for a replacement.", DeprecationWarning, stacklevel=2) b = event.current_buffer cursor_start_pos = b.document.cursor_position_col b.newline(copy_margin=True) b.cursor_up(count=1) cursor_end_pos = b.document.cursor_position_col if cursor_start_pos != cursor_end_pos: pos_diff = cursor_start_pos - cursor_end_pos b.cursor_right(count=pos_diff) def newline_autoindent_outer(inputsplitter) -> Callable[..., None]: """ Return a function suitable for inserting a indented newline after the cursor. Fancier version of deprecated ``newline_with_copy_margin`` which should compute the correct indentation of the inserted line. That is to say, indent by 4 extra space after a function definition, class definition, context manager... And dedent by 4 space after ``pass``, ``return``, ``raise ...``. """ def newline_autoindent(event): """insert a newline after the cursor indented appropriately.""" b = event.current_buffer d = b.document if b.complete_state: b.cancel_completion() text = d.text[:d.cursor_position] + '\n' _, indent = inputsplitter.check_complete(text) b.insert_text('\n' + (' ' * (indent or 0)), move_cursor=False) return newline_autoindent def open_input_in_editor(event): event.app.current_buffer.tempfile_suffix = ".py" event.app.current_buffer.open_in_editor() if sys.platform == 'win32': from IPython.core.error import TryNext from IPython.lib.clipboard import (ClipboardEmpty, win32_clipboard_get, tkinter_clipboard_get) @undoc def win_paste(event): try: text = win32_clipboard_get() except TryNext: try: text = tkinter_clipboard_get() except (TryNext, ClipboardEmpty): return except ClipboardEmpty: return event.current_buffer.insert_text(text.replace('\t', ' ' * 4))
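# --- Illustrative extension sketch (the Ctrl-L choice is an assumption) ------
# How downstream code might extend the bindings built above: call
# create_ipython_shortcuts(shell) and register one extra handler on the result,
# using only filters already imported in this module.
def create_shortcuts_with_clear(shell):
    kb = create_ipython_shortcuts(shell)

    def clear_screen(event):
        # Redraw the prompt_toolkit application from a blank screen.
        event.app.renderer.clear()

    kb.add('c-l', filter=has_focus(DEFAULT_BUFFER))(clear_screen)
    return kb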
# # Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com> # # See the file LICENSE.txt for your full rights. # """Various utilities used by the plot package. """ try: from PIL import ImageFont, ImageColor except ImportError: import ImageFont, ImageColor import datetime import time import math import weeplot def scale(fmn, fmx, prescale = (None, None, None), nsteps = 10): """Calculates an appropriate min, max, and step size for scaling axes on a plot. The origin (zero) is guaranteed to be on an interval boundary. fmn: The minimum data value fmx: The maximum data value. Must be greater than or equal to fmn. prescale: A 3-way tuple. A non-None min or max value (positions 0 and 1, respectively) will be fixed to that value. A non-None interval (position 2) be at least as big as that value. Default = (None, None, None) nsteps: The nominal number of desired steps. Default = 10 Returns: a three-way tuple. First value is the lowest scale value, second the highest. The third value is the step (increment) between them. Examples: >>> print scale(1.1, 12.3) (0.0, 14.0, 2.0) >>> print scale(-1.1, 12.3) (-2.0, 14.0, 2.0) >>> print scale(-12.1, -5.3) (-13.0, -5.0, 1.0) >>> print scale(10.0, 10.0) (10.0, 10.1, 0.01) >>> print "(%.4f, %.4f, %.5f)" % scale(10.0, 10.001) (10.0000, 10.0010, 0.00010) >>> print scale(10.0, 10.0+1e-8) (10.0, 10.1, 0.01) >>> print scale(0.0, 0.05, (None, None, .1), 10) (0.0, 1.0, 0.1) >>> print scale(0.0, 0.21, (None, None, .02)) (0.0, 0.22, 0.02) """ if all(x is not None for x in prescale): return prescale (minscale, maxscale, min_interval) = prescale # Make sure fmn and fmx are float values, in case a user passed # in integers: fmn = float(fmn) fmx = float(fmx) if fmx < fmn : raise weeplot.ViolatedPrecondition, "scale() called with max value less than min value" if _rel_approx_equal(fmn, fmx) : if fmn == 0.0 : fmx = 1.0 else : fmx = fmn + .01*abs(fmn) frange = fmx - fmn steps = frange / nsteps mag = math.floor(math.log10(steps)) magPow = math.pow(10.0, mag) magMsd = math.floor(steps/magPow + 0.5) if magMsd > 5.0: magMsd = 10.0 elif magMsd > 2.0: magMsd = 5.0 else : # magMsd > 1.0 magMsd = 2 # This will be the nominal interval size interval = magMsd * magPow # Test it against the desired minimum, if any if min_interval is None or interval >= min_interval: # Either no min interval was specified, or its safely # less than the chosen interval. if minscale is None: minscale = interval * math.floor(fmn / interval) if maxscale is None: maxscale = interval * math.ceil(fmx / interval) else: # The request for a minimum interval has kicked in. # Sometimes this can make for a plot with just one or # two intervals in it. Adjust the min and max values # to get a nice plot interval = min_interval if minscale is None: if maxscale is None: # Both can float. Pick values so the range is near the bottom # of the scale: minscale = interval * math.floor(fmn / interval) maxscale = minscale + interval * nsteps else: # Only minscale can float minscale = maxscale - interval * nsteps else: if maxscale is None: # Only maxscale can float maxscale = minscale + interval * nsteps else: # Both are fixed --- nothing to be done pass return (minscale, maxscale, interval) def scaletime(tmin_ts, tmax_ts) : """Picks a time scaling suitable for a time plot. tmin_ts, tmax_ts: The time stamps in epoch time around which the times will be picked. Returns a scaling 3-tuple. First element is the start time, second the stop time, third the increment. All are in seconds (epoch time in the case of the first two). 
Example 1: 24 hours on an hour boundary >>> from weeutil.weeutil import timestamp_to_string as to_string >>> time_ts = time.mktime(time.strptime("2013-05-17 08:00", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 24*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 09:00:00 PDT (1368720000) 2013-05-17 09:00:00 PDT (1368806400) 10800 Example 2: 24 hours on a 3-hour boundary >>> time_ts = time.mktime(time.strptime("2013-05-17 09:00", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 24*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 09:00:00 PDT (1368720000) 2013-05-17 09:00:00 PDT (1368806400) 10800 Example 3: 24 hours on a non-hour boundary >>> time_ts = time.mktime(time.strptime("2013-05-17 09:01", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 24*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 12:00:00 PDT (1368730800) 2013-05-17 12:00:00 PDT (1368817200) 10800 Example 4: 27 hours >>> time_ts = time.mktime(time.strptime("2013-05-17 07:45", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 27*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 06:00:00 PDT (1368709200) 2013-05-17 09:00:00 PDT (1368806400) 10800 Example 5: 3 hours on a 15 minute boundary >>> time_ts = time.mktime(time.strptime("2013-05-17 07:45", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 3*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-17 05:00:00 PDT (1368792000) 2013-05-17 08:00:00 PDT (1368802800) 900 Example 6: 3 hours on a non-15 minute boundary >>> time_ts = time.mktime(time.strptime("2013-05-17 07:46", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 3*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-17 05:00:00 PDT (1368792000) 2013-05-17 08:00:00 PDT (1368802800) 900 Example 7: 12 hours >>> time_ts = time.mktime(time.strptime("2013-05-17 07:46", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 12*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 20:00:00 PDT (1368759600) 2013-05-17 08:00:00 PDT (1368802800) 3600 Example 8: 15 hours >>> time_ts = time.mktime(time.strptime("2013-05-17 07:46", "%Y-%m-%d %H:%M")) >>> xmin, xmax, xinc = scaletime(time_ts - 15*3600, time_ts) >>> print to_string(xmin), to_string(xmax), xinc 2013-05-16 17:00:00 PDT (1368748800) 2013-05-17 08:00:00 PDT (1368802800) 3600 """ if tmax_ts <= tmin_ts : raise weeplot.ViolatedPrecondition, "scaletime called with tmax <= tmin" tdelta = tmax_ts - tmin_ts tmin_dt = datetime.datetime.fromtimestamp(tmin_ts) tmax_dt = datetime.datetime.fromtimestamp(tmax_ts) if tdelta <= 16 * 3600: if tdelta <= 3*3600: # For time intervals less than 3 hours, use an increment of 15 minutes interval = 900 elif tdelta <= 12 * 3600: # For intervals from 3 hours up through 12 hours, use one hour interval = 3600 else: # For intervals from 12 through 16 hours, use two hours. interval = 7200 # Get to the one hour boundary below tmax: stop_dt = tmax_dt.replace(minute=0, second=0, microsecond=0) # if tmax happens to be on a one hour boundary we're done. Otherwise, round # up to the next one hour boundary: if tmax_dt > stop_dt: stop_dt += datetime.timedelta(hours=1) n_hours = int((tdelta + 3599) / 3600) start_dt = stop_dt - datetime.timedelta(hours=n_hours) elif tdelta <= 27 * 3600: # A day plot is wanted. 
A time increment of 3 hours is appropriate interval = 3 * 3600 # h is the hour of tmax_dt h = tmax_dt.timetuple()[3] # Subtract off enough to get to the lower 3-hour boundary from tmax: stop_dt = tmax_dt.replace(minute=0, second=0, microsecond=0) - datetime.timedelta(hours = h % 3) # If tmax happens to lie on a 3 hour boundary we don't need to do anything. If not, we need # to round up to the next 3 hour boundary: if tmax_dt > stop_dt: stop_dt += datetime.timedelta(hours=3) # The stop time is one day earlier start_dt = stop_dt - datetime.timedelta(days=1) if tdelta == 27 * 3600 : # A "slightly more than a day plot" is wanted. Start 3 hours earlier: start_dt -= datetime.timedelta(hours=3) elif 27 * 3600 < tdelta <= 31 * 24 * 3600 : # The time scale is between a day and a month. A time increment of one day is appropriate start_dt = tmin_dt.replace(hour=0, minute=0, second=0, microsecond=0) stop_dt = tmax_dt.replace(hour=0, minute=0, second=0, microsecond=0) tmax_tt = tmax_dt.timetuple() if tmax_tt[3]!=0 or tmax_tt[4]!=0 : stop_dt += datetime.timedelta(days=1) interval = 24 * 3600 else : # The time scale is more than a month. A time increment of a month is appropriate start_dt = tmin_dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0) (year , mon, day) = tmax_dt.timetuple()[0:3] if day != 1 : mon += 1 if mon==13 : mon = 1 year += 1 stop_dt = datetime.datetime(year, mon, 1) # Average month length: interval = 365.25/12 * 24 * 3600 # Convert to epoch time stamps start_ts = int(time.mktime(start_dt.timetuple())) stop_ts = int(time.mktime(stop_dt.timetuple())) return (start_ts, stop_ts, interval) class ScaledDraw(object): """Like an ImageDraw object, but lines are scaled. """ def __init__(self, draw, imagebox, scaledbox): """Initialize a ScaledDraw object. Example: scaledraw = ScaledDraw(draw, ((10, 10), (118, 246)), ((0.0, 0.0), (10.0, 1.0))) would create a scaled drawing where the upper-left image coordinate (10, 10) would correspond to the scaled coordinate( 0.0, 1.0). The lower-left image coordinate would correspond to the scaled coordinate (10.0, 0.0). draw: an instance of ImageDraw imagebox: a 2-tuple of the box coordinates on the image ((ulx, uly), (lrx, lry)) scaledbox: a 2-tuple of the box coordinates of the scaled plot ((llx, lly), (urx, ury)) """ uli = imagebox[0] lri = imagebox[1] lls = scaledbox[0] urs = scaledbox[1] if urs[1] == lls[1]: pass self.xscale = float(lri[0] - uli[0]) / float(urs[0] - lls[0]) self.yscale = -float(lri[1] - uli[1]) / float(urs[1] - lls[1]) self.xoffset = int(lri[0] - urs[0] * self.xscale + 0.5) self.yoffset = int(uli[1] - urs[1] * self.yscale + 0.5) self.draw = draw def line(self, x, y, line_type='solid', marker_type=None, marker_size=8, maxdx=None, **options) : """Draw a scaled line on the instance's ImageDraw object. x: sequence of x coordinates y: sequence of y coordinates, some of which are possibly null (value of None) line_type: 'solid' for line that connect the coordinates None for no line marker_type: None or 'none' for no marker. 'cross' for a cross 'circle' for a circle 'box' for a box 'x' for an X maxdx: defines what constitutes a gap in samples. if two data points are more than maxdx apart they are treated as separate segments. For a scatter plot, set line_type to None and marker_type to something other than None. """ # Break the line up around any nulls or gaps between samples for xy_seq in xy_seq_line(x, y, maxdx): # Create a list with the scaled coordinates... 
xy_seq_scaled = [(self.xtranslate(xc), self.ytranslate(yc)) for (xc,yc) in xy_seq] if line_type == 'solid': # Now pick the appropriate drawing function, depending on the length of the line: if len(xy_seq) == 1 : self.draw.point(xy_seq_scaled, fill=options['fill']) else : self.draw.line(xy_seq_scaled, **options) if marker_type and marker_type.lower().strip() not in ['none', '']: self.marker(xy_seq_scaled, marker_type, marker_size=marker_size, **options) def marker(self, xy_seq, marker_type, marker_size=10, **options): half_size = marker_size/2 marker=marker_type.lower() for x, y in xy_seq: if marker == 'cross': self.draw.line([(x-half_size, y), (x+half_size, y)], **options) self.draw.line([(x, y-half_size), (x, y+half_size)], **options) elif marker == 'x': self.draw.line([(x-half_size, y-half_size), (x+half_size, y+half_size)], **options) self.draw.line([(x-half_size, y+half_size), (x+half_size, y-half_size)], **options) elif marker == 'circle': self.draw.ellipse([(x-half_size, y-half_size), (x+half_size, y+half_size)], outline=options['fill']) elif marker == 'box': self.draw.line([(x-half_size, y-half_size), (x+half_size, y-half_size), (x+half_size, y+half_size), (x-half_size, y+half_size), (x-half_size, y-half_size)], **options) def rectangle(self, box, **options) : """Draw a scaled rectangle. box: A pair of 2-way tuples, containing coordinates of opposing corners of the box. options: passed on to draw.rectangle. Usually contains 'fill' (the color) """ box_scaled = [(coord[0]*self.xscale + self.xoffset + 0.5, coord[1]*self.yscale + self.yoffset + 0.5) for coord in box] self.draw.rectangle(box_scaled, **options) def vector(self, x, vec, vector_rotate, **options): if vec is None: return xstart_scaled = self.xtranslate(x) ystart_scaled = self.ytranslate(0) vecinc_scaled = vec * self.yscale if vector_rotate: vecinc_scaled *= complex(math.cos(math.radians(vector_rotate)), math.sin(math.radians(vector_rotate))) # Subtract off the x increment because the x-axis # *increases* to the right, unlike y, which increases # downwards xend_scaled = xstart_scaled - vecinc_scaled.real yend_scaled = ystart_scaled + vecinc_scaled.imag self.draw.line(((xstart_scaled, ystart_scaled), (xend_scaled, yend_scaled)), **options) def xtranslate(self, x): return int(x * self.xscale + self.xoffset + 0.5) def ytranslate(self, y): return int(y * self.yscale + self.yoffset + 0.5) def xy_seq_line(x, y, maxdx=None): """Generator function that breaks a line up into individual segments around any nulls held in y or any gaps in x greater than maxdx. x: iterable sequence of x coordinates. All values must be non-null y: iterable sequence of y coordinates, possibly with some embedded nulls (that is, their value==None) yields: Lists of (x,y) coordinates Example 1 >>> x=[ 1, 2, 3] >>> y=[10, 20, 30] >>> for xy_seq in xy_seq_line(x,y): ... print xy_seq [(1, 10), (2, 20), (3, 30)] Example 2 >>> x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> y=[0, 10, None, 30, None, None, 60, 70, 80, None] >>> for xy_seq in xy_seq_line(x,y): ... print xy_seq [(0, 0), (1, 10)] [(3, 30)] [(6, 60), (7, 70), (8, 80)] Example 3 >>> x=[ 0 ] >>> y=[None] >>> for xy_seq in xy_seq_line(x,y): ... print xy_seq Example 4 >>> x=[ 0, 1, 2] >>> y=[None, None, None] >>> for xy_seq in xy_seq_line(x,y): ... print xy_seq Example 5 (using gap) >>> x=[0, 1, 2, 3, 5.1, 6, 7, 8, 9] >>> y=[0, 10, 20, 30, 50, 60, 70, 80, 90] >>> for xy_seq in xy_seq_line(x,y,2): ... 
print xy_seq [(0, 0), (1, 10), (2, 20), (3, 30)] [(5.1, 50), (6, 60), (7, 70), (8, 80), (9, 90)] """ line = [] last_x = None for xy in zip(x, y): dx = xy[0] - last_x if last_x is not None else 0 last_x = xy[0] # If the y coordinate is None or dx > maxdx, that marks a break if xy[1] is None or (maxdx is not None and dx > maxdx): # If the length of the line is non-zero, yield it if len(line): yield line line = [] if xy[1] is None else [xy] else: line.append(xy) if len(line): yield line def pickLabelFormat(increment): """Pick an appropriate label format for the given increment. Examples: >>> print pickLabelFormat(1) %.0f >>> print pickLabelFormat(20) %.0f >>> print pickLabelFormat(.2) %.1f >>> print pickLabelFormat(.01) %.2f """ i_log = math.log10(increment) if i_log < 0 : i_log = abs(i_log) decimal_places = int(i_log) if i_log != decimal_places : decimal_places += 1 else : decimal_places = 0 return "%%.%df" % decimal_places def get_font_handle(fontpath, *args): font_key = (fontpath, args) if font_key in get_font_handle.fontCache: return get_font_handle.fontCache[font_key] font = None if fontpath is not None : try : if fontpath.endswith('.ttf'): font = ImageFont.truetype(fontpath, *args) else : font = ImageFont.load_path(fontpath) except IOError : pass if font is None : font = ImageFont.load_default() if font is not None : get_font_handle.fontCache[font_key] = font return font get_font_handle.fontCache={} def _rel_approx_equal(x, y, rel=1e-7): """Relative test for equality. Example >>> _rel_approx_equal(1.23456, 1.23457) False >>> _rel_approx_equal(1.2345678, 1.2345679) True >>> _rel_approx_equal(0.0, 0.0) True >>> _rel_approx_equal(0.0, 0.1) False >>> _rel_approx_equal(0.0, 1e-9) False >>> _rel_approx_equal(1.0, 1.0+1e-9) True >>> _rel_approx_equal(1e8, 1e8+1e-3) True """ return abs(x-y) <= rel*max(abs(x), abs(y)) def tobgr(x): """Convert a color to little-endian integer. The PIL wants either a little-endian integer (0xBBGGRR) or a string (#RRGGBB). weewx expects little-endian integer. Accept any standard color format that is known by ImageColor for example #RGB, #RRGGBB, hslHSL as well as standard color names from X11 and CSS3. See ImageColor for complete set of colors. """ if isinstance(x, basestring): if x.startswith('0x'): return int(x, 0) try: (r,g,b) = ImageColor.getrgb(x) return r + g*256 + b*256*256 except : pass try: return int(x) except ValueError: pass raise ValueError("Unknown color specifier: '%s'. Colors must be specified as 0xBBGGRR, #RRGGBB, or standard color names." % x) return x if __name__ == "__main__": import doctest if not doctest.testmod().failed: print "PASSED"
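

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of weewx proper): a small optional demo of the
# scaling helpers above.  The expected values in the comments come from the
# doctests earlier in this module; ``_scale_demo`` is a name introduced here
# for illustration and nothing calls it automatically.
def _scale_demo():
    # Axis bounds and step for data spanning 1.1 .. 12.3 (default 10 steps):
    print(scale(1.1, 12.3))                      # (0.0, 14.0, 2.0)
    # A minimum interval in the prescale tuple can coarsen the step:
    print(scale(0.0, 0.21, (None, None, .02)))   # (0.0, 0.22, 0.02)
    # Label format string matched to the chosen increment:
    print(pickLabelFormat(.2))                   # %.1f
    # Split a series around a null so each piece can be drawn separately:
    for segment in xy_seq_line([0, 1, 2, 3], [0, 10, None, 30]):
        print(segment)                           # [(0, 0), (1, 10)] then [(3, 30)]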
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This Module performs Unit Tests for the TSA.Fourier class. It can not be considered part of the active code but of the regression test system """ import os import sys import copy import numpy as np # add RAVEN to path frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)] + [os.pardir]*4 + ['framework']))) if frameworkDir not in sys.path: sys.path.append(frameworkDir) from utils.utils import find_crow find_crow(frameworkDir) from utils import xmlUtils from TSA import Fourier plot = False print('Module undergoing testing:') print(Fourier) print('') results = {"pass":0,"fail":0} def checkFloat(comment, value, expected, tol=1e-10, update=True): """ This method is aimed to compare two floats given a certain tolerance @ In, comment, string, a comment printed out if it fails @ In, value, float, the value to compare @ In, expected, float, the expected value @ In, tol, float, optional, the tolerance @ In, update, bool, optional, if False then don't update results counter @ Out, res, bool, True if same """ if np.isnan(value) and np.isnan(expected): res = True elif np.isnan(value) or np.isnan(expected): res = False else: res = abs(value - expected) <= tol if update: if not res: print("checking float",comment,'|',value,"!=",expected) results["fail"] += 1 else: results["pass"] += 1 return res def checkTrue(comment, res, update=True): """ This method is a pass-through for consistency and updating @ In, comment, string, a comment printed out if it fails @ In, res, bool, the tested value @ In, update, bool, optional, if False then don't update results counter @ Out, res, bool, True if test """ if update: if res: results["pass"] += 1 else: print("checking bool",comment,'|',res,'is not True!') results["fail"] += 1 return res def checkSame(comment, value, expected, update=True): """ This method is aimed to compare two identical things @ In, comment, string, a comment printed out if it fails @ In, value, float, the value to compare @ In, expected, float, the expected value @ In, update, bool, optional, if False then don't update results counter @ Out, res, bool, True if same """ res = value == expected if update: if res: results["pass"] += 1 else: print("checking string",comment,'|',value,"!=",expected) results["fail"] += 1 return res def checkArray(comment, first, second, dtype, tol=1e-10, update=True): """ This method is aimed to compare two arrays @ In, comment, string, a comment printed out if it fails @ In, value, float, the value to compare @ In, expected, float, the expected value @ In, tol, float, optional, the tolerance @ In, update, bool, optional, if False then don't update results counter @ Out, res, bool, True if same """ res = True if len(first) != len(second): res = False print("checking answer",comment,'|','lengths do not match:',len(first),len(second)) else: for i in range(len(first)): if dtype == float: pres = checkFloat('',first[i],second[i],tol,update=False) elif dtype in (str,unicode): 
        pres = checkSame('',first[i],second[i],update=False)
      if not pres:
        print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
        res = False
  if update:
    if res:
      results["pass"] += 1
    else:
      results["fail"] += 1
  return res

def checkNone(comment, entry, update=True):
  """
    Checks if entry is None.
    @ In, comment, string, a comment printed out if it fails
    @ In, entry, object, the object to test against None
    @ In, update, bool, optional, if False then don't update results counter
    @ Out, res, bool, True if None
  """
  res = entry is None
  if update:
    if res:
      results["pass"] += 1
    else:
      print("checking answer",comment,'|','"{}" is not None!'.format(entry))
      results["fail"] += 1
  return res

def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):
  """
    Checks if expected error occurs
    @ In, comment, string, a comment printed out if it fails
    @ In, errstr, str, expected fail message
    @ In, function, method, method to run to test for failure
    @ In, update, bool, optional, if False then don't update results counter
    @ In, args, list, arguments to pass to function
    @ In, kwargs, dict, keyword arguments to pass to function
    @ Out, res, bool, True if failed as expected
  """
  print('Error testing ...')
  if args is None:
    args = []
  if kwargs is None:
    kwargs = {}
  try:
    function(*args,**kwargs)
    res = False
    msg = 'Function call did not error!'
  except Exception as e:
    res = checkSame('',e.args[0],errstr,update=False)
    if not res:
      msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
  if update:
    if res:
      results["pass"] += 1
      print(' ... end Error testing (PASSED)')
    else:
      print("checking error",comment,'|',msg)
      results["fail"] += 1
      print(' ... end Error testing (FAILED)')
  print('')
  return res

######################################
#            CONSTRUCTION            #
######################################
def createFourierXML(targets, periods):
  xml = xmlUtils.newNode('Fourier', attrib={'target':','.join(targets)})
  xml.append(xmlUtils.newNode('periods', text=','.join(str(k) for k in periods)))
  return xml

def createFromXML(xml):
  fourier = Fourier()
  inputSpec = Fourier.getInputSpecification()()
  inputSpec.parseNode(xml)
  fourier.handleInput(inputSpec)
  return fourier

def createFourier(targets, periods):
  xml = createFourierXML(targets, periods)
  fourier = createFromXML(xml)
  return fourier

def createFourierSignal(amps, periods, phases, pivot, intercept=0, plot=False):
  if plot:
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
  signal = np.zeros(len(pivot)) + intercept
  for k, period in enumerate(periods):
    new = amps[k] * np.sin(2 * np.pi / period * pivot + phases[k])
    if plot:
      ax.plot(pivot, new, ':')
    signal += new
  if plot:
    ax.plot(pivot, signal, 'k-')
    plt.show()
  return signal

###################
#      Simple     #
###################
# generate signal
targets = ['A', 'B', 'C']
pivot = np.arange(100) / 10.
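
# Each target signal built below is a known sum of sinusoids,
#   signal(t) = intercept + sum_k amps[k] * sin(2*pi / periods[k] * t + phases[k]),
# generated by createFourierSignal() above.  The checkFloat/checkArray calls
# that follow verify that Fourier.characterize() recovers these amplitudes,
# phases, and intercepts, and that generate()/getResidual() reproduce the
# training signals.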
periods = [2, 5, 10] amps = [0.5, 1, 2] phasesA = [0, np.pi, 0] signalA = createFourierSignal(amps, periods, phasesA, pivot, plot=plot) phasesB = [np.pi, 0, np.pi/4] signalB = createFourierSignal(amps, periods, phasesB, pivot, plot=plot) phasesC = [np.pi, np.pi/4, -np.pi/4] interceptC = 2 signalC = createFourierSignal(amps, periods, phasesC, pivot, intercept=interceptC, plot=plot) signals = np.zeros((len(pivot), 3)) signals[:, 0] = signalA signals[:, 1] = signalB signals[:, 2] = signalC fourier = createFourier(targets, periods) settings = {'periods': periods} params = fourier.characterize(signals, pivot, targets, settings) checkTrue("fourier can generate", fourier.canGenerate()) checkTrue("fourier can characterize", fourier.canCharacterize()) # intercepts checkFloat('Signal A intercept', params['A']['intercept'], 0) checkFloat('Signal B intercept', params['B']['intercept'], 0) checkFloat('Signal C intercept', params['C']['intercept'], interceptC) # amplitudes checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2]) checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2]) checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2]) # phases # check absolute value of phase pi since -pi and pi are often converged on separately checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0]) checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1]) checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2]) checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0]) checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1]) checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2]) checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0]) checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1]) checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2]) # residual ## add constant to training, make sure we get constant back const = 42.0 residSig = signals + const resid = fourier.getResidual(residSig, params, pivot, settings) checkFloat('Residual check', (resid-const).sum(), 0) # recreate signals res = fourier.generate(params, pivot, None) for tg, target in enumerate(targets): checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float) ##### now redo with non-simultaneous fitting params = fourier.characterize(signals, pivot, targets, settings, simultFit=False) # intercepts checkFloat('Signal A intercept', params['A']['intercept'], 0) checkFloat('Signal B intercept', params['B']['intercept'], 0) checkFloat('Signal C intercept', 
params['C']['intercept'], interceptC) # amplitudes checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2]) checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2]) checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0]) checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1]) checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2]) # phases # check absolute value of phase pi since -pi and pi are often converged on separately checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0]) checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1]) checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2]) checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0]) checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1]) checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2]) checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0]) checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1]) checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2]) # recreate signals res = fourier.generate(params, pivot, settings) for tg, target in enumerate(targets): checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float) # check residual # -> generate random noise to add to signal, then check it is returned in residual r = np.random.rand(pivot.size, len(targets)) new = r + signals res = fourier.getResidual(new, params, pivot, None) for tg, target in enumerate(targets): checkArray(f'Signal {target} residual', res[:, tg], r[:, tg], float) print(results) sys.exit(results["fail"]) """ <TestInfo> <name>framework.unit_tests.TSA.Fourier</name> <author>talbpaul</author> <created>2021-01-05</created> <classesTested>TSA.Fourier</classesTested> <description> This test is a Unit Test for the Fourier TimeSeriesAnalyzer classes. </description> </TestInfo> """