from __future__ import unicode_literals

import base64
from datetime import datetime

from moto.core import BaseBackend

from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException
from .utils import random_access_key, random_alphanumeric, random_resource_id


class Role(object):
    def __init__(self, role_id, name, assume_role_policy_document, path):
        self.id = role_id
        self.name = name
        self.assume_role_policy_document = assume_role_policy_document
        self.path = path
        self.policies = {}

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']

        role = iam_backend.create_role(
            role_name=resource_name,
            assume_role_policy_document=properties['AssumeRolePolicyDocument'],
            path=properties['Path'],
        )

        policies = properties.get('Policies', [])
        for policy in policies:
            policy_name = policy['PolicyName']
            policy_json = policy['PolicyDocument']
            role.put_policy(policy_name, policy_json)

        return role

    def put_policy(self, policy_name, policy_json):
        self.policies[policy_name] = policy_json

    @property
    def physical_resource_id(self):
        return self.id

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
        raise UnformattedGetAttTemplateException()


class InstanceProfile(object):
    def __init__(self, instance_profile_id, name, path, roles):
        self.id = instance_profile_id
        self.name = name
        self.path = path
        self.roles = roles if roles else []

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']

        role_ids = properties['Roles']
        return iam_backend.create_instance_profile(
            name=resource_name,
            path=properties['Path'],
            role_ids=role_ids,
        )

    @property
    def physical_resource_id(self):
        return self.name

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
        raise UnformattedGetAttTemplateException()


class Certificate(object):
    def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
        self.cert_name = cert_name
        self.cert_body = cert_body
        self.private_key = private_key
        self.path = path
        self.cert_chain = cert_chain

    @property
    def physical_resource_id(self):
        # Fixed: the original returned self.name, which is never set on this class.
        return self.cert_name


class AccessKey(object):
    def __init__(self, user_name):
        self.user_name = user_name
        self.access_key_id = random_access_key()
        self.secret_access_key = random_alphanumeric(32)
        self.status = 'Active'
        self.create_date = datetime.strftime(
            datetime.utcnow(),
            "%Y-%m-%d-%H-%M-%S"
        )

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'SecretAccessKey':
            return self.secret_access_key
        raise UnformattedGetAttTemplateException()


class Group(object):
    def __init__(self, name, path='/'):
        self.name = name
        self.id = random_resource_id()
        self.path = path
        self.created = datetime.strftime(
            datetime.utcnow(),
            "%Y-%m-%d-%H-%M-%S"
        )
        self.users = []

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
        raise UnformattedGetAttTemplateException()


class User(object):
    def __init__(self, name, path='/'):
        self.name = name
        self.id = random_resource_id()
        self.path = path
        self.created = datetime.strftime(
            datetime.utcnow(),
            "%Y-%m-%d-%H-%M-%S"
        )
        self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name)
        self.policies = {}
        self.access_keys = []
        self.password = None

    def get_policy(self, policy_name):
        try:
            policy_json = self.policies[policy_name]
        except KeyError:
            raise IAMNotFoundException("Policy {0} not found".format(policy_name))

        return {
            'policy_name': policy_name,
            'policy_document': policy_json,
            'user_name': self.name,
        }

    def put_policy(self, policy_name, policy_json):
        self.policies[policy_name] = policy_json

    def delete_policy(self, policy_name):
        if policy_name not in self.policies:
            raise IAMNotFoundException("Policy {0} not found".format(policy_name))
        del self.policies[policy_name]

    def create_access_key(self):
        access_key = AccessKey(self.name)
        self.access_keys.append(access_key)
        return access_key

    def get_all_access_keys(self):
        return self.access_keys

    def delete_access_key(self, access_key_id):
        for key in self.access_keys:
            if key.access_key_id == access_key_id:
                self.access_keys.remove(key)
                break
        else:
            raise IAMNotFoundException("Key {0} not found".format(access_key_id))

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
        raise UnformattedGetAttTemplateException()

    def to_csv(self):
        date_format = '%Y-%m-%dT%H:%M:%S+00:00'
        date_created = datetime.strptime(self.created, '%Y-%m-%d-%H-%M-%S')
        # Example row:
        # aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
        if not self.password:
            password_enabled = 'false'
            password_last_used = 'not_supported'
        else:
            password_enabled = 'true'
            password_last_used = 'no_information'

        if len(self.access_keys) == 0:
            access_key_1_active = 'false'
            access_key_1_last_rotated = 'N/A'
            access_key_2_active = 'false'
            access_key_2_last_rotated = 'N/A'
        elif len(self.access_keys) == 1:
            access_key_1_active = 'true'
            access_key_1_last_rotated = date_created.strftime(date_format)
            access_key_2_active = 'false'
            access_key_2_last_rotated = 'N/A'
        else:
            access_key_1_active = 'true'
            access_key_1_last_rotated = date_created.strftime(date_format)
            access_key_2_active = 'true'
            access_key_2_last_rotated = date_created.strftime(date_format)

        # Newline-terminated so rows don't run together when the report
        # concatenates one line per user.
        return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A\n'.format(
            self.name,
            self.arn,
            date_created.strftime(date_format),
            password_enabled,
            password_last_used,
            date_created.strftime(date_format),
            access_key_1_active,
            access_key_1_last_rotated,
            access_key_2_active,
            access_key_2_last_rotated,
        )


class IAMBackend(BaseBackend):
    def __init__(self):
        self.instance_profiles = {}
        self.roles = {}
        self.certificates = {}
        self.groups = {}
        self.users = {}
        self.credential_report = None
        super(IAMBackend, self).__init__()

    def create_role(self, role_name, assume_role_policy_document, path):
        role_id = random_resource_id()
        role = Role(role_id, role_name, assume_role_policy_document, path)
        self.roles[role_id] = role
        return role

    def get_role_by_id(self, role_id):
        return self.roles.get(role_id)

    def get_role(self, role_name):
        for role in self.get_roles():
            if role.name == role_name:
                return role
        raise IAMNotFoundException("Role {0} not found".format(role_name))

    def get_roles(self):
        return self.roles.values()

    def put_role_policy(self, role_name, policy_name, policy_json):
        role = self.get_role(role_name)
        role.put_policy(policy_name, policy_json)

    def get_role_policy(self, role_name, policy_name):
        role = self.get_role(role_name)
        for p, d in role.policies.items():
            if p == policy_name:
                return p, d

    def list_role_policies(self, role_name):
        role = self.get_role(role_name)
        return role.policies.keys()

    def create_instance_profile(self, name, path, role_ids):
        instance_profile_id = random_resource_id()

        roles = [iam_backend.get_role_by_id(role_id) for role_id in role_ids]
        instance_profile = InstanceProfile(instance_profile_id, name, path, roles)
        self.instance_profiles[instance_profile_id] = instance_profile
        return instance_profile

    def get_instance_profile(self, profile_name):
        for profile in self.get_instance_profiles():
            if profile.name == profile_name:
                return profile

    def get_instance_profiles(self):
        return self.instance_profiles.values()

    def get_instance_profiles_for_role(self, role_name):
        found_profiles = []
        for profile in self.get_instance_profiles():
            if len(profile.roles) > 0:
                if profile.roles[0].name == role_name:
                    found_profiles.append(profile)
        return found_profiles

    def add_role_to_instance_profile(self, profile_name, role_name):
        profile = self.get_instance_profile(profile_name)
        role = self.get_role(role_name)
        profile.roles.append(role)

    def get_all_server_certs(self, marker=None):
        return self.certificates.values()

    def upload_server_cert(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
        certificate_id = random_resource_id()
        cert = Certificate(cert_name, cert_body, private_key, cert_chain, path)
        self.certificates[certificate_id] = cert
        return cert

    def get_server_certificate(self, name):
        for key, cert in self.certificates.items():
            if name == cert.cert_name:
                return cert

    def create_group(self, group_name, path='/'):
        if group_name in self.groups:
            raise IAMConflictException("Group {0} already exists".format(group_name))

        group = Group(group_name, path)
        self.groups[group_name] = group
        return group

    def get_group(self, group_name, marker=None, max_items=None):
        try:
            return self.groups[group_name]
        except KeyError:
            raise IAMNotFoundException("Group {0} not found".format(group_name))

    def list_groups(self):
        return self.groups.values()

    def get_groups_for_user(self, user_name):
        user = self.get_user(user_name)
        groups = []
        for group in self.list_groups():
            if user in group.users:
                groups.append(group)
        return groups

    def create_user(self, user_name, path='/'):
        if user_name in self.users:
            raise IAMConflictException("EntityAlreadyExists", "User {0} already exists".format(user_name))

        user = User(user_name, path)
        self.users[user_name] = user
        return user

    def get_user(self, user_name):
        try:
            return self.users[user_name]
        except KeyError:
            raise IAMNotFoundException("User {0} not found".format(user_name))

    def create_login_profile(self, user_name, password):
        # This does not currently deal with PasswordPolicyViolation.
        user = self.get_user(user_name)
        if user.password:
            raise IAMConflictException("User {0} already has password".format(user_name))
        user.password = password

    def add_user_to_group(self, group_name, user_name):
        user = self.get_user(user_name)
        group = self.get_group(group_name)
        group.users.append(user)

    def remove_user_from_group(self, group_name, user_name):
        group = self.get_group(group_name)
        user = self.get_user(user_name)
        try:
            group.users.remove(user)
        except ValueError:
            raise IAMNotFoundException("User {0} not in group {1}".format(user_name, group_name))

    def get_user_policy(self, user_name, policy_name):
        user = self.get_user(user_name)
        return user.get_policy(policy_name)

    def put_user_policy(self, user_name, policy_name, policy_json):
        user = self.get_user(user_name)
        user.put_policy(policy_name, policy_json)

    def delete_user_policy(self, user_name, policy_name):
        user = self.get_user(user_name)
        user.delete_policy(policy_name)

    def create_access_key(self, user_name=None):
        user = self.get_user(user_name)
        return user.create_access_key()

    def get_all_access_keys(self, user_name, marker=None, max_items=None):
        user = self.get_user(user_name)
        return user.get_all_access_keys()

    def delete_access_key(self, access_key_id, user_name):
        user = self.get_user(user_name)
        user.delete_access_key(access_key_id)

    def delete_user(self, user_name):
        try:
            del self.users[user_name]
        except KeyError:
            raise IAMNotFoundException("User {0} not found".format(user_name))

    def report_generated(self):
        return self.credential_report

    def generate_report(self):
        self.credential_report = True

    def get_credential_report(self):
        if not self.credential_report:
            raise IAMReportNotPresentException("Credential report not present")
        report = 'user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n'
        for user in self.users:
            report += self.users[user].to_csv()
        return base64.b64encode(report.encode('ascii')).decode('ascii')


iam_backend = IAMBackend()
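A minimal usage sketch of the in-memory backend above, exercising only methods defined in this module; the role name, user name, and policy documents are illustrative values, not part of the source:

    # Hypothetical usage of the module-level singleton; values are illustrative.
    role = iam_backend.create_role(
        role_name='my-role',
        assume_role_policy_document='{"Statement": []}',
        path='/',
    )
    iam_backend.put_role_policy('my-role', 'inline', '{"Statement": []}')

    iam_backend.create_user('alice')
    key = iam_backend.create_access_key('alice')
    print(key.access_key_id, key.secret_access_key)

    iam_backend.generate_report()
    print(iam_backend.get_credential_report())  # base64-encoded CSV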
On Monday, September 24, 2018, a unique six-day conference concluded in the picturesque village of Fuzine, Croatia. More than 50 asylum seekers, together with an international team of facilitators and humanitarian and faith-based activists from twelve countries, including Egypt, Afghanistan, Iraq, Syria and Iran, came together to wrestle with a number of burning questions that asylum seekers in Croatia and Europe face daily during the prolonged wait between interviews and the final government decision on their requests to stay permanently in Croatia. This dynamic and at times intense conference, with the theme “Moving Forward in Truth, with Courage and Hope,” was an extension of the ROM (Renewing Our Minds) international leadership project, and was organized by the Forum for Leadership and Reconciliation (US) and Business as Mission (Zagreb, Croatia), in partnership with the ECPM (European Christian Political Movement) of Brussels, Belgium. The conference was conducted in English, with simultaneous translation into Farsi and Arabic. The conference objectives included helping seekers of international protection in Croatia, and those whose asylum statuses had recently been favorably resolved, gain a balanced understanding of the integration process and its challenges in Croatia. The organizers also wanted to hear from the participating asylum seekers about their most pressing challenges, as many wait two to three years, driven by uncertainty and anxiety, for the Croatian government to answer their requests. In short, the objectives of the conference were to help immigrants in Croatia and Europe integrate well, and to help them grasp the essentials of European culture and Christian faith. The daily afternoon workshops helped the conference participants, most of whom were family men and women with children, to voice their most urgent needs and anxieties. Some of the most frequent questions were: Why does the process of waiting for the final decision take so long? Why are some asylum seekers who came to Croatia only recently receiving positive answers sooner than those who entered Croatia two or three years ago and are still waiting to have their cases finalized? Why, while a few asylum seekers have been allowed to work legally in Croatia as they wait for their asylum requests to be finalized, are many more denied that right? Why are the privileges promised to waiting asylum seekers under the law, such as Croatian language instruction and access to health care, seldom accessible in practice? And why is the Croatian government not helping to reunite families separated in different places and countries along the refugee trail? Through carefully selected lectures, and much time devoted to questions and group conversation, the organizers of the conference devoted their attention to the issues of law, culture and the place of religion in Croatian society: three overlapping areas that must be understood if integration into Croatian society is to be fully appreciated and implemented. Special attention was given to building friendship and trust between Muslim and Christian asylum seekers, as well as between asylum seekers and Croatian citizens and institutions.
At the workshop on the role of faith and religious communities in Croatia, it became obvious that asylum seekers expect more empathy and support from Croatia’s religious communities than from the government. “Since it is in the nature of religious communities to care about vulnerable groups,” they reasoned, “religious organizations, churches and faith-based groups should be able to put in place a synchronized care network” that could efficiently help asylum seekers through the months and years of uncertainty and anxious waiting, and also help the asylum holders who have recently been granted the right to remain in Croatia. For example, they could help new asylum holders, especially those with children, to find their first home and first job. The strength of this conference was that it dealt insightfully and compassionately with the challenges of integration as observed and handled by the three sides involved in the lives of asylum seekers: as they are handled by Croatian and European law and legal systems; as they are experienced by the asylum seekers themselves; and as they are addressed by the church and faith-based organizations and groups. Since most asylum seekers spend long periods anxiously waiting for a final, affirming decision from the government, often after two or three prior rejections, it became clear in the initial days of the conference that most asylum seekers in waiting have lost trust in the Croatian government, its institutions and supporting agencies. The conference leadership team, which also included a group of asylum seekers and asylum holders, worked diligently to see the damaged trust and relationships healed and restored, and promised to contribute to the conversation by making the findings of the conference available to the public and the relevant institutions. The quality of the conference was enhanced by the choice of speakers and facilitators, including experienced ROM (Renewing Our Minds) team members. Among them were Heather Staff, an emerging UK politician and Policy Adviser to Kate Green MP; Leo van Doesburg, Director for European Affairs for the European Christian Political Movement (ECPM); Amir Hekmatpour, spiritual advisor and faith mentor from the US; Donya and Shayan Spanta, asylum holders and humanitarian activists from Iran, now Croatian residents; Mihal Kreko, Christian pastor and humanitarian activist from Zagreb, Croatia; Tihomir Kukolja, Renewing Our Minds (ROM) Director; and Ana Šutalo, a representative of the Croatian Ministry of the Interior and Asylum Support Expert. Ana Šutalo’s contribution was especially significant, as she explained in detail what needs to happen once an asylum seeker receives residential status in Croatia. Although the conference dealt with very serious themes and issues, often accompanied by heated but honest discussion, this event also became a festival of friendship through the relaxing, spirit-lifting hours of sharing in energetic feasts of music, dances, customs and the cultures of the countries represented at the conference, and in moments of thanksgiving. A special feature of the conference, however, was the moments that addressed the spiritual hunger evident in all participants, Christians and Muslims alike.
No hours were too long, and no optional workshops too many, for the groups of participants who never ceased to ask new questions as they listened to Amir Hekmatpour, even outside his official speaking sessions. Hekmatpour, from the US and of Iranian origin, served as a spiritual adviser, mentor and speaker at the conference. “The results of this integration conference surpassed all our expectations,” stated Mihal Kreko, one of the organizers of the 2018 ROM Integration Conference and the director of a unique integration project currently under construction in Zagreb, Croatia, known as “The House of Hope.” Heather Staff, a speaker at the conference and a UK politician, commented: “I’ve never before experienced the level of honesty in our conversations as at this conference, and even when we disagreed we did it agreeably.” “The Thanksgiving Evening on the closing Sunday demonstrated that we all became one forgiving, forgiven and reconciled family of friends who loved each other. By the end of the conference one could see that the faces of asylum seekers radiated with new hope and new joy,” said Liviu Bocaniala, the conference music director. And Tihomir Kukolja, the ROM (Renewing Our Minds) Director, stated: “This conference opened my eyes. I learned more about the hardships of being an asylum seeker in the six days of this conference than in the previous three years.” At this time the organizers are busy preparing a memorandum that will be presented to governmental and non-governmental organizations before the end of this month, as well as to a number of church and faith-based organizations in Croatia and abroad. The document will present the conference’s objectives, conclusions and recommendations in detail. It will be released in Croatian, English, Farsi and Arabic, and presented to several international organizations as well. Tihomir Kukolja is director of ROM (Renewing Our Minds). This article originally appeared on the ROM website, and is reprinted here with permission. Image courtesy of ROM.
#!/usr/bin/env python3
"""
Downloads assets to the static directory.

This is used to allow easier updating without having to clog
the git repository with frequently updated minified JS/CSS.
"""
import os
import sys
import urllib.request
from collections import namedtuple

from ansicolor import black, red

Asset = namedtuple("Asset", ["filename", "url"])

assets = [
    # Bootstrap
    Asset("css/bootstrap.min.css", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css"),
    Asset("js/bootstrap.min.js", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js"),
    Asset("fonts/glyphicons-halflings-regular.eot", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.eot"),
    Asset("fonts/glyphicons-halflings-regular.svg", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.svg"),
    Asset("fonts/glyphicons-halflings-regular.ttf", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.ttf"),
    Asset("fonts/glyphicons-halflings-regular.woff", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff"),
    Asset("fonts/glyphicons-halflings-regular.woff2", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff2"),
    # Angular
    Asset("js/angular.min.js", "https://ajax.googleapis.com/ajax/libs/angularjs/1.3.13/angular.min.js"),
    # Angular Bootstrap directives
    Asset("js/angular-bootstrap.min.js", "https://angular-ui.github.io/bootstrap/ui-bootstrap-tpls-0.12.0.min.js"),
    # JQuery & plugins
    Asset("js/jquery.min.js", "https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"),
    Asset("js/jquery.highlight.js", "https://raw.githubusercontent.com/bartaz/sandbox.js/master/jquery.highlight.js"),
]


def ensureFileIsPresent(asset, directory, forceDownload=False):
    (filename, url) = asset
    filepath = os.path.join(directory, filename)
    if url is None:  # --> no need to download
        return
    if not os.path.isfile(filepath) or forceDownload:
        # Create directory if required
        dirname = os.path.dirname(filepath)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # Perform download
        print(black("Downloading %s" % filename, bold=True))
        urllib.request.urlretrieve(url, filepath)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--force-download", action="store_true",
                        help="Force downloading assets")
    parser.add_argument("-d", "--directory", default="./static",
                        help="Directory to download (must exist)")
    args = parser.parse_args()
    # Check if the --directory arg is valid
    if not os.path.isdir(args.directory):
        print(red("%s is not a directory" % args.directory, bold=True))
        sys.exit(1)
    # Create subdirs if not already present and download each missing file
    for asset in assets:
        ensureFileIsPresent(asset, args.directory, args.force_download)
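A minimal sketch of driving the downloader programmatically instead of through argparse; the module name download_assets is an assumption, not something fixed by the script itself:

    # Hypothetical programmatic use; the module name is assumed.
    import download_assets

    for asset in download_assets.assets:
        download_assets.ensureFileIsPresent(asset, "./static", forceDownload=False)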
alright. no i am not a lovey fangirl but i really am a romantic. so i read some of the other scenario articles, played Tries Bien... and wrote this. Screw Arthur! If I want to be with Francism I will!!
import argparse

from pylab import *

from database import DatabaseAccessor
from data_structures import Perf


def graph(db_file, filename, ttl):
    db = DatabaseAccessor(Perf, db_file)
    perf_data = db.read()
    save_fig(perf_data, filename, ttl)


def save_fig(perf_data, filename, ttl):
    iters = range(len(perf_data))  # number of epochs
    tr_errs = [entry[1] for entry in perf_data]
    vd_errs = [entry[2] for entry in perf_data]
    plot(iters, tr_errs, 'g', label='Training Error')
    plot(iters, vd_errs, 'r', label='Validation Error')
    legend()
    xlabel('Epoch')
    ylabel('Error')
    title(ttl)
    grid(True)
    savefig(filename)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tool to graph the validation '
                                                 'and training error collected during training phase')
    parser.add_argument('database_file', help='the database file that contains data')
    parser.add_argument('figure_file', help='graph of the data')
    parser.add_argument('title', help='graph title')
    args = parser.parse_args()
    graph(args.database_file, args.figure_file, args.title)
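Assuming a database file produced during a training run, the tool can also be called directly; the file names below are illustrative, not taken from the script:

    # Hypothetical direct call; 'perf.db' and 'errors.png' are made-up names.
    graph('perf.db', 'errors.png', 'Training vs. validation error')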
Next, you’ll need to be sure your wireless adapter is actually turned on. To install the Medion WIM 2040 drivers manually, select your operating system, download the zipped files, and then proceed to install them. If the operating system has been reinstalled, you will have to download the correct drivers for wireless, video, and the Medion card reader; a driver-update tool such as Driver Reviver will also identify any other out-of-date drivers. If you are opening the case (for example, to replace the battery), make sure you have removed all screws from the bottom case, including the four under the rubber feet at the rear. Regarding replacement batteries: they are manufactured with high-quality components, and quality-control tests are frequently carried out as part of the ISO procedure; if the capacity (Ah) of a replacement battery is higher than the capacity of the original battery, it will provide a longer runtime.
import datetime

from handler.base_plugin import CommandPlugin


class TimePlugin(CommandPlugin):
    __slots__ = ("delta", "message", "days")

    def __init__(self, *commands, prefixes=None, strict=False, offseth=3, offsetm=0, message=None):
        """Answers with current date and time."""
        if not commands:
            commands = ("время",)

        super().__init__(*commands, prefixes=prefixes, strict=strict)

        self.message = message or "Текущие дата и время по МСК:"
        self.delta = datetime.timedelta(hours=offseth, minutes=offsetm)

        self.days = {0: 'понедельник', 1: 'вторник', 2: 'среда', 3: 'четверг',
                     4: 'пятница', 5: 'суббота', 6: 'воскресенье'}

        example = self.command_example()
        self.description = [f"Текущее время",
                            f"{example} - показывает текущее время и дату."]

    async def process_message(self, msg):
        time = datetime.datetime.now(datetime.timezone.utc) + self.delta
        timestr = time.strftime('%d-%m-%Y %H:%M:%S')

        await msg.answer(f'{self.message}\n{timestr}\nСегодня {self.days[time.weekday()]}.')
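A minimal registration sketch, assuming the surrounding bot framework collects CommandPlugin instances into a plugin list (the plugins variable and that convention are assumptions, not part of this module):

    # Hypothetical plugin registration; only the constructor above is assumed.
    plugins = []
    plugins.append(TimePlugin("время", "time", offseth=3, offsetm=0))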
In the midst of an array of Fussaaq so-called ‘scholars’, posing for haraam picture-making, is the Faasiq Reverend Abram Bham who masquerades as an Aalim of the Deen. While this criticism is applicable to the entire lot of miserable agents of Iblees whose ugly faces appear in the group photo which includes faasiqah women as well, we single out the evil Reverend since he still pretends to be an Aalim of Deoband, and is known and accepted as a valid Molvi by Muslims of the Hanafi Math-hab. As far as the other moron ‘scholars for dollars’ and ulama-e-soo are concerned – those who appear in the photo – there is no need to comment much, since their satanic credentials are well-known in our Hanafi community. No one really has any concern for the Bid’ati, modernist miscreants. However, there is a need for the Muslim community to understand that Reverend Abraham Bham is a shaitaan in human form. In this there is no doubt. The photo testifies vociferously to his fisq, fujoor, dhalaal and zandaqah. His very Imaan is in doubt. The late Sufi Pandor Sahib (Rahmatullah alayh) would turn away from the Musjid if Reverend Abraham Bham happened to be leading the Salaat. He would go to another Musjid, since he believed that Salaat behind this Iblees in human form was not valid. At that time, despite our vigorous criticism of this Reverend character, we would advise that Salaat is valid behind him. However, now we have changed this view. Our present stance is that SALAAT BEHIND THIS EVIL CHARACTER WHO STABS THE DEEN IN THE BACK IS NOT VALID. Henceforth, anyone who suffers the misfortune of being trapped in Salaat behind this treacherous Reverend Abraham Bham should repeat his Salaat. Rasulullah’s fear for the Reverend type of devil is sufficient to register the villainy and evil of this Bham character. His public appearance in a haraam photo, flagrantly advertised in violation of every rule in the Book of the Shariah, clearly demonstrates the agency of Shaitaan which this Reverend operates. He is always seen at haraam and dubious public functions scrounging for publicity, name and fame like a sewer rat at the bottom of the barrel of filth and muck. Whilst he professes to be a Muslim, this villain cannot be a Muslim in terms of the Qur’aan and Sunnah. Shiahs also proclaim themselves to be Muslims whilst they are among the worst kuffaar. Yet the Reverend Abraham Bham is perfectly at home with the Shiahs and the neo-Shiahs and the pro-Shiah Ninowy crowd of shayaateen. The entire cabal in the haraam photo is the League of Iblees. This villain Reverend advertises himself as a ‘maulana’, yet he has no haya, no sharam – not the slightest vestige of it – to join a group of villains, fussaaq and fujjaar, together with faasiqaat and faajiraat, flagrantly posing their ugly snouts for public consumption in flagrant and rebellious violation of Allah’s prohibition. Together with the ugly mob of moron imbeciles with whom he poses, the Reverend Villain Abraham Bham is a murderer of Rasulullah (Sallallahu alayhi wasallam), for he murders and disgracefully tramples on the Shariah and Sunnah of Muhammadur Rasulullah (Sallallahu alayhi wasallam). It is HARAAM to offer Salaam to the villain reverend. It is HARAAM to respond to his Salaam. He has confirmed that he is an open enemy of the Deen. So should he be treated. May Allah Ta’ala save the masses from the evil, satanic clutches, traps, wiles and wares of this Iblees who prowls in the community in human form.
from __future__ import print_function

import hashlib
import json
import os
import sys
from contextlib import contextmanager

from leapp.exceptions import LeappRuntimeError
from leapp.models import ErrorModel
from leapp.utils.audit import get_audit_entry


def _is_verbose():
    """Redefinition of is_verbose from leapp.libraries.stdlib.config because it leads to import errors"""
    return os.getenv('LEAPP_VERBOSE', '0') == '1'


class Color(object):
    reset = "\033[0m" if sys.stdout.isatty() else ""
    bold = "\033[1m" if sys.stdout.isatty() else ""
    red = "\033[1;31m" if sys.stdout.isatty() else ""
    green = "\033[1;32m" if sys.stdout.isatty() else ""
    yellow = "\033[1;33m" if sys.stdout.isatty() else ""


def pretty_block_text(string, color=Color.bold, width=60):
    return "\n{color}{separator}\n{text}\n{separator}{reset}\n".format(
        color=color,
        separator="=" * width,
        reset=Color.reset,
        text=string.center(width))


@contextmanager
def pretty_block(text, target, end_text=None, color=Color.bold, width=60):
    target.write(pretty_block_text(text, color=color, width=width))
    target.write('\n')
    yield
    target.write(pretty_block_text(end_text or 'END OF {}'.format(text), color=color, width=width))
    target.write('\n')


def print_error(error):
    model = ErrorModel.create(json.loads(error['message']['data']))
    sys.stdout.write("{red}{time} [{severity}]{reset} Actor: {actor}\nMessage: {message}\n".format(
        red=Color.red, reset=Color.reset, severity=model.severity.upper(),
        message=model.message, time=model.time, actor=model.actor))
    if model.details:
        print('Summary:')
        details = json.loads(model.details)
        for detail in details:
            print('    {k}: {v}'.format(
                k=detail.capitalize(),
                v=details[detail].rstrip().replace('\n', '\n' + ' ' * (6 + len(detail)))))


def report_inhibitors(context_id):
    # The following imports are required to be here to avoid import loop problems
    from leapp.reporting import Flags  # pylint: disable=import-outside-toplevel
    from leapp.utils.report import fetch_upgrade_report_messages  # pylint: disable=import-outside-toplevel
    reports = fetch_upgrade_report_messages(context_id)
    inhibitors = [report for report in reports if Flags.INHIBITOR in report.get('flags', [])]
    if inhibitors:
        text = 'UPGRADE INHIBITED'
        with pretty_block(text=text, end_text=text, color=Color.red, target=sys.stdout):
            print('Upgrade has been inhibited due to the following problems:')
            for position, report in enumerate(inhibitors, start=1):
                print('{idx:5}. Inhibitor: {title}'.format(idx=position, title=report['title']))
            print('Consult the pre-upgrade report for details and possible remediation.')


def report_deprecations(context_id, start=None):
    deprecations = get_audit_entry(event='deprecation', context=context_id)
    if start:
        start_stamp = start.isoformat() + 'Z'
        deprecations = [d for d in deprecations if d['stamp'] > start_stamp]
    if deprecations:
        cache = set()
        with pretty_block("USE OF DEPRECATED ENTITIES", target=sys.stderr, color=Color.red):
            for deprecation in deprecations:
                entry_data = json.loads(deprecation['data'])
                # Deduplicate messages (encode first: sha256 expects bytes, not str)
                key = hashlib.sha256(json.dumps(entry_data, sort_keys=True).encode('utf-8')).hexdigest()
                if key in cache:
                    continue
                # Add current message to the cache
                cache.add(key)
                # Print the message
                sys.stderr.write(
                    '{message} @ {filename}:{lineno}\nNear: {line}\nReason: {reason}\n{separator}\n'.format(
                        separator='-' * 60, **entry_data)
                )


def report_errors(errors):
    if errors:
        with pretty_block("ERRORS", target=sys.stdout, color=Color.red):
            for error in errors:
                print_error(error)


def report_info(report_paths, log_paths, answerfile=None, fail=False):
    report_paths = [report_paths] if not isinstance(report_paths, list) else report_paths
    # Fixed: the original tested isinstance(report_paths, list) here as well.
    log_paths = [log_paths] if not isinstance(log_paths, list) else log_paths

    if log_paths:
        sys.stdout.write("\n")
        for log_path in log_paths:
            sys.stdout.write("Debug output written to {path}\n".format(path=log_path))

    if report_paths:
        with pretty_block("REPORT", target=sys.stdout, color=Color.bold if fail else Color.green):
            for report_path in report_paths:
                sys.stdout.write("A report has been generated at {path}\n".format(path=report_path))

    if answerfile:
        sys.stdout.write("Answerfile has been generated at {}\n".format(answerfile))


def report_unsupported(devel_vars, experimental):
    text = "UNSUPPORTED UPGRADE"
    with pretty_block(text=text, end_text=text, target=sys.stdout, color=Color.yellow):
        sys.stdout.write("Variable LEAPP_UNSUPPORTED has been detected. Proceeding at your own risk.\n")
        if devel_vars:
            sys.stdout.write("{yellow}Development variables{reset} have been detected:\n".format(
                yellow=Color.yellow, reset=Color.reset))
            for key in devel_vars:
                sys.stdout.write("- {key}={value}\n".format(key=key, value=devel_vars[key]))
        if experimental:
            sys.stdout.write("{yellow}Experimental actors{reset} have been detected:\n".format(
                yellow=Color.yellow, reset=Color.reset))
            for actor in experimental:
                sys.stdout.write("- {actor}\n".format(actor=actor))


@contextmanager
def beautify_actor_exception():
    try:
        yield
    except LeappRuntimeError as e:
        msg = '{} - Please check the above details'.format(e.message)
        sys.stderr.write("\n")
        sys.stderr.write(pretty_block_text(msg, color="", width=len(msg)))


def display_status_current_phase(phase):
    if not _is_verbose():
        print('==> Processing phase `{name}`'.format(name=phase[0].name))


def _get_description_title(actor):
    lines = actor.description.strip().split('\n')
    return lines[0].strip() if lines else actor.name


def display_status_current_actor(actor, designation=''):
    if not _is_verbose():
        print('====> * {actor}{designation}\n        {title}'.format(
            actor=actor.name,
            title=_get_description_title(actor),
            designation=designation))
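A minimal sketch of exercising the block helpers above on their own; the report path printed here is made up for illustration:

    import sys

    # Hypothetical stand-alone use of pretty_block(); the path is illustrative.
    with pretty_block("REPORT", target=sys.stdout, color=Color.green):
        sys.stdout.write("A report has been generated at /tmp/example-report.txt\n")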
Used sailing boat COLVIC WATSON 32 MOTORSAILER for sale, located in Greece, built in 1977. The manufacturer of the boat is Colvic. Its overall length is 9.8 meters, its beam is 3.43 meters, and its draft is 1.22 m. The «Lister» engine uses diesel fuel, and fuel capacity is 480 liters. You can buy the COLVIC WATSON 32 MOTORSAILER for just 15000 GBP. Want to buy a COLVIC WATSON 32 MOTORSAILER in Greece? Contact now!
# -*- coding:utf-8 -*-
#
# Copyright (C) 2012, Maximilian Köhl <linuxmaxi@googlemail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import threading

from grooveshark.const import *


class Song(object):
    """
    Represents a song. Do not use this class directly.

    :param id: internal song id
    :param name: name
    :param artist_id: artist's id to generate an :class:`Artist` object
    :param artist_name: artist's name to generate an :class:`Artist` object
    :param album_id: album's id to generate an :class:`Album` object
    :param album_name: album's name to generate an :class:`Album` object
    :param cover_url: album's cover to generate an :class:`Album` object
    :param track: track number
    :param duration: estimate song duration
    :param popularity: popularity
    :param connection: underlying :class:`Connection` object
    """

    def __init__(self, id, name, artist_id, artist_name, album_id, album_name,
                 cover_url, track, duration, popularity, connection):
        self._connection = connection
        self._id = id
        self._name = name
        self._artist_id = artist_id
        self._artist_name = artist_name
        self._album_id = album_id
        self._album_name = album_name
        self._cover_url = cover_url
        if not self._cover_url:
            self._cover_url = NO_COVER_URL
        self._track = track
        self._duration = duration
        self._popularity = popularity
        self._artist = None
        self._album = None

    def __str__(self):
        return '%s - %s - %s' % (self.name, self.artist.name, self.album.name)

    @classmethod
    def from_response(cls, song, connection):
        return cls(song['SongID'],
                   song['Name'] if 'Name' in song else song['SongName'],
                   song['ArtistID'], song['ArtistName'],
                   song['AlbumID'], song['AlbumName'],
                   ALBUM_COVER_URL + song['CoverArtFilename'] if song['CoverArtFilename'] else None,
                   song['TrackNum'], song['EstimateDuration'], song['Popularity'],
                   connection)

    @classmethod
    def from_export(cls, export, connection):
        return cls(export['id'], export['name'], export['artist_id'], export['artist'],
                   export['album_id'], export['album'], export['cover'], export['track'],
                   export['duration'], export['popularity'], connection)

    @property
    def id(self):
        """internal song id"""
        return self._id

    @property
    def name(self):
        """song name"""
        return self._name

    @property
    def artist(self):
        """artist as :class:`Artist` object"""
        if not self._artist:
            self._artist = Artist(self._artist_id, self._artist_name, self._connection)
        return self._artist

    @property
    def album(self):
        """album as :class:`Album` object"""
        if not self._album:
            self._album = Album(self._album_id, self._album_name, self._artist_id,
                                self._artist_name, self._cover_url, self._connection)
        return self._album

    @property
    def track(self):
        """track number"""
        return self._track

    @property
    def duration(self):
        """estimate song duration"""
        return self._duration

    @property
    def popularity(self):
        """popularity"""
        return self._popularity

    @property
    def stream(self):
        """:class:`Stream` object for playing"""
        # Add song to queue
        self._connection.request(
            'addSongsToQueue',
            {'songIDsArtistIDs': [{'artistID': self.artist.id,
                                   'source': 'user',
                                   'songID': self.id,
                                   'songQueueSongID': 1}],
             'songQueueID': self._connection.session.queue},
            self._connection.header('addSongsToQueue', 'jsqueue'))
        stream_info = self._connection.request(
            'getStreamKeyFromSongIDEx',
            {'songID': self.id,
             'country': self._connection.session.country,
             'prefetch': False,
             'mobile': False},
            self._connection.header('getStreamKeyFromSongIDEx', 'jsqueue'))[1]
        return Stream(stream_info['ip'], stream_info['streamKey'], self._connection)

    def export(self):
        """
        Returns a dictionary with all song information.
        Use the :meth:`from_export` method to recreate the :class:`Song` object.
        """
        return {'id': self.id, 'name': self.name, 'artist': self._artist_name,
                'artist_id': self._artist_id, 'album': self._album_name,
                'album_id': self._album_id, 'track': self.track,
                'duration': self.duration, 'popularity': self.popularity,
                'cover': self._cover_url}

    def format(self, pattern):
        """
        Format the song according to certain patterns:

        %a: artist title
        %s: song title
        %A: album title
        """
        pattern = pattern.replace('%a', self.artist.name)
        pattern = pattern.replace('%s', self.name)
        pattern = pattern.replace('%A', self.album.name)
        return pattern.replace('/', '').replace('\\', '')

    def download(self, directory='~/Music', song_name='%a - %s - %A'):
        """
        Download a song to a directory.

        :param directory: A system file path.
        :param song_name: A name that will be formatted with :meth:`format`.
        :return: The formatted song name.
        """
        formatted = self.format(song_name)
        path = os.path.expanduser(directory) + os.path.sep + formatted + '.mp3'
        raw = self.safe_download()
        with open(path, 'wb') as f:
            f.write(raw)
        return formatted

    def safe_download(self):
        """
        Download a song respecting Grooveshark's API.

        :return: The raw song data.
        """
        def _markStreamKeyOver30Seconds(stream):
            self._connection.request(
                'markStreamKeyOver30Seconds',
                {'streamServerID': stream.ip,
                 'artistID': self.artist.id,
                 'songQueueID': self._connection.session.queue,
                 'songID': self.id,
                 'songQueueSongID': 1,
                 'streamKey': stream.key},
                self._connection.header('markStreamKeyOver30Seconds', 'jsqueue'))

        stream = self.stream
        # Pretend to be a regular player: report the stream after 30 seconds.
        timer = threading.Timer(30, _markStreamKeyOver30Seconds, [stream])
        timer.start()
        raw = stream.data.read()
        if len(raw) == stream.size:
            timer.cancel()
            self._connection.request(
                'markSongDownloadedEx',
                {'streamServerID': stream.ip,
                 'songID': self.id,
                 'streamKey': stream.key},
                self._connection.header('markSongDownloadedEx', 'jsqueue'))
            self._connection.request(
                'removeSongsFromQueue',
                {'userRemoved': True,
                 'songQueueID': self._connection.session.queue,
                 'songQueueSongIDs': [1]},
                self._connection.header('removeSongsFromQueue', 'jsqueue'))
            return raw
        else:
            raise ValueError("Content-Length {}, but read {}".format(stream.size, len(raw)))


# Imported at the bottom to avoid circular imports.
from grooveshark.classes.artist import Artist
from grooveshark.classes.album import Album
from grooveshark.classes.stream import Stream
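A hypothetical usage sketch; how a Song instance is first obtained (via the wider client library) is assumed here, so some_song is a placeholder rather than an API from this module:

    # Hypothetical: `some_song` is a Song instance obtained elsewhere.
    exported = some_song.export()
    restored = Song.from_export(exported, some_song._connection)
    print(restored.format('%a - %s - %A'))   # "Artist - Title - Album"
    restored.download(directory='~/Music')   # writes "<formatted>.mp3"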
San Jose, CA - Official Website - Volunteer with Us!

Your help is needed! Volunteers are an integral part of the City of San Jose! Come volunteer—we have many opportunities, right in your neighborhood! To sign up for volunteer opportunities, register online! All volunteers must complete the City of San Jose Volunteer Service Agreement. Those 17 years old and younger must also have a parent sign the Volunteer Service Agreement.

San Jose has over 200 city parks! Everything in our parks keeps growing, so volunteer help is vital to enhancing the work of park staff. Volunteering in city parks is a healthy, outdoor activity, in the fresh air and sunshine! Volunteers must be at least 6 years old.

The Adopt-A-Park (AAP) program is our long-term opportunity. Interested volunteers can be individuals, families, friends, groups or neighbors. Parks can be adopted by one or more entities, as there is always something to do! Which parks are available for adoption?

One Day Volunteer Events are typically done in 3-4 hours. Generally, they run from 8:45 to 12:00 noon, any day of the week. All supplies are provided—we simply need your time and muscle! Call (408) 595-3483 to schedule a park clean-up event for your group! Please allow a 4-week planning period.

Sunday Series: Volunteer on Sunday Morning! Free on Sunday mornings? Come volunteer! Every 10 weeks, we visit a park in each City of San Jose Council district. Find the details for our current Sunday Series!

Does your work team want to participate in a service event? Park clean-ups are great for team-building! After volunteering, stay at the park to enjoy the great outdoors! Alum Rock is the oldest municipal park in California. Join the crew that keeps the hiking trails safe and secure. Make a difference—right in your neighborhood!

Please note: park volunteers cannot use power tools, and they cannot use any chemicals, herbicides or pesticides. Fingerprinting and a TB test are required for most volunteer positions.

To apply for a City of San Jose volunteer experience, please complete our Volunteer Service Agreement. After staff has reviewed the application, the volunteer office will contact you regarding next steps.
# -*- coding: utf-8 -*-
from imports import *

################################## Special column pages ##################################

# Special column list page
@app.route('/special_all', methods=['GET'])
def special_all():
    try:
        view = request.args.get('view')
        sort = request.args.get('sort')
        page_id = int(request.args.get('page'))
    except Exception:
        abort(404)

    if sort != 'favor':
        sort = 'time'
        sort_change_url = '/special_all?view=%s&sort=favor&page=1' % (view)
    else:
        sort_change_url = '/special_all?view=%s&sort=time&page=1' % (view)

    if view != 'list':
        view = 'all'
        view_change_url = '/special_all?view=list&sort=%s&page=1' % (sort)
    else:
        view_change_url = '/special_all?view=all&sort=%s&page=1' % (sort)

    if view == 'list':
        # list view
        specials_pagination = get_all_specials(sort, page_id, 5)
        return render_template('special_all_listView.html',
                               sort=sort,
                               view=view,
                               specials_pagination_list=specials_pagination,
                               author=get_special_author,
                               articles=get_special_article,
                               sort_change_url=sort_change_url,
                               view_change_url=view_change_url)
    else:
        # all view
        specials_pagination = get_all_specials(sort, page_id, 12)
        return render_template('special_all_allView.html',
                               sort=sort,
                               view=view,
                               specials_pagination_all=specials_pagination,
                               author=get_special_author,
                               articles=get_special_article,
                               sort_change_url=sort_change_url,
                               view_change_url=view_change_url)

# Special column list search
@app.route('/special_search', methods=['GET'])
def special_search():
    try:
        search = request.args.get('search')
        if search == '':
            abort(404)
    except Exception:
        abort(404)
    specials_pagination = get_search_specials(search)
    return render_template('special_search.html',
                           specials_pagination=specials_pagination,
                           author=get_special_author)

# Special column detail page
@app.route('/special', methods=['GET'])
def special():
    # URL pattern: http://127.0.0.1:5000/special?id=2&page=1&sort=time
    try:
        special_id = int(request.args.get('id'))
        page_id = int(request.args.get('page'))
        sort = request.args.get('sort')
    except Exception:
        abort(404)

    # Only two sort orders exist: 'favor' and 'time'
    if sort != 'favor':
        sort = 'time'
        sort_change_url = "/special?id=%d&page=1&sort=favor" % (special_id)
    else:
        sort_change_url = "/special?id=%d&page=1&sort=time" % (special_id)

    special = get_special_information(special_id)
    if special is None:
        abort(404)
    author = get_special_author(special.special_id)

    # Pagination object for the articles; articles_pagination.items returns
    # everything on the current page as a list.
    login_user = get_userid_from_session()
    articles_pagination = get_special_article(special_id, page_id, sort, 5)
    related_other_special = get_related_special(special.special_id)

    is_mobile = is_small_mobile_device(request)
    if is_mobile:
        return render_template('mobile_special_detail.html',
                               login_user_id=login_user,
                               is_mobile=is_mobile,
                               root_authorized=root_authorized(),
                               # author_itself = (special.user_id == login_user),
                               has_collected_special=get_special_collect_info(login_user, special_id),
                               has_collected_author=has_collected,
                               sort_change_url=sort_change_url,
                               special_id=special_id,
                               sort=sort,
                               special_favor=special.favor,
                               special_title=special.name,
                               special_author=author,
                               # special_author_slogon = author.slogon,
                               special_introduction=special.introduction,
                               special_style=special.style,
                               special_total_issue=special.total_issue,
                               special_update_frequency=special.update_frequency,
                               special_coin=special.coin,
                               special_image=special.picture,
                               # special_author_avatar = author.photo,
                               articles_pagination=articles_pagination,
                               get_nick_by_userid=get_nick_by_userid)
    else:
        return render_template('special_detail.html',
                               len=len,
                               author=get_special_author,
                               login_user_id=login_user,
                               is_mobile=is_mobile,
                               root_authorized=root_authorized(),
                               # author_itself = (special.user_id == login_user),
                               has_collected_special=get_special_collect_info(login_user, special_id),
                               has_collected_author=has_collected,
                               sort_change_url=sort_change_url,
                               special_id=special_id,
                               sort=sort,
                               other=get_special_author_other,
                               special_favor=special.favor,
                               special_title=special.name,
                               special_author=author,
                               # special_author_slogon = author.slogon,
                               special_introduction=special.introduction,
                               special_style=special.style,
                               special_total_issue=special.total_issue,
                               special_update_frequency=special.update_frequency,
                               special_coin=special.coin,
                               special_image=special.picture,
                               # special_author_avatar = author.photo,
                               articles_pagination=articles_pagination,
                               related_other_special=related_other_special,
                               get_nick_by_userid=get_nick_by_userid)

## Page for creating a special column
@app.route('/create_special')
@login_required
def create_special():
    if not root_authorized():
        abort(404)
    return render_template('create_special.html')

## Page for modifying a special column
@app.route('/modify_special')
@login_required
def modify_special():
    if not root_authorized():
        abort(404)
    return render_template('modify_special.html')

## Upload the title image for a special column
@app.route('/upload_special_title_image', methods=['GET', 'POST'])
def save_special_title_image():
    title_image = request.files['upload_file']
    # Set the default title image
    title_image_name = 'special_upload_pic.jpg'
    if title_image:
        if allowed_file(title_image.filename):
            title_image_name = get_secure_photoname(title_image.filename)
            title_image_url = os.path.join(app.config['SPECIAL_DEST'], title_image_name)
            title_image.save(title_image_url)
    return app.config['HOST_NAME'] + '/upload/special/' + title_image_name

# Launch the Meitu Xiuxiu image tailor
@app.route('/upload/tailor/special_title_image')
def upload_special_title_image():
    return render_template('upload_special_title_image_tailor.html')

## Finish creating a special column
@app.route('/create_special_finish', methods=['GET'])
@login_required
def create_special_finish():
    if not root_authorized():
        abort(404)
    try:
        title = request.args.get('title')
        content = request.args.get('content')
        title_image = request.args.get('title_image')
        style = request.args.get('style')
        total_issue = request.args.get('total_issue')
        update_frequency = request.args.get('update_frequency')
    except Exception:
        return "failed"

    authors = []
    try:
        author_list = eval(request.args.get('author_list'))
        for nick in author_list:
            author = get_userid_by_nick(nick)
            if len(author) == 0:
                return "nick_error"
            authors.append(author[0][0])
    except Exception:
        return "failed"

    special_id = create_new_special(name=title,
                                    # user_id = author[0][0],
                                    picture=title_image,
                                    introduction=content,
                                    style=style,
                                    total_issue=total_issue,
                                    update_frequency=update_frequency)
    for author in authors:
        create_new_special_author(special_id, author)
    return str(special_id)

## Finish modifying a special column
@app.route('/modify_special_finish', methods=['GET'])
@login_required
def modify_special_finish():
    if not root_authorized():
        abort(404)
    try:
        title = request.args.get('title')
        content = request.args.get('content')
        title_image = request.args.get('title_image')
        style = request.args.get('style')
        total_issue = request.args.get('total_issue')
        update_frequency = request.args.get('update_frequency')
    except Exception:
        return "failed"

    authors = []
    try:
        author_list = eval(request.args.get('author_list'))
        for nick in author_list:
            author = get_userid_by_nick(nick)
            if len(author) == 0:
                return "nick_error"
            authors.append(author[0][0])
    except Exception:
        return "failed"

    try:
        special_id = modify_special_func(name=title,
                                         # user_id = author[0][0],
                                         authors=authors,
                                         picture=title_image,
                                         introduction=content,
                                         style=style,
                                         total_issue=total_issue,
                                         update_frequency=update_frequency)
        return str(special_id)
    except Exception:
        return "failed"

## Edit a special column article
@app.route('/special_article_upload', methods=['GET'])
@login_required
def special_article_upload():
    try:
        special_id = int(request.args.get('id'))
    except Exception:
        abort(404)
    #### TODO
    # author = get_special_information(special_id).user_id
    # login_user = get_userid_from_session()
    if not root_authorized():
        abort(404)
    article_session_id = get_article_session_id()
    session['special_article_session_id'] = str(article_session_id)
    session['special_id'] = str(special_id)
    os.makedirs(os.path.join(app.config['ARTICLE_CONTENT_DEST'], str(article_session_id)))
    return render_template('special_article_upload.html')

# Modify a special column article
@app.route('/special_article_modify/article/<int:article_id>')
@login_required
def special_article_modify(article_id):
    article = get_article_information(article_id)
    try:
        special_id = int(article[0].special_id)
    except Exception:
        abort(404)
    if not root_authorized():
        abort(404)
    session['special_id'] = str(article[0].special_id)
    session['special_article_session_id'] = str(article[0].article_session_id)
    return render_template('special_article_modify.html',
                           article=article[0],
                           book=article[2],
                           get_author=get_nick_by_userid)

# Delete a special column article
@app.route('/special_article_remove', methods=['GET'])
def special_article_remove():
    try:
        article_id = request.args.get('id')
    except Exception:
        return "failed"
    user_id = get_userid_from_session()
    if delete_article_by_article_id(article_id, user_id) == 'fail':
        return "failed"
    return "success"

## Publish a special column article
## TODO: the submission may come from a draft already stored in the database;
## in that case only the is_draft field needs to be flipped.
@app.route('/special_article_finish', methods=['POST'])
def special_article_finish():
    content = request.form['content']
    title = request.form['title']  ## TODO: sanitize the article title
    title_image = request.form['title_image']
    abstract_abstract_with_img = request.form['abstract']
    book_picture = request.form['book_picture']
    book_author = request.form['book_author']
    book_press = request.form['book_press']
    book_page_num = request.form['book_page_num']
    book_price = request.form['book_price']
    book_press_time = request.form['book_press_time']
    book_title = request.form['book_title']
    book_ISBN = request.form['book_ISBN']
    book_binding = request.form['book_binding']
    special_author = request.form['special_author']

    try:
        user_id = get_userid_by_nick(special_author)[0][0]
        if not has_special_author(int(session['special_id']), user_id):
            raise Exception
    except Exception:
        return "nick"

    abstract_plain_text = get_abstract_plain_text(abstract_abstract_with_img)
    if len(abstract_plain_text) < 191:
        abstract = abstract_plain_text[0:len(abstract_plain_text) - 1] + '......'
    else:
        abstract = abstract_plain_text[0:190] + '......'

    book_id = create_book(book_picture=book_picture,
                          book_author=book_author,
                          book_press=book_press,
                          book_page_num=book_page_num,
                          book_price=book_price,
                          book_press_time=book_press_time,
                          book_title=book_title,
                          book_ISBN=book_ISBN,
                          book_binding=book_binding)
    article_id = create_article(title=title,
                                content=content,
                                title_image=title_image,
                                user_id=user_id,
                                article_session_id=session['special_article_session_id'],
                                is_draft='0',
                                special_id=int(session['special_id']),
                                group_id='3',
                                category_id='0',
                                abstract=abstract,
                                book_id=book_id)
    update_article_num_for_special(int(session['special_id']), True)
    session.pop('special_id', None)
    session.pop('special_article_session_id', None)
    return str(article_id)

# Save a special column draft
@app.route('/special_article_draft', methods=['POST'])
def special_article_draft():
    content = request.form['content']
    title = request.form['title']  ## TODO: sanitize the article title
    title_image = request.form['title_image']
    abstract_abstract_with_img = request.form['abstract']
    book_picture = request.form['book_picture']
    book_author = request.form['book_author']
    book_press = request.form['book_press']
    book_page_num = request.form['book_page_num']
    book_price = request.form['book_price']
    book_press_time = request.form['book_press_time']
    book_title = request.form['book_title']
    book_ISBN = request.form['book_ISBN']
    book_binding = request.form['book_binding']
    special_author = request.form['special_author']

    try:
        user_id = get_userid_by_nick(special_author)[0][0]
        if not has_special_author(int(session['special_id']), user_id):
            raise Exception
    except Exception:
        return "nick"

    abstract_plain_text = get_abstract_plain_text(abstract_abstract_with_img)
    if len(abstract_plain_text) < 191:
        abstract = abstract_plain_text[0:len(abstract_plain_text) - 1] + '......'
    else:
        abstract = abstract_plain_text[0:190] + '......'

    # create_article(title=title, content=content, title_image=title_image, user_id=user_id, article_session_id=session['article_session_id'], is_draft='1', group_id=group_id, category_id=category_id, abstract=abstract)
    book_id = create_book(book_picture=book_picture,
                          book_author=book_author,
                          book_press=book_press,
                          book_page_num=book_page_num,
                          book_price=book_price,
                          book_press_time=book_press_time,
                          book_title=book_title,
                          book_ISBN=book_ISBN,
                          book_binding=book_binding)
    article_id = create_article(title=title,
                                content=content,
                                title_image=title_image,
                                user_id=user_id,
                                article_session_id=session['special_article_session_id'],
                                is_draft='1',
                                special_id=int(session['special_id']),
                                group_id='3',
                                category_id='0',
                                abstract=abstract,
                                book_id=book_id)
    return str(article_id)

################################## Serving uploaded special column images ##################################

@app.route('/upload/special/<filename>')
def uploaded_special_image(filename):
    return send_from_directory(app.config['SPECIAL_DEST'], filename)
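A quick smoke-test sketch with Flask's built-in test client, assuming the app object exported by imports is the Flask application instance; since the routes depend on the database helpers, the exact status code is not guaranteed:

    # Hypothetical smoke test; assumes `app` is the Flask application instance.
    with app.test_client() as client:
        response = client.get('/special_all?view=list&sort=time&page=1')
        print(response.status_code)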
Paul Gallen, Tim Tszyu and Justin Hodges all scored KO victories on fight night at the Hordern Pavilion. Here are five things we learned from the night of fights.

There's been plenty of talk about Gallen facing Williams, and it's pretty clear now is the time to make the fight. Williams may not have fought since 2015, but he's the logical opponent for Gallen, who is now too accomplished to take on debutant footballers but likely wouldn't want to take the step up and fight a more experienced professional. Gallen displayed a nice jab to the body, a weapon he has used to great effect in the past, and scored the most spectacular knockout of his career against Hopoate, who looked every bit of 45. Williams hasn't pulled the gloves on since beating Chauncy Welliver in 2015, but in Welliver and Frans Botha he has two big scalps. Gallen also said he would fight Manu Vatuvei or Justin Hodges, but Williams shapes as a good post-season option as Gallen's final fight.

Tszyu's name alone guarantees him a certain amount of hype, but he's got the ability to back it up. He dispatched Denton Vassell with relative ease in two rounds, and his big right hand was a serious highlight. Tszyu landed with accuracy and discipline en route to the latest win of his career. At 12-0, it may be time for a step up in class before he heads over to Europe and America.

It's difficult to judge Hodges given his age, inexperience and the quality of his opponent, but he threw some nice uppercuts en route to his win over Rob Baron. While it was hardly the kind of stuff Joe Frazier dreamt of, Hodges landed nicely a couple of times on the way to his easy win. Gallen may be too experienced an opponent at the moment, but we're betting Hodges enters the ring again - maybe on the undercard of Anthony Mundine's ill-fated comeback.
import sys
import traceback
from stat import S_IRUSR, S_IXUSR, S_IWUSR, S_IRGRP, S_IXGRP, S_IXOTH, S_IROTH
from errno import ENOENT

import fuse

from fs import FS, BaseMetadata

fs = FS.get()


class Out(object):
    """Minimal file-like object used to capture stdout during evaluation."""

    def __init__(self):
        self.out = ''

    def write(self, data):
        self.out += data

    def read(self):
        return self.out


def evalcode(code):
    """Execute a code string and return whatever it printed, or the last
    line of the traceback if execution raised."""
    old_stdout = sys.stdout
    try:
        new_stdout = Out()
        sys.stdout = new_stdout
        eval(compile(code, "<eval>", "exec"))
        return new_stdout.read()
    except:
        error = traceback.format_exc().strip().split("\n")[-1]
        return error
    finally:
        sys.stdout = old_stdout


@fs.route('/eval')
@fs.route('/eval/<filepath>.py')
class Eval(object):
    """A virtual directory whose .py files are executed when read."""

    def __init__(self):
        file_mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
        self.file_metadata = BaseMetadata(file_mode, False)
        root_mode = S_IRUSR | S_IXUSR | S_IWUSR | S_IRGRP | S_IXGRP | S_IXOTH | S_IROTH
        self.dir_metadata = BaseMetadata(root_mode, True)
        self.files = {}

    def create(self, *args, **kwargs):
        self.files[kwargs['filepath']] = ''
        return 0

    def open(self, flags, **kwargs):
        return 0

    def getattr(self, *args, **kwargs):
        if 'filepath' in kwargs:
            data = kwargs['filepath']
            if data not in self.files:
                return -ENOENT
            # File size is the size of the *evaluated* output.
            data = evalcode(self.files[data])
            self.file_metadata.st_size = len(data)
            return self.file_metadata
        return self.dir_metadata

    def readdir(self, *args, **kwargs):
        for i in self.files:
            yield fuse.Direntry('%s.py' % i)

    def read(self, size, offset, *args, **kwargs):
        key = kwargs['filepath']
        data = evalcode(self.files[key])
        return data[offset:size + offset]

    def write(self, buf, offset, fh=None, **kwargs):
        key = kwargs['filepath']
        prev_data = self.files[key]
        new_data = prev_data[:offset] + buf + prev_data[offset + len(buf):]
        if offset + len(new_data) > len(prev_data):
            self.truncate(offset + len(new_data), filepath=key)
        self.files[key] = new_data
        return len(buf)

    def truncate(self, size, fh=None, **kwargs):
        key = kwargs['filepath']
        prev_data = self.files[key]
        prev_size = len(prev_data)
        if size > prev_size:
            new_data = prev_data + (size - prev_size) * "0"
        else:
            new_data = prev_data[0:size]
        self.files[key] = new_data
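# --- Hedged usage sketch (not part of the original module) ------------------
# evalcode() is self-contained, so its capture-and-report behaviour can be
# checked without mounting the filesystem; the outputs in the comments are
# what we would expect, not captured from a real run.
print(evalcode("print('hello')"))  # expected: "hello\n" (captured stdout)
print(evalcode("1/0"))             # expected: the final traceback line of a ZeroDivisionError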
We have the ability to reverse-engineer Rotary Lift units. To send in your Rotary Lift unit for repair, use the shipping address below. If you have any technical questions, or wish to offer technical information about any Rotary Lift unit, fill out the form below.
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.timezone import now

from base.util import with_timestamp, with_author, remove_accents


# Competition-related models

@with_author
@with_timestamp
class Competition(models.Model):
    """
    Represents a competition. One roots site will usually hold several
    competitions, as there are usually several age categories or several
    subject categories. Or both.
    """

    name = models.CharField(max_length=100)
    organizer_group = models.ForeignKey(Group, blank=True, null=True)

    # Fields added via foreign keys:
    #     competitionorgregistration_set
    #     competitionuserregistration_set
    #     gallery_set
    #     leaflet_set
    #     post_set
    #     problemset_set
    #     season_set
    #     user_set

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ['name']
        verbose_name = 'Competition'
        verbose_name_plural = 'Competitions'


@with_author
@with_timestamp
class CompetitionUserRegistration(models.Model):
    """
    Represents a relation between a user and a competition. Users can
    register into a competition themselves if they satisfy the conditions.
    """

    competition = models.ForeignKey('competitions.Competition')
    user = models.ForeignKey('profiles.UserProfile')

    def __unicode__(self):
        return (self.user.__unicode__() + u" competes in " +
                self.competition.__unicode__())

    class Meta:
        ordering = ['added_at']
        verbose_name = 'User registration'
        verbose_name_plural = 'User registrations'


@with_author
@with_timestamp
class CompetitionOrgRegistration(models.Model):
    """
    Represents a relation between an organizer and a competition. An
    organizer can help organize multiple competitions. Organizer
    registrations have to be approved.
    """

    competition = models.ForeignKey('competitions.Competition')
    organizer = models.ForeignKey('profiles.UserProfile')
    approved = models.BooleanField()

    def __unicode__(self):
        return (self.organizer.__unicode__() + u" organizes " +
                self.competition.__unicode__())

    class Meta:
        ordering = ['added_at']
        verbose_name = 'Organizer registration'
        verbose_name_plural = 'Organizer registrations'


@with_author
@with_timestamp
class Season(models.Model):
    """
    Represents one season of a competition. This is usually the autumn or
    spring season. Using this model, however, we are not limited to two
    seasons per year. During each Season there might be several ProblemSets
    published as parts of that season.
    """

    competition = models.ForeignKey('competitions.Competition')
    year = models.IntegerField()
    number = models.IntegerField()
    name = models.CharField(max_length=50)
    join_deadline = models.DateTimeField(blank=True, null=True)

    def __unicode__(self):
        template = "{name} ({competition} {year}-{number})"
        return template.format(competition=remove_accents(self.competition),
                               year=self.year,
                               number=self.number,
                               name=remove_accents(self.name),
                               )

    class Meta:
        ordering = ['competition', 'year', 'number']
        verbose_name = 'Season'
        verbose_name_plural = 'Seasons'


@with_author
@with_timestamp
class Series(models.Model):
    """
    Represents one series of problems in the season of the competition.
    """

    season = models.ForeignKey('competitions.Season')
    name = models.CharField(max_length=50)
    number = models.PositiveSmallIntegerField()
    problemset = models.OneToOneField('problems.ProblemSet', blank=True,
                                      null=True)
    submission_deadline = models.DateTimeField()
    is_active = models.BooleanField(default=False)

    def is_past_submission_deadline(self):
        return now() > self.submission_deadline

    def is_nearest_deadline(self):
        # Series are returned sorted by the submission deadline
        active_series = [s for s in Series.objects.all()
                         if not s.is_past_submission_deadline()]
        if active_series:
            return active_series[0] == self
        else:
            return False

    def clean(self, *args, **kwargs):
        if self.is_active:
            if not self.submission_deadline:
                raise ValidationError("Submission deadline must be set to "
                                      "make the series active")
            if not self.problemset:
                raise ValidationError("Corresponding set of problems must be "
                                      "set to make the series active")
            if self.is_past_submission_deadline():
                raise ValidationError("Series that is past its submission "
                                      "deadline cannot be made active")
        # Delegate to the parent's clean(); the original called save() here,
        # which would recurse into validation instead of validating.
        super(Series, self).clean()

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ['submission_deadline']
        unique_together = ('season', 'number')
        verbose_name = 'Series'
        verbose_name_plural = 'Series'
This is an interesting anecdote that has been getting a lot of attention on my Facebook page, and it makes a valid point about how our health priorities may be skewed. Gaining and maintaining optimal health is within your reach. Your car insurance doesn't pay for oil changes, but you do them anyway because you know they're what's best for your car. Your health insurance doesn't pay for a gym membership, supplements, bottled water or healthy foods, but many people do those things anyway because they know it's what's best for their bodies.

The same is true with chiropractic care. In order to have optimal health, you must have a clear neurological connection between your brain and all the parts of your body. And because the spine and nervous system are so intimately connected, the only way to have that clear brain-body connection - the one that allows optimal health and gives you the best possible chance at living an optimally happy and vital life, full of purpose - is to have an optimally aligned and functioning spine. THAT is what chiropractors do! We help you regain and maintain optimal spinal health, so that you can BE your best and GIVE your best!

Dirty Dozen -> BUY ORGANIC! Clean Fifteen -> CAN BUY CONVENTIONAL!

Background: Soybeans are legumes native to East Asia. Initially they were considered unfit for human consumption, but they were used in crop rotation as a method of fixing nitrogen to regenerate the soil. In East Asian cuisines, soybeans are fermented prior to moderate consumption into products such as tempeh, miso, natto, and soy sauce. The reason for fermentation is that the process removes the toxins present in the plant and makes the nutrients in the beans available to the body. To simplify preparation for soy consumption, after lengthy soaking and cooking, the beans were treated with nigari, a substance found in seawater, which resulted in tofu. When fermented soy foods are used in small amounts, they help build the inner ecosystem, providing a wealth of friendly microflora to the intestinal tract that can help with digestion and assimilation of nutrients, and boost immunity.

With industrialized processing, food manufacturers were left with a lot of soy protein that they started adding to processed foods after they split the beans into two different food products: soybean oil and soy protein (i.e. soy protein isolate, soy protein concentrate, textured vegetable protein or hydrolyzed vegetable protein). These substances are included in the majority of processed and fast foods (read the labels!).

Anti-Nutrients: Soy contains phytochemicals (phytates, enzyme inhibitors and goitrogens) that are toxic to the human body. They function as the immune system of the plant, offering protection from the radiation of the sun and from invasion by bacteria, viruses, fungi, or foragers. All plants have some anti-nutrient properties, but the soybean plant is especially rich in these chemicals. That is why extensive preparation prior to consumption is essential. Unfermented soy (whole soybeans, soy milk, soy chips, soy protein isolates, soy flour and other industrially processed products) has been linked to digestive distress, immune system breakdown, PMS, endometriosis, reproductive problems for men and women, allergies, ADD and ADHD, higher risk of heart disease and cancer, malnutrition, and loss of libido.
Populations most at risk of experiencing the negative effects of unfermented soy are infants (through soy baby formula), vegetarians with a high soy protein intake, and pre- and menopausal women in search of symptom relief.

Phytates: Bind tightly to minerals in our digestive tract and can block absorption of zinc, copper, iron, magnesium and calcium. Phytates have a particularly strong affinity for zinc, a mineral that supports wound healing, protein synthesis, reproductive health, nerve function, and brain development. Miso and tempeh have the lowest levels of phytates.

Enzyme inhibitors: Interfere with the digestive enzymes (amylase, lipase, and protease) and make carbohydrates and proteins nearly impossible to digest completely. Incompletely digested food moves into the large intestine, causing discomfort and bloating, and can be even more of a problem for people who have naturally low levels of digestive enzymes, such as infants and the elderly.

Goitrogens: Block the production of thyroid hormone and cause goiter formation. These are compounds known to make it more difficult for the thyroid gland to create its hormones. Low thyroid hormone levels cause reduced thyroid function, with slow metabolism, low energy, a reduced heart rate, and lack of oxygen in the cells. Also blocking thyroid production is genistein, an isoflavone found in soybeans. Genistein also inhibits tyrosine kinases, enzymes involved in the transfer of energy from one molecule to another, which drives cell division, memory consolidation, tissue repair, and blood vessel maintenance and regeneration.

Aluminum: The highly industrialized processing of soy requires acid washing in aluminum tanks. The aluminum leaches into the final soy products that are sold for human consumption. Aluminum is toxic to our nervous system and kidneys, so avoiding all processed soy products is a good rule to follow.

Environmental Impact: The industrialization of soy production has led to genetic modification of the crops - unhealthy for humans, animals, and the environment - along with deforestation and a need for large amounts of pesticides. GM-produced soy also increases the risk of soy allergies and intolerance. Large amounts of it have also been used as animal feed, increasing the need for antibiotics.

Remember: Reading the labels when buying packaged or processed foods is VITAL! Committing to preparing food at home in order to control the quality of the ingredients can be very helpful. If using soy, organic, fermented soy products such as miso, tempeh, and natto are the wisest options. Make sure to combine these products with other whole foods!

What Is It With Gluten?

Gluten is a composite formed from several different proteins. It is found most commonly in wheat, barley, rye, spelt, kamut, oats, durum, einkorn, farro, graham, and semolina. Adding texture and a characteristic chewiness to baked goods, gluten is used in a wide variety of other foods as a thickener and binder, flavor enhancer, and protein supplement. Within gluten, there are actually four main proteins: albumins, glutelins, globulins, and prolamins. Glutelins and prolamins are found in higher concentrations in wheat, while albumins and globulins are more plentiful in corn and rice. Glutelins in wheat, in particular, are dangerous for those susceptible to gluten intolerance and sensitivity because of the way that acids in the body break them down, leading to an abnormal immune response.
The cells of the immune system produce antibodies and other cellular products that begin to react against normal, healthy tissue, causing inflammation and damage. Gluten sensitivity is an autoimmune condition that creates inflammation throughout the body, with wide-ranging effects across all organ systems. Gluten can inflame the brain by causing an autoimmune response. A review paper in The New England Journal of Medicine listed 55 "diseases" that can be caused by eating gluten, including celiac disease, osteoporosis, irritable bowel disease, inflammatory bowel disease, anemia, cancer, fatigue, rheumatoid arthritis, lupus, multiple sclerosis, and almost all other autoimmune diseases. Gluten is also linked to many psychiatric and neurological diseases, including anxiety, depression, schizophrenia, dementia, migraines, epilepsy, and neuropathy (nerve damage). It has also been linked to autism. The autoimmune reaction, in which the body's immune system starts attacking normal tissue in response to eating gluten, will continue as long as these food products are in the diet.

Gluten intolerance may lead to varying degrees of intestinal damage that increase the risk of malabsorption of food, which causes nutritional deficiencies and may also result in conditions such as iron deficiency anemia, osteopenia, and osteoporosis. During digestion, gluten can be broken down into individual proteins that act a lot like psychedelic drugs. These are opium-like proteins called gluteomorphins, which can drastically change brain function and behavior. Gluten can also affect the brain through its high content of glutamate (similar to MSG), a molecule that accelerates, activates, irritates and damages brain cells through a special "docking station" called the NMDA receptor. Excessive activity in this receptor due to glutamate has been linked to many psychiatric disorders. Glutamate is an excitotoxin, a substance that agitates and kills or damages brain cells.

Have we adapted to gluten? Grains carry poisons, gluten among them, in their husks to fight back against predators. Some creatures, like birds, have adapted to overcome the defenses of gluten-containing cereal grains. However, most mammals are not adapted to grains and do not eat them in substantial quantities. This includes humans. Even though our bodies have not adapted, our diet has significantly changed to include industrialized, processed grains.

Gluten eats away at your gut lining, and if the gut is damaged, you do not absorb nutrients. It interferes with the gall bladder and bile production. If you do not absorb fats and fat-soluble nutrients such as vitamins A, D, and K, you will have problems utilizing any minerals you do absorb, to say nothing of the nutrient deficiencies that come from inadequate essential fats. Phytates tightly bind to metal ions and make them unavailable for absorption. All of this can lead to autoimmune disease and cancer. Once the gut lining is damaged, we are at exceptionally high risk of autoimmune diseases, such as Hashimoto's, and several types of cancer, including non-Hodgkin's lymphoma. The pancreas is assailed by grain-induced inflammation due to CCK problems and elevated insulin levels. This inflammation is a potential cause of pancreatic cancer and pancreatitis (inflammation of the pancreas). The molecular structure of gliadin, the protein portion of gluten, closely resembles that of the thyroid gland.
When gliadin breaches the protective barrier of the gut and enters the bloodstream, the immune system tags it for destruction. These antibodies to gliadin also cause the body to attack thyroid tissue.

Is moderation a possible choice? Gluten is one of those cases where moderation is not possible. Even the smallest amount of gluten can trigger an autoimmune response, whether from a drop of soy sauce or a whole loaf of wheat bread. The immune response to gluten is delayed, which means that it may not manifest until up to 4 days later and may last in the body for up to 6 months each time. Remember to read all your ingredient labels! Choose whole foods and gluten-free foods that don't have a label!
# -*- coding: utf-8 -*-
"""
************************************************************************************
Class : TotalLine
Author : Thierry Maillard (TMD)
Date : 28/10/2016 - 18/11/2016
Role : Define a TotalLine for food table.
Licence : GPLv3
Copyright (c) 2016 - Thierry Maillard

This file is part of CalcAl project.

CalcAl project is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

CalcAl project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with CalcAl project. If not, see <http://www.gnu.org/licenses/>.

Modif. :
- dictComponentsValueStr is calculated when calling getFormattedValue()
- getFormattedValue accepts 1 parameter : nbDays
************************************************************************************
"""
from model import ModelBaseData
from model import Component
from util import CalcalExceptions


class TotalLine(ModelBaseData.ModelBaseData):
    """ Model for a TotalLine """

    def __init__(self, configApp):
        super(TotalLine, self).__init__(configApp, None, "TotalLine")
        self.setData("containValues", False)
        self.setData("name", _("Total"))
        self.setData("quantity", 0.0)
        self.setData("dictComponentsQualifierQuantity", dict())

        # table for qualification reduction rules
        self.qRulesS = self.configApp.get('QualifValue', 'QRulesS').split(";")
        self.qRules0 = self.configApp.get('QualifValue', 'QRules0').split(";")
        self.qRulesO = self.configApp.get('QualifValue', 'QRulesO').split(";")

        self.logger.debug("Created in model" + str(self))

    def update(self, dictFoodStuff):
        """ Update total line value according to the values in the parent """
        # Get values of all foodstuff in model
        nbFoodStuff = len(dictFoodStuff)
        if nbFoodStuff > 0:
            # Sum quantities of each food
            self.setData("quantity", 0.0)
            for foodStuff in dictFoodStuff.values():
                self.setData("quantity",
                             self.getData("quantity") + foodStuff.getData("quantity"))

            # Sum all components values and qualifiers
            dictQualifierQuantity = dict()
            for foodStuff in dictFoodStuff.values():
                for codeComponent, component in foodStuff.dictComponents.items():
                    qualifValue = component.getData("qualifValue")
                    quantity = component.getData("quantity")
                    if codeComponent in dictQualifierQuantity.keys():
                        dictQualifierQuantity[codeComponent][0] += qualifValue
                        dictQualifierQuantity[codeComponent][1] += quantity
                    else:
                        dictQualifierQuantity[codeComponent] = [qualifValue, quantity]

            # Reduce qualifiers and format all components
            self.getData("dictComponentsQualifierQuantity").clear()
            for codeComponent, qualifierQuantity in dictQualifierQuantity.items():
                qualifierQuantity[0] = self.reducQualifier(qualifierQuantity[0],
                                                           qualifierQuantity[1])
                self.getData("dictComponentsQualifierQuantity")[codeComponent] = \
                    [qualifierQuantity[0], qualifierQuantity[1]]

    def reducQualifier(self, qualif2Reduce, value):
        """ Reduce qualif2Reduce expression by applying rules read in config file """
        qualifResult = "".join(set(qualif2Reduce))
        nbReduction = 0
        while nbReduction < 5 and len(qualifResult) > 1:
            # Apply rules
            if value >= float(self.configApp.get("Limits", "near0")):
                qRule2apply = self.qRulesS
            else:  # For value near 0
                qRule2apply = self.qRules0
            qRule2apply = qRule2apply + self.qRulesO
            for rule in qRule2apply:
                if rule[0] in qualifResult and rule[1] in qualifResult:
                    qualifResult = qualifResult.replace(rule[0], rule[2])
                    qualifResult = qualifResult.replace(rule[1], rule[2])
                    qualifResult = "".join(set(qualifResult))
            nbReduction = nbReduction + 1
        if nbReduction >= 5:
            raise CalcalExceptions.CalcalInternalError(self.configApp,
                                                       "reducQualifier don't converge : " +
                                                       qualif2Reduce +
                                                       " can't be reduce : " +
                                                       qualifResult +
                                                       ". Check config/[QualifValue]/QRules")
        return qualifResult

    def getFormattedValue(self, nbDays=1):
        """ Return name, quantity and dict(codeComponents) = qty formatted
            for all components of this total line.
            V0.38 : build dictComponentsValueStr;
            getFormattedValue accepts 1 optional parameter : nbDays.
            Parameter nbDays : all returned values are divided by this integer """
        assert nbDays > 0, \
            "TotalLine/getFormattedValue() : nbDays must be > 0 " + str(nbDays)
        dictComponentsValueStr = dict()
        for codeComponent, qualifierQuantity \
                in self.getData("dictComponentsQualifierQuantity").items():
            dictComponentsValueStr[codeComponent] = \
                Component.Component.getValueFormatedStatic(self.configApp,
                                                           qualifierQuantity[0],
                                                           qualifierQuantity[1] /
                                                           float(nbDays))
        totalName = self.getData("name")
        totalQuantity = self.getData("quantity")
        if nbDays > 1:
            totalName += " " + _("per day")
            totalQuantity /= float(nbDays)
        return totalName, round(totalQuantity, 1), dictComponentsValueStr

    def getRawValue(self):
        """ Return name, quantity and dict(codeComponents) = [Qualifier, quantity]
            for all components of this total line """
        return self.getData("name"), self.getData("quantity"), \
            self.getData("dictComponentsQualifierQuantity")

    def normalise4for100g(self):
        """ Normalise all components quantities for 100g of product """
        ratio = 100. / self.getData("quantity")
        for fieldsComponent in self.getData("dictComponentsQualifierQuantity").values():
            fieldsComponent[1] *= ratio
        self.setData("quantity", 100.)
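# --- Hedged usage sketch (not part of the original class) -------------------
# total_line and dict_food_stuff are assumed to be built by the rest of the
# CalcAl model layer, which is not shown in this file:
#
#     total_line.update(dict_food_stuff)                          # sum quantities/qualifiers
#     name, qty, values = total_line.getFormattedValue(nbDays=7)  # per-day averages
#     print(name, qty)                                            # e.g. "Total per day", 123.4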
Tuesday, November 13th at 2 p.m. – Workshop (in French) on banking and saving in Canada, for newcomers and new Canadians. Thursday, November 15th at 2 p.m. – Workshop (in English) on budgeting and spending plans. For more information or to register: Lynn Bourgeois, (506) 758-4029.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, csv


def copy_to_imagedrop_upload(src_filepath, destdir=None):
    import pycurl, os, shutil, re
    regex_colorstyle = re.compile(r'^.*?/[0-9]{9}[_altm0-6]{,6}?\.[jpngJPNG]{3}$')
    if not regex_colorstyle.findall(src_filepath):
        print src_filepath.split('/')[-1], ' Is Not a valid Bluefly Colorstyle File or Alt Out of Range'
        return
    else:
        if not destdir:
            destdir = '/mnt/Post_Complete/ImageDrop'
        imagedrop = os.path.abspath(destdir)
        localFileName = src_filepath.split('/')[-1]
        imagedropFilePath = os.path.join(imagedrop, localFileName.lower())
        try:
            if os.path.isfile(imagedropFilePath):
                try:
                    os.remove(imagedropFilePath)
                    #os.rename(src_filepath, imagedropFilePath)
                    shutil.copyfile(src_filepath, imagedropFilePath)
                    return True
                except:
                    print 'Error ', imagedropFilePath
                    return False
            else:
                ##os.rename(src_filepath, imagedropFilePath)
                shutil.copyfile(src_filepath, imagedropFilePath)
                return True
        except:
            return False


def rename_retouched_file(img):
    import os, re
    regex_coded = re.compile(r'.+?/[1-9][0-9]{8}_[1-6]\.[jJpPnNgG]{3}')
    imgfilepath = img
    if re.findall(regex_coded, imgfilepath):
        filedir = imgfilepath.split('/')[:-1]
        filedir = '/'.join(filedir)
        print filedir
        filename = imgfilepath.split('/')[-1]
        colorstyle = str(filename[:9])
        testimg = filename.split('_')[-1]
        alttest = testimg.split('.')[0]
        ext = filename.split('.')[-1]
        ext = ".{}".format(ext.lower())
        # was "str.isdigit(alttest) & len(alttest) == 1": '&' binds tighter
        # than '==', so the original compared (isdigit & len) against 1
        if str.isdigit(alttest) and len(alttest) == 1:
            if alttest == '1':
                src_img_primary = img.replace('_1.', '.')
                os.rename(img, src_img_primary)
                return src_img_primary
            else:
                alttest = int(alttest)
                print alttest
                alttest = alttest - 1
                alt = '_alt0{}'.format(str(alttest))
                print alt
                if alt:
                    filename = "{}{}{}".format(colorstyle, alt, ext)
                    renamed = os.path.join(filedir, filename)
                    print renamed
                    if renamed:
                        os.rename(img, renamed)
                        if os.path.isfile(renamed):
                            return renamed
    else:
        return img


def get_aspect_ratio(img):
    from PIL import Image
    try:
        im = Image.open(img)
        w, h = im.size
        aspect_ratio = str(round(float(int(h)) / float(int(w)), 2))
        return aspect_ratio
    except IOError:
        pass


def get_dimensions(img):
    from PIL import Image
    try:
        im = Image.open(img)
        w, h = im.size
        dimensions = "{0}x{1}".format(int(w), int(h))
        return dimensions
    except IOError:
        pass


def get_exif_metadata_value(img, exiftag=None):
    import pyexiv2
    image_metadata = pyexiv2.ImageMetadata(img)
    metadata = image_metadata.read()
    if exiftag:
        exifvalue = metadata[exiftag]
        return (exiftag, exifvalue)
    else:
        metadict = {}
        for mtag, mvalue in metadata.iteritems():
            metadict[mtag] = mvalue
        return metadict


def get_image_color_minmax(img):
    import subprocess, os, sys, re
    try:
        ret = subprocess.check_output([
            'convert', img, '-median', '3', '+dither', '-colors', '2',
            '-trim', '+repage', '-gravity', 'center', '-crop', "50%",
            '-depth', '8', '-format', '%c', "histogram:info:-"])
    except:
        return ''
    colorlow = str(ret).split('\n')[0].strip(' ')
    colorlow = re.sub(re.compile(r',\W'), ',', colorlow).replace(':', '', 1) \
        .replace('(', '').replace(')', '').replace('  ', ' ').split(' ')
    colorhigh = str(ret).split('\n')[1].strip(' ')
    colorhigh = re.sub(re.compile(r',\W'), ',', colorhigh).replace(':', '', 1) \
        .replace('(', '').replace(')', '').replace('  ', ' ').split(' ')
    fields_top = ['low_rgb_avg', 'high_rgb_avg']
    fields_level2 = ['total_pixels', 'rgb_vals', 'webcolor_id', 'color_profile_vals']
    colorlow = zip(fields_level2, colorlow)
    colorhigh = zip(fields_level2, colorhigh)
    if len(colorhigh) == len(colorlow):
        coloravgs = dict(colorlow), dict(colorhigh)
        colordata = zip(fields_top, coloravgs)
        colordata = dict(colordata)
        colordata['comp_level'] = 'InRange'
        return colordata
    elif len(colorhigh) < len(colorlow):
        coloravgs = dict(colorlow)
        colordata = {}
        colordata[fields_top[0]] = coloravgs
        colordata[fields_top[1]] = {'total_pixels': 0}
        colordata['comp_level'] = 'Bright'
        return colordata
    elif len(colorhigh) > len(colorlow):
        coloravgs = dict(colorhigh)
        colordata = {}
        colordata[fields_top[1]] = coloravgs
        # was '==' (a no-op comparison); assignment is clearly intended here
        colordata[fields_top[0]] = {'total_pixels': 0}
        colordata['comp_level'] = 'Dark'
        return colordata


def evaluate_color_values(colordata):
    high_range_pixels = float(colordata['high_rgb_avg']['total_pixels'])
    low_range_pixels = float(colordata['low_rgb_avg']['total_pixels'])
    try:
        if low_range_pixels >= high_range_pixels and high_range_pixels != 0:
            r, g, b = colordata['high_rgb_avg']['rgb_vals'].split(',')
            r, g, b = float(r), float(g), float(b)
            high_avg = float(round((r + b + g) / 3, 2))
            r, g, b = colordata['low_rgb_avg']['rgb_vals'].split(',')
            r, g, b = float(r), float(g), float(b)
            low_avg = float(round((r + b + g) / 3, 2))
            ratio = round(float(float(low_range_pixels) / float(high_range_pixels)), 2)
            print high_avg / (low_avg * ratio)
            return high_avg, low_avg, ratio, 'LOW'
        elif low_range_pixels < high_range_pixels and low_range_pixels != 0:
            r, g, b = colordata['high_rgb_avg']['rgb_vals'].split(',')
            r, g, b = float(r), float(g), float(b)
            high_avg = float(round((r + b + g) / 3, 2))
            r, g, b = colordata['low_rgb_avg']['rgb_vals'].split(',')
            r, g, b = float(r), float(g), float(b)
            low_avg = float(round((r + b + g) / 3, 2))
            ratio = round(float(float(low_range_pixels) / float(high_range_pixels)), 2)
            print low_avg / (high_avg * ratio)
            return high_avg, low_avg, ratio, 'HIGH'
    except TypeError:
        print "Type Error"
        pass
    except ValueError:
        print "Value Error", colordata
        pass


def sort_files_by_values(directory):
    import os, glob
    filevalue_dict = {}
    fileslist = directory
    count = len(fileslist)
    for f in fileslist:
        values = {}
        colordata = get_image_color_minmax(f)
        try:
            high, low, ratio, ratio_range = evaluate_color_values(colordata)
            values['ratio'] = ratio
            values['ratio_range'] = ratio_range
            # Store both averages regardless of range: subproc_magick_png
            # reads rgbmean['high'] and rgbmean['low'] unconditionally, so
            # storing only one (as the original commented-out lines hint)
            # would raise a KeyError downstream.
            if ratio_range == 'LOW':
                values['low'] = low
                values['high'] = high
            if ratio_range == 'HIGH':
                values['high'] = high
                values['low'] = low
            filevalue_dict[f] = values
            count -= 1
            print "{0} Files Remaining".format(count)
        except TypeError:
            filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
            count -= 1
            print "{0} Files Remaining-TypeError".format(count)
            pass
        except ZeroDivisionError:
            filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
            count -= 1
            print "{0} Files Remaining-ZeroDivision".format(count)
            pass
    return filevalue_dict


def subproc_magick_large_jpg(img, destdir=None):
    import subprocess, os, re
    regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
    regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
    regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
    os.chdir(os.path.dirname(img))
    if not destdir:
        destdir = os.path.abspath('.')
    else:
        destdir = os.path.abspath(destdir)
    if not regex_alt.findall(img):
        outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_l.jpg')
        dimensions = ''
        aspect_ratio = get_aspect_ratio(img)
        dimensions = get_dimensions(img)
        width = dimensions.split('x')[0]
        height = dimensions.split('x')[1]
        if aspect_ratio == '1.2':
            vert_horiz = '400x480'
        elif float(aspect_ratio) > float(1.2):
            vert_horiz = 'x480'
        elif float(aspect_ratio) < float(1.2):
            vert_horiz = '400x'
        dimensions = "400x480"
        print dimensions, vert_horiz
        if regex_valid_style.findall(img):
            subprocess.call([
                'convert', '-colorspace', 'sRGB', img, '-background', 'white',
                "-filter", "Spline", "-filter", "Cosine",
                "-define", "filter:blur=0.9891028367558475",
                "-distort", "Resize", vert_horiz,
                '-extent', dimensions,
                "-colorspace", "sRGB", "-format", "jpeg",
                '-unsharp', '2x1.24+0.5+0', '-quality', '95', outfile])
            return outfile
        else:
            return img
    else:
        pass


def subproc_magick_medium_jpg(img, destdir=None):
    import subprocess, os, re
    regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
    regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
    regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
    os.chdir(os.path.dirname(img))
    if not destdir:
        destdir = os.path.abspath('.')
    else:
        destdir = os.path.abspath(destdir)
    if regex_alt.findall(img):
        outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.jpg')
    else:
        outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_m.jpg')
    dimensions = ''
    aspect_ratio = get_aspect_ratio(img)
    dimensions = get_dimensions(img)
    width = dimensions.split('x')[0]
    height = dimensions.split('x')[1]
    if aspect_ratio == '1.2':
        vert_horiz = '200x240'
    elif float(aspect_ratio) > float(1.2):
        vert_horiz = 'x240'
    elif float(aspect_ratio) < float(1.2):
        vert_horiz = '200x'
    dimensions = '200x240'
    print dimensions, vert_horiz
    if regex_valid_style.findall(img):
        subprocess.call([
            'convert', '-colorspace', 'sRGB', img, '-background', 'white',
            "-filter", "Spline", "-filter", "Cosine",
            "-define", "filter:blur=0.9891028367558475",  # was misspelled "fliter:blur"
            "-distort", "Resize", vert_horiz,
            '-extent', dimensions,
            "-colorspace", "sRGB", "-format", "jpeg",
            '-unsharp', '2x1.1+0.5+0', '-quality', '95', outfile])
        return outfile
    else:
        return img


def subproc_magick_png(img, rgbmean=None, destdir=None):
    import subprocess, re, os
    regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
    regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
    regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
    modulator = ''
    modulate = ''
    if not destdir:
        destdir = '.'
    #imgdestpng_out = os.path.join(tmp_processing, os.path.basename(imgsrc_jpg))
    os.chdir(os.path.dirname(img))
    if not rgbmean:
        ratio_range = 'OutOfRange'
    else:
        try:
            ratio_range = rgbmean['ratio_range']
        except:
            ratio_range = 'OutOfRange'
            pass
    if ratio_range != 'OutOfRange':
        high = rgbmean['high']
        low = rgbmean['low']
        ratio = rgbmean['ratio']
        #rgbmean = float(128)
        #rgbmean = get_image_color_minmax(img)
        if ratio_range == 'LOW':
            if float(round(high, 2)) > float(240):
                modulator = '-modulate'
                modulate = '104,100'
            elif float(round(high, 2)) > float(200):
                modulator = '-modulate'
                modulate = '107,110'
            elif float(round(high, 2)) > float(150):
                modulator = '-modulate'
                modulate = '110,110'
            else:
                modulator = '-modulate'
                modulate = '112,110'
        elif ratio_range == 'HIGH':
            if float(round(high, 2)) > float(230):
                modulator = '-modulate'
                modulate = '100,100'
            elif float(round(high, 2)) > float(200):
                modulator = '-modulate'
                modulate = '103,100'
            elif float(round(high, 2)) > float(150):
                modulator = '-modulate'
                modulate = '105,105'
            else:
                modulator = '-modulate'
                modulate = '108,107'
    elif ratio_range == 'OutOfRange':
        modulator = '-modulate'
        modulate = '100,100'
    format = img.split('.')[-1]
    os.chdir(os.path.dirname(img))
    ## Destination name
    if not destdir:
        destdir = os.path.abspath('.')
    else:
        destdir = os.path.abspath(destdir)
    outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
    dimensions = ''
    ## Get variable values for processing
    aspect_ratio = get_aspect_ratio(img)
    dimensions = get_dimensions(img)
    width = dimensions.split('x')[0]
    height = dimensions.split('x')[1]
    if aspect_ratio == '1.2':
        vert_horiz = '{0}x{1}'.format(width, height)
        dimensions = '{0}x{1}'.format(int(width), int(height))
    elif float(aspect_ratio) > float(1.2):  # was float(int(1.2)), i.e. 1.0
        vert_horiz = 'x{0}'.format(height)
        w = float(0.8) * float(height)
        #w = float(round(w, 2) * float(aspect_ratio))
        dimensions = '{0}x{1}'.format(int(w), int(height))
        print "W", w, aspect_ratio
    elif float(aspect_ratio) < float(1.2):
        vert_horiz = '{0}x'.format(width)
        h = float(1.2) * float(width)
        #h = float(round(h, 2) * float(aspect_ratio))
        dimensions = '{0}x{1}'.format(int(width), int(h))
        print "H", h, aspect_ratio
    if not dimensions:
        dimensions = '100%'
        vert_horiz = '100%'
    subprocess.call([
        'convert', '-format', format, img,
        '-define', 'png:preserve-colormap',
        '-define', 'png:format=png24',
        '-define', 'png:compression-level=N',
        '-define', 'png:compression-strategy=N',
        '-define', 'png:compression-filter=N',
        '-format', 'png',
        '-modulate', modulate,
        "-define", "filter:blur=0.625",  # "filter:blur=0.88549061701764",
        "-distort", "Resize", vert_horiz,
        '-background', 'white', '-gravity', 'center', '-extent', dimensions,
        "-colorspace", "sRGB",
        '-unsharp', '2x2.7+0.5+0', '-quality', '95',
        os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')])
    print 'Done {}'.format(img)
    return os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')


def upload_imagedrop(root_dir):
    import os, sys, re, csv, shutil, glob
    archive_uploaded = os.path.join(root_dir, 'uploaded')
    tmp_failed = os.path.join(root_dir, 'failed_upload')
    try:
        os.makedirs(archive_uploaded, 16877)
    except OSError:
        try:
            shutil.rmtree(archive_uploaded, ignore_errors=True)
            os.makedirs(archive_uploaded, 16877)
        except:
            pass
    try:
        os.makedirs(tmp_failed, 16877)
    except:
        pass
    import time
    upload_tmp_loading = glob.glob(os.path.join(root_dir, '*.*g'))
    for upload_file in upload_tmp_loading:
        try:
            code = copy_to_imagedrop_upload(upload_file)
            if code == True or code == '200':
                try:
                    shutil.move(upload_file, archive_uploaded)
                    time.sleep(float(.1))
                    print "1stTryOK", upload_file
                except:
                    dst_file = upload_file.replace(root_dir, archive_uploaded)
                    try:
                        if os.path.exists(dst_file):
                            os.remove(dst_file)
                        shutil.move(upload_file, archive_uploaded)
                    except:
                        pass
            else:
                print "Uploaded {}".format(upload_file)
                time.sleep(float(.1))
                try:
                    shutil.move(upload_file, archive_uploaded)
                except shutil.Error:
                    pass
        except OSError:
            # was format(file), referencing the builtin instead of the loop var
            print "Error moving Finals to Arch {}".format(upload_file)
            try:
                shutil.move(upload_file, tmp_failed)
            except shutil.Error:
                pass
    try:
        if os.path.isdir(sys.argv[2]):
            finaldir = os.path.abspath(sys.argv[2])
            for f in glob.glob(os.path.join(archive_uploaded, '*.*g')):
                try:
                    shutil.move(f, finaldir)
                except shutil.Error:
                    pass
    except:
        print 'Failed to Archive {}'.format(upload_tmp_loading)
        pass


def main(root_img_dir=None):
    import sys, glob, shutil, os, re
    import convert_img_srgb
    regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.[JjPpNnGg]{3}$')
    regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
    regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
    if not root_img_dir:
        try:
            root_img_dir = sys.argv[1]
            if root_img_dir == 'jblocal':
                root_img_dir = os.path.abspath(
                    '/mnt/Post_Ready/Retouchers/JohnBragato/MARKETPLACE_LOCAL')
            else:
                root_img_dir = os.path.abspath(
                    '/mnt/Post_Complete/Complete_Archive/MARKETPLACE')
        except IndexError:
            root_img_dir = os.path.abspath(
                '/mnt/Post_Complete/Complete_Archive/MARKETPLACE')
            pass
    else:
        pass
    try:
        destdir = os.path.abspath(sys.argv[2])
        if not os.path.isdir(destdir):
            os.makedirs(destdir, 16877)
    except IndexError:
        destdir = '/mnt/Post_Complete/ImageDrop/'  ##os.path.join(root_img_dir, 'output')
        # try:
        #     os.makedirs(destdir, 16877)
        # except OSError:
        #     pass
    if os.path.isdir(root_img_dir):
        #import md5_unique_dup_files
        #duplicates = md5_unique_dup_files.find_duplicate_imgs(root_img_dir)[1]
        #[os.remove(f) for f in duplicates if f]
        imgs_renamed = [rename_retouched_file(f)
                        for f in glob.glob(os.path.join(root_img_dir, '*.??[gG]'))]
        img_dict = sort_files_by_values(glob.glob(os.path.join(root_img_dir, '*.??[gG]')))
        for k, v in img_dict.items():
            try:
                img = k
                ## Convert profile of source img if CMYK; ignores if RGB
                convert_img_srgb.main(image_file=img)
                ## Get color pixel values from source img
                rgbmean = v.items()
                ## Generate png from source, then jpgs from the png
                pngout = subproc_magick_png(img, rgbmean=dict(rgbmean), destdir=destdir)
                subproc_magick_large_jpg(pngout, destdir=destdir)
                subproc_magick_medium_jpg(pngout, destdir=destdir)
            except AttributeError:
                print 'SOMETHING IS WRONG WITH THE IMAGE Error {}'.format(img)
                pass
    else:
        img = root_img_dir
        if regex_coded.findall(img):
            img = rename_retouched_file(img)
        pngout = subproc_magick_png(img, destdir=destdir)
        subproc_magick_large_jpg(pngout, destdir=destdir)
        subproc_magick_medium_jpg(pngout, destdir=destdir)
    try:
        upload_imagedrop(destdir)
        failed_dir = os.path.join(destdir, 'failed_upload', '*.??[gG]')
    except:
        print 'PrintUploadFailed'
        pass
    # while True:
    #     if glob.glob(failed_dir):
    #         destdir = failed_dir
    #         failed_dir = os.path.join(destdir, 'failed_upload', '*.??[gG]')
    #         upload_imagedrop(destdir)
    #print 'NOT UPLOADING YET'


if __name__ == '__main__':
    main()
We found this panoramic book at the Big Bad Wolf book fair in Melaka. It tells stories through one continuous stretch of pictures, with no limits on storytelling. It is an excellent way to tell open-ended stories and let the imagination soar. One of the best books we have bought for Svadhi.
import os

import discord
from discord.ext import commands

from .utils.dataIO import dataIO


class STREAMCON:
    """Server STREAMCON Levels"""

    def __init__(self, bot):
        self.bot = bot
        self.settings_path = "data/STREAMCON/settings.json"
        self.settings = dataIO.load_json(self.settings_path)
        self.valid_STREAMCONs = ['1', '2', '3', '4', '5']

    @commands.command(name="STREAMCON", no_pm=True, pass_context=True)
    async def STREAMCON(self, ctx):
        """Reports the server STREAMCON level."""
        server = ctx.message.server
        self.load_settings(server)
        nick = self.settings[server.id]["authority"]
        await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]), nick)

    @commands.command(name="STREAMCON+", no_pm=True, pass_context=True)
    async def STREAMCONplus(self, ctx):
        """Elevates the server STREAMCON level."""
        server = ctx.message.server
        member = ctx.message.author
        self.load_settings(server)
        if self.settings[server.id]["STREAMCON"] == 1:
            await self.bot.say("We are already at STREAMCON 1! Oh no!")
        else:
            self.settings[server.id]["STREAMCON"] -= 1
            self.settings[server.id]["authority"] = member.display_name
            self.save_settings(server)
            await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
                                      member.display_name)

    @commands.command(name="STREAMCON-", no_pm=True, pass_context=True)
    async def STREAMCONminus(self, ctx):
        """Lowers the server STREAMCON level."""
        server = ctx.message.server
        member = ctx.message.author
        self.load_settings(server)
        if self.settings[server.id]["STREAMCON"] == 5:
            await self.bot.say("We are already at STREAMCON 5! Relax!")
        else:
            self.settings[server.id]["STREAMCON"] += 1
            self.settings[server.id]["authority"] = member.display_name
            self.save_settings(server)
            await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
                                      member.display_name)

    @commands.command(name="setSTREAMCON", no_pm=True, pass_context=True)
    async def setSTREAMCON(self, ctx, level):
        """Manually set the server STREAMCON level in case of emergency."""
        server = ctx.message.server
        member = ctx.message.author
        self.load_settings(server)
        if level in self.valid_STREAMCONs:
            self.settings[server.id]["STREAMCON"] = int(level)
            # was "Authority"; the key is lowercase everywhere else,
            # so the capitalized variant left stale data behind
            self.settings[server.id]["authority"] = member.display_name
            self.save_settings(server)
            await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
                                      member.display_name)
        else:
            await self.bot.say("Not a valid STREAMCON level. Haven't "
                               "you seen War Games, doofus?")

    async def post_STREAMCON(self, level, nick):
        icon_url = 'https://i.imgur.com/7psx4VV.gif'
        # the original appended no-op .format(level) calls to these strings
        if level == '5':
            color = 0x0080ff
            thumbnail_url = 'http://i.imgur.com/e2CK3om.gif'
            author = "Stream status: SCHEDULED HOLD."
            subtitle = ("I am either at work or have something scheduled "
                        "prior to the stream")
            instructions = ("- Expect this status to change later in the day\n"
                            "- Start counting your Tributes\n"
                            "- Stream procedures will continue as planned\n"
                            "- Report all suspicious activity by DJ Woo")
        elif level == '4':
            color = 0x00ff00
            thumbnail_url = 'http://i.imgur.com/TMBq2i0.gif'
            author = "Stream status: GO."
            subtitle = 'Stream should launch as scheduled'
            instructions = ("- Strap in and buckle up\n"
                            "- Begin propellant load\n"
                            "- Transfer guidance control to Eddie\n"
                            "- I usually switch to this status once I am out of work\n"
                            "- Stay tuned to Discord or Twitter for updates")
        elif level == '3':
            color = 0xffff00
            thumbnail_url = 'http://i.imgur.com/uW1AZQN.gif'
            author = "Stream status: HOLD."
            subtitle = 'Something has come up but it should be ok.'
            instructions = ("- Stream has a good chance of happening\n"
                            "- This is probably just precautionary\n"
                            "- Expect additional updates\n"
                            "- Feel free to nag me for one too\n"
                            "- Put on your big boy pants")
        elif level == '2':
            color = 0xff0000
            thumbnail_url = 'http://i.imgur.com/stCtTIe.gif'
            author = "Stream status: Engineering HOLD."
            subtitle = 'There is a SIGNIFICANT obstacle to streaming'
            instructions = ("- We are pretty sure the stream is not happening\n"
                            "- Queue up something to binge on Netflix\n"
                            "- Check StreamAlerts for who else is live\n"
                            "- Look for additional updates for Go or No-Go")
        elif level == '1':
            color = 0xffffff
            thumbnail_url = 'http://i.imgur.com/U44wmN3.gif'
            author = "Stream status: SCRUBBED."
            subtitle = 'The Stream is POSITIVELY not happening'
            instructions = ("- Stand down from all stream launch plans\n"
                            "- You should absolutely find something else to do\n"
                            "- There will be no further updates.")
        if level in self.valid_STREAMCONs:
            embed = discord.Embed(title="\u2063", color=color)
            embed.set_author(name=author, icon_url=icon_url)
            embed.set_thumbnail(url=thumbnail_url)
            embed.add_field(name=subtitle, value=instructions, inline=False)
            embed.set_footer(text="Authority: {}".format(nick))
            await self.bot.say(embed=embed)
        else:
            await self.bot.say("There was an error due to a downrange "
                               "tracking system failure.")

    def load_settings(self, server):
        self.settings = dataIO.load_json(self.settings_path)
        if server.id not in self.settings.keys():
            self.add_default_settings(server)

    def save_settings(self, server):
        if server.id not in self.settings.keys():
            self.add_default_settings(server)
        dataIO.save_json(self.settings_path, self.settings)

    def add_default_settings(self, server):
        self.settings[server.id] = {"STREAMCON": 5, "authority": "none"}
        dataIO.save_json(self.settings_path, self.settings)


def check_folders():
    folder = "data/STREAMCON"
    if not os.path.exists(folder):
        print("Creating {} folder...".format(folder))
        os.makedirs(folder)


def check_files():
    default = {}
    if not dataIO.is_valid_json("data/STREAMCON/settings.json"):
        print("Creating default STREAMCON settings.json...")
        dataIO.save_json("data/STREAMCON/settings.json", default)


def setup(bot):
    check_folders()
    check_files()
    n = STREAMCON(bot)
    bot.add_cog(n)
Falcon Range | Frontier Sportsman's Club, Inc.

View a larger map of Frontier Sportsman's Club Falcon Range. How to daisy chain locks.

The Club is not responsible for accidents. All participate at their own risk.
Absolutely no intoxicating beverages or illegal drugs are allowed on the premises.
All firearms will be kept open and empty until on the firing line and during all "cease fires".
All shooting will be into range backstops (berms).
Paper targets are to be mounted on target frames constructed to Club specifications (see website). No metallic targets or silhouettes are permitted at Falcon Range; however, they are allowed at Hanover in the informal ranges only.
No glass or appliances may be used as targets.
The shotgun area is on the west side of both properties. All shooting will be to the northeast. Shot size may be no larger than #2 birdshot. Shotgun shooters at Falcon Range must not fire to the right of the white marker, and "Cease Fire" will apply when shooters from the 200 and 300 yard ranges are down range tending their targets.
All members are required to keep the premises clean and orderly.
Target frames are required for each shooter, positioned directly in front of the shooter's position. At Hanover Range, target frames are not required in the 2 informal ranges. All target frames must be placed in the provided target frame holders.
Without exception, violation of shooting times will result in expulsion from the Club.
Fully automatic firearms, or mechanically altered firearms that increase the rate of fire, are not permitted at Falcon Range. At Hanover Range they are permitted in the informal ranges (2) only.
During a "Cease Fire", firearms shall be cleared, chamber empty, magazine or priming device removed. No firearms, cased or uncased, will be handled, loaded or unloaded from vehicles. Shooters must move away from their shooting benches and remain clear of them, behind the Red Line where provided, unless they are down range or policing brass. At Falcon, a cease fire affects the 100, 200, and 300 yard ranges inclusively.
Uncased firearms must be pointed downrange or straight up when being handled.
All members must wear or display membership badges when on Club property.
No tracers or explosives of any type will be allowed on any range.
Shooting benches must be positioned on the concrete pad. All shooters must observe the same firing line. Shooting from the ground, seated or prone, must be done beside the bench.
No hunting will be allowed on Club property.
Handguns with barrels shorter than 5 ½ inches may only be fired on the pistol ranges at Falcon and Hanover or the informal ranges at Hanover.

Any violation of these rules could subject members to expulsion from the Club. Please retain this set of rules and report any violations to an officer of the Club. Information should include the date, time, and nature of the violation and the name or membership number of the person involved.
# Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.

import logging
import platform

from miro import app
from miro import prefs
from miro import startup
from miro import controller
from miro.infoupdater import InfoUpdater
from miro import messages
from miro.frontends.cli.util import print_text, print_box
from miro.frontends.cli.events import EventHandler
from miro.frontends.cli.interpreter import MiroInterpreter


def setup_logging():
    # this gets called after miro.plat.util.setup_logging, and changes
    # the logging level so it's way less spammy.
    logger = logging.getLogger('')
    logger.setLevel(logging.WARN)


def setup_movie_data_program_info():
    from miro.plat.renderers.gstreamerrenderer import movie_data_program_info
    app.movie_data_program_info = movie_data_program_info


def run_application():
    setup_logging()
    app.controller = controller.Controller()
    print "Starting up %s" % app.config.get(prefs.LONG_APP_NAME)
    print "Version:    %s" % app.config.get(prefs.APP_VERSION)
    print "OS:         %s %s %s" % (platform.system(), platform.release(),
                                    platform.machine())
    print "Revision:   %s" % app.config.get(prefs.APP_REVISION)
    print "Builder:    %s" % app.config.get(prefs.BUILD_MACHINE)
    print "Build Time: %s" % app.config.get(prefs.BUILD_TIME)
    print
    app.info_updater = InfoUpdater()
    app.cli_events = EventHandler()
    app.cli_events.connect_to_signals()
    startup.install_first_time_handler(app.cli_events.handle_first_time)
    startup.startup()
    app.cli_events.startup_event.wait()
    if app.cli_events.startup_failure:
        print_box("Error Starting Up: %s" % app.cli_events.startup_failure[0])
        print
        print_text(app.cli_events.startup_failure[1])
        app.controller.shutdown()
        return
    setup_movie_data_program_info()
    messages.FrontendStarted().send_to_backend()
    print "Startup complete. Type \"help\" for list of commands."
    app.cli_interpreter = MiroInterpreter()
    app.cli_interpreter.cmdloop()
    app.controller.shutdown()
OriginationPro Marketing System - Market Focus, Inc.

Market Focus partners with The Hershman Group to provide current, industry-specific marketing content for the mortgage, real estate and insurance industries. The OriginationPro marketing materials are written by Dave Hershman, a leading author and speaker with seven books and hundreds of articles published. All Market Focus services contain the entire OriginationPro catalog of up to 500 email articles and a weekly industry-specific newsletter. Dave also writes 5 to 10 new marketing email articles every month that are available to Market Focus users.
from watson_speech2text import Watson_Speech2Text
from watson_conversation import Watson_Conversation
from naoproxy import NaoProxy


class Tutor():
    def __init__(self):
        # broker = ALBroker("myBroker", "0.0.0.0", 0, IP, 9559)
        IP = "192.168.0.100"
        global nao
        nao = NaoProxy(IP, "nao")
        self.nao = nao
        self.filename = "record.wav"
        self.picturepath = "/home/nao/"
        self.picturename = "picture.png"
        self.nao.takePicture(self.picturepath, self.picturename)
        self.conversation = Watson_Conversation(
            '6734af95-6ca0-4d72-b80b-6c3b578c16bf', 'CqsrM7IrxeCZ',
            '2016-09-20', '41c2898c-cc6a-49f6-82dc-bfc51c201a33')
        self.speech2text = Watson_Speech2Text(
            '5a43e79e-b9de-4b8b-9df2-bfaead00aaa6', '86WTJ13jYssQ',
            model='es-ES_BroadbandModel')
        response = self.conversation.message("hello")
        self.nao.say(response)

    def startConversation(self):
        recording = False
        while True:
            # Touching the front head sensor ends the conversation loop.
            if self.nao.getFrontHeadStatus():
                break
            if recording:
                recording = self.nao.getRightBumperStatus()
                if not recording:
                    # Bumper released: stop recording, transcribe, and reply.
                    self.nao.endRecordAudio(self.filename)
                    self.nao.say(self.conversation.message(
                        self.speech2text.recognize(self.filename, "audio/wav")))
            else:
                recording = self.nao.getRightBumperStatus()
                if recording:
                    # Bumper pressed: start recording the user's speech.
                    self.nao.startRecordAudio(self.filename)


tutor = Tutor()
tutor.startConversation()

# previous credentials: '6432cebe-14b4-4f93-8e73-12ccdb5891c2', 'ccaNRkHB1Uqt',
# 21d88c8e-c0e8-48cb-bffb-61524417ae38
If we encounter problems in our marriage, we will often blame God for allowing us to marry the wrong person. But Pastor Ford will counter that today by reminding us that we came up with the idea of marrying that person. It was our choice to proceed in the relationship. But now we say, “Why did God allow it?” God didn’t do anything! We did it. We made the decision and the choice. Why are we blaming God?
#!/usr/bin/env python # encoding: utf-8 # Licensed under a 3-clause BSD license. # Revision History: # Initial Version: 2016-02-17 14:13:28 by Brett Andrews # 2016-02-23 - Modified to test a programmatic query using a test sample form - B. Cherinka # 2016-03-02 - Generalized to many parameters and many forms - B. Cherinka # - Added config drpver info # 2016-03-12 - Changed parameter input to be a natural language string from __future__ import division, print_function, unicode_literals import datetime import os import re import warnings from collections import OrderedDict, defaultdict from functools import wraps from operator import eq, ge, gt, le, lt, ne import numpy as np import six from marvin import config, marvindb from marvin.api.api import Interaction from marvin.core import marvin_pickle from marvin.core.exceptions import (MarvinBreadCrumb, MarvinError, MarvinUserWarning) from marvin.tools.results import Results, remote_mode_only from marvin.utils.datamodel.query import datamodel from marvin.utils.datamodel.query.base import query_params from marvin.utils.general import temp_setattr from marvin.utils.general.structs import string_folding_wrapper from sqlalchemy import bindparam, func from sqlalchemy.dialects import postgresql from sqlalchemy.orm import aliased from sqlalchemy.sql.expression import desc from sqlalchemy_boolean_search import (BooleanSearchException, parse_boolean_search) try: import cPickle as pickle except: import pickle __all__ = ['Query', 'doQuery'] opdict = {'<=': le, '>=': ge, '>': gt, '<': lt, '!=': ne, '=': eq, '==': eq} breadcrumb = MarvinBreadCrumb() def tree(): return defaultdict(tree) def doQuery(*args, **kwargs): """Convenience function for building a Query and retrieving the Results. Parameters: N/A: See the :class:`~marvin.tools.query.Query` class for a list of inputs. Returns: query, results: A tuple containing the built :class:`~marvin.tools.query.Query` instance, and the :class:`~marvin.tools.results.Results` instance. """ start = kwargs.pop('start', None) end = kwargs.pop('end', None) q = Query(*args, **kwargs) try: res = q.run(start=start, end=end) except TypeError as e: warnings.warn('Cannot run, query object is None: {0}.'.format(e), MarvinUserWarning) res = None return q, res def updateConfig(f): """Decorator that updates query object with new config drpver version.""" @wraps(f) def wrapper(self, *args, **kwargs): if self.query and self.mode == 'local': self.query = self.query.params({'drpver': self._drpver, 'dapver': self._dapver}) return f(self, *args, **kwargs) return wrapper def makeBaseQuery(f): """Decorator that makes the base query if it does not already exist.""" @wraps(f) def wrapper(self, *args, **kwargs): if not self.query and self.mode == 'local': self._createBaseQuery() return f(self, *args, **kwargs) return wrapper def checkCondition(f): """Decorator that checks if filter is set, if it does not already exist.""" @wraps(f) def wrapper(self, *args, **kwargs): if self.mode == 'local' and self.filterparams and not self._alreadyInFilter(self.filterparams.keys()): self.add_condition() return f(self, *args, **kwargs) return wrapper class Query(object): ''' A class to perform queries on the MaNGA dataset. This class is the main way of performing a query. A query works minimally by specifying a list of desired parameters, along with a string filter condition in a natural language SQL format. A local mode query assumes a local database. A remote mode query uses the API to run a query on the Utah server, and return the results. 
By default, the query returns a list of tupled parameters. The parameters are a combination of user-defined parameters, parameters used in the filter condition, and a set of pre-defined default parameters. The object plate-IFU or mangaid is always returned by default. Parameters: returnparams (str list): A list of string parameter names desired to be returned in the query searchfilter (str): A (natural language) string containing the filter conditions in the query; written as you would say it. returntype (str): The requested Marvin Tool object that the results are converted into mode ({'local', 'remote', 'auto'}): The load mode to use. See :doc:`Mode decision tree</mode_decision>`. sort (str): The parameter name to sort the query on order ({'asc', 'desc'}): The sort order. Can be either ascending or descending. limit (int): The limit on the number of returned results Returns: results: An instance of the :class:`~marvin.tools.query.results.Results` class containing the results of your Query. Example: >>> # filter of "NSA redshift less than 0.1 and IFU names starting with 19" >>> searchfilter = 'nsa.z < 0.1 and ifu.name = 19*' >>> returnparams = ['cube.ra', 'cube.dec'] >>> q = Query(searchfilter=searchfilter, returnparams=returnparams) >>> results = q.run() ''' def __init__(self, *args, **kwargs): self._release = kwargs.pop('release', config.release) self._drpver, self._dapver = config.lookUpVersions(release=self._release) self.query = None self.params = [] self.filterparams = {} self.queryparams = None self.myparamtree = tree() self._paramtree = None self.session = marvindb.session self.filter = None self.joins = [] self.myforms = defaultdict(str) self.quiet = kwargs.get('quiet', None) self._errors = [] self._basetable = None self._modelgraph = marvindb.modelgraph self._returnparams = [] self._caching = kwargs.get('caching', True) self.verbose = kwargs.get('verbose', True) self.count_threshold = kwargs.get('count_threshold', 1000) self.allspaxels = kwargs.get('allspaxels', None) self.mode = kwargs.get('mode', None) self.limit = int(kwargs.get('limit', 100)) self.sort = kwargs.get('sort', 'mangaid') self.order = kwargs.get('order', 'asc') self.return_all = kwargs.get('return_all', False) self.datamodel = datamodel[self._release] self.marvinform = self.datamodel._marvinform # drop breadcrumb breadcrumb.drop(message='Initializing MarvinQuery {0}'.format(self.__class__), category=self.__class__) # set the mode if self.mode is None: self.mode = config.mode if self.mode == 'local': self._doLocal() if self.mode == 'remote': self._doRemote() if self.mode == 'auto': try: self._doLocal() except Exception as e: warnings.warn('local mode failed. Trying remote now.', MarvinUserWarning) self._doRemote() # get return type self.returntype = kwargs.get('returntype', None) # set default parameters self.set_defaultparams() # get user-defined input parameters returnparams = kwargs.get('returnparams', []) if returnparams: self.set_returnparams(returnparams) # if searchfilter is set then set the parameters searchfilter = kwargs.get('searchfilter', None) if searchfilter: self.set_filter(searchfilter=searchfilter) self._isdapquery = self._checkInFilter(name='dapdb') # Don't do anything if nothing specified allnot = [not searchfilter, not returnparams] if not all(allnot) and self.mode == 'local': # create query parameter ModelClasses self._create_query_modelclasses() # this adds spaxel x, y into the defaults for a dap zonal query self._adjust_defaults() # join tables self._join_tables() # add condition if searchfilter: self.add_condition() # add PipelineInfo self._addPipeline() # check if the query is a dap query if self._isdapquery: self._buildDapQuery() self._check_dapall_query() def __repr__(self): return ('Marvin Query(filter={4}, mode={0}, limit={1}, sort={2}, order={3})' .format(repr(self.mode), self.limit, self.sort, repr(self.order), self.searchfilter)) def _doLocal(self): ''' Tests if it is possible to perform queries locally. ''' if not config.db or not self.session: warnings.warn('No local database found. Cannot perform queries.', MarvinUserWarning) raise MarvinError('No local database found. Query cannot be run in local mode') else: self.mode = 'local' def _doRemote(self): ''' Sets up to perform queries remotely. ''' if not config.urlmap: raise MarvinError('No URL Map found. Cannot make remote query calls!') else: self.mode = 'remote' def _check_query(self, name): ''' Check if a string is inside the query statement ''' qstate = str(self.query.statement.compile(compile_kwargs={'literal_binds':True})) return name in qstate def _checkInFilter(self, name='dapdb'): ''' Check if the given name is in the schema of any of the filter params ''' if self.mode == 'local': fparams = self.marvinform._param_form_lookup.mapToColumn(self.filterparams.keys()) fparams = [fparams] if not isinstance(fparams, list) else fparams inschema = [name in c.class_.__table__.schema for c in fparams] elif self.mode == 'remote': inschema = [] return True if any(inschema) else False def _check_shortcuts_in_filter(self, strfilter): ''' Check for shortcuts in the string filter Replaces shortcuts in the string searchfilter with the full tables and names. is there a better way?
''' # table shortcuts # for key in self.marvinform._param_form_lookup._tableShortcuts.keys(): # #if key in strfilter: # if re.search('{0}.[a-z]'.format(key), strfilter): # strfilter = strfilter.replace(key, self.marvinform._param_form_lookup._tableShortcuts[key]) # name shortcuts for key in self.marvinform._param_form_lookup._nameShortcuts.keys(): if key in strfilter: # strfilter = strfilter.replace(key, self.marvinform._param_form_lookup._nameShortcuts[key]) param_form_lookup = self.marvinform._param_form_lookup strfilter = re.sub(r'\b{0}\b'.format(key), '{0}'.format(param_form_lookup._nameShortcuts[key]), strfilter) return strfilter def _adjust_defaults(self): ''' Adjust the default parameters to include necessary parameters For any query involving DAP DB, always return the spaxel index TODO: change this to spaxel x and y TODO: change this entirely ''' dapschema = ['dapdb' in c.class_.__table__.schema for c in self.queryparams] if any(dapschema): dapcols = ['spaxelprop.x', 'spaxelprop.y', 'bintype.name', 'template.name'] self.defaultparams.extend(dapcols) self.params.extend(dapcols) self.params = list(OrderedDict.fromkeys(self.params)) self._create_query_modelclasses() # qpdap = self.marvinform._param_form_lookup.mapToColumn(dapcols) # self.queryparams.extend(qpdap) # self.queryparams_order.extend([q.key for q in qpdap]) def set_returnparams(self, returnparams): ''' Loads the user input parameters into the query params list Adds a list of string parameter names into the main list of query parameters to return Parameters: returnparams (list): A string list of the parameters you wish to return in the query ''' if returnparams: returnparams = [returnparams] if not isinstance(returnparams, list) else returnparams # look up shortcut names for the return parameters full_returnparams = [self.marvinform._param_form_lookup._nameShortcuts[rp] if rp in self.marvinform._param_form_lookup._nameShortcuts else rp for rp in returnparams] self._returnparams = full_returnparams # remove any return parameters that are also defaults use_only = [f for f in full_returnparams if f not in self.defaultparams] self.params.extend(use_only) def set_defaultparams(self): ''' Loads the default params for a given return type TODO - change mangaid to plateifu once plateifu works in cube, maps, rss, modelcube - file objects spaxel, map, rssfiber - derived objects (no file) these are also the default params except any query on spaxelprop should return spaxel_index (x/y) Minimum parameters to instantiate a Marvin Tool cube - return plateifu/mangaid modelcube - return plateifu/mangaid, bintype, template rss - return plateifu/mangaid maps - return plateifu/mangaid, bintype, template spaxel - return plateifu/mangaid, spaxel x and y map - do not instantiate directly (plateifu/mangaid, bintype, template, property name, channel) rssfiber - do not instantiate directly (plateifu/mangaid, fiberid) return any of our tools ''' assert self.returntype in [None, 'cube', 'spaxel', 'maps', 'rss', 'modelcube'], 'Query returntype must be either cube, spaxel, maps, modelcube, rss' self.defaultparams = ['cube.mangaid', 'cube.plate', 'cube.plateifu', 'ifu.name'] if self.returntype == 'spaxel': pass #self.defaultparams.extend(['spaxel.x', 'spaxel.y']) elif self.returntype == 'modelcube': self.defaultparams.extend(['bintype.name', 'template.name']) elif self.returntype == 'rss': pass elif self.returntype == 'maps': self.defaultparams.extend(['bintype.name', 'template.name']) # self.defaultparams.extend(['spaxelprop.x', 'spaxelprop.y']) # add to main set of params self.params.extend(self.defaultparams) def _create_query_modelclasses(self): ''' Creates a list of database ModelClasses from a list of parameter names ''' # de-duplicate the parameter lists while preserving their order self.params = list(OrderedDict.fromkeys(self.params)) self.queryparams = self.marvinform._param_form_lookup.mapToColumn(self.params) self.queryparams = list(OrderedDict.fromkeys(self.queryparams)) self.queryparams_order = [q.key for q in self.queryparams] def get_available_params(self, paramdisplay='best'): ''' Retrieve the available parameters to query on Retrieves a list of the available query parameters. Can either retrieve a list of all the parameters or only the vetted parameters. Parameters: paramdisplay (str {all|best}): String indicating to grab either all or just the vetted parameters. Default is to only return 'best', i.e. vetted parameters Returns: qparams (list): a list of all of the available queryable parameters ''' assert paramdisplay in ['all', 'best'], 'paramdisplay can only be either "all" or "best"!' if paramdisplay == 'all': qparams = self.datamodel.groups.list_params('full') elif paramdisplay == 'best': qparams = query_params return qparams @remote_mode_only def save(self, path=None, overwrite=False): ''' Save the query as a pickle object Parameters: path (str): Filepath and name of the pickled object overwrite (bool): Set this to overwrite an existing pickled file Returns: path (str): The filepath and name of the pickled object ''' sf = self.searchfilter.replace(' ', '') if self.searchfilter else 'anon' # set the path if not path: path = os.path.expanduser('~/marvin_query_{0}.mpf'.format(sf)) # check for file extension if not os.path.splitext(path)[1]: path = os.path.join(path + '.mpf') path = os.path.realpath(path) if os.path.isdir(path): raise MarvinError('path must be a full route, including the filename.') if os.path.exists(path) and not overwrite: warnings.warn('file already exists. Not overwriting.', MarvinUserWarning) return dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname) # set bad pickled attributes to None attrs = ['session', 'datamodel', 'marvinform', 'myform', '_modelgraph'] # pickle the query try: with temp_setattr(self, attrs, None): pickle.dump(self, open(path, 'wb'), protocol=-1) except Exception as ee: if os.path.exists(path): os.remove(path) raise MarvinError('Error found while pickling: {0}'.format(str(ee))) return path @classmethod def restore(cls, path, delete=False): ''' Restore a pickled object Parameters: path (str): The filename and path to the pickled object delete (bool): Turn this on to delete the pickled file upon restore Returns: Query (instance): The instantiated Marvin Query class ''' obj = marvin_pickle.restore(path, delete=delete) obj._modelgraph = marvindb.modelgraph obj.session = marvindb.session obj.datamodel = datamodel[obj._release] # if obj.allspaxels: # obj.datamodel.use_all_spaxels() obj.marvinform = obj.datamodel._marvinform return obj def set_filter(self, searchfilter=None): ''' Parses a filter string and adds it into the query. Parses a natural language string filter into the appropriate SQL filter syntax. The string is a boolean join of one or more conditions of the form "PARAMETER_NAME OPERAND VALUE" Parameter names must be uniquely specified. For example, nsa.z is a unique parameter name in the database and can be specified as such. On the other hand, name is not a unique parameter name in the database, and must be clarified with the desired table.
Parameter Naming Convention: NSA redshift == nsa.z IFU name == ifu.name Pipeline name == pipeline_info.name Allowed Joins: AND | OR | NOT In the absence of parentheses, the precedence of joins follows: NOT > AND > OR Allowed Operands: == | != | <= | >= | < | > | = Notes: Operand == maps to a strict equality (x == 5 --> x is equal to 5) Operand = maps to SQL LIKE (x = 5 --> x contains the string 5; x = '%5%') (x = 5* --> x starts with the string 5; x = '5%') (x = *5 --> x ends with the string 5; x = '%5') Parameters: searchfilter (str): A (natural language) string containing the filter conditions in the query; written as you would say it. Example: >>> # Filter string >>> filter = "nsa.z < 0.012 and ifu.name = 19*" >>> # Converts to >>> and_(nsa.z<0.012, ifu.name=19*) >>> # SQL syntax >>> mangasampledb.nsa.z < 0.012 AND lower(mangadatadb.ifudesign.name) LIKE lower('19%') >>> # Filter string >>> filter = 'cube.plate < 8000 and ifu.name = 19 or not (nsa.z > 0.1 or not cube.ra > 225.)' >>> # Converts to >>> or_(and_(cube.plate<8000, ifu.name=19), not_(or_(nsa.z>0.1, not_(cube.ra>225.)))) >>> # SQL syntax >>> mangadatadb.cube.plate < 8000 AND lower(mangadatadb.ifudesign.name) LIKE lower(('%' || '19' || '%')) >>> OR NOT (mangasampledb.nsa.z > 0.1 OR mangadatadb.cube.ra <= 225.0) ''' if searchfilter: # if params is a string, then parse and filter if isinstance(searchfilter, six.string_types): searchfilter = self._check_shortcuts_in_filter(searchfilter) try: parsed = parse_boolean_search(searchfilter) except BooleanSearchException as e: raise MarvinError('Your boolean expression contained a syntax error: {0}'.format(e)) else: raise MarvinError('Input parameters must be a natural language string!') # update the parameters dictionary self.searchfilter = searchfilter self._parsed = parsed self._checkParsed() self.strfilter = str(parsed) self.filterparams.update(parsed.params) filterkeys = [key for key in parsed.uniqueparams if key not in self.params] self.params.extend(filterkeys) # print filter if not self.quiet: print('Your parsed filter is: ') print(parsed) # Perform local vs remote modes if self.mode == 'local': # Pass into Marvin Forms try: self._setForms() except KeyError as e: self.reset() raise MarvinError('Could not set parameters. Multiple entries found for key. Be more specific: {0}'.format(e)) elif self.mode == 'remote': # Is it possible to build a query remotely but still allow for user manipulation? pass def _setForms(self): ''' Set the appropriate WTForms in myforms and set the parameters ''' self._paramtree = self.marvinform._paramtree for key in self.filterparams.keys(): self.myforms[key] = self.marvinform.callInstance(self.marvinform._param_form_lookup[key], params=self.filterparams) self.myparamtree[self.myforms[key].Meta.model.__name__][key] def _validateForms(self): ''' Validate all the data in the forms ''' formkeys = list(self.myforms.keys()) isgood = [form.validate() for form in self.myforms.values()] if not all(isgood): inds = np.where(np.invert(isgood))[0] for index in inds: self._errors.append(list(self.myforms.values())[index].errors) raise MarvinError('Parameters failed to validate: {0}'.format(self._errors)) def add_condition(self): ''' Loop over all input forms and add a filter condition based on the input parameter form data. ''' # validate the forms self._validateForms() # build the actual filter self.build_filter() # add the filter to the query if not isinstance(self.filter, type(None)): self.query = self.query.filter(self.filter) @makeBaseQuery def _join_tables(self): ''' Build the join statement from the input parameters ''' self._modellist = [param.class_ for param in self.queryparams] # Gets the list of joins from ModelGraph. Uses Cube as nexus, so that # the order of the joins is the correct one. # TODO: at some point, all the queries should be generalised so that # we don't assume that we are querying a cube. joinmodellist = self._modelgraph.getJoins(self._modellist, format_out='models', nexus=marvindb.datadb.Cube) # sublist = [model for model in modellist if model.__tablename__ not in self._basetable and not self._tableInQuery(model.__tablename__)] # self.joins.extend([model.__tablename__ for model in sublist]) # self.query = self.query.join(*sublist) for model in joinmodellist: name = '{0}.{1}'.format(model.__table__.schema, model.__tablename__) if not self._tableInQuery(name): self.joins.append(model.__tablename__) if 'template' not in model.__tablename__: self.query = self.query.join(model) else: # assume template_kin only now, TODO deal with template_pop later self.query = self.query.join(model, marvindb.dapdb.Structure.template_kin) def build_filter(self): ''' Builds a filter condition to load into the sqlalchemy filter. ''' try: self.filter = self._parsed.filter(self._modellist) except BooleanSearchException as e: raise MarvinError('Your boolean expression could not be mapped to the model: {0}'.format(e)) def update_params(self, param): ''' Update the input parameters ''' # coerce values to text and convert glob-style * into SQL % wildcards (safe on both py2 and py3) param = {key: six.text_type(val).replace('*', '%') for key, val in param.items() if key in self.filterparams.keys()} self.filterparams.update(param) self._setForms() def _update_params(self, param): ''' this is now broken, this should update the boolean params in the filter condition ''' ''' Update any input parameters that have been bound already. Input is a dictionary of key, value pairs representing the parameter name to update, and the value (number only) to update. This does not allow changing the operand. Does not update self.params e.g. original input parameters {'nsa.z': '< 0.012'} newparams = {'nsa.z': '0.2'} update_params(newparams) new condition will be nsa.z < 0.2 ''' param = {key: six.text_type(val).replace('*', '%') for key, val in param.items() if key in self.filterparams.keys()} self.query = self.query.params(param) def _alreadyInFilter(self, names): ''' Checks if the parameter name has already been added into the filter ''' infilter = None if names: if not isinstance(self.query, type(None)): if not isinstance(self.query.whereclause, type(None)): wc = str(self.query.whereclause.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True})) infilter = any([name in wc for name in names]) return infilter @makeBaseQuery @checkCondition @updateConfig def run(self, start=None, end=None, raw=None, orm=None, core=None): ''' Runs a Marvin Query Runs the query and returns an instance of the Marvin Results class to deal with results. Parameters: start (int): Starting value of a subset. Default is None end (int): Ending value of a subset.
Default is None Returns: results (object): An instance of the Marvin Results class containing the results from the Query. ''' if self.mode == 'local': # Check for adding a sort self._sortQuery() # Check to add the cache if self._caching: from marvin.core.caching_query import FromCache self.query = self.query.options(FromCache("default")).\ options(*marvindb.cache_bits) # turn on streaming of results self.query = self.query.execution_options(stream_results=True) # get the total count, and if more than count_threshold results, only return the first limit entries starttime = datetime.datetime.now() # check for query and get count if marvindb.isdbconnected: qm = self._check_history(check_only=True) self.totalcount = qm.count if qm else None # run count if it doesn't exist if self.totalcount is None: self.totalcount = self.query.count() # get the new count if start and end exist if start and end: count = (end - start) else: count = self.totalcount # # run the query # res = self.query.slice(start, end).all() # count = len(res) # self.totalcount = count if not self.totalcount else self.totalcount # check history if marvindb.isdbconnected: query_meta = self._check_history() if count > self.count_threshold and self.return_all is False: # res = res[0:self.limit] start = 0 end = self.limit count = (end - start) warnings.warn('Results contain more than {0} entries. ' 'Only returning first {1}'.format(self.count_threshold, self.limit), MarvinUserWarning) elif self.return_all is True: warnings.warn('Warning: Attempting to return all results. This may take a long time or crash.', MarvinUserWarning) start = None end = None elif start and end: warnings.warn('Getting subset of data {0} to {1}'.format(start, end), MarvinUserWarning) # slice the query query = self.query.slice(start, end) # run the query if not any([raw, core, orm]): raw = True if raw: # use the db api cursor sql = str(self._get_sql(query)) conn = marvindb.db.engine.raw_connection() cursor = conn.cursor('query_cursor') cursor.execute(sql) res = self._fetch_data(cursor) conn.close() elif core: # use the core connection sql = str(self._get_sql(query)) with marvindb.db.engine.connect() as conn: results = conn.execution_options(stream_results=True).execute(sql) res = self._fetch_data(results) elif orm: # use the orm query yield_num = int(10**(np.floor(np.log10(self.totalcount)))) results = string_folding_wrapper(query.yield_per(yield_num), keys=self.params) res = list(results) # get the runtime endtime = datetime.datetime.now() self.runtime = (endtime - starttime) # clear the session self.session.close() # pass the results into Marvin Results final = Results(results=res, query=query, count=count, mode=self.mode, returntype=self.returntype, queryobj=self, totalcount=self.totalcount, chunk=self.limit, runtime=self.runtime, start=start, end=end) # get the final time posttime = datetime.datetime.now() self.finaltime = (posttime - starttime) return final elif self.mode == 'remote': # Fail if no route map initialized if not config.urlmap: raise MarvinError('No URL Map found. Cannot make remote call') if self.return_all: warnings.warn('Warning: Attempting to return all results. This may take a long time or crash.') # Get the query route url = config.urlmap['api']['querycubes']['url'] params = {'searchfilter': self.searchfilter, 'params': ','.join(self._returnparams) if self._returnparams else None, 'returntype': self.returntype, 'limit': self.limit, 'sort': self.sort, 'order': self.order, 'release': self._release, 'return_all': self.return_all, 'start': start, 'end': end, 'caching': self._caching} try: ii = Interaction(route=url, params=params, stream=True) except Exception as e: # if a remote query fails for any reason, then try to clean them up # self._cleanUpQueries() raise MarvinError('API Query call failed: {0}'.format(e)) else: res = ii.getData() self.queryparams_order = ii.results['queryparams_order'] self.params = ii.results['params'] self.query = ii.results['query'] count = ii.results['count'] chunk = int(ii.results['chunk']) totalcount = ii.results['totalcount'] query_runtime = ii.results['runtime'] resp_runtime = ii.response_time if self.return_all: msg = 'Returning all {0} results'.format(totalcount) else: msg = 'Only returning the first {0} results.'.format(count) if not self.quiet: print('Results contain a total of {0}. {1}'.format(totalcount, msg)) return Results(results=res, query=self.query, mode=self.mode, queryobj=self, count=count, returntype=self.returntype, totalcount=totalcount, chunk=chunk, runtime=query_runtime, response_time=resp_runtime, start=start, end=end) def _fetch_data(self, obj): ''' Fetch query results using fetchall or fetchmany ''' res = [] if not self.return_all: res = obj.fetchall() else: while True: rows = obj.fetchmany(100000) if rows: res.extend(rows) else: break return res def _check_history(self, check_only=None): ''' Check the query against the query history schema ''' sqlcol = self.marvinform._param_form_lookup.mapToColumn('sql') stringfilter = self.searchfilter.strip().replace(' ', '') rawsql = self.show().strip() return_params = ','.join(self._returnparams) qm = self.session.query(sqlcol.class_).\ filter(sqlcol == rawsql, sqlcol.class_.release == self._release).one_or_none() if check_only: return qm with self.session.begin(): if not qm: qm = sqlcol.class_(searchfilter=stringfilter, n_run=1, release=self._release, count=self.totalcount, sql=rawsql, return_params=return_params) self.session.add(qm) else: qm.n_run += 1 return qm def _cleanUpQueries(self): ''' Attempt to clean up idle queries on the server This is a hack to try to kill all idle processes on the server. Using pg_terminate_backend and pg_stat_activity it terminates all transactions that are in an idle, or idle in transaction, state that have been running for > 1 minute, and whose application_name is not psql, and the process is not the one initiating the terminate. The rank part ranks the processes and originally killed all > 1, to leave one alive as a warning to the others. I've changed this to 0 to kill everything. I think this will sometimes also leave a newly orphaned idle ROLLBACK transaction. Not sure why.
''' if self.mode == 'local': sql = ("with inactive as (select p.pid, rank() over (partition by \ p.client_addr order by p.backend_start ASC) as rank from \ pg_stat_activity as p where p.application_name !~ 'psql' \ and p.state ilike '%idle%' and p.pid <> pg_backend_pid() and \ current_timestamp-p.state_change > interval '1 minutes') \ select pg_terminate_backend(pid) from inactive where rank > 0;") self.session.expire_all() self.session.expunge_all() res = self.session.execute(sql) tmp = res.fetchall() #self.session.close() #marvindb.db.engine.dispose() elif self.mode == 'remote': # Fail if no route map initialized if not config.urlmap: raise MarvinError('No URL Map found. Cannot make remote call') # Get the query route url = config.urlmap['api']['cleanupqueries']['url'] params = {'task': 'clean', 'release': self._release} try: ii = Interaction(route=url, params=params) except Exception as e: raise MarvinError('API Query call failed: {0}'.format(e)) else: res = ii.getData() def _getIdleProcesses(self): ''' Get a list of all idle processes on the server This grabs a list of all processes in a state of idle, or idle in transaction, using pg_stat_activity and returns the process id, the state, and the query ''' if self.mode == 'local': sql = ("select p.pid,p.state,p.query from pg_stat_activity as p \ where p.state ilike '%idle%';") res = self.session.execute(sql) procs = res.fetchall() elif self.mode == 'remote': # Fail if no route map initialized if not config.urlmap: raise MarvinError('No URL Map found. Cannot make remote call') # Get the query route url = config.urlmap['api']['cleanupqueries']['url'] params = {'task': 'getprocs', 'release': self._release} try: ii = Interaction(route=url, params=params) except Exception as e: raise MarvinError('API Query call failed: {0}'.format(e)) else: procs = ii.getData() return procs def _sortQuery(self): ''' Sort the query by a given parameter ''' if not isinstance(self.sort, type(None)): # set the sort variable ModelClass parameter if '.' in self.sort: param = self.datamodel.parameters[str(self.sort)].full else: param = self.datamodel.parameters.get_full_from_remote(self.sort) sortparam = self.marvinform._param_form_lookup.mapToColumn(param) # If order is specified, then do the sort if self.order: assert self.order in ['asc', 'desc'], 'Sort order parameter must be either "asc" or "desc"' # Check if order by already applied if 'ORDER' in str(self.query.statement): self.query = self.query.order_by(None) # Do the sorting if 'desc' in self.order: self.query = self.query.order_by(desc(sortparam)) else: self.query = self.query.order_by(sortparam) @updateConfig def show(self, prop=None): ''' Prints info to the console Displays the query to the console with parameter variables plugged in. Works only in LOCAL mode. Input prop can be one of: query, tables, joins, or filter. Allowed Values for Prop: query - displays the entire query (default if nothing specified) tables - displays the tables that have been joined in the query joins - same as tables filter - displays only the filter used on the query Parameters: prop (str): The type of info to print.
Example: TODO add example ''' assert prop in [None, 'query', 'tables', 'joins', 'filter'], 'Input must be query, tables, joins, or filter' if self.mode == 'local': if not prop or 'query' in prop: sql = self._get_sql(self.query) elif prop == 'tables': sql = self.joins elif prop == 'filter': '''oddly this does not update when bound parameters change, but the statement above does ''' sql = self.query.whereclause.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True}) else: sql = self.__getattribute__(prop) return str(sql) elif self.mode == 'remote': sql = 'Cannot show full SQL query in remote mode, use the Results showQuery' warnings.warn(sql, MarvinUserWarning) return sql def _get_sql(self, query): ''' Get the sql for a given query Parameters: query (object): An SQLAlchemy Query object Returns: A raw sql string ''' return query.statement.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True}) def reset(self): ''' Resets all query attributes ''' self.__init__() @updateConfig def _createBaseQuery(self): ''' Create the base query session object. Passes in a list of parameters defined in returnparams, filterparams, and defaultparams ''' labeledqps = [qp.label(self.params[i]) for i, qp in enumerate(self.queryparams)] self.query = self.session.query(*labeledqps) def _query_column(self, column_name): ''' query and return a specific column from the current query ''' qp = self.marvinform._param_form_lookup.mapToColumn(column_name) qp = qp.label(column_name) return self.query.from_self(qp).all() def _getPipeInfo(self, pipename): ''' Retrieve the pipeline Info for a given pipeline version name ''' assert pipename.lower() in ['drp', 'dap'], 'Pipeline Name must either be DRP or DAP' # bindparam values bindname = 'drpver' if pipename.lower() == 'drp' else 'dapver' bindvalue = self._drpver if pipename.lower() == 'drp' else self._dapver # class names if pipename.lower() == 'drp': inclasses = self._tableInQuery('cube') or 'cube' in str(self.query.statement.compile()) elif pipename.lower() == 'dap': inclasses = self._tableInQuery('file') or 'file' in str(self.query.statement.compile()) # set alias pipealias = self._drp_alias if pipename.lower() == 'drp' else self._dap_alias # get the pipeinfo if inclasses: pipeinfo = marvindb.session.query(pipealias).\ join(marvindb.datadb.PipelineName, marvindb.datadb.PipelineVersion).\ filter(marvindb.datadb.PipelineName.label == pipename.upper(), marvindb.datadb.PipelineVersion.version == bindparam(bindname, bindvalue)).one() else: pipeinfo = None return pipeinfo def _addPipeline(self): ''' Adds the DRP and DAP Pipeline Info into the Query ''' self._drp_alias = aliased(marvindb.datadb.PipelineInfo, name='drpalias') self._dap_alias = aliased(marvindb.datadb.PipelineInfo, name='dapalias') drppipe = self._getPipeInfo('drp') dappipe = self._getPipeInfo('dap') # Add DRP pipeline version if drppipe: self.query = self.query.join(self._drp_alias, marvindb.datadb.Cube.pipelineInfo).\ filter(self._drp_alias.pk == drppipe.pk) # Add DAP pipeline version if dappipe: self.query = self.query.join(self._dap_alias, marvindb.dapdb.File.pipelineinfo).\ filter(self._dap_alias.pk == dappipe.pk) @makeBaseQuery def _tableInQuery(self, name): ''' Checks if a given SQL table is already in the SQL query ''' # do the check try: isin = name in str(self.query._from_obj[0]) except IndexError as e: isin = False except AttributeError as e: if isinstance(self.query, six.string_types): isin = name in self.query else: isin = False return isin def _group_by(self, params=None): ''' Group the query by a set of parameters Parameters: params (list): A list of string parameter names to group the query by Returns: A new SQLA Query object ''' if not params: params = [d for d in self.defaultparams if 'spaxelprop' not in d] newdefaults = self.marvinform._param_form_lookup.mapToColumn(params) self.params = params newq = self.query.from_self(*newdefaults).group_by(*newdefaults) return newq # ------------------------------------------------------ # DAP Specific Query Modifiers - subqueries, etc go below here # ----------------------------------------------------- def _buildDapQuery(self): ''' Builds a DAP zonal query ''' # get the appropriate Junk (SpaxelProp) ModelClass self._junkclass = self.marvinform.\ _param_form_lookup['spaxelprop.file'].Meta.model # get good spaxels # bingood = self.getGoodSpaxels() # self.query = self.query.\ # join(bingood, bingood.c.binfile == marvindb.dapdb.Junk.file_pk) # check for additional modifier criteria if self._parsed.functions: # loop over all functions for fxn in self._parsed.functions: # look up the function name in the marvinform dictionary try: methodname = self.marvinform._param_fxn_lookup[fxn.fxnname] except KeyError as e: self.reset() raise MarvinError('Could not set function: {0}'.format(e)) else: # run the method methodcall = self.__getattribute__(methodname) methodcall(fxn) def _check_dapall_query(self): ''' Checks if the query is on the DAPall table. ''' isdapall = self._check_query('dapall') if isdapall: self.query = self._group_by() def _getGoodSpaxels(self): ''' Subquery - Counts the number of good spaxels Counts the number of good spaxels with binid != -1 Uses junk.binid_pk != 9999 since this is known and set. Removes the need to join to the binid table Returns: bincount (subquery): An SQLalchemy subquery to be joined into the main query object ''' spaxelname = self._junkclass.__name__ bincount = self.session.query(self._junkclass.file_pk.label('binfile'), func.count(self._junkclass.pk).label('goodcount')) # optionally add the filter if the table is SpaxelProp if 'CleanSpaxelProp' not in spaxelname: bincount = bincount.filter(self._junkclass.binid != -1) # group the results by file_pk bincount = bincount.group_by(self._junkclass.file_pk).subquery('bingood', with_labels=True) return bincount def _getCountOf(self, expression): ''' Subquery - Counts spaxels satisfying an expression Counts the number of spaxels of a given parameter above a certain value. Parameters: expression (str): The filter expression to parse Returns: valcount (subquery): An SQLalchemy subquery to be joined into the main query object Example: >>> expression = 'junk.emline_gflux_ha_6564 >= 25' ''' # parse the expression into name, operator, value param, ops, value = self._parseExpression(expression) # look up the InstrumentedAttribute, Operator, and convert Value attribute = self.marvinform._param_form_lookup.mapToColumn(param) op = opdict[ops] value = float(value) # Build the subquery valcount = self.session.query(self._junkclass.file_pk.label('valfile'), (func.count(self._junkclass.pk)).label('valcount')).\ filter(op(attribute, value)).\ group_by(self._junkclass.file_pk).subquery('goodhacount', with_labels=True) return valcount def getPercent(self, fxn, **kwargs): ''' Query - Computes count comparisons Retrieves the number of objects that satisfy a given expression in x% of good spaxels. The expression is of the form Parameter Operand Value. This function is mapped to the "npergood" filter name.
Syntax: fxnname(expression) operator value Parameters: fxn (str): The function condition used in the query filter Example: >>> fxn = 'npergood(junk.emline_gflux_ha_6564 > 25) >= 20' >>> Syntax: npergood() - function name >>> npergood(expression) operator value >>> >>> Select objects that have Ha flux > 25 in more than >>> 20% of their (good) spaxels. ''' # parse the function into name, condition, operator, and value name, condition, ops, value = self._parseFxn(fxn) percent = float(value) / 100. op = opdict[ops] # Retrieve the necessary subqueries bincount = self._getGoodSpaxels() valcount = self._getCountOf(condition) # Join to the main query self.query = self.query.join(bincount, bincount.c.binfile == self._junkclass.file_pk).\ join(valcount, valcount.c.valfile == self._junkclass.file_pk).\ filter(op(valcount.c.valcount, percent * bincount.c.goodcount)) # Group the results by main defaultdatadb parameters, # so as not to include all spaxels newdefs = [d for d in self.defaultparams if 'spaxelprop' not in d] self.query = self._group_by(params=newdefs) # newdefaults = self.marvinform._param_form_lookup.mapToColumn(newdefs) # self.params = newdefs # self.query = self.query.from_self(*newdefaults).group_by(*newdefaults) def _parseFxn(self, fxn): ''' Parse a fxn condition ''' return fxn.fxnname, fxn.fxncond, fxn.op, fxn.value def _parseExpression(self, expr): ''' Parse an expression ''' return expr.fullname, expr.op, expr.value def _checkParsed(self): ''' Check the boolean parsed object check for function conditions vs normal. This should be moved into SQLalchemy Boolean Search ''' # Triggers for only one filter and it is a function condition if hasattr(self._parsed, 'fxn'): self._parsed.functions = [self._parsed] # Checks for shortcut names and replaces them in params # now redundant after pre-check on searchfilter for key, val in self._parsed.params.items(): if key in self.marvinform._param_form_lookup._nameShortcuts.keys(): newkey = self.marvinform._param_form_lookup._nameShortcuts[key] self._parsed.params.pop(key) self._parsed.params.update({newkey: val})
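To make the workflow above concrete, here is a minimal usage sketch of the Query class, following the class docstring example; it assumes a properly configured Marvin installation (a local database or remote API access), and the filter values are illustrative:

from marvin.tools.query import Query, doQuery

# build and run in one step; start/end select a subset of rows
q, results = doQuery(searchfilter='nsa.z < 0.1 and ifu.name = 19*',
                     returnparams=['cube.ra', 'cube.dec'],
                     limit=50, sort='nsa.z', order='desc')

# or build the query explicitly and inspect it before running
q = Query(searchfilter='nsa.z < 0.1 and ifu.name = 19*')
print(q.get_available_params('best'))  # vetted queryable parameters
results = q.run(start=0, end=10)       # a marvin Results instance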
That’s a nice sentiment, but Netflix is an entertainment streaming service with highly personalized experiences for users. You’re offering learning content, and it’s often not that tailored to the individual. So it’s not quite the same, for obvious reasons. However, by looking at some of the ways we recommend training, we can improve the learning experience without creating an overwhelming interface for learners. Today, there are an incredible number of options for learning content, whether inside or outside the organization. And curation is quickly becoming the name of the game for learning leaders. The challenge is in how to curate the content to deliver the most personalized, tailored experience for users. Today’s algorithms can factor in a ton of signals to determine what recommendations the machine will make. For example, Amazon has had a patent for years that allows it to anticipate the needs of users based on a significant amount of user data and interactions. Below is an overview of some common and not-so-common signals that, when matched with AI, can create a more personalized learning experience for all of your people. User preferences – by far the easiest to set up in someone’s profile, user preferences can be tied to job or role. For example, managers take managerial training. This can also include other defaults or user-defined preferences such as content about specific aspirational topics or skill areas of focus. Consumption by similar people – by looking at what similar people take within the LMS or LXP (learning experience platform), systems can recommend training taken by others who share your current role or interests. If you’re a design specialist and other design specialists are taking the UX course, the system can recommend it to you as well. Consumption by people in my preferred role/career path – this takes the point above a step further. If you’re currently a marketing associate but want to be a marketing manager, the system can highlight courses being taken by those at a higher level to help you see what kinds of learning content you need to succeed in that preferred or aspirational role or career path. Views (popularity) – recommending popular content isn’t a bad move, especially if you’re trying to get a critical mass of traffic onto the learning platform, but this should not be the only indicator of what content to consume next. We’ve all been sucked into the funny work video that goes around via email and IM at work – those are popular but don’t necessarily impact performance. Star ratings and comments (value) – unlike raw view counts for popularity, star ratings and comments can offer a deeper layer of insight into the quality and value of content. Star ratings, thumbs up/down voting, or other simple measures give a feedback loop about whether content is valuable or not, and comments can offer deeper insights into the specific value or feedback points from users. Note: the team at onQ actually has a way to do this inside video assets where users can comment and create asynchronous social learning conversations on videos within a learning library. The algorithm then shows where users are engaged, disengaged, or confused in 10-second increments within a video. Very interesting. For many learning systems, these first five components are either standard or becoming standard. But what’s next on the horizon?
Consider the following items we’ve seen other companies following in a consumer context that might help us to drive more consumption of learning content. Time of day, week, year – in a recent interview with Wired magazine, the product team at Netflix talked about how its algorithm selects content for users. One surprising component was time of day. If users are looking at content late at night, the system is more likely to recommend partially consumed content (finishing off earlier views) than new, unwatched content. This could be extrapolated for learning purposes. If someone is logged in from a public wifi, they may be better suited to shorter content (potentially commuting or on travel) whereas logging in from work when their calendar has three hours available might signify the potential for consuming longer-form content. On a weekly basis, this might also flow around work. On an annual basis, users may want deeper dives early in the year to create new competencies but later in the year may be settled in and looking to hone or refine established capabilities. Title performance in search vs. consumption – research from Groupon’s data science team shows that there is a science behind what we open and read, and this has direct translations to what people open and examine from a learning context. What Groupon found was that by examining the performance of what had encouraged opens and clickthroughs in the past, the team could target those same types of terms and syntax to create higher engagement with the audience. In learning, we can measure this by modifying the titles and descriptions we define and then measuring that against consumption patterns. Titles may change open rates but may not affect completions, and descriptions may or may not contribute, but without considering them and measuring the impact, it’s hard to say. Netflix and other tech firms use A/B tests to see which content is consumed more often, then the highest converting option is scaled across the platform for all users to ensure the best performance. Job performance/productivity/output – It’s easy for a Netflix or an Amazon to see what is working or not: subscriber counts support their approach. At work, we need to get better at this, though. One interesting approach I recently saw was IBM’s Your Learning LXP that targets key skills across the organization, creating a powerful set of reports that leaders can use to not only see training volume and demographics but also impact. These are some of the signals we’ve run across in our research. Though this isn’t an exhaustive list, it helps to highlight how artificial intelligence technologies are supporting areas like learning and development through better content curation.
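The signal types above lend themselves to a simple weighted-scoring illustration. The sketch below is purely illustrative and is not any vendor's actual algorithm; every field name, weight, and threshold is a hypothetical placeholder:

# Toy ranking of learning content by blending several of the signals
# discussed above; all weights and fields are made up for illustration.
def score_course(course, learner, hour_of_day):
    score = 0.0
    if course['topic'] in learner['preferred_topics']:   # user preferences
        score += 3.0
    score += 2.0 * course['peer_take_rate']              # consumption by similar people
    score += 1.0 * course['views_percentile']            # raw popularity
    score += 2.5 * (course['avg_stars'] / 5.0)           # rated value
    if hour_of_day >= 21 and course['partially_done']:   # time-of-day signal:
        score += 1.5                                     # favor finishing content late at night
    return score

courses = [
    {'title': 'UX Basics', 'topic': 'UX', 'peer_take_rate': 0.6,
     'views_percentile': 0.9, 'avg_stars': 4.5, 'partially_done': False},
    {'title': 'Intro to SQL', 'topic': 'data', 'peer_take_rate': 0.3,
     'views_percentile': 0.7, 'avg_stars': 4.0, 'partially_done': True},
]
learner = {'preferred_topics': {'UX', 'analytics'}}
ranked = sorted(courses, key=lambda c: score_course(c, learner, hour_of_day=22), reverse=True)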
#!/usr/bin/env python import logging import os from cvmfsreplica.cvmfsreplicaex import PluginConfigurationFailure, AcceptancePluginFailed from cvmfsreplica.interfaces import RepositoryPluginAcceptanceInterface from cvmfsreplica.utils import check_disk_space import cvmfsreplica.pluginsmanagement as pm class Diskspace(RepositoryPluginAcceptanceInterface): def __init__(self, repository, conf): self.log = logging.getLogger('cvmfsreplica.diskspace') self.repository = repository self.conf = conf try: self.spool_size = self.conf.getint('diskspace.spool_size') self.storage_size = self.conf.getint('diskspace.storage_size') self.reportplugins = pm.readplugins(self.repository, 'repository', 'report', self.conf.namespace('acceptance.diskspace.', exclude=True) ) except Exception: raise PluginConfigurationFailure('failed to initialize Diskspace plugin') try: self.should_abort = self.conf.getboolean('diskspace.should_abort') except Exception: self.should_abort = True # Default self.log.debug('plugin Diskspace initialized properly') def verify(self): ''' checks if there is enough disk space ''' return self._check_spool() & self._check_storage() def _check_spool(self): # FIXME: too much duplicated code SPOOL_DIR = self.repository.cvmfsconf.get('CVMFS_SPOOL_DIR') if check_disk_space(SPOOL_DIR, self.spool_size): self.log.trace('There is enough disk space for SPOOL directory') return True else: msg = 'There is not enough disk space for SPOOL. Requested=%s' % self.spool_size self._notify_failure(msg) self.log.error(msg) if self.should_abort: self.log.error('Raising exception') raise AcceptancePluginFailed(msg) else: return False def _check_storage(self): # FIXME: too much duplicated code STORAGE_DIR = self.repository.cvmfsconf.get('CVMFS_UPSTREAM_STORAGE').split(',')[1] if check_disk_space(STORAGE_DIR, self.storage_size): self.log.trace('There is enough disk space for STORAGE directory') return True else: msg = 'There is not enough disk space for STORAGE. Requested=%s' % self.storage_size self._notify_failure(msg) self.log.error(msg) if self.should_abort: self.log.error('Raising exception') raise AcceptancePluginFailed(msg) else: return False def _notify_failure(self, msg): for report in self.reportplugins: report.notifyfailure(msg)
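The plugin above delegates the actual free-space test to check_disk_space, which is imported from cvmfsreplica.utils but not shown here. A plausible sketch, under the assumption that sizes are compared in megabytes and that the helper returns a simple boolean:

import os

def check_disk_space(directory, required_mb):
    # hypothetical implementation: True when the filesystem holding
    # `directory` has at least `required_mb` megabytes available
    stats = os.statvfs(directory)
    free_mb = (stats.f_bavail * stats.f_frsize) / (1024.0 * 1024.0)
    return free_mb >= required_mb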
Cervelo's S2 Frameset boasts a stellar combination of light weight and aerodynamics, ideal for winning races or just making your next ride faster and more fun. The S2 sports a blade-like down tube, teardrop-shaped top tube and custom seatstays for optimal airflow around the bike. Yet the best part may be the oversize bottom bracket area capable of channeling everything from feathery soft pedaling to world-class wattage outputs into searing speed without a drop of waste. Plus, the Soloist comes with a 3T Funda Pro carbon fork so you can climb, descend and corner as if you're a ProTour racer.
from should_be.core import BaseMixin, ObjectMixin try: from collections.abc import Set except ImportError: # python < 3.3 from collections import Set class SetMixin(BaseMixin): target_class = Set def should_be(self, target): msg_smaller = ('{txt} should have been {val}, but did not have ' 'the items {items}') msg_bigger = ('{txt} should have been {val}, but had the extra ' 'items {items}') msg_diff = ('{txt} should have been {val}, but differed in items ' '{i1} and {i2}') try: we_had = self - target they_had = target - self if (len(we_had) != 0 and len(they_had) != 0): self.should_follow(len(we_had) == len(they_had) == 0, msg_diff, val=target, i1=we_had, i2=they_had) self.should_follow(len(we_had) == 0, msg_bigger, val=target, items=we_had) self.should_follow(len(they_had) == 0, msg_smaller, val=target, items=they_had) except TypeError: ObjectMixin.should_be.__get__(self)(target)
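As a usage illustration, the SetMixin above drives assertions like the one below. This is a sketch only: the exact activation import depends on how the should_be package wires its mixins into built-in types (shown here as should_be.all, an assumption), and should_follow is assumed to raise AssertionError on failure:

import should_be.all  # assumption: importing this activates the mixins

try:
    {1, 2}.should_be({1, 2, 3})
except AssertionError as err:
    print(err)  # built from msg_smaller: the set lacked the items {3}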
A consistent theme in Kahlhamer’s work is identity. He was born in Tucson of American Indian parentage, adopted into a family of German-American heritage and later raised in Wisconsin. Bowery Nation represents what he calls his “third place”, the fusion of his personal identities and mythologies. Join curators Jan Schall and Gaylord Torrence in conversation with Native American artist Brad Kahlhamer. Discover how 122 handmade, katsina-like dolls and birds riding on a stationary Pow Wow float form a bridge between traditional American Indian culture and the New York contemporary art world. Image: Exhibition installation view, Brad Kahlhamer, Bowery Nation, July 15, 2012-February 24, 2013, The Aldrich Contemporary Art Museum, Ridgefield, CT.
# vim: set encoding=utf-8 from itertools import takewhile import re from copy import copy from lxml import etree from regparser.grammar import amdpar, tokens from regparser.tree.struct import Node from regparser.tree.xml_parser.reg_text import build_from_section from regparser.tree.xml_parser.tree_utils import get_node_text def clear_between(xml_node, start_char, end_char): """Gets rid of any content (including xml nodes) between chars""" as_str = etree.tostring(xml_node, encoding=unicode) start_char, end_char = re.escape(start_char), re.escape(end_char) pattern = re.compile( start_char + '[^' + end_char + ']*' + end_char, re.M + re.S + re.U) return etree.fromstring(pattern.sub('', as_str)) def remove_char(xml_node, char): """Remove from this node and all its children""" as_str = etree.tostring(xml_node, encoding=unicode) return etree.fromstring(as_str.replace(char, '')) def fix_section_node(paragraphs, amdpar_xml): """ When notices are corrected, the XML for notices doesn't follow the normal syntax. Namely, paragraphs aren't inside section tags. We fix that here, by finding the preceding section tag and appending paragraphs to it. """ sections = [s for s in amdpar_xml.itersiblings(preceding=True) if s.tag == 'SECTION'] # Let's only do this if we find one section tag. if len(sections) == 1: section = copy(sections[0]) for paragraph in paragraphs: section.append(copy(paragraph)) return section def find_lost_section(amdpar_xml): """ This amdpar doesn't have any following siblings, so we look in the next regtext """ reg_text = amdpar_xml.getparent() reg_text_siblings = [s for s in reg_text.itersiblings() if s.tag == 'REGTEXT'] if len(reg_text_siblings) > 0: candidate_reg_text = reg_text_siblings[0] amdpars = [a for a in candidate_reg_text if a.tag == 'AMDPAR'] if len(amdpars) == 0: # Only do this if there are no AMDPARs for c in candidate_reg_text: if c.tag == 'SECTION': return c def find_section(amdpar_xml): """ With an AMDPAR xml, return the first section sibling """ siblings = [s for s in amdpar_xml.itersiblings()] if len(siblings) == 0: return find_lost_section(amdpar_xml) section = None for sibling in amdpar_xml.itersiblings(): if sibling.tag == 'SECTION': section = sibling if section is None: paragraphs = [s for s in amdpar_xml.itersiblings() if s.tag == 'P'] if len(paragraphs) > 0: return fix_section_node(paragraphs, amdpar_xml) return section def find_subpart(amdpar_tag): """ Look amongst an amdpar tag's siblings to find a subpart. """ for sibling in amdpar_tag.itersiblings(): if sibling.tag == 'SUBPART': return sibling def find_diffs(xml_tree, cfr_part): """Find the XML nodes that are needed to determine diffs""" # Only final notices have this format for section in xml_tree.xpath('//REGTEXT//SECTION'): section = clear_between(section, '[', ']') section = remove_char(remove_char(section, u'▸'), u'◂') for node in build_from_section(cfr_part, section): def per_node(node): if node_is_empty(node): for c in node.children: per_node(c) per_node(node) def node_is_empty(node): """Handle different ways the regulation represents no content""" return node.text.strip() == '' def switch_context(token_list, carried_context): """ Notices can refer to multiple regulations (CFR parts). If the CFR part changes, empty out the context that we carry forward.
""" def is_valid_label(label): return label and label[0] is not None if carried_context and carried_context[0] is not None: token_list = [t for t in token_list if hasattr(t, 'label')] reg_parts = [t.label[0] for t in token_list if is_valid_label(t.label)] if len(reg_parts) > 0: reg_part = reg_parts[0] if reg_part != carried_context[0]: return [] return carried_context def contains_one_instance(tokenized, element): """ Return True if tokenized contains only one instance of the class element. """ contexts = [t for t in tokenized if isinstance(t, element)] return len(contexts) == 1 def contains_one_paragraph(tokenized): """ Returns True if tokenized contains only one tokens.Paragraph """ return contains_one_instance(tokenized, tokens.Paragraph) def contains_delete(tokenized): """ Returns True if tokenized contains at least one DELETE. """ contexts = [t for t in tokenized if t.match(tokens.Verb, verb='DELETE')] return len(contexts) > 0 def remove_false_deletes(tokenized, text): """ Sometimes a statement like 'Removing the 'x' from the end of paragraph can be confused as removing the paragraph. Ensure that doesn't happen here. Likely this method needs a little more work. """ if contains_delete(tokenized): if contains_one_paragraph(tokenized): if 'end of paragraph' in text: return [] return tokenized def paragraph_in_context_moved(tokenized, initial_context): """Catches this situation: "Paragraph 1 under subheading 51(b)(1) is redesignated as paragraph 7 under subheading 51(b)", i.e. a Paragraph within a Context moved to another Paragraph within a Context. The contexts and paragraphs in this situation need to be swapped.""" final_tokens = [] idx = 0 while idx < len(tokenized) - 4: par1, cont1, verb, par2, cont2 = tokenized[idx:idx + 5] if (par1.match(tokens.Paragraph) and cont1.match(tokens.Context) and verb.match(tokens.Verb, verb=tokens.Verb.MOVE, active=False) and par2.match(tokens.Paragraph) and cont2.match(tokens.Context) and all(tok.label[1:2] == ['Interpretations'] for tok in (par1, cont1, par2, cont2))): batch, initial_context = compress_context( [cont1, par1, verb, cont2, par2], initial_context) final_tokens.extend(batch) idx += 5 else: final_tokens.append(tokenized[idx]) idx += 1 final_tokens.extend(tokenized[idx:]) return final_tokens def move_then_modify(tokenized): """The subject of modification may be implicit in the preceding move operation: A is redesignated B and changed. Replace the operation with a DELETE and a POST so it's easier to compile later.""" final_tokens = [] idx = 0 while idx < len(tokenized) - 3: move, p1, p2, edit = tokenized[idx:idx + 4] if (move.match(tokens.Verb, verb=tokens.Verb.MOVE, active=True) and p1.match(tokens.Paragraph) and p2.match(tokens.Paragraph) and edit.match(tokens.Verb, verb=tokens.Verb.PUT, active=True, and_prefix=True)): final_tokens.append(tokens.Verb(tokens.Verb.DELETE, active=True)) final_tokens.append(p1) final_tokens.append(tokens.Verb(tokens.Verb.POST, active=True)) final_tokens.append(p2) idx += 4 else: final_tokens.append(tokenized[idx]) idx += 1 final_tokens.extend(tokenized[idx:]) return final_tokens def parse_amdpar(par, initial_context): """ Parse the <AMDPAR> tags into a list of paragraphs that have changed. 
""" # Replace and "and"s in titles; they will throw off and_token_resolution for e in filter(lambda e: e.text, par.xpath('./E')): e.text = e.text.replace(' and ', ' ') text = get_node_text(par, add_spaces=True) tokenized = [t[0] for t, _, _ in amdpar.token_patterns.scanString(text)] tokenized = compress_context_in_tokenlists(tokenized) tokenized = resolve_confused_context(tokenized, initial_context) tokenized = paragraph_in_context_moved(tokenized, initial_context) tokenized = remove_false_deletes(tokenized, text) tokenized = multiple_moves(tokenized) tokenized = switch_passive(tokenized) tokenized = and_token_resolution(tokenized) tokenized, subpart = deal_with_subpart_adds(tokenized) tokenized = context_to_paragraph(tokenized) tokenized = move_then_modify(tokenized) if not subpart: tokenized = separate_tokenlist(tokenized) initial_context = switch_context(tokenized, initial_context) tokenized, final_context = compress_context(tokenized, initial_context) amends = make_amendments(tokenized, subpart) return amends, final_context def multiple_moves(tokenized): """Phrases like paragraphs 1 and 2 are redesignated paragraphs 3 and 4 are replaced with Move(active), paragraph 1, paragraph 3, Move(active) paragraph 2, paragraph 4""" converted = [] skip = 0 for idx, el0 in enumerate(tokenized): if skip: skip -= 1 elif idx < len(tokenized) - 2: el1, el2 = tokenized[idx+1:idx+3] if (el0.match(tokens.TokenList) and el2.match(tokens.TokenList) and el1.match(tokens.Verb, verb=tokens.Verb.MOVE, active=False) and len(el0.tokens) == len(el2.tokens)): skip = 2 for tidx in range(len(el0.tokens)): converted.append(el1.copy(active=True)) converted.append(el0.tokens[tidx]) converted.append(el2.tokens[tidx]) else: converted.append(el0) else: converted.append(el0) return converted def switch_passive(tokenized): """Passive verbs are modifying the phrase before them rather than the phrase following. 
For consistency, we flip the order of such verbs""" if all(not t.match(tokens.Verb, active=False) for t in tokenized): return tokenized converted, remaining = [], tokenized while remaining: to_add = list(takewhile( lambda t: not isinstance(t, tokens.Verb), remaining)) if len(to_add) < len(remaining): # also take the verb verb = remaining[len(to_add)].copy() to_add.append(verb) # switch verb to the beginning if not verb.active: to_add = to_add[-1:] + to_add[:-1] verb.active = True # may need to grab one more if the verb is move if (verb.verb == tokens.Verb.MOVE and len(to_add) < len(remaining)): to_add.append(remaining[len(to_add)]) converted.extend(to_add) remaining = remaining[len(to_add):] return converted def resolve_confused_context(tokenized, initial_context): """Resolve situation where a Context thinks it is regtext, but it *should* be an interpretation""" if initial_context[1:2] == ['Interpretations']: final_tokens = [] for token in tokenized: if (token.match(tokens.Context, tokens.Paragraph) and len(token.label) > 1 and token.label[1] is None): final_tokens.append(token.copy( label=[token.label[0], 'Interpretations', token.label[2], '(' + ')('.join(l for l in token.label[3:] if l) + ')'])) elif (token.match(tokens.Context, tokens.Paragraph) and len(token.label) > 1 and token.label[1].startswith('Appendix:')): final_tokens.append(token.copy( label=[token.label[0], 'Interpretations', token.label[1][len('Appendix:'):], '(' + ')('.join(l for l in token.label[2:] if l) + ')'])) elif token.match(tokens.TokenList): sub_tokens = resolve_confused_context(token.tokens, initial_context) final_tokens.append(token.copy(tokens=sub_tokens)) else: final_tokens.append(token) return final_tokens else: return tokenized def and_token_resolution(tokenized): """Troublesome case where a Context should be a Paragraph, but the only indicator is the presence of an "and" token afterwards. We'll likely want to expand this step in the future, but for now, we only catch a few cases""" # compress "and" tokens tokenized = zip(tokenized, tokenized[1:] + [None]) tokenized = [l for l, r in tokenized if l != r or not l.match(tokens.AndToken)] # we'll strip out all "and" tokens in just a moment, but as a first # pass, remove all those preceded by a verb (which makes the following # logic simpler). tokenized = list(reversed(tokenized)) tokenized = zip(tokenized, tokenized[1:] + [None]) tokenized = list(reversed([l for l, r in tokenized if not l.match(tokens.AndToken) or not r or not r.match(tokens.Verb)])) # check for the pattern in question final_tokens = [] idx = 0 while idx < len(tokenized) - 3: t1, t2, t3, t4 = tokenized[idx:idx + 4] if (t1.match(tokens.Verb) and t2.match(tokens.Context) and t3.match(tokens.AndToken) and t4.match(tokens.Paragraph, tokens.TokenList)): final_tokens.append(t1) final_tokens.append(tokens.Paragraph(t2.label)) final_tokens.append(t4) idx += 3 # not 4 as one will appear below elif t1 != tokens.AndToken: final_tokens.append(t1) idx += 1 final_tokens.extend(tokenized[idx:]) return final_tokens def context_to_paragraph(tokenized): """Generally, section numbers, subparts, etc. 
are good contextual clues, but sometimes they are the object of manipulation.""" # Don't modify anything if there are already paragraphs or no verbs for token in tokenized: if isinstance(token, tokens.Paragraph): return tokenized elif (isinstance(token, tokens.TokenList) and any(isinstance(t, tokens.Paragraph) for t in token.tokens)): return tokenized # copy converted = list(tokenized) verb_seen = False for i in range(len(converted)): token = converted[i] if isinstance(token, tokens.Verb): verb_seen = True elif verb_seen and token.match(tokens.Context, certain=False): converted[i] = tokens.Paragraph(token.label) return converted def is_designate_token(token): """ Return True if the token is a DESIGNATE verb. """ return token.match(tokens.Verb, verb=tokens.Verb.DESIGNATE) def contains_one_designate_token(tokenized): """ Return True if the list of tokens contains exactly one designate token. """ designate_tokens = [t for t in tokenized if is_designate_token(t)] return len(designate_tokens) == 1 def contains_one_tokenlist(tokenized): """ Return True if the list of tokens contains exactly one TokenList """ tokens_lists = [t for t in tokenized if isinstance(t, tokens.TokenList)] return len(tokens_lists) == 1 def contains_one_context(tokenized): """ Returns True if the list of tokens contains exactly one Context. """ contexts = [t for t in tokenized if isinstance(t, tokens.Context)] return len(contexts) == 1 def deal_with_subpart_adds(tokenized): """If we have a designate verb and a token list, we're going to change the context to a Paragraph. Because it's not a context, it's part of the manipulation.""" # Ensure that we only have one of each: designate verb, a token list and # a context verb_exists = contains_one_designate_token(tokenized) list_exists = contains_one_tokenlist(tokenized) context_exists = contains_one_context(tokenized) if verb_exists and list_exists and context_exists: token_list = [] for token in tokenized: if isinstance(token, tokens.Context): token_list.append(tokens.Paragraph(token.label)) else: token_list.append(token) return token_list, True else: return tokenized, False def separate_tokenlist(tokenized): """When we come across a token list, separate it out into individual tokens""" converted = [] for token in tokenized: if isinstance(token, tokens.TokenList): converted.extend(token.tokens) else: converted.append(token) return converted def compress(lhs_label, rhs_label): """Combine two labels where the rhs replaces the lhs.
If the rhs is empty, assume the lhs takes precedence.""" if not rhs_label: return lhs_label label = list(lhs_label) label.extend([None]*len(rhs_label)) label = label[:len(rhs_label)] for i in range(len(rhs_label)): label[i] = rhs_label[i] or label[i] return label def compress_context_in_tokenlists(tokenized): """Use compress (above) on elements within a tokenlist.""" final = [] for token in tokenized: if token.match(tokens.TokenList): subtokens = [] label_so_far = [] for subtoken in token.tokens: if hasattr(subtoken, 'label'): label_so_far = compress(label_so_far, subtoken.label) subtokens.append(subtoken.copy(label=label_so_far)) else: subtokens.append(subtoken) final.append(token.copy(tokens=subtokens)) else: final.append(token) return final def compress_context(tokenized, initial_context): """Add context to each of the paragraphs (consuming the Context tokens)""" # copy context = list(initial_context) converted = [] for token in tokenized: if isinstance(token, tokens.Context): # Interpretations of appendices if (len(context) > 1 and len(token.label) > 1 and context[1] == 'Interpretations' and (token.label[1] or '').startswith('Appendix')): context = compress( context, [token.label[0], None, token.label[1]] + token.label[2:]) else: context = compress(context, token.label) continue # Another corner case: a "paragraph" that actually indicates interp context elif ( isinstance(token, tokens.Paragraph) and len(context) > 1 and len(token.label) > 3 and context[1] == 'Interpretations' and token.label[1] != 'Interpretations'): context = compress( context, [token.label[0], None, token.label[2], '(' + ')('.join( p for p in token.label[3:] if p) + ')']) continue elif isinstance(token, tokens.Paragraph): context = compress(context, token.label) token.label = context converted.append(token) return converted, context def get_destination(tokenized, reg_part): """ In a designate scenario, get the destination label. """ paragraphs = [t for t in tokenized if isinstance(t, tokens.Paragraph)] destination = paragraphs[0] if destination.label[0] is None: # Sometimes the destination label doesn't know the reg part. destination.label[0] = reg_part destination = destination.label_text() return destination def handle_subpart_amendment(tokenized): """ Handle the situation where a new subpart is designated. """ verb = tokens.Verb.DESIGNATE token_lists = [t for t in tokenized if isinstance(t, tokens.TokenList)] # There's only one token list of paragraphs, sections to be designated tokens_to_be_designated = token_lists[0] labels_to_be_designated = [t.label_text() for t in tokens_to_be_designated] reg_part = tokens_to_be_designated.tokens[0].label[0] destination = get_destination(tokenized, reg_part) return DesignateAmendment(verb, labels_to_be_designated, destination) class Amendment(object): """ An Amendment object contains all the information necessary for an amendment. """ TITLE = '[title]' TEXT = '[text]' HEADING = '[heading]' def remove_intro(self, l): """ Remove the marker that indicates this is a change to introductory text.
""" l = l.replace(self.TITLE, '').replace(self.TEXT, '') return l.replace(self.HEADING, '') def fix_interp_format(self, components): """Convert between the interp format of amendments and the normal, node label format""" if ['Interpretations'] == components[1:2]: if len(components) > 2: new_style = [components[0], components[2].replace('Appendix:', '')] # Add paragraphs if len(components) > 3: paragraphs = [p.strip('()') for p in components[3].split(')(')] paragraphs = filter(bool, paragraphs) new_style.extend(paragraphs) new_style.append(Node.INTERP_MARK) # Add any paragraphs of the comment new_style.extend(components[4:]) return new_style else: return components[:1] + [Node.INTERP_MARK] return components def fix_appendix_format(self, components): """Convert between the appendix format of amendments and the normal, node label format""" return [c.replace('Appendix:', '') for c in components] def fix_label(self, label): """ The labels that come back from parsing the list of amendments are not the same type we use in the rest of parsing. Convert between the two here (removing question markers, converting to interp format, etc.)""" def wanted(l): return l != '?' and 'Subpart' not in l components = label.split('-') components = [self.remove_intro(l) for l in components if wanted(l)] components = self.fix_interp_format(components) components = self.fix_appendix_format(components) return components def __init__(self, action, label, destination=None): self.action = action self.original_label = label self.label = self.fix_label(self.original_label) if destination and '-' in destination: self.destination = self.fix_interp_format(destination.split('-')) else: self.destination = destination if self.TITLE in self.original_label: self.field = self.TITLE elif self.TEXT in self.original_label: self.field = self.TEXT elif self.HEADING in self.original_label: self.field = self.HEADING else: self.field = None def label_id(self): """ Return the label id (dash delimited) for this label. """ return '-'.join(self.label) def __repr__(self): if self.destination: return '(%s, %s, %s)' % (self.action, self.label, self.destination) else: return '(%s, %s)' % (self.action, self.label) def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) class DesignateAmendment(Amendment): """ A designate Amendment manages it's information a little differently than a normal Amendment. 
Namely, there's more handling around Subparts.""" def __init__(self, action, label_list, destination): self.action = action self.original_labels = label_list self.labels = [self.fix_label(l) for l in self.original_labels] self.original_destination = destination if 'Subpart' in destination and ':' in destination: reg_part, subpart = self.original_destination.split('-') _, subpart_letter = destination.split(':') self.destination = [reg_part, 'Subpart', subpart_letter] elif '-' in destination: self.destination = self.fix_interp_format(destination.split('-')) else: self.destination = destination def __repr__(self): return "(%s, %s, %s)" % ( repr(self.action), repr(self.labels), repr(self.destination)) def make_amendments(tokenized, subpart=False): """Convert a sequence of (normalized) tokens into a list of amendments""" verb = None amends = [] if subpart: amends.append(handle_subpart_amendment(tokenized)) else: for i in range(len(tokenized)): token = tokenized[i] if isinstance(token, tokens.Verb): assert token.active verb = token.verb elif isinstance(token, tokens.Paragraph): if verb == tokens.Verb.MOVE: if isinstance(tokenized[i-1], tokens.Paragraph): origin = tokenized[i-1].label_text() destination = token.label_text() amends.append(Amendment(verb, origin, destination)) elif verb: amends.append(Amendment(verb, token.label_text())) # Edits to intro text should always be PUTs for amend in amends: if (not isinstance(amend, DesignateAmendment) and amend.field == "[text]" and amend.action == tokens.Verb.POST): amend.action = tokens.Verb.PUT return amends def new_subpart_added(amendment): """ Return True if label indicates that a new subpart was added """ new_subpart = amendment.action == 'POST' label = amendment.original_label m = [t for t, _, _ in amdpar.subpart_label.scanString(label)] return (len(m) > 0 and new_subpart)
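A quick standalone illustration of compress() may make the label-merge semantics above easier to see. This is a hypothetical sketch, not part of the module; it re-states the function with example inputs:

# Sketch: compress() overrides the lhs label position-by-position, keeping
# the lhs entry wherever the rhs entry is falsy, truncated to the rhs length.
def compress(lhs_label, rhs_label):
    if not rhs_label:
        return lhs_label
    label = list(lhs_label) + [None] * len(rhs_label)
    label = label[:len(rhs_label)]
    return [rhs_label[i] or label[i] for i in range(len(rhs_label))]

assert compress(['1005', '2', 'a'], ['1005', '3']) == ['1005', '3']
assert compress(['1005', '2', 'a'], [None, None, 'b']) == ['1005', '2', 'b']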
The Centers for Disease Control and Prevention reported that only 21.7% of adults are getting the recommended amount of aerobic and muscle-strengthening exercise. If you are someone who is highly interested in a growler keg or wants to consume beer at a high rate, make sure you stay active. Beer is a calorie-dense beverage, so it is easy for regular drinking to pack on the pounds. The United States craft beer market is worth $23.5 billion. To put that in perspective, the craft beer market rivals the size of markets for goods and services people rely on in their daily lives. Almost 84% of all craft beer consumers like to choose their beer depending on the season. So, for instance, getting a growler keg of a particular brew might be something you can only do at a certain time of year; there are fall beers and summer beers that provide different tastes to accompany the change in seasons. In 2015, the number of operating breweries in the U.S. grew 15%, totaling 4,269 breweries, the most at any time in American history. So the growler keg is a hugely popular item across the United States. Right now, just about 14% of Americans drink beer once a week. United States consumers 21 years and older consumed 27.5 gallons of beer and cider per person during 2015. According to a 2016 Gallup poll, 43% of legal drinkers prefer beer over wine and spirits. In 2015, 85% of all beer in the U.S. was domestically produced, and small and independent craft brewers now represent 12 percent market share of the overall beer industry. So if you want to get a growler keg or other kinds of beer, make sure you know the facts on beer and the industry. Every single year, a large number of Americans go out on bar crawls, an activity that involves people going from one bar to another in a short span of time. Drinking is one of the most popular social activities in the United States, and this trend is only going to continue to grow.
#!/usr/bin/env python import os, sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../") import bz2 from xml.etree import ElementTree from pymcda.electre_tri import MRSort from pymcda.uta import AVFSort from pymcda.types import PerformanceTable from pymcda.types import AlternativesAssignments from pymcda.types import AlternativePerformances from pymcda.utils import compute_ca from pymcda.utils import compute_confusion_matrix from pymcda.utils import print_confusion_matrix from pymcda.utils import print_pt_and_assignments from pymcda.ui.graphic import display_electre_tri_models from test_utils import is_bz2_file f = sys.argv[1] if not os.path.isfile(f): print("Invalid file %s" % f) sys.exit(1) if is_bz2_file(f): f = bz2.BZ2File(f) tree = ElementTree.parse(f) root = tree.getroot() try: pt_learning = PerformanceTable().from_xmcda(root, 'learning_set') except: pt_learning = None try: pt_test = PerformanceTable().from_xmcda(root, 'test_set') except: pt_test = None aa_learning_m1, aa_learning_m2 = None, None aa_test_m1, aa_test_m2 = None, None if root.find("ElectreTri[@id='initial']") is not None: m1 = MRSort().from_xmcda(root, 'initial') if pt_learning is not None: aa_learning_m1 = m1.pessimist(pt_learning) if pt_test is not None: aa_test_m1 = m1.pessimist(pt_test) elif root.find("AVFSort[@id='initial']") is not None: m1 = AVFSort().from_xmcda(root, 'initial') if pt_learning is not None: aa_learning_m1 = m1.get_assignments(pt_learning) if pt_test is not None: aa_test_m1 = m1.get_assignments(pt_test) else: if root.find("alternativesAffectations[@id='learning_set']") is not None: aa_learning_m1 = AlternativesAssignments().from_xmcda(root, 'learning_set') if root.find("alternativesAffectations[@id='test_set']") is not None: aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set') if root.find("ElectreTri[@id='learned']") is not None: m2 = MRSort().from_xmcda(root, 'learned') if pt_learning is not None: aa_learning_m2 = m2.pessimist(pt_learning) if pt_test is not None: aa_test_m2 = m2.pessimist(pt_test) elif root.find("AVFSort[@id='learned']") is not None: m2 = AVFSort().from_xmcda(root, 'learned') if pt_learning is not None: aa_learning_m2 = m2.get_assignments(pt_learning) # the original loop appended every alternative in both branches of its # condition, so collect them all directly aids = list(aa_learning_m2.keys()) au = m2.global_utilities(pt_learning) print_pt_and_assignments(aids, None, [aa_learning_m1, aa_learning_m2], pt_learning, au) # for i in range(1, len(pt_learning) + 1): # aid = "a%d" % i # uti = m2.global_utility(pt_learning["a%d" % i]) # if aa_learning_m2[aid].category_id != aa_learning_m1[aid].category_id: # print("%s %g %s %s" % (aid, uti.value, aa_learning_m2[aid].category_id, aa_learning_m1[aid].category_id)) # print_pt_and_assignments(anok, c, [aa_learning_m1, aa_learning_m2], pt_learning) if pt_test is not None: aa_test_m2 = m2.get_assignments(pt_test) def compute_auc_histo(aa): pass if aa_learning_m1 is not None: ca_learning = compute_ca(aa_learning_m1, aa_learning_m2) auc_learning = m2.auc(aa_learning_m1, pt_learning) print("Learning set") print("============") print("CA : %g" % ca_learning) print("AUC: %g" % auc_learning) print("Confusion table:") matrix = compute_confusion_matrix(aa_learning_m1, aa_learning_m2, m2.categories) print_confusion_matrix(matrix, m2.categories) aids = [a.id for a in aa_learning_m1 \ if aa_learning_m1[a.id].category_id !=
aa_learning_m2[a.id].category_id] if len(aids) > 0: print("List of alternatives wrongly assigned:") print_pt_and_assignments(aids, None, [aa_learning_m1, aa_learning_m2], pt_learning) if aa_test_m1 is not None and len(aa_test_m1) > 0: ca_test = compute_ca(aa_test_m1, aa_test_m2) auc_test = m2.auc(aa_test_m1, pt_test) print("\n\nTest set") print("========") print("CA : %g" % ca_test) print("AUC: %g" % auc_test) print("Confusion table:") matrix = compute_confusion_matrix(aa_test_m1, aa_test_m2, m2.categories) print_confusion_matrix(matrix, m2.categories) aids = [a.id for a in aa_test_m1 \ if aa_test_m1[a.id].category_id != aa_test_m2[a.id].category_id] if len(aids) > 0: print("List of alternatives wrongly assigned:") print_pt_and_assignments(aids, None, [aa_test_m1, aa_test_m2], pt_test) if type(m2) == MRSort: worst = AlternativePerformances('worst', {c.id: 0 for c in m2.criteria}) best = AlternativePerformances('best', {c.id: 1 for c in m2.criteria}) categories = m2.categories a_learning = aa_learning_m1.keys() pt_learning_ok = [] pt_learning_too_low = [] pt_learning_too_high = [] for a in a_learning: i1 = categories.index(aa_learning_m1[a].category_id) i2 = categories.index(aa_learning_m2[a].category_id) if i1 == i2: pt_learning_ok.append(pt_learning[a]) elif i1 < i2: pt_learning_too_high.append(pt_learning[a]) elif i1 > i2: pt_learning_too_low.append(pt_learning[a]) a_test = aa_test_m1.keys() pt_test_ok = [] pt_test_too_low = [] pt_test_too_high = [] for a in a_test: i1 = categories.index(aa_test_m1[a].category_id) i2 = categories.index(aa_test_m2[a].category_id) if i1 == i2: pt_test_ok.append(pt_test[a]) elif i1 < i2: pt_test_too_high.append(pt_test[a]) elif i1 > i2: pt_test_too_low.append(pt_test[a]) display_electre_tri_models([m2, m2], [worst, worst], [best, best], [m2.vpt, m2.vpt], [pt_learning_too_low, pt_test_too_low], None, [pt_learning_too_high, pt_test_too_high])
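The classification accuracy (CA) printed by the script is, assuming pymcda's compute_ca is the usual measure, the fraction of alternatives that both assignment sets place in the same category. A minimal standalone sketch under that assumption, with plain dicts standing in for pymcda's AlternativesAssignments objects:

# Hypothetical sketch of classification accuracy between two assignments.
def classification_accuracy(aa1, aa2):
    matches = sum(1 for a in aa1 if aa1[a] == aa2[a])
    return float(matches) / len(aa1)

assert classification_accuracy({'a1': 'cat1', 'a2': 'cat2'},
                               {'a1': 'cat1', 'a2': 'cat1'}) == 0.5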
Thinking about getting your Realtor's license? Whether you are considering a career change or looking to buy real estate investments, a Realtor's license may be right for you. Check out the advantages below and call us today.
# Copyright 2019 The ROBEL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Hardware reset functions for the D'Kitty.""" import time from robel.components.builder import ComponentBuilder from robel.components.robot import RobotComponentBuilder from robel.components.robot.dynamixel_robot import DynamixelRobotComponent from robel.components.tracking import TrackerComponentBuilder from robel.components.tracking.tracker import TrackerComponent from robel.utils.reset_procedure import ResetProcedure class ManualAutoDKittyResetProcedure(ResetProcedure): """Manual reset procedure for D'Kitty. This waits until the D'Kitty is placed upright and automatically starts the episode. """ def __init__(self, upright_threshold: float = 0.9, max_height: float = 0.35, min_successful_checks: int = 5, check_interval_sec: float = 0.1, print_interval_sec: float = 1.0, episode_start_delay_sec: float = 1.0): super().__init__() self._upright_threshold = upright_threshold self._max_height = max_height self._min_successful_checks = min_successful_checks self._check_interval_sec = check_interval_sec self._print_interval_sec = print_interval_sec self._episode_start_delay_sec = episode_start_delay_sec self._last_print_time = 0 self._robot = None self._tracker = None def configure_reset_groups(self, builder: ComponentBuilder): """Configures the component groups needed for reset.""" if isinstance(builder, RobotComponentBuilder): assert 'dkitty' in builder.group_configs elif isinstance(builder, TrackerComponentBuilder): assert 'torso' in builder.group_configs def reset(self, robot: DynamixelRobotComponent, tracker: TrackerComponent): """Performs the reset procedure.""" self._robot = robot self._tracker = tracker def finish(self): """Called when the reset is complete.""" # Wait until the robot is sufficiently upright. self._wait_until_upright() def _wait_until_upright(self): """Waits until the D'Kitty is upright.""" upright_checks = 0 self._last_print_time = 0 # Start at 0 so print happens first time. while True: if self._is_dkitty_upright(): upright_checks += 1 else: upright_checks = 0 if upright_checks > self._min_successful_checks: break time.sleep(self._check_interval_sec) print('Reset complete, starting episode...') time.sleep(self._episode_start_delay_sec) def _is_dkitty_upright(self) -> bool: """Checks if the D'Kitty is currently upright.""" state = self._tracker.get_state('torso') height = state.pos[2] upright = state.rot[2, 2] cur_time = time.time() if cur_time - self._last_print_time >= self._print_interval_sec: self._last_print_time = cur_time print(('Waiting for D\'Kitty to be upright (upright: {:.2f}, ' 'height: {:.2f})').format(upright, height)) if upright < self._upright_threshold: return False if height > self._max_height: return False return True
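The core of _wait_until_upright above is a debounce: the reset only completes after more than min_successful_checks consecutive upright readings, and any failed check resets the streak. A standalone sketch of that pattern (a hypothetical helper, not part of the ROBEL API):

import time

# Block until check_fn() succeeds more than min_successful_checks times in a
# row, resetting the streak whenever a check fails.
def wait_until_stable(check_fn, min_successful_checks=5, check_interval_sec=0.1):
    successes = 0
    while True:
        successes = successes + 1 if check_fn() else 0
        if successes > min_successful_checks:
            return
        time.sleep(check_interval_sec)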
Take even a simpler case, which seems more nearly resolvable into an expression of identity: 4=2+2. Even here, the meaning is not that the two members of the equation are identical, but only that the Concept or group four is equivalent in one respect – viz. the possession of an equal number of units – to the two groups two and two. It is plain that one group cannot be identical with two groups, or that two distinct acts of the mind, each conceiving or grasping together two units, cannot be literally the same thing as one mental act conceiving four. Suppose we take this proposition to a logical extreme. This would grant a landowner rights to everything in a cone from the center of the earth to an infinite distance out into space, and whatever was inside that cone, including stars and planets. It is absurd that someone who purchases land on earth should own other planets; therefore, this proposition is wrong. The ontological argument was proposed by Anselm of Canterbury in the second chapter of his Proslogion. Although he did not propose an ontological system, he was very much concerned with the nature of being. He distinguished necessary beings (those that must exist) from contingent beings (those that may exist, but whose existence is not necessary). The plenitude principle or principle of plenitude asserts that everything that can happen will happen. The historian of ideas Arthur Lovejoy was the first to discuss this philosophically important principle explicitly, tracing it back to Aristotle, who said that no possibilities which remain eternally possible will go unrealized. The infinite monkey theorem states that a monkey hitting keys at random on a typewriter keyboard for an infinite amount of time will almost surely type a given text, such as the complete works of William Shakespeare. In this context, “almost surely” is a mathematical term with a precise meaning, and the “monkey” is not an actual monkey, but a metaphor for an abstract device that produces a random sequence of letters ad infinitum. The theorem illustrates the perils of reasoning about infinity by imagining a vast but finite number, and vice versa. The probability of a monkey exactly typing a complete work such as Shakespeare’s Hamlet is so tiny that the chance of it occurring during a period of time of the order of the age of the universe is minuscule, but not zero.
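To make the closing point about minuscule-but-nonzero probabilities concrete, here is a back-of-the-envelope sketch (assuming a 26-key typewriter and independent, uniformly random keystrokes; the target phrase is arbitrary):

# Probability that one random attempt of the same length exactly
# reproduces the target string on a 26-key typewriter.
def prob_exact_typing(text, n_keys=26):
    return (1.0 / n_keys) ** len(text)

# Thirteen letters ("tobeornottobe") already give roughly 4e-19:
# vanishingly small, yet strictly greater than zero.
print(prob_exact_typing("tobeornottobe"))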
# -*- coding: utf-8 -*- """ Created on Feb 28, 2015 @author: Tyranic-Moron """ import re from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType from CommandInterface import CommandInterface from twisted.words.protocols.irc import assembleFormattedText, attributes as A class UnbalancedBracesException(Exception): def __init__(self, message, column): # Call the base exception constructor with the params it needs super(UnbalancedBracesException, self).__init__(message) # Store the column position of the unbalanced brace self.column = column class DictMergeError(Exception): pass class Sub(CommandInterface): triggers = ['sub'] help = "sub <text> - executes nested commands in <text> and replaces the commands with their output\n" \ "syntax: text {command params} more text {command {command params} {command params}}\n" \ "example: .sub Some {rainbow magical} {flip topsy-turvy} text" runInThread = True def execute(self, message): """ @type message: IRCMessage """ subString = self._mangleEscapes(message.Parameters) try: segments = list(self._parseSubcommandTree(subString)) except UnbalancedBracesException as e: red = assembleFormattedText(A.fg.lightRed['']) normal = assembleFormattedText(A.normal['']) error = subString[:e.column] + red + subString[e.column] + normal + subString[e.column+1:] error = self._unmangleEscapes(error, False) return [IRCResponse(ResponseType.Say, u"Sub Error: {}".format(e.message), message.ReplyTo), IRCResponse(ResponseType.Say, error, message.ReplyTo)] prevLevel = -1 responseStack = [] extraVars = {} metadata = {} for segment in segments: (level, command, start, end) = segment # We've finished executing subcommands at the previous depth, # so replace subcommands with their output at the current depth if level < prevLevel: command = self._substituteResponses(command, responseStack, level, extraVars, start) # Replace any extraVars in the command for var, value in extraVars.iteritems(): command = re.sub(ur'\$\b{}\b'.format(re.escape(var)), u'{}'.format(value), command) # Build a new message out of this segment inputMessage = IRCMessage(message.Type, message.User.String, message.Channel, self.bot.commandChar + command.lstrip(), self.bot, metadata=metadata) # Execute the constructed message if inputMessage.Command.lower() in self.bot.moduleHandler.mappedTriggers: response = self.bot.moduleHandler.mappedTriggers[inputMessage.Command.lower()].execute(inputMessage) """@type : IRCResponse""" else: return IRCResponse(ResponseType.Say, u"'{}' is not a recognized command trigger".format(inputMessage.Command), message.ReplyTo) # Push the response onto the stack responseStack.append((level, response.Response, start, end)) # Update the extraVars dict extraVars.update(response.ExtraVars) metadata = self._recursiveMerge(metadata, response.Metadata) prevLevel = level responseString = self._substituteResponses(subString, responseStack, -1, extraVars, -1) responseString = self._unmangleEscapes(responseString) return IRCResponse(ResponseType.Say, responseString, message.ReplyTo, extraVars=extraVars, metadata=metadata) @staticmethod def _parseSubcommandTree(string): """Parse braced segments in string as tuples (level, contents, start index, end index).""" stack = [] for i, c in enumerate(string): if c == '{': stack.append(i) elif c == '}': if stack: start = stack.pop() yield (len(stack), string[start + 1: i], start, i) else: raise UnbalancedBracesException(u"unbalanced closing brace", i) if stack: start = stack.pop() raise UnbalancedBracesException(u"unbalanced 
opening brace", start) @staticmethod def _substituteResponses(command, responseStack, commandLevel, extraVars, start): # Pop responses off the stack and replace the subcommand that generated them while len(responseStack) > 0: level, responseString, rStart, rEnd = responseStack.pop() if level <= commandLevel: responseStack.append((level, responseString, rStart, rEnd)) break cStart = rStart - start - 1 cEnd = rEnd - start # Replace the subcommand with its output command = command[:cStart] + responseString + command[cEnd:] # Replace any extraVars generated by functions for var, value in extraVars.iteritems(): command = re.sub(ur'\$\b{}\b'.format(re.escape(var)), u'{}'.format(value), command) return command @staticmethod def _mangleEscapes(string): # Replace escaped left and right braces with something that should never show up in messages/responses string = re.sub(ur'(?<!\\)\\\{', u'@LB@', string) string = re.sub(ur'(?<!\\)\\\}', u'@RB@', string) return string @staticmethod def _unmangleEscapes(string, unescape=True): if unescape: # Replace the mangled escaped braces with unescaped braces string = string.replace(u'@LB@', u'{') string = string.replace(u'@RB@', u'}') else: # Just unmangle them, ie, keep the escapes string = string.replace(u'@LB@', u'\\{') string = string.replace(u'@RB@', u'\\}') return string def _recursiveMerge(self, d1, d2): from collections import MutableMapping ''' Update two dicts of dicts recursively, if either mapping has leaves that are non-dicts, the second's leaf overwrites the first's. ''' for k, v in d1.iteritems(): if k in d2: if all(isinstance(e, MutableMapping) for e in (v, d2[k])): d2[k] = self._recursiveMerge(v, d2[k]) # we could further check types and merge as appropriate here. elif isinstance(v, list): # merge/append lists if isinstance(d2[k], list): # merge lists v.extend(d2[k]) else: # append to list v.append(d2[k]) d3 = d1.copy() d3.update(d2) return d3
""" Unit tests for resdk/resources/user.py file. """ import unittest from mock import MagicMock from resdk.resources.user import Group, User class TestGroup(unittest.TestCase): def setUp(self): self.resolwe = MagicMock() self.user = User(resolwe=self.resolwe, id=42) self.group = Group(resolwe=self.resolwe, name="Test group", id=1) self.group_no_id = Group(resolwe=self.resolwe, name="Test group") def test_users_no_id(self): with self.assertRaises(ValueError): self.group_no_id.users def test_users(self): self.resolwe.user.filter.return_value = [self.user] users = self.group.users self.assertEqual(len(users), 1) self.assertEqual(users[0], self.user) def test_add_user_no_id(self): with self.assertRaises(ValueError): self.group_no_id.add_users(self.user) def test_add_user(self): self.group.add_users(self.user) self.resolwe.api.group().add_users.post.assert_called_with({"user_ids": [42]}) def test_remove_user_no_id(self): with self.assertRaises(ValueError): self.group_no_id.remove_users(self.user) def test_remove_user(self): self.group.remove_users(self.user) self.resolwe.api.group().remove_users.post.assert_called_with( {"user_ids": [42]} ) if __name__ == "__main__": unittest.main()
The Boom Operator is a specialist role: they control the long boom arm on which a microphone is mounted, enabling the microphone to get as close to the filmed scene as possible without being seen onscreen. The boom can be either handheld or placed on a wheeled dolly. Boom Operators must understand the dynamics of filming and camera work in order to record to a high standard without impinging on the cameras or obstructing the shot. They must work as part of a team to a tight schedule, under the demands of a busy film set. Excellent knowledge of various microphones and other sound equipment and how they work in specific settings. Good knowledge of electronics, including picture/sound editing. Understanding of how scenes are put together and the synchronization between sound and camera. Excellent physical and mental dexterity in recording the highest possible quality of sound while keeping the boom out of shot. Great hearing, balance and agility as well as good co-ordination. Ability and experience to solve sound issues as they arise. There is no single career path to Boom Operator; often they will have worked their way through the Sound Department, starting as a sound trainee. Knowledge of sound equipment and electronics is essential. A qualification in sound recording is useful.
# This file is part of Androguard. # # Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re, random, cPickle, collections from androguard.core.androconf import error, warning, debug, is_ascii_problem,\ load_api_specific_resource_module from androguard.core.bytecodes import dvm from androguard.core.bytecodes.api_permissions import DVM_PERMISSIONS_BY_PERMISSION, DVM_PERMISSIONS_BY_ELEMENT class ContextField(object): def __init__(self, mode): self.mode = mode self.details = [] def set_details(self, details): for i in details: self.details.append( i ) class ContextMethod(object): def __init__(self): self.details = [] def set_details(self, details): for i in details: self.details.append( i ) class ExternalFM(object): def __init__(self, class_name, name, descriptor): self.class_name = class_name self.name = name self.descriptor = descriptor def get_class_name(self): return self.class_name def get_name(self): return self.name def get_descriptor(self): return self.descriptor class ToString(object): def __init__(self, tab): self.__tab = tab self.__re_tab = {} for i in self.__tab: self.__re_tab[i] = [] for j in self.__tab[i]: self.__re_tab[i].append( re.compile( j ) ) self.__string = "" def push(self, name): for i in self.__tab: for j in self.__re_tab[i]: if j.match(name) != None: if len(self.__string) > 0: if i == 'O' and self.__string[-1] == 'O': continue self.__string += i def get_string(self): return self.__string class BreakBlock(object): def __init__(self, _vm, idx): self._vm = _vm self._start = idx self._end = self._start self._ins = [] self._ops = [] self._fields = {} self._methods = {} def get_ops(self): return self._ops def get_fields(self): return self._fields def get_methods(self): return self._methods def push(self, ins): self._ins.append(ins) self._end += ins.get_length() def get_start(self): return self._start def get_end(self): return self._end def show(self): for i in self._ins: print "\t\t", i.show(0) DVM_FIELDS_ACCESS = { "iget" : "R", "iget-wide" : "R", "iget-object" : "R", "iget-boolean" : "R", "iget-byte" : "R", "iget-char" : "R", "iget-short" : "R", "iput" : "W", "iput-wide" : "W", "iput-object" : "W", "iput-boolean" : "W", "iput-byte" : "W", "iput-char" : "W", "iput-short" : "W", "sget" : "R", "sget-wide" : "R", "sget-object" : "R", "sget-boolean" : "R", "sget-byte" : "R", "sget-char" : "R", "sget-short" : "R", "sput" : "W", "sput-wide" : "W", "sput-object" : "W", "sput-boolean" : "W", "sput-byte" : "W", "sput-char" : "W", "sput-short" : "W", } class DVMBasicBlock(object): """ A simple basic block of a dalvik method """ def __init__(self, start, vm, method, context): self.__vm = vm self.method = method self.context = context self.last_length = 0 self.nb_instructions = 0 self.fathers = [] self.childs = [] self.start = start self.end = self.start self.special_ins = {} self.name = "%s-BB@0x%x" % (self.method.get_name(), self.start) self.exception_analysis = None self.tainted_variables = 
self.context.get_tainted_variables() self.tainted_packages = self.context.get_tainted_packages() self.notes = [] def get_notes(self): return self.notes def set_notes(self, value): self.notes = [value] def add_note(self, note): self.notes.append(note) def clear_notes(self): self.notes = [] def get_instructions(self): """ Get all instructions from a basic block. :rtype: Return all instructions in the current basic block """ tmp_ins = [] idx = 0 for i in self.method.get_instructions(): if idx >= self.start and idx < self.end: tmp_ins.append(i) idx += i.get_length() return tmp_ins def get_nb_instructions(self): return self.nb_instructions def get_method(self): return self.method def get_name(self): return "%s-BB@0x%x" % (self.method.get_name(), self.start) def get_start(self): return self.start def get_end(self): return self.end def get_last(self): return self.get_instructions()[-1] def get_next(self): """ Get next basic blocks :rtype: a list of the next basic blocks """ return self.childs def get_prev(self): """ Get previous basic blocks :rtype: a list of the previous basic blocks """ return self.fathers def set_fathers(self, f): self.fathers.append(f) def get_last_length(self): return self.last_length def set_childs(self, values): #print self, self.start, self.end, values if values == []: next_block = self.context.get_basic_block( self.end + 1 ) if next_block != None: self.childs.append( ( self.end - self.get_last_length(), self.end, next_block ) ) else: for i in values: if i != -1: next_block = self.context.get_basic_block( i ) if next_block != None: self.childs.append( ( self.end - self.get_last_length(), i, next_block) ) for c in self.childs: if c[2] != None: c[2].set_fathers( ( c[1], c[0], self ) ) def push(self, i): try: self.nb_instructions += 1 idx = self.end self.last_length = i.get_length() self.end += self.last_length op_value = i.get_op_value() # field access if (op_value >= 0x52 and op_value <= 0x6d): desc = self.__vm.get_cm_field(i.get_ref_kind()) if self.tainted_variables != None: self.tainted_variables.push_info(TAINTED_FIELD, desc, DVM_FIELDS_ACCESS[i.get_name()][0], idx, self.method) # invoke elif (op_value >= 0x6e and op_value <= 0x72) or (op_value >= 0x74 and op_value <= 0x78): idx_meth = i.get_ref_kind() method_info = self.__vm.get_cm_method(idx_meth) if self.tainted_packages != None: self.tainted_packages.push_info(method_info[0], TAINTED_PACKAGE_CALL, idx, self.method, idx_meth) # new_instance elif op_value == 0x22: idx_type = i.get_ref_kind() type_info = self.__vm.get_cm_type(idx_type) if self.tainted_packages != None: self.tainted_packages.push_info(type_info, TAINTED_PACKAGE_CREATE, idx, self.method, None) # const-string elif (op_value >= 0x1a and op_value <= 0x1b): string_name = self.__vm.get_cm_string(i.get_ref_kind()) if self.tainted_variables != None: self.tainted_variables.push_info(TAINTED_STRING, string_name, "R", idx, self.method) elif op_value == 0x26 or (op_value >= 0x2b and op_value <= 0x2c): code = self.method.get_code().get_bc() self.special_ins[idx] = code.get_ins_off(idx + i.get_ref_off() * 2) except: pass def get_special_ins(self, idx): """ Return the associated instruction to a specific instruction (for example a packed/sparse switch) :param idx: the index of the instruction :rtype: None or an Instruction """ try: return self.special_ins[idx] except: return None def get_exception_analysis(self): return self.exception_analysis def set_exception_analysis(self, exception_analysis): self.exception_analysis = exception_analysis TAINTED_LOCAL_VARIABLE = 0 
TAINTED_FIELD = 1 TAINTED_STRING = 2 class PathVar(object): def __init__(self, access, idx, dst_idx, info_obj): self.access_flag = access self.idx = idx self.dst_idx = dst_idx self.info_obj = info_obj def get_var_info(self): return self.info_obj.get_info() def get_access_flag(self): return self.access_flag def get_src(self, cm): method = cm.get_method_ref( self.idx ) return method.get_class_name(), method.get_name(), method.get_descriptor() def get_dst(self, cm): method = cm.get_method_ref( self.dst_idx ) return method.get_class_name(), method.get_name(), method.get_descriptor() def get_idx(self): return self.idx class TaintedVariable(object): def __init__(self, var, _type): self.var = var self.type = _type self.paths = {} self.__cache = [] def get_type(self): return self.type def get_info(self): if self.type == TAINTED_FIELD: return [ self.var[0], self.var[2], self.var[1] ] return self.var def push(self, access, idx, ref): m_idx = ref.get_method_idx() if m_idx not in self.paths: self.paths[ m_idx ] = [] self.paths[ m_idx ].append( (access, idx) ) def get_paths_access(self, mode): for i in self.paths: for j in self.paths[ i ]: # each stored path is an (access, idx) pair if j[0] in mode: yield i, j, j[0], j[1] def get_paths(self): if self.__cache != []: return self.__cache for i in self.paths: for j in self.paths[ i ]: self.__cache.append( [j, i] ) #yield j, i return self.__cache def get_paths_length(self): return len(self.paths) def show_paths(self, vm): show_PathVariable( vm, self.get_paths() ) class TaintedVariables(object): def __init__(self, _vm): self.__vm = _vm self.__vars = { TAINTED_LOCAL_VARIABLE : {}, TAINTED_FIELD : {}, TAINTED_STRING : {}, } self.__cache_field_by_method = {} self.__cache_string_by_method = {} self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version()) self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version()) # functions to get particular elements def get_string(self, s): try: return self.__vars[ TAINTED_STRING ][ s ] except KeyError: return None def get_field(self, class_name, name, descriptor): key = class_name + descriptor + name try: return self.__vars[ TAINTED_FIELD ] [ key ] except KeyError: return None def toPathVariable(self, obj): z = [] for i in obj.get_paths(): access, idx = i[0] m_idx = i[1] z.append( PathVar(access, idx, m_idx, obj ) ) return z # permission functions def get_permissions_method(self, method): permissions = set() for f, f1 in self.get_fields(): data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1]) if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys(): for path in f.get_paths(): #access, idx = path[0] m_idx = path[1] if m_idx == method.get_idx(): permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data]) return permissions def get_permissions(self, permissions_needed): """ @param permissions_needed : a list of restricted permissions to get ([] returns all permissions) @rtype : a dictionary of permissions' paths """ permissions = {} pn = set(permissions_needed) if permissions_needed == []: pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys()) for f, _ in self.get_fields(): data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1]) if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys(): perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data]) for p in perm_intersection: try:
permissions[p].extend(self.toPathVariable(f)) except KeyError: permissions[p] = [] permissions[p].extend(self.toPathVariable(f)) return permissions # global functions def get_strings(self): for i in self.__vars[ TAINTED_STRING ]: yield self.__vars[ TAINTED_STRING ][ i ], i def get_fields(self): for i in self.__vars[ TAINTED_FIELD ]: yield self.__vars[ TAINTED_FIELD ][ i ], i # specific functions def get_strings_by_method(self, method): z = {} try: for i in self.__cache_string_by_method[ method.get_method_idx() ]: z[ i ] = [] for j in i.get_paths(): if method.get_method_idx() == j[1]: z[i].append( j[0] ) return z except KeyError: return z def get_fields_by_method(self, method): z = {} try: for i in self.__cache_field_by_method[ method.get_method_idx() ]: z[ i ] = [] for j in i.get_paths(): if method.get_method_idx() == j[1]: z[i].append( j[0] ) return z except KeyError: return z def add(self, var, _type, _method=None): if _type == TAINTED_FIELD: key = var[0] + var[1] + var[2] if key not in self.__vars[ TAINTED_FIELD ]: self.__vars[ TAINTED_FIELD ][ key ] = TaintedVariable( var, _type ) elif _type == TAINTED_STRING: if var not in self.__vars[ TAINTED_STRING ]: self.__vars[ TAINTED_STRING ][ var ] = TaintedVariable( var, _type ) elif _type == TAINTED_LOCAL_VARIABLE: if _method not in self.__vars[ TAINTED_LOCAL_VARIABLE ]: self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ] = {} if var not in self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ]: self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ][ var ] = TaintedVariable( var, _type ) def push_info(self, _type, var, access, idx, ref): if _type == TAINTED_FIELD: self.add( var, _type ) key = var[0] + var[1] + var[2] self.__vars[ _type ][ key ].push( access, idx, ref ) method_idx = ref.get_method_idx() if method_idx not in self.__cache_field_by_method: self.__cache_field_by_method[ method_idx ] = set() self.__cache_field_by_method[ method_idx ].add( self.__vars[ TAINTED_FIELD ][ key ] ) elif _type == TAINTED_STRING: self.add( var, _type ) self.__vars[ _type ][ var ].push( access, idx, ref ) method_idx = ref.get_method_idx() if method_idx not in self.__cache_string_by_method: self.__cache_string_by_method[ method_idx ] = set() self.__cache_string_by_method[ method_idx ].add( self.__vars[ TAINTED_STRING ][ var ] ) TAINTED_PACKAGE_CREATE = 0 TAINTED_PACKAGE_CALL = 1 TAINTED_PACKAGE = { TAINTED_PACKAGE_CREATE : "C", TAINTED_PACKAGE_CALL : "M" } def show_Path(vm, path): cm = vm.get_class_manager() if isinstance(path, PathVar): dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm ) info_var = path.get_var_info() print "%s %s (0x%x) ---> %s->%s%s" % (path.get_access_flag(), info_var, path.get_idx(), dst_class_name, dst_method_name, dst_descriptor) else: if path.get_access_flag() == TAINTED_PACKAGE_CALL: src_class_name, src_method_name, src_descriptor = path.get_src( cm ) dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm ) print "%d %s->%s%s (0x%x) ---> %s->%s%s" % (path.get_access_flag(), src_class_name, src_method_name, src_descriptor, path.get_idx(), dst_class_name, dst_method_name, dst_descriptor) else: src_class_name, src_method_name, src_descriptor = path.get_src( cm ) print "%d %s->%s%s (0x%x)" % (path.get_access_flag(), src_class_name, src_method_name, src_descriptor, path.get_idx()) def get_Path(vm, path): x = {} cm = vm.get_class_manager() if isinstance(path, PathVar): dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm ) info_var = path.get_var_info() x["src"] = "%s" % info_var x["dst"] = "%s %s %s" % (dst_class_name,
dst_method_name, dst_descriptor) x["idx"] = path.get_idx() else: if path.get_access_flag() == TAINTED_PACKAGE_CALL: src_class_name, src_method_name, src_descriptor = path.get_src( cm ) dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm ) x["src"] = "%s %s %s" % (src_class_name, src_method_name, src_descriptor) x["dst"] = "%s %s %s" % (dst_class_name, dst_method_name, dst_descriptor) else: src_class_name, src_method_name, src_descriptor = path.get_src( cm ) x["src"] = "%s %s %s" % (src_class_name, src_method_name, src_descriptor) x["idx"] = path.get_idx() return x def show_Paths(vm, paths): """ Show paths of packages :param vm: the object which represents the dex file :param paths: a list of :class:`PathP` objects """ for path in paths: show_Path( vm, path ) def get_Paths(vm, paths): """ Return paths of packages :param vm: the object which represents the dex file :param paths: a list of :class:`PathP` objects """ full_paths = [] for path in paths: full_paths.append(get_Path( vm, path )) return full_paths def show_PathVariable(vm, paths): for path in paths: access, idx = path[0] m_idx = path[1] method = vm.get_cm_method(m_idx) print "%s %x %s->%s %s" % (access, idx, method[0], method[1], method[2][0] + method[2][1]) class PathP(object): def __init__(self, access, idx, src_idx, dst_idx): self.access_flag = access self.idx = idx self.src_idx = src_idx self.dst_idx = dst_idx def get_access_flag(self): return self.access_flag def get_dst(self, cm): method = cm.get_method_ref(self.dst_idx) return method.get_class_name(), method.get_name(), method.get_descriptor() def get_src(self, cm): method = cm.get_method_ref(self.src_idx) return method.get_class_name(), method.get_name(), method.get_descriptor() def get_idx(self): return self.idx def get_src_idx(self): return self.src_idx def get_dst_idx(self): return self.dst_idx class TaintedPackage(object): def __init__(self, vm, name): self.vm = vm self.name = name self.paths = {TAINTED_PACKAGE_CREATE : [], TAINTED_PACKAGE_CALL : []} def get_name(self): return self.name def gets(self): return self.paths def push(self, access, idx, src_idx, dst_idx): p = PathP( access, idx, src_idx, dst_idx ) self.paths[ access ].append( p ) return p def get_objects_paths(self): return self.paths[ TAINTED_PACKAGE_CREATE ] def search_method(self, name, descriptor): """ @param name : a regexp for the name of the method @param descriptor : a regexp for the descriptor of the method @rtype : a list of called paths """ l = [] m_name = re.compile(name) m_descriptor = re.compile(descriptor) for path in self.paths[ TAINTED_PACKAGE_CALL ]: _, dst_name, dst_descriptor = path.get_dst(self.vm.get_class_manager()) if m_name.match( dst_name ) != None and m_descriptor.match( dst_descriptor ) != None: l.append( path ) return l def get_method(self, name, descriptor): l = [] for path in self.paths[ TAINTED_PACKAGE_CALL ]: if path.get_name() == name and path.get_descriptor() == descriptor: l.append( path ) return l def get_paths(self): for i in self.paths: for j in self.paths[ i ]: yield j def get_paths_length(self): x = 0 for i in self.paths: x += len(self.paths[ i ]) return x def get_methods(self): return [path for path in self.paths[TAINTED_PACKAGE_CALL]] def get_new(self): return [path for path in self.paths[TAINTED_PACKAGE_CREATE]] def show(self): cm = self.vm.get_class_manager() print self.get_name() for _type in self.paths: print "\t -->", _type if _type == TAINTED_PACKAGE_CALL: for path in self.paths[_type]: print "\t\t => %s <-- %x in %s" % (path.get_dst(cm), 
path.get_idx(), path.get_src(cm)) else: for path in self.paths[_type]: print "\t\t => %x in %s" % (path.get_idx(), path.get_src(cm)) def show_Permissions(dx): """ Show where permissions are used in a specific application :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object """ p = dx.get_permissions( [] ) for i in p: print i, ":" for j in p[i]: show_Path( dx.get_vm(), j ) def show_DynCode(dx): """ Show where dynamic code is used :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object """ paths = [] paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/BaseDexClassLoader;", "<init>", ".")) paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/PathClassLoader;", "<init>", ".")) paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexClassLoader;", "<init>", ".")) paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;", "<init>", ".")) paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;", "loadDex", ".")) show_Paths( dx.get_vm(), paths ) def show_NativeMethods(dx): """ Show the native methods :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object """ print get_NativeMethods(dx) def show_ReflectionCode(dx): """ Show the reflection code :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object """ paths = dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;", ".", ".") show_Paths(dx.get_vm(), paths) def get_NativeMethods(dx): """ Return the native methods :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: [tuple] """ d = dx.get_vm() native_methods = [] for i in d.get_methods(): if i.get_access_flags() & 0x100: native_methods.append( (i.get_class_name(), i.get_name(), i.get_descriptor())) return native_methods def get_ReflectionCode(dx): """ Return the reflection code :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: [dict] """ paths = dx.get_tainted_packages().search_methods( "Ljava/lang/reflect/Method;", ".", ".") return get_Paths(dx.get_vm(), paths) def is_crypto_code(dx): """ Is crypto code present? :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: boolean """ if dx.get_tainted_packages().search_methods("Ljavax/crypto/.", ".", "."): return True if dx.get_tainted_packages().search_methods("Ljava/security/spec/.", ".", "."): return True return False def is_dyn_code(dx): """ Is Dalvik dynamic code loading present? :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: boolean """ if dx.get_tainted_packages().search_methods("Ldalvik/system/BaseDexClassLoader;", "<init>", "."): return True if dx.get_tainted_packages().search_methods("Ldalvik/system/PathClassLoader;", "<init>", "."): return True if dx.get_tainted_packages().search_methods("Ldalvik/system/DexClassLoader;", "<init>", "."): return True if dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;", "<init>", "."): return True if dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;", "loadDex", "."): return True return False def is_reflection_code(dx): """ Is reflection code present?
:param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: boolean """ if dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;", ".", "."): return True if dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Field;", ".", "."): return True if dx.get_tainted_packages().search_methods("Ljava/lang/Class;", "forName", "."): return True return False def is_native_code(dx): """ Is native code present? :param dx : the analysis virtual machine :type dx: a :class:`VMAnalysis` object :rtype: boolean """ if dx.get_tainted_packages().search_methods("Ljava/lang/System;", "load.", "."): return True if dx.get_tainted_packages().search_methods("Ljava/lang/Runtime;", "load.", "."): return True return False class TaintedPackages(object): def __init__(self, _vm): self.__vm = _vm self.__packages = {} self.__methods = {} self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version()) self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version()) def _add_pkg(self, name): if name not in self.__packages: self.__packages[ name ] = TaintedPackage( self.__vm, name ) #self.context.get_tainted_packages().push_info( method_info[0], TAINTED_PACKAGE_CALL, idx, self, self.method, method_info[1], method_info[2][0] + method_info[2][1] ) def push_info(self, class_name, access, idx, method, idx_method): self._add_pkg( class_name ) p = self.__packages[ class_name ].push( access, idx, method.get_method_idx(), idx_method ) try: self.__methods[ method ][ class_name ].append( p ) except KeyError: try: self.__methods[ method ][ class_name ] = [] except KeyError: self.__methods[ method ] = {} self.__methods[ method ][ class_name ] = [] self.__methods[ method ][ class_name ].append( p ) def get_packages_by_method(self, method): try: return self.__methods[method] except KeyError: return {} def get_package(self, name): return self.__packages[name] def get_packages_by_bb(self, bb): """ :rtype: return a list of packages used in a basic block """ l = [] for i in self.__packages: paths = self.__packages[i].gets() for j in paths: for k in paths[j]: if k.get_bb() == bb: l.append( (i, k.get_access_flag(), k.get_idx(), k.get_method()) ) return l def get_packages(self): for i in self.__packages: yield self.__packages[i], i def get_internal_packages_from_package(self, package): classes = self.__vm.get_classes_names() l = [] for m, _ in self.get_packages(): paths = m.get_methods() for j in paths: src_class_name, _, _ = j.get_src(self.__vm.get_class_manager()) dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager()) if src_class_name == package and dst_class_name in classes: l.append(j) return l def get_internal_packages(self): """ :rtype: return a list of the internal packages called in the application """ classes = self.__vm.get_classes_names() l = [] for m, _ in self.get_packages(): paths = m.get_methods() for j in paths: if j.get_access_flag() == TAINTED_PACKAGE_CALL: dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager()) if dst_class_name in classes and m.get_name() in classes: l.append(j) return l def get_internal_new_packages(self): """ :rtype: return a list of the internal packages created in the application """ classes = self.__vm.get_classes_names() l = {} for m, _ in self.get_packages(): paths = m.get_new() for j in paths: src_class_name, _, _ = j.get_src(self.__vm.get_class_manager()) if src_class_name in classes and m.get_name() in classes: if j.get_access_flag() ==
TAINTED_PACKAGE_CREATE: try: l[m.get_name()].append(j) except: l[m.get_name()] = [] l[m.get_name()].append(j) return l def get_external_packages(self): """ :rtype: return a list of the external packages called in the application """ classes = self.__vm.get_classes_names() l = [] for m, _ in self.get_packages(): paths = m.get_methods() for j in paths: src_class_name, _, _ = j.get_src(self.__vm.get_class_manager()) dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager()) if src_class_name in classes and dst_class_name not in classes: if j.get_access_flag() == TAINTED_PACKAGE_CALL: l.append(j) return l def search_packages(self, package_name): """ :param package_name: a regexp for the name of the package :rtype: a list of called packages' paths """ ex = re.compile(package_name) l = [] for m, _ in self.get_packages(): if ex.search(m.get_name()) != None: l.extend(m.get_methods()) return l def search_unique_packages(self, package_name): """ :param package_name: a regexp for the name of the package """ ex = re.compile( package_name ) l = [] d = {} for m, _ in self.get_packages(): if ex.match( m.get_info() ) != None: for path in m.get_methods(): try: d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] += 1 except KeyError: d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] = 0 l.append( [ path.get_class_name(), path.get_name(), path.get_descriptor() ] ) return l, d def search_methods(self, class_name, name, descriptor, re_expr=True): """ @param class_name : a regexp for the class name of the method (the package) @param name : a regexp for the name of the method @param descriptor : a regexp for the descriptor of the method @rtype : a list of called methods' paths """ l = [] if re_expr == True: ex = re.compile( class_name ) for m, _ in self.get_packages(): if ex.search( m.get_name() ) != None: l.extend( m.search_method( name, descriptor ) ) return l def search_objects(self, class_name): """ @param class_name : a regexp for the class name @rtype : a list of created objects' paths """ ex = re.compile( class_name ) l = [] for m, _ in self.get_packages(): if ex.search( m.get_name() ) != None: l.extend( m.get_objects_paths() ) return l def search_crypto_packages(self): """ @rtype : a list of called crypto packages """ return self.search_packages( "Ljavax/crypto/" ) def search_telephony_packages(self): """ @rtype : a list of called telephony packages """ return self.search_packages( "Landroid/telephony/" ) def search_net_packages(self): """ @rtype : a list of called net packages """ return self.search_packages( "Landroid/net/" ) def get_method(self, class_name, name, descriptor): try: return self.__packages[ class_name ].get_method( name, descriptor ) except KeyError: return [] def get_permissions_method(self, method): permissions = set() for m, _ in self.get_packages(): paths = m.get_methods() for j in paths: if j.get_method() == method: if j.get_access_flag() == TAINTED_PACKAGE_CALL: dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() ) data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor) if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys(): permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data]) return permissions def get_permissions(self, permissions_needed): """ @param permissions_needed : a list of restricted permissions to get ([] returns all permissions) @rtype : a dictionnary of permissions' paths """ permissions = {} pn = 
set(permissions_needed) if permissions_needed == []: pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys()) classes = self.__vm.get_classes_names() for m, _ in self.get_packages(): paths = m.get_methods() for j in paths: src_class_name, src_method_name, src_descriptor = j.get_src( self.__vm.get_class_manager() ) dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() ) if (src_class_name in classes) and (dst_class_name not in classes): if j.get_access_flag() == TAINTED_PACKAGE_CALL: data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor) if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys(): perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data]) for p in perm_intersection: try: permissions[p].append(j) except KeyError: permissions[p] = [] permissions[p].append(j) return permissions class Enum(object): def __init__(self, names): self.names = names for value, name in enumerate(self.names): setattr(self, name.upper(), value) def tuples(self): return tuple(enumerate(self.names)) TAG_ANDROID = Enum([ 'ANDROID', 'TELEPHONY', 'SMS', 'SMSMESSAGE', 'ACCESSIBILITYSERVICE', 'ACCOUNTS', 'ANIMATION', 'APP', 'BLUETOOTH', 'CONTENT', 'DATABASE', 'DEBUG', 'DRM', 'GESTURE', 'GRAPHICS', 'HARDWARE', 'INPUTMETHODSERVICE', 'LOCATION', 'MEDIA', 'MTP', 'NET', 'NFC', 'OPENGL', 'OS', 'PREFERENCE', 'PROVIDER', 'RENDERSCRIPT', 'SAX', 'SECURITY', 'SERVICE', 'SPEECH', 'SUPPORT', 'TEST', 'TEXT', 'UTIL', 'VIEW', 'WEBKIT', 'WIDGET', 'DALVIK_BYTECODE', 'DALVIK_SYSTEM', 'JAVA_REFLECTION']) TAG_REVERSE_ANDROID = dict((i[0], i[1]) for i in TAG_ANDROID.tuples()) TAGS_ANDROID = { TAG_ANDROID.ANDROID : [ 0, "Landroid" ], TAG_ANDROID.TELEPHONY : [ 0, "Landroid/telephony"], TAG_ANDROID.SMS : [ 0, "Landroid/telephony/SmsManager"], TAG_ANDROID.SMSMESSAGE : [ 0, "Landroid/telephony/SmsMessage"], TAG_ANDROID.DEBUG : [ 0, "Landroid/os/Debug"], TAG_ANDROID.ACCESSIBILITYSERVICE : [ 0, "Landroid/accessibilityservice" ], TAG_ANDROID.ACCOUNTS : [ 0, "Landroid/accounts" ], TAG_ANDROID.ANIMATION : [ 0, "Landroid/animation" ], TAG_ANDROID.APP : [ 0, "Landroid/app" ], TAG_ANDROID.BLUETOOTH : [ 0, "Landroid/bluetooth" ], TAG_ANDROID.CONTENT : [ 0, "Landroid/content" ], TAG_ANDROID.DATABASE : [ 0, "Landroid/database" ], TAG_ANDROID.DRM : [ 0, "Landroid/drm" ], TAG_ANDROID.GESTURE : [ 0, "Landroid/gesture" ], TAG_ANDROID.GRAPHICS : [ 0, "Landroid/graphics" ], TAG_ANDROID.HARDWARE : [ 0, "Landroid/hardware" ], TAG_ANDROID.INPUTMETHODSERVICE : [ 0, "Landroid/inputmethodservice" ], TAG_ANDROID.LOCATION : [ 0, "Landroid/location" ], TAG_ANDROID.MEDIA : [ 0, "Landroid/media" ], TAG_ANDROID.MTP : [ 0, "Landroid/mtp" ], TAG_ANDROID.NET : [ 0, "Landroid/net" ], TAG_ANDROID.NFC : [ 0, "Landroid/nfc" ], TAG_ANDROID.OPENGL : [ 0, "Landroid/opengl" ], TAG_ANDROID.OS : [ 0, "Landroid/os" ], TAG_ANDROID.PREFERENCE : [ 0, "Landroid/preference" ], TAG_ANDROID.PROVIDER : [ 0, "Landroid/provider" ], TAG_ANDROID.RENDERSCRIPT : [ 0, "Landroid/renderscript" ], TAG_ANDROID.SAX : [ 0, "Landroid/sax" ], TAG_ANDROID.SECURITY : [ 0, "Landroid/security" ], TAG_ANDROID.SERVICE : [ 0, "Landroid/service" ], TAG_ANDROID.SPEECH : [ 0, "Landroid/speech" ], TAG_ANDROID.SUPPORT : [ 0, "Landroid/support" ], TAG_ANDROID.TEST : [ 0, "Landroid/test" ], TAG_ANDROID.TEXT : [ 0, "Landroid/text" ], TAG_ANDROID.UTIL : [ 0, "Landroid/util" ], TAG_ANDROID.VIEW : [ 0, "Landroid/view" ], TAG_ANDROID.WEBKIT : [ 0, "Landroid/webkit" ], 
TAG_ANDROID.WIDGET : [ 0, "Landroid/widget" ], TAG_ANDROID.DALVIK_BYTECODE : [ 0, "Ldalvik/bytecode" ], TAG_ANDROID.DALVIK_SYSTEM : [ 0, "Ldalvik/system" ], TAG_ANDROID.JAVA_REFLECTION : [ 0, "Ljava/lang/reflect"], } class Tags(object): """ Handle specific tags :param patterns: :params reverse: """ def __init__(self, patterns=TAGS_ANDROID, reverse=TAG_REVERSE_ANDROID): self.tags = set() self.patterns = patterns self.reverse = TAG_REVERSE_ANDROID for i in self.patterns: self.patterns[i][1] = re.compile(self.patterns[i][1]) def emit(self, method): for i in self.patterns: if self.patterns[i][0] == 0: if self.patterns[i][1].search( method.get_class() ) != None: self.tags.add( i ) def emit_by_classname(self, classname): for i in self.patterns: if self.patterns[i][0] == 0: if self.patterns[i][1].search( classname ) != None: self.tags.add( i ) def get_list(self): return [ self.reverse[ i ] for i in self.tags ] def __contains__(self, key): return key in self.tags def __str__(self): return str([ self.reverse[ i ] for i in self.tags ]) def empty(self): return self.tags == set() class BasicBlocks(object): """ This class represents all basic blocks of a method """ def __init__(self, _vm, tv): self.__vm = _vm self.tainted = tv self.bb = [] def push(self, bb): self.bb.append(bb) def pop(self, idx): return self.bb.pop(idx) def get_basic_block(self, idx): for i in self.bb: if idx >= i.get_start() and idx < i.get_end(): return i return None def get_tainted_integers(self): try: return self.tainted.get_tainted_integers() except: return None def get_tainted_packages(self): try: return self.tainted.get_tainted_packages() except: return None def get_tainted_variables(self): try: return self.tainted.get_tainted_variables() except: return None def get(self): """ :rtype: return each basic block (:class:`DVMBasicBlock` object) """ for i in self.bb: yield i def gets(self): """ :rtype: a list of basic blocks (:class:`DVMBasicBlock` objects) """ return self.bb def get_basic_block_pos(self, idx): return self.bb[idx] class ExceptionAnalysis(object): def __init__(self, exception, bb): self.start = exception[0] self.end = exception[1] self.exceptions = exception[2:] for i in self.exceptions: i.append(bb.get_basic_block(i[1])) def show_buff(self): buff = "%x:%x\n" % (self.start, self.end) for i in self.exceptions: if i[2] == None: buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2]) else: buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2].get_name()) return buff[:-1] def get(self): d = {"start": self.start, "end": self.end, "list": []} for i in self.exceptions: d["list"].append({"name": i[0], "idx": i[1], "bb": i[2].get_name()}) return d class Exceptions(object): def __init__(self, _vm): self.__vm = _vm self.exceptions = [] def add(self, exceptions, basic_blocks): for i in exceptions: self.exceptions.append( ExceptionAnalysis( i, basic_blocks ) ) def get_exception(self, addr_start, addr_end): for i in self.exceptions: # print hex(i.start), hex(i.end), hex(addr_start), hex(addr_end), i.start >= addr_start and i.end <= addr_end, addr_end <= i.end and addr_start >= i.start if i.start >= addr_start and i.end <= addr_end: return i elif addr_end <= i.end and addr_start >= i.start: return i return None def gets(self): return self.exceptions def get(self): for i in self.exceptions: yield i BO = { "BasicOPCODES" : dvm.BRANCH_DVM_OPCODES, "BasicClass" : DVMBasicBlock, "Dnext" : dvm.determineNext, "Dexception" : dvm.determineException } BO["BasicOPCODES_H"] = [] for i in BO["BasicOPCODES"]: BO["BasicOPCODES_H"].append( re.compile( i ) ) 
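# --- Illustrative sketch (not part of the original module): how the Tags
# helper above classifies raw Dalvik class names against the TAGS_ANDROID
# patterns. The class names below are hypothetical, and because the tags are
# kept in a set, the output order can vary between runs.
def _demo_tags():
    tags = Tags()
    for classname in ("Landroid/telephony/SmsManager;",
                      "Ldalvik/system/DexClassLoader;",
                      "Lcom/example/Foo;"):
        tags.emit_by_classname(classname)
    # e.g. ['ANDROID', 'TELEPHONY', 'SMS', 'DALVIK_SYSTEM'], in any order
    print(tags.get_list())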
class MethodAnalysis(object): """ This class analyses in details a method of a class/dex file :param vm: the object which represent the dex file :param method: the original method :param tv: a virtual object to get access to tainted information :type vm: a :class:`DalvikVMFormat` object :type method: a :class:`EncodedMethod` object """ def __init__(self, vm, method, tv): self.__vm = vm self.method = method self.tainted = tv self.basic_blocks = BasicBlocks(self.__vm, self.tainted) self.exceptions = Exceptions(self.__vm) code = self.method.get_code() if code == None: return current_basic = BO["BasicClass"](0, self.__vm, self.method, self.basic_blocks) self.basic_blocks.push(current_basic) ########################################################## bc = code.get_bc() l = [] h = {} idx = 0 debug("Parsing instructions") instructions = [i for i in bc.get_instructions()] for i in instructions: for j in BO["BasicOPCODES_H"]: if j.match(i.get_name()) != None: v = BO["Dnext"](i, idx, self.method) h[ idx ] = v l.extend(v) break idx += i.get_length() debug("Parsing exceptions") excepts = BO["Dexception"]( self.__vm, self.method ) for i in excepts: l.extend( [i[0]] ) for handler in i[2:]: l.append( handler[1] ) debug("Creating basic blocks in %s" % self.method) idx = 0 for i in instructions: # index is a destination if idx in l: if current_basic.get_nb_instructions() != 0: current_basic = BO["BasicClass"](current_basic.get_end(), self.__vm, self.method, self.basic_blocks) self.basic_blocks.push(current_basic) current_basic.push(i) # index is a branch instruction if idx in h: current_basic = BO["BasicClass"]( current_basic.get_end(), self.__vm, self.method, self.basic_blocks ) self.basic_blocks.push( current_basic ) idx += i.get_length() if current_basic.get_nb_instructions() == 0: self.basic_blocks.pop(-1) debug("Settings basic blocks childs") for i in self.basic_blocks.get(): try: i.set_childs( h[ i.end - i.get_last_length() ] ) except KeyError: i.set_childs( [] ) debug("Creating exceptions") # Create exceptions self.exceptions.add(excepts, self.basic_blocks) for i in self.basic_blocks.get(): # setup exception by basic block i.set_exception_analysis(self.exceptions.get_exception( i.start, i.end - 1 )) del instructions del h, l def get_basic_blocks(self): """ :rtype: a :class:`BasicBlocks` object """ return self.basic_blocks def get_length(self): """ :rtype: an integer which is the length of the code """ return self.get_code().get_length() def get_vm(self): return self.__vm def get_method(self): return self.method def get_local_variables(self): return self.tainted.get_tainted_variables().get_local_variables( self.method ) def show(self): print "METHOD", self.method.get_class_name(), self.method.get_name(), self.method.get_descriptor() for i in self.basic_blocks.get(): print "\t", i i.show() print "" def show_methods(self): print "\t #METHODS :" for i in self.__bb: methods = i.get_methods() for method in methods: print "\t\t-->", method.get_class_name(), method.get_name(), method.get_descriptor() for context in methods[method]: print "\t\t\t |---|", context.details def create_tags(self): """ Create the tags for the method """ self.tags = Tags() for i in self.tainted.get_tainted_packages().get_packages_by_method( self.method ): self.tags.emit_by_classname( i ) def get_tags(self): """ Return the tags of the method :rtype: a :class:`Tags` object """ return self.tags SIGNATURE_L0_0 = "L0_0" SIGNATURE_L0_1 = "L0_1" SIGNATURE_L0_2 = "L0_2" SIGNATURE_L0_3 = "L0_3" SIGNATURE_L0_4 = "L0_4" SIGNATURE_L0_5 = "L0_5" 
SIGNATURE_L0_6 = "L0_6" SIGNATURE_L0_0_L1 = "L0_0:L1" SIGNATURE_L0_1_L1 = "L0_1:L1" SIGNATURE_L0_2_L1 = "L0_2:L1" SIGNATURE_L0_3_L1 = "L0_3:L1" SIGNATURE_L0_4_L1 = "L0_4:L1" SIGNATURE_L0_5_L1 = "L0_5:L1" SIGNATURE_L0_0_L2 = "L0_0:L2" SIGNATURE_L0_0_L3 = "L0_0:L3" SIGNATURE_HEX = "hex" SIGNATURE_SEQUENCE_BB = "sequencebb" SIGNATURES = { SIGNATURE_L0_0 : { "type" : 0 }, SIGNATURE_L0_1 : { "type" : 1 }, SIGNATURE_L0_2 : { "type" : 2, "arguments" : ["Landroid"] }, SIGNATURE_L0_3 : { "type" : 2, "arguments" : ["Ljava"] }, SIGNATURE_L0_4 : { "type" : 2, "arguments" : ["Landroid", "Ljava"] }, SIGNATURE_L0_5 : { "type" : 3, "arguments" : ["Landroid"] }, SIGNATURE_L0_6 : { "type" : 3, "arguments" : ["Ljava"] }, SIGNATURE_SEQUENCE_BB : {}, SIGNATURE_HEX : {}, } from sign import Signature class StringAnalysis(object): def __init__(self, value): self.value = value self.xreffrom = set() def AddXrefFrom(self, classobj, methodobj): #debug("Added strings xreffrom for %s to %s" % (self.value, methodobj)) self.xreffrom.add((classobj, methodobj)) def get_xref_from(self): return self.xreffrom def __str__(self): data = "XREFto for string %s in\n" % repr(self.value) for ref_class, ref_method in self.xreffrom: data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method) return data class MethodClassAnalysis(object): def __init__(self, method): self.method = method self.xrefto = set() self.xreffrom = set() def AddXrefTo(self, classobj, methodobj): #debug("Added method xrefto for %s [%s] to %s" % (self.method, classobj, methodobj)) self.xrefto.add((classobj, methodobj)) def AddXrefFrom(self, classobj, methodobj): #debug("Added method xreffrom for %s [%s] to %s" % (self.method, classobj, methodobj)) self.xreffrom.add((classobj, methodobj)) def get_xref_from(self): return self.xreffrom def get_xref_to(self): return self.xrefto def __str__(self): data = "XREFto for %s\n" % self.method for ref_class, ref_method in self.xrefto: data += "in\n" data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method) data += "XREFFrom for %s\n" % self.method for ref_class, ref_method in self.xreffrom: data += "in\n" data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method) return data class FieldClassAnalysis(object): def __init__(self, field): self.field = field self.xrefread = set() self.xrefwrite = set() def AddXrefRead(self, classobj, methodobj): #debug("Added method xrefto for %s [%s] to %s" % (self.method, classobj, methodobj)) self.xrefread.add((classobj, methodobj)) def AddXrefWrite(self, classobj, methodobj): #debug("Added method xreffrom for %s [%s] to %s" % (self.method, classobj, methodobj)) self.xrefwrite.add((classobj, methodobj)) def get_xref_read(self): return self.xrefread def get_xref_write(self): return self.xrefwrite def __str__(self): data = "XREFRead for %s\n" % self.field for ref_class, ref_method in self.xrefread: data += "in\n" data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method) data += "XREFWrite for %s\n" % self.field for ref_class, ref_method in self.xrefwrite: data += "in\n" data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method) return data REF_NEW_INSTANCE = 0 REF_CLASS_USAGE = 1 class ClassAnalysis(object): def __init__(self, classobj): self._class = classobj self._methods = {} self._fields = {} self.xrefto = collections.defaultdict(set) self.xreffrom = collections.defaultdict(set) def get_method_analysis(self, method): return self._methods.get(method) def get_field_analysis(self, field): return self._fields.get(field) def AddFXrefRead(self, 
method, classobj, field): if field not in self._fields: self._fields[field] = FieldClassAnalysis(field) self._fields[field].AddXrefRead(classobj, method) def AddFXrefWrite(self, method, classobj, field): if field not in self._fields: self._fields[field] = FieldClassAnalysis(field) self._fields[field].AddXrefWrite(classobj, method) def AddMXrefTo(self, method1, classobj, method2): if method1 not in self._methods: self._methods[method1] = MethodClassAnalysis(method1) self._methods[method1].AddXrefTo(classobj, method2) def AddMXrefFrom(self, method1, classobj, method2): if method1 not in self._methods: self._methods[method1] = MethodClassAnalysis(method1) self._methods[method1].AddXrefFrom(classobj, method2) def AddXrefTo(self, ref_kind, classobj, methodobj): #debug("Added class xrefto for %s to %s" % (self._class.get_name(), classobj.get_vm_class().get_name())) self.xrefto[classobj].add((ref_kind, methodobj)) def AddXrefFrom(self, ref_kind, classobj, methodobj): #debug("Added class xreffrom for %s to %s" % (self._class.get_name(), classobj.get_vm_class().get_name())) self.xreffrom[classobj].add((ref_kind, methodobj)) def get_xref_from(self): return self.xreffrom def get_xref_to(self): return self.xrefto def get_vm_class(self): return self._class def __str__(self): data = "XREFto for %s\n" % self._class for ref_class in self.xrefto: data += str(ref_class.get_vm_class().get_name()) + " " data += "in\n" for ref_kind, ref_method in self.xrefto[ref_class]: data += "%d %s\n" % (ref_kind, ref_method) data += "\n" data += "XREFFrom for %s\n" % self._class for ref_class in self.xreffrom: data += str(ref_class.get_vm_class().get_name()) + " " data += "in\n" for ref_kind, ref_method in self.xreffrom[ref_class]: data += "%d %s\n" % (ref_kind, ref_method) data += "\n" return data class newVMAnalysis(object): def __init__(self, vm): self.vm = vm self.classes = {} self.strings = {} for current_class in self.vm.get_classes(): self.classes[current_class.get_name()] = ClassAnalysis(current_class) def create_xref(self): debug("Creating XREF/DREF") instances_class_name = self.classes.keys() external_instances = {} for current_class in self.vm.get_classes(): for current_method in current_class.get_methods(): debug("Creating XREF for %s" % current_method) code = current_method.get_code() if code == None: continue off = 0 bc = code.get_bc() for instruction in bc.get_instructions(): op_value = instruction.get_op_value() if op_value in [0x1c, 0x22]: idx_type = instruction.get_ref_kind() type_info = self.vm.get_cm_type(idx_type) # Internal xref related to class manipulation if type_info in instances_class_name and type_info != current_class.get_name(): # new instance if op_value == 0x22: self.classes[current_class.get_name()].AddXrefTo(REF_NEW_INSTANCE, self.classes[type_info], current_method) self.classes[type_info].AddXrefFrom(REF_NEW_INSTANCE, self.classes[current_class.get_name()], current_method) # class reference else: self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[type_info], current_method) self.classes[type_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method) elif ((op_value >= 0x6e and op_value <= 0x72) or (op_value >= 0x74 and op_value <= 0x78)): idx_meth = instruction.get_ref_kind() method_info = self.vm.get_cm_method(idx_meth) if method_info: class_info = method_info[0] method_item = self.vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2])) if method_item: 
self.classes[current_class.get_name()].AddMXrefTo(current_method, self.classes[class_info], method_item) self.classes[class_info].AddMXrefFrom(method_item, self.classes[current_class.get_name()], current_method) # Internal xref related to class manipulation if class_info in instances_class_name and class_info != current_class.get_name(): self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[class_info], method_item) self.classes[class_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method) elif op_value >= 0x1a and op_value <= 0x1b: string_value = self.vm.get_cm_string(instruction.get_ref_kind()) if string_value not in self.strings: self.strings[string_value] = StringAnalysis(string_value) self.strings[string_value].AddXrefFrom(self.classes[current_class.get_name()], current_method) elif op_value >= 0x52 and op_value <= 0x6d: idx_field = instruction.get_ref_kind() field_info = self.vm.get_cm_field(idx_field) field_item = self.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1]) if field_item: # read access to a field if (op_value >= 0x52 and op_value <= 0x58) or (op_value >= 0x60 and op_value <= 0x66): self.classes[current_class.get_name()].AddFXrefRead(current_method, self.classes[current_class.get_name()], field_item) # write access to a field else: self.classes[current_class.get_name()].AddFXrefWrite(current_method, self.classes[current_class.get_name()], field_item) off += instruction.get_length() def get_method(self, method): return MethodAnalysis( self.vm, method, None ) def get_method_by_name(self, class_name, method_name, method_descriptor): print class_name, method_name, method_descriptor if class_name in self.classes: for method in self.classes[class_name].get_vm_class().get_methods(): print method.get_name(), method.get_descriptor() if method.get_name() == method_name and method.get_descriptor() == method_descriptor: return method return None def is_class_present(self, class_name): return class_name in self.classes def get_class_analysis(self, class_name): return self.classes.get(class_name) def get_strings_analysis(self): return self.strings class VMAnalysis(object): """ This class analyses a dex file :param _vm: the object which represent the dex file :type _vm: a :class:`DalvikVMFormat` object :Example: VMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) ) """ def __init__(self, vm): self.vm = vm self.tainted_variables = TaintedVariables( self.vm ) self.tainted_packages = TaintedPackages( self.vm ) self.tainted = { "variables" : self.tainted_variables, "packages" : self.tainted_packages, } self.signature = None for i in self.vm.get_all_fields(): self.tainted_variables.add( [ i.get_class_name(), i.get_descriptor(), i.get_name() ], TAINTED_FIELD ) self.methods = [] self.hmethods = {} self.__nmethods = {} for i in self.vm.get_methods(): x = MethodAnalysis( self.vm, i, self ) self.methods.append( x ) self.hmethods[ i ] = x self.__nmethods[ i.get_name() ] = x def get_vm(self): return self.vm def get_method(self, method): """ Return an analysis method :param method: a classical method object :type method: an :class:`EncodedMethod` object :rtype: a :class:`MethodAnalysis` object """ return self.hmethods[ method ] def get_methods(self): """ Return each analysis method :rtype: a :class:`MethodAnalysis` object """ for i in self.hmethods: yield self.hmethods[i] def get_method_signature(self, method, grammar_type="", options={}, predef_sign=""): """ Return a specific signature for a specific method 
:param method: a reference to method from a vm class :type method: a :class:`EncodedMethod` object :param grammar_type: the type of the signature (optional) :type grammar_type: string :param options: the options of the signature (optional) :param options: dict :param predef_sign: used a predefined signature (optional) :type predef_sign: string :rtype: a :class:`Sign` object """ if self.signature == None: self.signature = Signature( self ) if predef_sign != "": g = "" o = {} for i in predef_sign.split(":"): if "_" in i: g += "L0:" o[ "L0" ] = SIGNATURES[ i ] else: g += i g += ":" return self.signature.get_method( self.get_method( method ), g[:-1], o ) else: return self.signature.get_method( self.get_method( method ), grammar_type, options ) def get_permissions(self, permissions_needed): """ Return the permissions used :param permissions_needed: a list of restricted permissions to get ([] returns all permissions) :type permissions_needed: list :rtype: a dictionnary of permissions paths """ permissions = {} permissions.update( self.get_tainted_packages().get_permissions( permissions_needed ) ) permissions.update( self.get_tainted_variables().get_permissions( permissions_needed ) ) return permissions def get_permissions_method(self, method): permissions_f = self.get_tainted_packages().get_permissions_method( method ) permissions_v = self.get_tainted_variables().get_permissions_method( method ) all_permissions_of_method = permissions_f.union(permissions_v) return list(all_permissions_of_method) def get_tainted_variables(self): """ Return the tainted variables :rtype: a :class:`TaintedVariables` object """ return self.tainted_variables def get_tainted_packages(self): """ Return the tainted packages :rtype: a :class:`TaintedPackages` object """ return self.tainted_packages def get_tainted_fields(self): return self.get_tainted_variables().get_fields() def get_tainted_field(self, class_name, name, descriptor): """ Return a specific tainted field :param class_name: the name of the class :param name: the name of the field :param descriptor: the descriptor of the field :type class_name: string :type name: string :type descriptor: string :rtype: a :class:`TaintedVariable` object """ return self.get_tainted_variables().get_field( class_name, name, descriptor ) class uVMAnalysis(VMAnalysis): """ This class analyses a dex file but on the fly (quicker !) :param _vm: the object which represent the dex file :type _vm: a :class:`DalvikVMFormat` object :Example: uVMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) ) """ def __init__(self, vm): self.vm = vm self.tainted_variables = TaintedVariables( self.vm ) self.tainted_packages = TaintedPackages( self.vm ) self.tainted = { "variables" : self.tainted_variables, "packages" : self.tainted_packages, } self.signature = None self.resolve = False def get_methods(self): self.resolve = True for i in self.vm.get_methods(): yield MethodAnalysis(self.vm, i, self) def get_method(self, method): return MethodAnalysis( self.vm, method, None ) def get_vm(self): return self.vm def _resolve(self): if self.resolve == False: for i in self.get_methods(): pass def get_tainted_packages(self): self._resolve() return self.tainted_packages def get_tainted_variables(self): self._resolve() return self.tainted_variables def is_ascii_obfuscation(vm): for classe in vm.get_classes(): if is_ascii_problem(classe.get_name()): return True for method in classe.get_methods(): if is_ascii_problem(method.get_name()): return True return False
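# --- Illustrative sketch (not part of the original module): end-to-end use
# of the helpers above. It assumes the companion APK/DEX loaders shipped
# with the same project (androguard.core.bytecodes); apk_path is a
# hypothetical input file, not a bundled sample.
def _demo_analysis(apk_path="app.apk"):
    from androguard.core.bytecodes import apk, dvm
    a = apk.APK(apk_path)
    d = dvm.DalvikVMFormat(a.get_dex())
    dx = uVMAnalysis(d)
    print(is_dyn_code(dx))         # dynamic code loading present?
    print(is_reflection_code(dx))  # reflection present?
    print(is_native_code(dx))      # native/JNI loading present?
    show_DynCode(dx)               # and where the dynamic loading happens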
(1) Any Executive Magistrate or officer-in-charge of a police station or, in the absence of such officer-in-charge, any police officer, not below the rank of a sub-inspector, may command any unlawful assembly, or any assembly of five or more persons likely to cause a disturbance of the public peace, to disperse; and it shall thereupon be the duty of the members of such assembly to disperse accordingly. (2) If, upon being so commanded, any such assembly does not disperse, or if, without being so commanded, it conducts itself in such a manner as to show a determination not to disperse, any Executive Magistrate or police officer referred to in sub-section (1), may proceed to disperse such assembly by force, and may require the assistance of any male person, not being an officer or member of the armed forces and acting as such, for the purpose of dispersing such assembly, and, if necessary, arresting and confining the persons who form part of it, in order to disperse such assembly or that they may be punished according to law. (1) If any such assembly cannot be otherwise dispersed, and if it is necessary for the public security that it should be dispersed, the Executive Magistrate of the highest rank who is present may cause it to be dispersed by the armed forces. (2) Such Magistrate may require any officer in command of any group of persons belonging to the armed forces to disperse the assembly with the help of the armed forces under his command, and to arrest and confine such persons forming part of it as the Magistrate may direct, or as it may be necessary to arrest and confine in order to disperse the assembly or to have them punished according to law. (3) Every such officer of the armed forces shall obey such requisition in such manner as he thinks fit, but in so doing he shall use as little force, and do as little injury to person and property, as may be consistent with dispersing the assembly and arresting and detaining such persons. When the public security is manifestly endangered by any such assembly and no Executive Magistrate can be communicated with, any commissioned or gazetted officer of the armed forces may disperse such assembly with the help of the armed forces under his command, and may arrest and confine any person forming part of it, in order to disperse such assembly or that they may be punished according to law, but if, while he is acting under this section, it becomes practicable for him to communicate with an Executive Magistrate, he shall do so and shall thenceforward obey the instructions of the Magistrate, as to whether he shall or shall not continue such action. (b) with the sanction of the State Government in any other case. (d) no member of the armed forces doing any act in obedience to any order which he was bound to obey, shall be deemed to have thereby committed an offence. (c) "member" in relation to the armed forces, means a person in the armed forces other than an officer. (vi) to destroy, confine or dispose of such dangerous animal in the manner provided in the said order, or, if he objects so to do, to appear before himself or some other Executive Magistrate subordinate to him at a time and place to be fixed by the order and show cause, in the manner hereinafter provided, why the order should not be made absolute. (2) No order duly made by a Magistrate under this section shall be called in question in any Civil Court. Explanation. 
- A "public place" includes also property belonging to the State, camping grounds and grounds left unoccupied for sanitary or recreative purposes. (1) The order shall, if practicable, be served on the person against whom it is made, in the manner hereinafter provided for service of a summons. (2) If such order cannot be so served, it shall be notified by proclamation, published in such manner as the State Government may, by rules, direct, and a copy thereof shall be stuck up at such place or places as may be fittest for conveying the information to such person. (b) appear in accordance with such order and show cause against the same. If such person does not perform such act or appear and show cause, he shall be liable to the penalty prescribed in that behalf in section 188 of the Indian Penal Code (45 of 1860), and the order shall be made absolute. (1) Where an order is made under section 133 for the purpose of preventing obstruction, nuisance or danger to the public in the use of any way, river channel, or place, the Magistrate shall, on the appearance before him of the person against whom the order was made, question him as to whether he denies the existence of any public right in respect of the way, river, channel or place, and if he does so, the Magistrate shall, before proceeding under section 138, inquire into the matter. (2) If in such inquiry the Magistrate finds that there is any reliable evidence in support of such denial, he shall stay the proceedings until the matter of the existence of such right has been decided by a competent Court; and, if he finds that there is no such evidence, he shall proceed as laid down in section 138. (3) A person who has, on being questioned by the Magistrate under sub-section (1), failed to deny the existence of a public right of the nature therein referred to, or who, having made such denial, has failed to adduce reliable evidence in support thereof, shall not in the subsequent proceedings be permitted to make any such denial. (1) If the person against whom an order under section 133 is made appears and shows cause against the order, the Magistrate shall take evidence in the manner as in a summons case. (2) If the Magistrate is satisfied that the order, either as originally made or subject to such modification as he considers necessary, is reasonable and proper, the order shall be made absolute without modification or, as the case may be, with such modification. (3) If the Magistrate is not so satisfied, no further proceedings shall be taken in the case. (b) summon and examine an expert. (b) declare by whom the whole or any part of the necessary expenses of the local investigation shall be paid. (2) The report of such person may be read as evidence in the case. (3) Where the Magistrate summons and examines an expert under section 139, the Magistrate may direct by whom the costs of such summoning and examination shall be paid. (1) When an order has been made absolute under section 136 or section 138, the Magistrate shall give notice of the same to the person against whom the order was made, and shall further require him to perform the act directed by the order within a time to be fixed in the notice, and inform him that, in case of disobedience, he will be liable to the penalty provided by section 188 of the Indian Penal Code (45 of 1860). 
(2) If such act is not performed within the time fixed, the Magistrate may cause it to be performed, and may recover the costs of performing it, either by the sale of any building, goods or other property removed by his order, or by the distress and sale of any other movable property of such person within or without such Magistrate's local jurisdiction, and if such other property is without such jurisdiction, the order shall authorise its attachment and sale when endorsed by the Magistrate within whose local jurisdiction the property to be attached is found. (1) If a Magistrate making an order under section 133 considers that immediate measures should be taken to prevent imminent danger or injury of a serious kind to the public, he may issue such an injunction to the person against whom the order was made, as is required to obviate or prevent such danger or injury pending the determination of the matter. (2) In default of such person forthwith obeying such injunction, the Magistrate may himself use, or cause to be used, such means as he thinks fit to obviate such danger or to prevent such injury. (3) No suit shall lie in respect of anything done in good faith by a Magistrate under this section. A District Magistrate or Sub-Divisional Magistrate, or any other Executive Magistrate empowered by the State Government or the District Magistrate in this behalf, may order any person not to repeat or continue a public nuisance, as defined in the Indian Penal Code (45 of 1860), or any special or local law. (1) In cases where, in the opinion of a District Magistrate, a Sub-Divisional Magistrate or any other Executive Magistrate specially empowered by the State Government in this behalf, there is sufficient ground for proceeding under this section and immediate prevention or speedy remedy is desirable, such Magistrate may, by a written order stating the material facts of the case and served in the manner provided by section 134, direct any person to abstain from a certain act or to take certain order with respect to certain property in his possession or under his management, if such Magistrate considers that such direction is likely to prevent, or tends to prevent, obstruction, annoyance or injury to any person lawfully employed, or danger to human life, health or safety, or a disturbance of the public tranquility, or a riot, or an affray. (3) An order under this Section may be directed to a particular individual or to persons residing in a particular place or area, or to the public generally when frequenting or visiting a particular place or area. (5) Any Magistrate may, either on his own motion or on the application of any person aggrieved, rescind or alter any order made under this section by himself or any Magistrate subordinate to him or by his predecessor-in-office. (6) The State Government may, either on its own motion or on the application of any person aggrieved, rescind or alter any order made by it under the proviso to sub-section (4). (7) Where an application under sub-section (5) or sub-section (6) is received, the Magistrate, or the State Government, as the case may be, shall afford to the applicant an early opportunity of appearing before him or it, either in person or by pleader and showing cause against the order; and if the Magistrate or the State Government, as the case may be, rejects the application wholly or in part, he or it shall record in writing the reasons for so doing. [144-A. 
Power to prohibit carrying arms in procession or mass drill or mass training with arms - (1) The District Magistrate may, whenever he considers it necessary so to do for the preservation of public peace or public safety or for the maintenance of public order, by public notice or by order, prohibit in any area within the local limits of his jurisdiction, the carrying of arms in any procession or the organising or holding of, or taking part in, any mass drill or mass training with arms in any public place. (2) A public notice issued or an order made under this Section may be directed to a particular person or to persons belonging to any community, party or organisation. (3) No public notice issued or an order made under this Section shall remain in force for more than three months from the date on which it is issued or made. (4) The State Government may, if it considers necessary so to do for the preservation of public peace or public safety or for the maintenance of public order, by notification, direct that a public notice issued or order made by the District Magistrate under this Section shall remain in force for such further period not exceeding six months from the date on which the public notice or order issued or made by the District Magistrate would have, but for such direction, expired, as it may specify in the said notification. (5) The State Government may, subject to such control and directions as it may deem fit to impose, by general or special order, delegate its powers under sub-section (4) to the District Magistrate. Explanation - The word "arms" shall have the meaning assigned to it in Section 153-AA of the Indian Penal Code, 1860]. (1) Whenever an Executive Magistrate is satisfied from a report of a police officer or upon other information that a dispute likely to cause a breach of the peace exists concerning any land or water or the boundaries thereof, within his local jurisdiction, he shall make an order in writing, stating the grounds of his being so satisfied, and requiring the parties concerned in such dispute to attend his court in person or by pleader, on a specified date and time, and to put in written statements of their respective claims as respects the fact of actual possession of the subject of dispute. (2) For the purposes of this section, the expression "land or water" includes buildings, markets, fisheries, crops or other produce of land and the rents or profits of any such property. (3) A copy of the order shall be served in the manner provided by this Code for the service of a summons upon such person or persons as the Magistrate may direct, and at least one copy shall be published by being affixed to some conspicuous place at or near the subject of dispute. Provided that if it appears to the Magistrate that any party has been forcibly and wrongfully dispossessed within two months next before the date on which the report of a police officer or other information was received by the Magistrate, or after that date and before the date of his order under sub-section (1), he may treat the party so dispossessed as if that party had been in possession on the date of his order under sub-section (1). 
(5) Nothing in this section shall preclude any party so required to attend, or any other person interested, from showing that no such dispute as aforesaid exists or has existed; and in such case the Magistrate shall cancel his said order, and all further proceedings thereon shall be stayed, but subject to such cancellation, the order of the Magistrate under sub-section (1) shall be final. (6) (a) If the Magistrate decides that one of the parties was, or should under the proviso to sub-section (4) be treated as being, in such possession of the said subject, he shall issue an order declaring such party to be entitled to possession thereof until evicted therefrom in due course of law, and forbidding all disturbance of such possession until such eviction; and when he proceeds under the proviso to sub-section (4), may restore to possession the party forcibly and wrongfully dispossessed. (b) The order made under this sub-section shall be served and published in the manner laid down in sub-section (3). (7) When any party to any such proceeding dies, the Magistrate may cause the legal representative of the deceased party to be made a party to the proceeding and shall thereupon continue the inquiry, and if any question arises as to who the legal representative of a deceased party for the purposes of such proceeding is, all persons claiming to be representatives of the deceased party shall be made parties thereto. (8) If the Magistrate is of opinion that any crop or other produce of the property, the subject of dispute in a proceeding under this section pending before him, is subject to speedy and natural decay, he may make an order for the proper custody or sale of such property, and, upon the completion of the inquiry, shall make such order for the disposal of such property, or the sale-proceeds thereof, as he thinks fit. (9) The Magistrate may, if he thinks fit, at any stage of the proceedings under this section, on the application of either party, issue a summons to any witness directing him to attend or to produce any document or thing. (10) Nothing in this section shall be deemed to be in derogation of the powers of the Magistrate to proceed under section 107. (1) Whenever an Executive Magistrate is satisfied from the report of a police officer or upon other information, that a dispute likely to cause a breach of the peace exists regarding any alleged right of user of any land or water within his local jurisdiction, whether such right be claimed as an easement or otherwise, he shall make an order in writing, stating the grounds of his being so satisfied and requiring the parties concerned in such dispute to attend his Court in person or by pleader on a specified date and time and to put in written statement of their respective claims. Explanation. - The expression "land or water" has the meaning given to it in sub-section (2) of section 145. (3) When any costs have been incurred by any party to a proceeding under section 145, section 146, or section 147, the Magistrate passing a decision may direct by whom such costs shall be paid, whether by such party or by any other party to the proceedings, and whether in whole or in part or proportion and such costs may include any expenses incurred in respect of witnesses and of pleader's fees, which the Court may consider reasonable.
"""Modified versions of gas properties and spectra that use the rate network.""" import numpy as np from ._spectra_priv import _interpolate_2d from . import gas_properties from . import spectra from .rate_network import RateNetwork class RateNetworkGas(gas_properties.GasProperties): """Replace the get_reproc_HI function with something that solves the rate network. Optionally can also do self-shielding.""" def __init__(self, redshift, absnap, hubble=0.71, fbar=0.17, units=None, sf_neutral=True, temp_factor=1, gamma_factor=1, **kwargs): super().__init__(redshift, absnap, hubble=hubble, fbar=fbar, units=units, sf_neutral=sf_neutral) self.rates = RateNetwork(redshift, f_bar = fbar, **kwargs) self.temp_factor = temp_factor self.gamma_factor = gamma_factor self.maxdens = self.PhysDensThresh/0.76 dmax = 5 dsz=1000 if self.sf_neutral: dmax = np.log(self.maxdens) dsz = 500 self.build_interp(dlim=(-16, dmax), elim=(2, 21),tsz=500, dsz=dsz) def build_interp(self, dlim, elim, tsz=500, dsz=1000): """Build the interpolator""" #Build interpolation self.densgrid = np.linspace(dlim[0], dlim[1], dsz) self.ienergygrid = np.linspace(elim[0], elim[1], tsz) dgrid, egrid = np.meshgrid(self.densgrid, self.ienergygrid) self.lh0grid = np.zeros_like(dgrid) self.tempgrid = np.zeros_like(dgrid) #We assume primordial helium for i in range(dsz): self.lh0grid[:,i] = np.log(self.rates.get_neutral_fraction(np.exp(dgrid[:,i]), np.exp(egrid[:,i]))) self.tempgrid[:,i] = np.log(self.rates.get_temp(np.exp(dgrid[:,i]), np.exp(egrid[:,i]))) def get_temp(self,part_type, segment): """Compute temperature (in K) from internal energy using the rate network.""" temp, ii2, density, ienergy = self._get_interp(part_type, segment, nhi=False) if np.size(ii2) > 0: temp[ii2] = self.rates.get_temp(density[ii2], ienergy[ii2]) assert np.all(np.logical_not(np.isnan(temp))) return temp def _get_interp(self, part_type, segment, nhi=True): """Get a neutral hydrogen fraction using a rate network which reads temperature and density of the gas.""" #expecting units of atoms/cm^3 density = self.get_code_rhoH(part_type, segment) #expecting units of 10^-10 ergs/g ienergy = self.absnap.get_data(part_type, "InternalEnergy", segment=segment)*self.units.UnitInternalEnergy_in_cgs/1e10 ienergy = self._get_ienergy_rescaled(density, ienergy) ldensity = np.log(density) lienergy = np.log(ienergy) #Clamp the temperatures : hot gas has the same neutral fraction of 0 anyway. ie = np.where(lienergy >= np.max(self.ienergygrid)) lienergy[ie] = np.max(self.ienergygrid)*0.99 ie = np.where(lienergy <= np.min(self.ienergygrid)) lienergy[ie] = np.min(self.ienergygrid)*1.01 out = np.ones_like(density) ii = np.where(ldensity < np.max(self.densgrid)) if (np.max(self.ienergygrid) < np.max(lienergy[ii])) or (np.min(self.ienergygrid) > np.min(lienergy[ii])): raise ValueError("Ienergy out of range: interp %g -> %g. Present: %g -> %g" % (np.min(self.ienergygrid), np.max(self.ienergygrid), np.min(lienergy[ii]), np.max(lienergy[ii]))) #Correct internal energy to the internal energy of a cold cloud if we are on the star forming equation of state. 
if nhi: zgrid = self.lh0grid else: zgrid = self.tempgrid out[ii] = np.exp(_interpolate_2d(ldensity[ii], lienergy[ii], self.densgrid, self.ienergygrid, zgrid)) ii2 = np.where(ldensity >= np.max(self.densgrid)) return out,ii2,density,ienergy def get_reproc_HI(self, part_type, segment): """Get a neutral hydrogen fraction using a rate network which reads temperature and density of the gas.""" #expecting units of atoms/cm^3 nH0, ii2, density, ienergy = self._get_interp(part_type, segment, nhi=True) if np.size(ii2) > 0: if self.sf_neutral: if self.redshift_coverage: ssnH0 = self._neutral_fraction(density[ii2], 1e4) nH0[ii2] = ssnH0 else: nH0[ii2] = 1. else: nH0[ii2] = self.rates.get_neutral_fraction(density[ii2], ienergy[ii2]) assert np.all(np.logical_not(np.isnan(nH0))) return nH0 def _get_ienergy_rescaled(self, density, ienergy): """Get the internal energy, rescaled to give the desired equation of state. Technically the e. of s. normally used is: T = T_0 (rho / rho_0)^(gamma-1) However in photoionisation equilibrium the electron density depends very weakly on the temperature, and so T/T_0 = U/U_0 So we can just rescale the internal energy: when T_0 -> T_0' U -> U * T_0'/T_0. Ditto for gamma, when gamma -> gamma' we have: U -> U (rho/rho_0) ^(gamma'-gamma) Note this means that if any particle lies off the original equation of state, it lies off the new one by a similar amount; the dispersion is preserved! """ #Adjust temperature by desired factor, to give desired equation of state. omegab = 0.0445 rhoc = self.units.rho_crit(self.hubble) * (1+self.redshift)**3 overden = self.units.protonmass * density /(omegab * rhoc) ienergy *= self.temp_factor #Adjust slope by same factor: note use gamma_factor -1 so gamma_factor = 1 means no change. if self.gamma_factor != 1.: ienergy *= (overden)**(self.gamma_factor-1.) assert np.all(np.logical_not(np.isnan(ienergy))) assert np.all(ienergy > 0) return ienergy class RateNetworkSpectra(spectra.Spectra): """Generate spectra with a neutral fraction from a rate network""" def __init__(self, *args, photo_factor = 1, sf_neutral=True, selfshield=True, temp_factor = 1, gamma_factor = 1, hubble = 0.71, fbar = 0.17, treecool_file = "data/TREECOOL_ep_2018p", **kwargs): kwargs["gasprop"]=RateNetworkGas kwargs["sf_neutral"] = sf_neutral kwargs["gasprop_args"] = {"photo_factor" : photo_factor, "selfshield" : selfshield, "temp_factor" : temp_factor, "gamma_factor" : gamma_factor, "hubble" : hubble, "fbar" : fbar, "treecool_file" : treecool_file} super().__init__(*args, **kwargs)
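# --- Illustrative sketch (not part of the module): the internal-energy
# rescaling performed by _get_ienergy_rescaled, on made-up numbers. The
# temp_factor, gamma_factor, overdensity and energy values below are all
# hypothetical; it only demonstrates the arithmetic from the docstring.
def _demo_ienergy_rescaling():
    temp_factor = 1.2                       # hypothetical T_0' / T_0
    gamma_factor = 1.1                      # hypothetical e.o.s. slope change
    overden = np.array([0.5, 1.0, 10.0])    # made-up baryon overdensities
    ienergy = np.array([1.0, 1.0, 1.0])     # made-up internal energies
    # In photoionisation equilibrium T/T_0 = U/U_0, so a shift in T_0 is a
    # plain multiplicative rescaling, while a shift in gamma tilts U by
    # (rho/rho_0)^(gamma' - gamma).
    return ienergy * temp_factor * overden**(gamma_factor - 1.0)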
NICUs across the country are experiencing or have experienced medicine shortages ranging from drugs used to resuscitate a newborn to drugs that provide nutrition. Our State of Opportunity project focuses on kids and what it will take to get them ahead. At the most basic level, that means ensuring children are healthy. But as Michigan Radio’s Jennifer Guerra reports, nationwide drug shortages could threaten even that most basic task. We called every neonatal intensive care unit in Michigan, and all but one got back to us. Each one has experienced or is experiencing a wide variety of drug shortages in the NICU. Erin Fox is the director of drug information at the University of Utah Hospitals and Clinics. Her group has been tracking national drug shortages since 2001 for the American Society of Health-System Pharmacists (ASHP). She says the reason for the shortage is pretty complicated. "There’s no one person, no one company, no one reason we can point the finger at and blame," says Fox. Instead, she gives three reasons. Reason #1: Supply chain. Only a few companies make these generic drugs. "If only two companies make a product and one has a problem, that’s almost always a shortage," explains Fox. Reason #2: Generic drug companies run lean. Fox says because generic drug companies run very lean, they don’t have a backup plan if one of their manufacturing lines goes down. Reason #3: Quality. Some of the factories and plants are having quality issues, so the FDA shuts them down until they are in compliance, which leads to more drug shortages. To hear more about the drug shortages and how they're impacting children in Michigan, read our report on the State of Opportunity website.
#!/usr/bin/env python # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| """Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it Example: with ssh_agent.SshAgent() as agent: agent.add_key(private_key_string) # do ssh stuff # as agent loses scope, the ssh agent is killed """ from __future__ import with_statement import atexit import tempfile import os import sys import shutil import subprocess import random import time import datetime class SshAgentException(Exception): """An exception thrown for problems in SshAgent """ def __init__(self, message): # Call the base class constructor with the parameters it needs super(SshAgentException, self).__init__(message) class SshAgent(object): """Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it. The running agent can have one or more keys added (via the SshAgent.add_key() method or via any other method that can find and talk to the running agent. """ class Cleanup(object): """A helper functor class for SshAgent An object of this class can be passed directly to atexit, which will call __call__() when the program exits """ def __init__(self, ssh_agent, ssh_auth_sock_dir): self.ssh_agent = ssh_agent self.ssh_auth_sock_dir = ssh_auth_sock_dir self.cleaned_up = False self.original_env_var = os.environ.get('SSH_AUTH_SOCK') def __call__(self): if self.cleaned_up: return self.cleaned_up = True try: shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True) except OSError: pass try: self.ssh_agent.kill() except OSError: pass if self.original_env_var: os.environ['SSH_AUTH_SOCK'] = self.original_env_var else: del os.environ['SSH_AUTH_SOCK'] def pass_(self): """A function to appease pylint""" pass def pass__(self): """Another function to appease pylint""" self.pass_() def __init__(self): devnull = open(os.devnull, 'w') # Start an ssh-agent process and register it to be killed atexit self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.') self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent") self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull) self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir) # this is here so that when python exits, we make sure that the agent is killed # (in case python exits before our __del__() is called atexit.register(self.cleanup) os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock def __enter__(self): return self def __exit__(self, exc_type, exc_value, tback): self.cleanup() def __del__(self): self.cleanup() def kill(self): '''Explicitly kill the running ssh-agent It's not necessary to call this function as the agent will be cleaned up automatically. ''' self.cleanup() def add_key(self, key): """Add a key to the running agent. Note: This function can be called any number of times to add multiple keys. Args: key (str): A string containing the ssh private key to be added (the actual key data, not the filename of a key) Raises: SshAgentException: when ssh-add does not immediately return (as in the case of a private key with a passphrase) """ #if self.ssh_agent.poll() is None: # raise SshAgentException("Unable to add ssh key. Did agent die?") named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." 
+ str(random.getrandbits(64))) try: os.mkfifo(named_pipe_path, 0600) except OSError, exception: print "Failed to create FIFO: %s" % exception devnull = open(os.devnull, 'w') ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull) fifo = open(named_pipe_path, 'w') print >> fifo, key fifo.close() #Popen.wait() doesn't have a timeout, so we'll implement one using poll() :( start_time = datetime.datetime.now() while ssh_add.poll() is None: if (datetime.datetime.now() - start_time).total_seconds() > 5: try: ssh_add.kill() except OSError: pass raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?") time.sleep(0.1) os.remove(named_pipe_path) # pylint: disable=too-many-lines # these are already imported inside of the ssh library #import os #import subprocess class GitCLIError(Exception): '''Exception class for openshiftcli''' pass # pylint: disable=too-few-public-methods class GitCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, path, verbose=False, ssh_key=None, author=None): ''' Constructor for GitCLI ''' self.path = path self.verbose = verbose self.ssh_key = ssh_key self.author = author self.environment_vars = os.environ.copy() if self.author: author_dict = {} author_list = author.split('<') author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip() author_dict['GIT_COMMITTER_EMAIL'] = author_list[1].strip().rstrip('>') self.environment_vars.update(author_dict) def _add(self, files_to_add=None): ''' git add ''' cmd = ["add", "--no-ignore-removal"] if files_to_add: cmd.extend(files_to_add) else: cmd.append('.') results = self.git_cmd(cmd) return results def _commit(self, msg, author=None): ''' git commit with message ''' cmd = ["commit", "-m", msg] if author: cmd += ["--author", author] results = self.git_cmd(cmd) return results def _clone(self, repo, dest, bare=False): ''' git clone ''' cmd = ["clone"] if bare: cmd += ["--bare"] cmd += [repo, dest] results = self.git_cmd(cmd) return results def _fetch(self, remote): ''' git fetch ''' cmd = ["fetch"] cmd += [remote] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _status(self, porcelain=False, show_untracked=True): ''' Do a git status ''' cmd = ["status"] if porcelain: cmd.append('--porcelain') if show_untracked: cmd.append('--untracked-files=normal') else: cmd.append('--untracked-files=no') results = self.git_cmd(cmd, output=True, output_type='raw') return results def _checkout(self, branch): ''' Do a git checkout to <branch> ''' cmd = ["checkout", branch] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _get_current_branch(self): ''' Get the name of the current branch ''' cmd = ["describe", "--contains", "--all", "HEAD"] results = self.git_cmd(cmd, output=True, output_type='raw') results['results'] = results['results'].rstrip() return results def _merge(self, merge_id): ''' Do a git merge of <merge_id> ''' cmd = ["merge", merge_id] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _push(self, remote, src_branch, dest_branch): ''' Do a git push of <src_branch> to <dest_branch> on <remote> ''' push_branches = src_branch + ":" + dest_branch cmd = ["push", remote, push_branches] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _remote_update(self): ''' Do a git remote update ''' cmd = ["remote", "update"] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _diff(self, diff_branch): ''' Do a git diff diff_branch''' cmd = ["diff", diff_branch] results = 
self.git_cmd(cmd, output=True, output_type='raw') return results def _rebase(self, rebase_branch): ''' Do a git rebase rebase_branch''' cmd = ["rebase", rebase_branch] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _config(self, get_args): ''' Do a git config --get <get_args> ''' cmd = ["config", '--get', get_args] results = self.git_cmd(cmd, output=True, output_type='raw') return results def git_cmd(self, cmd, output=False, output_type='json'): '''Base command for git ''' cmds = ['/usr/bin/git'] cmds.extend(cmd) rval = {} results = '' err = None if self.verbose: print ' '.join(cmds) if self.ssh_key: with SshAgent() as agent: self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK'] agent.add_key(self.ssh_key) proc = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.environment_vars) stdout, stderr = proc.communicate() rval = {"returncode": proc.returncode, "results": results, "cmd": ' '.join(cmds), } else: proc = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.environment_vars) stdout, stderr = proc.communicate() rval = {"returncode": proc.returncode, "results": results, "cmd": ' '.join(cmds), } if proc.returncode == 0: if output: if output_type == 'json': try: rval['results'] = json.loads(stdout) except ValueError as err: if "No JSON object could be decoded" in err.message: err = err.message elif output_type == 'raw': rval['results'] = stdout if self.verbose: print stdout print stderr if err: rval.update({"err": err, "cmd": cmds }) else: rval.update({"results": {}}) # Always include stdout/stderr: rval.update({"stderr": stderr, "stdout": stdout}) return rval class GitCheckout(GitCLI): ''' Class to wrap the git checkout command line tools ''' # pylint: disable=too-many-arguments def __init__(self, path, branch): ''' Constructor for GitCheckout ''' super(GitCheckout, self).__init__(path) self.path = path self.branch = branch self.debug = [] os.chdir(path) def checkout(self): '''perform a git checkout ''' current_branch_results = self._get_current_branch() if current_branch_results['results'] == self.branch: current_branch_results['checkout_not_needed'] = True return current_branch_results rval = {} rval['branch_results'] = current_branch_results checkout_results = self._checkout(self.branch) rval['checkout_results'] = checkout_results rval['returncode'] = checkout_results['returncode'] return rval def main(): ''' ansible git module for checkout ''' module = AnsibleModule( argument_spec=dict( state=dict(default='present', type='str', choices=['present']), path=dict(default=None, required=True, type='str'), branch=dict(default=None, required=True, type='str'), ), supports_check_mode=False, ) git = GitCheckout(module.params['path'], module.params['branch']) state = module.params['state'] if state == 'present': results = git.checkout() if results['returncode'] != 0: module.fail_json(msg=results) if results.has_key('checkout_not_needed'): module.exit_json(changed=False, results=results, state="present") module.exit_json(changed=True, results=results, state="present") module.exit_json(failed=True, changed=False, results='Unknown state passed. %s' % state, state="unknown") # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled # import module snippets. This are required if __name__ == '__main__': from ansible.module_utils.basic import * main()
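A quick sketch of driving the GitCLI wrapper above directly, outside of Ansible; the repository path, remote URL, and author string here are invented for illustration:

# Hypothetical values, not taken from the module above.
git = GitCLI('/tmp/myrepo', verbose=True,
             author='Jane Doe <jane@example.com>')
git._clone('git@example.com:org/repo.git', '/tmp/myrepo')
status = git._status(porcelain=True)
print status['results']   # porcelain output; empty when the tree is clean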
Home-ownership should make you feel safe and secure, and that includes financially. Be sure you can afford your home by calculating how much of a mortgage you can safely fit into your budget.

Why not just take out the biggest mortgage a lender says you can have? Because your lender bases that number on a formula that doesn't consider your current and future financial and personal goals -- or your monthly expenses and debts, which greatly influence how much you can afford. Let's say you earn $100,000 a year but have $1,000 in monthly payments for student debt, car loans, and credit card minimums. You don't have as much money to pay your mortgage as someone earning the same income with no debts.

Better option: Prepare a family budget that tallies your ongoing monthly bills for everything -- credit cards, car and student loans, lunch at work, daycare, date night, vacations, and savings. See what's left over to spend on homeownership costs, like your mortgage, property taxes, insurance, maintenance, utilities, and community association fees, if applicable.

How much money do you have for a down payment? The higher your down payment, the lower your monthly payments will be. If you put down at least 20% of the home's cost, you may not have to get private mortgage insurance, which protects the lender if you default and costs hundreds each month. That leaves more money for your mortgage payment. The lower your down payment, the higher the loan amount you'll need to qualify for and the higher your monthly mortgage payment.

Lenders generally follow the 43% rule: your monthly mortgage payments covering your home loan principal, interest, taxes, and insurance, plus all your other bills, like car loans, utilities, and credit cards, shouldn't exceed 43% of your gross monthly income. Here's the arithmetic:

- Your gross annual income is $100,000.
- Multiply $100,000 by 43% to get $43,000, the annual upper limit on all your debt payments.
- Divide $43,000 by 12 months to convert that annual limit into a monthly upper limit of $3,583.

All your monthly bills, including your potential mortgage, can't go above $3,583 per month.

You might find a lender willing to give you a mortgage with a payment that goes above the 43% line, but consider carefully before you take it. Evidence from studies of mortgage loans suggests that borrowers who go over the limit are more likely to run into trouble making monthly payments, the Consumer Financial Protection Bureau warns.

The tax benefits of homeownership generally allow you to afford a mortgage payment - including taxes and insurance - of about one-third more than your current rent payment without changing your lifestyle. So you can multiply your current rent by 1.33 to arrive at a rough estimate of a mortgage payment. Here's an example: If you currently pay $1,500 per month in rent, you should be able to comfortably afford a $2,000 monthly mortgage payment after factoring in the tax benefits of homeownership.

However, if you're struggling to keep up with your rent, buy a home that will give you the same payment rather than going up to a higher monthly payment. You'll have additional costs for homeownership that your landlord now covers, like property taxes and repairs. If there's no room in your budget for those extras, you could become financially stressed.

Also consider whether or not you'll itemize your deductions. If you take the standard deduction, you can't also deduct mortgage interest payments.
Talking to a tax adviser, or using a tax software program to do a "what if" tax return, can help you see your tax situation more clearly.
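For readers who want to play with these numbers, here is a minimal Python sketch of the affordability arithmetic described above. The function names are ours; the 43% cap and the 1.33 rent multiplier come straight from the rules in this article:

def monthly_debt_ceiling(gross_annual_income, dti_limit=0.43):
    """Upper limit on all monthly debt payments under the 43% rule."""
    return gross_annual_income * dti_limit / 12

def mortgage_budget(gross_annual_income, other_monthly_debts, dti_limit=0.43):
    """What is left for the mortgage payment after other monthly debts."""
    return monthly_debt_ceiling(gross_annual_income, dti_limit) - other_monthly_debts

def rent_based_estimate(current_rent):
    """Rough mortgage payment estimate from current rent (x1.33)."""
    return current_rent * 1.33

print(monthly_debt_ceiling(100000))    # 3583.33..., the article's $3,583 cap
print(mortgage_budget(100000, 1000))   # 2583.33... left for the mortgage itself
print(rent_based_estimate(1500))       # 1995.0, roughly the $2,000 in the example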
from datetime import datetime __author__ = 'DeStars' class ReportBuilder: def __init__(self): self.data = {} @classmethod def _elements(cls): return ("_suite_id", "_start_date", "_end_date", "_date_granularity", "_metrics_id", "_elements") @staticmethod def _convert_to_int(int_str): return int(int_str) @staticmethod def _convert_to_date(date_str): return datetime.strptime(date_str, '%Y-%m-%d').strftime("%Y-%m-%d") def _copy(self): obj = ReportBuilder() for val in ReportBuilder._elements(): if val in self.data: obj.data[val] = self.data[val] return obj def with_report_suite_id(self, suite_id): obj = self._copy() obj.data["_suite_id"] = suite_id return obj def with_dates(self, start_date, end_date): obj = self._with_start_date(start_date)._with_end_date(end_date) return obj def _with_start_date(self, date): obj = self._copy() obj.data["_start_date"] = self._convert_to_date(date) return obj def _with_end_date(self, date): obj = self._copy() obj.data["_end_date"] = self._convert_to_date(date) return obj def with_date_granularity(self, granularity): obj = self._copy() obj.data["_date_granularity"] = granularity return obj def add_metrics(self, metrics_id): obj = self._copy() if "_metrics_id" not in obj.data: obj.data["_metrics_id"] = [] obj.data["_metrics_id"].extend(metrics_id) return obj def add_elements(self, element_id, num_values): obj = self._copy() if "_elements" not in obj.data: obj.data["_elements"] = [] obj.data["_elements"].append([element_id, str(num_values)]) return obj def get_report_definition(self): # check all required elements are available # return object metrics = [{"id": mid} for mid in self.data["_metrics_id"]] elements = [{"id": eid, "top": top} for eid, top in self.data["_elements"]] return { "reportDescription":{ "reportSuiteID": self.data["_suite_id"], "dateFrom": self.data["_start_date"], "dateTo": self.data["_end_date"], "dateGranularity": self.data["_date_granularity"], "metrics": metrics, "elements": elements } }
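A usage sketch for the builder above. Note that every with_*/add_* call returns a fresh copy, so a partially built report can be reused safely; the suite, metric, and element ids below are invented for illustration:

base = (ReportBuilder()
        .with_report_suite_id("my-suite")         # hypothetical id
        .with_dates("2016-01-01", "2016-01-31")
        .with_date_granularity("day"))
report = (base
          .add_metrics(["pageviews", "visits"])   # hypothetical metric ids
          .add_elements("page", 10))              # hypothetical element id
print(report.get_report_definition())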
A detailed fishing report for Beaver Creek can make the difference between a good fishing trip here and a bad one. Let's face it, there's a lot we can learn and share from one another, like what bait or lure is generating the best results. On HookandBullet, we invite you to share your knowledge with others: what tackle is working, what fish species can be found here, and where the fish are hiding. So whether it's a Rock Bass fishing report or a Largemouth Bass one, we invite you to submit a fishing report and help us promote the sport. If you're looking to fish in the general area, check out the Hot Spots tab to see where the fish are biting and being caught, and our Fishing Times charts to determine when the fish will be most active.
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0

# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

import sys
import re
import os
import shutil
import commands

"""Copy Special exercise
"""

def get_special_paths(dir_path):
    if not dir_path.endswith('/'):
        dir_path = dir_path + '/'
    files_and_dirs = os.listdir(dir_path)
    special_paths = []
    for _name in files_and_dirs:
        is_special = re.search(r'__\w+__', _name)
        if is_special:
            special_paths.append(dir_path + _name)
    return special_paths

def main():
    # This basic command line argument parsing code is provided.
    # Add code to call your functions below.

    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print "usage: [--todir dir][--tozip zipfile] dir [dir ...]"
        sys.exit(1)

    # todir and tozip are either set from command line
    # or left as the empty string.
    # The args array is left just containing the dirs.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]

    tozip = ''
    if args[0] == '--tozip':
        tozip = args[1]
        del args[0:2]

    if len(args) == 0:
        print "error: must specify one or more dirs"
        sys.exit(1)

    # gather the special paths from every dir given, not just the first
    special_paths = []
    for dir_path in args:
        special_paths.extend(get_special_paths(dir_path))

    for special_path in special_paths:
        if todir:  # empty strings return False
            if not os.path.exists(todir):
                os.makedirs(todir)
            shutil.copy(special_path, todir)
        elif tozip:
            print 'case tozip yet to be implemented'
        else:
            print special_path

if __name__ == "__main__":
    main()
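The --tozip branch above is left unimplemented. One plausible way to finish it with the standard library is sketched below; this is our own completion, not the exercise's official solution (the original exercise suggests shelling out to the zip command instead):

import zipfile

def zip_to(paths, zippath):
    """Write each given path into a zip archive at zippath."""
    zf = zipfile.ZipFile(zippath, 'w')
    for path in paths:
        # store entries under the bare filename, mirroring shutil.copy above
        zf.write(path, os.path.basename(path))
    zf.close()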
A nurse pinning ceremony is a tradition that is upheld by many schools to signify the completion of a nursing program. Because the road to becoming a nurse is difficult, whether it be an RN or LPN (see: What is the Difference Between an RN and LPN), the ceremony is used to show appreciation for graduates and to welcome them to the medical community. Here, you'll be able to find out what a nurse pinning ceremony is, and what happens at one.

HCPro describes the pinning ceremony as a more personal induction into the medical community than the graduation itself. The earliest record of pinning ceremonies being used in the nursing community was during the Crusades of the 12th century. Monks participated in similar rituals with the Hospital of St. John when they agreed to treat wounded Crusaders. They were given Maltese crosses as their badges for nursing the injured soldiers. Florence Nightingale, considered the founder of modern nursing, also presented medals of excellence to her nursing graduates. The medals evolved into pins, and by 1916 nurse pinning ceremonies were commonplace.

L.A. O'Hare from eHow states that when the ceremony begins, nurse graduates file into a room in their uniforms. In some cases, they wear the white hats that were commonplace decades ago in medical settings. Instructors generally lead the line while music plays in the background. The music chosen for the ceremony is usually left to the discretion of the graduates.

Once the graduates have filed in, there are often speeches given by nursing instructors and by students with outstanding performance in the program. Students might share the difficulties they overcame to get to where they are. They may also thank the instructors for their hard work and dedication in working with the graduates.

Next, the actual pinning takes place. In some programs, a faculty member does the pinning of all students. In others, graduates are asked to dedicate their pin to someone special in their life. When this is the case, a faculty member hands the pin to that special person and allows them to pin it on the graduate. The faculty member may also read a dedication speech, written by the student for the pinner (see: Nursing Pinning Ceremony: A Rite-of-Passage for Graduates).

Sometimes the pinning ceremony will feature a final phase where students light candles and recite a pledge in honor of Florence Nightingale, nicknamed the "lady with the lamp." She was called this because she often worked late at night tending to patients with a lamp by her side. Lighting the candles is a representation of the flame being passed between nurses. Once the flame is lit, the Florence Nightingale pledge is recited.

Once the pinning, candle lighting, and recitation are complete, the students that participated can officially call themselves nurses. At the end of the nurse pinning ceremony, they are considered official members of the medical community.
r"""Change detection with a Mahalanobis-type metric""" import numpy as np from numpy.linalg import inv from ruptures.base import BaseCost from ruptures.exceptions import NotEnoughPoints class CostMl(BaseCost): r"""Mahalanobis-type cost function.""" model = "mahalanobis" def __init__(self, metric=None): """Create a new instance. Args: metric (ndarray, optional): PSD matrix that defines a Mahalanobis-type pseudo distance. If None, defaults to the Mahalanobis matrix. Shape (n_features, n_features). """ self.metric = metric self.gram = None self.min_size = 2 def fit(self, signal) -> "CostMl": """Set parameters of the instance. Args: signal (array): signal. Shape (n_samples,) or (n_samples, n_features) Returns: self """ s_ = signal.reshape(-1, 1) if signal.ndim == 1 else signal # Mahalanobis metric if self.metric is None if self.metric is None: covar = np.cov(s_.T) self.metric = inv(covar.reshape(1, 1) if covar.size == 1 else covar) self.gram = s_.dot(self.metric).dot(s_.T) self.signal = s_ return self def error(self, start, end): """Return the approximation cost on the segment [start:end]. Args: start (int): start of the segment end (int): end of the segment Returns: float: segment cost Raises: NotEnoughPoints: when the segment is too short (less than ``'min_size'`` samples). """ if end - start < self.min_size: raise NotEnoughPoints sub_gram = self.gram[start:end, start:end] val = np.diagonal(sub_gram).sum() val -= sub_gram.sum() / (end - start) return val
Flâneur By Dublin is an art installation that brings the commissioned works of two great contemporary photographers to Smithfield. Displayed in a series of large cubes, their work disperses over a metal pathway installed in Smithfield Square. At dusk the cubes become lightboxes displaying the photographers' work. The two selected artists for Flâneur By Dublin are Esther Teichmann from Germany and Rik Moran from the UK. Both artists spent a residency in Ireland earlier this year to create the body of work which will be shown in Smithfield Square. Flâneur By Dublin is part of a larger project called Flâneur - New Urban Narratives. This is a new, European Union funded, two-year-long project, transforming photographers into flâneurs and asking them to apply a new approach to their work within the urban territory. The project involves an international network of some 20 organisations from 11 different countries, and it will be presented in the 13 partner cities. The exhibition launches at 7pm on July 2, and will run every evening from 7pm to 9pm until July 24 in Smithfield Square.
#    Copyright 2017 SAS Project Authors. All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

"""WinnForum-specific version of ITM propagation model.

Typical usage:
  # Configure the terrain driver (memory use is: cache_size * 50MB)
  from reference_models.geo import drive
  drive.ConfigureTerrainDriver(terrain_dir=my_ned_path, cache_size=16)

  # Get the path loss and incidence angles
  db_loss, incidence_angles, internals = CalcItmPropagationLoss(
      lat_cbsd, lon_cbsd, height_cbsd,
      lat_rx, lon_rx, height_rx,
      cbsd_indoor=False,
      reliability=0.5,
      freq_mhz=3625.)
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple

import numpy as np

from reference_models.geo import drive
from reference_models.geo import vincenty
from reference_models.propagation.itm import itm

# TEMPORARY to avoid breaking code under PR
terrainDriver = drive.terrain_driver


# ITM warning codes
class ItmErrorCode:
  NONE = 0
  CAUTION = 1
  NOTE = 2
  WARNING = 3
  OTHER = 4

_ITM_ERROR_MODES = {
    ItmErrorCode.NONE: 'No Error.',
    ItmErrorCode.CAUTION: ('Caution: Some parameters are nearly out of range.'
                           ' Results should be used with caution.'),
    ItmErrorCode.NOTE: ('Note: Default parameters have been substituted for'
                        ' impossible ones.'),
    ItmErrorCode.WARNING: ('Warning: A combination of parameters is out of range.'
                           ' Results are probably invalid.'),
    ItmErrorCode.OTHER: ('Warning: Some parameters are out of range.'
                         ' Results are probably invalid.')
}


def GetInfoOnItmCode(code):
  """Get description of ITM error code."""
  # _ITM_ERROR_MODES is a dict, so index it rather than calling it
  return _ITM_ERROR_MODES[code]

# Defined namedtuple for nice output packing
_PropagResult = namedtuple('_PropagResult',
                           ['db_loss', 'incidence_angles', 'internals'])
_IncidenceAngles = namedtuple('_IncidenceAngles',
                              ['hor_cbsd', 'ver_cbsd', 'hor_rx', 'ver_rx'])


# Main entry point for the Winnforum compliant ITM propagation model
def CalcItmPropagationLoss(lat_cbsd, lon_cbsd, height_cbsd,
                           lat_rx, lon_rx, height_rx,
                           cbsd_indoor=False,
                           reliability=0.5,
                           freq_mhz=3625.,
                           its_elev=None,
                           is_height_cbsd_amsl=False,
                           return_internals=False):
  """Implements the WinnForum-compliant ITM point-to-point propagation model.

  According to WinnForum spec R2-SGN-17, R2-SGN-22 and R2-SGN-5 to 10.

  One can use this routine in 3 ways:
    reliability = -1 : to get the average path loss
    reliability in [0,1] : to get a pathloss for given quantile
    sequence of reliabilities: to get an array of pathloss. Used to obtain
      inverse CDF of the pathloss.

  Inputs:
    lat_cbsd, lon_cbsd, height_cbsd: Lat/lon (deg) and height AGL (m) of CBSD
    lat_rx, lon_rx, height_rx:       Lat/lon (deg) and height AGL (m) of Rx point
    cbsd_indoor:         CBSD indoor status - Default=False.
    reliability:         Reliability. Default is 0.5 (median value).
                         Different options:
                           value in [0,1]: returns the CDF quantile
                           -1: returns the mean path loss
                           iterable sequence: returns a list of path losses
    freq_mhz:            Frequency (MHz). Default is mid-point of band.
    its_elev:            Optional profile to use (in ITM format). Default=None.
                         If not specified, it is extracted from the terrain.
    is_height_cbsd_amsl: If True, the CBSD height shall be considered as AMSL
                         (Above mean sea level).
    return_internals:    If True, returns internal variables.

  Returns:
    A namedtuple of:
      db_loss:          Path Loss in dB, either a scalar if reliability is scalar
                        or a list of path losses if reliability is an iterable.

      incidence_angles: A namedtuple of
          hor_cbsd:       Horizontal departure angle (bearing) from CBSD to Rx
          ver_cbsd:       Vertical departure angle at CBSD
          hor_rx:         Horizontal incidence angle (bearing) from Rx to CBSD
          ver_rx:         Vertical incidence angle at Rx

      internals:        A dictionary of internal data for advanced analysis
                        (only if return_internals=True):
          itm_err_num:    ITM error code from ItmErrorCode (see GetInfoOnItmCode).
          itm_str_mode:   String containing description of dominant prop mode.
          dist_km:        Distance between end points (km).
          prof_d_km:      ndarray of distances (km) - x values to plot terrain.
          prof_elev:      ndarray of terrain heights (m) - y values to plot terrain.

  Raises:
    Exception if input parameters invalid or out of range.
  """
  # Case of same points
  if (lat_cbsd == lat_rx and lon_cbsd == lon_rx):
    return _PropagResult(
        db_loss = 0 if np.isscalar(reliability) else [0] * len(reliability),
        incidence_angles = _IncidenceAngles(0, 0, 0, 0),
        internals = None)

  # Sanity checks on input parameters
  if freq_mhz < 40.0 or freq_mhz > 10000:
    raise Exception('Frequency outside range [40MHz - 10GHz]')

  if is_height_cbsd_amsl:
    altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)
    height_cbsd = height_cbsd - altitude_cbsd

  # Ensure minimum height of 1 meter
  if height_cbsd < 1:
    height_cbsd = 1
  if height_rx < 1:
    height_rx = 1

  # Internal ITM parameters are always set to following values in WF version:
  confidence = 0.5     # Confidence (always 0.5)
  dielec = 25.         # Dielectric constant (always 25.)
  conductivity = 0.02  # Conductivity (always 0.02)
  polarization = 1     # Polarization (always vertical = 1)
  mdvar = 13

  # Get the terrain profile, using Vincenty great circle route, and WF
  # standard (bilinear interp; 1500 pts for all distances over 45 km)
  if its_elev is None:
    its_elev = drive.terrain_driver.TerrainProfile(
        lat1=lat_cbsd, lon1=lon_cbsd,
        lat2=lat_rx, lon2=lon_rx,
        target_res_meter=30.,
        do_interp=True, max_points=1501)

  # Find the midpoint of the great circle path
  dist_km, bearing_cbsd, bearing_rx = vincenty.GeodesicDistanceBearing(
      lat_cbsd, lon_cbsd, lat_rx, lon_rx)
  latmid, lonmid, _ = vincenty.GeodesicPoint(
      lat_cbsd, lon_cbsd, dist_km/2., bearing_cbsd)

  # Determine climate value, based on ITU-R P.617 method:
  climate = drive.climate_driver.TropoClim(latmid, lonmid)
  # If the common volume lies over the sea, the climate value to use depends
  # on the climate values at either end. A simple min() function should
  # properly implement the logic, since water is the max.
  if climate == 7:
    climate = min(drive.climate_driver.TropoClim(lat_cbsd, lon_cbsd),
                  drive.climate_driver.TropoClim(lat_rx, lon_rx))

  # Look up the refractivity at the path midpoint
  refractivity = drive.refract_driver.Refractivity(latmid, lonmid)

  # Call ITM prop loss.
  reliabilities = reliability
  do_avg = False
  if np.isscalar(reliabilities) and reliability == -1:
    # Pathloss mean: average the value for 1% to 99% included
    reliabilities = np.arange(0.01, 1.0, 0.01)
    do_avg = True

  db_loss, ver_cbsd, ver_rx, str_mode, err_num = itm.point_to_point(
      its_elev, height_cbsd, height_rx,
      dielec, conductivity,
      refractivity,
      freq_mhz,
      climate,
      polarization,
      confidence,
      reliabilities,
      mdvar,
      False)

  if do_avg:
    db_loss = -10*np.log10(np.mean(10**(-np.array(db_loss)/10.)))

  # Add indoor losses
  if cbsd_indoor:
    if np.isscalar(db_loss):
      db_loss += 15
    else:
      db_loss = [loss+15 for loss in db_loss]

  # Create distance/terrain arrays for plotting if desired
  internals = None
  if return_internals:
    prof_d_km = (its_elev[1]/1000.) * np.arange(len(its_elev)-2)
    prof_elev = np.asarray(its_elev[2:])
    internals = {
        'itm_err_num': err_num,
        'itm_str_mode': str_mode,
        'dist_km': dist_km,
        'prof_d_km': prof_d_km,
        'prof_elev': prof_elev
    }

  return _PropagResult(
      db_loss = db_loss,
      incidence_angles = _IncidenceAngles(
          hor_cbsd = bearing_cbsd,
          ver_cbsd = ver_cbsd,
          hor_rx = bearing_rx,
          ver_rx = ver_rx),
      internals = internals
  )


# Utility function to compute the HAAT for a CBSD
def ComputeHaat(lat_cbsd, lon_cbsd, height_cbsd, height_is_agl=True):
  """Computes a CBSD HAAT (Height above average terrain).

  Args:
    lat_cbsd, lon_cbsd: the CBSD location (degrees).
    height_cbsd: the CBSD antenna height (meters)
    height_is_agl: boolean specifying if height is AGL (Above Ground Level)
      or AMSL (Above Mean Sea Level).

  Returns:
    the CBSD HAAT (meters).
  """
  norm_haat, alt_ground = drive.terrain_driver.ComputeNormalizedHaat(lat_cbsd, lon_cbsd)
  if height_is_agl:
    return height_cbsd + norm_haat
  else:
    return height_cbsd - alt_ground + norm_haat
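One subtlety worth calling out in the reliability=-1 branch above: path losses are averaged in the linear power domain, not in dB. A standalone illustration with invented loss values:

import numpy as np

losses_db = np.array([100., 110., 120.])
naive_mean = losses_db.mean()                                    # 110 dB
linear_mean = -10 * np.log10(np.mean(10 ** (-losses_db / 10.)))  # ~104.3 dB
# The linear-domain mean is dominated by the strongest path (smallest loss),
# which is why it sits well below the naive dB average.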
For more information or to contact SunnyBrook, visit http://www.sunnybrookrv.com/. SunnyBrook RVs.com is not the official site of SunnyBrook RV, nor are we affiliated with SunnyBrook RV in any way. If you wish to view SunnyBrook RV's official website please visit http://www.sunnybrookrv.com. THIS SITE IS NOT OWNED, MAINTAINED, OR ENDORSED BY SunnyBrook RV.
# -*- coding: utf-8 -*- """ Created on Thu Jul 28 12:11:07 2016 @author: Eric """ import argparse import pickle import TopoSparsenet import numpy as np import scipy.io as io parser = argparse.ArgumentParser(description="Learn dictionaries for Topographic Sparsenet with given parameters.") parser.add_argument('-d', '--data', default='images', type=str) parser.add_argument('-r', '--resultsfolder', default='',type=str) parser.add_argument('-s', '--suffix', default='ptwise', type=str) parser.add_argument('-i', '--niter', default=200, type=int) parser.add_argument('-l', '--lam', default=0.15, type=float) parser.add_argument('-l2', '--lam2', default=0.05, type=float) #parser.add_argument('--shape', default = (25,32), type=tuple) parser.add_argument('--sigma', default = 1, type=float) parser.add_argument('--binarize', action='store_true') args=parser.parse_args() data = args.data resultsfolder = args.resultsfolder shape = (25,32)#args.shape suffix = args.suffix niter = args.niter lam = args.lam lam2 = args.lam2 sigma = args.sigma binarize = args.binarize if data == 'images': datafile = '../vision/Data/IMAGES.mat' numinput = 256 data = io.loadmat(datafile)["IMAGES"] if resultsfolder == '': resultsfolder = '../vision/Results/' net = TopoSparsenet.TopoSparsenet(data, shape, paramfile='dummy') net.gain_rate = 0.001 elif data == 'spectros': datafile = '../audition/Data/speech_ptwisecut' numinput = 200 with open(datafile+'_pca.pickle', 'rb') as f: mypca, origshape = pickle.load(f) data = np.load(datafile+'.npy') data = data/data.std() if resultsfolder == '': resultsfolder = '../audition/Results/' net = TopoSparsenet.TopoSparsenet(data=data, dict_shape=shape, learnrate = 0.0005, datatype='spectro', pca=mypca, stimshape=origshape, sigma=sigma, gain_rate=0.001, var_goal=0.033) net.niter = niter net.lamb = lam net.lamb_2 = lam2 net.learnrate = 0.0005 if binarize: net.binarize_g() savestr = resultsfolder+'TSN'+str(shape[0])+'x'+str(shape[1]) + 's'+str(sigma)+ suffix net.save(savestr+'.pickle') net.run(ntrials=10000) net.save()
Couture Optique and owner Barry Franzblau have been setting the standard for fine fashion eyewear for over 20 years. This internationally known store has received the "Retailer Of The Year Award" presented by Eyecare Business Magazine. Barry's passion has always been to set a very high standard in providing luxury eyewear and services to his very discerning customers. Mr. Franzblau is also known as the Optician to the Stars because of his past work consulting and fitting actors with eyewear to enhance their roles in feature films and television shows. Understanding that his customers are the most important aspect of what he does, he makes sure they can count on finding some very unique eyewear collections from around the world, with lines like Chopard, Maybach, Dita, Etnia Barcelona, FHone, Robert Marc, J.F.Rey, and Swissflex, just to name a few. Of course, it's not just about finding that perfect pair of eyewear. When it comes to filling prescriptions, Couture Optique only uses the most advanced digital lens technology and premium lens coatings available, ensuring the most accurate and visually pleasing results. We welcome the opportunity to provide you with a great experience when purchasing your next pair of fine eyewear.
import datetime import os import tempfile import flask_restful import requests from flask import Blueprint, request from flask import abort from flask import flash from flask import redirect from flask import render_template from flask import session from flask_restful_swagger import swagger from werkzeug.utils import secure_filename from SpiderKeeper.app import db, api, agent, app from SpiderKeeper.app.spider.model import JobInstance, Project, JobExecution, SpiderInstance, JobRunType api_spider_bp = Blueprint('spider', __name__) ''' ========= api ========= ''' class ProjectCtrl(flask_restful.Resource): @swagger.operation( summary='list projects', parameters=[]) def get(self): return [project.to_dict() for project in Project.query.all()] @swagger.operation( summary='add project', parameters=[{ "name": "project_name", "description": "project name", "required": True, "paramType": "form", "dataType": 'string' }]) def post(self): project_name = request.form['project_name'] project = Project() project.project_name = project_name db.session.add(project) db.session.commit() return project.to_dict() class SpiderCtrl(flask_restful.Resource): @swagger.operation( summary='list spiders', parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }]) def get(self, project_id): project = Project.find_project_by_id(project_id) return [spider_instance.to_dict() for spider_instance in SpiderInstance.query.filter_by(project_id=project_id).all()] class SpiderDetailCtrl(flask_restful.Resource): @swagger.operation( summary='spider detail', parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "spider_id", "description": "spider instance id", "required": True, "paramType": "path", "dataType": 'int' }]) def get(self, project_id, spider_id): spider_instance = SpiderInstance.query.filter_by(project_id=project_id, id=spider_id).first() return spider_instance.to_dict() if spider_instance else abort(404) @swagger.operation( summary='run spider', parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "spider_id", "description": "spider instance id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "spider_arguments", "description": "spider arguments", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "priority", "description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2", "required": False, "paramType": "form", "dataType": 'int' }, { "name": "tags", "description": "spider tags", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "desc", "description": "spider desc", "required": False, "paramType": "form", "dataType": 'string' }]) def put(self, project_id, spider_id): spider_instance = SpiderInstance.query.filter_by(project_id=project_id, id=spider_id).first() if not spider_instance: abort(404) job_instance = JobInstance() job_instance.spider_name = spider_instance.spider_name job_instance.project_id = project_id job_instance.spider_arguments = request.form.get('spider_arguments') job_instance.desc = request.form.get('desc') job_instance.tags = request.form.get('tags') job_instance.run_type = JobRunType.ONETIME job_instance.priority = request.form.get('priority', 0) job_instance.enabled = -1 db.session.add(job_instance) db.session.commit() agent.start_spider(job_instance) return True JOB_INSTANCE_FIELDS = [column.name for column in 
JobInstance.__table__.columns] JOB_INSTANCE_FIELDS.remove('id') JOB_INSTANCE_FIELDS.remove('date_created') JOB_INSTANCE_FIELDS.remove('date_modified') class JobCtrl(flask_restful.Resource): @swagger.operation( summary='list job instance', parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }]) def get(self, project_id): return [job_instance.to_dict() for job_instance in JobInstance.query.filter_by(run_type="periodic", project_id=project_id).all()] @swagger.operation( summary='add job instance', notes="json keys: <br>" + "<br>".join(JOB_INSTANCE_FIELDS), parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "spider_name", "description": "spider_name", "required": True, "paramType": "form", "dataType": 'string' }, { "name": "spider_arguments", "description": "spider_arguments, split by ','", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "desc", "description": "desc", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "tags", "description": "tags , split by ','", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "run_type", "description": "onetime/periodic", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "priority", "description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2", "required": False, "paramType": "form", "dataType": 'int' }, { "name": "cron_minutes", "description": "@see http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_hour", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_day_of_month", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_day_of_week", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_month", "description": "", "required": False, "paramType": "form", "dataType": 'string' }]) def post(self, project_id): post_data = request.form if post_data: job_instance = JobInstance() job_instance.spider_name = post_data['spider_name'] job_instance.project_id = project_id job_instance.spider_arguments = post_data.get('spider_arguments') job_instance.desc = post_data.get('desc') job_instance.tags = post_data.get('tags') job_instance.run_type = post_data['run_type'] job_instance.priority = post_data.get('priority', 0) if job_instance.run_type == "periodic": job_instance.cron_minutes = post_data.get('cron_minutes') or '0' job_instance.cron_hour = post_data.get('cron_hour') or '*' job_instance.cron_day_of_month = post_data.get('cron_day_of_month') or '*' job_instance.cron_day_of_week = post_data.get('cron_day_of_week') or '*' job_instance.cron_month = post_data.get('cron_month') or '*' db.session.add(job_instance) db.session.commit() return True class JobDetailCtrl(flask_restful.Resource): @swagger.operation( summary='update job instance', notes="json keys: <br>" + "<br>".join(JOB_INSTANCE_FIELDS), parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "job_id", "description": "job instance id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "spider_name", "description": "spider_name", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "spider_arguments", "description": 
"spider_arguments, split by ','", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "desc", "description": "desc", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "tags", "description": "tags , split by ','", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "run_type", "description": "onetime/periodic", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "priority", "description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2", "required": False, "paramType": "form", "dataType": 'int' }, { "name": "cron_minutes", "description": "@see http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_hour", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_day_of_month", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_day_of_week", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "cron_month", "description": "", "required": False, "paramType": "form", "dataType": 'string' }, { "name": "enabled", "description": "-1 / 0, default: 0", "required": False, "paramType": "form", "dataType": 'int' }, { "name": "status", "description": "if set to 'run' will run the job", "required": False, "paramType": "form", "dataType": 'int' } ]) def put(self, project_id, job_id): post_data = request.form if post_data: job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_id).first() if not job_instance: abort(404) job_instance.spider_arguments = post_data.get('spider_arguments') or job_instance.spider_arguments job_instance.priority = post_data.get('priority') or job_instance.priority job_instance.enabled = post_data.get('enabled', 0) job_instance.cron_minutes = post_data.get('cron_minutes') or job_instance.cron_minutes job_instance.cron_hour = post_data.get('cron_hour') or job_instance.cron_hour job_instance.cron_day_of_month = post_data.get('cron_day_of_month') or job_instance.cron_day_of_month job_instance.cron_day_of_week = post_data.get('cron_day_of_week') or job_instance.cron_day_of_week job_instance.cron_month = post_data.get('cron_month') or job_instance.cron_month job_instance.desc = post_data.get('desc', 0) or job_instance.desc job_instance.tags = post_data.get('tags', 0) or job_instance.tags db.session.commit() if post_data.get('status') == 'run': agent.start_spider(job_instance) return True class JobExecutionCtrl(flask_restful.Resource): @swagger.operation( summary='list job execution status', parameters=[{ "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }]) def get(self, project_id): return JobExecution.list_jobs(project_id) class JobExecutionDetailCtrl(flask_restful.Resource): @swagger.operation( summary='stop job', notes='', parameters=[ { "name": "project_id", "description": "project id", "required": True, "paramType": "path", "dataType": 'int' }, { "name": "job_exec_id", "description": "job_execution_id", "required": True, "paramType": "path", "dataType": 'string' } ]) def put(self, project_id, job_exec_id): job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first() if job_execution: agent.cancel_spider(job_execution) return True api.add_resource(ProjectCtrl, "/api/projects") api.add_resource(SpiderCtrl, "/api/projects/<project_id>/spiders") 
api.add_resource(SpiderDetailCtrl, "/api/projects/<project_id>/spiders/<spider_id>") api.add_resource(JobCtrl, "/api/projects/<project_id>/jobs") api.add_resource(JobDetailCtrl, "/api/projects/<project_id>/jobs/<job_id>") api.add_resource(JobExecutionCtrl, "/api/projects/<project_id>/jobexecs") api.add_resource(JobExecutionDetailCtrl, "/api/projects/<project_id>/jobexecs/<job_exec_id>") ''' ========= Router ========= ''' @app.before_request def intercept_no_project(): if request.path.find('/project//') > -1: flash("create project first") return redirect("/project/manage", code=302) @app.context_processor def inject_common(): return dict(now=datetime.datetime.now(), servers=agent.servers) @app.context_processor def inject_project(): project_context = {} project_context['project_list'] = Project.query.all() if project_context['project_list'] and (not session.get('project_id')): project = Project.query.first() session['project_id'] = project.id if session.get('project_id'): project_context['project'] = Project.find_project_by_id(session['project_id']) project_context['spider_list'] = [spider_instance.to_dict() for spider_instance in SpiderInstance.query.filter_by(project_id=session['project_id']).all()] else: project_context['project'] = {} return project_context @app.context_processor def utility_processor(): def timedelta(end_time, start_time): ''' :param end_time: :param start_time: :param unit: s m h :return: ''' if not end_time or not start_time: return '' if type(end_time) == str: end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S') if type(start_time) == str: start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S') total_seconds = (end_time - start_time).total_seconds() return readable_time(total_seconds) def readable_time(total_seconds): if not total_seconds: return '-' if total_seconds < 60: return '%s s' % total_seconds if total_seconds < 3600: return '%s m' % int(total_seconds / 60) return '%s h %s m' % (int(total_seconds / 3600), int((total_seconds % 3600) / 60)) return dict(timedelta=timedelta, readable_time=readable_time) @app.route("/") def index(): project = Project.query.first() if project: return redirect("/project/%s/job/dashboard" % project.id, code=302) return redirect("/project/manage", code=302) @app.route("/project/<project_id>") def project_index(project_id): session['project_id'] = project_id return redirect("/project/%s/job/dashboard" % project_id, code=302) @app.route("/project/create", methods=['post']) def project_create(): project_name = request.form['project_name'] project = Project() project.project_name = project_name db.session.add(project) db.session.commit() return redirect("/project/%s/spider/deploy" % project.id, code=302) @app.route("/project/<project_id>/delete") def project_delete(project_id): project = Project.find_project_by_id(project_id) agent.delete_project(project) db.session.delete(project) db.session.commit() return redirect("/project/manage", code=302) @app.route("/project/manage") def project_manage(): return render_template("project_manage.html") @app.route("/project/<project_id>/job/dashboard") def job_dashboard(project_id): return render_template("job_dashboard.html", job_status=JobExecution.list_jobs(project_id)) @app.route("/project/<project_id>/job/periodic") def job_periodic(project_id): project = Project.find_project_by_id(project_id) job_instance_list = [job_instance.to_dict() for job_instance in JobInstance.query.filter_by(run_type="periodic", project_id=project_id).all()] return 
render_template("job_periodic.html", job_instance_list=job_instance_list) @app.route("/project/<project_id>/job/add", methods=['post']) def job_add(project_id): project = Project.find_project_by_id(project_id) job_instance = JobInstance() job_instance.spider_name = request.form['spider_name'] job_instance.project_id = project_id job_instance.spider_arguments = request.form['spider_arguments'] job_instance.priority = request.form.get('priority', 0) job_instance.run_type = request.form['run_type'] # chose daemon manually if request.form['daemon'] != 'auto': spider_args = [] if request.form['spider_arguments']: spider_args = request.form['spider_arguments'].split(",") spider_args.append("daemon={}".format(request.form['daemon'])) job_instance.spider_arguments = ','.join(spider_args) if job_instance.run_type == JobRunType.ONETIME: job_instance.enabled = -1 db.session.add(job_instance) db.session.commit() agent.start_spider(job_instance) if job_instance.run_type == JobRunType.PERIODIC: job_instance.cron_minutes = request.form.get('cron_minutes') or '0' job_instance.cron_hour = request.form.get('cron_hour') or '*' job_instance.cron_day_of_month = request.form.get('cron_day_of_month') or '*' job_instance.cron_day_of_week = request.form.get('cron_day_of_week') or '*' job_instance.cron_month = request.form.get('cron_month') or '*' # set cron exp manually if request.form.get('cron_exp'): job_instance.cron_minutes, job_instance.cron_hour, job_instance.cron_day_of_month, job_instance.cron_day_of_week, job_instance.cron_month = \ request.form['cron_exp'].split(' ') db.session.add(job_instance) db.session.commit() return redirect(request.referrer, code=302) @app.route("/project/<project_id>/jobexecs/<job_exec_id>/stop") def job_stop(project_id, job_exec_id): job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first() agent.cancel_spider(job_execution) return redirect(request.referrer, code=302) @app.route("/project/<project_id>/jobexecs/<job_exec_id>/log") def job_log(project_id, job_exec_id): job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first() res = requests.get(agent.log_url(job_execution)) res.encoding = 'utf8' raw = res.text return render_template("job_log.html", log_lines=raw.split('\n')) @app.route("/project/<project_id>/job/<job_instance_id>/run") def job_run(project_id, job_instance_id): job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first() agent.start_spider(job_instance) return redirect(request.referrer, code=302) @app.route("/project/<project_id>/job/<job_instance_id>/remove") def job_remove(project_id, job_instance_id): job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first() db.session.delete(job_instance) db.session.commit() return redirect(request.referrer, code=302) @app.route("/project/<project_id>/job/<job_instance_id>/switch") def job_switch(project_id, job_instance_id): job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first() job_instance.enabled = -1 if job_instance.enabled == 0 else 0 db.session.commit() return redirect(request.referrer, code=302) @app.route("/project/<project_id>/spider/dashboard") def spider_dashboard(project_id): spider_instance_list = SpiderInstance.list_spiders(project_id) return render_template("spider_dashboard.html", spider_instance_list=spider_instance_list) @app.route("/project/<project_id>/spider/deploy") def spider_deploy(project_id): project = 
Project.find_project_by_id(project_id) return render_template("spider_deploy.html") @app.route("/project/<project_id>/spider/upload", methods=['post']) def spider_egg_upload(project_id): project = Project.find_project_by_id(project_id) if 'file' not in request.files: flash('No file part') return redirect(request.referrer) file = request.files['file'] # if user does not select file, browser also # submit a empty part without filename if file.filename == '': flash('No selected file') return redirect(request.referrer) if file: filename = secure_filename(file.filename) dst = os.path.join(tempfile.gettempdir(), filename) file.save(dst) agent.deploy(project, dst) flash('deploy success!') return redirect(request.referrer) @app.route("/project/<project_id>/project/stats") def project_stats(project_id): project = Project.find_project_by_id(project_id) run_stats = JobExecution.list_run_stats_by_hours(project_id) return render_template("project_stats.html", run_stats=run_stats) @app.route("/project/<project_id>/server/stats") def service_stats(project_id): project = Project.find_project_by_id(project_id) run_stats = JobExecution.list_run_stats_by_hours(project_id) return render_template("server_stats.html", run_stats=run_stats)
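A hedged sketch of exercising the REST resources registered above with the requests library; the host, port, and numeric ids are illustrative and assume a default local SpiderKeeper instance:

import requests

base = 'http://localhost:5000'   # assumed host/port

# create a project
requests.post(base + '/api/projects', data={'project_name': 'demo'})

# list the spiders of project 1
print(requests.get(base + '/api/projects/1/spiders').json())

# fire a one-off run of spider 2 in project 1
requests.put(base + '/api/projects/1/spiders/2',
             data={'spider_arguments': 'daemon=auto'})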
I consult predominantly to professional service led organizations who think that sales is a dirty word! As an agent of sales effectiveness, that makes my job, let's say, adventurous. As an evangelist of social selling, well, that makes me intrepid. I love selling, I love being social - I hate that I can't truly be a social seller, but such is the nature of my consulting role in advisory services. What I can offer, however, is a passion for social engagement and understanding changes in buyer behavior.

Social Media allows me to do two things I strive to be the best I can at:

1. Creating and publishing content that adds value.
2. Growing a network that respects and utilizes my skills.

I started my social life in 2006 after entering a job with a sales assessment, placement and training organization - for me the early stages of social engagement were hit and miss. LinkedIn was not the tool then it is now, and my profile, like my experience, was fairly junior. I was however blindly determined and set about getting in front of as many sales leaders as I could, albeit, in hindsight, inefficiently. My offline network grew quickly and my cheeky, ambitious and rather fearlessly naïve persona (mostly my peculiar accent) very quickly got me a brand. I desperately wanted to learn about sales and how what we did could make organizations better. Business had its ups and downs, I was a trainee salesman, I had a great boss and I loved it.

As a Journalism major, my desire to produce well-read content was a constant long before I entered the fantastic world of professional selling. In fact, I remember one of those 'life conversations' with the aforementioned manager where he asked me why I wanted to be a Journalist, working long hours to targeted deadlines and calling on people who didn't want to speak with me... all WITHOUT getting paid COMMISSION. Needless to say I was swayed by his rhetoric and have been in professional sales ever since, now lucky to be advising other organizations on sales effectiveness and improvement.

My real affair with Social Media took shape when I moved from the UK to Australia; relocating to a new country and market prompted a different way of thinking about building a sales community - similar role, different location. I thought back to my clients in the UK and used LinkedIn to search for counterparts here in Sydney, then Melbourne, and eventually - still adopting cold calling techniques too - I had amassed a well-connected network of sales leaders nationally here in Oz.

The ownership of this network prompted me to leave the comfort of my role after a few years and search for pastures new, utilizing connections and researching the footprint of a global organization online through both LinkedIn and my new medium of Twitter. After quite a frustrating but socially experimental year, new additions to LinkedIn's influencer platform gave birth to an affirmation for my field of study and a desire to move back into an advisory, sales analyst role, and in doing so I set out to find and consume the content of as many sales thought leaders as I possibly could.

This leads me to Huthwaite, now part of MHI Global, where I have spent the last two fantastic years helping companies and their sales leaders in APAC build and sustain customer focused, high-performance organizations. Here I get to leverage my two focus areas by listening to customers who articulate socially that they are in growth or trouble. I get to create and publish my own content so that my customers feel I am in contact with them, and I grow my network as a thought leader.
I get to prospect and lead generate as an agent of MHI Global to boost my pipeline - I get to share research on sales best practice to build my brand and become synonymous with an organization helping to drive profitable, predictable top line growth for its customers.

I use social media every day to prospect, find, listen, connect and engage with my customers and prospective customers. For the first time I can use a communication method which doesn't intrude on their time - rather, it is an interactive opportunity for me to gauge their interest, understand their problems and offer tailored solutions (feel free to read my article in 33 Social Selling Tips by Leading Social Selling Thought Leaders).

In summary, when it comes to Social Media adoption, you don't have to make a decision whether or not to be active as a seller on it - your customers have already made that decision for you. As a sales professional, all you have to decide is how you are going to take the skills that make you successful offline and translate them into a consistent manner online. If you want to be successful, go where your customers are, go there with purpose and go there to add as much value as you can - go forward and don't look back - in fact, Go Social and you won't have to.
import math


class Vector2D(object):
    """
    A vector in 2-dimensional space.
    """
    def __init__(self, x, y=None):
        if y is None:
            x, y = x
        self.x = x
        self.y = y

    @classmethod
    def fromAngle(cls, angle, radians=True):
        """Return the unit vector in the given direction."""
        if not radians:
            angle = math.radians(angle)
        return cls(math.cos(angle), math.sin(angle))

    def __repr__(self):
        return "Vector2D(%g, %g)" % (self.x, self.y)

    def __hash__(self):
        return hash((self.x, self.y))

    def __getitem__(self, key):
        if key == 0:
            return self.x
        return self.y

    def __iter__(self):
        return iter((self.x, self.y))

    def __pos__(self):
        return Vector2D(self.x, self.y)

    def __neg__(self):
        return Vector2D(-self.x, -self.y)

    def __add__(self, other):
        return Vector2D(self.x + other.x, self.y + other.y)
    __radd__ = __add__

    def __sub__(self, other):
        return Vector2D(self.x - other.x, self.y - other.y)

    def __rsub__(self, other):
        return Vector2D(other.x - self.x, other.y - self.y)

    def __mul__(self, other):
        return Vector2D(self.x*other, self.y*other)
    __rmul__ = __mul__

    def __div__(self, other):
        return Vector2D(self.x/other, self.y/other)
    __truediv__ = __div__

    def floor(self):
        """Floor the components of this vector."""
        # math.floor (rather than int()) so negative components floor correctly
        return Vector2D(int(math.floor(self.x)), int(math.floor(self.y)))

    def magnitude(self):
        """Calculate the magnitude of this vector."""
        return math.sqrt(self.x*self.x + self.y*self.y)

    def magnitudeSquared(self):
        """Calculate the squared magnitude of this vector."""
        return self.dot(self)

    def dot(self, other):
        """Calculate the dot product of this vector and another."""
        return self.x*other.x + self.y*other.y

    def normalize(self):
        """Return the normalization of this vector."""
        return self/self.magnitude()

    def lnormal(self):
        """Return the left normal of this vector."""
        return Vector2D(self.y, -self.x)

    def rnormal(self):
        """Return the right normal of this vector."""
        return Vector2D(-self.y, self.x)

    def projectOnto(self, other):
        """Return the projection of this vector onto another."""
        scalar = self.dot(other)/other.magnitudeSquared()
        return other*scalar

    def rotateRelative(self, angle, origin, radians=True):
        """Rotate this vector relative to another by the given amount."""
        if not radians:
            angle = math.radians(angle)
        x, y = self
        x -= origin.x
        y -= origin.y
        cos_theta = math.cos(angle)
        sin_theta = math.sin(angle)
        nx = x*cos_theta - y*sin_theta
        ny = x*sin_theta + y*cos_theta
        return Vector2D(nx + origin.x, ny + origin.y)

    def rotate(self, angle, radians=True):
        """Rotate this vector by the given amount."""
        if not radians:
            angle = math.radians(angle)
        x, y = self
        cos_theta = math.cos(angle)
        sin_theta = math.sin(angle)
        return Vector2D(x*cos_theta - y*sin_theta, x*sin_theta + y*cos_theta)

    def lerp(self, other, amount):
        """Linearly interpolate between this vector and another."""
        return self + amount*(other - self)

    def angle(self, radians=True):
        """
        Return the angle at which this vector points relative to the
        positive x-axis.
        """
        angle = math.atan2(self.y, self.x)
        if not radians:
            angle = math.degrees(angle)
        return angle

Vector2D.origin = Vector2D(0, 0)
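A few spot checks for the class above, with values chosen so the expected results are easy to verify by hand (angles passed in degrees via radians=False):

v = Vector2D(3, 4)
print(v.magnitude())                  # 5.0
print(v.normalize())                  # Vector2D(0.6, 0.8)
print(v + Vector2D(1, 1))             # Vector2D(4, 5)
print(v.rotate(90, radians=False))    # ~Vector2D(-4, 3)
print(v.projectOnto(Vector2D(1, 0)))  # Vector2D(3, 0)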
The latest visual novel by Studio Sprinkles, Heartful Chance~!, now has an official German translation. The game is free and can be downloaded here! Hope you enjoy the game! PS: Check out their first game, Witch Spell, as well! It's worth a shot.
# -*- coding: utf8 -*- # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details """ Anaconda McCabe """ import ast from .mccabe import McCabeChecker class AnacondaMcCabe(object): """Wrapper object around McCabe python script """ checker = McCabeChecker def __init__(self, code, filename): self.code = code self.filename = filename @property def tree(self): """Compile and send back an AST if buffer is able to be parsed """ try: code = self.code.encode('utf8') + b'\n' return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST) except SyntaxError: return None def get_code_complexity(self, threshold=7): """Get the code complexity for the current buffer and return it """ if self.tree is not None: self.checker.max_complexity = threshold return self.parse(self.checker(self.tree, self.filename).run()) return None def parse(self, complexities): """ Parse the given list of complexities to something that anaconda understand and is able to handle """ errors = [] for complexity in complexities: errors.append({ 'line': int(complexity[0]), 'offset': int(complexity[1] + 1), 'code': complexity[2].split(' ', 1)[0], 'message': complexity[2].split(' ', 1)[1] }) return errors
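An illustrative way to drive the wrapper above on an in-memory snippet; the filename is a dummy, and a threshold of 1 is artificially low so that even this small function gets reported (the exact output shown in the comment is approximate):

code = (
    "def f(x):\n"
    "    if x:\n"
    "        return 1\n"
    "    return 0\n"
)
checker = AnacondaMcCabe(code, 'example.py')
print(checker.get_code_complexity(threshold=1))
# roughly: [{'line': 1, 'offset': 1, 'code': 'C901', 'message': "'f' is too complex (2)"}]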
The furniture could be heavy, such that you cannot remove it on your own without seeking external assistance. Even if you managed to remove it, you may not know where to take it, such as a landfill. This could spell a logistical nightmare. Thankfully, you can count on our experts to handle the dirty work for you at a reasonable cost. They will come in uniform and a truck, ready to haul away your old furniture. Instead of throwing it away, we will renovate it and offer it to charity, hence saving you the high cost of disposing of it. Talk to Furniture Removal Guys in North Bonneville, WA and learn more about how the furniture junk in your premises can be put to good use at a fraction of the cost. You will get fully licensed, insured and bonded services in North Bonneville, WA. These furniture disposal services are licensed, insured and bonded so as to ensure that you get quality, standard services that are sanctioned by the local authorities. This will ensure that the disposal services are carried out in compliance with the highest standards of safety. Poor handling of furniture could prove costly, especially if you take it upon yourself to handle the disposal. For instance, throwing it into a landfill could put you at odds with environmentalists. From garden waste to renovation debris, you could easily pollute the environment when you just dump them. To save you the agony, simply get in touch and we will sort out the mess for you in an efficient way. Thankfully, we rank among equals with other players in North Bonneville, WA, which means you will be able to engage us with a lot of confidence.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.recommendationengine_v1beta1.services.prediction_api_key_registry import ( pagers, ) from google.cloud.recommendationengine_v1beta1.types import ( prediction_apikey_registry_service, ) from .transports.base import PredictionApiKeyRegistryTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PredictionApiKeyRegistryGrpcTransport from .transports.grpc_asyncio import PredictionApiKeyRegistryGrpcAsyncIOTransport class PredictionApiKeyRegistryClientMeta(type): """Metaclass for the PredictionApiKeyRegistry client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[PredictionApiKeyRegistryTransport]] _transport_registry["grpc"] = PredictionApiKeyRegistryGrpcTransport _transport_registry["grpc_asyncio"] = PredictionApiKeyRegistryGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[PredictionApiKeyRegistryTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class PredictionApiKeyRegistryClient(metaclass=PredictionApiKeyRegistryClientMeta): """Service for registering API keys for use with the ``predict`` method. If you use an API key to request predictions, you must first register the API key. Otherwise, your prediction request is rejected. If you use OAuth to authenticate your ``predict`` method call, you do not need to register an API key. You can register up to 20 API keys per project. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "recommendationengine.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: PredictionApiKeyRegistryClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: PredictionApiKeyRegistryClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> PredictionApiKeyRegistryTransport: """Returns the transport used by the client instance. Returns: PredictionApiKeyRegistryTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def event_store_path( project: str, location: str, catalog: str, event_store: str, ) -> str: """Returns a fully-qualified event_store string.""" return "projects/{project}/locations/{location}/catalogs/{catalog}/eventStores/{event_store}".format( project=project, location=location, catalog=catalog, event_store=event_store, ) @staticmethod def parse_event_store_path(path: str) -> Dict[str, str]: """Parses a event_store path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/eventStores/(?P<event_store>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def prediction_api_key_registration_path( project: str, location: str, catalog: str, event_store: str, prediction_api_key_registration: str, ) -> str: """Returns a fully-qualified prediction_api_key_registration string.""" return "projects/{project}/locations/{location}/catalogs/{catalog}/eventStores/{event_store}/predictionApiKeyRegistrations/{prediction_api_key_registration}".format( project=project, location=location, catalog=catalog, event_store=event_store, prediction_api_key_registration=prediction_api_key_registration, ) @staticmethod def parse_prediction_api_key_registration_path(path: str) -> Dict[str, str]: """Parses a prediction_api_key_registration path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/eventStores/(?P<event_store>.+?)/predictionApiKeyRegistrations/(?P<prediction_api_key_registration>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: 
str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, PredictionApiKeyRegistryTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the prediction api key registry client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, PredictionApiKeyRegistryTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " "values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, PredictionApiKeyRegistryTransport): # transport is a PredictionApiKeyRegistryTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) def create_prediction_api_key_registration( self, request: prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest = None, *, parent: str = None, prediction_api_key_registration: prediction_apikey_registry_service.PredictionApiKeyRegistration = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> prediction_apikey_registry_service.PredictionApiKeyRegistration: r"""Register an API key for use with predict method. Args: request (google.cloud.recommendationengine_v1beta1.types.CreatePredictionApiKeyRegistrationRequest): The request object. Request message for the `CreatePredictionApiKeyRegistration` method. parent (str): Required. The parent resource path. ``projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. prediction_api_key_registration (google.cloud.recommendationengine_v1beta1.types.PredictionApiKeyRegistration): Required. The prediction API key registration. This corresponds to the ``prediction_api_key_registration`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.recommendationengine_v1beta1.types.PredictionApiKeyRegistration: Registered Api Key. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, prediction_api_key_registration]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest, ): request = prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if prediction_api_key_registration is not None: request.prediction_api_key_registration = ( prediction_api_key_registration ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.create_prediction_api_key_registration ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def list_prediction_api_key_registrations( self, request: prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListPredictionApiKeyRegistrationsPager: r"""List the registered apiKeys for use with predict method. Args: request (google.cloud.recommendationengine_v1beta1.types.ListPredictionApiKeyRegistrationsRequest): The request object. Request message for the `ListPredictionApiKeyRegistrations`. parent (str): Required. The parent placement resource name such as ``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.recommendationengine_v1beta1.services.prediction_api_key_registry.pagers.ListPredictionApiKeyRegistrationsPager: Response message for the ListPredictionApiKeyRegistrations. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest, ): request = prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.list_prediction_api_key_registrations ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListPredictionApiKeyRegistrationsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def delete_prediction_api_key_registration( self, request: prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Unregister an apiKey from using for predict method. Args: request (google.cloud.recommendationengine_v1beta1.types.DeletePredictionApiKeyRegistrationRequest): The request object. Request message for `DeletePredictionApiKeyRegistration` method. name (str): Required. The API key to unregister including full resource path. ``projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store/predictionApiKeyRegistrations/<YOUR_API_KEY>`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest, ): request = prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.delete_prediction_api_key_registration ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-recommendations-ai", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("PredictionApiKeyRegistryClient",)
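A hedged usage sketch for the client above (the parent path and API key are placeholders, Application Default Credentials are assumed to be available, and the field name follows the request docstrings in this file):

from google.cloud.recommendationengine_v1beta1.services.prediction_api_key_registry import (
    PredictionApiKeyRegistryClient,
)
from google.cloud.recommendationengine_v1beta1.types import (
    prediction_apikey_registry_service,
)

client = PredictionApiKeyRegistryClient()

# Placeholder parent path, matching the documented format.
parent = (
    "projects/my-project/locations/global/catalogs/default_catalog"
    "/eventStores/default_event_store"
)

registration = prediction_apikey_registry_service.PredictionApiKeyRegistration(
    api_key="my-api-key",  # placeholder value
)
client.create_prediction_api_key_registration(
    parent=parent, prediction_api_key_registration=registration,
)

# The list call returns a pager that resolves additional pages lazily.
for reg in client.list_prediction_api_key_registrations(parent=parent):
    print(reg.api_key)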
Once accepted onto a course, after an interview, a £100 deposit will reserve a place on the course of your choice. Please note that payment can only be made in Pounds Sterling. The balance of fees is normally due 3 weeks before the start of the course. Please note that in the highly unlikely event of Saint George International cancelling the course, we will refund the full fees to you within 2 weeks of cancellation. Please note that for students who have started their course or who cancel within one week of the start date, no refund will be made on tuition fees. If cancelling with 1-3 weeks' notice, 50% of the course fee will be refunded. Transfer to a later course can be discussed. Please note that for students who have started their course and have received their initial workbooks, no refund can be made on tuition fees. Once course fees are paid, students have two years to complete their course, based on the month of starting, e.g. start month, September 2015: latest finish month, September 2017. On a discretionary basis, a one-year extension to the course can be agreed with SGI. Currently this extension fee is £180. Please note that for students who have started their course and have received their initial workbooks, no refund can be made on tuition fees. Once course fees are paid, students have nine months to complete their course, based on the month of starting, e.g. start month, September 2015: latest finish month, June 2016. On a discretionary basis, a four-month extension to the course can be agreed with SGI. Currently this extension fee is £60.
""" Copyright (C) 2017 Quinn D Granfor <spootdev@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2, as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License version 2 for more details. You should have received a copy of the GNU General Public License version 2 along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ import pycountry from babel.dates import format_date from babel.numbers import format_decimal def com_inter_date_format(date_to_format, country_code='en_US'): return format_date(date_to_format, locale=country_code) def com_inter_number_format(number_to_format, country_code='en_US'): return format_decimal(number_to_format, locale=country_code) def com_inter_country_name(country_code='eng'): try: lang = pycountry.languages.get(alpha_3=country_code) except KeyError: return country_code if lang is None: return country_code return lang.name
At Residential Architect Pros, you can take advantage of experienced Architects who will deliver schematic design, design development and construction documents. The Architect at Residential Architect Pros in Serafina, NM will work in your interests. When you hire an efficient residential Architect, your building construction needs will be fulfilled without any issues. The Architect will take care of all your needs. As per your instructions, residential Architects will deal with the various contractors involved in the project. You can utilize licensed contractors' services. When you get advice from interior Architects, you can hire contractors in Serafina, NM without any issues. The home owner will get assistance from various quarters, and accurate estimates can be obtained. It is possible to build your dream home as per your budget. The bids will be reviewed and realistic estimates will be prepared. Architectural services offered by Residential Architect Pros in Serafina, NM will save you time, effort and money. When your project is handled by reliable residential Architects, you can achieve your dream home without any issues. Residential Architects will give you multiple options so that selection of the right kind of service can be done very easily. The support of customer care can be sought to address various issues. You will get a response through 800-509-2030 at any time without any issues. You can get easy access to various kinds of services from experienced contractors. The seasoned professional will have a discussion with you to get it done as per your needs. Residential Architect Pros in Serafina, NM will let you execute written contracts so that there will not be any conflicts of interest. Your requirements will be taken care of by the Architect company so that you can save a lot of money. You should choose Architect companies very carefully. The reliability of the service can be gauged through initial interactions and the service rendered by the company to past home owners.
class AVLNode(object):
    def __init__(self, key):
        self.key = key
        self.right_child = None
        self.left_child = None
        self.parent = None
        self.height = 0
        self.balance = 0

    def update_height(self, upwards=True):
        #If upwards we go up the tree correcting heights and balances,
        #if not we just correct the given node.
        if self.left_child is None:
            #Empty left tree.
            left_height = 0
        else:
            left_height = self.left_child.height + 1
        if self.right_child is None:
            #Empty right tree.
            right_height = 0
        else:
            right_height = self.right_child.height + 1
        #Note that the balance can change even when the height does not,
        #so change it before checking to see if height needs updating.
        self.balance = left_height - right_height
        height = max(left_height, right_height)
        if self.height != height:
            self.height = height
            if self.parent is not None:
                #We only need to go up a level if the height changes.
                if upwards:
                    self.parent.update_height()

    def is_left(self):
        #Handy to find out whether a node is a left or right child or neither.
        if self.parent is None:
            return self.parent
        else:
            return self is self.parent.left_child


class AVLTree(object):
    def __init__(self):
        self.root = None

    def insert(self, key, node=None):
        #The first call is slightly different.
        if node is None:
            #First call, start node at root.
            node = self.root
            if node is None:
                #Empty tree, create root.
                node = AVLNode(key=key)
                self.root = node
                return node
            else:
                ret = self.insert(key=key, node=node)
                self.balance(ret)
                return ret
        #Not a first call.
        if node.key == key:
            #No need to insert, key already present.
            return node
        elif node.key > key:
            child = node.left_child
            if child is None:
                #Reached the bottom, insert node and update heights.
                child = AVLNode(key=key)
                child.parent = node
                node.left_child = child
                node.update_height()
                return child
            else:
                return self.insert(key=key, node=child)
        elif node.key < key:
            child = node.right_child
            if child is None:
                #Reached the bottom, insert node and update heights.
                child = AVLNode(key=key)
                child.parent = node
                node.right_child = child
                node.update_height()
                return child
            else:
                return self.insert(key=key, node=child)
        else:
            print "This shouldn't happen."

    def find(self, key, node=None):
        if node is None:
            #First call.
            node = self.root
            if self.root is None:
                return None
            else:
                return self.find(key, self.root)
        #Now we handle nonfirst calls.
        elif node.key == key:
            #Found the node.
            return node
        elif key < node.key:
            if node.left_child is None:
                #If key not in tree, we return a node that would be its parent.
                return node
            else:
                return self.find(key, node.left_child)
        else:
            if node.right_child is None:
                return node
            else:
                return self.find(key, node.right_child)

    def delete(self, key, node=None):
        #Delete key from tree.
        if node is None:
            #Initial call.
            node = self.find(key)
            if (node is None) or (node.key != key):
                #Empty tree or key not in tree.
                return
        if (node.left_child is None) and (node.right_child is not None):
            #Has one right child.
            right_child = node.right_child
            left = node.is_left()
            if left is not None:
                parent = node.parent
                if not left:
                    parent.right_child = right_child
                else:
                    parent.left_child = right_child
                right_child.parent = parent
                self.balance(parent)
            else:
                right_child.parent = None
                self.root = right_child
                #No need to update heights or rebalance.
        elif (node.left_child is not None) and (node.right_child is None):
            #Has one left child.
            left_child = node.left_child
            left = node.is_left()
            if left is not None:
                parent = node.parent
                if left:
                    parent.left_child = left_child
                else:
                    parent.right_child = left_child
                left_child.parent = parent
                self.balance(parent)
            else:
                left_child.parent = None
                self.root = left_child
        elif node.left_child is None:
            #Has no children.
            parent = node.parent
            if parent is None:
                #Deleting a lone root, set tree to empty.
                self.root = None
            else:
                if parent.left_child is node:
                    parent.left_child = None
                else:
                    parent.right_child = None
                self.balance(parent)
        else:
            #Node has two children, swap keys with successor node
            #and delete successor node.
            successor = self.find_leftmost(node.right_child)
            node.key = successor.key
            self.delete(key=node.key, node=successor)
            #Note that updating the heights will be handled in the next
            #call of delete.

    def find_rightmost(self, node):
        if node.right_child is None:
            return node
        else:
            return self.find_rightmost(node.right_child)

    def find_leftmost(self, node):
        if node.left_child is None:
            return node
        else:
            return self.find_leftmost(node.left_child)

    def find_next(self, key):
        node = self.find(key)
        if (node is None) or (node.key != key):
            #Key not in tree.
            return None
        else:
            right_child = node.right_child
            if right_child is not None:
                node = self.find_leftmost(right_child)
            else:
                parent = node.parent
                while(parent is not None):
                    if node is parent.left_child:
                        break
                    node = parent
                    parent = node.parent
                node = parent
            if node is None:
                #Key is largest in tree.
                return node
            else:
                return node.key

    def find_prev(self, key):
        node = self.find(key)
        if (node is None) or (node.key != key):
            #Key not in tree.
            return None
        else:
            left_child = node.left_child
            if left_child is not None:
                node = self.find_rightmost(left_child)
            else:
                parent = node.parent
                while(parent is not None):
                    if node is parent.right_child:
                        break
                    node = parent
                    parent = node.parent
                node = parent
            if node is None:
                #Key is smallest in tree.
                return node
            else:
                return node.key

    def balance(self, node):
        node.update_height(False)
        if node.balance == 2:
            if node.left_child.balance != -1:
                #Left-left case.
                self.right_rotation(node)
                if node.parent.parent is not None:
                    #Move up a level.
                    self.balance(node.parent.parent)
            else:
                #Left-right case.
                self.left_rotation(node.left_child)
                self.balance(node)
        elif node.balance == -2:
            if node.right_child.balance != 1:
                #Right-right case.
                self.left_rotation(node)
                if node.parent.parent is not None:
                    self.balance(node.parent.parent)
            else:
                #Right-left case.
                self.right_rotation(node.right_child)
                self.balance(node)
        else:
            if node.parent is not None:
                self.balance(node.parent)

    #I also include a new plotting routine to show the balances or keys of the node.
    def plot(self, balance=False):
        #Builds a copy of the BST in igraphs for plotting.
        #Since exporting the adjacency lists loses information about
        #left and right children, we build it using a queue.
        import igraph as igraphs
        G = igraphs.Graph()
        if self.root is None:
            #Nothing to plot for an empty tree.
            return
        G.add_vertices(1)
        queue = [[self.root, 0]]
        #Queue has a pointer to the node in our BST, and its index
        #in the igraphs copy.
        index = 0
        not_break = True
        while(not_break):
            #At each iteration, we label the head of the queue with its key,
            #then add any children into the igraphs graph,
            #and into the queue.
            node = queue[0][0]
            #Select front of queue.
            node_index = queue[0][1]
            if not balance:
                G.vs[node_index]['label'] = node.key
            else:
                G.vs[node_index]['label'] = node.balance
            if index == 0:
                #Label root green.
                G.vs[node_index]['color'] = 'green'
            if node.left_child is not None:
                G.add_vertices(1)
                G.add_edges([(node_index, index + 1)])
                queue.append([node.left_child, index + 1])
                #Left children are red.
                G.vs[index + 1]['color'] = 'red'
                index += 1
            if node.right_child is not None:
                G.add_vertices(1)
                G.add_edges([(node_index, index + 1)])
                G.vs[index + 1]['color'] = 'blue'
                queue.append([node.right_child, index + 1])
                index += 1
            queue.pop(0)
            if len(queue) == 0:
                not_break = False
        layout = G.layout_reingold_tilford(root=0)
        igraphs.plot(G, layout=layout)

    def right_rotation(self, root):
        left = root.is_left()
        pivot = root.left_child
        if pivot is None:
            return
        root.left_child = pivot.right_child
        if pivot.right_child is not None:
            root.left_child.parent = root
        pivot.right_child = root
        pivot.parent = root.parent
        root.parent = pivot
        if left is None:
            self.root = pivot
        elif left:
            pivot.parent.left_child = pivot
        else:
            pivot.parent.right_child = pivot
        root.update_height(False)
        pivot.update_height(False)

    def left_rotation(self, root):
        left = root.is_left()
        pivot = root.right_child
        if pivot is None:
            return
        root.right_child = pivot.left_child
        if pivot.left_child is not None:
            root.right_child.parent = root
        pivot.left_child = root
        pivot.parent = root.parent
        root.parent = pivot
        if left is None:
            self.root = pivot
        elif left:
            pivot.parent.left_child = pivot
        else:
            pivot.parent.right_child = pivot
        root.update_height(False)
        pivot.update_height(False)


def sort(lst, ascending=True):
    #Duplicate keys are collapsed, since insert ignores keys already present.
    A = AVLTree()
    for item in lst:
        A.insert(item)
    ret = []
    if A.root is None:
        #Empty input list.
        return ret
    if ascending:
        node = A.find_leftmost(A.root)
        if node is not None:
            key = node.key
        else:
            key = node
        while (key is not None):
            ret.append(key)
            key = A.find_next(key)
    else:
        node = A.find_rightmost(A.root)
        if node is not None:
            key = node.key
        else:
            key = node
        while (key is not None):
            ret.append(key)
            key = A.find_prev(key)
    return ret


def test_rotation():
    lst = [1, 4, 2, 5, 1, 3, 7, 11, 4.5]
    print "List is ", lst
    B = AVLTree()
    for item in lst:
        print "inserting", item
        B.insert(item)
    node = B.find(4)
    B.left_rotation(node)
    B.plot(True)
    B.right_rotation(node.parent)

test_rotation()
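A short demo of the tree and the sort helper above (hedged: plot() additionally requires python-igraph, duplicate keys are collapsed by insert, and Python 2 print statements match the file):

tree = AVLTree()
for key in [5, 2, 8, 1, 3]:
    tree.insert(key)
print tree.find_next(3)                       # 5
print tree.find_prev(5)                       # 3
tree.delete(2)
print sort([5, 2, 8, 1, 3])                   # [1, 2, 3, 5, 8]
print sort([5, 2, 8, 1, 3], ascending=False)  # [8, 5, 3, 2, 1]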
Cathryn Jakobson Ramin, an investigative journalist, is author of “Crooked: Outwitting the Back Pain Industry and Getting on the Road to Recovery.” Herself a back pain sufferer, she chronicles her odyssey, touching on virtually every option, conventional and alternative, for back pain. She claims that “back pain care is a microcosm of everything that’s wrong with our health care system.” How have perverse incentives created a self-perpetuating back pain industry, and sustained unproven and dangerous modalities? What are the downsides of surgery? Why is “PT” (physical therapy) not always the answer to avoiding unnecessary surgery? What are the pitfalls of “minimally-invasive” or laser spinal surgery? What about spinal injections for pain? How has back pain spawned an epidemic of opiate addiction and disability? Are there rogue surgeons who game the insurance system and prey on vulnerable patients? Why might it be a mistake to get an MRI? What role does exercise play in forestalling back problems? How does prolonged sitting set the stage for back problems? It’s not all bad news: Ramin presents a wide array of possible natural, non-invasive solutions to permanently address back pain that she has personally field-tested. Click HERE for part 2. Dr. Hoffman continues his conversation with Cathryn Jakobson Ramin, an investigative journalist and author of “Crooked: Outwitting the Back Pain Industry and Getting on the Road to Recovery.” Click HERE for part 1.
""" Timezone-related classes and functions. This module uses pytz when it's available and fallbacks when it isn't. """ from datetime import datetime, timedelta, tzinfo from threading import local import sys import time as _time try: import pytz except ImportError: pytz = None from django.conf import settings from django.utils import six __all__ = [ 'utc', 'get_default_timezone', 'get_default_timezone_name', 'get_current_timezone', 'get_current_timezone_name', 'activate', 'deactivate', 'override', 'localtime', 'now', 'is_aware', 'is_naive', 'make_aware', 'make_naive', ] # UTC and local time zones ZERO = timedelta(0) class UTC(tzinfo): """ UTC implementation taken from Python's docs. Used only when pytz isn't available. """ def __repr__(self): return "<UTC>" def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO class ReferenceLocalTimezone(tzinfo): """ Local time implementation taken from Python's docs. Used only when pytz isn't available, and most likely inaccurate. If you're having trouble with this class, don't waste your time, just install pytz. Kept identical to the reference version. Subclasses contain improvements. """ def __init__(self): # This code is moved in __init__ to execute it as late as possible # See get_default_timezone(). self.STDOFFSET = timedelta(seconds=-_time.timezone) if _time.daylight: self.DSTOFFSET = timedelta(seconds=-_time.altzone) else: self.DSTOFFSET = self.STDOFFSET self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET tzinfo.__init__(self) def __repr__(self): return "<LocalTimezone>" def utcoffset(self, dt): if self._isdst(dt): return self.DSTOFFSET else: return self.STDOFFSET def dst(self, dt): if self._isdst(dt): return self.DSTDIFF else: return ZERO def tzname(self, dt): is_dst = False if dt is None else self._isdst(dt) return _time.tzname[is_dst] def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, 0) stamp = _time.mktime(tt) tt = _time.localtime(stamp) return tt.tm_isdst > 0 class LocalTimezone(ReferenceLocalTimezone): """ Slightly improved local time implementation focusing on correctness. It still crashes on dates before 1970 or after 2038, but at least the error message is helpful. """ def _isdst(self, dt): try: return super(LocalTimezone, self)._isdst(dt) except (OverflowError, ValueError) as exc: exc_type = type(exc) exc_value = exc_type( "Unsupported value: %r. You should install pytz." % dt) exc_value.__cause__ = exc six.reraise(exc_type, exc_value, sys.exc_info()[2]) utc = pytz.utc if pytz else UTC() """UTC time zone as a tzinfo instance.""" # In order to avoid accessing the settings at compile time, # wrap the expression in a function and cache the result. _localtime = None def get_default_timezone(): """ Returns the default time zone as a tzinfo instance. This is the time zone defined by settings.TIME_ZONE. See also :func:`get_current_timezone`. """ global _localtime if _localtime is None: if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None: _localtime = pytz.timezone(settings.TIME_ZONE) else: # This relies on os.environ['TZ'] being set to settings.TIME_ZONE. _localtime = LocalTimezone() return _localtime # This function exists for consistency with get_current_timezone_name def get_default_timezone_name(): """ Returns the name of the default time zone. """ return _get_timezone_name(get_default_timezone()) _active = local() def get_current_timezone(): """ Returns the currently active time zone as a tzinfo instance. 
""" return getattr(_active, "value", get_default_timezone()) def get_current_timezone_name(): """ Returns the name of the currently active time zone. """ return _get_timezone_name(get_current_timezone()) def _get_timezone_name(timezone): """ Returns the name of ``timezone``. """ try: # for pytz timezones return timezone.zone except AttributeError: # for regular tzinfo objects return timezone.tzname(None) # Timezone selection functions. # These functions don't change os.environ['TZ'] and call time.tzset() # because it isn't thread safe. def activate(timezone): """ Sets the time zone for the current thread. The ``timezone`` argument must be an instance of a tzinfo subclass or a time zone name. If it is a time zone name, pytz is required. """ if isinstance(timezone, tzinfo): _active.value = timezone elif isinstance(timezone, six.string_types) and pytz is not None: _active.value = pytz.timezone(timezone) else: raise ValueError("Invalid timezone: %r" % timezone) def deactivate(): """ Unsets the time zone for the current thread. Django will then use the time zone defined by settings.TIME_ZONE. """ if hasattr(_active, "value"): del _active.value class override(object): """ Temporarily set the time zone for the current thread. This is a context manager that uses ``~django.utils.timezone.activate()`` to set the timezone on entry, and restores the previously active timezone on exit. The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a time zone name, or ``None``. If is it a time zone name, pytz is required. If it is ``None``, Django enables the default time zone. """ def __init__(self, timezone): self.timezone = timezone self.old_timezone = getattr(_active, 'value', None) def __enter__(self): if self.timezone is None: deactivate() else: activate(self.timezone) def __exit__(self, exc_type, exc_value, traceback): if self.old_timezone is None: deactivate() else: _active.value = self.old_timezone # Templates def template_localtime(value, use_tz=None): """ Checks if value is a datetime and converts it to local time if necessary. If use_tz is provided and is not None, that will force the value to be converted (or not), overriding the value of settings.USE_TZ. This function is designed for use by the template engine. """ should_convert = (isinstance(value, datetime) and (settings.USE_TZ if use_tz is None else use_tz) and not is_naive(value) and getattr(value, 'convert_to_local_time', True)) return localtime(value) if should_convert else value # Utilities def localtime(value, timezone=None): """ Converts an aware datetime.datetime to local time. Local time is defined by the current time zone, unless another time zone is specified. """ if timezone is None: timezone = get_current_timezone() value = value.astimezone(timezone) if hasattr(timezone, 'normalize'): # available for pytz time zones value = timezone.normalize(value) return value def now(): """ Returns an aware or naive datetime.datetime, depending on settings.USE_TZ. """ if settings.USE_TZ: # timeit shows that datetime.now(tz=utc) is 24% slower return datetime.utcnow().replace(tzinfo=utc) else: return datetime.now() # By design, these four functions don't perform any checks on their arguments. # The caller should ensure that they don't receive an invalid value like None. def is_aware(value): """ Determines if a given datetime.datetime is aware. 
The logic is described in Python's docs: http://docs.python.org/library/datetime.html#datetime.tzinfo """ return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None def is_naive(value): """ Determines if a given datetime.datetime is naive. The logic is described in Python's docs: http://docs.python.org/library/datetime.html#datetime.tzinfo """ return value.tzinfo is None or value.tzinfo.utcoffset(value) is None def make_aware(value, timezone): """ Makes a naive datetime.datetime in a given time zone aware. """ if hasattr(timezone, 'localize'): # available for pytz time zones return timezone.localize(value, is_dst=None) else: # may be wrong around DST changes return value.replace(tzinfo=timezone) def make_naive(value, timezone): """ Makes an aware datetime.datetime naive in a given time zone. """ value = value.astimezone(timezone) if hasattr(timezone, 'normalize'): # available for pytz time zones value = timezone.normalize(value) return value.replace(tzinfo=None)
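A hedged usage sketch for the module above (requires configured Django settings with USE_TZ = True; the zone name is illustrative and resolving it needs pytz):

from datetime import datetime

naive = datetime(2013, 5, 17, 12, 0)
aware = make_aware(naive, get_default_timezone())
print(is_naive(naive), is_aware(aware))  # True True

with override('Europe/Paris'):
    # localtime() converts to the zone activated by the context manager.
    print(localtime(aware))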
This page aims to collate community concerns related to the adoption of the "Geoservices REST API" document as a standard of the Open Geospatial Consortium (OGC). The page has been collaboratively edited, and was delivered by the board of the OSGeo Foundation (OSGeo) to the OGC and OGC voting members on Friday 17 May 2013. The board of the Open Source Geospatial Foundation (OSGeo) is presenting this letter to the OGC. The letter highlights concerns about the "GeoServices REST API" from many people within the OSGeo community. As always, if there is anything the OSGeo board can do to help, then please let us know. We, the undersigned, have concerns that approving the "Geoservices REST API" as an OGC standard will have detrimental impacts on interoperability within the spatial industry. We strongly urge that the proposed "Geoservices REST API", as it stands in May 2013, be rejected as an OGC standard. People have listed different reasons for concern. These concerns are described below. Mark Lucas, Founding member and board of directors for OSGeo foundation, Principal Scientist for RadiantBlue Technologies Inc. Landon Blake, GIS Department Manager/Land Surveyor at KSN, OSGeo California Chapter Board Representative. Daniel Morissette, President at Mapgears, OSGeo Board member, core contributor and PSC member of MapServer and GDAL/OGR, former OGC TC member and involved in the implementation of several OGC WxS specs in MapServer. Bob Basques, GIS Systems Developer at the City of Saint Paul, MN, Public Works GIS (GISmo), Technical Director at SharedGeo, OSGeo Charter Member, OSGeo TCMUG local chapter member, co-founder and PSC member of the GeoMoose project. Pedro-Juan Ferrer Matoses, PM at Omnium Strategic Intelligence, Spain, OSGeo Charter Member, OSGeo Spanish Local Chapter Liaison officer. María Arias de Reyna, software engineer at GeoCat, Spain, member of the OSGeo Spanish Local Chapter. Anne Ghisla, OSGeo Board Member, Italy, member of the OSGeo Italian Local Chapter. Olivier Courtin, Oslandia co-founder, core contributor and/or PSC member of MapServer and PostGIS, OGC TC member. Anita Graser, GIS specialist with AIT Austrian Institute of Technology, OSGeo Charter member and QGIS team member. Sandro Santilli, OSGeo Charter Member, PostGIS and GEOS PSC member and core hacker. Carlos Krefft, GIS software developer at CSTARS - University of Miami, OGC and OSGeo Member. Benni Purwonegoro, Indonesia, IT-Spatial Engineer @ Geospatial Information Agency. Alan Boudreault, Developer at Mapgears, contributor to MapServer and GDAL/OGR. Kumaran Narayanaswamy, CEO & Managing Director of kCube Consultancy Services Pvt Ltd, India, member of the India OSGeo Chapter. Luis Fernando Bueno, Professor at Federal University of Rondonia, researcher and GIS analyst, Brazil. Bob Bruce, FEC, P.Eng., Geomatics Engineer, Winnipeg, Manitoba, Canada. Gavin Fleming, OSGeo Charter member, owner of AfriSpatial. Massimo Di Stefano, OSGeo Charter Member, Italy, founder-member of the OSGeo Italian Local Chapter. The OGC candidate standard titled "GeoServices REST API" is currently, in May 2013, being considered to be approved as an Open Geospatial Consortium (OGC) standard. The vote to accept the document as a standard is unusually contentious. The controversy is the cause of this open letter. The candidate standard was previously released for public comment and can be found on the request for public comment page (though public comment has been closed for now).
The candidate standard attempts to standardize a suite of web services, such as a service which provides map images, a service which provides geospatial feature data, and a service which performs geospatial processing. The candidate standard focuses on interactions via a defined hierarchy of URLs, using predominantly a particular set of JSON schemas for the exchange of geospatial data. Chief among the concerns is the lack of implementation diversity, which is thought to give the vendor of the one complete implementation an unusual commercial advantage on top of the vendor's already dominant position in the domain. These issues have potential impacts on the use of 'Open Standards' by governments and companies, on the interoperability of software interacting with standards-compliant OGC services, on the costs to developers and users of standards-compliant software, on the understanding of 'Open Standards' by the public at large, and, possibly, on the reputation of the OGC as a champion of interoperability. The cost to application developers, systems integrators, testers and sponsors of supporting all relevant OGC standards will be substantially increased. Sponsors (such as governments) who require compliance with OGC standards will discover that applications don't communicate with each other, due to applications supporting different OGC standards that essentially do the same thing. This will result in a diminished importance of the OGC, as the "OGC standards" stamp of approval will not equate to interoperability. After a while, in order to solve interoperability issues, a respected international organisation or program will likely take the initiative to mandate one standard as the preferred standard for all agencies to follow. To date, the OGC has provided this leadership. The authors of the candidate standard respond that there are alternative implementations for some of these services. The authors also stress that the existence of a large user base shows the service is useful, and that the standardization of the services at the OGC may encourage new implementations. The SWG has published two documents in response to various comments. OGC 12-646 Response to RFC Comments presents the responses from the Standards Working Group to the comments received from the public during the public Request for Comments (RFC). OGC 13-031r1 Response to 'no' votes presents the responses from the Standards Working Group to the reasons given by the organizations voting "no" during the adoption vote. Both are available through the links above, or via the public page of the Standards Working Group. The discussion raises a number of issues, many based upon complex technical concepts and implications. This makes it difficult for voting OGC members to decide whether or not to support the "Geoservices REST API" as a standard. The following provides one analysis of the positions on the vote, aimed at simplifying and summarizing key points. However, it does not necessarily represent the opinions held by all signatories above. The OGC should be in the business of developing good standards, not in choosing which standards should be implemented. The proposers of the document want to make a standard and have followed all the rules of the OGC. The work of any such group of members deserves serious, good-faith consideration. The need for an integrated suite of services using simple data, which is addressed (partially) by the document, is real. The proposed document is pushing the OGC on this issue. The proposed document could be useful to a number of people.
"I know how totally impossible it is to write a good standard, so the weaknesses in the existing document seem more acceptable." The OGC actually is, whether it should be or not, in the position of recommending interoperable standards for geospatial services. The proposed document is not good enough, has implementations dominated by one vendor's server implementation, and not publicly supported enough, to be considered at the same level as existing standards. Adopting a standard implies a desire to maintain the standard, but OGC's desire to support this approach has been questioned by some. In particular, the lack of collaboration and willingness to accept recommendations from the community on this version of the "Geoservices REST API" document bodes ill for the future. The overlap in functionality between the proposed services and the existing services, notably with the ongoing work to modularize the existing services, is almost 100 percent. However, compatibility is low. There is already a published document: http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf so there is no need for the document to be adopted as an OGC Standard merely for interoperability with the ESRI implementation. The document, as a new, separate effort, repeats mistakes which were made and since solved by the other services. The document focuses on the past (notably with backwards compatibility and use of only GET/POST) not on the future. The document needs a comprehensive editorial review and substantial rewriting for clarity. Both simple answers are bad. A simple acceptance of the standard would introduce a new set of 'OGC approved' open services. The OGC approval might enable governments to buy a XXXX-new-name-here-XXXX solution instead of a W*S or a S*S solution. The path forwards towards harmonizing the services is unclear. Fixing this document in addition to fixing the W*S services will be a pain. Simply rejecting the solution would be bad for the OGC. It would place the OGC in the position of picking winners and losers in the standards business. It would mean that the OGC is stuck on the project of fixing the W*S standards to meet some nebulous future functionality without having any path to get there. It would discourage innovation and progress. Is there any third way? Well, actually, there is a different way of thinking of the issue. Overall, there appears to be a shared desire for an integrated suite of geospatial services, originally focused on a simple data model, built on the exchange of well defined resources in simple formats including JSON, accessible and usable using the core HTTP verbs, and discoverable through following HTML links and patterns of URL paths. The hope is that such a suite can be designed based on the best expertise of the OGC, can be widely supported by the community, and can be implemented and tested by multiple groups. Neither the proposed document, nor the current services meet this vision. So the work, ultimately, is on improving all the services at the OGC, first to modularize them, then to enable simple implementations, and finally to link those implementations into a functional suite. Since this is the work that is already happening, perhaps the vote is an unfortunate distraction and the productive way forward is merely to redouble the efforts to create the next versions of the standards. Beyond the controversy described above, there are issues with the Geoservices REST API document itself. 
Even if the standard deserves support, these issues could be considered blockers to the adoption of the current (May 2013) document. The critique is incomplete, because it quickly turns into a full editorial review of the text, something which takes a lot of time and effort and is beyond the scope and intent of this Open Letter. The critique can be found at: http://wiki.osgeo.org/wiki/Geoservices_REST_API_critique. Note that some of these critiques hold the document to the OGC's current standards-writing guidelines. The OGC has been striving to develop better standards, so new standards must meet higher requirements than past standards. The lack of clarity in the proposed document is not substantially worse than in many published standards, but it ought to be resolved in new standards.

In summary, the Imaging part of the ESRI "GeoServices REST API" is at a technological level that WCS moved beyond some five years ago. The imprecision of the specification at large will make it difficult for third parties to come up with interoperable implementations. The components making up the ESRI "GeoServices REST API" provide natural blocks assignable to the matching SWGs. As for Part 6 of the ESRI "GeoServices REST API", if it is to become a standard, it needs to be discussed in the WCS.SWG for harmonization, clarification, and improvement. The GeoServices REST API cannot be amended (other than through editorial changes to the specification document) because of a requirement for backward compatibility with the ESRI implementation; this has limited improvements in this version of the candidate specification.

See also Cameron Shorter's "Will OGC's standards meet government purchasing guidelines?"

After delivery of this open letter, similar concerns were raised by some members within the OGC community. As a result, the GeoServices REST API was withdrawn as a proposed OGC standard. The OGC then initiated an Ideas4OGC review to rebaseline OGC priorities and processes and address the weaknesses that had been identified.
#!/cygdrive/C/Python25/python
"""A script to help automate the build process."""

# When preparing a new release, make sure to set all of these to the latest
# values.
VERSIONS = {
    'bzr': '1.17',
    'qbzr': '0.12',
    'bzrtools': '1.17.0',
    'bzr-svn': '0.6.3',
    'bzr-rewrite': '0.5.2',
    'subvertpy': '0.6.8',
}

# This will be passed to 'make' to ensure we build with the right python
PYTHON = '/cygdrive/c/Python25/python'

# Create the final build in this directory
TARGET_ROOT = 'release'

DEBUG_SUBPROCESS = True


import os
import shutil
import subprocess
import sys


BZR_EXE = None
def bzr():
    global BZR_EXE
    if BZR_EXE is not None:
        return BZR_EXE
    try:
        subprocess.call(['bzr', '--version'], stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
        BZR_EXE = 'bzr'
    except OSError:
        try:
            subprocess.call(['bzr.bat', '--version'], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
            BZR_EXE = 'bzr.bat'
        except OSError:
            raise RuntimeError('Could not find bzr or bzr.bat on your path.')
    return BZR_EXE


def call_or_fail(*args, **kwargs):
    """Call a subprocess, and fail if the return code is not 0."""
    if DEBUG_SUBPROCESS:
        print '  calling: "%s"' % (' '.join(args[0]),)
    p = subprocess.Popen(*args, **kwargs)
    (out, err) = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('Failed to run: %s, %s' % (args, kwargs))
    return out


TARGET = None
def get_target():
    global TARGET
    if TARGET is not None:
        return TARGET
    out = call_or_fail([sys.executable, get_bzr_dir() + '/bzr',
                        'version', '--short'], stdout=subprocess.PIPE)
    version = out.strip()
    TARGET = os.path.abspath(TARGET_ROOT + '-' + version)
    return TARGET


def clean_target():
    """Nuke the target directory so we know we are starting from scratch."""
    target = get_target()
    if os.path.isdir(target):
        print "Deleting: %s" % (target,)
        shutil.rmtree(target)


def get_bzr_dir():
    return 'bzr.' + VERSIONS['bzr']


def update_bzr():
    """Make sure we have the latest bzr in play."""
    bzr_dir = get_bzr_dir()
    if not os.path.isdir(bzr_dir):
        bzr_version = VERSIONS['bzr']
        bzr_url = 'lp:bzr/' + bzr_version
        print "Getting bzr release %s from %s" % (bzr_version, bzr_url)
        call_or_fail([bzr(), 'co', bzr_url, bzr_dir])
    else:
        print "Ensuring %s is up-to-date" % (bzr_dir,)
        call_or_fail([bzr(), 'update', bzr_dir])


def create_target():
    target = get_target()
    print "Creating target dir: %s" % (target,)
    call_or_fail([bzr(), 'co', get_bzr_dir(), target])


def get_plugin_trunk_dir(plugin_name):
    return '%s/trunk' % (plugin_name,)


def get_plugin_release_dir(plugin_name):
    return '%s/%s' % (plugin_name, VERSIONS[plugin_name])


def get_plugin_trunk_branch(plugin_name):
    return 'lp:%s' % (plugin_name,)


def update_plugin_trunk(plugin_name):
    trunk_dir = get_plugin_trunk_dir(plugin_name)
    if not os.path.isdir(trunk_dir):
        plugin_trunk = get_plugin_trunk_branch(plugin_name)
        print "Getting latest %s trunk" % (plugin_name,)
        call_or_fail([bzr(), 'co', plugin_trunk, trunk_dir])
    else:
        print "Ensuring %s is up-to-date" % (trunk_dir,)
        call_or_fail([bzr(), 'update', trunk_dir])
    return trunk_dir


def _plugin_tag_name(plugin_name):
    if plugin_name in ('bzr-svn', 'bzr-rewrite', 'subvertpy'):
        return '%s-%s' % (plugin_name, VERSIONS[plugin_name])
    # bzrtools and qbzr use 'release-X.Y.Z'
    return 'release-' + VERSIONS[plugin_name]


def update_plugin(plugin_name):
    release_dir = get_plugin_release_dir(plugin_name)
    if not os.path.isdir(plugin_name):
        if plugin_name in ('bzr-svn', 'bzr-rewrite'):
            # bzr-svn uses a different repo format
            call_or_fail([bzr(), 'init-repo', '--rich-root-pack', plugin_name])
        else:
            os.mkdir(plugin_name)
    if os.path.isdir(release_dir):
        print "Removing existing dir: %s" % (release_dir,)
        shutil.rmtree(release_dir)
    # First update trunk
    trunk_dir = update_plugin_trunk(plugin_name)
    # Now create the tagged directory
    tag_name = _plugin_tag_name(plugin_name)
    print "Creating the branch %s" % (release_dir,)
    call_or_fail([bzr(), 'co', '-rtag:%s' % (tag_name,),
                  trunk_dir, release_dir])
    return release_dir


def install_plugin(plugin_name):
    release_dir = update_plugin(plugin_name)
    # at least bzrtools doesn't like you to call 'setup.py' unless you are in
    # that directory specifically, so we cd, rather than calling it from
    # outside
    print "Installing %s" % (release_dir,)
    call_or_fail([sys.executable, 'setup.py', 'install', '-O1',
                  '--install-lib=%s' % (get_target(),)],
                 cwd=release_dir)


def update_tbzr():
    tbzr_loc = os.environ.get('TBZR', None)
    if tbzr_loc is None:
        raise ValueError('You must set TBZR to the location of tortoisebzr.')
    print 'Updating %s' % (tbzr_loc,)
    call_or_fail([bzr(), 'update', tbzr_loc])


def build_installer():
    target = get_target()
    print
    print
    print '*' * 60
    print 'Building standalone installer'
    call_or_fail(['make', 'PYTHON=%s' % (PYTHON,), 'installer'], cwd=target)


def main(args):
    import optparse
    p = optparse.OptionParser(usage='%prog [OPTIONS]')
    opts, args = p.parse_args(args)

    update_bzr()
    update_tbzr()
    clean_target()
    create_target()
    install_plugin('subvertpy')
    install_plugin('bzrtools')
    install_plugin('qbzr')
    install_plugin('bzr-svn')
    install_plugin('bzr-rewrite')
    build_installer()


if __name__ == '__main__':
    main(sys.argv[1:])

# vim: ts=4 sw=4 sts=4 et ai
Google is prodding merchants to provide more information to make its search results more effective, and more like Amazon. The question is whether merchants will play ball. In a blog post Tuesday, Google said that it will add ratings to its so-called “product listing ads,” which merchants use to sell everything from juicers to microwaves to flat-screen TVs via Google search pages. Such ads appear in comparative grids of search results, and Google said U.S. shoppers will see ratings of up to five stars and a count of reviews below each item. The company said the rating system is based on aggregated rating and review data from multiple sources, including merchants, third-party aggregators, editorial sites and users. But the Google plan faces a question: will merchants who gather customer ratings decide to share that data with the Internet giant?
# -*- coding: utf-8 -*-

import json

from warnings import warn
from tornado.web import RedirectHandler, URLSpec

from .helpers import query_except_handler, require_auth

from app.mixins.routes import JsonResponseMixin
from app.models.dbconnect import Session, db_inspector
from app.models.usermodels import User
from app.models.pagemodels import StaticPageModel, UrlMapping
from app.models.non_relation_data import NonRelationData
from app.models.catalogmodels import CatalogSectionModel, CatalogItemModel


class AdminMainHandler(JsonResponseMixin):
    @require_auth
    def post(self):
        action = self.get_argument('action')
        kwrgs = {}
        try:
            kwrgs = json.loads(self.get_argument('args'))
        except:
            kwrgs = {}

        actions = {
            'get_pages_list': self.get_pages_list,
            'get_catalog_sections': self.get_catalog_sections,
            'get_catalog_elements': self.get_catalog_elements,
            'get_redirect_list': self.get_redirect_list,
            'get_accounts_list': self.get_accounts_list,
            'get_data_list': self.get_data_list,
            'get_fields': self.get_fields,
            'add': self.create,     # for add new element/section forms
            'update': self.update,  # for editing elements/sections forms
            'delete': self.delete,  # for deleting elements/sections
            'reorder': self.reorder # custom reordering
        }

        if action not in actions.keys():
            return self.json_response({
                'status': 'error',
                'error_code': 'non_existent_action'
            })

        func = actions[action]
        return func(**kwrgs)

    @query_except_handler
    def get_pages_list(self):
        session = Session()
        try:
            result = session.execute(
                StaticPageModel.get_ordered_list_query().done()
            )
            data = session.query(StaticPageModel).instances(result)
        except Exception as e:
            warn(
                'adm/AdminMainHandler.get_pages_list(): ' +
                'cannot get static pages:\n%s' % e
            )
            raise e
        finally:
            session.close()

        pages_list = [x.static_list for x in data]
        for idx, page in enumerate(pages_list):
            page['sort'] = idx + 1

        return self.json_response({
            'status': 'success',
            'data_list': pages_list
        })

    ## TODO : Optimize and using join ¯\(°_o)/¯
    @query_except_handler
    def get_catalog_sections(self):
        session = Session()
        try:
            cats = session.query(CatalogSectionModel.id).all()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.get_catalog_sections(): ' +
                'cannot get catalog sections:\n%s' % e
            )
            raise e

        counts = []
        for i in cats:
            try:
                count = (
                    session
                    .query(CatalogItemModel.id)
                    .filter_by(section_id=i[0])
                    .all()
                )
            except Exception as e:
                session.close()
                warn(
                    'adm/AdminMainHandler.get_catalog_sections(): ' +
                    'cannot get catalog items by section id #%s:\n%s'
                    % (str(i[0]), e)
                )
                raise e
            counts.append((len(count),))

        try:
            data = session.query(
                CatalogSectionModel.title,
                CatalogSectionModel.id,
                CatalogSectionModel.is_active
            ).all()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.get_catalog_sections(): ' +
                'cannot get catalog sections:\n%s' % e
            )
            raise e

        session.close()
        return self.json_response({
            'status': 'success',
            'data_list': [
                {
                    'is_active': bool(x[1][2]),
                    'id': x[1][1],
                    'title': x[1][0],
                    'count': x[0][0]
                } for x in list(zip(counts, data))
            ]
        })

    @query_except_handler
    def get_catalog_elements(self, id):
        session = Session()
        try:
            data = session.query(
                CatalogItemModel.id,
                CatalogItemModel.title,
                CatalogItemModel.is_active
            ).filter_by(section_id=id).all()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.get_catalog_elements(): ' +
                'cannot get catalog items by section id #%s:\n%s'
                % (str(id), e)
            )
            raise e

        try:
            title = session.query(
                CatalogSectionModel.title
            ).filter_by(id=id).one()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.get_catalog_elements(): ' +
                'cannot get catalog section by id #%s:\n%s' % (str(id), e)
            )
            raise e

        session.close()
        return self.json_response({
            'status': 'success',
            'section_title': title[0],
            'data_list': [
                {
                    'is_active': bool(x.is_active),
                    'title': x.title,
                    'id': x.id
                } for x in data
            ]
        })

    @query_except_handler
    def get_redirect_list(self):
        session = Session()
        try:
            data = session.query(UrlMapping).all()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.get_redirect_list(): ' +
                'cannot get data from UrlMapping model:\n%s' % e
            )
            raise e
        finally:
            session.close()

        return self.json_response({
            'status': 'success',
            'data_list': [x.item for x in data]
        })

    @query_except_handler
    def get_accounts_list(self):
        session = Session()
        try:
            data = session.query(User).all()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.get_accounts_list(): ' +
                'cannot get users:\n%s' % e
            )
            raise e
        finally:
            session.close()

        return self.json_response({
            'status': 'success',
            'data_list': [
                {
                    'id': x.id,
                    'login': x.login,
                    'is_active': x.is_active
                } for x in data
            ]
        })

    @query_except_handler
    def get_static_page(self, id):
        session = Session()
        try:
            data = session.query(StaticPageModel).filter_by(id=id).one()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.get_static_page(): ' +
                'cannot get static page by id #%s:\n%s' % (str(id), e)
            )
            raise e

        session.close()
        return self.json_response({
            'status': 'success',
            'data': data.item
        })

    _section_model_map = {
        'pages': StaticPageModel,
        'redirect': UrlMapping,
        'catalog_section': CatalogSectionModel,
        'catalog_element': CatalogItemModel,
        'data': NonRelationData
    }
    _section_model_map_with_accounts = _section_model_map.copy()
    _section_model_map_with_accounts['accounts'] = User

    # models that support custom ordering
    _custom_ordering_models = [StaticPageModel]

    _able_to_remove_elements_models = [User]

    @query_except_handler
    def create(self, section, **fields_data):
        # set as True flags that was checked
        # only checked flags will be received from admin-panel front-end
        fields_data.update({
            key: True for key in fields_data.keys()
            if key.startswith('is_') or key.startswith('has_')
        })

        session = Session()
        Model = self._section_model_map[section]

        if Model in self._custom_ordering_models:
            fields_data['prev_elem'] = Model.extract_prev_elem(
                session.query(Model).instances(
                    session.execute(
                        Model.get_ordered_list_query().only_last().done()
                    )
                )
            )

        item = Model(**fields_data)
        try:
            session.add(item)
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.create(): ' +
                'cannot create item by "%s" section:\n%s' % (str(section), e)
            )
            raise e

        if section == 'redirect':
            if not self._validate_redirect(fields_data):
                return self.json_response({
                    'status': 'error',
                    'error_code': 'incorrect_data'
                })
            from app.app import application
            application().handlers[0][1][:0] = [
                self._get_redirect_router_item(fields_data)
            ]

        try:
            session.commit()
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.create(): ' +
                'cannot commit create item by "%s" section:\n%s'
                % (str(section), e)
            )
            raise e

        session.close()
        return self.json_response({'status': 'success'})

    @query_except_handler
    def update(self, id, section, **fields_data):
        Model = self._section_model_map[section]
        fields = db_inspector.get_columns(Model.__tablename__)
        fields_data_keys = fields_data.keys()
        fields_data.update({
            # set as True flags that was checked and as False that wasn't
            # only checked flags will be received from admin-panel front-end
            field['name']: field['name'] in fields_data_keys
            for field in fields
            if field['name'].startswith('is_')
            or field['name'].startswith('has_')
            or field['name'].startswith('inherit_seo_')
        })

        session = Session()
        try:
            data = session.query(Model).filter_by(id=id)
        except Exception as e:
            session.close()
            warn(
                'adm/AdminMainHandler.update(): ' +
                'cannot update element by "%s" section:\n%s'
                % (str(section), e)
            )
            raise e

        # TODO :: Clear shitcode
        if section == 'redirect':
            if not self._validate_redirect(fields_data):
                return self.json_response({
                    'status': 'error',
                    'error_code': 'incorrect_data'
                })
            from app.app import application
            hndlr = application().handlers[0][1]
            for idx in range(len(hndlr)):
                try:
                    if hndlr[idx].__dict__['kwargs']['url'] == data.one().new_url:
                        hndlr[idx] = self._get_redirect_router_item(fields_data)
                except KeyError:
                    continue

        data.update(fields_data)
        try:
            session.commit()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.update(): ' +
                'cannot commit update element by "%s" section:\n%s'
                % (str(section), e)
            )
            raise e
        finally:
            session.close()

        return self.json_response({'status': 'success'})

    @query_except_handler
    def delete(self, section, id):
        Model = self._section_model_map_with_accounts[section]
        if Model not in self._able_to_remove_elements_models:
            warn(
                'adm/AdminMainHandler.delete(): ' +
                'model "%s" is not able to delete elements' % Model.__name__
            )
            return self.json_response({
                'status': 'error',
                'error_code': 'model_is_not_able_to_delete_elements'
            })

        session = Session()
        # TODO :: support custom reordering
        try:
            session.query(Model).filter_by(id=id).delete()
            session.commit()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.delete(): ' +
                'cannot delete element by id #%s:\n%s' % (str(id), e)
            )
            return self.json_response({
                'status': 'error',
                'error_code': 'system_fail'
            })
        finally:
            session.close()

        return self.json_response({'status': 'success'})

    @query_except_handler
    def get_data_list(self):
        session = Session()
        try:
            data = session.query(NonRelationData).all()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.get_data_list(): ' +
                'cannot get non-relation data elements:\n%s' % e
            )
            raise e
        finally:
            session.close()

        return self.json_response({
            'status': 'success',
            'data_list': [x.item for x in data]
        })

    @query_except_handler
    def get_fields(self, section, edit=False, id=None):
        session = Session()
        Model = self._section_model_map_with_accounts[section]
        fields = db_inspector.get_columns(Model.__tablename__)

        # TODO :: refactoring
        types_map = {
            'BOOLEAN': 'checkbox',
            'TEXT': 'html',
            'VARCHAR(4096)': 'text',
            'VARCHAR(8192)': 'text',
            'VARCHAR(1024)': 'text',
            'VARCHAR(5000)': 'password',
            'JSON': 'data_fields' if section == 'data' else 'files',
            'INTEGER': 'text'
        }

        fields_list = []
        for field in fields:
            try:
                # ignore 'id', 'section_id' and stuff
                if 'id' in field['name'] or field['name'] == 'prev_elem':
                    continue
                field_dict = {
                    'name': field['name'],
                    'type': types_map[str(field['type'])],
                    'default_val': field['default']
                }
                if field_dict['type'] == 'files':
                    if field_dict['name'] == 'main_image':
                        field_dict['mode'] = 'single'
                    else:
                        field_dict['mode'] = 'multiple'
                fields_list.append(field_dict)
            except KeyError:
                continue

        values = None
        if edit and id is not None:
            try:
                data = session.query(Model).filter_by(id=id).one()
            except Exception as e:
                session.close()
                warn(
                    'adm/AdminMainHandler.get_fields(): ' +
                    'cannot get fields by "%s" model and id #%s:\n%s'
                    % (Model.__name__, id, e)
                )
                raise e

            def get_field_type_by_name(name):
                for item in fields_list:
                    if item['name'] == name:
                        return item['type']

            # extract values from model
            def value_resolver(key, val):
                field_type = get_field_type_by_name(key)
                if field_type == 'files':
                    try:
                        files = json.loads(val)
                        assert type(files) is list
                    except:
                        files = []
                    return json.dumps(files)
                elif field_type == 'checkbox':
                    if type(val) is bool:
                        return val
                    else:
                        return False
                else:
                    return val

            values = {k: value_resolver(k, v) for k, v in data.item.items()}
            if section == 'catalog_element':
                values.update({'section_id': data.section_id})

        if section == 'catalog_element':
            try:
                sections = session.query(CatalogSectionModel).all()
            except Exception as e:
                session.close()
                warn(
                    'adm/AdminMainHandler.get_fields(): ' +
                    'cannot get catalog sections list:\n%s' % e
                )
                raise e
            fields_list.append({
                'name': 'section_id',
                'type': 'select',
                'default_val': None,
                'list_values': [
                    {'title': x.title, 'value': x.id} for x in sections
                ]
            })

        session.close()

        for k in ['create_date', 'last_change', '_sa_instance_state',
                  'password']:
            try:
                del values[k]
            except Exception:
                pass

        return self.json_response({
            'status': 'success',
            'fields_list': fields_list,
            'values_list': values
        })

    @query_except_handler
    def reorder(self, section, target_id, at_id):
        # TODO
        # Model = self._custom_ordering_models
        if target_id == at_id:
            self.json_response({'status': 'success'})
            return

        session = Session()
        try:
            # TODO :: multiple models
            session.execute(
                StaticPageModel
                .get_reorder_page_query()
                .page(target_id)
                .place_before(at_id)
                .done()
            )
            session.commit()
        except Exception as e:
            warn(
                'adm/AdminMainHandler.reorder(): ' +
                'cannot reorder "%d" at "%d":\n%s' % (target_id, at_id, e)
            )
            raise e
        finally:
            session.close()

        self.json_response({'status': 'success'})

    # helpers

    # UrlMapping model
    def _validate_redirect(self, fields_data):
        try:
            fields_data['status'] = int(fields_data['status']) \
                if 'status' in fields_data \
                and bool(fields_data['status']) \
                else 300
            if fields_data['status'] not in [300, 301]:
                raise Exception('---invalid---')
        except:
            return False
        return True

    # UrlMapping model
    def _get_redirect_router_item(self, fields_data):
        return URLSpec(
            pattern=fields_data['old_url'] + '$',
            handler=RedirectHandler,
            kwargs={
                'url': fields_data['new_url'],
                'permanent': fields_data['status'] == 301
            },
            name=None
        )
If you are running your own business or company and have a need for manufacturing, you realize just how expensive it can be. Manufacturing costs can eat away at your revenue to the point where you are actually losing money. You may be able to sell an item for ten dollars, but if manufacturing alone is costing you eight dollars, your company will be out of business in no time. But don't fret just yet. There are ways that you can bring down your manufacturing costs in the near future and leave yourself enough money to make a profit.

Rather than try to increase the price of the product, you may be able to switch to a less expensive material during the manufacturing process. Whether it is metal or some sort of fabric, there is always a less expensive option, as long as it still fits what you are trying to accomplish.

Packaging costs during manufacturing can be expensive on their own. But do you really need packaging for your item? You might be able to remove the packaging altogether and save yourself some money. Lots of products don't have packaging, and maybe yours can make do without it as well.

With the right machines, your manufacturing can go a lot quicker and smoother. Perhaps you should find a 5-axis CNC machine that can cut out every kind of angle or shape there is. With the right machines, you should be able to manufacture more products at a reduced price.

People who run factories for a living know how to keep costs down. This is what they do. Find someone who has decades of experience and ask them how to reduce costs during manufacturing. They may just surprise you with the answers.

We aren't saying to manufacture overseas somewhere where they pay their laborers something criminally ridiculous like ten cents a day, but you might be able to lower your labor cost somewhere. Perhaps ten people could do the work of twenty with the right tweaks here and there. You will never know until you try these changes out and see if they can lower your manufacturing costs in the future.
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest

from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder

from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc6187


class SSHClientCertificateTestCase(unittest.TestCase):
    cert_pem_text = """\
MIICkDCCAhegAwIBAgIJAKWzVCgbsG5BMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMDI0MTgyNjA3WhcNMjAxMDIzMTgyNjA3WjB0MQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxEDAOBgNVBAMTB0NoYXJsaWUxIjAgBgkqhkiG9w0BCQEWE2NoYXJsaWVA
ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARfr1XPl5S0A/BwTOm4
/rO7mGVt2Tmfr3yvYnfN/ggMvyS3RiIXSsdzcAwzeqc907Jp7Dggab0PpaOKDOxD
WoK0g6B8+kC/VMsU23mfShlb9et8qcR3A8gdU6g8uvSMahWjgakwgaYwCwYDVR0P
BAQDAgeAMB0GA1UdDgQWBBQfwm5u0GoxiDcjhDt33UJYlvMPFTAfBgNVHSMEGDAW
gBTyNds0BNqlVfK9aQOZsGLs4hUIwTATBgNVHSUEDDAKBggrBgEFBQcDFTBCBglg
hkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBm
b3IgYW55IHB1cnBvc2UuMAoGCCqGSM49BAMDA2cAMGQCMGEme38A3k8q4RGSEs2D
ThQQOQz3TBJrIW8zr92S8e8BNPkRcQDR+C72TEhL/qoPCQIwGpGaC4ERiUypETkC
voNP0ODFhhlpFo6lwVHd8Gu+6hShC2PKdAfs4QFDS9ZKgQeZ
"""

    def setUp(self):
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        ssh_eku_oids = [
            rfc6187.id_kp_secureShellClient,
            rfc6187.id_kp_secureShellServer,
        ]

        substrate = pem.readBase64fromText(self.cert_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        count = 0
        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
                extnValue, rest = der_decoder(
                    extn['extnValue'], asn1Spec=rfc5280.ExtKeyUsageSyntax())
                for oid in extnValue:
                    if oid in ssh_eku_oids:
                        count += 1

        self.assertEqual(1, count)


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
It's been here for some time, but after a few false starts (the not-so-simple SOAP protocol and WS-BS*) it seems the world has finally agreed on Json/REST as the de-facto protocol, or lingua franca, of the web. How do we discover it? Discovery has always been an afterthought with REST, but these days most REST frameworks come with built-in discovery (at least for humans). But as devs, we always like consistency; that's where things like Swagger come in (read the 2.0 spec here). Swagger stands on the shoulders of the Json Schema project. With the types announced, it's a stone's throw to auto-generate bindings for all the strongly typed languages out there. Having all these services out there that we rely on is great, but we need to be able to stub them out with fakes. This is where open source projects like mockbin come in. By being able to record and then replay requests, you can isolate yourself from your other (micro)services. Swagger supports marking bits of your API as deprecated, which gives your users a warning that they should be upgrading.
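To make that last point concrete, here is a minimal sketch of what such a deprecation flag looks like in a Swagger 2.0 document. The spec format is plain JSON, so it is expressed below as a Python dict and serialized with the standard json module; the API title, paths and operation summaries are invented for the example.

import json

# A minimal, hypothetical Swagger 2.0 description: the legacy /v1/widgets
# operation carries "deprecated": true, which generated clients and UIs
# surface as an upgrade warning, while /v2/widgets is its replacement.
spec = {
    "swagger": "2.0",
    "info": {"title": "Example API", "version": "2.0.0"},
    "paths": {
        "/v1/widgets": {
            "get": {
                "summary": "List widgets (legacy)",
                "deprecated": True,
                "responses": {"200": {"description": "OK"}},
            }
        },
        "/v2/widgets": {
            "get": {
                "summary": "List widgets",
                "responses": {"200": {"description": "OK"}},
            }
        },
    },
}

print(json.dumps(spec, indent=2))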
#!/usr/bin/python3

import json

from flask import Blueprint, render_template
import flask_babel

from lib import load_data, Translator, required_roles, data_need_reload
from inventory_filters import dummy_filter, filter_3, filter_4, filter_6

# Gettext hack
_ = lambda x: x


class Manager:
    def __init__(self, sort_items):
        self._translator = Translator('en')
        self._guilds = []
        self._filters = []
        self._sort_items = sort_items
        with open("data/guilds.json", "r") as f:
            keys = json.load(f)
        for k in keys:
            tmp = load_data(k, self._sort_items)
            tmp.api_key = k
            self._guilds.append(tmp)
        self.add_filter(dummy_filter)
        self.current_filter = self._filters[0]
        self.current_guild = None

    def add_filter(self, f):
        self._filters.append(f)

    def refresh_guilds(self):
        # Rebind by index so the freshly loaded guild actually replaces
        # the stale entry in the list (assigning to the loop variable
        # alone would have no effect on the list).
        for idx, g in enumerate(self._guilds):
            if data_need_reload(g.api_key):
                fresh = load_data(g.api_key, self._sort_items)
                fresh.api_key = g.api_key
                self._guilds[idx] = fresh

    def get_items(self):
        items = []
        pick_from = []
        if self.current_guild:
            pick_from = [self.current_guild]
        else:
            pick_from = self._guilds
        for g in pick_from:
            for i in g.room:
                # Filter directly, quantity will be dealt with later
                # (after merge)
                if self.current_filter(i):
                    i.origins = [(g, i.stack)]
                    items.append(i)

        # We need to assume there's at least one item in our list of items
        # If not, just return directly
        if not items:
            return []

        # Sort our list, for a nice display (and faster merging)
        items.sort()

        origins = [items[0].origins[0]]
        previous = items[0]
        merged = []

        # List is already sorted, so we can fasten up. Start from the
        # second item: items[0] already seeded `origins` and `previous`.
        for i in items[1:]:
            if i == previous:
                origins.append(i.origins[0])
            else:
                total_stack = sum(x[1] for x in origins)
                previous.origins = origins
                # Now that we merged our item, we can check quantity
                if self.current_filter.quantity() < total_stack:
                    merged.append(previous)
                origins = [i.origins[0]]
                previous = i

        # Flush the final group as well; the loop only flushes a group
        # when it encounters the next, different item.
        total_stack = sum(x[1] for x in origins)
        previous.origins = origins
        if self.current_filter.quantity() < total_stack:
            merged.append(previous)

        return merged

    # Used by templates
    def guilds(self):
        dummy = {"gid": "all", "name": _('All guilds')}
        yield dummy
        yield from self._guilds

    # Used by templates
    def filters(self):
        yield from enumerate(self._filters)

    # Used by templates
    def tooltip(self, item):
        out = self.translation(item)
        for guild, number in item.origins:
            out += " - {} ({})".format(guild.name, number)
        return out

    # Used by templates
    def title(self):
        if self.current_guild:
            out = _('{} - {}'.format(self.current_guild.name,
                                     self.current_filter.description()))
        else:
            out = _('{} - {}'.format(_('All guilds'),
                                     self.current_filter.description()))
        return out

    def set_guild(self, id):
        if id == 'all':
            self.id = id
            self.current_guild = None
        else:
            self.id = int(id)
            self.current_guild = next(
                x for x in self._guilds if x.gid == self.id)

    def set_filter(self, id):
        try:
            self.current_filter = self._filters[id]
        except IndexError:
            self.current_filter = self._filters[0]

    def item_url(self, item):
        total_stack = sum(x[1] for x in item.origins)
        return 'http://api.ryzom.com/item_icon.php?q={0}&s={1}&sheetid={2}.sitem'.format(
            item.quality, total_stack, item.sheet)

    def translation(self, item):
        return self._translator.translate(item)

    def set_lang(self, lang):
        self._translator.set_lang(lang)


def first_filter(item):
    return all(x in item.tags for x in ["material", "supreme"])


inventory = Blueprint('inventory', __name__)

m = Manager(sort_items=True)
m.add_filter(filter_3)
m.add_filter(filter_4)
m.add_filter(filter_6)


@inventory.before_request
def adjust_locale_inv():
    m.set_lang(str(flask_babel.get_locale()))


@inventory.route('/')
@required_roles('user')
def index():
    return render_template('inventory/index.html', manager=m)


@inventory.route('/list/<guild>/<filter>/')
@required_roles('user')
def list_inventory(guild, filter):
    m.refresh_guilds()
    m.set_guild(guild)
    m.set_filter(int(filter))
    return render_template('inventory/list.html', manager=m)
Software downloads by AdventNet Inc.

ManageEngine PasswordManager Pro Free Edition 4.5 download by AdventNet Inc.
Password management solution for enterprises to control access to shared administrative/privileged passwords of any 'enterprise resource' such as servers, databases, network devices and applications.

Free Windows Tools 1 download by AdventNet Inc.
Desktop Central provides a set of free Windows tools that Windows administrators might require on a day-to-day basis. It has a set of four tools, viz: Wake on LAN, GPO Update, Shutdown/Restart Tool, and Software Inventory Tool.

AdventNet ManageEngine Asset Explorer 4.0 download by AdventNet Inc.
ManageEngine AssetExplorer is comprehensive asset management software that offers enterprise-wide asset visibility and control to manage all your IT and non-IT assets. It offers a single view to track and manage ownership of all your assets.

AdventNet ManageEngine OpManager 7.0 download by AdventNet Inc.
ManageEngine OpManager is comprehensive, easy-to-use network, systems and applications monitoring software that offers integrated fault and performance management functionality.

SecureCentral PatchQuest Free Edition 4.3 download by AdventNet Inc.
SecureCentral PatchQuest is automated patch management software for distributing and managing security patches, hotfixes and updates across heterogeneous networks comprising Windows, Red Hat and Debian Linux systems in just a few simple clicks.

AdventNet Web NMS Trial Edition 4.7 download by AdventNet Inc.
An industry-leading network management framework for building custom EMS/NMS applications. Networking equipment vendors and other management solution providers rely on AdventNet Web NMS for rapid management application development and deployment.

AdventNet ManageEngine Desktop Central 4.0 download by AdventNet Inc.
This enterprise desktop management software provides Remote Configurations, Patch Management, Software Installation, Service Pack Installation, Desktop Sharing, System Tools, User Logon Reports and Active Directory Reports.

AdventNet Agent Tester 4 download by AdventNet Inc.
AdventNet SNMP Agent Tester toolkit tests SNMP-based agents. It has support for SNMP V1, V2C & V3 (USM), trap testing, MIB syntax and compliance testing, and performance & stress tests. A TCL engine runs customizable TCL test scripts and helps in regression tests.

AdventNet ManageEngine OpUtils 4 download by AdventNet Inc.
AdventNet ManageEngine OpUtils is a comprehensive set of 45+ unique system and network monitoring tools. This web-based software helps IT administrators monitor, diagnose and troubleshoot their IT resources. OpUtils is web-based and very quick.
import win32debug, sys, os


class D(object):
    """dict"""

    def __init__(self, d1, d2):
        self.d1 = d1
        self.d2 = d2

    def f(self):
        self.d_uninitialized = 1


class S(object):
    """slots"""
    __slots__ = 'slot1', 'slot2', 'slot_uninitialized'

    def __init__(self, s1, s2):
        self.slot1 = s1
        self.slot2 = s2


class DsubD(D):
    """dict, parent dict"""

    def __init__(self, d1, d2, d3):
        D.__init__(self, d1, d2)
        self.d3 = d3


class SsubS(S):
    """slots, parent slots"""
    __slots__ = 'slot3'

    def __init__(self, s1, s2, s3):
        S.__init__(self, s1, s2)
        self.slot3 = s3


class DsubS(S):
    """dict, parent slots"""

    def __init__(self, s1, s2, d3):
        S.__init__(self, s1, s2)
        self.d3 = d3


class SsubD(D):
    """slots, parent dict"""
    __slots__ = 'slot3'

    def __init__(self, d1, d2, s3):
        D.__init__(self, d1, d2)
        self.slot3 = s3


class SsubDS(D, S):
    """slots, parents dict and slots"""
    __slots__ = 'slot3'

    def __init__(self, d1, d2, s1, s2, s3):
        D.__init__(self, d1, d2)
        S.__init__(self, s1, s2)
        self.slot3 = s3


class NegDictOffset(tuple):
    """inheriting from tuple leads to a negative tp_dictoffset"""

    def __init__(self, tupleValue):
        self.attr = 'test'


d = D(1, 2)
s = S(1, 2)
dsubd = DsubD(1, 2, 3)
ssubs = SsubS(1, 2, 3)
dsubs = DsubS(1, 2, 3)
ssubd = SsubD(1, 2, 3)
ssubds = SsubDS(1, 2, 3, 4, 5)
negDictOffset = NegDictOffset((1, 2, 3))

win32debug.dump_process("object_details.dmp")
It occurs to me that I haven't written an editorial for quite some time. Those of you who follow my web site closely should have been writing me indignant letters complaining about my remissness in this matter. I haven't received any such letters. This could mean that I have no faithful readers; I prefer to think that it merely means that they are lax in their censorious efforts.

My faithful readers will have noticed that I have added a new motto to my home page - "Slum City Of The Mind". Slum City, indeed. I will have you know that I have received many compliments on my web pages, remarks such as "sprawling", "weird but amusing", and "My God, man, don't you have anything better to do with your time". Sprawling it certainly is - I keep adding (very) random pages every so often. Unfortunately they are mostly in certain specific areas. The effect is indeed that of a sprawling city of slums in which the ramshackle tar-paper shack sections keep spreading. It clearly behooves me to add a bit of taste, a few structures of intellectual elegance, a bit of class. That's the ticket. I shall bring forth new pages, pages that inspire and enthrall. This is my shining vision of the future. I shall start on it immediately. Almost immediately. Real soon now. Yeah, right.

One of the things that adds to the aura of slumminess is that I don't do very much in the line of la-de-da graphics. There are no graphics maps, no java applets flying little piglets across the screen, no frames. You are free to believe, if you choose to do so, that this represents an ideological purity on my part, a revulsion against the mindless proliferation of meaningless graphics and gaudy nothingness on the web. If you do, I will salute you for the nobility of your beliefs. However, I strongly suggest that you purchase nothing that is offered you for sale over the phone and that you never, ever watch infomercials.

The truth is, of course, that I edit raw HTML files with an editor. I have learned a few of the simpler tricks of the trade - I know how to change fonts and make tables. I have a stock of bars and buttons and such trash. I have template files to start with, so that mostly I just type in text along with the paragraph tags. One of these days I will get around to doing some artsy-fartsy multimedia web pages for people who want that sort of thing. Some day. In the mean time, Slum City, here I come.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import markitup.fields
import mptt.fields
import django.utils.timezone
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0001_initial'),
        ('auth', '0006_require_contenttypes_0002'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=200)),
                ('ptype', models.CharField(default='page', max_length=64, verbose_name='Type')),
                ('template', models.CharField(max_length=254, blank=True)),
                ('comment', models.TextField(max_length=254, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('date_approved', models.DateTimeField(null=True, verbose_name='Approved', blank=True)),
                ('date_publication', models.DateTimeField(null=True, verbose_name='Publication date', blank=True)),
                ('date_publication_end', models.DateTimeField(null=True, verbose_name='Publication end date', blank=True)),
                ('is_draft', models.BooleanField(default=True, verbose_name='Draft')),
                ('is_approved', models.BooleanField(default=False, verbose_name='Approved')),
                ('is_hidden', models.BooleanField(default=False, verbose_name='Hidden')),
                ('is_published', models.BooleanField(default=False, verbose_name='Published')),
                ('is_login_required', models.BooleanField(default=False, verbose_name='Login required')),
                ('is_permission_required', models.BooleanField(default=False, verbose_name='Permission required')),
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('created_by', models.ForeignKey(related_name='page_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='pages.Page', null=True)),
                ('sites', models.ManyToManyField(help_text='The site(s) where this pages is accessible.', to='sites.Site', verbose_name='sites')),
                ('updated_by', models.ForeignKey(related_name='page_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ['tree_id', 'lft'],
                'get_latest_by': 'date_publication',
                'verbose_name': 'Page',
                'verbose_name_plural': 'Pages',
                'permissions': (('view_page', 'Can view pages'),),
            },
        ),
        migrations.CreateModel(
            name='PageContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
            ],
            options={
                'verbose_name': 'Content',
                'verbose_name_plural': 'Content',
            },
        ),
        migrations.CreateModel(
            name='PageContentType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type')),
                ('class_name', models.CharField(max_length=100, verbose_name='Class')),
                ('admin_class_name', models.CharField(max_length=100, verbose_name='Admin Class')),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended')),
            ],
            options={
                'verbose_name': 'Content Type',
                'verbose_name_plural': 'Content Types',
            },
        ),
        migrations.CreateModel(
            name='PageGroupObjectPermission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_object', models.ForeignKey(to='pages.Page')),
                ('group', models.ForeignKey(to='auth.Group')),
                ('permission', models.ForeignKey(to='auth.Permission')),
            ],
            options={
                'verbose_name': 'Page Group Permissions',
                'verbose_name_plural': 'Pages Groups Permissions',
            },
        ),
        migrations.CreateModel(
            name='PageMarkdownContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
                ('language', models.CharField(default=b'en', max_length=5)),
                ('sid', models.CharField(unique=True, max_length=200)),
                ('name', models.CharField(unique=True, max_length=200, blank=True)),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
                ('comment', models.CharField(max_length=250, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('text', markitup.fields.MarkupField(no_rendered_field=True, blank=True)),
                ('is_template', models.BooleanField(default=False, verbose_name='Template?')),
                ('_text_rendered', models.TextField(editable=False, blank=True)),
                ('created_by', models.ForeignKey(related_name='pages_pagemarkdowncontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
                ('updated_by', models.ForeignKey(related_name='pages_pagemarkdowncontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Markdown',
                'verbose_name_plural': 'Markdown',
            },
        ),
        migrations.CreateModel(
            name='PageMetaContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
                ('language', models.CharField(default=b'en', max_length=5)),
                ('sid', models.CharField(unique=True, max_length=200)),
                ('name', models.CharField(unique=True, max_length=200, blank=True)),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
                ('comment', models.CharField(max_length=250, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('title', models.CharField(max_length=160, blank=True)),
                ('description', models.TextField(max_length=160, blank=True)),
                ('keywords', models.CharField(max_length=200, blank=True)),
                ('is_noindex', models.BooleanField(default=False, verbose_name='NoIndex')),
                ('is_nofollow', models.BooleanField(default=False, verbose_name='NoFollow')),
                ('created_by', models.ForeignKey(related_name='pages_pagemetacontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
                ('updated_by', models.ForeignKey(related_name='pages_pagemetacontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Meta',
                'verbose_name_plural': 'Meta',
            },
        ),
        migrations.CreateModel(
            name='PageRedirectContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
                ('language', models.CharField(default=b'en', max_length=5)),
                ('sid', models.CharField(unique=True, max_length=200)),
                ('name', models.CharField(unique=True, max_length=200, blank=True)),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
                ('comment', models.CharField(max_length=250, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('redirect_to_page', models.CharField(max_length=254, null=True, blank=True)),
                ('redirect_to_url', models.URLField(max_length=254, null=True, blank=True)),
                ('is_permanent', models.BooleanField(default=False, verbose_name='Permanent')),
                ('created_by', models.ForeignKey(related_name='pages_pageredirectcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
                ('updated_by', models.ForeignKey(related_name='pages_pageredirectcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Redirect',
                'verbose_name_plural': 'Redirect',
            },
        ),
        migrations.CreateModel(
            name='PageSlugContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
                ('language', models.CharField(default=b'en', max_length=5)),
                ('sid', models.CharField(unique=True, max_length=200)),
                ('name', models.CharField(unique=True, max_length=200, blank=True)),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
                ('comment', models.CharField(max_length=250, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('slug', models.CharField(max_length=245)),
                ('created_by', models.ForeignKey(related_name='pages_pageslugcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
                ('updated_by', models.ForeignKey(related_name='pages_pageslugcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Slug',
                'verbose_name_plural': 'Slugs',
            },
        ),
        migrations.CreateModel(
            name='PageTextContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
                ('language', models.CharField(default=b'en', max_length=5)),
                ('sid', models.CharField(unique=True, max_length=200)),
                ('name', models.CharField(unique=True, max_length=200, blank=True)),
                ('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
                ('comment', models.CharField(max_length=250, blank=True)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
                ('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
                ('text', models.TextField(blank=True)),
                ('is_template', models.BooleanField(default=False, verbose_name='Template?')),
                ('created_by', models.ForeignKey(related_name='pages_pagetextcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
                ('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
                ('updated_by', models.ForeignKey(related_name='pages_pagetextcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Text',
                'verbose_name_plural': 'Text',
            },
        ),
        migrations.CreateModel(
            name='PageUserObjectPermission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_object', models.ForeignKey(to='pages.Page')),
                ('permission', models.ForeignKey(to='auth.Permission')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Page User Permissions',
                'verbose_name_plural': 'Pages Users Permissions',
            },
        ),
        migrations.AddField(
            model_name='pagecontent',
            name='type',
            field=models.ForeignKey(to='pages.PageContentType'),
        ),
        migrations.AlterUniqueTogether(
            name='pageslugcontent',
            unique_together=set([('language', 'page')]),
        ),
        migrations.AlterUniqueTogether(
            name='pagemetacontent',
            unique_together=set([('language', 'page')]),
        ),
        migrations.AlterUniqueTogether(
            name='pagecontent',
            unique_together=set([('page', 'type')]),
        ),
    ]
Businesses, whether home-based or small, medium, or large scale, have certain printing requirements, and they need a good printer that can meet their printing demands in terms of output and speed. If an organization demands premium quality and durability from its printing devices, then the Brother HL-4200CN color laser printer is the ultimate choice. The printer is fast: it prints up to 26 pages per minute in either monochrome or color at an astonishing 1200 x 1200 dpi resolution. With a built-in duplex feature and a large, expandable paper capacity, the printer provides additional flexibility. It also combines the finest quality of printing with high speed and durability.

This printer is one of the fastest and most efficient printers in its class. It offers an excellent printing speed of up to 26 pages per minute in color as well as black and white. The onboard memory of 64 megabytes is upgradable to 320 MB. The printer can hold up to 600 sheets of A4 and legal-size paper: 500 sheets in the input tray and the remaining 100 in the multipurpose tray. The printer easily supports all your workgroup's color and monochrome output needs, and the expandable memory allows seamless performance.

Replacement Brother HL-4200CN toner cartridges are available for this printer when it runs out of toner. The device uses four toner cartridges, Cyan (TN12C), Magenta (TN12M), Yellow (TN12Y) and Black (TN12BK), which effectively yield nearly 2,500 pages for the black cartridge and 1,500 pages for each of the color cartridges.
import sublime, sublime_plugin, unicodedata

# Needed because unicodedata.name() doesn't return names of control characters
# See stackoverflow.com/questions/24552786/why-doesnt-unicodedata-recognise-certain-characters
UNNAMED_CONTROL_CHARS = {
    0x00: 'NULL',
    0x01: 'START OF HEADING',
    0x02: 'START OF TEXT',
    0x03: 'END OF TEXT',
    0x04: 'END OF TRANSMISSION',
    0x05: 'ENQUIRY',
    0x06: 'ACKNOWLEDGE',
    0x07: 'BELL',
    0x08: 'BACKSPACE',
    0x09: 'CHARACTER TABULATION',
    0x0A: 'LF: LINE FEED',
    0x0B: 'LINE TABULATION',
    0x0C: 'FF: FORM FEED',
    0x0D: 'CR: CARRIAGE RETURN',
    0x0E: 'SHIFT OUT',
    0x0F: 'SHIFT IN',
    0x10: 'DATA LINK ESCAPE',
    0x11: 'DEVICE CONTROL ONE',
    0x12: 'DEVICE CONTROL TWO',
    0x13: 'DEVICE CONTROL THREE',
    0x14: 'DEVICE CONTROL FOUR',
    0x15: 'NEGATIVE ACKNOWLEDGE',
    0x16: 'SYNCHRONOUS IDLE',
    0x17: 'END OF TRANSMISSION BLOCK',
    0x18: 'CANCEL',
    0x19: 'END OF MEDIUM',
    0x1A: 'SUBSTITUTE',
    0x1B: 'ESCAPE',
    0x1C: 'INFORMATION SEPARATOR FOUR',
    0x1D: 'INFORMATION SEPARATOR THREE',
    0x1E: 'INFORMATION SEPARATOR TWO',
    0x1F: 'INFORMATION SEPARATOR ONE',
    0x7F: 'DELETE',
    0x80: 'CONTROL U+0080',
    0x81: 'CONTROL U+0081',
    0x82: 'BREAK PERMITTED HERE',
    0x83: 'NO BREAK HERE',
    0x84: 'CONTROL U+0084',
    0x85: 'NEL: NEXT LINE',
    0x86: 'START OF SELECTED AREA',
    0x87: 'END OF SELECTED AREA',
    0x88: 'CHARACTER TABULATION SET',
    0x89: 'CHARACTER TABULATION WITH JUSTIFICATION',
    0x8A: 'LINE TABULATION SET',
    0x8B: 'PARTIAL LINE FORWARD',
    0x8C: 'PARTIAL LINE BACKWARD',
    0x8D: 'REVERSE LINE FEED',
    0x8E: 'SINGLE SHIFT TWO',
    0x8F: 'SINGLE SHIFT THREE',
    0x90: 'DEVICE CONTROL STRING',
    0x91: 'PRIVATE USE ONE',
    0x92: 'PRIVATE USE TWO',
    0x93: 'SET TRANSMIT STATE',
    0x94: 'CANCEL CHARACTER',
    0x95: 'MESSAGE WAITING',
    0x96: 'START OF GUARDED AREA',
    0x97: 'END OF GUARDED AREA',
    0x98: 'START OF STRING',
    0x99: 'CONTROL U+0099',
    0x9A: 'SINGLE CHARACTER INTRODUCER',
    0x9B: 'CONTROL SEQUENCE INTRODUCER',
    0x9C: 'STRING TERMINATOR',
    0x9D: 'OPERATING SYSTEM COMMAND',
    0x9E: 'PRIVACY MESSAGE',
    0x9F: 'APPLICATION PROGRAM COMMAND'
}


def getUnicodeCharName(char):
    charName = unicodedata.name(char, 'Unknown')  # Get the Unicode name assigned to the character
    charCode = ord(char)
    if charName == 'Unknown' and charCode in UNNAMED_CONTROL_CHARS:
        charName = UNNAMED_CONTROL_CHARS[charCode]
    return charName


def updateStatusBar(view):
    char = view.substr(view.sel()[0].a)  # The character at the cursor or start of selection
    hexCode = hex(ord(char)).upper().replace('X', 'x')
    statusString = 'Char ' + hexCode

    viewEncoding = view.encoding()
    if viewEncoding == 'UTF-8' or viewEncoding == 'Undefined':
        # The encoding may be Undefined if the file only contains 7-bit ASCII characters
        # which are common to many encodings (UTF-8, ISO-8859-1, Windows-1251, etc)
        charName = getUnicodeCharName(char)
        statusString += ' (' + charName + ')'

    view.set_status('zzCharacterInfo', statusString)


class CharacterInfoListener(sublime_plugin.EventListener):
    def on_selection_modified(self, view):
        updateStatusBar(view)
Consumer electronic devices require digital storage, whether local or in the cloud. At the 2018 CES, as well as the companion events Digital Experience and Showstoppers, I had the opportunity to talk with many suppliers of digital storage devices, as well as folks using digital storage to enable their applications. This blog looks at products from Toshiba, Western Digital and Seagate Technology. The following blog will look at the many other storage products and some applications at the 2018 CES.

The hard disk drive companies I spoke with (those represented in this blog) said that they had introduced their last generation of high-performance HDDs, the 10,000 and 15,000 RPM HDDs used for enterprise applications. They all plan to provide only flash storage for high-performance applications, likely using the high-performance NVMe interface. In the enterprise space, capacity HDDs providing inexpensive storage will be the dominant application for HDDs.

Toshiba makes several digital storage products, including flash memory as well as hard disk drives. They recently introduced a 14 TB conventional magnetic recording 3.5-inch HDD with 9 disks. This is the largest hard disk drive using conventional magnetic recording rather than shingled magnetic recording (SMR). Toshiba was also showing their large line of 2.5-inch hard disk drives, including their 7 mm thick drives. I was told that Toshiba's 1 TB 2.5-inch 7 mm thick PC drive uses SMR and that the industry is moving to SMR in one- and two-disk 2.5-inch drives.

In addition to its HDDs, Toshiba was showing their SSDs, including their brand-new RC100 NVMe M.2 SSD. This product uses Toshiba's 64-layer 3-bit-per-cell (TLC) BiCS flash memory and an in-house SSD controller. This small drive, shown below, can deliver up to 1.6 GB/s read and 1.2 GB/s write transfer speeds. These drives are available with 120-480 GB storage capacities. The RC100 is targeted at all mainstream computing, from gaming desktops and notebooks to mini-PCs. In addition to this new SSD, Toshiba was showing a large line of SATA, SAS and NVMe SSDs, including Ball Grid Array SSDs for embedded applications. They also showed external HDD and SSD products.

Seagate was showing their Copilot external storage product, developed in collaboration with drone maker DJI. This is a rugged product focused on ingesting content from flash memory cards in the field directly onto a large portable HDD, without having to use a computer. The product has a display that lets the consumer know the status of their ingested data. Seagate was also showing a product that is popular in the China market and was co-developed with JingDong, the Joy Drive, which combines a battery-powered external hard drive with phone or tablet charging. Both products offer USB-C interfaces and either USB-A or mini-USB as well. Seagate also introduced new versions of their LaCie Rugged drives with hardware encryption, and a fast external SSD product with up to 1 TB capacity and a native USB-C connector.

Seagate had their 3.5-inch HAMR drive on display, as well as a model of the dual-actuator drive that they announced in December. Seagate was also showing a 5U 84-drive enclosure and operating system providing up to 1 PB of capacity, with 84 drive trays supporting 8, 10 and 12 TB SAS drives, or SSDs. At some of the media events, they also partnered with Synology external storage products as well as fire- and waterproof external storage maker ioSafe.

Western Digital had several HDD and flash-based products on display at the CES.
Their HDD-based My Cloud Home device (available in single and Duo versions) now works with popular devices that support Amazon Alexa services to allow access to stored music collections via voice commands. The product also works with Google Chromecast to allow streaming home videos, TV shows and movies using Chromecast-enabled smart TVs. Western Digital had a number of flash storage technologies on display, including the world's smallest 1 TB USB device, a USB-C flash solution for portable storage, a 480 GB microSD card and its Ultra Fit USB 3.1 flash drive with capacities up to 256 GB. The company also introduced two portable SSDs. One is the My Passport Wireless SSD, which allows one-touch card copy for editing and sharing content in the field, as well as direct access to the device from third-party mobile creative apps. The SanDisk Extreme Portable SSD is targeted at saving and editing high-resolution photos and videos.

Consumer applications are at the forefront of generating digital content that needs to be stored and accessed. Toshiba, Western Digital and Seagate Technology have products that meet these consumer needs, whether at home or in the cloud. Their consumer products include new features such as new form factors, access via voice recognition and alliances with drone companies.
#!/usr/bin/env python
'''
(c) 2015-Present Brendan Bulik-Sullivan

Multi-trait genetic prediction.

'''
from __future__ import division
import argparse
import time
from core import bayes
from core import ld
import traceback
import sys
import pandas as pd

# Check for a pandas version that supports drop_duplicates(subset=...).
try:
    x = pd.DataFrame({'A': [1, 2, 3]})
    x.drop_duplicates(subset='A')
except TypeError:
    raise ImportError('mtpred requires pandas version > 0.15.2')

__version__ = '0.1'
MASTHEAD = "*************************************\n"
MASTHEAD += "* Multi-Trait Prediction (mtpred)\n"
MASTHEAD += "* Version {V}\n".format(V=__version__)
MASTHEAD += "* (C) 2015 Brendan Bulik-Sullivan\n"
MASTHEAD += "* Broad Institute of MIT and Harvard\n"
MASTHEAD += "* GNU General Public License v3\n"
MASTHEAD += "*************************************\n"


def sec_to_str(t):
    '''Convert seconds to days:hours:minutes:seconds.'''
    [d, h, m, s, n] = reduce(
        lambda ll, b: divmod(ll[0], b) + ll[1:], [(t, 1), 60, 60, 24])
    f = ''
    if d > 0:
        f += '{D}d:'.format(D=int(d))
    if h > 0:
        f += '{H}h:'.format(H=int(h))
    if m > 0:
        f += '{M}m:'.format(M=int(m))
    f += '{S}s'.format(S=s)
    return f


class Logger(object):

    '''Print to log file and stdout.'''

    def __init__(self, fh):
        self.log_fh = open(fh, 'wb', 0)

    def log(self, msg, stdout=True):
        '''Print to log file and stdout.'''
        print >>self.log_fh, msg
        if stdout:
            x = str(msg).split('\n')
            if len(x) > 20:
                msg = '\n'.join(x[1:10])
                msg += '\nOutput truncated. See log file for full list.'
            sys.stdout.write(str(msg) + '\n')
            sys.stdout.flush()

    def close(self):
        self.log_fh.close()


class ThrowingArgumentParser(argparse.ArgumentParser):

    def error(self, message):
        raise ValueError(message)

    def print_help(self, masthead=True):
        if masthead:
            print MASTHEAD
        argparse.ArgumentParser.print_help(self)


parser = ThrowingArgumentParser()
subparsers = parser.add_subparsers()

# eigendecomposition of LD matrices
eigen = subparsers.add_parser('ld', help='Calculate eigen-decompositions'
                              ' of LD matrices.')
eigen.add_argument('--bfile', default=None, type=str, required=True,
                   help='Filename prefix for plink .bed/.bim/.fam fileset.')
ld_wind = eigen.add_mutually_exclusive_group(required=True)
ld_wind.add_argument('--ld-wind-snp', default=None, type=int, nargs=2,
                     help='First arg window size, second arg buffer size.'
                     ' Units are # SNPs.', metavar=('WIND', 'BUF'))
ld_wind.add_argument('--ld-wind-kb', default=None, type=int, nargs=2,
                     help='Not implemented yet.', metavar=('WIND', 'BUF'))
ld_wind.add_argument('--ld-wind-cm', default=None, type=int, nargs=2,
                     help='Not implemented yet.', metavar=('WIND', 'BUF'))
eigen.add_argument('--covar', default=None, type=str, help='Covariates.')
eigen.add_argument('--out', default='mtpred', type=str,
                   help='Output filename prefix, default mtpred.')

# posterior mean beta
pmb = subparsers.add_parser('pmb', help='Convert summary statistics to '
                            'prediction weights.')
pmb.add_argument('--sumstats', default=None, type=str, required=True,
                 nargs='+',
                 help='List of filenames in .sumstats format containing GWAS '
                 'summary statistics.')
pmb.add_argument('--mat', default=None, type=str, required=True, nargs=2,
                 help='Genetic covariance matrix '
                 'followed by a sample overlap matrix.', metavar=('VG', 'P'))
pmb.add_argument('--eigen-ld', default=None, type=str, required=True,
                 help='Pre-computed eigen-decomposition of the LD matrix'
                 ' (from mypred.py ld).')
pmb.add_argument('--png', default=False, action='store_true',
                 help='Save per-normalized-genotype weights? '
                 '(default per-allele).')
pmb.add_argument('--out', default='mtpred', type=str,
                 help='Output filename prefix, default mtpred.')
pmb.add_argument('--block-buf', default=3, type=int,
                 help='Eigenblock buffer size.')


if __name__ == '__main__':
    try:
        args = parser.parse_args()
    except ValueError:  # override bad help msgs w/ subparsers
        print MASTHEAD
        # traceback.format_exc() takes no exception argument; it formats
        # the exception currently being handled.
        print traceback.format_exc()
        parser.print_help(masthead=False)
        sys.exit(2)

    if args.out is None:
        raise ValueError('--out is required.')

    log = Logger(args.out + '.log')
    start_time = time.time()
    try:
        opts = vars(args)
        opts = {x: ' '.join([str(i) for i in opts[x]])
                if type(opts[x]) is list else opts[x]
                for x in filter(lambda y: opts[y] is not None, opts)}
        header = MASTHEAD
        header += "\nOptions: \n"
        options = [
            ' --' + x.replace('_', '-') + ' ' + str(opts[x]) for x in opts]
        header += '\n'.join(options) + '\n'
        log.log(header)
        log.log('Beginning analysis at {T}'.format(T=time.ctime()))
        # Dispatch on the subcommand: only the 'ld' subparser defines --bfile.
        try:
            args.bfile
            which = 'ld'
        except AttributeError:
            which = 'pmb'
        if which == 'ld':
            ld.eigenblocks(args, log)
        elif which == 'pmb':
            bayes.mtpred(args, log)
        else:
            log.log('Something went horribly wrong.')
    except Exception:
        log.log(traceback.format_exc(), stdout=False)
        raise
    finally:
        log.log('Analysis finished at {T}'.format(T=time.ctime()))
        time_elapsed = round(time.time() - start_time, 2)
        log.log('Total time elapsed: {T}'.format(T=sec_to_str(time_elapsed)))
        log.close()
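A quick way to sanity-check the argument wiring above is to drive the parser programmatically. A minimal sketch follows; the filename prefixes are hypothetical placeholders, not files shipped with mtpred:

# Exercise the 'ld' subparser with placeholder paths ('mydata' and
# 'mydata_ld' are invented names for illustration only).
args = parser.parse_args(['ld', '--bfile', 'mydata',
                          '--ld-wind-snp', '1000', '200',
                          '--out', 'mydata_ld'])
print args.bfile        # 'mydata'
print args.ld_wind_snp  # [1000, 200]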
Melons are among the fruits whose appearance at the Market—despite the fact they’re in no way a natural feature of the British landscape—undoubtedly shouts, “summer’s here”. Whatever the weather might be doing. Currently all but tumbling off the greengrocers’ colourful stalls, they’re a motley bunch, in hues of green and yellow and orange, striped and bumped, each with its own personality. What melons and watermelons lack in the pick-up, walk and eat appeal of strawberries and cherries, they make up for in versatility. “Wrap a chunk of melon in a bit of Parma ham and you can’t really go wrong,” says Paul. A perennial Italian starter, “it’s as old as the hills, but it’s endured for a reason.” Jenny Chandler echoes this classic agrodolce combination in her watermelon, feta and herby bulgur wheat salad. “I like torpedo watermelon filled with vodka, personally!” laughs Charlie; Celia Brooks prefers the more sophisticated watermelon margarita: the perfect summer party drink.
from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to follow the merged_with relationship of a Name object.

    This will yield Name objects until it reaches a Name that does not
    have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        # Advance unconditionally so the loop terminates when the
        # chain ends (merged_into is None).
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring:

    1. Merging with a nonexistent Name object.

    2. Creating a loop of foreign key relationships.
       For example: Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that has been
       merged with another will redirect you to the Name it has been
       merged with. If a loop is created, we will also create the
       opportunity for an HTTP redirect loop.

    Unlike typical Django validators, this requires a model instance as a
    parameter, instead of the value, which in this case would have been
    the ID of the related model. Because of this requirement, this
    validator cannot be added via the `validators` kwarg on a ForeignKey
    field. Rather, this method should be called from the `clean` method.
    """
    # Return early if there is no need to validate.
    if name.merged_with is None:
        return

    # Get the Name class from the model instance, to avoid circularly
    # importing name.models.Name.
    Name = name.__class__

    # Prevent the user from attempting to merge with a nonexistent Name.
    try:
        merge_target = Name.objects.get(id=name.merged_with_id)
    except Name.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    # Prevent the user from attempting to merge a name with itself.
    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in merge_sequence.
    # If this happens we raise a validation error. If we don't find
    # duplicates, then no loop has been created; the generator raises its
    # own StopIteration and we implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
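Because the validator needs the model instance rather than a field value, it has to be wired in through `clean`. A minimal sketch of that hookup follows, assuming a stripped-down `Name` model; the real app's fields and options will differ:

from django.db import models


class Name(models.Model):
    # Hypothetical minimal model, for illustration only.
    name = models.CharField(max_length=255)
    merged_with = models.ForeignKey(
        'self', null=True, blank=True, related_name='merged_from')

    def clean(self):
        # Runs during full_clean(), e.g. from ModelForm validation.
        validate_merged_with(self)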
I am a big fan of a YouTube channel called Language Transfer. I have used it for Spanish, Arabic and Italian, and I would listen to an episode almost every morning while getting dressed, or at night while cooking. Language Transfer’s method for learning is termed the “Thinking Method”: you remember grammar and vocabulary not by simply memorizing them but by understanding the logic behind them. It does so by transferring the knowledge you already have from other languages to your target language. Remembering then becomes a matter of following a mental process of logical steps, which both makes it easier to trace things back if forgotten and provides you with a better understanding of the language. This works especially well between English and Latin languages, because you can ‘transfer’ a lot of words from one to the other (thanks to William the Conqueror).

Following a list of the most frequently used words in a language is a very efficient way to learn important vocabulary. This list also features example sentences for every word. If you make an account on this website, you can opt for a great daily or weekly newsletter with new words and example sentences at your own level.

This is a silly but entertaining educational series, available in a couple of languages and especially designed for language learners. It’s about a group of friends, their adolescent problems and their attempts to find love. The difficulty of the dialogues (all subtitled) increases with every episode, so it works great as a support to regular studying.

The daily news, but in “easy” French (still not really easy, I must admit).

I never recommend Duolingo as a main source for starting a language, as in my opinion it doesn’t focus enough on grammar, but for me it works wonderfully as a support to regular lessons and as a tool to keep in touch with a language when you’re not actively studying it.

Reading in another language is effective for learning but not much fun if you have to look up every other word. Readlang is a great free addition to your browser that gives a translation simply by clicking on a word. Even better: it saves those words (and the sentences in which they appear) for later review as flashcards.

If you use Spotify, give my favorite French songs a try!

A last tip: if you already know another Latin language, the best thing you can do is buy a grammar book or vocabulary list in that language. Even though you have to single out the faux amis, this will save you a lot of time and effort and makes language learning much more interesting.
# Copyright 2017 Bateared Collie
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

from vtk.util import numpy_support
from stk.DataTypes import vtkHyperCube
import numpy as np


def hyperCubeGenerate(array=np.zeros([100, 1]),
                      delta=[1., 1.],
                      origin=[0., 0.],
                      name="HyperCubeData",
                      copy=True
                      ):
    '''
    @summary: Generates a vtkHyperCube object from an input numpy array and axis definitions.

    @param array: input data array
    @type array: numpy nd array
    @param delta: sample spacing for each axis
    @type delta: [float]
    @param origin: origin value for each axis
    @type origin: [float]
    @param name: Name for data array
    @type name: string
    @param copy: perform a copy of the source array (turn this off at your own risk)
    @type copy: boolean

    ** Example Usage **

    >>> import stk.generators as gen
    >>> cube = gen.hyperCubeGenerate(array=np.zeros([10,20,5,6],dtype=np.float),
                                     delta=[0.1,0.2,1.,2.],
                                     origin=[-0.5,0.,-10.,3.],
                                     )

    .. warning:: If you use copy=False then you should not try to access the
        trace data after processing.
    '''

    # Check the axis number
    nDim = len(array.shape)
    if len(delta) != nDim:
        raise Exception("Delta length does not match array dimensions")
    if len(origin) != nDim:
        raise Exception("Origin length does not match array dimensions")

    tpD = vtkHyperCube()
    tpD.SetNDimensions(nDim)
    tpD.SetDimensions(np.flip(array.shape, 0))
    tpD.SetSpacing(delta)
    tpD.SetOrigin(origin)

    vtk_data = numpy_support.numpy_to_vtk(array.flatten(), deep=copy)
    vtk_data.SetName(name)
    tpD.GetPointData().SetScalars(vtk_data)

    return tpD
Bookings Connected Ltd will be what’s known as the “Controller” of the personal data you provide to us. We only collect basic personal data about you, which does not include any special categories of personal information (known as Special Category Data). It does, however, include your name, address, e-mail, telephone number and financial information. We need to know your location in order to show you the most relevant search results, and basic personal data in order to provide goods and services to you, process your orders, provide you with marketing, tell you about our products and services, etc. We will not collect any personal data from you that we do not need in order to provide and oversee this service.

We are based in the UK and we store our data on servers within the US. We will only use and store information for as long as it is required for the purposes for which it was collected. How long information will be stored depends on the information in question and what it is being used for. For example, if you ask us not to send you marketing e-mails, we will stop storing your e-mail address for marketing purposes (though we’ll keep a record of your preference not to be emailed).

If you wish to raise a complaint about how we have handled your personal data, you can contact us and we will investigate the matter.
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'export_metadata.ui'
#
# Created: Sun Dec  8 11:53:10 2013
#      by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore, QtGui


class Ui_Dialog_export_metadata(object):
    def setupUi(self, Dialog_export_metadata):
        Dialog_export_metadata.setObjectName("Dialog_export_metadata")
        Dialog_export_metadata.resize(428, 438)
        self.qdem_dialogButtonBox = QtGui.QDialogButtonBox(Dialog_export_metadata)
        self.qdem_dialogButtonBox.setGeometry(QtCore.QRect(70, 390, 341, 32))
        self.qdem_dialogButtonBox.setOrientation(QtCore.Qt.Horizontal)
        self.qdem_dialogButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.qdem_dialogButtonBox.setObjectName("qdem_dialogButtonBox")
        self.qdem_frame = QtGui.QFrame(Dialog_export_metadata)
        self.qdem_frame.setGeometry(QtCore.QRect(20, 110, 351, 191))
        self.qdem_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.qdem_frame.setFrameShadow(QtGui.QFrame.Raised)
        self.qdem_frame.setObjectName("qdem_frame")
        self.gridLayoutWidget = QtGui.QWidget(self.qdem_frame)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(90, 40, 251, 141))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.rmdd_gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.rmdd_gridLayout.setContentsMargins(0, 0, 0, 0)
        self.rmdd_gridLayout.setObjectName("rmdd_gridLayout")
        self.qdem_chk_export_exif_data = QtGui.QCheckBox(self.gridLayoutWidget)
        self.qdem_chk_export_exif_data.setObjectName("qdem_chk_export_exif_data")
        self.rmdd_gridLayout.addWidget(self.qdem_chk_export_exif_data, 0, 0, 1, 1)
        self.qdem_chk_export_xmp_data = QtGui.QCheckBox(self.gridLayoutWidget)
        self.qdem_chk_export_xmp_data.setObjectName("qdem_chk_export_xmp_data")
        self.rmdd_gridLayout.addWidget(self.qdem_chk_export_xmp_data, 1, 0, 1, 1)
        self.qdem_chk_export_iptc_data = QtGui.QCheckBox(self.gridLayoutWidget)
        self.qdem_chk_export_iptc_data.setObjectName("qdem_chk_export_iptc_data")
        self.rmdd_gridLayout.addWidget(self.qdem_chk_export_iptc_data, 3, 0, 1, 1)
        self.qdem_chk_export_gps_data = QtGui.QCheckBox(self.gridLayoutWidget)
        self.qdem_chk_export_gps_data.setObjectName("qdem_chk_export_gps_data")
        self.rmdd_gridLayout.addWidget(self.qdem_chk_export_gps_data, 2, 0, 1, 1)
        self.qdem_chk_export_iccprofile_data = QtGui.QCheckBox(self.gridLayoutWidget)
        self.qdem_chk_export_iccprofile_data.setObjectName("qdem_chk_export_iccprofile_data")
        self.rmdd_gridLayout.addWidget(self.qdem_chk_export_iccprofile_data, 4, 0, 1, 1)
        self.qdem_chk_export_all_metadata = QtGui.QCheckBox(self.qdem_frame)
        self.qdem_chk_export_all_metadata.setGeometry(QtCore.QRect(20, 10, 162, 17))
        self.qdem_chk_export_all_metadata.setObjectName("qdem_chk_export_all_metadata")
        self.qdem_lbl = QtGui.QLabel(Dialog_export_metadata)
        self.qdem_lbl.setGeometry(QtCore.QRect(20, 10, 391, 101))
        self.qdem_lbl.setWordWrap(True)
        self.qdem_lbl.setObjectName("qdem_lbl")
        self.qdem_lbl2 = QtGui.QLabel(Dialog_export_metadata)
        self.qdem_lbl2.setGeometry(QtCore.QRect(20, 310, 91, 16))
        self.qdem_lbl2.setObjectName("qdem_lbl2")
        self.horizontalLayoutWidget = QtGui.QWidget(Dialog_export_metadata)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 330, 401, 41))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(10, -1, -1, -1)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.qdem_txt_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_txt_radiobutton.setChecked(True)
        self.qdem_txt_radiobutton.setObjectName("qdem_txt_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_txt_radiobutton)
        self.qdem_tab_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_tab_radiobutton.setObjectName("qdem_tab_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_tab_radiobutton)
        self.qdem_xml_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_xml_radiobutton.setObjectName("qdem_xml_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_xml_radiobutton)
        self.qdem_html_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_html_radiobutton.setObjectName("qdem_html_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_html_radiobutton)
        self.qdem_xmp_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_xmp_radiobutton.setObjectName("qdem_xmp_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_xmp_radiobutton)
        self.qdem_csv_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
        self.qdem_csv_radiobutton.setEnabled(True)
        self.qdem_csv_radiobutton.setObjectName("qdem_csv_radiobutton")
        self.horizontalLayout.addWidget(self.qdem_csv_radiobutton)

        self.retranslateUi(Dialog_export_metadata)
        QtCore.QObject.connect(self.qdem_dialogButtonBox, QtCore.SIGNAL("accepted()"), Dialog_export_metadata.accept)
        QtCore.QObject.connect(self.qdem_dialogButtonBox, QtCore.SIGNAL("rejected()"), Dialog_export_metadata.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog_export_metadata)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_all_metadata, self.qdem_chk_export_exif_data)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_exif_data, self.qdem_chk_export_xmp_data)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_xmp_data, self.qdem_chk_export_gps_data)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_gps_data, self.qdem_chk_export_iptc_data)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_iptc_data, self.qdem_chk_export_iccprofile_data)
        Dialog_export_metadata.setTabOrder(self.qdem_chk_export_iccprofile_data, self.qdem_txt_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_txt_radiobutton, self.qdem_tab_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_tab_radiobutton, self.qdem_xml_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_xml_radiobutton, self.qdem_html_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_html_radiobutton, self.qdem_xmp_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_xmp_radiobutton, self.qdem_csv_radiobutton)
        Dialog_export_metadata.setTabOrder(self.qdem_csv_radiobutton, self.qdem_dialogButtonBox)

    def retranslateUi(self, Dialog_export_metadata):
        Dialog_export_metadata.setWindowTitle(QtGui.QApplication.translate("Dialog_export_metadata", "Export metadata", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_exif_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export exif data", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_xmp_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export xmp data", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_iptc_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export iptc data", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_gps_data.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "gps data can be both in exif and xmp data", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_gps_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export gps data", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_iccprofile_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export ICC profile", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_all_metadata.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "(Un)Check this value will (un)check all underlying values", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_chk_export_all_metadata.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export all metadata", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_lbl.setText(QtGui.QApplication.translate("Dialog_export_metadata", "This option gives you the possibility to export the metadata from your selected photo(s). A number of formats is supported.\n"
"All formats give an export file per selected photo, apart from csv which will give you one (big) csv file for all selected photos.", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_lbl2.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export to:", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_txt_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a simple text output per photo", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_txt_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Txt", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_tab_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a tab separated text output per photo", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_tab_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "tab", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_xml_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you an xml formatted output file per photo", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_xml_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "xml", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_html_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a table formatted html file per photo", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_html_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "html", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_xmp_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you an xmp structured file per photo", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_xmp_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "xmp", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_csv_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you one csv file for the selected photos", None, QtGui.QApplication.UnicodeUTF8))
        self.qdem_csv_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "csv", None, QtGui.QApplication.UnicodeUTF8))
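The generated class only builds widgets; the surrounding application has to host the dialog and read back the user's choices. A minimal sketch of that wiring follows; the wrapper class and option handling here are assumptions, not the application's actual code:

import sys
from PySide import QtGui


class ExportMetadataDialog(QtGui.QDialog):
    # Hypothetical host dialog; the real application's class may differ.
    def __init__(self, parent=None):
        super(ExportMetadataDialog, self).__init__(parent)
        self.ui = Ui_Dialog_export_metadata()
        self.ui.setupUi(self)


if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    dlg = ExportMetadataDialog()
    if dlg.exec_() == QtGui.QDialog.Accepted:
        # Read back which metadata blocks and output format were chosen.
        export_exif = dlg.ui.qdem_chk_export_exif_data.isChecked()
        export_gps = dlg.ui.qdem_chk_export_gps_data.isChecked()
        as_csv = dlg.ui.qdem_csv_radiobutton.isChecked()
        print(export_exif, export_gps, as_csv)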
Regardless of how long or how much you’ve smoked, or how often you’ve tried to quit, you can learn to become a nonsmoker. The Quit Smoking Kit will help you prepare for a quit attempt. You can take an addiction test and get useful tips on how to boost your willpower. It will help you think about your reasons for quitting and recognize the triggers that make you crave cigarettes. The Stress Control Kit contains a variety of important techniques and strategies to help reduce and ultimately control the negative effects of stress. The kit is arranged into seven sections; each section is designed to give background information and action-oriented tips to help manage stress effectively.
# -*- coding: utf-8 -*-
"""
Demonstrates use of GLScatterPlotItem with rapidly-updating plots.
"""

## Add path to library (just for examples; you do not need this)
import initExample

from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np

app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLScatterPlotItem')

g = gl.GLGridItem()
w.addItem(g)

##
##  First example is a set of points with pxMode=False
##  These demonstrate the ability to have points with real size down to a very small scale
##
pos = np.empty((53, 3))
size = np.empty((53))
color = np.empty((53, 4))
pos[0] = (1,0,0); size[0] = 0.5;   color[0] = (1.0, 0.0, 0.0, 0.5)
pos[1] = (0,1,0); size[1] = 0.2;   color[1] = (0.0, 0.0, 1.0, 0.5)
pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)

z = 0.5
d = 6.0
for i in range(3,53):
    pos[i] = (0,0,z)
    size[i] = 2./d
    color[i] = (0.0, 1.0, 0.0, 0.5)
    z *= 0.5
    d *= 2.0

sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)

##
##  Second example shows a volume of points with rapidly updating color
##  and pxMode=True
##
pos = np.random.random(size=(100000,3))
pos *= [10,-10,10]
pos[0] = (0,0,0)
color = np.ones((pos.shape[0], 4))
d2 = (pos**2).sum(axis=1)**0.5
size = np.random.random(size=pos.shape[0])*10
sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)
phase = 0.
w.addItem(sp2)

##
##  Third example shows a grid of points with rapidly updating position
##  and pxMode = False
##
pos3 = np.zeros((100,100,3))
pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]
pos3 = pos3.reshape(10000,3)
d3 = (pos3**2).sum(axis=1)**0.5
sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)
w.addItem(sp3)


def update():
    ## update volume colors
    global phase, sp2, d2
    s = -np.cos(d2*2+phase)
    color = np.empty((len(d2),4), dtype=np.float32)
    color[:,3] = np.clip(s * 0.1, 0, 1)
    color[:,0] = np.clip(s * 3.0, 0, 1)
    color[:,1] = np.clip(s * 1.0, 0, 1)
    color[:,2] = np.clip(s ** 3, 0, 1)
    sp2.setData(color=color)
    phase -= 0.1

    ## update surface positions and colors
    global sp3, d3, pos3
    z = -np.cos(d3*2+phase)
    pos3[:,2] = z
    color = np.empty((len(d3),4), dtype=np.float32)
    color[:,3] = 0.3
    color[:,0] = np.clip(z * 3.0, 0, 1)
    color[:,1] = np.clip(z * 1.0, 0, 1)
    color[:,2] = np.clip(z ** 3, 0, 1)
    sp3.setData(pos=pos3, color=color)

t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
New York, NY – March 8, 2010: New York Festivals® Radio Programming & Promotions Awards revealed its newly designed trophy, inspired by the classic 1920s Art Deco microphone made popular during radio’s golden age. The freshly minted trophy will debut at the 2010 Radio Awards ceremony and will be crafted in gold, silver and bronze colors representing the competition’s winner rankings. This original design will continue the tradition, started fifty-three years ago, of honoring the World’s Best Work™ in radio broadcasting, with entries from radio stations, networks and independent producers from around the globe.

NYF’s Radio Programming & Promotions Awards continually adapts its categories to the ever-changing, always evolving broadcast industry. In keeping with the demands of the consumer, NYF’s Radio Awards introduced new categories this year, including Podcast for the Arts, Best Student Artist Performance/Interview and Best Student Comedy. The complete list of categories includes: News Programs, News Inserts, Information/Documentary, Entertainment, Talk Programs, On-air Talent, Craft & Technique, Programming Format, Promotion Spots, IDs, Audio Podcasts, and Student categories.

The 2010 Radio Programming & Promotions GRANDJURY® enlists jurors from all over the world, representing some of the most recognizable voices and captivating programming producers in the industry. Entries to the competition are judged on their production values, organization, presentation of information, creativity, and use of the medium. Each jury member listens to and scores entries via NYF’s revolutionary online judging website.

The final deadline for the 2010 NYF Radio Programming & Promotions Awards is March 31, 2010. For more information, go to: www.newyorkfestivals.com. All winning entries of the 2010 New York Festivals® Radio Programming & Promotions Awards competition will be featured at: www.newyorkfestivals.com, and promoted by our network of representatives in 84 countries around the world.

International Awards Group (IAG) organizes advertising and programming competitions for the following brands: AME Awards® for Advertising Marketing Effectiveness; Hive Awards® for the Unsung Heroes of the Internet; Midas Awards® for Financial Services Communications; The Global Awards® for Healthcare Communications; NYF World's Best Advertising; NYF Film & Video; NYF Radio Programming & Promotions; and NYF TV Programming & Promotions. Entries to each of the competitions are judged around the world by panels of peers in their respective industries. Founded in 1957, IAG and its brands have representation in 84 countries. For more information, go to www.newyorkfestivals.com.
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/help.ui',
# licensing of '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/help.ui' applies.
#
# Created: Thu Mar  7 12:39:25 2019
#      by: pyside2-uic  running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!

from PySide2 import QtCore, QtGui, QtWidgets


class Ui_Help(object):
    def setupUi(self, Help):
        Help.setObjectName("Help")
        Help.resize(902, 734)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Help.sizePolicy().hasHeightForWidth())
        Help.setSizePolicy(sizePolicy)
        self.horizontalLayout = QtWidgets.QHBoxLayout(Help)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.splitter = QtWidgets.QSplitter(Help)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName("splitter")
        self.treeWidget = QtWidgets.QTreeWidget(self.splitter)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
        self.treeWidget.setSizePolicy(sizePolicy)
        self.treeWidget.setObjectName("treeWidget")
        self.treeWidget.headerItem().setText(0, "1")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.splitter)
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.layoutWebKit = QtWidgets.QVBoxLayout()
        self.layoutWebKit.setObjectName("layoutWebKit")
        self.verticalLayout.addLayout(self.layoutWebKit)
        self.horizontalLayoutButtons = QtWidgets.QHBoxLayout()
        self.horizontalLayoutButtons.setObjectName("horizontalLayoutButtons")
        self.pushButtonPrevious = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.pushButtonPrevious.setObjectName("pushButtonPrevious")
        self.horizontalLayoutButtons.addWidget(self.pushButtonPrevious)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayoutButtons.addItem(spacerItem)
        self.pushButtoNext = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.pushButtoNext.setObjectName("pushButtoNext")
        self.horizontalLayoutButtons.addWidget(self.pushButtoNext)
        self.verticalLayout.addLayout(self.horizontalLayoutButtons)
        self.horizontalLayout.addWidget(self.splitter)

        self.retranslateUi(Help)
        QtCore.QMetaObject.connectSlotsByName(Help)

    def retranslateUi(self, Help):
        Help.setWindowTitle(QtWidgets.QApplication.translate("Help", "Help", None, -1))
        self.pushButtonPrevious.setText(QtWidgets.QApplication.translate("Help", "Previous", None, -1))
        self.pushButtoNext.setText(QtWidgets.QApplication.translate("Help", "Next", None, -1))
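This class only lays out the tree, buttons and the empty layoutWebKit placeholder; the hosting code has to fill that layout with an actual content view. A minimal sketch follows, assuming a QTextBrowser stands in for the web view the layout name implies (the host widget class here is hypothetical, not learnbot's real help window):

import sys
from PySide2 import QtWidgets


class HelpWindow(QtWidgets.QWidget):
    # Hypothetical host widget for the generated Ui_Help class.
    def __init__(self, parent=None):
        super(HelpWindow, self).__init__(parent)
        self.ui = Ui_Help()
        self.ui.setupUi(self)
        # The .ui file leaves layoutWebKit empty; drop a browser into it.
        self.browser = QtWidgets.QTextBrowser(self)
        self.ui.layoutWebKit.addWidget(self.browser)
        # Wire the Previous/Next buttons to the browser's history slots.
        self.ui.pushButtonPrevious.clicked.connect(self.browser.backward)
        self.ui.pushButtoNext.clicked.connect(self.browser.forward)


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    w = HelpWindow()
    w.show()
    sys.exit(app.exec_())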
Toss cauliflower, mushrooms and bell pepper with 2 tablespoons oil. Sprinkle with thyme and cumin. Toss until evenly coated. Roast in preheated 450°F oven 15 to 20 minutes or until cauliflower is golden brown, stirring occasionally. Meanwhile, heat remaining 2 teaspoons oil in large saucepan on medium-high heat. Add onions; cook and stir 5 minutes or until softened. Add vegetable mixture, broth and pepper. Bring to boil. Reduce heat to low; simmer 10 minutes or until cauliflower is tender, stirring occasionally. Ladle into soup bowls to serve.