code
stringlengths
1
18.2k
results.append(meta) except Exception as e: self.api.logger.exception('parse search groups result error: %s' % e) return build_list_result(results, xml)
def list_joined_groups(self, user_alias=None):
    """
    List the groups a user has joined.

    :param user_alias: user alias; defaults to the current session user
    :return: single-page list result built by ``build_list_result``
    """
    xml = self.api.xml(API_GROUP_LIST_JOINED_GROUPS % (user_alias or self.api.user_alias))
    xml_results = xml.xpath('//div[@class="group-list group-cards"]/ul/li')
    results = []
    for item in xml_results:
        try:
            icon = item.xpath('.//img/@src')[0]
            link = item.xpath('.//div[@class="title"]/a')[0]
            url = link.get('href')
            name = link.text
            # the group alias is the last path segment of the group URL
            alias = url.rstrip('/').rsplit('/', 1)[1]
            # the member count is rendered like "(1234)"; strip the parentheses
            user_count = int(item.xpath('.//span[@class="num"]/text()')[0][1:-1])
            results.append({
                'icon': icon,
                'alias': alias,
                'url': url,
                'name': name,
                'user_count': user_count,
            })
        except Exception as e:
            # tolerate individual malformed cards; keep parsing the rest
            self.api.logger.exception('parse joined groups exception: %s' % e)
    return build_list_result(results, xml)
def join_group(self, group_alias, message=None):
    """
    Join a group.

    :param group_alias: group id
    :param message: application message, used when the group requires approval
    :return: one of
        - joined: joined successfully
        - waiting: waiting for moderator approval
        - initial: join failed / nothing changed
    """
    xml = self.api.xml(API_GROUP_GROUP_HOME % group_alias, params={
        'action': 'join',
        'ck': self.api.ck(),
    })
    # the sidebar text reveals the membership state after the join attempt
    misc = xml.xpath('//div[@class="group-misc"]')[0]
    intro = misc.xpath('string(.)') or ''
    if intro.find('退出小组') > -1:
        # a "leave group" link means we are already a member
        return 'joined'
    elif intro.find('你已经申请加入小组') > -1:
        # "you have already applied" => request is pending
        return 'waiting'
    elif intro.find('申请加入小组') > -1:
        # "apply to join" => approval required; submit the application
        res = self.api.xml(API_GROUP_GROUP_HOME % group_alias, 'post', data={
            'ck': self.api.ck(),
            'action': 'request_join',
            'message': message,
            'send': '发送',
        })
        misc = res.xpath('//div[@class="group-misc"]')[0]
        intro = misc.xpath('string(.)') or ''
        if intro.find('你已经申请加入小组') > -1:
            return 'waiting'
        else:
            return 'initial'
    else:
        return 'initial'
def leave_group(self, group_alias): """ 退出小组 :param group_alias: 小组ID :return: """ return self.api.req(API_GROUP_GROUP_HOME % group_alias, params={ 'action': 'quit', 'ck': self.api.ck(), })
def search_topics(self, keyword, sort='relevance', start=0): """ 搜索话题 :param keyword: 关键字 :param sort: 排序方式 relevance/newest :param start: 翻页 :return: 带总数的列表 """ xml = self.api.xml(API_GROUP_SEARCH_TOPICS % (start, sort, keyword)) return build_list_result(self._parse_topic_table(xml), xml)
def list_topics(self, group_alias, _type='', start=0): """ 小组内话题列表 :param group_alias: 小组ID :param _type: 类型 默认最新,hot:最热 :param start: 翻页 :return: 带下一页的列表 """ xml = self.api.xml(API_GROUP_LIST_GROUP_TOPICS % group_alias, params={ 'start': start, 'type': _type, }) return build_list_result(self._parse_topic_table(xml, 'title,author,comment,updated'), xml)
def list_joined_topics(self, start=0): """ 已加入的所有小组的话题列表 :param start: 翻页 :return: 带下一页的列表 """ xml = self.api.xml(API_GROUP_HOME, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,created,group'), xml)
def list_user_topics(self, start=0): """ 发表的话题 :param start: 翻页 :return: 带下一页的列表 """ xml = self.api.xml(API_GROUP_LIST_USER_PUBLISHED_TOPICS % self.api.user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,created,group'), xml)
def list_commented_topics(self, start=0): """ 回复过的话题列表 :param start: 翻页 :return: 带下一页的列表 """ xml = self.api.xml(API_GROUP_LIST_USER_COMMENTED_TOPICS % self.api.user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,time,group'), xml)
def list_liked_topics(self, user_alias=None, start=0): """ 喜欢过的话题 :param user_alias: 指定用户,默认当前 :param start: 翻页 :return: 带下一页的列表 """ user_alias = user_alias or self.api.user_alias xml = self.api.xml(API_GROUP_LIST_USER_LIKED_TOPICS % user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,time,group'), xml)
def list_reced_topics(self, user_alias=None, start=0): """ 推荐的话题列表 :param user_alias: 指定用户,默认当前 :param start: 翻页 :return: 带下一页的列表 """ user_alias = user_alias or self.api.user_alias xml = self.api.xml(API_GROUP_LIST_USER_RECED_TOPICS % user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,time,group,rec'), xml)
def add_topic(self, group_alias, title, content): """ 创建话题(小心验证码~) :param group_alias: 小组ID :param title: 标题 :param content: 内容 :return: bool """ xml = self.api.req(API_GROUP_ADD_TOPIC % group_alias, 'post', data={ 'ck': self.api.ck(), 'rev_title': title, 'rev_text': content, 'rev_submit': '好了,发言', }) return not xml.url.startswith(API_GROUP_ADD_TOPIC % group_alias)
def remove_topic(self, topic_id): """ 删除话题(需要先删除所有评论,使用默认参数) :param topic_id: 话题ID :return: None """ comment_start = 0 while comment_start is not None: comments = self.list_comments(topic_id, comment_start) for comment in comments['results']: self.remove_comment(topic_id, comment['id']) comment_start = comments['next_start'] return self.api.req(API_GROUP_REMOVE_TOPIC % topic_id, params={'ck': self.api.ck()})
def update_topic(self, topic_id, title, content): """ 更新话题 :param topic_id: 话题ID :param title: 标题 :param content: 内容 :return: bool """ xml = self.api.req(API_GROUP_UPDATE_TOPIC % topic_id, 'post', data={ 'ck': self.api.ck(), 'rev_title': title, 'rev_text': content, 'rev_submit': '好了,改吧', }) return not xml.url.startswith(API_GROUP_UPDATE_TOPIC % topic_id)
def list_comments(self, topic_id, start=0):
    """
    List comments under a topic.

    :param topic_id: topic id
    :param start: paging offset
    :return: list result with next-page info
    """
    xml = self.api.xml(API_GROUP_GET_TOPIC % topic_id, params={'start': start})
    xml_results = xml.xpath('//ul[@id="comments"]/li')
    results = []
    for item in xml_results:
        try:
            author_avatar = item.xpath('.//img/@src')[0]
            author_url = item.xpath('.//div[@class="user-face"]/a/@href')[0]
            author_alias = slash_right(author_url)
            author_signature = item.xpath('.//h4/text()')[1].strip()
            author_nickname = item.xpath('.//h4/a/text()')[0].strip()
            created_at = item.xpath('.//h4/span/text()')[0].strip()
            # serialize the reply body's first <p> back into an HTML string
            content = etree.tostring(item.xpath('.//div[@class="reply-doc content"]/p')[0]).decode('utf8').strip()
            cid = item.get('id')
            results.append({
                'id': cid,
                'author_avatar': author_avatar,
                'author_url': author_url,
                'author_alias': author_alias,
                'author_signature': author_signature,
                'author_nickname': author_nickname,
                'created_at': created_at,
                # undo entity escaping introduced by tostring
                'content': unescape(content),
            })
        except Exception as e:
            # skip malformed comment nodes but keep the rest of the page
            self.api.logger.exception('parse comment exception: %s' % e)
    return build_list_result(results, xml)
def add_comment(self, topic_id, content, reply_id=None): """ 添加评论 :param topic_id: 话题ID :param content: 内容 :param reply_id: 回复ID :return: None """ return self.api.req(API_GROUP_ADD_COMMENT % topic_id, 'post', data={ 'ck': self.api.ck(), 'ref_cid': reply_id, 'rv_comment': content, 'start': 0, 'submit_btn': '加上去', })
def remove_comment(self, topic_id, comment_id, reason='0', other=None):
    """
    Delete a comment. Owners of the topic may delete any comment; other
    users may only delete their own.

    :param topic_id: topic id
    :param comment_id: comment id
    :param reason: reason code 0/1/2 (off-topic / objectionable / other)
    :param other: free-text detail when reason is '2'
    :return: the raw HTTP response of the final delete request
    """
    params = {'cid': comment_id}
    data = {'cid': comment_id, 'ck': self.api.ck(), 'reason': reason, 'other': other, 'submit': '确定'}
    r = self.api.req(API_GROUP_REMOVE_COMMENT % topic_id, 'post', params, data)
    # an admin marker in the response means we moderate this topic and must
    # go through the moderator endpoint instead
    if r.text.find('douban_admin') > -1:
        r = self.api.req(API_GROUP_ADMIN_REMOVE_COMMENT % topic_id, 'post', params, data)
    self.api.logger.debug('remove comment final url is <%s>' % r.url)
    return r
def list_user_comments(self, topic_id, user_alias=None): """ 列出用户在话题下的所有回复 :param topic_id: 话题ID :param user_alias: 用户ID,默认当前 :return: 纯列表 """ user_alias = user_alias or self.api.user_alias comment_start = 0 results = [] while comment_start is not None: comments = self.list_comments(topic_id, comment_start) results += [item for item in comments['results'] if item['author_alias'] == user_alias] comment_start = comments['next_start'] return results
def remove_commented_topic(self, topic_id): """ 删除回复的话题(删除所有自己发布的评论) :param topic_id: 话题ID :return: None """ return [self.remove_comment(topic_id, item['id']) for item in self.list_user_comments(topic_id)]
def shell(name=None, **attrs): """Creates a new :class:`Shell` with a function as callback. This works otherwise the same as :func:`command` just that the `cls` parameter is set to :class:`Shell`. """ attrs.setdefault('cls', Shell) return click.command(name, **attrs)
def getLogger(name): """This is used by gcdt plugins to get a logger with the right level.""" logger = logging.getLogger(name) # note: the level might be adjusted via '-v' option logger.setLevel(logging_config['loggers']['gcdt']['level']) return logger
def _discover(self): """Discovers methods in the XML-RPC API and creates attributes for them on this object. Enables stuff like "magento.cart.create(...)" to work without having to define Python methods for each XML-RPC equivalent. """ self._resources = {} resources = self._client.resources(self._session_id) for resource in resources: self._resources[resource['name']] = MagentoResource( self._client, self._session_id, resource['name'], resource['title'], resource['methods'])
def keep_session_alive(self): """If the session expired, logs back in.""" try: self.resources() except xmlrpclib.Fault as fault: if fault.faultCode == 5: self.login() else: raise
def help(self): """Prints discovered resources and their associated methods. Nice when noodling in the terminal to wrap your head around Magento's insanity. """ print('Resources:') print('') for name in sorted(self._resources.keys()): methods = sorted(self._resources[name]._methods.keys()) print('{}: {}'.format(bold(name), ', '.join(methods)))
def run(self):
    """Import the controller and run it.

    This mimics the processing done by :func:`helper.start` when a
    controller is run in the foreground.  A new instance of
    ``self.controller`` is created and run until a keyboard interrupt
    occurs or the controller stops on its own accord.
    """
    segments = self.controller.split('.')
    # __import__ returns the top-level package, so walk the remaining
    # dotted path down to the controller class with getattr
    controller_class = reduce(getattr, segments[1:], __import__('.'.join(segments[:-1])))
    # '-f' == run in foreground
    cmd_line = ['-f']
    if self.configuration is not None:
        cmd_line.extend(['-c', self.configuration])
    args = parser.get().parse_args(cmd_line)
    controller_instance = controller_class(args, platform)
    try:
        controller_instance.start()
    except KeyboardInterrupt:
        controller_instance.stop()
def get_info(self):
    """
    Scans the input path and automatically determines the optimal
    piece size based on ~1500 pieces (up to MAX_PIECE_SIZE) along with
    other basic info, including total size (in bytes), the total number
    of files, piece size (in bytes), and resulting number of pieces. If
    ``piece_size`` has already been set, the custom value will be used
    instead.

    :return: ``(total_size, total_files, piece_size, num_pieces)``
    """
    if os.path.isfile(self.path):
        total_size = os.path.getsize(self.path)
        total_files = 1
    elif os.path.exists(self.path):
        total_size = 0
        total_files = 0
        for x in os.walk(self.path):
            for fn in x[2]:
                # honor exclusion globs
                if any(fnmatch.fnmatch(fn, ext) for ext in self.exclude):
                    continue
                fpath = os.path.normpath(os.path.join(x[0], fn))
                fsize = os.path.getsize(fpath)
                # zero-byte and hidden files do not count toward the torrent
                if fsize and not is_hidden_file(fpath):
                    total_size += fsize
                    total_files += 1
    else:
        raise exceptions.InvalidInputException
    if not (total_files and total_size):
        raise exceptions.EmptyInputException
    if self.piece_size:
        ps = self.piece_size
    else:
        # aim for ~1500 pieces: the next power of two >= total_size / 1500,
        # clamped to [MIN_PIECE_SIZE, MAX_PIECE_SIZE]
        ps = 1 << max(0, math.ceil(math.log(total_size / 1500, 2)))
        if ps < MIN_PIECE_SIZE:
            ps = MIN_PIECE_SIZE
        if ps > MAX_PIECE_SIZE:
            ps = MAX_PIECE_SIZE
    return (total_size, total_files, ps, math.ceil(total_size / ps))
def generate(self, callback=None):
    """
    Computes and stores piece data. Returns ``True`` on success, ``False``
    otherwise.

    :param callback: progress/cancellation callable with method
        signature ``(filename, pieces_completed, pieces_total)``.
        Useful for reporting progress if dottorrent is used in a
        GUI/threaded context, and if torrent generation needs to be
        cancelled. The callable's return value should evaluate to
        ``True`` to trigger cancellation.
    """
    files = []
    single_file = os.path.isfile(self.path)
    if single_file:
        files.append((self.path, os.path.getsize(self.path), {}))
    elif os.path.exists(self.path):
        for x in os.walk(self.path):
            for fn in x[2]:
                if any(fnmatch.fnmatch(fn, ext) for ext in self.exclude):
                    continue
                fpath = os.path.normpath(os.path.join(x[0], fn))
                fsize = os.path.getsize(fpath)
                # zero-byte and hidden files are skipped
                if fsize and not is_hidden_file(fpath):
                    files.append((fpath, fsize, {}))
    else:
        raise exceptions.InvalidInputException
    total_size = sum([x[1] for x in files])
    if not (len(files) and total_size):
        raise exceptions.EmptyInputException
    # set piece size if not already set
    if self.piece_size is None:
        self.piece_size = self.get_info()[2]
    if files:
        self._pieces = bytearray()
        i = 0
        num_pieces = math.ceil(total_size / self.piece_size)
        pc = 0  # pieces completed so far
        buf = bytearray()  # carries leftover bytes across file boundaries
        while i < len(files):
            fe = files[i]
            f = open(fe[0], 'rb')
            if self.include_md5:
                md5_hasher = md5()
            else:
                md5_hasher = None
            for chunk in iter(lambda: f.read(self.piece_size), b''):
                buf += chunk
                # hash a piece once a full piece has accumulated, or flush
                # early while reading the final file
                if len(buf) >= self.piece_size \
                        or i == len(files)-1:
                    piece = buf[:self.piece_size]
                    self._pieces += sha1(piece).digest()
                    del buf[:self.piece_size]
                    pc += 1
                    if callback:
                        cancel = callback(fe[0], pc, num_pieces)
                        if cancel:
                            f.close()
                            return False
                if self.include_md5:
                    md5_hasher.update(chunk)
            if self.include_md5:
                fe[2]['md5sum'] = md5_hasher.hexdigest()
            f.close()
            i += 1
        # Add pieces from any remaining data
        while len(buf):
            piece = buf[:self.piece_size]
            self._pieces += sha1(piece).digest()
            del buf[:self.piece_size]
            pc += 1
            if callback:
                cancel = callback(fe[0], pc, num_pieces)
                if cancel:
                    return False
    # Create the torrent data structure
    data = OrderedDict()
    if len(self.trackers) > 0:
        data['announce'] = self.trackers[0].encode()
        if len(self.trackers) > 1:
            data['announce-list'] = [[x.encode()] for x in self.trackers]
    if self.comment:
        data['comment'] = self.comment.encode()
    if self.created_by:
        data['created by'] = self.created_by.encode()
    else:
        data['created by'] = DEFAULT_CREATOR.encode()
    if self.creation_date:
        data['creation date'] = int(self.creation_date.timestamp())
    if self.web_seeds:
        data['url-list'] = [x.encode() for x in self.web_seeds]
    data['info'] = OrderedDict()
    if single_file:
        data['info']['length'] = files[0][1]
        if self.include_md5:
            data['info']['md5sum'] = files[0][2]['md5sum']
        data['info']['name'] = files[0][0].split(os.sep)[-1].encode()
    else:
        data['info']['files'] = []
        path_sp = self.path.split(os.sep)
        for x in files:
            fx = OrderedDict()
            fx['length'] = x[1]
            if self.include_md5:
                fx['md5sum'] = x[2]['md5sum']
            # store each file's path relative to the torrent root
            fx['path'] = [y.encode() for y in x[0].split(os.sep)[len(path_sp):]]
            data['info']['files'].append(fx)
        data['info']['name'] = path_sp[-1].encode()
    data['info']['pieces'] = bytes(self._pieces)
    data['info']['piece length'] = self.piece_size
    data['info']['private'] = int(self.private)
    if self.source:
        data['info']['source'] = self.source.encode()
    self._data = data
    return True
def info_hash_base32(self): """ Returns the base32 info hash of the torrent. Useful for generating magnet links. .. note:: ``generate()`` must be called first. """ if getattr(self, '_data', None): return b32encode(sha1(bencode(self._data['info'])).digest()) else: raise exceptions.TorrentNotGeneratedException
def info_hash(self): """ :return: The SHA-1 info hash of the torrent. Useful for generating magnet links. .. note:: ``generate()`` must be called first. """ if getattr(self, '_data', None): return sha1(bencode(self._data['info'])).hexdigest() else: raise exceptions.TorrentNotGeneratedException
def req(self, url, method='get', params=None, data=None, auth=False):
    """
    Perform an API request.

    :type url: str
    :param url: API url
    :type method: str
    :param method: HTTP method
    :type params: dict
    :param params: query parameters
    :type data: dict
    :param data: request body
    :type auth: bool
    :param auth: if True and the session has expired, raise instead of
        returning the redirected response
    :rtype: requests.Response
    :return: Response
    """
    self.logger.debug('fetch api<%s:%s>' % (method, url))
    if auth and self.user_alias is None:
        raise Exception('cannot fetch api<%s> without session' % url)
    s = requests.Session()
    r = s.request(method, url, params=params, data=data,
                  cookies=self.cookies, headers=self.headers, timeout=self.timeout)
    s.close()
    # BUG FIX: the original used `r.url is not url`, an identity comparison
    # that is True for almost any pair of distinct str objects; compare by
    # value. A redirect to the session-expire page invalidates the session.
    if r.url != url and RE_SESSION_EXPIRE.search(r.url) is not None:
        self.expire()
        if auth:
            raise Exception('auth expired, could not fetch with<%s>' % url)
    return r
def json(self, url, method='get', params=None, data=None): """ 请求并返回json :type url: str :param url: API :type method: str :param method: HTTP METHOD :type params: dict :param params: query :type data: dict :param data: body :rtype: dict :return: """ r = self.req(url, method, params, data) return r.json()
def xml(self, url, method='get', params=None, data=None): """ 请求并返回xml :type url: str :param url: API :type method: str :param method: HTTP METHOD :type params: dict :param params: query :type data: dict :param data: body :rtype: html.HtmlElement :return: """ r = self.req(url, method, params, data) # this is required for avoid utf8-mb4 lead to encoding error return self.to_xml(r.content, base_url=r.url)
def persist(self): """ 持久化会话信息 """ with open(self.persist_file, 'w+') as f: json.dump({ 'cookies': self.cookies, 'user_alias': self.user_alias, }, f, indent=2) self.logger.debug('persist session to <%s>' % self.persist_file)
def load(self): """ 加载会话信息 """ if not os.path.isfile(self.persist_file): return with open(self.persist_file, 'r') as f: cfg = json.load(f) or {} self.cookies = cfg.get('cookies', {}) self.user_alias = cfg.get('user_alias') or None self.logger.debug('load session for <%s> from <%s>' % (self.user_alias, self.persist_file))
def flush(self): """ 更新会话信息,主要是ck, user_alias """ if 'dbcl2' not in self.cookies: return r = self.req(API_ACCOUNT_HOME) if RE_SESSION_EXPIRE.search(r.url): return self.expire() self.cookies.update(dict(r.cookies)) self.user_alias = slash_right(r.url) self.logger.debug('flush with user_alias <%s>' % self.user_alias) return
def login(self, username, password):
    """
    Log in with username and password.

    :type username: str
    :param username: user name (phone number or email)
    :type password: str
    :param password: password
    :return: self
    """
    r0 = self.req(API_HOME)
    time.sleep(1)  # brief pause between the two requests
    cookies = dict(r0.cookies)
    data = {
        'source': 'index_nav',
        'form_email': username,
        'form_password': password,
        'remember': 'on',
    }
    r1 = self.req(API_ACCOUNT_LOGIN, method='post', data=data)
    # collect cookies from the final response and every redirect hop
    cookies.update(dict(r1.cookies))
    [cookies.update(dict(r.cookies)) for r in r1.history]
    # 'dbcl2' is the authorization cookie; absence means login failed
    if 'dbcl2' not in cookies:
        raise Exception('Authorization failed for <%s>: %s' % (username, r1.url))
    cookies.update(dict(r1.cookies))
    self.logger.info('login with username <%s>' % username)
    self.use(cookies)
    return self
def use(self, cookies): """ 如果遭遇验证码,用这个接口 :type cookies: str|dict :param cookies: cookie字符串或者字典 :return: self """ self.cookies = dict([item.split('=', 1) for item in re.split(r'; *', cookies)]) \ if isinstance(cookies, str) else cookies self.flush() self.persist() return self
def logout(self): """ 登出会话 :return: self """ self.req(API_ACCOUNT_LOGOUT % self.ck()) self.cookies = {} self.user_alias = None self.persist()
def deploy(awsclient, applicationName, deploymentGroupName, deploymentConfigName,
           bucket, bundlefile):
    """Upload bundle and deploy to deployment group.

    This includes the bundle-action.

    :param applicationName: CodeDeploy application name
    :param deploymentGroupName: deployment group name
    :param deploymentConfigName: deployment configuration name
    :param bucket: S3 bucket receiving the bundle
    :param bundlefile: local path of the bundle archive to upload
    :return: deploymentId from create_deployment
    """
    # upload first; etag/version pin the exact S3 object revision we deploy
    etag, version = upload_file_to_s3(awsclient, bucket,
                                      _build_bundle_key(applicationName), bundlefile)
    client_codedeploy = awsclient.get_client('codedeploy')
    response = client_codedeploy.create_deployment(
        applicationName=applicationName,
        deploymentGroupName=deploymentGroupName,
        revision={
            'revisionType': 'S3',
            's3Location': {
                'bucket': bucket,
                'key': _build_bundle_key(applicationName),
                'bundleType': 'tgz',
                'eTag': etag,
                'version': version,
            },
        },
        deploymentConfigName=deploymentConfigName,
        description='deploy with tenkai',
        ignoreApplicationStopFailures=True
    )
    log.info(
        "Deployment: {} -> URL: https://{}.console.aws.amazon.com/codedeploy/home?region={}#/deployments/{}".format(
            Fore.MAGENTA + response['deploymentId'] + Fore.RESET,
            client_codedeploy.meta.region_name,
            client_codedeploy.meta.region_name,
            response['deploymentId'],
        ))
    return response['deploymentId']
def output_deployment_status(awsclient, deployment_id, iterations=100):
    """Wait until a deployment is in a steady state and output information.

    :param deployment_id: CodeDeploy deployment id
    :param iterations: maximum number of 10-second polling rounds
    :return: exit_code (0 on success/stop or poll limit, 1 on failure)
    """
    counter = 0
    steady_states = ['Succeeded', 'Failed', 'Stopped']
    client_codedeploy = awsclient.get_client('codedeploy')
    while counter <= iterations:
        response = client_codedeploy.get_deployment(deploymentId=deployment_id)
        status = response['deploymentInfo']['status']
        if status not in steady_states:
            log.info('Deployment: %s - State: %s' % (deployment_id, status))
            # BUG FIX: counter was never incremented, so a deployment stuck
            # in a non-steady state polled forever instead of at most
            # `iterations` times.
            counter += 1
            time.sleep(10)
        elif status == 'Failed':
            log.info(
                colored.red('Deployment: {} failed: {}'.format(
                    deployment_id,
                    json.dumps(response['deploymentInfo']['errorInformation'], indent=2)
                ))
            )
            return 1
        else:
            log.info('Deployment: %s - State: %s' % (deployment_id, status))
            break
    return 0
def stop_deployment(awsclient, deployment_id): """stop tenkai deployment. :param awsclient: :param deployment_id: """ log.info('Deployment: %s - stopping active deployment.', deployment_id) client_codedeploy = awsclient.get_client('codedeploy') response = client_codedeploy.stop_deployment( deploymentId=deployment_id, autoRollbackEnabled=True )
def _list_deployment_instances(awsclient, deployment_id): """list deployment instances. :param awsclient: :param deployment_id: """ client_codedeploy = awsclient.get_client('codedeploy') instances = [] next_token = None # TODO refactor generic exhaust_function from this while True: request = { 'deploymentId': deployment_id } if next_token: request['nextToken'] = next_token response = client_codedeploy.list_deployment_instances(**request) instances.extend(response['instancesList']) if 'nextToken' not in response: break next_token = response['nextToken'] return instances
def _get_deployment_instance_summary(awsclient, deployment_id, instance_id): """instance summary. :param awsclient: :param deployment_id: :param instance_id: return: status, last_event """ client_codedeploy = awsclient.get_client('codedeploy') request = { 'deploymentId': deployment_id, 'instanceId': instance_id } response = client_codedeploy.get_deployment_instance(**request) return response['instanceSummary']['status'], \ response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']
def _get_deployment_instance_diagnostics(awsclient, deployment_id, instance_id): """Gets you the diagnostics details for the first 'Failed' event. :param awsclient: :param deployment_id: :param instance_id: return: None or (error_code, script_name, message, log_tail) """ client_codedeploy = awsclient.get_client('codedeploy') request = { 'deploymentId': deployment_id, 'instanceId': instance_id } response = client_codedeploy.get_deployment_instance(**request) # find first 'Failed' event for i, event in enumerate(response['instanceSummary']['lifecycleEvents']): if event['status'] == 'Failed': return event['diagnostics']['errorCode'], \ event['diagnostics']['scriptName'], \ event['diagnostics']['message'], \ event['diagnostics']['logTail']
return None
def output_deployment_summary(awsclient, deployment_id): """summary :param awsclient: :param deployment_id: """ log.info('\ndeployment summary:') log.info('%-22s %-12s %s', 'Instance ID', 'Status', 'Most recent event') for instance_id in _list_deployment_instances(awsclient, deployment_id): status, last_event = \ _get_deployment_instance_summary(awsclient, deployment_id, instance_id) log.info(Fore.MAGENTA + '%-22s' + Fore.RESET + ' %-12s %s', instance_id, status, last_event)
def output_deployment_diagnostics(awsclient, deployment_id, log_group, start_time=None):
    """Log diagnostics for every failed instance of a deployment.

    :param awsclient: AWS client factory
    :param deployment_id: CodeDeploy deployment id
    :param log_group: CloudWatch log group to pull per-instance logs from
    :param start_time: only fetch CloudWatch events after this datetime
    """
    headline = False
    for instance_id in _list_deployment_instances(awsclient, deployment_id):
        diagnostics = _get_deployment_instance_diagnostics(
            awsclient, deployment_id, instance_id)
        #if error_code != 'Success':
        if diagnostics is not None:
            error_code, script_name, message, log_tail = diagnostics
            # header — printed once, only when at least one instance failed
            if not headline:
                headline = True
                log.info('\ndeployment diagnostics:')
            # event logs
            log.info('Instance ID: %s', Fore.MAGENTA + instance_id + Fore.RESET)
            log.info('Error Code: %s', error_code)
            log.info('Script Name: %s', script_name)
            log.info('Message: %s', message)
            log.info('Log Tail: %s', log_tail)
            # cloudwatch logs
            if check_log_stream_exists(awsclient, log_group, instance_id):
                logentries = get_log_events(
                    awsclient, log_group, instance_id,
                    datetime_to_timestamp(start_time))
                if logentries:
                    log.info('instance %s logentries', instance_id)
                    for e in logentries:
                        log.info(e['message'].strip())
def is_type(self): """ :return: :rtype: bool """ if self.__is_type_result is not None: return self.__is_type_result self.__is_type_result = self.__is_type() return self.__is_type_result
def validate(self, error_message=None): """ :raises TypeError: If the value is not matched the type that the class represented. """ if self.is_type(): return if not error_message: error_message = "invalid value type" raise TypeError( "{}: expected={}, actual={}".format(error_message, self.typename, type(self._data)) )
def convert(self): """ :return: Converted value. :raises typepy.TypeConversionError: If the value cannot convert. """ if self.is_type(): return self.force_convert() raise TypeConversionError( "failed to convert from {} to {}".format(type(self._data).__name__, self.typename) )
def always_fails( self, work_dict): """always_fails :param work_dict: dictionary for key/values """ label = "always_fails" log.info(("task - {} - start " "work_dict={}") .format(label, work_dict)) raise Exception( work_dict.get( "test_failure", "simulating a failure")) log.info(("task - {} - done") .format(label)) return True
def name_build(self, name, is_policy=False, prefix=True):
    """
    Build name from prefix and name + type.

    :param name: Name of the role/policy
    :param is_policy: True if "-policy" should be appended as a suffix
    :param prefix: True if the configured prefix should be prepended
    :return: Joined name
    """
    # use a regular local instead of shadowing the builtin `str`
    result = name
    # Add prefix
    if prefix:
        result = self.__role_name_prefix + result
    # Add policy suffix
    if is_policy:
        result = result + "-policy"
    return result
def name_strip(self, name, is_policy=False, prefix=True): """ Transforms name to AWS valid characters and adds prefix and type :param name: Name of the role/policy :param is_policy: True if policy should be added as suffix :param prefix: True if prefix should be added :return: Transformed and joined name """ str = self.name_build(name, is_policy, prefix) str = str.title() str = str.replace('-', '') return str
def build_policy(self, name, statements, roles, is_managed_policy=False):
    """
    Generate policy for IAM cloudformation template.

    :param name: Name of the policy
    :param statements: The "rules" the policy should have
    :param roles: The roles associated with this policy
    :param is_managed_policy: True if managed policy
    :return: Ref to new policy
    """
    if is_managed_policy:
        policy = ManagedPolicy(
            # second arg True => logical id carries the "-policy" suffix
            self.name_strip(name, True),
            PolicyDocument={
                "Version": self.VERSION_IAM,
                "Statement": statements,
            },
            Roles=roles,
            Path=self.__role_path,
        )
    else:
        policy = PolicyType(
            self.name_strip(name, True),
            PolicyName=self.name_strip(name, True),
            PolicyDocument={
                "Version": self.VERSION_IAM,
                "Statement": statements,
            },
            Roles=roles,
        )
    self.__template.add_resource(policy)
    return policy
def build_policy_bucket(self, bucket, name, statements):
    """
    Generate bucket policy for S3 bucket.

    :param bucket: The bucket to attach policy to
    :param name: The name of the bucket (to generate policy name from it)
    :param statements: The "rules" the policy should have
    :return: Ref to new policy
    """
    policy = self.__template.add_resource(
        BucketPolicy(
            # (name, is_policy=True, prefix=False): policy-suffixed logical
            # id without the role prefix
            self.name_strip(name, True, False),
            Bucket=troposphere.Ref(bucket),
            DependsOn=[
                troposphere.Name(bucket)
            ],
            PolicyDocument=Policy(
                Version=self.VERSION_IAM,
                Statement=statements
            )
        )
    )
    return policy
def build_role(self, name, policies=False):
    """
    Generate role for IAM cloudformation template.

    :param name: Name of role
    :param policies: List of managed policy ARNs to attach to this role
        (False = none)
    :return: Ref to new role
    """
    # the two original branches were identical except for ManagedPolicyArns;
    # build the shared kwargs once and add the optional argument
    role_kwargs = dict(
        AssumeRolePolicyDocument=Policy(
            Version=self.VERSION_IAM,
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        "Service",
                        self.__role_principals
                    ),
                    Action=[AssumeRole],
                )
            ]
        ),
        Path=self.__role_path,
    )
    if policies:
        role_kwargs['ManagedPolicyArns'] = policies
    role = self.__template.add_resource(
        Role(self.name_strip(name), **role_kwargs))
    # Add role to list for default policy
    self.__roles_list.append(troposphere.Ref(role))
    return role
def build_bucket(self, name, lifecycle_configuration=False, use_plain_name=False):
    """
    Generate S3 bucket statement.

    :param name: Name of the bucket
    :param lifecycle_configuration: Additional lifecycle configuration (default=False)
    :param use_plain_name: Just use the given name and do not add prefix
    :return: Ref to new bucket
    """
    if use_plain_name:
        name_bucket = name
        logical_id = name.title().replace('-', '')
    else:
        logical_id = self.name_strip(name, False, False)
        name_bucket = self.name_build(name)
    bucket_kwargs = {'BucketName': name_bucket}
    if lifecycle_configuration:
        bucket_kwargs['LifecycleConfiguration'] = lifecycle_configuration
    return self.__template.add_resource(Bucket(logical_id, **bucket_kwargs))
def directive_SPACE(self, label, params): """ label SPACE num Allocate space on the stack. `num` is the number of bytes to allocate """ # TODO allow equations params = params.strip() try: self.convert_to_integer(params) except ValueError: warnings.warn("Unknown parameters; {}".format(params)) return self.labels[label] = self.space_pointer if params in self.equates: params = self.equates[params] self.space_pointer += self.convert_to_integer(params)
def directive_DCD(self, label, params):
    """
    label DCD value[, value ...]

    Allocate a word space in read only memory for the value or list of values.
    """
    # TODO make this read only
    # TODO check for param size
    # TODO can take any length comma separated values (VAL DCD 1, 0x2, 3, 4
    params = params.strip()
    try:
        self.convert_to_integer(params)
    except ValueError:
        # TODO allow word DCDs (like SP_INIT, Reset_Handler)
        warnings.warn("Cannot reserve constant words; {}".format(params))
        return
    # Align address to a 4-byte boundary.
    # BUG FIX: the original added `space_pointer % 4` (the remainder itself),
    # which only lands on a multiple of 4 when the remainder is 2; pad by
    # the distance to the next multiple of 4 instead.
    if self.space_pointer % 4 != 0:
        self.space_pointer += 4 - self.space_pointer % 4
    self.labels[label] = self.space_pointer
    if params in self.equates:
        params = self.equates[params]
    # store the 32-bit value little-endian, one byte per memory cell
    for i in range(4):
        self.memory[self.space_pointer + i] = (self.convert_to_integer(params) >> (8*i)) & 0xFF
    self.space_pointer += 4
def directive_DCH(self, label, params):
    """
    label DCH value[, value ...]

    Allocate a half word space in read only memory for the value or list of
    values.
    """
    # TODO make this read only
    # TODO check for word size
    # align to a 2-byte boundary (the remainder is 0 or 1, so adding the
    # remainder itself is exact here)
    if self.space_pointer % 2 != 0:
        self.space_pointer += self.space_pointer % 2
    self.labels[label] = self.space_pointer
    if params in self.equates:
        params = self.equates[params]
    # little-endian: low byte first
    for offset in range(2):
        self.memory[self.space_pointer + offset] = (self.convert_to_integer(params) >> (8 * offset)) & 0xFF
    self.space_pointer += 2
def directive_DCB(self, label, params): """ label DCB value[, value ...] Allocate a byte space in read only memory for the value or list of values """ # TODO make this read only # TODO check for byte size self.labels[label] = self.space_pointer if params in self.equates: params = self.equates[params] self.memory[self.space_pointer] = self.convert_to_integer(params) & 0xFF self.space_pointer += 1
def get_celery_app(
        name=os.getenv(
            "CELERY_NAME",
            "worker"),
        auth_url=os.getenv(
            "BROKER_URL",
            "redis://localhost:6379/9"),
        backend_url=os.getenv(
            "BACKEND_URL",
            "redis://localhost:6379/10"),
        include_tasks=None,
        ssl_options=None,
        transport_options=None,
        path_to_config_module=os.getenv(
            "CONFIG_MODULE_PATH",
            "celery_loaders.work_tasks.celery_config"),
        worker_log_format=os.getenv(
            "WORKER_LOG_FORMAT",
            "%(asctime)s: %(levelname)s %(message)s"),
        **kwargs):
    """get_celery_app

    Build and configure a Celery application.

    :param name: name for this app
    :param auth_url: celery broker
    :param backend_url: celery backend
    :param include_tasks: list of modules containing tasks to add
    :param ssl_options: security options dictionary
    :param transport_options: transport options dictionary
    :param path_to_config_module: config module
    :param worker_log_format: format for logs
    """
    # None sentinel instead of a mutable [] default — a shared list
    # default would leak state across calls.
    if include_tasks is None:
        include_tasks = []
    if len(include_tasks) == 0:
        log.error(("creating celery app={} MISSING tasks={}")
                  .format(
                      name,
                      include_tasks))
    else:
        log.info(("creating celery app={} tasks={}")
                 .format(
                     name,
                     include_tasks))
    # get the Celery application
    app = celery.Celery(
        name,
        broker_url=auth_url,
        result_backend=backend_url,
        include=include_tasks)
    app.config_from_object(
        path_to_config_module,
        namespace="CELERY")
    app.conf.update(kwargs)
    if transport_options:
        log.info(("loading transport_options={}")
                 .format(transport_options))
        app.conf.update(**transport_options)  # custom transport options
    if ssl_options:
        log.info(("loading ssl_options={}")
                 .format(ssl_options))
        app.conf.update(**ssl_options)  # custom ssl options
    if len(include_tasks) > 0:
        app.autodiscover_tasks(include_tasks)
    return app
def check_arguments(self, **kwargs):
    """ Determine if the parameters meet the specifications.

    Each keyword names a rule; its value is the list of values to run
    through that rule. Rules are defined by methods starting with 'rule_'.

    :param kwargs: rule-name -> list of values to validate
    :raises LookupError: when a keyword has no registered rule
    :return:
    """
    for rule_name, values in kwargs.items():
        if rule_name not in self._rules:
            raise LookupError("Rule for {} does not exist. Make sure the rule starts with 'rule_'".format(rule_name))
        rule = self._rules[rule_name]
        for value in values:
            rule(value)
def link(self, key1, key2):
    """ Make these two keys have the same value.

    Records the association in both directions so either key resolves
    to the other.

    :param key1:
    :param key2:
    :return:
    """
    # TODO make this have more than one key linked
    # TODO Maybe make the value a set?
    self._linked_keys.update({key1: key2, key2: key1})
def instance_ik_model_receiver(fn):
    """
    A method decorator that filters out signals coming from models that
    don't have fields that function as ImageFieldSourceGroup sources.
    """
    @wraps(fn)
    def receiver(self, sender, **kwargs):
        # Non-class senders can never match a registered model class.
        if not inspect.isclass(sender):
            return
        # Dispatch at most once even if several source groups match —
        # the signal must not be handled more than one time.
        if any(issubclass(sender, group.model_class)
               for group in self._source_groups):
            fn(self, sender=sender, **kwargs)
    return receiver
def source_group_receiver(self, sender, source, signal, **kwargs):
    """
    Relay source group signals to the appropriate spec strategy.

    For every generator id registered for the sending source group,
    builds the spec, wraps it in an ImageCacheFile, and invokes the
    strategy callback mapped to this signal.
    """
    from imagekit.cachefiles import ImageCacheFile
    source_group = sender
    # assumes kwargs always carries 'instance' — raises KeyError otherwise
    instance = kwargs['instance']
    # Ignore signals from unregistered groups.
    if source_group not in self._source_groups:
        return
    #HOOK -- update source to point to image file.
    # NOTE(review): this loop recomputes exactly what the list
    # comprehension below builds, and `spec_to_update` is never read.
    # It looks like redundant dead code, but generator_registry.get is
    # opaque from here — confirm it is side-effect free before removing.
    # (`id` also shadows the builtin.)
    for id in self._source_groups[source_group]:
        spec_to_update = generator_registry.get(id, source=source, instance=instance, field=hack_spec_field_hash[id])
    specs = [generator_registry.get(id, source=source, instance=instance, field=hack_spec_field_hash[id])
             for id in self._source_groups[source_group]]
    # Map the signal object to its strategy callback name.
    callback_name = self._signals[signal]
    # print 'callback_name? %s'%(callback_name)
    for spec in specs:
        # `file` shadows a builtin name (Python 2 era code).
        file = ImageCacheFile(spec)
        # print 'SEPC %s file %s'%(spec, file)
        call_strategy_method(file, callback_name)
def update_source_hashes(self, instance):
    """
    Stores hashes of the source image files so that they can be compared
    later to see whether the source image has changed (and therefore
    whether the spec file needs to be regenerated).
    """
    self.init_instance(instance)
    hashes = {
        attname: hash(getattr(instance, attname))
        for attname in self.get_source_fields(instance)
    }
    instance._ik['source_hashes'] = hashes
    return hashes
def get_source_fields(self, instance):
    """
    Returns the set of source image fields for the given instance.

    (The previous docstring said "list"; the function has always
    returned a set — deduplicated, order not guaranteed.)

    :param instance: model instance matched against each group's
        model_class via isinstance
    :return: set of image fields
    """
    return {src.image_field for src in self._source_groups
            if isinstance(instance, src.model_class)}
def on_success(self, retval, task_id, args, kwargs):
    """on_success

    Log a completed task's return value and call metadata.

    http://docs.celeryproject.org/en/latest/reference/celery.app.task.html

    :param retval: return value
    :param task_id: celery task id
    :param args: arguments passed into task
    :param kwargs: keyword arguments passed into task
    """
    message = ("{} SUCCESS - retval={} task_id={} "
               "args={} kwargs={}").format(
        self.log_label, retval, task_id, args, kwargs)
    log.info(message)
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """on_failure

    Log a failed task along with the exception that caused it.

    http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance

    :param exc: exception
    :param task_id: task id
    :param args: arguments passed into task
    :param kwargs: keyword arguments passed into task
    :param einfo: exception info
    """
    message = ("{} FAIL - exc={} "
               "args={} kwargs={}").format(
        self.log_label, str(exc), args, kwargs)
    log.error(message)
def check_hook_mechanism_is_intact(module):
    """Check if the hook configuration is absent or has both register AND
    deregister.

    :param module:
    :return: True if valid plugin / module.
    """
    # A module is intact when it defines neither hook or both hooks;
    # exactly one of register/deregister is a broken configuration.
    has_register = bool(check_register_present(module))
    has_deregister = bool(check_deregister_present(module))
    return has_register == has_deregister
def cfn_viz(template, parameters=None, outputs=None, out=sys.stdout):
    """Render dot output for cloudformation.template in json format.

    :param template: parsed template dict; must contain 'Resources'
    :param parameters: parameter values used to annotate source nodes
    :param outputs: output values used to annotate sink nodes
    :param out: writable stream that receives the dot text
    """
    # None sentinels instead of mutable {} defaults — a shared dict
    # default could leak state between calls if a callee mutates it.
    if parameters is None:
        parameters = {}
    if outputs is None:
        outputs = {}
    known_sg, open_sg = _analyze_sg(template['Resources'])
    (graph, edges) = _extract_graph(template.get('Description', ''),
                                    template['Resources'], known_sg, open_sg)
    graph['edges'].extend(edges)
    _handle_terminals(template, graph, 'Parameters', 'source', parameters)
    _handle_terminals(template, graph, 'Outputs', 'sink', outputs)
    graph['subgraphs'].append(_handle_pseudo_params(graph['edges']))
    _render(graph, out=out)