Dataset columns:

    max_stars_repo_path    string   (length 4 to 197)
    max_stars_repo_name    string   (length 6 to 120)
    max_stars_count        int64    (0 to 191k)
    id                     string   (length 1 to 8)
    content                string   (length 6 to 964k)
    score                  float64  (-0.88 to 3.95)
    int_score              int64    (0 to 4)
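Each row below pairs a source file (content) with its repository path, repository name, star count, an id, a per-record score, and its rounded form int_score; what the score measures is not stated here. As a minimal sketch of how such a dump might be loaded and filtered by int_score, assuming the rows are published as a Hugging Face dataset (the dataset path "user/code-quality-dump" is hypothetical):

# Minimal sketch: keep only records whose rounded score is 3 or higher.
# "user/code-quality-dump" is a hypothetical dataset path; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/code-quality-dump", split="train")
high_quality = ds.filter(lambda row: row["int_score"] >= 3)

for row in high_quality.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])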
rplugin/python3/denite/ui/default.py
timgates42/denite.nvim
0
4
<gh_stars>0 # ============================================================================ # FILE: default.py # AUTHOR: <NAME> <<EMAIL> at g<EMAIL>> # License: MIT license # ============================================================================ import re import typing from denite.util import echo, error, clearmatch, regex_convert_py_vim from denite.util import Nvim, UserContext, Candidates, Candidate from denite.parent import SyncParent class Default(object): @property def is_async(self) -> bool: return self._is_async def __init__(self, vim: Nvim) -> None: self._vim = vim self._denite: typing.Optional[SyncParent] = None self._selected_candidates: typing.List[int] = [] self._candidates: Candidates = [] self._cursor = 0 self._entire_len = 0 self._result: typing.List[typing.Any] = [] self._context: UserContext = {} self._bufnr = -1 self._winid = -1 self._winrestcmd = '' self._initialized = False self._winheight = 0 self._winwidth = 0 self._winminheight = -1 self._is_multi = False self._is_async = False self._matched_pattern = '' self._displayed_texts: typing.List[str] = [] self._statusline_sources = '' self._titlestring = '' self._ruler = False self._prev_action = '' self._prev_status: typing.Dict[str, typing.Any] = {} self._prev_curpos: typing.List[typing.Any] = [] self._save_window_options: typing.Dict[str, typing.Any] = {} self._sources_history: typing.List[typing.Any] = [] self._previous_text = '' self._floating = False self._filter_floating = False self._updated = False self._timers: typing.Dict[str, int] = {} self._matched_range_id = -1 self._matched_char_id = -1 self._check_matchdelete = bool(self._vim.call( 'denite#util#check_matchdelete')) def start(self, sources: typing.List[typing.Any], context: UserContext) -> typing.List[typing.Any]: if not self._denite: # if hasattr(self._vim, 'run_coroutine'): # self._denite = ASyncParent(self._vim) # else: self._denite = SyncParent(self._vim) self._result = [] context['sources_queue'] = [sources] self._start_sources_queue(context) return self._result def do_action(self, action_name: str, command: str = '', is_manual: bool = False) -> None: if is_manual: candidates = self._get_selected_candidates() elif self._get_cursor_candidate(): candidates = [self._get_cursor_candidate()] else: candidates = [] if not self._denite or not candidates or not action_name: return self._prev_action = action_name action = self._denite.get_action( self._context, action_name, candidates) if not action: return post_action = self._context['post_action'] is_quit = action['is_quit'] or post_action == 'quit' if is_quit: self.quit() self._denite.do_action(self._context, action_name, candidates) self._result = candidates if command != '': self._vim.command(command) if is_quit and post_action == 'open': # Re-open denite buffer prev_cursor = self._cursor cursor_candidate = self._get_cursor_candidate() self._init_buffer() self.redraw(False) if cursor_candidate == self._get_candidate(prev_cursor): # Restore the cursor self._move_to_pos(prev_cursor) # Disable quit flag is_quit = False if not is_quit and is_manual: self._selected_candidates = [] self.redraw(action['is_redraw']) if is_manual and self._context['sources_queue']: self._context['input'] = '' self._context['quick_move'] = '' self._start_sources_queue(self._context) return def redraw(self, is_force: bool = True) -> None: self._context['is_redraw'] = is_force if is_force: self._gather_candidates() if self._update_candidates(): self._update_buffer() else: self._update_status() self._context['is_redraw'] = False def 
quit(self) -> None: if self._denite: self._denite.on_close(self._context) self._quit_buffer() self._result = [] return def _restart(self) -> None: self._context['input'] = '' self._quit_buffer() self._init_denite() self._gather_candidates() self._init_buffer() self._update_candidates() self._update_buffer() def _start_sources_queue(self, context: UserContext) -> None: if not context['sources_queue']: return self._sources_history.append({ 'sources': context['sources_queue'][0], 'path': context['path'], }) self._start(context['sources_queue'][0], context) if context['sources_queue']: context['sources_queue'].pop(0) context['path'] = self._context['path'] def _start(self, sources: typing.List[typing.Any], context: UserContext) -> None: from denite.ui.map import do_map self._vim.command('silent! autocmd! denite') if re.search(r'\[Command Line\]$', self._vim.current.buffer.name): # Ignore command line window. return resume = self._initialized and context['resume'] if resume: # Skip the initialization update = ('immediately', 'immediately_1', 'cursor_pos', 'prev_winid', 'start_filter', 'quick_move') for key in update: self._context[key] = context[key] self._check_move_option() if self._check_do_option(): return self._init_buffer() if context['refresh']: self.redraw() self._move_to_pos(self._cursor) else: if self._context != context: self._context.clear() self._context.update(context) self._context['sources'] = sources self._context['is_redraw'] = False self._is_multi = len(sources) > 1 if not sources: # Ignore empty sources. error(self._vim, 'Empty sources') return self._init_denite() self._gather_candidates() self._update_candidates() self._init_cursor() self._check_move_option() if self._check_do_option(): return self._init_buffer() self._update_displayed_texts() self._update_buffer() self._move_to_pos(self._cursor) if self._context['quick_move'] and do_map(self, 'quick_move', []): return if self._context['start_filter']: do_map(self, 'open_filter_buffer', []) def _init_buffer(self) -> None: self._prev_status = dict() self._displayed_texts = [] self._prev_bufnr = self._vim.current.buffer.number self._prev_curpos = self._vim.call('getcurpos') self._prev_wininfo = self._get_wininfo() self._prev_winid = self._context['prev_winid'] self._winrestcmd = self._vim.call('winrestcmd') self._ruler = self._vim.options['ruler'] self._switch_buffer() self._bufnr = self._vim.current.buffer.number self._winid = self._vim.call('win_getid') self._resize_buffer(True) self._winheight = self._vim.current.window.height self._winwidth = self._vim.current.window.width self._bufvars = self._vim.current.buffer.vars self._bufvars['denite'] = { 'buffer_name': self._context['buffer_name'], } self._bufvars['denite_statusline'] = {} self._vim.vars['denite#_previewed_buffers'] = {} self._save_window_options = {} window_options = { 'colorcolumn', 'concealcursor', 'conceallevel', 'cursorcolumn', 'cursorline', 'foldcolumn', 'foldenable', 'list', 'number', 'relativenumber', 'signcolumn', 'spell', 'winfixheight', 'wrap', } for k in window_options: self._save_window_options[k] = self._vim.current.window.options[k] # Note: Have to use setlocal instead of "current.window.options" # "current.window.options" changes global value instead of local in # neovim. 
self._vim.command('setlocal colorcolumn=') self._vim.command('setlocal conceallevel=3') self._vim.command('setlocal concealcursor=inv') self._vim.command('setlocal nocursorcolumn') self._vim.command('setlocal nofoldenable') self._vim.command('setlocal foldcolumn=0') self._vim.command('setlocal nolist') self._vim.command('setlocal nonumber') self._vim.command('setlocal norelativenumber') self._vim.command('setlocal nospell') self._vim.command('setlocal winfixheight') self._vim.command('setlocal nowrap') if self._context['prompt']: self._vim.command('setlocal signcolumn=yes') else: self._vim.command('setlocal signcolumn=auto') if self._context['cursorline']: self._vim.command('setlocal cursorline') options = self._vim.current.buffer.options if self._floating: # Disable ruler self._vim.options['ruler'] = False options['buftype'] = 'nofile' options['bufhidden'] = 'delete' options['swapfile'] = False options['buflisted'] = False options['modeline'] = False options['modifiable'] = False options['filetype'] = 'denite' if self._vim.call('exists', '#WinEnter'): self._vim.command('doautocmd WinEnter') if self._vim.call('exists', '#BufWinEnter'): self._vim.command('doautocmd BufWinEnter') if not self._vim.call('has', 'nvim'): # In Vim8, FileType autocmd is not fired after set filetype option. self._vim.command('silent doautocmd FileType denite') if self._context['auto_action']: self._vim.command('autocmd denite ' 'CursorMoved <buffer> ' 'call denite#call_map("auto_action")') self._init_syntax() def _switch_buffer(self) -> None: split = self._context['split'] if (split != 'no' and self._winid > 0 and self._vim.call('win_gotoid', self._winid)): if split != 'vertical' and not self._floating: # Move the window to bottom self._vim.command('wincmd J') self._winrestcmd = '' return self._floating = split in [ 'floating', 'floating_relative_cursor', 'floating_relative_window', ] self._filter_floating = False if self._vim.current.buffer.options['filetype'] != 'denite': self._titlestring = self._vim.options['titlestring'] command = 'edit' if split == 'tab': self._vim.command('tabnew') elif self._floating: self._split_floating(split) elif self._context['filter_split_direction'] == 'floating': self._filter_floating = True elif split != 'no': command = self._get_direction() command += ' vsplit' if split == 'vertical' else ' split' bufname = '[denite]-' + self._context['buffer_name'] if self._vim.call('exists', '*bufadd'): bufnr = self._vim.call('bufadd', bufname) vertical = 'vertical' if split == 'vertical' else '' command = ( 'buffer' if split in ['no', 'tab', 'floating', 'floating_relative_window', 'floating_relative_cursor'] else 'sbuffer') self._vim.command( 'silent keepalt %s %s %s %s' % ( self._get_direction(), vertical, command, bufnr, ) ) else: self._vim.call( 'denite#util#execute_path', f'silent keepalt {command}', bufname) def _get_direction(self) -> str: direction = str(self._context['direction']) if direction == 'dynamictop' or direction == 'dynamicbottom': self._update_displayed_texts() winwidth = self._vim.call('winwidth', 0) is_fit = not [x for x in self._displayed_texts if self._vim.call('strwidth', x) > winwidth] if direction == 'dynamictop': direction = 'aboveleft' if is_fit else 'topleft' else: direction = 'belowright' if is_fit else 'botright' return direction def _get_wininfo(self) -> typing.List[typing.Any]: return [ self._vim.options['columns'], self._vim.options['lines'], self._vim.call('win_getid'), self._vim.call('tabpagebuflist') ] def _switch_prev_buffer(self) -> None: if 
(self._prev_bufnr == self._bufnr or self._vim.buffers[self._prev_bufnr].name == ''): self._vim.command('enew') else: self._vim.command('buffer ' + str(self._prev_bufnr)) def _init_syntax(self) -> None: self._vim.command('syntax case ignore') self._vim.command('highlight default link deniteInput ModeMsg') self._vim.command('highlight link deniteMatchedRange ' + self._context['highlight_matched_range']) self._vim.command('highlight link deniteMatchedChar ' + self._context['highlight_matched_char']) self._vim.command('highlight default link ' + 'deniteStatusLinePath Comment') self._vim.command('highlight default link ' + 'deniteStatusLineNumber LineNR') self._vim.command('highlight default link ' + 'deniteSelectedLine Statement') if self._floating: self._vim.current.window.options['winhighlight'] = ( 'Normal:' + self._context['highlight_window_background'] ) self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' + ' contains=deniteConcealedMark') % ( self._context['selected_icon'])) self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' + ' conceal contained') % ( self._context['selected_icon'])) if self._denite: self._denite.init_syntax(self._context, self._is_multi) def _update_candidates(self) -> bool: if not self._denite: return False [self._is_async, pattern, statuses, self._entire_len, self._candidates] = self._denite.filter_candidates(self._context) prev_displayed_texts = self._displayed_texts self._update_displayed_texts() prev_matched_pattern = self._matched_pattern self._matched_pattern = pattern prev_statusline_sources = self._statusline_sources self._statusline_sources = ' '.join(statuses) if self._is_async: self._start_timer('update_candidates') else: self._stop_timer('update_candidates') updated = (self._displayed_texts != prev_displayed_texts or self._matched_pattern != prev_matched_pattern or self._statusline_sources != prev_statusline_sources) if updated: self._updated = True self._start_timer('update_buffer') if self._context['search'] and self._context['input']: self._vim.call('setreg', '/', self._context['input']) return self._updated def _update_displayed_texts(self) -> None: candidates_len = len(self._candidates) if not self._is_async and self._context['auto_resize']: winminheight = self._context['winminheight'] max_height = min(self._context['winheight'], self._get_max_height()) if (winminheight != -1 and candidates_len < winminheight): self._winheight = winminheight elif candidates_len > max_height: self._winheight = max_height elif candidates_len != self._winheight: self._winheight = candidates_len max_source_name_len = 0 if self._candidates: max_source_name_len = max([ len(self._get_display_source_name(x['source_name'])) for x in self._candidates]) self._context['max_source_name_len'] = max_source_name_len self._context['max_source_name_format'] = ( '{:<' + str(self._context['max_source_name_len']) + '}') self._displayed_texts = [ self._get_candidate_display_text(i) for i in range(0, candidates_len) ] def _update_buffer(self) -> None: is_current_buffer = self._bufnr == self._vim.current.buffer.number self._update_status() if self._check_matchdelete and self._context['match_highlight']: matches = [x['id'] for x in self._vim.call('getmatches', self._winid)] if self._matched_range_id in matches: self._vim.call('matchdelete', self._matched_range_id, self._winid) self._matched_range_id = -1 if self._matched_char_id in matches: self._vim.call('matchdelete', self._matched_char_id, self._winid) self._matched_char_id = -1 if self._matched_pattern != '': 
self._matched_range_id = self._vim.call( 'matchadd', 'deniteMatchedRange', r'\c' + regex_convert_py_vim(self._matched_pattern), 10, -1, {'window': self._winid}) matched_char_pattern = '[{}]'.format(re.sub( r'([\[\]\\^-])', r'\\\1', self._context['input'].replace(' ', '') )) self._matched_char_id = self._vim.call( 'matchadd', 'deniteMatchedChar', matched_char_pattern, 10, -1, {'window': self._winid}) prev_linenr = self._vim.call('line', '.') prev_candidate = self._get_cursor_candidate() buffer = self._vim.buffers[self._bufnr] buffer.options['modifiable'] = True self._vim.vars['denite#_candidates'] = [ x['word'] for x in self._candidates] buffer[:] = self._displayed_texts buffer.options['modifiable'] = False self._previous_text = self._context['input'] self._resize_buffer(is_current_buffer) is_changed = (self._context['reversed'] or (is_current_buffer and self._previous_text != self._context['input'])) if self._updated and is_changed: if not is_current_buffer: save_winid = self._vim.call('win_getid') self._vim.call('win_gotoid', self._winid) self._init_cursor() self._move_to_pos(self._cursor) if not is_current_buffer: self._vim.call('win_gotoid', save_winid) elif is_current_buffer: self._vim.call('cursor', [prev_linenr, 0]) if is_current_buffer: if (self._context['auto_action'] and prev_candidate != self._get_cursor_candidate()): self.do_action(self._context['auto_action']) self._updated = False self._stop_timer('update_buffer') def _update_status(self) -> None: inpt = '' if self._context['input']: inpt = self._context['input'] + ' ' if self._context['error_messages']: inpt = '[ERROR] ' + inpt path = '[' + self._context['path'] + ']' status = { 'input': inpt, 'sources': self._statusline_sources, 'path': path, # Extra 'buffer_name': self._context['buffer_name'], 'line_total': len(self._candidates), } if status == self._prev_status: return self._bufvars['denite_statusline'] = status self._prev_status = status linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))" if self._context['statusline']: if self._floating or self._filter_floating: self._vim.options['titlestring'] = ( "%{denite#get_status('input')}%* " + "%{denite#get_status('sources')} " + " %{denite#get_status('path')}%*" + "%{" + linenr + "}%*") else: winnr = self._vim.call('win_id2win', self._winid) self._vim.call('setwinvar', winnr, '&statusline', ( "%#deniteInput#%{denite#get_status('input')}%* " + "%{denite#get_status('sources')} %=" + "%#deniteStatusLinePath# %{denite#get_status('path')}%*" + "%#deniteStatusLineNumber#%{" + linenr + "}%*")) def _get_display_source_name(self, name: str) -> str: source_names = self._context['source_names'] if not self._is_multi or source_names == 'hide': source_name = '' else: short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name) if re.search(r'[^a-zA-Z]', name) else name[:2]) source_name = short_name if source_names == 'short' else name return source_name def _get_candidate_display_text(self, index: int) -> str: source_names = self._context['source_names'] candidate = self._candidates[index] terms = [] if self._is_multi and source_names != 'hide': terms.append(self._context['max_source_name_format'].format( self._get_display_source_name(candidate['source_name']))) encoding = self._context['encoding'] abbr = candidate.get('abbr', candidate['word']).encode( encoding, errors='replace').decode(encoding, errors='replace') terms.append(abbr[:int(self._context['max_candidate_width'])]) return (str(self._context['selected_icon']) if index in self._selected_candidates else ' ') + ' 
'.join(terms).replace('\n', '') def _get_max_height(self) -> int: return int(self._vim.options['lines']) if not self._floating else ( int(self._vim.options['lines']) - int(self._context['winrow']) - int(self._vim.options['cmdheight'])) def _resize_buffer(self, is_current_buffer: bool) -> None: split = self._context['split'] if (split == 'no' or split == 'tab' or self._vim.call('winnr', '$') == 1): return winheight = max(self._winheight, 1) winwidth = max(self._winwidth, 1) is_vertical = split == 'vertical' if not is_current_buffer: restore = self._vim.call('win_getid') self._vim.call('win_gotoid', self._winid) if not is_vertical and self._vim.current.window.height != winheight: if self._floating: wincol = self._context['winrow'] row = wincol if split == 'floating': if self._context['auto_resize'] and row > 1: row += self._context['winheight'] row -= self._winheight self._vim.call('nvim_win_set_config', self._winid, { 'relative': 'editor', 'row': row, 'col': self._context['wincol'], 'width': winwidth, 'height': winheight, }) filter_row = 0 if wincol == 1 else row + winheight filter_col = self._context['wincol'] else: init_pos = self._vim.call('nvim_win_get_config', self._winid) self._vim.call('nvim_win_set_config', self._winid, { 'relative': 'win', 'win': init_pos['win'], 'row': init_pos['row'], 'col': init_pos['col'], 'width': winwidth, 'height': winheight, }) filter_col = init_pos['col'] if init_pos['anchor'] == 'NW': winpos = self._vim.call('nvim_win_get_position', self._winid) filter_row = winpos[0] + winheight filter_winid = self._vim.vars['denite#_filter_winid'] self._context['filter_winrow'] = row if self._vim.call('win_id2win', filter_winid) > 0: self._vim.call('nvim_win_set_config', filter_winid, { 'relative': 'editor', 'row': filter_row, 'col': filter_col, }) self._vim.command('resize ' + str(winheight)) if self._context['reversed']: self._vim.command('normal! 
zb') elif is_vertical and self._vim.current.window.width != winwidth: self._vim.command('vertical resize ' + str(winwidth)) if not is_current_buffer: self._vim.call('win_gotoid', restore) def _check_do_option(self) -> bool: if self._context['do'] != '': self._do_command(self._context['do']) return True elif (self._candidates and self._context['immediately'] or len(self._candidates) == 1 and self._context['immediately_1']): self._do_immediately() return True return not (self._context['empty'] or self._is_async or self._candidates) def _check_move_option(self) -> None: if self._context['cursor_pos'].isnumeric(): self._cursor = int(self._context['cursor_pos']) + 1 elif re.match(r'\+\d+', self._context['cursor_pos']): for _ in range(int(self._context['cursor_pos'][1:])): self._move_to_next_line() elif re.match(r'-\d+', self._context['cursor_pos']): for _ in range(int(self._context['cursor_pos'][1:])): self._move_to_prev_line() elif self._context['cursor_pos'] == '$': self._move_to_last_line() def _do_immediately(self) -> None: goto = self._winid > 0 and self._vim.call( 'win_gotoid', self._winid) if goto: # Jump to denite window self._init_buffer() self.do_action('default') candidate = self._get_cursor_candidate() if not candidate: return echo(self._vim, 'Normal', '[{}/{}] {}'.format( self._cursor, len(self._candidates), candidate.get('abbr', candidate['word']))) if goto: # Move to the previous window self._vim.command('wincmd p') def _do_command(self, command: str) -> None: self._init_cursor() cursor = 1 while cursor < len(self._candidates): self.do_action('default', command) self._move_to_next_line() self._quit_buffer() def _cleanup(self) -> None: self._stop_timer('update_candidates') self._stop_timer('update_buffer') if self._vim.current.buffer.number == self._bufnr: self._cursor = self._vim.call('line', '.') # Note: Close filter window before preview window self._vim.call('denite#filter#_close_filter_window') if not self._context['has_preview_window']: self._vim.command('pclose!') # Clear previewed buffers for bufnr in self._vim.vars['denite#_previewed_buffers'].keys(): if not self._vim.call('win_findbuf', bufnr): self._vim.command('silent bdelete ' + str(bufnr)) self._vim.vars['denite#_previewed_buffers'] = {} self._vim.command('highlight! 
link CursorLine CursorLine') if self._floating or self._filter_floating: self._vim.options['titlestring'] = self._titlestring self._vim.options['ruler'] = self._ruler def _close_current_window(self) -> None: if self._vim.call('winnr', '$') == 1: self._vim.command('buffer #') else: self._vim.command('close!') def _quit_buffer(self) -> None: self._cleanup() if self._vim.call('bufwinnr', self._bufnr) < 0: # Denite buffer is already closed return winids = self._vim.call('win_findbuf', self._vim.vars['denite#_filter_bufnr']) if winids: # Quit filter buffer self._vim.call('win_gotoid', winids[0]) self._close_current_window() # Move to denite window self._vim.call('win_gotoid', self._winid) # Restore the window if self._context['split'] == 'no': self._switch_prev_buffer() for k, v in self._save_window_options.items(): self._vim.current.window.options[k] = v else: if self._context['split'] == 'tab': self._vim.command('tabclose!') if self._context['split'] != 'tab': self._close_current_window() self._vim.call('win_gotoid', self._prev_winid) # Restore the position self._vim.call('setpos', '.', self._prev_curpos) if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo: # Note: execute restcmd twice to restore layout properly self._vim.command(self._winrestcmd) self._vim.command(self._winrestcmd) clearmatch(self._vim) def _get_cursor_candidate(self) -> Candidate: return self._get_candidate(self._cursor) def _get_candidate(self, pos: int) -> Candidate: if not self._candidates or pos > len(self._candidates): return {} return self._candidates[pos - 1] def _get_selected_candidates(self) -> Candidates: if not self._selected_candidates: return [self._get_cursor_candidate() ] if self._get_cursor_candidate() else [] return [self._candidates[x] for x in self._selected_candidates] def _init_denite(self) -> None: if self._denite: self._denite.start(self._context) self._denite.on_init(self._context) self._initialized = True self._winheight = self._context['winheight'] self._winwidth = self._context['winwidth'] def _gather_candidates(self) -> None: self._selected_candidates = [] if self._denite: self._denite.gather_candidates(self._context) def _init_cursor(self) -> None: if self._context['reversed']: self._move_to_last_line() else: self._move_to_first_line() def _move_to_pos(self, pos: int) -> None: self._vim.call('cursor', pos, 0) self._cursor = pos if self._context['reversed']: self._vim.command('normal! 
zb') def _move_to_next_line(self) -> None: if self._cursor < len(self._candidates): self._cursor += 1 def _move_to_prev_line(self) -> None: if self._cursor >= 1: self._cursor -= 1 def _move_to_first_line(self) -> None: self._cursor = 1 def _move_to_last_line(self) -> None: self._cursor = len(self._candidates) def _start_timer(self, key: str) -> None: if key in self._timers: return if key == 'update_candidates': self._timers[key] = self._vim.call( 'denite#helper#_start_update_candidates_timer', self._bufnr) elif key == 'update_buffer': self._timers[key] = self._vim.call( 'denite#helper#_start_update_buffer_timer', self._bufnr) def _stop_timer(self, key: str) -> None: if key not in self._timers: return self._vim.call('timer_stop', self._timers[key]) # Note: After timer_stop is called, self._timers may be removed if key in self._timers: self._timers.pop(key) def _split_floating(self, split: str) -> None: # Use floating window if split == 'floating': self._vim.call( 'nvim_open_win', self._vim.call('bufnr', '%'), True, { 'relative': 'editor', 'row': self._context['winrow'], 'col': self._context['wincol'], 'width': self._context['winwidth'], 'height': self._context['winheight'], }) elif split == 'floating_relative_cursor': opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] + self._vim.call('winline') - 1) if self._context['auto_resize']: height = max(self._winheight, 1) width = max(self._winwidth, 1) else: width = self._context['winwidth'] height = self._context['winheight'] if opened_pos + height + 3 > self._vim.options['lines']: anchor = 'SW' row = 0 self._context['filter_winrow'] = row + opened_pos else: anchor = 'NW' row = 1 self._context['filter_winrow'] = row + height + opened_pos self._vim.call( 'nvim_open_win', self._vim.call('bufnr', '%'), True, { 'relative': 'cursor', 'row': row, 'col': 0, 'width': width, 'height': height, 'anchor': anchor, }) elif split == 'floating_relative_window': self._vim.call( 'nvim_open_win', self._vim.call('bufnr', '%'), True, { 'relative': 'win', 'row': self._context['winrow'], 'col': self._context['wincol'], 'width': self._context['winwidth'], 'height': self._context['winheight'], })
1.453125
1
examples/first_char_last_column.py
clarkfitzg/sta141c
24
132
<filename>examples/first_char_last_column.py
#!/usr/bin/env python3
"""
For the last column, print only the first character.

Usage:

$ printf "100,200\n0,\n" | python3 first_char_last_column.py

Should print "100,2\n0,"
"""

import csv
from sys import stdin, stdout


def main():
    reader = csv.reader(stdin)
    writer = csv.writer(stdout)
    for row in reader:
        try:
            row[-1] = row[-1][0]
        except IndexError:
            # Python: Better to ask forgiveness than permission
            # Alternative: Look before you leap
            pass
        writer.writerow(row)


if __name__ == "__main__":
    main()
1.789063
2
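The script above deliberately uses the EAFP style ("better to ask forgiveness than permission") that its comment names; a minimal sketch of the "look before you leap" alternative it alludes to could look like this (the function name is illustrative, not part of the original file):

# Hypothetical LBYL variant of the same row transformation:
# check for an empty last field instead of catching IndexError.
def shorten_last_column(row):
    if row and row[-1]:          # look before you leap
        row[-1] = row[-1][0]
    return row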
twitoff/predict.py
dscohen75/twitoff
0
260
import numpy as np
from sklearn.linear_model import LogisticRegression

from .models import User
from .twitter import vectorize_tweet


def predict_user(user1_name, user2_name, tweet_text):
    """
    Determine and return which user is more likely to say a given Tweet.

    Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
    Returns 1 corresponding to 1st user passed in, or 0 for second.
    """
    user1 = User.query.filter(User.name == user1_name).one()
    user2 = User.query.filter(User.name == user2_name).one()

    user1_vect = np.array([tweet.vect for tweet in user1.tweets])
    user2_vect = np.array([tweet.vect for tweet in user2.tweets])

    vects = np.vstack([user1_vect, user2_vect])
    labels = np.concatenate([np.ones(len(user1.tweets)),
                             np.zeros(len(user2.tweets))])

    log_reg = LogisticRegression().fit(vects, labels)

    # We've done the model fitting, now to predict...
    hypo_tweet_vect = vectorize_tweet(tweet_text)

    return log_reg.predict(np.array(hypo_tweet_vect).reshape(1, -1))
2.671875
3
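predict_user fits a fresh logistic regression on every call: the first user's tweet vectors are labeled 1 and the second user's 0, so the returned class maps back to argument order. A hedged usage sketch, assuming the surrounding app's database session is set up (User.query implies a SQLAlchemy-backed model) and reusing the handles from the docstring example:

# Illustrative call; requires the app/database context so User.query works.
prediction = predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')

# predict() returns an array with one label: 1.0 -> first user, 0.0 -> second.
more_likely = 'ausen' if prediction[0] == 1 else 'elonmusk'
print(more_likely)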
tensorflow/python/ops/standard_ops.py
ashutom/tensorflow-upstream
8
388
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unused-import """Import names of Tensor Flow standard Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import platform as _platform import sys as _sys from tensorflow.python import autograph from tensorflow.python.training.experimental import loss_scaling_gradient_tape # pylint: disable=g-bad-import-order # Imports the following modules so that @RegisterGradient get executed. from tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad from tensorflow.python.ops import data_flow_grad from tensorflow.python.ops import manip_grad from tensorflow.python.ops import math_grad from tensorflow.python.ops import random_grad from tensorflow.python.ops import rnn_grad from tensorflow.python.ops import sparse_grad from tensorflow.python.ops import state_grad from tensorflow.python.ops import tensor_array_grad # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin from tensorflow.python.ops.check_ops import * from tensorflow.python.ops.clip_ops import * from tensorflow.python.ops.special_math_ops import * # TODO(vrv): Switch to import * once we're okay with exposing the module. 
from tensorflow.python.ops.confusion_matrix import confusion_matrix from tensorflow.python.ops.control_flow_ops import Assert from tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import cond from tensorflow.python.ops.control_flow_ops import group from tensorflow.python.ops.control_flow_ops import no_op from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin # pylint: enable=redefined-builtin from tensorflow.python.eager import wrap_function from tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.batch_ops import * from tensorflow.python.ops.critical_section_ops import * from tensorflow.python.ops.data_flow_ops import * from tensorflow.python.ops.functional_ops import * from tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops import * from tensorflow.python.ops.linalg_ops import * from tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops import get_summary_op from tensorflow.python.ops.logging_ops import timestamp from tensorflow.python.ops.lookup_ops import initialize_all_tables from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin from tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators from tensorflow.python.ops.random_ops import * from tensorflow.python.ops.script_ops import py_func from tensorflow.python.ops.session_ops import * from tensorflow.python.ops.sort_ops import * from tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import assign_sub from tensorflow.python.ops.state_ops import count_up_to from tensorflow.python.ops.state_ops import scatter_add from tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops import scatter_min from tensorflow.python.ops.state_ops import scatter_max from tensorflow.python.ops.state_ops import scatter_update from tensorflow.python.ops.state_ops import scatter_nd_add from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister): Re-enable once binary size increase due to scatter_nd # ops is under control. 
# from tensorflow.python.ops.state_ops import scatter_nd_mul # from tensorflow.python.ops.state_ops import scatter_nd_div from tensorflow.python.ops.state_ops import scatter_nd_update from tensorflow.python.ops.stateless_random_ops import * from tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops import * from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin from tensorflow.python.ops.variables import * from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map # pylint: disable=g-import-not-at-top if _platform.system() == "Windows": from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt else: from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: enable=g-import-not-at-top # pylint: enable=wildcard-import # pylint: enable=g-bad-import-order # These modules were imported to set up RaggedTensor operators and dispatchers: del _ragged_dispatch, _ragged_operators
1.367188
1
Models.py
jmj23/Kaggle-Pneumothorax
0
516
import numpy as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model # Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): """Creates a Block CED model for segmentation problems Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list = [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = 
BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): """Creates a Block model for pretraining on classification task Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, "Too small of input for this many blocks. 
Use fewer blocks or larger input" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list = [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def ConvertEncoderToCED(model): # Returns a model with frozen encoder layers # and complimentary, unfrozen decoder layers # get input layer # model must be compiled again after using this function lay_input = model.input # get skip connection layer outputs skip_list = [l.output for l in model.layers if 'skip' in l.name] numBlocks = len(skip_list) filt_num = int(skip_list[0].shape[-1]) x = model.layers[-3].output # freeze encoder layers for layer in model.layers: layer.trainable = False use_bn = True # make expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', 
name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) def Inception_model(input_shape=(299, 299, 3)): incep_model = InceptionV3( include_top=False, weights=None, input_shape=input_shape, pooling='avg') input_layer = incep_model.input incep_output = incep_model.output # x = Conv2D(16, (3, 3), activation='relu')(incep_output) # x = Flatten()(x) x = Dense(1, activation='sigmoid')(incep_output) return Model(inputs=input_layer, outputs=x)
2.609375
3
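The docstrings above spell out the shape constraint: rows and cols must be divisible by 2**numBlocks so the skip connections line up, and the docstring's own example is BlockModel2D([256,256,1], filt_num=8). A minimal usage sketch under those constraints; the optimizer and loss below are assumptions chosen to match the sigmoid output, not taken from the source:

# 256 is divisible by 2**3, so three blocks are compatible with this input shape.
seg_model = BlockModel2D([256, 256, 1], filt_num=8, numBlocks=3)

# The segmentation head ends in a sigmoid, so binary cross-entropy is a
# reasonable (assumed) training objective here.
seg_model.compile(optimizer='adam', loss='binary_crossentropy')
seg_model.summary()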
vize/170401038.py
omuryorulmaz/kriptografi
8
644
# <NAME> 170401038 import math import random r = 3271 def egcd(a,b): if(a == 0): return(b,0,1) else: c,d,e = egcd(b % a, a) return(c, e - (b // a) * d, d) def modInvert(a,b): c,d,e = egcd(a,b) if c != 1: raise Exception('moduler ters bulunamadi') else: return d % b def randomInteger(n): return random.randrange(2 ** (n-1), 2 ** n) | 1 def RabinMiller(f): s = 5 if(f == 2): return 1 if not (f & 1): return 0 p = f-1 u = 0 r = f-1 while (r%2 == 0): r >>= 1 u+=1 def Control(a): z = pow(a, r, f) if z == 1: return 0 for i in range(u): z = pow(a, (2**i) * r, f-1) if z == p: return 0 return 1 for i in range(s): a = random.randrange(2, p-2) if Control(a): return 0 return 1 def Keygen(n): while True: p = randomInteger(n//2) if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, int((p - 1) / r)) == 1: break while True: q = randomInteger(n//2) if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1: break N = p * q phi = (p - 1) * (q - 1) while True: y = random.randrange(1, N) if math.gcd(y, N) == 1: x = pow(y, phi * modInvert(r, N) % N, N) if x != 1: break publicKeyFile = open("publickey.txt", "w+") publicKeyFile.write(str(N) + "\n" + str(y)) publicKeyFile.close() privateKeyFile = open("privatekey.txt", "w+") privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N)) privateKeyFile.close() def encrypt(plaintext, publickeytxt): try: open(publickeytxt, "r") except FileNotFoundError: print("Anahtar çiftleri oluşturulmadan şifrelme işlemi yapılamaz. Lütfen önce Keygen fonksiyonunu çalıştırın.") else: publicKeyFile = open(publickeytxt, "r") N, y = publicKeyFile.read().split("\n") N = int(N) y = int(y) publicKeyFile.close() plainTextFile = open(plaintext, "r") plainCopy = int(plainTextFile.read().split("\n")[0]) plainTextFile.close() while True: u = random.randrange(1, int(N)) if math.gcd(y, N) == 1: break cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N cipherTextFile = open("ciphertext.txt", "w+") cipherTextFile.write(str(cipherText)) cipherTextFile.close() def decrypt(ciphertext, privatekeytxt): try: open(privatekeytxt, "r") except FileNotFoundError: print("Anahtar çiftleri oluşturulmadan deşifreleme işlemi yapılamz. Lütfen önce Keygen fonksiyonunu çalıştırın.") else: privateKeyFile = open(privatekeytxt, "r") phi, x, N = privateKeyFile.read().split("\n") phi, x, N = int(phi), int(x), int(N) privateKeyFile.close() cipherTextFile = open(ciphertext, "r") cipherCopy = int(cipherTextFile.read()) a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N) for i in range(r -1): if(pow(x, i, N) == a): break plainText2File = open("plaintext2.txt", "w+") plainText2File.write(str(i)) plainText2File.close() plain2File = open("plaintext2.txt", "r") plain1File = open("plaintext.txt", "r") plain1 = plain1File.read().split("\n")[0] plain2 = plain2File.read().split("\n")[0] if plain1 == plain2: print("Dosyalar Özdeştir..") else: print("Dosyalar özdeş değildir..") n = int(input("Oluşturulmak istenen anahtar çiftlerinin bit uzunluğunu girin: ")) Keygen(n) encrypt("plaintext.txt","publickey.txt") decrypt("ciphertext.txt", "privatekey.txt")
2.03125
2
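Key generation above leans on the extended Euclidean algorithm (egcd / modInvert) to invert r modulo N. As a quick standalone sanity check of what modInvert computes, here is a tiny sketch using Python's built-in modular inverse (available since Python 3.8); the numbers are illustrative:

# modInvert(a, b) returns the x with (a * x) % b == 1; e.g. the inverse of 3 mod 11 is 4.
a, m = 3, 11
inv = pow(a, -1, m)        # built-in modular inverse, Python 3.8+
assert (a * inv) % m == 1
print(inv)                 # 4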
python3_module_template/subproject/myexample.py
sdpython/python_project_template
0
772
<filename>python3_module_template/subproject/myexample.py
# -*- coding: utf-8 -*-
"""
@file
@brief This the documentation of this module (myexampleb).
"""


class myclass:
    """
    This is the documentation for this class.

    **example with a sphinx directives**

    It works everywhere in the documentation.

    .. exref::
        :title: an example of use

        Just for documentation purpose.

        ::

            m = myclass(0)

    The old way:

    @example(an old example of use)

    This only works from the code, not inserted in a RST file.
    The source documentation is parsed and every such example is collected and
    placed in a page ``all_examples.rst`` (look at the source).

    @code
    m = myclass(0)
    @endcode
    @endexample

    **FAQ**

    .. faqref::
        :title: How to add a question ?

        Just look a this section. Look also :ref:`l-FAQ2`.

    .. faqref::
        :title: Add a label
        :lid: label1

        Look also :ref:`l-FAQ2`.

    **BLOC**

    .. blocref::
        :title: How to add a bloc
        :tag: aaaa

        Just look a this bloc. Look also :ref:`l-FAQ2`.

    An accent, é, to check it is working. A link to github source:
    :githublink:`source|py`.
    """

    def __init__(self, pa):
        """
        documentation for the constructor

        @param      pa      first parameter
        """
        self.pa = pa

    def get_value(self, mul):
        """
        returns the parameter multiplied by a value

        @param      mul     a float
        @return             a float
        """
        return self.pa * mul
1.953125
2
test/crossrunner/compat.py
BluechipSystems/thrift
0
900
import os
import sys

if sys.version_info[0] == 2:
    _ENCODE = sys.getfilesystemencoding()

    def path_join(*args):
        bin_args = map(lambda a: a.decode(_ENCODE), args)
        return os.path.join(*bin_args).encode(_ENCODE)

    def str_join(s, l):
        bin_args = map(lambda a: a.decode(_ENCODE), l)
        b = s.decode(_ENCODE)
        return b.join(bin_args).encode(_ENCODE)

    logfile_open = open
else:
    path_join = os.path.join
    str_join = str.join

    def logfile_open(*args):
        return open(*args, errors='replace')
1.101563
1
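The shim above lets callers use one spelling on both Python 2 and 3: on Python 3 the names alias the standard library directly, while on Python 2 they round-trip byte paths through the filesystem encoding. A small sketch of the Python 3 behaviour, assuming the names are imported from this compat module (the log filename is illustrative):

import os

# On Python 3, path_join and str_join are plain aliases, so these hold:
assert path_join("test", "log") == os.path.join("test", "log")
assert str_join("-", ["a", "b", "c"]) == "a-b-c"

# logfile_open replaces undecodable bytes instead of raising, useful for noisy test logs.
with logfile_open("run.log", "w") as f:   # "run.log" is an illustrative filename
    f.write("ok\n")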
cinder/tests/unit/fake_group_snapshot.py
lightsey/cinder
571
1028
<filename>cinder/tests/unit/fake_group_snapshot.py
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields

from cinder import objects
from cinder.tests.unit import fake_constants as fake


def fake_db_group_snapshot(**updates):
    db_group_snapshot = {
        'id': fake.GROUP_SNAPSHOT_ID,
        'name': 'group-1',
        'status': 'available',
        'user_id': fake.USER_ID,
        'project_id': fake.PROJECT_ID,
        'group_type_id': fake.GROUP_TYPE_ID,
        'group_id': fake.GROUP_ID,
    }

    for name, field in objects.GroupSnapshot.fields.items():
        if name in db_group_snapshot:
            continue
        if field.nullable:
            db_group_snapshot[name] = None
        elif field.default != fields.UnspecifiedDefault:
            db_group_snapshot[name] = field.default
        else:
            raise Exception('fake_db_group_snapshot needs help with %s.'
                            % name)

    if updates:
        db_group_snapshot.update(updates)

    return db_group_snapshot


def fake_group_snapshot_obj(context, **updates):
    return objects.GroupSnapshot._from_db_object(
        context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates))
1.445313
1
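fake_db_group_snapshot fills every remaining GroupSnapshot field with None or the field default, and any keyword argument overrides the canned values. A hedged usage sketch from a unit test, assuming a test RequestContext is available as in Cinder's other fake helpers:

# Override only the fields the test cares about; the helper fills in the rest.
snap = fake_group_snapshot_obj(context, status='creating', name='snap-under-test')
assert snap.status == 'creating'
assert snap.group_id == fake.GROUP_ID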
darknet2ncnn.py
nihui/gen-ncnn-models
4
1156
<reponame>nihui/gen-ncnn-models #! /usr/bin/env python # coding: utf-8 import configparser import numpy as np import re,sys,os from graph import MyGraph from collections import OrderedDict def unique_config_sections(config_file): """Convert all config sections to have unique names. Adds unique suffixes to config sections for compability with configparser. """ from collections import defaultdict import io section_counters = defaultdict(int) output_stream = io.StringIO() with open(config_file) as fin: for line in fin: if line.startswith('['): section = line.strip().strip('[]') _section = section + '_' + str(section_counters[section]) section_counters[section] += 1 line = line.replace(section, _section) output_stream.write(line) output_stream.seek(0) return output_stream def getFilters(mydict, name): #print('find filters for ', name) if hasattr(mydict[name], 'filters'): return mydict[name].filters else: assert len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg): print(" %s read %d bytes" % (msg, len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') # read out major, minor, revision, net.seen readfile(weights_file, (4*4), 'head') mydict = OrderedDict() # record the output of the original layer mylist = [] count = 4 import queue for _section in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get() section = sec.name print('Parsing section {}'.format(section)) # this section will can be a subsection if section.startswith('activation') or section.endswith('activation'): activation = sec.get('activation', fallback = 'logistic') if activation == 'linear': pass elif activation == 'linear' or activation == 'leaky' or activation == 'relu': node = MyGraph.MyNode() node.name = section node.op = 'Leaky' if activation == 'linear': node.slope = 1 elif activation == 'leaky': node.slope = 0.1 elif activation == 'relu': node.slope = 0 node.input = [prev_output] node.input_norm = node.input #node.attr = [] mydict[node.name] = node prev_output = node.name # prev_layer_filters no change else: raise ValueError( 'Unknown activation function `{}` in section {}'.format( activation, section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section): if section.startswith('convolutional'): conv = 'conv' filters = sec.getint('filters', fallback = 1) groups = 1 op = 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters multiplier = sec.getint('multiplier', fallback = 1) assert multiplier == 1 groups = filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv = 'gconv' filters = sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback = 1) op = 'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1) stride = sec.getint('stride', fallback = 1) pad = sec.getint('pad', fallback = 0) padding = sec.getint('padding', fallback = 0) activation = sec.get('activation', fallback = 'logistic') batch_normalize = sec.getint('batch_normalize', 0) # padding='same' is equivalent to Darknet pad=1 # padding = 'same' if pad == 1 else 'valid' if pad: padding = size//2 # Setting weights. 
# Darknet serializes convolutional weights as: # [bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape = prev_layer.shape # TODO: This assumes channel last dim_ordering. if conv == 'conv': weights_shape = (size, size, prev_layer_filters, filters) idx_tf2darknet = [0, 1, 2, 3] elif conv == 'dconv': weights_shape = (size, size, filters) idx_tf2darknet = [0, 1, 2] elif conv == 'gconv': weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet = [0, 1, 2, 3, 4] idxmap = {x: i for i, x in enumerate(idx_tf2darknet)} idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))] weights_size = np.product(weights_shape) print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape) conv_bias = np.ndarray( shape=(filters, ), dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) count += filters if batch_normalize: bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters * 12), section+'-batchnorm')) count += 3 * filters # TODO: Keras BatchNormalization mistakenly refers to var # as std. bn_weight_list = [ bn_weights[0], # scale gamma conv_bias, # shift beta bn_weights[1], # running mean bn_weights[2] # running var ] conv_weights = np.ndarray( shape=[weights_shape[i] for i in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size * 4), section+'-weights')) count += weights_size # DarkNet conv_weights are serialized Caffe-style: # (out_dim, in_dim, height, width) # We would like to set these to Tensorflow order: # (height, width, in_dim, out_dim) # TODO: Add check for Theano dim ordering. #print("the darknet shape is ", conv_weights.shape) conv_weights = np.transpose(conv_weights, idx_dartnet2tf) #print("the tf shape is ", conv_weights.shape) conv_weights = [conv_weights] if batch_normalize else [ conv_weights, conv_bias ] # Create nodes #conv_layer = np.zeros([1, 1, filters], dtype = np.float32) node = MyGraph.MyNode() node.name = section node.op = op node.input = [prev_output] node.input_norm = node.input node.kernel = conv_weights[0] node.padding = padding node.strides = [1,stride,stride,1] node.groups = groups node.filters = filters mydict[node.name] = node prev_output = node.name prev_layer_filters = filters if batch_normalize: node = MyGraph.MyNode() node.name = section + '_batch_normalize' node.op = 'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input #node.attr = [] node.gamma = bn_weights[0] node.beta = conv_bias node.mean = bn_weights[1] node.variance = bn_weights[2] mydict[node.name] = node prev_output = node.name # prev_layer_filters no change else: node = MyGraph.MyNode() node.name = section + '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm = node.input #node.attr = [] node.bias = conv_bias mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode() node.name = section node.op = 'Shuffle' node.input = [prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] = node prev_output = node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section): node = MyGraph.MyNode() node.stride = sec.getint('stride', fallback = 
1) node.size = sec.getint('size', node.stride) node.padding = sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'): node.mode = str(cfg_parser[section]['mode']) node.global_pooling = 0 elif section.startswith('maxpool'): node.mode = 'max' node.global_pooling = 0 elif section.startswith('avgpool'): node.mode = 'avg' node.global_pooling = 1 node.name = section node.op = 'Pooling' node.input = [prev_output] node.input_norm = node.input mydict[node.name] = node prev_output = node.name #print('pooling ', vars(node)) mylist.append(section) elif section.startswith('route'): ids = [int(i) for i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode() node.name = section node.op = 'NCNNConcat' node.input = [mylist[i] for i in ids] #print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input) node.input_norm = node.input node.axis = 0 node.filters = sum([getFilters(mydict, mylist[i]) for i in ids]) mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters = node.filters elif section.startswith('reorg'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetReorg' node.input = [prev_output] node.stride = sec.getint('stride', fallback = 1) node.input_norm = node.input node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\d+$', section): activation = sec.get('activation', fallback = 'logistic') from_ = sec.getint('from') node = MyGraph.MyNode() node.name = section node.op = 'BinaryOp' node.op_type = 0 node.input = [prev_output, mylist[from_]] #print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input) node.input_norm = node.input mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) # NOTE: this section has relative reference mylist.append(name) elif section.startswith('connected'): activation = sec.get('activation', fallback='linear') filters = sec.getint('output', 2) bias_data = np.ndarray( shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight')) node = MyGraph.MyNode() node.name = section node.op = 'MatMul' node.input = [prev_output] node.input_norm = node.input node.multiplier = fc_data mydict[node.name] = node prev_output = node.name prev_layer_filters = filters node = MyGraph.MyNode() node.name = section + '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm = node.input # node.attr = [] node.bias = bias_data mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetNet' node.input = [] node.input_norm = [] node.width = int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels = 
int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node)) # node.attr = [] mydict[node.name] = node # start here prev_output = node.name prev_layer_filters = node.channels mylist.append(section) elif section.startswith('region'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetRegion' node.input = [prev_output] node.input_norm = node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr = [] mydict[node.name] = node prev_output = node.name mylist.append(section) elif section.startswith('softmax'): node = MyGraph.MyNode() node.name = section node.op = 'Softmax' node.input = [prev_output] node.input_norm = node.input mydict[node.name] = node prev_output = node.name mylist.append(section) pass elif section.startswith('cost'): pass # Configs not currently handled during model definition. else: raise ValueError( 'Unsupported section header type: {}'.format(section)) print(' out filters ', prev_layer_filters) print('loaded {} bytes in weights file'.format(count*4)) mygraph = MyGraph(mydict) mygraph.type = 'darknet' return mygraph if __name__ == '__main__': config_path = sys.argv[1] weights_path = sys.argv[2] mygraph = buildGraph(config_path, weights_path) # 定义子图所需要的输出节点,输入节点,终止节点 outputNodes = ['region_0', 'softmax_0'] stopNodes = [] inputNodes = ['darknet_0'] mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot') # 生成子图对应的代码 mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
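A note on the kernel layout handled above: Darknet serializes convolution weights Caffe-style as (out_dim, in_dim, height, width), and the converter transposes them into TensorFlow's (height, width, in_dim, out_dim) order. A minimal sketch of that reordering, using NumPy only and invented dimensions (nothing here is taken from the converter itself):

import numpy as np

# Hypothetical 3x3 kernel with 16 output filters and 8 input channels.
out_dim, in_dim, k_h, k_w = 16, 8, 3, 3

# Darknet/Caffe order: (out_dim, in_dim, height, width).
darknet_kernel = np.arange(out_dim * in_dim * k_h * k_w, dtype=np.float32)
darknet_kernel = darknet_kernel.reshape(out_dim, in_dim, k_h, k_w)

# TensorFlow order: (height, width, in_dim, out_dim).
tf_kernel = np.transpose(darknet_kernel, (2, 3, 1, 0))
assert tf_kernel.shape == (k_h, k_w, in_dim, out_dim)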
1.8125
2
osrsapi/__init__.py
XaKingas/osrsapi
0
1284
from .grandexchange import GrandExchange, GameItemNotFound, GameItemParseError from .item import Item from .priceinfo import PriceInfo from .pricetrend import PriceTrend
0.283203
0
tests/components/mysensors/conftest.py
liangleslie/core
30,023
1412
<filename>tests/components/mysensors/conftest.py """Provide common mysensors fixtures.""" from __future__ import annotations from collections.abc import AsyncGenerator, Callable, Generator import json from typing import Any from unittest.mock import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: """Mock out device tracker known devices storage.""" devices = mock_device_tracker_conf return devices @pytest.fixture(name="mqtt") def mock_mqtt_fixture(hass: HomeAssistant) -> None: """Mock the MQTT integration.""" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name="is_serial_port") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: """Patch the serial port check.""" with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device: is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name="gateway_nodes") def gateway_nodes_fixture() -> dict[int, Sensor]: """Return the gateway nodes dict.""" return {} @pytest.fixture(name="serial_transport") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: """Mock a serial transport.""" with patch( "mysensors.gateway_serial.AsyncTransport", autospec=True ) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch( "mysensors.task.load_fw", autospec=True ), patch( "mysensors.task.Persistence", autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None: """Mock the gateway features.""" async def mock_schedule_save_sensors() -> None: """Load nodes from via persistence.""" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does not recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def mock_connect() -> None: """Mock the start method.""" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name="transport") def transport_fixture(serial_transport: MagicMock) -> MagicMock: """Return the default mocked transport.""" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: """Return the transport mock that accepts string messages.""" return transport.return_value.send @pytest.fixture(name="serial_entry") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: """Create a config entry for a serial gateway.""" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: "2.3", CONF_DEVICE: "/test/device", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name="config_entry") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: """Provide the config entry used for integration set up.""" return serial_entry @pytest.fixture(name="integration") async def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: """Set up the mysensors integration with a config entry.""" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]: """Receive a message for the gateway.""" def receive_message_callback(message_string: str) -> None: """Receive a message with the transport. The message_string parameter is a string in the MySensors message format. 
""" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name="gateway") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: """Return a setup gateway.""" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: """Load mysensors nodes fixture.""" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict: """Update the gateway nodes.""" gateway_nodes.update(nodes) return nodes @pytest.fixture(name="gps_sensor_state", scope="session") def gps_sensor_state_fixture() -> dict: """Load the gps sensor state.""" return load_nodes_state("mysensors/gps_sensor_state.json") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: """Load the gps sensor.""" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name="power_sensor_state", scope="session") def power_sensor_state_fixture() -> dict: """Load the power sensor state.""" return load_nodes_state("mysensors/power_sensor_state.json") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: """Load the power sensor.""" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name="energy_sensor_state", scope="session") def energy_sensor_state_fixture() -> dict: """Load the energy sensor state.""" return load_nodes_state("mysensors/energy_sensor_state.json") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) -> Sensor: """Load the energy sensor.""" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name="sound_sensor_state", scope="session") def sound_sensor_state_fixture() -> dict: """Load the sound sensor state.""" return load_nodes_state("mysensors/sound_sensor_state.json") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: """Load the sound sensor.""" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name="distance_sensor_state", scope="session") def distance_sensor_state_fixture() -> dict: """Load the distance sensor state.""" return load_nodes_state("mysensors/distance_sensor_state.json") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: """Load the distance sensor.""" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name="temperature_sensor_state", scope="session") def temperature_sensor_state_fixture() -> dict: """Load the temperature sensor state.""" return load_nodes_state("mysensors/temperature_sensor_state.json") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: """Load the temperature sensor.""" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name="text_node_state", scope="session") def text_node_state_fixture() -> dict: """Load the text node state.""" return load_nodes_state("mysensors/text_node_state.json") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: 
"""Load the text child node.""" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return node
1.484375
1
app/__init__.py
Jotasenpai/DigitalMediaStoreRESTfull
0
1540
import logging import os from flask import Flask from flask_cors import CORS from app.extensions import api from app.extensions.database import db from app.extensions.schema import ma from app.views import albums, artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r"/api/*": {"origins": "*"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError: pass return app
1.4375
1
src/find_genes_by_location/find_genes_by_location.py
NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome
3
1668
import argparse from collections import defaultdict import csv from dataclasses import dataclass, field from enum import Enum, unique, auto import os import sys import tempfile import yaml import zipfile import gffutils from google.protobuf import json_format from ncbi.datasets.v1alpha1 import dataset_catalog_pb2 from ncbi.datasets.v1alpha1.reports import assembly_pb2 from ncbi.datasets.reports.report_reader import DatasetsReportReader def retrieve_assembly_report(zip_in, catalog, assm_acc: str) -> assembly_pb2.AssemblyDataReport: report_files = get_catalog_files_for_assembly(catalog, dataset_catalog_pb2.File.FileType.DATA_REPORT, assm_acc) for path in report_files: yaml = zip_in.read(path) rpt_rdr = DatasetsReportReader() return rpt_rdr.assembly_report(yaml) def retrieve_data_catalog(zip_in) -> dataset_catalog_pb2.Catalog: catalog_json = zip_in.read('ncbi_dataset/data/dataset_catalog.json') return json_format.Parse(catalog_json, dataset_catalog_pb2.Catalog()) def get_catalog_files_for_assembly(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str): report_files = get_catalog_files(catalog, desired_filetype, assm_acc) filepaths = [] for assm_acc, paths in report_files.items(): filepaths.extend(paths) return filepaths def get_catalog_files(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str = None): files = defaultdict(list) for assm in catalog.assemblies: acc = assm.accession if assm_acc and assm_acc != acc: continue for f in assm.files: filepath = os.path.join('ncbi_dataset', 'data', f.file_path) if f.file_type == desired_filetype: files[acc].append(filepath) return files def get_zip_file_for_acc(acc, path): fname = os.path.join(path, f'{acc}.zip') if os.path.isfile(fname): return fname return None @dataclass class Gene: id: str feat_type: str name: str chrom: str strand: str range_start: int range_stop: int protein_accession: str = "" def get_fields(self): return [self.feat_type, self.name, self.range_start, self.range_stop, self.protein_accession] def name_val(self): return self.protein_accession if self.protein_accession else self.name def find_genes_by_loc(gff3_db, csvout, assm_acc, seq_acc, start, stop, extra_fields): found_genes = [] feat_types = ('gene', 'pseudogene') for gene in gff3_db.region(seqid=seq_acc, start=start, end=stop, featuretype=feat_types, completely_within=False): gene_name = gene.attributes.get('Name', None)[0] prot_acc = "" if gene.attributes['gene_biotype'][0] == 'protein_coding': cds = list(gff3_db.children(gene, featuretype='CDS')) prot_acc = cds[0].attributes.get('protein_id', None)[0] geneobj = Gene( gene.id, gene.featuretype, gene_name, gene.chrom, gene.strand, gene.start, gene.stop, prot_acc, ) csvout.writerow([assm_acc, seq_acc, start, stop, *extra_fields, *geneobj.get_fields()]) found_genes.append(geneobj) return found_genes class FindGenesByLoc: default_packages_dir = os.path.join('var', 'data', 'packages') def __init__(self): parser = argparse.ArgumentParser() parser.add_argument('--packages-dir', type=str, default=self.default_packages_dir, help=f'root of input data directory [{self.default_packages_dir}]') parser.add_argument('--locs', type=str, help='file containing genomic locations') self.args = parser.parse_args() self.writer = csv.writer(sys.stdout, dialect='excel-tab') def read_data(self): for row in csv.reader(iter(sys.stdin.readline, ''), dialect='excel-tab'): yield row def run(self): for assm_acc, seq_acc, start, stop, *extra in 
self.read_data(): self.find_all_for_location(assm_acc, seq_acc, start, stop, extra) def process_loc_for_gff(self, zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields): with tempfile.NamedTemporaryFile() as tmpfile: tmpfile.write(zin.read(gff_fname)) db = gffutils.create_db( tmpfile.name, dbfn=':memory:', force=True, keep_order=True, merge_strategy='merge', sort_attribute_values=True ) find_genes_by_loc(db, self.writer, assm_acc, seq_acc, start, stop, extra_fields) def find_all_for_location(self, assm_acc, seq_acc, start, stop, extra_fields): zip_file = get_zip_file_for_acc(assm_acc, self.args.packages_dir) try: with zipfile.ZipFile(zip_file, 'r') as zin: catalog = retrieve_data_catalog(zin) gff_files = get_catalog_files(catalog, dataset_catalog_pb2.File.FileType.GFF3) for assm_acc, gff_files in gff_files.items(): report = retrieve_assembly_report(zin, catalog, assm_acc) for gff_fname in gff_files: self.process_loc_for_gff(zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields) except zipfile.BadZipFile: print(f'{zip_file} is not a zip file') if __name__ == '__main__': FindGenesByLoc().run()
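FindGenesByLoc.run above reads tab-separated rows of assembly accession, sequence accession, start, and stop (plus optional extra columns) from stdin. A quick sketch of producing input in that shape (the accessions and coordinates are made up for illustration):

import csv
import io

rows = [
    ("GCF_000005845.2", "NC_000913.3", "100000", "105000"),
    ("GCF_000005845.2", "NC_000913.3", "2500000", "2510000"),
]

buf = io.StringIO()
csv.writer(buf, dialect="excel-tab").writerows(rows)

# buf.getvalue() is the text FindGenesByLoc.run() expects on stdin.
print(buf.getvalue(), end="")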
1.4375
1
src/Products/CMFCore/tests/test_DirectoryView.py
fdiary/Products.CMFCore
3
1796
############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Unit tests for DirectoryView module. """ import sys import unittest import warnings from os import mkdir from os import remove from os.path import join from tempfile import mktemp from App.config import getConfiguration from . import _globals from .base.dummy import DummyFolder from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): """ These test that, no matter what is stored in their dirpath, FSDV's will do their best to find an appropriate skin and only do nothing in the case where an appropriate skin can't be found. """ def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing if given a really wacky path def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers to a non-existing path %r' % file) self.assertTrue(text in str(w[-1].message)) # this test tests that registerDirectory creates keys in the right format. 
def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews # also test registration of directory views doesn't barf pass def test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews # appears as a DirectoryViewSurrogate due # to Acquisition hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test that "artifact" files and dirs are ignored for name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not ignored' % name) def test_surrogate_writethrough(self): # CMF Collector 316: It is possible to cause ZODB writes because # setting attributes on the non-persistent surrogate writes them # into the persistent DirectoryView as well. This is bad in situations # where you only want to store markers and remove them before the # transaction has ended - they never got removed because there was # no equivalent __delattr__ on the surrogate that would clean up # the persistent DirectoryView as well. fs = self.ob.fake_skin test_foo = 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that "artifact" files and dirs are ignored, # even when a custom ignore list is used; and that the # custom ignore list is also honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView # This is nasty, but there is no way to unregister anything # right now... 
metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine if metadata shows up correctly on a # FSDV that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata shows up correctly on a # FSDV that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that a folder inside the fake skin really is of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register a different class under the fake meta_type # "FOLDER" and test again... from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate the FSDV data we need to remove and # register again, that way the newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self) # add a method to the fake skin folder self._writeFile('test2.py', "return 'test2'") # edit the test1 method self._writeFile('test1.py', "return 'new test1'") # add a new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a method added to the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if an edited method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make sure a deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that if we delete a method, then add it back, # then edit it, the DirectoryView notices. # This exercises yet another Win32 mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to the fake skin folder self._writeFile('test2.py', "return 'test2.2'", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', "return 'test2.3'", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file, which we need to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests)) return suite
1.4375
1
SLHCUpgradeSimulations/Configuration/python/aging.py
ckamtsikis/cmssw
852
1924
<gh_stars>100-1000 import FWCore.ParameterSet.Config as cms # handle normal mixing or premixing def getHcalDigitizer(process): if hasattr(process,'mixData'): return process.mixData if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'): return process.mix.digitizers.hcal return None def getHGCalDigitizer(process,section): if hasattr(process,'mix') and hasattr(process.mix,'digitizers'): if section == 'EE' and hasattr(process.mix.digitizers,'hgceeDigitizer'): return process.mix.digitizers.hgceeDigitizer elif section == 'FH' and hasattr(process.mix.digitizers,'hgchefrontDigitizer'): return process.mix.digitizers.hgchefrontDigitizer elif section == 'BH' and hasattr(process.mix.digitizers,'hgchebackDigitizer'): return process.mix.digitizers.hgchebackDigitizer elif section == 'HFNose' and hasattr(process.mix.digitizers,'hfnoseDigitizer'): return process.mix.digitizers.hfnoseDigitizer return None # change assumptions about lumi rate def setScenarioHLLHC(module,scenarioHLLHC): if scenarioHLLHC=="nominal": from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_nominal module.years = _years_LHC + _years_HLLHC_nominal elif scenarioHLLHC=="ultimate": from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_ultimate module.years = _years_LHC + _years_HLLHC_ultimate return module # turnon = True enables default, False disables # recalibration and darkening always together def ageHB(process,turnon,scenarioHLLHC): if turnon: from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HBDarkeningEP process.HBDarkeningEP = HBDarkeningEP process.HBDarkeningEP = setScenarioHLLHC(process.HBDarkeningEP,scenarioHLLHC) hcaldigi = getHcalDigitizer(process) if hcaldigi is not None: hcaldigi.HBDarkening = cms.bool(turnon) if hasattr(process,'es_hardcode'): process.es_hardcode.HBRecalibration = cms.bool(turnon) return process def ageHE(process,turnon,scenarioHLLHC): if turnon: from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HEDarkeningEP process.HEDarkeningEP = HEDarkeningEP process.HEDarkeningEP = setScenarioHLLHC(process.HEDarkeningEP,scenarioHLLHC) hcaldigi = getHcalDigitizer(process) if hcaldigi is not None: hcaldigi.HEDarkening = cms.bool(turnon) if hasattr(process,'es_hardcode'): process.es_hardcode.HERecalibration = cms.bool(turnon) return process def ageHF(process,turnon): hcaldigi = getHcalDigitizer(process) if hcaldigi is not None: hcaldigi.HFDarkening = cms.bool(turnon) if hasattr(process,'es_hardcode'): process.es_hardcode.HFRecalibration = cms.bool(turnon) return process def agedHFNose(process,algo=0): from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HFNose_setEndOfLifeNoise process = HFNose_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo) return process def agedHGCal(process,algo=0): from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setEndOfLifeNoise process = HGCal_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo) return process def realisticHGCalStartup(process): from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setRealisticStartupNoise process = HGCal_setRealisticStartupNoise(process) return process # needs lumi to set proper ZS thresholds (tbd) def ageSiPM(process,turnon,lumi): process.es_hardcode.hbUpgrade.doRadiationDamage = turnon process.es_hardcode.heUpgrade.doRadiationDamage = turnon # todo: determine ZS threshold adjustments # adjust PF thresholds for increased noise # based on: 
https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg hcal_lumis = [300, 1000, 3000, 4500, 1e10] hcal_thresholds = { 300: { "seed": [0.5, 0.625, 0.75, 0.75], "rec": [0.4, 0.5, 0.6, 0.6], }, 1000: { "seed": [1.0, 1.5, 1.5, 1.5], "rec": [0.8, 1.2, 1.2, 1.2], }, 3000: { "seed": [1.25, 2.5, 2.5, 2.5], "rec": [1.0, 2.0, 2.0, 2.0], }, 4500: { "seed": [1.5, 3.0, 3.0, 3.0], "rec": [1.25, 2.5, 2.5, 2.5], }, } ctmodules = ['calotowermaker','caloTowerForTrk','caloTowerForTrkPreSplitting','towerMaker','towerMakerWithHO'] for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]): if lumi >= hcal_lumi and lumi < hcal_lumis[ilumi+1]: if hasattr(process,'particleFlowClusterHBHE'): process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold = hcal_thresholds[hcal_lumi]["seed"] process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = hcal_thresholds[hcal_lumi]["rec"] process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = hcal_thresholds[hcal_lumi]["rec"] process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"] process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"] if hasattr(process,'particleFlowClusterHCAL'): process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"] if hasattr(process,'particleFlowRecHitHBHE'): process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold = hcal_thresholds[hcal_lumi]["rec"] for ctmod in ctmodules: if hasattr(process,ctmod): getattr(process,ctmod).HBThreshold1 = hcal_thresholds[hcal_lumi]["rec"][0] getattr(process,ctmod).HBThreshold2 = hcal_thresholds[hcal_lumi]["rec"][1] getattr(process,ctmod).HBThreshold = hcal_thresholds[hcal_lumi]["rec"][-1] break return process def ageHcal(process,lumi,instLumi,scenarioHLLHC): hcaldigi = getHcalDigitizer(process) if hcaldigi is not None: hcaldigi.DelivLuminosity = cms.double(float(lumi)) # integrated lumi in fb-1 # these lines need to be further activated by turning on 'complete' aging for HF if hasattr(process,'g4SimHits'): process.g4SimHits.HCalSD.InstLuminosity = cms.double(float(instLumi)) process.g4SimHits.HCalSD.DelivLuminosity = cms.double(float(lumi)) # recalibration and darkening always together if hasattr(process,'es_hardcode'): process.es_hardcode.iLumi = cms.double(float(lumi)) # functions to enable individual subdet aging process = ageHB(process,True,scenarioHLLHC) process = ageHE(process,True,scenarioHLLHC) process = ageHF(process,True) process = ageSiPM(process,True,lumi) return process def turn_on_HB_aging(process): process = ageHB(process,True,"") return process def turn_off_HB_aging(process): process = ageHB(process,False,"") return process def turn_on_HE_aging(process): process = ageHE(process,True,"") return process def turn_off_HE_aging(process): process = ageHE(process,False,"") return process def turn_on_HF_aging(process): process = ageHF(process,True) return process def turn_off_HF_aging(process): process = ageHF(process,False) return process def turn_off_SiPM_aging(process): process = ageSiPM(process,False,0.0) return process def hf_complete_aging(process): if hasattr(process,'g4SimHits'): process.g4SimHits.HCalSD.HFDarkening = cms.untracked.bool(True) hcaldigi = getHcalDigitizer(process) if hcaldigi 
is not None: hcaldigi.HFDarkening = cms.untracked.bool(False) return process def ageEcal(process,lumi,instLumi): if hasattr(process,'g4SimHits'): #these lines need to be further activiated by tuning on 'complete' aging for ecal process.g4SimHits.ECalSD.InstLuminosity = cms.double(instLumi) process.g4SimHits.ECalSD.DelivLuminosity = cms.double(float(lumi)) # available conditions ecal_lumis = [300,1000,3000,4500] ecal_conditions = [ ['EcalIntercalibConstantsRcd','EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'], ['EcalIntercalibConstantsMCRcd','EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'], ['EcalLaserAPDPNRatiosRcd','EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'], ['EcalPedestalsRcd','EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'], ['EcalTPGLinearizationConstRcd','EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'], ] # update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf ecal_thresholds = { 300 : 0.103, 1000 : 0.175, 3000 : 0.435, 4500 : 0.707, } ecal_seed_multiplier = 2.5 # try to get conditions if int(lumi) in ecal_lumis: if not hasattr(process.GlobalTag,'toGet'): process.GlobalTag.toGet=cms.VPSet() for ecal_condition in ecal_conditions: process.GlobalTag.toGet.append(cms.PSet( record = cms.string(ecal_condition[0]), tag = cms.string(ecal_condition[1].format(int(lumi))), connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS") ) ) if hasattr(process,"particleFlowClusterECALUncorrected"): _seeds = process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector for iseed in range(0,len(_seeds)): if _seeds[iseed].detector.value()=="ECAL_BARREL": _seeds[iseed].seedingThreshold = cms.double(ecal_thresholds[int(lumi)]*ecal_seed_multiplier) _clusters = process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector for icluster in range(0,len(_clusters)): if _clusters[icluster].detector.value()=="ECAL_BARREL": _clusters[icluster].gatheringThreshold = cms.double(ecal_thresholds[int(lumi)]) return process def ecal_complete_aging(process): if hasattr(process,'g4SimHits'): process.g4SimHits.ECalSD.AgeingWithSlopeLY = cms.untracked.bool(True) if hasattr(process,'ecal_digi_parameters'): process.ecal_digi_parameters.UseLCcorrection = cms.untracked.bool(False) return process def customise_aging_300(process): process=ageHcal(process,300,5.0e34,"nominal") process=ageEcal(process,300,5.0e34) return process def customise_aging_1000(process): process=ageHcal(process,1000,5.0e34,"nominal") process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process,1000,5.0e34) return process def customise_aging_3000(process): process=ageHcal(process,3000,5.0e34,"nominal") process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process,3000,5.0e34) process=agedHGCal(process) process=agedHFNose(process) return process def customise_aging_3000_ultimate(process): process=ageHcal(process,3000,7.5e34,"ultimate") process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process,3000,7.5e34) process=agedHGCal(process) process=agedHFNose(process) return process def customise_aging_4500_ultimate(process): process=ageHcal(process,4500,7.5e34,"ultimate") process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process,4500,7.5e34) process=agedHGCal(process) 
process=agedHFNose(process) return process
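ageSiPM above selects PF seeding and rec-hit thresholds by integrated luminosity bracket: a lumi value falls into the highest bracket whose lower edge it reaches. A tiny standalone sketch of that bracket lookup (the helper name is hypothetical; the bracket edges are copied from the table above):

hcal_lumis = [300, 1000, 3000, 4500, 1e10]

def pick_bracket(lumi):
    # Mirror the loop in ageSiPM: first bracket with edge <= lumi < next edge.
    for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]):
        if hcal_lumi <= lumi < hcal_lumis[ilumi + 1]:
            return hcal_lumi
    return None

assert pick_bracket(450) == 300     # 300 fb-1 conditions apply
assert pick_bracket(3000) == 3000   # an exact edge uses its own bracket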
1.140625
1
chroma-manager/tests/utils/__init__.py
GarimaVishvakarma/intel-chroma
0
2052
import time import datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs): "Monkey patch an object's attributes, restoring them after the block." stored = {} for name in attrs: stored[name] = getattr(obj, name) setattr(obj, name, attrs[name]) try: yield finally: for name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): "Print elapsed time of a block, if over optional threshold." start = time.time() try: yield finally: elapsed = time.time() - start if elapsed >= threshold: print datetime.timedelta(seconds=elapsed), msg
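The patch helper above swaps attributes in for the duration of a with-block and restores the originals afterwards. A brief usage sketch, assuming the patch context manager defined in this module (the Settings class is invented for illustration):

class Settings(object):
    retries = 3
    timeout = 30

with patch(Settings, retries=10, timeout=5):
    # Overrides are visible only inside the block.
    assert Settings.retries == 10 and Settings.timeout == 5

# On exit the original values are restored.
assert Settings.retries == 3 and Settings.timeout == 30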
2.03125
2
tao_compiler/mlir/disc/tests/glob_op_test.bzl
JamesTheZ/BladeDISC
328
2180
# Test definitions for Lit, the LLVM test runner. # # This is reusing the LLVM Lit test runner in the interim until the new build # rules are upstreamed. # TODO(b/136126535): remove this custom rule. """Lit runner globbing test """ load("//tensorflow:tensorflow.bzl", "filegroup") load("@bazel_skylib//lib:paths.bzl", "paths") load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts") # Default values used by the test runner. _default_test_file_exts = ["mlir", ".pbtxt", ".td"] _default_driver = "@llvm-project//mlir:run_lit.sh" _default_size = "small" _default_tags = [] # These are patterns which we should never match, for tests, subdirectories, or # test input data files. _ALWAYS_EXCLUDE = [ "**/LICENSE.txt", "**/README.txt", "**/lit.local.cfg", # Exclude input files that have spaces in their names, since bazel # cannot cope with such "targets" in the srcs list. "**/* *", "**/* */**", ] def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties): """Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir. Note that, due to Bazel's hermetic builds, lit only sees the tests that are included in the `data` parameter, regardless of what other tests might exist in the directory searched. Args: name: str, the name of the test, including extension. data: [str], the data input to the test. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """ name_without_suffix = test_file[0].split('.')[0] local_test_files = name + ".test_files" filegroup( name = local_test_files, srcs = native.glob([ "data/" + name_without_suffix + "*.mlir", ]), ) tf_cc_test( name = name, srcs = test_file, size = size, deps = [ "//tensorflow/compiler/mlir/disc/tests:mlir_feature_test", "//tensorflow/core:test", "//tensorflow/core:test_main", "//tensorflow/core:testlib", ], data = [":" + local_test_files] + data + [ "//tensorflow/compiler/mlir/disc:disc_compiler_main", "//tensorflow/compiler/mlir:tf-mlir-translate", "//tensorflow/compiler/mlir:tf-opt", ], ) def glob_op_tests( exclude = [], test_file_exts = _default_test_file_exts, default_size = _default_size, size_override = {}, data = [], per_test_extra_data = {}, default_tags = _default_tags, tags_override = {}, driver = _default_driver, features = [], exec_properties = {}): """Creates all plausible Lit tests (and their inputs) under this directory. Args: exclude: [str], paths to exclude (for tests and inputs). test_file_exts: [str], extensions for files that are tests. default_size: str, the test size for targets not in "size_override". size_override: {str: str}, sizes to use for specific tests. data: [str], additional input data to the test. per_test_extra_data: {str: [str]}, extra data to attach to a given file. default_tags: [str], additional tags to attach to the test. tags_override: {str: str}, tags to add to specific tests. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. exec_properties: a dictionary of properties to pass on. """ # Ignore some patterns by default for tests and input data. exclude = _ALWAYS_EXCLUDE + exclude tests = native.glob( ["*." 
+ ext for ext in test_file_exts], exclude = exclude, ) # Run tests individually such that errors can be attributed to a specific # failure. for i in range(len(tests)): curr_test = tests[i] # Instantiate this test with updated parameters. lit_test( name = curr_test, data = data + per_test_extra_data.get(curr_test, []), size = size_override.get(curr_test, default_size), tags = default_tags + tags_override.get(curr_test, []), driver = driver, features = features, exec_properties = exec_properties, ) def lit_test( name, data = [], size = _default_size, tags = _default_tags, driver = _default_driver, features = [], exec_properties = {}): """Runs test files under lit. Args: name: str, the name of the test. data: [str], labels that should be provided as data inputs. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """ _run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
1.15625
1
devtools/api/health.py
ankeshkhemani/devtools
0
2308
import datetime from fastapi import APIRouter router = APIRouter() @router.get("", tags=["health"]) async def get_health(): return { "results": [], "status": "success", "timestamp": datetime.datetime.now().timestamp() }
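The health router above returns a success payload with a timestamp. A quick illustrative check with FastAPI's TestClient, assuming the router is mounted under a /health prefix (the app wiring shown here is an assumption, not taken from this package):

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(router, prefix="/health")

client = TestClient(app)
response = client.get("/health")
assert response.status_code == 200
assert response.json()["status"] == "success"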
1.03125
1
withdrawal/floor_ceiling.py
hoostus/prime-harvesting
23
2436
<filename>withdrawal/floor_ceiling.py<gh_stars>10-100 from decimal import Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount = self.rate * self.portfolio.value self.initial_amount = amount return amount def next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor) amount = min(amount, ceiling) return amount
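FloorCeiling above withdraws a fixed rate of the current portfolio, but clamps the result between 90% and 125% of the inflation-adjusted first-year amount. A worked numeric sketch of that clamping rule (the portfolio values and inflation figure are invented):

from decimal import Decimal

rate, floor, ceiling = Decimal("0.05"), Decimal("0.9"), Decimal("1.25")

initial_amount = rate * Decimal("1000000")        # 50,000 in the first year

# Later: the portfolio has fallen to 700,000 and cumulative inflation is 10%.
raw = rate * Decimal("700000")                    # 35,000 before clamping
base = initial_amount * Decimal("1.10")           # 55,000 inflation-adjusted
clamped = min(max(raw, base * floor), base * ceiling)

assert clamped == base * floor                    # 49,500: the floor binds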
2.515625
3
ding/hpc_rl/wrapper.py
davide97l/DI-engine
1
2564
<filename>ding/hpc_rl/wrapper.py import importlib from ditk import logging from collections import OrderedDict from functools import wraps import ding ''' Overview: `hpc_wrapper` is the wrapper for functions which are supported by hpc. If a function is wrapped by it, we will search for its hpc type and return the function implemented by hpc. We will use the following code as a sample to introduce `hpc_wrapper`: ``` @hpc_wrapper(shape_fn=shape_fn_dntd, namedtuple_data=True, include_args=[0,1,2,3], include_kwargs=['data', 'gamma', 'v_min', 'v_max'], is_cls_method=False) def dist_nstep_td_error( data: namedtuple, gamma: float, v_min: float, v_max: float, n_atom: int, nstep: int = 1, ) -> torch.Tensor: ... ``` Parameters: - shape_fn (:obj:`function`): a function which return the shape needed by hpc function. In fact, it returns all args that the hpc function needs. - nametuple_data (:obj:`bool`): If True, when hpc function is called, it will be called as hpc_function(*nametuple). If False, nametuple data will remain its `nametuple` type. - include_args (:obj:`list`): a list of index of the args need to be set in hpc function. As shown in the sample, include_args=[0,1,2,3], which means `data`, `gamma`, `v_min` and `v_max` will be set in hpc function. - include_kwargs (:obj:`list`): a list of key of the kwargs need to be set in hpc function. As shown in the sample, include_kwargs=['data', 'gamma', 'v_min', 'v_max'], which means `data`, `gamma`, `v_min` and `v_max` will be set in hpc function. - is_cls_method (:obj:`bool`): If True, it means the function we wrap is a method of a class. `self` will be put into args. We will get rid of `self` in args. Besides, we will use its classname as its fn_name. If False, it means the function is a simple method. Q&A: - Q: Is `include_args` and `include_kwargs` need to be set at the same time? - A: Yes. `include_args` and `include_kwargs` can deal with all type of input, such as (data, gamma, v_min=v_min, v_max=v_max) and (data, gamma, v_min, v_max). - Q: What is `hpc_fns`? - A: Here we show a normal `hpc_fns`: ``` hpc_fns = { 'fn_name1': { 'runtime_name1': hpc_fn1, 'runtime_name2': hpc_fn2, ... }, ... } ``` Besides, `per_fn_limit` means the max length of `hpc_fns[fn_name]`. When new function comes, the oldest function will be popped from `hpc_fns[fn_name]`. 
''' hpc_fns = {} per_fn_limit = 3 def register_runtime_fn(fn_name, runtime_name, shape): fn_name_mapping = { 'gae': ['hpc_rll.rl_utils.gae', 'GAE'], 'dist_nstep_td_error': ['hpc_rll.rl_utils.td', 'DistNStepTD'], 'LSTM': ['hpc_rll.torch_utils.network.rnn', 'LSTM'], 'ppo_error': ['hpc_rll.rl_utils.ppo', 'PPO'], 'q_nstep_td_error': ['hpc_rll.rl_utils.td', 'QNStepTD'], 'q_nstep_td_error_with_rescale': ['hpc_rll.rl_utils.td', 'QNStepTDRescale'], 'ScatterConnection': ['hpc_rll.torch_utils.network.scatter_connection', 'ScatterConnection'], 'td_lambda_error': ['hpc_rll.rl_utils.td', 'TDLambda'], 'upgo_loss': ['hpc_rll.rl_utils.upgo', 'UPGO'], 'vtrace_error': ['hpc_rll.rl_utils.vtrace', 'VTrace'], } fn_str = fn_name_mapping[fn_name] cls = getattr(importlib.import_module(fn_str[0]), fn_str[1]) hpc_fn = cls(*shape).cuda() if fn_name not in hpc_fns: hpc_fns[fn_name] = OrderedDict() hpc_fns[fn_name][runtime_name] = hpc_fn while len(hpc_fns[fn_name]) > per_fn_limit: hpc_fns[fn_name].popitem(last=False) # print(hpc_fns) return hpc_fn def hpc_wrapper(shape_fn=None, namedtuple_data=False, include_args=[], include_kwargs=[], is_cls_method=False): def decorate(fn): @wraps(fn) def wrapper(*args, **kwargs): if ding.enable_hpc_rl: shape = shape_fn(args, kwargs) if is_cls_method: fn_name = args[0].__class__.__name__ else: fn_name = fn.__name__ runtime_name = '_'.join([fn_name] + [str(s) for s in shape]) if fn_name not in hpc_fns or runtime_name not in hpc_fns[fn_name]: hpc_fn = register_runtime_fn(fn_name, runtime_name, shape) else: hpc_fn = hpc_fns[fn_name][runtime_name] if is_cls_method: args = args[1:] clean_args = [] for i in include_args: if i < len(args): clean_args.append(args[i]) nouse_args = list(set(list(range(len(args)))).difference(set(include_args))) clean_kwargs = {} for k, v in kwargs.items(): if k in include_kwargs: if k == 'lambda_': k = 'lambda' clean_kwargs[k] = v nouse_kwargs = list(set(kwargs.keys()).difference(set(include_kwargs))) if len(nouse_args) > 0 or len(nouse_kwargs) > 0: logging.warn( 'in {}, index {} of args are dropped, and keys {} of kwargs are dropped.'.format( runtime_name, nouse_args, nouse_kwargs ) ) if namedtuple_data: data = args[0] # args[0] is a namedtuple return hpc_fn(*data, *clean_args[1:], **clean_kwargs) else: return hpc_fn(*clean_args, **clean_kwargs) else: return fn(*args, **kwargs) return wrapper return decorate
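The docstring above lays out the hpc_wrapper contract: shape_fn returns the constructor arguments for the HPC kernel, and include_args/include_kwargs select which runtime inputs are forwarded. A toy sketch of applying the decorator to a plain function (every name below is hypothetical; a real op must also appear in fn_name_mapping for the HPC path to resolve):

def toy_shape_fn(args, kwargs):
    # Derive whatever constructor arguments the HPC kernel would need.
    data = kwargs.get('data', args[0] if args else None)
    return list(data.shape)

@hpc_wrapper(shape_fn=toy_shape_fn, namedtuple_data=False,
             include_args=[0], include_kwargs=['data'], is_cls_method=False)
def toy_op(data):
    # Fallback path used whenever ding.enable_hpc_rl is False.
    return data.sum()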
1.695313
2
resolwe/__init__.py
plojyon/resolwe
27
2692
""".. Ignore pydocstyle D400. ======= Resolwe ======= Open source enterprise dataflow engine in Django. """ from resolwe.__about__ import ( # noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__, __version__, )
0.660156
1
tcapygen/layoutgen.py
Ahrvo-Trading-Systems/tcapy
189
2820
from __future__ import division, print_function __author__ = 'saeedamen' # <NAME> / <EMAIL> # # Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro # # See the License for the specific language governing permissions and limitations under the License. # ## Web server components import dash_core_components as dcc import dash_html_components as html import base64 import os ## Date/time components import pandas as pd import datetime from datetime import timedelta from collections import OrderedDict from pandas.tseries.offsets import * from tcapy.vis.layoutdash import LayoutDash ######################################################################################################################## class LayoutDashImplGen(LayoutDash): """This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates two web pages - detailed_page - for doing detailed tcapy analysis for a specific currency pair - aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods """ def __init__(self, app=None, constants=None, url_prefix=''): super(LayoutDashImplGen, self).__init__(app=app, constants=constants, url_prefix=url_prefix) available_dates = pd.date_range( datetime.datetime.today().date() - timedelta(days=self._constants.gui_lookback_window), datetime.datetime.today().date(), freq=BDay()) times = pd.date_range("0:00", "23:59", freq="15min") ### create the possible values for drop down boxes on both pages # Reverse date list (for both detailed and aggregated pages) self.available_dates = [x.date() for x in available_dates[::-1]] # For detailed page only self.available_times = [t.strftime("%H:%M") for t in times] self.available_tickers = self._constants.available_tickers_dictionary['All'] self.available_venues = self._constants.available_venues_dictionary['All'] self.available_brokers = self._constants.available_brokers_dictionary['All'] self.available_algos = self._constants.available_algos_dictionary['All'] self.available_market_data = self._constants.available_market_data self.available_order_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'arrival', 'twap', 'vwap', 'buy trade', 'sell trade'] self.available_execution_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'buy trade', 'sell trade'] self.available_slippage_bounds = ['0.25', '0.5', '1.0', '1.25', '1.5', '2.0', 'bid/ask'] # For aggregated page only self.available_grouped_tickers = self._flatten_dictionary(self._constants.available_tickers_dictionary) self.available_grouped_venues = self._flatten_dictionary(self._constants.available_venues_dictionary) self.available_grouped_brokers = self._flatten_dictionary(self._constants.available_brokers_dictionary) self.available_grouped_algos = self._flatten_dictionary(self._constants.available_algos_dictionary) self.available_event_types = self._constants.available_event_types self.available_metrics = self._constants.available_metrics self.available_reload = ['no', 'yes'] self.available_visualization = ['yes', 'no'] self.construct_layout() def _flatten_dictionary(self, dictionary): available = dictionary['All'] available_groups = self._util_func.dict_key_list(dictionary.keys()) return self.flatten_list_of_strings([available_groups, available]) def construct_layout(self): self.page_content = html.Div([ dcc.Location(id='url', refresh=False), html.Div(id='page-content') ]) link_bar_dict = {'Detailed' : 'detailed', 'Aggregated' : 'aggregated', 'Compliance' : 'compliance'} trade_outliers_cols = 
['Date', 'ticker', 'side', 'notional cur', 'benchmark', 'exec not', 'exec not in rep cur', 'slippage'] broker_cols = ['Date', 'by broker notional (rep cur)'] # Main page for detailed analysing of (eg. over the course of a few days) self.pages['detailed'] = html.Div([ self._sc.header_bar('FX: Detailed - Trader Analysis', img='logo.png'), self._sc.link_bar(link_bar_dict), self._sc.width_row_cell(html.B("Status: ok", id='detailed-status'), margin_left=5), self._sc.horizontal_bar(), # Dropdown selection boxes html.Div([ self._sc.drop_down(caption='Start Date', id={'start-date-val' : self.available_dates, 'start-time-val' : self.available_times}, prefix_id='detailed'), self._sc.drop_down(caption='Finish Date', id=OrderedDict([('finish-date-val', self.available_dates), ('finish-time-val', self.available_times)]), prefix_id='detailed'), self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='detailed', drop_down_values=self.available_tickers), self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='detailed', drop_down_values=self.available_grouped_brokers), self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='detailed', drop_down_values=self.available_grouped_algos), self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='detailed', drop_down_values=self.available_grouped_venues), self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='detailed', drop_down_values=self.available_market_data), self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='detailed', drop_down_values=self.available_metrics) ]), self._sc.horizontal_bar(), self._sc.button(caption='Calculate', id='calculation-button', prefix_id='detailed'), # self.button(caption = 'Print PDF', id = 'detailed-print-pdf-button', className = 'no-print'), # Orders self._sc.horizontal_bar(), self._sc.plot(caption='Orders: Timeline', id='order-candle-timeline-plot', prefix_id='detailed', element_add=self._sc.timeline_dropdown('detailed-order-candle-timeline-plot', self.available_order_plot_lines), downloadplot_caption='Download CSV', downloadplot_tag='order-candle-timeline-download-link', download_file='download_order_candle_timeline', height=500), self._sc.plot(caption='Orders: Markout', id='order-markout-plot', prefix_id='detailed', height=500), self._sc.plot(caption='Orders: Histogram vs PDF fit', id='order-dist-plot', prefix_id='detailed', height=500), # Execution trades self._sc.horizontal_bar(), self._sc.plot(caption='Executions: Timeline', id='execution-candle-timeline-plot', prefix_id='detailed', element_add=self._sc.timeline_dropdown('detailed-execution-candle-timeline-plot', self.available_execution_plot_lines), downloadplot_caption='Download CSV', downloadplot_tag='execution-candle-timeline-download-link', download_file='download_execution_candle_timeline.csv', height=500), self._sc.plot(caption='Executions: Markout', id='execution-markout-plot', prefix_id='detailed', height=500), self._sc.plot(caption='Executions: Histogram vs PDF fit', id='execution-dist-plot', prefix_id='detailed', height=500), # Detailed tcapy markout table for executions html.Div([ html.H3('Executions: Markout Table'), html.Div(id='detailed-execution-table') ], style={'width': '1000px', 'display': 'inline-block', 'marginBottom': 5, 'marginTop': 5, 'marginLeft': 5, 'marginRight': 5}), ], style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'}) ################################################################################################################ # Secondary page for analysing 
aggregated statistics over long periods of time, eg. who is the best broker? self.pages['aggregated'] = html.Div([ self._sc.header_bar('FX: Aggregated - Trader Analysis', img='logo.png'), self._sc.link_bar(link_bar_dict), self._sc.width_row_cell(html.B("Status: ok", id='aggregated-status'), margin_left=5), self._sc.horizontal_bar(), # dropdown selection boxes html.Div([ self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='aggregated', drop_down_values=self.available_dates), self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='aggregated', drop_down_values=self.available_dates), self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='aggregated', drop_down_values=self.available_grouped_tickers, multiselect=True), self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='aggregated', drop_down_values=self.available_grouped_brokers, multiselect=True), self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='aggregated', drop_down_values=self.available_grouped_algos, multiselect=True), self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='aggregated', drop_down_values=self.available_grouped_venues, multiselect=True), self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='aggregated', drop_down_values=self.available_reload), self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='aggregated', drop_down_values=self.available_market_data), self._sc.drop_down(caption='Event Type', id='event-type-val', prefix_id='aggregated', drop_down_values=self.available_event_types), self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='aggregated', drop_down_values=self.available_metrics), ]), self._sc.horizontal_bar(), self._sc.button(caption='Calculate', id='calculation-button', prefix_id='aggregated'), # , msg_id='aggregated-status'), self._sc.horizontal_bar(), # self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]), self._sc.plot(caption='Aggregated Trader: Summary', id=['execution-by-ticker-bar-plot', 'execution-by-venue-bar-plot'], prefix_id='aggregated', height=500), self._sc.horizontal_bar(), self._sc.plot(caption='Aggregated Trader: Timeline', id='execution-by-ticker-timeline-plot', prefix_id='aggregated', height=500), self._sc.horizontal_bar(), self._sc.plot(caption='Aggregated Trader: PDF fit (' + self._constants.reporting_currency + ' notional)', id=['execution-by-ticker-dist-plot', 'execution-by-venue-dist-plot'], prefix_id='aggregated', height=500), self._sc.horizontal_bar() ], style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'}) ################################################################################################################ self.pages['compliance'] = html.Div([ self._sc.header_bar('FX: Compliance Analysis', img='logo.png'), self._sc.link_bar(link_bar_dict), self._sc.width_row_cell(html.B("Status: ok", id='compliance-status'), margin_left=5), self._sc.horizontal_bar(), # Dropdown selection boxes html.Div([ self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='compliance', drop_down_values=self.available_dates), self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='compliance', drop_down_values=self.available_dates), self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='compliance', drop_down_values=self.available_grouped_tickers, multiselect=True), self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='compliance', 
drop_down_values=self.available_grouped_brokers, multiselect=True), self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='compliance', drop_down_values=self.available_grouped_algos, multiselect=True), self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='compliance', drop_down_values=self.available_grouped_venues, multiselect=True), self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='compliance', drop_down_values=self.available_reload), self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='compliance', drop_down_values=self.available_market_data), self._sc.drop_down(caption='Filter by Time', id='filter-time-of-day-val', prefix_id='compliance', drop_down_values=self.available_reload), self._sc.drop_down(caption='Start Time of Day', id='start-time-of-day-val', prefix_id='compliance', drop_down_values=self.available_times), self._sc.drop_down(caption='Finish Time of Day', id='finish-time-of-day-val', prefix_id='compliance', drop_down_values=self.available_times), self._sc.drop_down(caption='Slippage to Mid (bp)', id='slippage-bounds-val', prefix_id='compliance', drop_down_values=self.available_slippage_bounds), self._sc.drop_down(caption='Visualization', id='visualization-val', prefix_id='compliance', drop_down_values=self.available_visualization) ]), self._sc.horizontal_bar(), html.Div([ self._sc.button(caption='Calculate', id='calculation-button', prefix_id='compliance'), # self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'), # self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'), ]), self._sc.horizontal_bar(), self._sc.table(caption='Compliance: Trade Outliers', id='execution-by-anomalous-table', prefix_id='compliance', columns=trade_outliers_cols, downloadplot_caption='Trade outliers CSV', downloadplot_tag='execution-by-anomalous-download-link', download_file='download_execution_by_anomalous.csv'), self._sc.table(caption='Compliance: Totals by Broker', id='summary-by-broker-table', prefix_id='compliance', columns=broker_cols, downloadplot_caption='Download broker CSV', downloadplot_tag='summary-by-broker-download-link', download_file='download_broker.csv' ), self._sc.horizontal_bar() ], style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'}) # ID flags self.id_flags = { # Detailed trader page # 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'}, # 'markout_trade_orders' : {'client-orders': 'order_df', 'executions': 'trade_df'}, 'detailed_candle_timeline_trade_order': {'execution': 'sparse_market_trade_df', 'order': 'sparse_market_order_df'}, 'detailed_markout_trade_order': {'execution': 'trade_df', 'order': 'order_df'}, 'detailed_table_trade_order': {'execution': 'table_trade_df_markout_by_all'}, 'detailed_dist_trade_order': {'execution': 'dist_trade_df_by/pdf/side', 'order': 'dist_order_df_by/pdf/side'}, 'detailed_download_link_trade_order': {'execution-candle-timeline': 'sparse_market_trade_df', 'order-candle-timeline': 'sparse_market_order_df'}, # Aggregated trader page 'aggregated_bar_trade_order': {'execution-by-ticker': 'bar_trade_df_by/mean/ticker', 'execution-by-venue': 'bar_trade_df_by/mean/venue'}, 'aggregated_timeline_trade_order': {'execution-by-ticker': 'timeline_trade_df_by/mean_date/ticker', 'execution-by-venue': 'timeline_trade_df_by/mean_date/venue'}, 'aggregated_dist_trade_order': {'execution-by-ticker': 'dist_trade_df_by/pdf/ticker', 'execution-by-venue': 'dist_trade_df_by/pdf/venue'}, # Compliance page 
'compliance_metric_table_trade_order': {'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all', 'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'}, 'compliance_download_link_trade_order': {'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all', 'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'}, }
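Editor's note: the layout above is assembled entirely from `self._sc` helper components (drop_down, plot, table, button, etc.) supplied by the parent LayoutDash machinery. As a rough, hypothetical sketch of what such a helper presumably wraps - not tcapy's actual implementation - a caption-plus-dropdown cell in plain Dash might look like the following; the function name `simple_drop_down` and its internals are illustrative assumptions only.

import dash_core_components as dcc
import dash_html_components as html

def simple_drop_down(caption, id, prefix_id, drop_down_values):
    # Pair a caption with a Dropdown whose id is namespaced by the page prefix,
    # mirroring the drop_down(caption=..., id=..., prefix_id=...,
    # drop_down_values=...) calls used in the layout above.
    return html.Div([
        html.P(caption),
        dcc.Dropdown(
            id='{}-{}'.format(prefix_id, id),
            options=[{'label': str(v), 'value': str(v)} for v in drop_down_values],
            value=str(drop_down_values[0]) if drop_down_values else None,
        ),
    ])

# Example (hypothetical values): simple_drop_down('Ticker', 'ticker-val', 'detailed', ['EURUSD', 'GBPUSD'])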
1.796875
2
geolucidate/functions.py
kurtraschke/geolucidate
3
2948
<filename>geolucidate/functions.py # -*- coding: utf-8 -*- from decimal import Decimal, setcontext, ExtendedContext from geolucidate.links.google import google_maps_link from geolucidate.links.tools import MapLink from geolucidate.parser import parser_re setcontext(ExtendedContext) def _cleanup(parts): """ Normalize up the parts matched by :obj:`parser.parser_re` to degrees, minutes, and seconds. >>> _cleanup({'latdir': 'south', 'longdir': 'west', ... 'latdeg':'60','latmin':'30', ... 'longdeg':'50','longmin':'40'}) ['S', '60', '30', '00', 'W', '50', '40', '00'] >>> _cleanup({'latdir': 'south', 'longdir': 'west', ... 'latdeg':'60','latmin':'30', 'latdecsec':'.50', ... 'longdeg':'50','longmin':'40','longdecsec':'.90'}) ['S', '60', '30.50', '00', 'W', '50', '40.90', '00'] """ latdir = (parts['latdir'] or parts['latdir2']).upper()[0] longdir = (parts['longdir'] or parts['longdir2']).upper()[0] latdeg = parts.get('latdeg') longdeg = parts.get('longdeg') latmin = parts.get('latmin', '00') or '00' longmin = parts.get('longmin', '00') or '00' latdecsec = parts.get('latdecsec', '') longdecsec = parts.get('longdecsec', '') if (latdecsec and longdecsec): latmin += latdecsec longmin += longdecsec latsec = '00' longsec = '00' else: latsec = parts.get('latsec', '') or '00' longsec = parts.get('longsec', '') or '00' return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec] def _convert(latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec): """ Convert normalized degrees, minutes, and seconds to decimal degrees. Quantize the converted value based on the input precision and return a 2-tuple of strings. >>> _convert('S','50','30','30','W','50','30','30') ('-50.508333', '-50.508333') >>> _convert('N','50','27','55','W','127','27','65') ('50.459167', '-127.460833') """ if (latsec != '00' or longsec != '00'): precision = Decimal('0.000001') elif (latmin != '00' or longmin != '00'): precision = Decimal('0.001') else: precision = Decimal('1') latitude = Decimal(latdeg) latmin = Decimal(latmin) latsec = Decimal(latsec) longitude = Decimal(longdeg) longmin = Decimal(longmin) longsec = Decimal(longsec) if latsec > 59 or longsec > 59: # Assume that 'seconds' greater than 59 are actually a decimal # fraction of minutes latitude += (latmin + (latsec / Decimal('100'))) / Decimal('60') longitude += (longmin + (longsec / Decimal('100'))) / Decimal('60') else: latitude += (latmin + (latsec / Decimal('60'))) / Decimal('60') longitude += (longmin + (longsec / Decimal('60'))) / Decimal('60') if latdir == 'S': latitude *= Decimal('-1') if longdir == 'W': longitude *= Decimal('-1') lat_str = str(latitude.quantize(precision)) long_str = str(longitude.quantize(precision)) return (lat_str, long_str) def replace(string, sub_function=google_maps_link()): """ Replace detected coordinates with a map link, using the given substitution function. The substitution function will be passed a :class:`~.MapLink` instance, and should return a string which will be substituted by :func:`re.sub` in place of the detected coordinates. 
>>> replace("58147N/07720W") '<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>' >>> replace("5814N/07720W", google_maps_link('satellite')) '<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>' >>> from geolucidate.links.bing import bing_maps_link >>> replace("58N/077W", bing_maps_link('map')) '<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>' """ def do_replace(match): original_string = match.group() (latitude, longitude) = _convert(*_cleanup(match.groupdict())) return sub_function(MapLink(original_string, latitude, longitude)) return parser_re.sub(do_replace, string) def get_replacements(string, sub_function=google_maps_link()): """ Return a dict whose keys are instances of :class:`re.Match` and whose values are the corresponding replacements. Use :func:`get_replacements` when the replacement cannot be performed through ordinary string substitution by :func:`re.sub`, as in :func:`replace`. >>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W") ... #doctest: +ELLIPSIS {<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'} >>> test_string = "4630 NORTH 5705 WEST 58147N/07720W" >>> replacements = get_replacements(test_string) >>> offset = 0 >>> out = bytearray(test_string, encoding="ascii", errors="replace") >>> for (match, link) in replacements.items(): ... start = match.start() + offset ... end = match.end() + offset ... out[start:end] = bytearray(link, encoding="ascii", errors="replace") ... offset += (len(link) - len(match.group())) >>> out.decode(encoding="ascii") == replace(test_string) True """ substitutions = {} matches = parser_re.finditer(string) for match in matches: (latitude, longitude) = _convert(*_cleanup(match.groupdict())) substitutions[match] = sub_function(MapLink(match.group(), latitude, longitude)) return substitutions
1.640625
2
apps/orders/models.py
LinkanDawang/FreshMallDemo
0
3076
from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import GoodsSKU

# Create your models here.


class OrderInfo(BaseModel):
    """Order information."""
    PAY_METHOD = ['1', '2']

    PAY_METHOD_CHOICES = (
        (1, "Cash on delivery"),
        (2, "Alipay"),
    )

    ORDER_STATUS_CHOICES = (
        (1, "Awaiting payment"),
        (2, "Awaiting shipment"),
        (3, "Awaiting delivery"),
        (4, "Awaiting review"),
        (5, "Completed"),
    )
    """---------Order information------------------------"""
    PAY_METHODS = {
        1: "Cash on delivery",
        2: "Alipay",
    }

    ORDER_STATUS = {
        1: "Awaiting payment",
        2: "Awaiting shipment",
        3: "Awaiting delivery",
        4: "Awaiting review",
        5: "Completed",
    }

    PAY_METHODS_ENUM = {
        "CASH": 1,
        "ALIPAY": 2
    }

    ORDER_STATUS_ENUM = {
        "UNPAID": 1,
        "UNSEND": 2,
        "UNRECEIVED": 3,
        "UNCOMMENT": 4,
        "FINISHED": 5
    }

    order_id = models.CharField(max_length=64, primary_key=True, verbose_name="Order number")
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Ordering user")
    address = models.ForeignKey(Address, on_delete=models.CASCADE, verbose_name="Delivery address")
    total_count = models.IntegerField(default=1, verbose_name="Total item count")
    total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Total amount")
    trans_cost = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Shipping cost")
    pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="Payment method")
    status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="Order status")
    trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="Payment transaction ID")

    class Meta:
        db_table = "df_order_info"


class OrderGoods(BaseModel):
    """Order line item."""
    order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="Order")
    sku = models.ForeignKey(GoodsSKU, on_delete=models.CASCADE, verbose_name="Ordered SKU")
    count = models.IntegerField(default=1, verbose_name="Quantity")
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Unit price")
    comment = models.TextField(default="", verbose_name="Review")

    class Meta:
        db_table = "df_order_goods"
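Editor's note: the *_ENUM and status/payment-method dictionaries above let calling code refer to order states by name rather than by bare integers. A hypothetical usage sketch follows; it assumes these models belong to an installed Django app importable as `orders.models` (inferred from the apps/orders/models.py path, not confirmed) and that migrations have been applied.

from orders.models import OrderInfo

# Select orders that are still awaiting payment, using the symbolic enum
# value instead of the literal integer 1.
unpaid_orders = OrderInfo.objects.filter(
    status=OrderInfo.ORDER_STATUS_ENUM["UNPAID"]
)

for order in unpaid_orders:
    # ORDER_STATUS maps the stored integer back to its display label.
    print(order.order_id, OrderInfo.ORDER_STATUS[order.status])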
1.585938
2
quaesit/agent.py
jgregoriods/quaesit
0
3204
<gh_stars>0 import inspect from math import hypot, sin, asin, cos, radians, degrees from abc import ABCMeta, abstractmethod from random import randint, choice from typing import Dict, List, Tuple, Union class Agent(metaclass=ABCMeta): """ Class to represent an agent in an agent-based model. """ _id = 0 colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange', 'pink', 'purple', 'red', 'yellow'] def __init__(self, world, coords: Tuple = None): self._id = Agent._id Agent._id += 1 self.world = world self.coords = coords or (randint(0, self.world.width - 1), randint(0, self.world.height - 1)) self.direction = 90 self.breed = self.__class__.__name__.lower() self.icon = '.' self.color = choice(self.colors) self.world.add_agent(self) def die(self): """ Remove the agent from the world. """ del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): """ Creates an agent and initializes it with the same parameters as oneself. """ sig = inspect.signature(self.__init__) filter_keys = [param.name for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key in filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords: Tuple): """ Places the agent in a different cell of the world grid. """ self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def cell_here(self, layer = None): """ Returns the value of a layer in the model's grid for the cell where the agent is. If no layer is specified, the values of all layers are returned. """ if layer is not None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self, coords: Tuple) -> int: """ Returns the distance (in cells) from the agent to a pair of coordinates. """ x, y = coords return round(hypot((x - self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self, radius: int) -> Dict: """ Returns all cells and respective attributes within a distance of the agent. """ if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] - radius, self.coords[0] + radius + 1) for y in range(self.coords[1] - radius, self.coords[1] + radius + 1) if self.get_distance((x, y)) <= radius} else: neighborhood = {(x, y): self.world.grid[(x, y)] for x in range(self.coords[0] - radius, self.coords[0] + radius + 1) for y in range(self.coords[1] - radius, self.coords[1] + radius + 1) if (self.get_distance((x, y)) <= radius and (x, y) in self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius: int) -> Dict: """ Returns all empty cells (with no agents on them) and respective attributes within a distance of the agent. 
""" if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] - radius, self.coords[0] + radius + 1) for y in range(self.coords[1] - radius, self.coords[1] + radius + 1) if (self.get_distance((x, y)) <= radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood = {(x, y): self.world.grid[(x, y)] for x in range(self.coords[0] - radius, self.coords[0] + radius + 1) for y in range(self.coords[1] - radius, self.coords[1] + radius + 1) if (self.get_distance((x, y)) <= radius and (x, y) in self.world.grid and not self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self, cells: Union[List, Dict]) -> Tuple: """ Given a list or dictionary of cells, returns the coordinates of the cell that is nearest to the agent. """ dists = {cell: self.get_distance(cell) for cell in cells} return min(dists, key=dists.get) def agents_in_radius(self, radius: int): """ Returns all agents within a distance of oneself. """ neighborhood = self.cells_in_radius(radius) neighbors = [agent for coords in neighborhood for agent in self.world.grid[coords]['agents'] if agent is not self] return neighbors def agents_here(self) -> List: """ Returns all agents located on the same cell as oneself. """ return [agent for agent in self.world.grid[self.coords]['agents'] if agent is not self] def nearest_agent(self, agents: List = None): """ Given a list of agents, returns the agent that is nearest to oneself. If no list is provided, all agents are evaluated. """ if agents is None: agents = [self.world.agents[_id] for _id in self.world.agents] dists = {agent: self.get_distance(agent.coords) for agent in agents if agent is not self} return min(dists, key=dists.get) def turn_right(self, angle: int = 90): """ Rotates the agent's direction a number of degrees to the right. """ self.direction = round((self.direction - angle) % 360) def turn_left(self, angle: int = 90): """ Rotates the agent's direction a number of degrees to the left. """ self.direction = round((self.direction + angle) % 360) def forward(self, n_steps: int = 1): """ Moves the agent a number of cells forward in the direction it is currently facing. """ x = round(self.coords[0] + cos(radians(self.direction)) * n_steps) y = round(self.coords[1] + sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y) in self.world.grid: self.move_to((x, y)) def face_towards(self, coords: Tuple): """ Turns the agent's direction towards a given pair of coordinates. """ if coords != self.coords: xdif = coords[0] - self.coords[0] ydif = coords[1] - self.coords[1] dist = hypot(xdif, ydif) angle = degrees(asin(ydif / dist)) if xdif < 0: self.direction = round(180 - angle) else: self.direction = round((360 + angle) % 360) def random_walk(self, n_steps: int = 1): """ Moves the agent one cell forward in a random direction for a number of times. """ for i in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self): """ Methods to be performed by the agent at each step of the simulation. """ raise NotImplementedError
2.890625
3
tools/load_demo_data.py
glenn2763/skyportal
0
3332
import datetime import os import subprocess import base64 from pathlib import Path import shutil import pandas as pd import signal import requests from baselayer.app.env import load_env from baselayer.app.model_util import status, create_tables, drop_tables from social_tornado.models import TornadoStorage from skyportal.models import init_db, Base, DBSession, Source, User from skyportal.model_util import setup_permissions, create_token from skyportal.tests import api from baselayer.tools.test_frontend import verify_server_availability if __name__ == "__main__": """Insert test data""" env, cfg = load_env() basedir = Path(os.path.dirname(__file__)) / ".." with status(f"Connecting to database {cfg['database']['database']}"): init_db(**cfg["database"]) with status("Dropping all tables"): drop_tables() with status("Creating tables"): create_tables() for model in Base.metadata.tables: print(" -", model) with status(f"Creating permissions"): setup_permissions() with status(f"Creating dummy users"): super_admin_user = User( username="<EMAIL>", role_ids=["Super admin"] ) group_admin_user = User( username="<EMAIL>", role_ids=["Super admin"] ) full_user = User(username="<EMAIL>", role_ids=["Full user"]) view_only_user = User( username="<EMAIL>", role_ids=["View only"] ) DBSession().add_all( [super_admin_user, group_admin_user, full_user, view_only_user] ) for u in [super_admin_user, group_admin_user, full_user, view_only_user]: DBSession().add( TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2") ) with status("Creating token"): token = create_token( [ "Manage groups", "Manage sources", "Upload data", "Comment", "Manage users", ], super_admin_user.id, "load_demo_data token", ) def assert_post(endpoint, data): response_status, data = api("POST", endpoint, data, token) if not response_status == 200 and data["status"] == "success": raise RuntimeError( f'API call to {endpoint} failed with status {status}: {data["message"]}' ) return data with status("Launching web app & executing API calls"): try: response_status, data = api("GET", "sysinfo", token=token) app_already_running = True except requests.ConnectionError: app_already_running = False web_client = subprocess.Popen( ["make", "run"], cwd=basedir, preexec_fn=os.setsid ) server_url = f"http://localhost:{cfg['ports.app']}" print() print(f"Waiting for server to appear at {server_url}...") try: verify_server_availability(server_url) print("App running - continuing with API calls") with status("Creating dummy group & adding users"): data = assert_post( "groups", data={ "name": "Stream A", "group_admins": [ super_admin_user.username, group_admin_user.username, ], }, ) group_id = data["data"]["id"] for u in [view_only_user, full_user]: data = assert_post( f"groups/{group_id}/users/{u.username}", data={"admin": False} ) with status("Creating dummy instruments"): data = assert_post( "telescope", data={ "name": "Palomar 1.5m", "nickname": "P60", "lat": 33.3633675, "lon": -116.8361345, "elevation": 1870, "diameter": 1.5, "group_ids": [group_id], }, ) telescope1_id = data["data"]["id"] data = assert_post( "instrument", data={ "name": "P60 Camera", "type": "phot", "band": "optical", "telescope_id": telescope1_id, }, ) instrument1_id = data["data"]["id"] data = assert_post( "telescope", data={ "name": "Nordic Optical Telescope", "nickname": "NOT", "lat": 28.75, "lon": 17.88, "elevation": 1870, "diameter": 2.56, "group_ids": [group_id], }, ) telescope2_id = data["data"]["id"] data = assert_post( "instrument", data={ "name": "ALFOSC", "type": 
"both", "band": "optical", "telescope_id": telescope2_id, }, ) with status("Creating dummy sources"): SOURCES = [ { "id": "14gqr", "ra": 353.36647, "dec": 33.646149, "redshift": 0.063, "group_ids": [group_id], "comments": [ "No source at transient location to R>26 in LRIS imaging", "Strong calcium lines have emerged.", ], }, { "id": "16fil", "ra": 322.718872, "dec": 27.574113, "redshift": 0.0, "group_ids": [group_id], "comments": ["Frogs in the pond", "The eagle has landed"], }, ] (basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True) for source_info in SOURCES: comments = source_info.pop("comments") data = assert_post("sources", data=source_info) assert data["data"]["id"] == source_info["id"] for comment in comments: data = assert_post( "comment", data={"source_id": source_info["id"], "text": comment}, ) phot_file = basedir / "skyportal/tests/data/phot.csv" phot_data = pd.read_csv(phot_file) data = assert_post( "photometry", data={ "source_id": source_info["id"], "time_format": "iso", "time_scale": "utc", "instrument_id": instrument1_id, "observed_at": phot_data.observed_at.tolist(), "mag": phot_data.mag.tolist(), "e_mag": phot_data.e_mag.tolist(), "lim_mag": phot_data.lim_mag.tolist(), "filter": phot_data["filter"].tolist(), }, ) spec_file = os.path.join( os.path.dirname(os.path.dirname(__file__)), "skyportal", "tests", "data", "spec.csv", ) spec_data = pd.read_csv(spec_file) for i, df in spec_data.groupby("instrument_id"): data = assert_post( "spectrum", data={ "source_id": source_info["id"], "observed_at": str(datetime.datetime(2014, 10, 24)), "instrument_id": 1, "wavelengths": df.wavelength.tolist(), "fluxes": df.flux.tolist(), }, ) for ttype in ["new", "ref", "sub"]: fname = f'{source_info["id"]}_{ttype}.png' fpath = basedir / f"skyportal/tests/data/{fname}" thumbnail_data = base64.b64encode( open(os.path.abspath(fpath), "rb").read() ) data = assert_post( "thumbnail", data={ "source_id": source_info["id"], "data": thumbnail_data, "ttype": ttype, }, ) source = Source.query.get(source_info["id"]) source.add_linked_thumbnails() finally: if not app_already_running: print("Terminating web app") os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
1.34375
1
tools.py
Jakuko99/effectb
1
3460
from calendar import month_name class Tools: def __init__(self): self.output = "" def formatDate(self, date): elements = date.split("-") return f"{elements[2]}. {month_name[int(elements[1])]} {elements[0]}" def shortenText(self, string, n): #return first n sentences from string first = string.find(".") for _ in range(n - 1): if not string.find(".", first + 1) == -1: first = string.find(".", first + 1) return f"{string[:first-len(string)]}." def tupleUnpack(self, tup): self.output = "" for item in tup: self.output += f"{item} " return self.output[:-1] def joinList(self, list): self.output = "" for item in list: self.output += f"{item}, " return self.output[:-2] #remove last ', ' def partialJoin(self, list, n): self.output = "" i = 0 for item in list: self.output += f"{item}, " i += 1 if i >= n: break return self.output[:-2] def processFilmography(self, list, n): self.output = "" i = 0 for item in list: if 'year' in item: self.output += f"{item['title']} ({item['year']}), " else: self.output += f"{item['title'].replace(' ()', '')}, " i += 1 if i >= n: break return self.output[:-2] def convertTime(self, runtime): time = int(runtime) mins = time % 60 hours = int(time / 60) if hours >= 1: return f"{hours} h {mins} min" else: return f"{mins} min"
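Editor's note: for reference, a few illustrative calls to the class above with their expected return values (assuming the Tools class is in scope):

tools = Tools()

tools.formatDate("2021-03-07")               # '07. March 2021'
tools.convertTime("135")                     # '2 h 15 min'
tools.convertTime("45")                      # '45 min'
tools.joinList(["EUR", "USD", "GBP"])        # 'EUR, USD, GBP'
tools.shortenText("One. Two. Three.", 2)     # 'One. Two.'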
2.46875
2