hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c906663e816567788a872d79ad4e2f03fb4244fb | 12,019 | py | Python | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 34 | 2017-10-18T06:09:16.000Z | 2022-03-21T18:53:16.000Z | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 52 | 2017-10-19T13:35:39.000Z | 2021-06-03T08:54:55.000Z | python/loom_viewer/loom_cli.py | arao11/pattern_viz | 3123f19a127c9775fadcca25f83aebfc8dc3b9f9 | [
"BSD-2-Clause"
] | 6 | 2018-05-28T06:16:26.000Z | 2020-08-17T11:49:34.000Z | #!/usr/bin/env python
# Copyright (c) 2016 Sten Linnarsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import *
from mypy_extensions import NoReturn
import sys
import os
import argparse
import logging
import warnings
import loompy
from ._version import __version__
from .loom_expand import LoomExpand
from .loom_datasets import def_dataset_dir, LoomDatasets
from .loom_server import start_server
if __name__ == "__main__":
main()
| 26.473568 | 144 | 0.719611 |
c907566de3410b8c828deb59e531487549202dc6 | 1,260 | py | Python | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | 1 | 2021-12-25T04:08:53.000Z | 2021-12-25T04:08:53.000Z | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | null | null | null | test_function.py | will-huynh/process_controller | e193c80976ef1d35fb9e661425bf609a86a313c8 | [
"MIT"
] | null | null | null | import logging
import tcp_log_socket
logging_socket = tcp_log_socket.local_logging_socket(__name__)
logger = logging_socket.logger
#Test method simulating a method with required arguments; division is used to test exception handling
#Test method simulating a method with no required arguments
#Test method simulating an argument with keyworded and optional arguments
| 37.058824 | 102 | 0.692857 |
c908908fcda77dbed54b6f285d7d03c69d799dc0 | 3,154 | py | Python | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | 1 | 2020-09-10T11:26:05.000Z | 2020-09-10T11:26:05.000Z | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | users/views.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
#####################################################################
from django.http import HttpResponse
from django.contrib.auth import login, authenticate
from .forms import UserRegisterForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
| 38.463415 | 88 | 0.642676 |
c909851fe73dcfad421fb6354ea395215029d6a8 | 689 | py | Python | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 62 | 2015-03-25T15:56:38.000Z | 2021-01-07T21:32:27.000Z | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 73 | 2015-02-13T16:02:31.000Z | 2021-01-17T19:35:10.000Z | tests/test-vext-pth.py | NomAnor/vext | adea4b593ae4c82da0965ec1addaa1cd6d5b396c | [
"MIT"
] | 8 | 2016-01-24T16:16:46.000Z | 2020-09-23T17:56:47.000Z | import os
import unittest
from vext.install import DEFAULT_PTH_CONTENT
if __name__ == "__main__":
unittest.main()
| 28.708333 | 73 | 0.683599 |
c9098d28bd2a0a51fc33c4cd5fecc41dc7fc38ec | 2,196 | py | Python | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 97 | 2016-12-14T16:48:49.000Z | 2021-09-12T17:48:10.000Z | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 38 | 2016-12-13T09:42:38.000Z | 2020-07-05T11:58:07.000Z | stats/monitor.py | pawankaushal/crossbar-examples | b6e0cc321bad020045c4fafec091f78abd938618 | [
"Apache-2.0"
] | 118 | 2016-12-12T21:36:40.000Z | 2021-11-17T11:49:33.000Z | import argparse
from pprint import pformat
import txaio
txaio.use_twisted()
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d',
'--debug',
action='store_true',
help='Enable debug output.')
parser.add_argument('--url',
dest='url',
type=str,
default="ws://localhost:8080/ws",
help='The router URL (default: "ws://localhost:8080/ws").')
parser.add_argument('--realm',
dest='realm',
type=str,
default="realm1",
help='The realm to join (default: "realm1").')
args = parser.parse_args()
if args.debug:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
runner = ApplicationRunner(url=args.url, realm=args.realm)
runner.run(ClientSession, auto_reconnect=True)
| 31.826087 | 89 | 0.583789 |
c90c7861eaff4add66e4d61ef78a76a073959d73 | 29,349 | py | Python | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 2 | 2020-10-08T17:03:45.000Z | 2021-03-09T17:49:44.000Z | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 17 | 2020-09-24T17:35:38.000Z | 2020-12-11T16:10:13.000Z | spirou/sandbox/fits2ramp.py | clairem789/apero-utils | 68ed0136a36b6badeaf15eb20d673052ad79a949 | [
"MIT"
] | 5 | 2020-04-10T06:41:00.000Z | 2020-12-16T21:09:14.000Z | #!/usr/bin/env python2.7
# Version date : Aug 21, 2018
#
# --> very minor correction compared to previous version. As keywords may change in files through time, when we delete
# a keyword, we first check if the keyword is preseent rather than "blindly" deleting it
# --> also corrected integer vs float divisions in refpixcor. This ensures python3 compatibility
#
# Version date : May 29, 2018
#
# --> The first frame is used as a "bias" for all subsequent readouts
# Subsequent frames are corrected for reference pixels
# This significantly improves the quality of the error measurement
# --> The top/bottom reference pixels are always corrected in odd/even manner, not as a constant offset for odd/even columns
# --> We now perform the non-linearity measurement
# --> All the "print" statement have been made consistent with the python3
# --> Add the "selfbias" keyword. This option uses the 1st readout as a bias estimate. This allows ref pixel correction per frame
#
# Version date : Mar 23, 2018
#
# --> corrects an error in the ref pixels
# --> Nothing changed to the input syntax compared to previous versions
#
# - accepts both H2RG and H4RG data. The size of the images is determined
# from the calibration files given in input, avoiding hardcoding the size
# of the input images. I removed all references to dim1 and dim2 (x and y size of
# images) as we will always have square images. This is now simply imdim. Imdim can
# only be equal to 2048 or 4096. If not, then something is really wrong and the code exits
# with a message.
#
# - uses pixels on the side of the array and not only top/bottom ones
# filters 1/f noise with side pixels. Important for the H4RG data
#
# - ramp algorithm significantly faster as we took some variable handling out the big loop. Does not
# change the output values in the end. sx and sx2 are now determined only at the end of the
# loop on image by using the timestamp vector combined with the n variable. Saves ~0.5s per readout
#
# - medians are now handling nans properly; avoids problems in rare cases when a nan appears in the
# ref pixel region. nanmedian exists in python3 but not python2, so I defined the function
# here. When we'll switch to p3, we can simply delete this function and we won't
# need to modify the code itself. We'll juste need : import numpy.nanmedian as nanmedian
#
# - if the bias frame is set entirely to zero (mostly for debugging purpose), then we avoid
# subtracting zeros to the entire image and save ~0.1s per image.
#
# - ref pixel filtering is defined as a function. This was done at two places in the
# code.
#
# - the reference pixel function is much faster thanks to some more clever handling
# of variables.
#
# - the flux in the "mask" region used now uses np.nanmean instead of mean. This avoids
# having a NaN flux measurement in the posemeter. It also avoids problems when writing
# the posemeter values in the header as one cannot have a NaN as a keyword value.
#
# - we now have an ascii output per iteration that tell you how long each frame took to
# process and how long is left before the end of the big loop. On our machine, the
# average for an H2RG image with the "-noerror" keyword (faster) is slightly less than
# 1 s per image.
#
#
# Now includes the following options :
#
# -n=XXX -> Will only perform the ramp fitting on the first XXX readouts of the array
# This can be used to simulate a shorter sequence. This could be useful to get the
# dark that exactly matches the integration time of a given science sequence. Say you
# have a dark of 100 frames but a science sequence of 20 frames, you may want to only use
# the first 20 frames of the dark to get exactly the same statistical properties as in your
# science sequence.
# -cube -> set this to get an output cube with all readouts. Use only if you want to examine the readouts.
# -linearize -> corrects for non-linearity. Do not use this keyword to speed things up. We don't have the liearity coefficients in hand anyway
# -noerror -> do not compute the error on slope. This seeds-up the code as we need to read the images only once.
# -noref -> Skip all reference pixel corrections entirely
# -selfbias -> subtract the 1st readout from all subsequent readouts to allow ref pixel correction per frame
# -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
from array import *
import glob
import os
# import pyfits --> rendered obsolete by the use of the more recent astropy.io.fits
import time
import sys
import scipy.ndimage.filters
from astropy.io import fits as pyfits
from scipy.stats.stats import pearsonr
# will be set to True if selfbias=True. If we use a file for bias (later update?) then this will also
# change the dobias to True
dobias = False
arg=np.asarray(sys.argv)
arg=arg[1:] # first argument is simply the name of the program and needs to be removed
write_cube = sum(arg=='-cube') ==1. # if set, then we will write cube, if not, then we skip this step that may be long
skip_error = sum(arg=='-noerror') ==1. # if set, we skip slope error
skip_ref = sum(arg=='-noref') ==1. # if set, we skip reference pixel corrections
linearize = sum(arg=='-linearize') ==1. # if set, we correct for non-linearity
selfbias = sum(arg=='-selfbias') ==1. # if set, we correct ref pixels on a frame-to-frame basis
nmax_set=False
for argn in arg:
if (argn)[0:3] == '-n=':
nmax_set=True
dim3=np.int( (argn)[3:] )
# here we remove arguments with a "-"
keep=np.zeros(len(arg))
for i in range(len(arg)):
keep[i] = (arg[i])[0] != '-'
arg=arg[keep ==1] # keep only params not beginning with a "-"
if len(arg)>=1:
odometer = arg[0] # first argument after program and flags is the output name
fic = arg[1:]
if len(fic)>=1:
h = pyfits.getheader(fic[0])
h2=h
mef_flag=0 # file is a MEF flag
cubefits_flag=0 # file is a CUBE flag
if len(fic) ==1:
naxis =h['naxis']
if naxis ==0:
mef_flag=1# we have a flag to know that the input file is a MEF and that extensions need to be read from there
if naxis==3:
cubefits_flag=1#this is a cuube
exists = np.zeros(len(fic),dtype=bool)
for i in range(len(fic)):
exists[i] = os.path.isfile(fic[i])
if np.sum(exists ==0) !=0:
print('some files given as inputs do not exist')
print('missing file(s) --')
print('')
missing=fic[exists !=1]
for i in range(len(missing)):
print(missing[i])
print('')
print('... you way also have given some erroneous input, double check your inputs dude!')
sys.exit()
if len(sys.argv) <=2:
print('***** !!! warning, something went wrong !!! *****')
print('')
print(' ----- you can provide a list of files as an input -----')
print('')
print('syntax : python fits2ramp.py outname directory/file*.fits -cube -noerror -linearize')
print('')
print('')
print(' the argument after the "outname" must be the files to combine')
print(' with the ramp-fitting algorithm. ex: 20170322140210/H2RG_R01_M01_N08*.fits ')
print(' should also accept *.fits.gz files')
print(' you need at least two files in the wildcard. You can also expliclty')
print(' name the files you combine.')
print(' The syntax would be :')
print(' python fits2ramp.py outname file1.fits file2.fits ... fileN.fits')
print('')
print(' ----- you can also provide a single file that has a MEF format -----')
print('')
print('syntax : python fits2ramp.py outname mef_file*.fits -cube -noerror -linearize')
print('')
print(' if you provide an outname and a single fits file, then we know its a MEF')
print('')
print(' if you provide a -n=XXXX then only the first XXXX readouts within the MEF')
print('')
print(' will be used for slope fitting')
print(' ---- some more options ----' )
print('')
print(' -cube saves all slices in a cube. This is slower and takes disk space')
print(' -noerror does not compute the slope error. This is faster.' )
print(' -linearize corrects for non-linearity. This is slower but more accurate.')
print('')
print(' If all goes well, the programs outputs 2 files: ')
print(' outnameo.fits ')
print(' ... ext=1, ramp frame' )
print(' ... ext=2, ramp intercept')
print(' ... ext=3, ramp error' )
print(' ... ext=4, ramp # valid frames')
print(' ... every where, NaN values trace saturated pixel')
print(' outnamer.fits.gz')
print(' ... cube with as many slices as there are files in the wildcard above')
print(' ... outnamer.fits.gz contains the same info as the files' )
print(' ... this is only done if we pass the "-cube" argument')
print('')
sys.exit()
#################################################################
#################################################################
# We need the size of the image. Should be 2048 or 4096 (H2RG/H4RG)
imdim=(np.shape(pyfits.getdata(fic[0])))[1]
if (imdim!=2048) and (imdim!=4096):
print('')
print('')
print(' something is really wrong with the size of the input image')
print(' the image '+fic[0]+' has a width of :',imdim,' pixel(s)')
print(' and we should only have values of 2048 or 4096 pixels')
print('')
print('')
sys.exit()
# reading the relevant calibrations
#mask = getdata(calibdir+'/mask.fits') # 0/1 mask defining the area of the science array used as pose-meter
mask=np.zeros([imdim,imdim],dtype=float) # dummy ~~~>>> will need to be changed for the H4RG
# this is the region used for the posemeter
# For SPIRou, we will have a binary mask selecting the H-band orders (science and not ref channel)
mask[1912:1938,572:777]=1
mask=np.where(mask ==1)
# non-linearity cube with 4 slices. The linearized flux will be derived from the measured flux with the
# following relation :
# F_lin = a0 + a1*(F_mea - bias) + a2*(F_mea - bias)**2 + a3*(F_mea - bias)**3
# where aN is the Nth slice of the linearity cube
# ... bias is the super-bias
# ... F_lin is the linearised flux
# ... F_mea is the measured flux
#linearity = getdata(calibdir+'/non_lin.fits') # we will use files with non-linearity correction here
# This is an operation that may be done if we do not have a bias in hand and want to
# correct non-linearity. Lets consider this under development and set it to False for now
#
linearity_saturation = pyfits.getdata('nonlin.fits')
# Slice 1 - 2nd ordre term of non-linearity correction
# Slice 2 - 3rd ordre term of non-linearity correction
linearity = linearity_saturation[0:2,:,:]
# Slice 3 - dynamical range for <20% non-linearity
saturation = linearity_saturation[2,:,:]
if mef_flag==0 and cubefits_flag==0:
if nmax_set == False:
dim3 = len(fic)
else:
if len(fic) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',len(fic),' files')
sys.exit()
if mef_flag==1:
hdulist = pyfits.open(fic[0],memmap=False) ## We will use memmap when CFHT gets rid of BZERO/BSCALE/BLANK header keywords
dims=np.shape(hdulist[1])
if nmax_set == False:
dim3= len(hdulist)-1
else:
if (len(hdulist)-1) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',len(hdulist)-1,' slices in your MEF')
sys.exit()
if cubefits_flag==1:
if nmax_set == False:
dim3 = h['naxis3']
else:
if (h['naxis3']) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',len(hdulist)-1,' slices in your cube')
sys.exit()
# delete all keywords from the reference file
del_keywords=['DATLEVEL', 'ASICGAIN', 'NOMGAIN', 'AMPRESET', 'KTCREMOV', 'SRCCUR',\
'AMPINPUT', 'V4V3V2V1', 'PDDECTOR', 'CLKOFF', 'NADCS', 'INTTIME',\
'TSTATION', 'SEQNUM_N', 'SEQNUM_M', 'CLOCKING', 'NEXTRAP','NEXTRAL', 'SEQNNAME']
for key in del_keywords:
if key in h: # as keywords may change from version to version, we check if the keyword we want to delete is present
del h[key]
del h['bias*']
timestamp=np.zeros(dim3,dtype=float)
# loop to check image size and populate header with time stamps
for i in range(dim3):
if mef_flag==0 and cubefits_flag==0: # we have a mef file, info is in the ith extension
h_tmp = pyfits.getheader(fic[i])
if 'frmtime' not in h_tmp:
h_tmp['frmtime'] = 5.24288, 'assumed integration time (s)'
if 'inttime' not in h_tmp:
h_tmp['inttime'] = 5.24288*(i+1), 'assumed frame time (s)'
timestamp[i]=h_tmp['inttime']
if cubefits_flag==1: # we have a cube, calculate from FRMTIME
timestamp[i]= (i+1)*h['frmtime'] # sets zero time at the time of reset
if mef_flag==1: # we read the ith extension
h_tmp = hdulist[i+1].header
timestamp[i]=h_tmp['inttime']
if mef_flag==0 and cubefits_flag==0:
order = np.argsort(timestamp) # who knows, the files may not be in the right order! Lets sort them according to their timestamps
fic=fic[order]
timestamp=timestamp[order]
for i in range(dim3):
tag0 = str(i+1)
if len(tag0) < 4:
tag = '0'*(4-len(tag0))+tag0
tag = 'INTT'+tag
h[tag] = (timestamp[i],'Timestamp, '+tag0+'/'+str(dim3))
if mef_flag==1:
write_cube=False
if write_cube:
cube=np.zeros([dim3,dim2,dim1],dtype=float)
print('loading all files in cube')
for i in range(dim3):
print(i+1,'/',len(fic),fic[i])
im=pyfits.getdata(fic[i])
cube[i,:,:] = im
print('writing the cube file --> '+odometer+'r.fits ')
t1 = time.time()
hcube=h2
hcube['NAXIS'] = 3
hcube['NAXIS3'] = dim3
pyfits.writeto(odometer+'r.fits', cube,header=hcube)
# This operation is somewhat long and could lead to back-log of files on a slow machine
# ... for the code development, we time it. This may be remove at a later point.
print('Duration of file writting : '+str(float(time.time()-t1))+' s')
# zipping the .fits file. Normally this could be done within pyfits.writeto, but its much, much slower
os.system('gzip -f '+odometer+'r.fits &')
print('done writing the cube file --> '+odometer+'r.fits')
print(' compressing file in background ... ')
del cube # removing cube from memory to make things lighter... unclear in necessary
else:
print('we do not write the cube file for this ramp')
# place htimestampolders for some arithmetics for the linear fit
#sx = 0#np.zeros([dim2,dim1])
#sx2 = 0#np.zeros([dim2,dim1])
sy = np.zeros([imdim,imdim],dtype=float)
n = np.zeros([imdim,imdim],dtype=np.int16)
sxy = np.zeros([imdim,imdim],dtype=float)
fmask = np.zeros(dim3,dtype=float)
# mask for pixels that are valid
goodmask = np.full((imdim,imdim),True,dtype=bool)
# when a pixels goes above saturation, it remains invalid for the rest of the ramp
if skip_error == False:
savname=['']*dim3
print(mef_flag,cubefits_flag,linearize)
t_start=time.time()
for i in range(dim3):
t0=time.time()
print(i+1,'/',dim3,' ~~~> Computing slope')
if mef_flag==0 and cubefits_flag==0: # this is a set with N files
im = pyfits.getdata(fic[i])
if mef_flag==1:
im=hdulist[i+1].data # reading the Nth extension
if cubefits_flag==1:
if i ==0:
bigcube=pyfits.getdata(fic[0]) # that's dangerous as it may overfill memory
im=bigcube[i,:,:]
im = np.array(im,dtype='float')
if selfbias and (i ==0):
bias = np.array(im)
print('setting 1st extension as a bias file')
dobias=True
goodmask = (im <= saturation)*goodmask
if dobias:
if selfbias:
print('bias subtraction with 1st readout')
else:
print('bias subtraction with provided bias file')
im-=bias
if linearize:
print('applying non-lin correction')
# first we linearize the data by applying the non-linearity coefficients and bias correction
for j in range(2):
im += linearity[j,:,:]*(im)**(j+2)
if selfbias and (skip_ref == False):
print('as we applied self-bias, we correct ref pixels')
im=refpixcorr(im)
n+= goodmask
fmask[i]=np.nanmean( im[mask])
# m*=goodmask # starting now, only the product of the two is needed. saves one multipltication
# Actually, best not fill what used to be saturated elements in the array with
# 0, which is what this did. Then, if the errslope calculation wants to check
# im <= saturation as it used to do, it will come up with the wrong answer.
# Since the first check for im <= saturation (about 20 lines above) does so
# before linearity correction and this check would be after, they could also
# come up with different answers though, unless the linearity function is
# is guaranteed to apply a correction that keeps saturation values at the same
# ADU. Since we already have n[], when the errslope calculation happens, it
# uses that, now with a simple "goodmask = (n > i)" for each i on that pass.
sy[goodmask]+= im[goodmask]#*goodmask
sxy[goodmask]+=(im[goodmask]*timestamp[i])
# here we save the non-linearity corrected images as python npz files
# we could just dump everything into a big cube to be used in the slope
# error determination. We opt to write these files to disk to avoid overfilling
# the memory. This should be safer for very large number of reads.
#
# We cannot simply re-read the fits files are the "im" variable saved in the npz has been corrected for
# non-linearity, which is NOT the case for the .fits.gz. We save the NPZ only if the data is linearized
#
# We also corrected for the bias regions of the detector, so a temporary file is necessary if we want to properly compute slope error
# and cannot afford to keep everything in memory. Keeping everything in memory may be fine for small datasets, but we want
# to avoid having a code that crashes for long sequences or on machines with less memory!
if skip_error == False:
savname[i]='.tmp'+str(i)+'.npz'
np.savez(savname[i],im=im) # this file is temporary and will be deleted after computing the slope error
dt=(time.time()-t_start)/(i+1.0)
print('dt[last image] ','{:5.2f}'.format(time.time()-t0),'s; dt[mean/image] ','{:5.2f}'.format(dt),'s; estimated time left '+'{:3.0f}'.format(np.floor((dim3-i)*dt/60))+'m'+'{:2.0f}'.format(np.floor((dim3-i)*dt % 60))+'s')
# we now have these variables outside the loop. We keep n that contains the
# number of valid reads, and directely interpolate the vector with the cumulative
# sum of timestamp and timestamp**2. Previously, we added these values to the sx and sx2
# matrices for each frame. This operation is much, much faster and equivalent.
sx=np.where(n>0,(np.cumsum(timestamp))[n-1],0)
sx2=np.where(n>0,(np.cumsum(timestamp**2))[n-1],0)
if mef_flag==1:
hdulist.close()
fmask-=fmask[0]
for i in range(dim3):
tag0 = str(i+1)
if len(tag0) < 4:
tag = '0'*(4-len(tag))+tag0
tag = 'POSE'+tag
h[tag] = (fmask[i],'Posemeter, '+tag0+'/'+str(len(fic)))
a = np.zeros([imdim,imdim],dtype=float)+np.nan # slope, NaN if not enough valid readouts
b = np.zeros([imdim,imdim],dtype=float)+np.nan # intercept
valid=n>1 # only valid where there's more than one good readout(s)
b[valid] = (sx*sxy-sx2*sy)[valid]/(sx**2-n*sx2)[valid] # algebra of the linear fit
a[valid] = (sy-n*b)[valid]/sx[valid]
# For the sake of consistency, we fix the slope, error and intercept to NaN for
# pixels that have 0 or 1 valid (i.e., not saturated) values and for which
# one cannot determine a valid slope
errslope = np.zeros([imdim,imdim],dtype=float)+np.nan
goodmask = np.full((imdim,imdim),True,dtype=bool)
if skip_error == False:
varx2 = np.zeros([imdim,imdim],dtype=float)
vary2 = np.zeros([imdim,imdim],dtype=float)
xp = np.zeros([imdim,imdim],dtype=float)
valid = (n>2)
xp[valid]=sx[valid]/n[valid] # used in the determination of error below
print('we now compute the standard error on the slope')
for i in range(dim3):
# we read the npz as this file has been linearized (if the -linearize keyword has been set)
# and we subtracted the reference regions on the array
data=np.load(savname[i])
os.system('rm '+savname[i])
im=data['im']
goodmask = (n > i)
yp = b+a*timestamp[i]
print(i+1,'/',dim3,' ~~~> Computing slope error')
varx2+= ((timestamp[i]-xp)**2)*goodmask # we multiply by goodmask so that only
vary2+= ((im-yp)**2)*goodmask
valid*=(varx2!=0) # avoid diving by zero
errslope[valid] = np.sqrt(vary2[valid]/(n[valid]-2))/np.sqrt(varx2[valid])
# deleting the temporary npz
else:
print(' We do not calculate the error on slope.')
print(' This is faster and intended for debugging but ')
print(' ultimately we will want to compute slope error ')
print(' for all files')
h['satur1']=(nanmedian(saturation),'median saturation limit in ADU')
h['satur2']=(nanmedian(saturation)/max(timestamp),'median saturation limit in ADU/s')
dfmask = fmask[1:]-fmask[0:-1] # flux received between readouts
dtimestamp = timestamp[1:]+0.5*(timestamp[-1]-timestamp[0])/(len(timestamp)-1) # mid-time of Nth readout
### we estimate the RON by checking the slope error in pixels receiving little flux
### as the orders cover ~50% of the science array, we take the median slope error of
### pixels that are below the median slope. We assume that these pixels have an RMS that is
### dominated by readout noise (TO BE CONFIRMED).
### we also clip pixels that are above 3x the median RMS
pseudodark = 0.0 # (a < np.median(a))*(errslope < 3*np.median(errslope))
ron_estimate = 0.0 #np.median(errslope[pseudodark])*(max(timestamp)-min(timestamp)) # converted into ADU instead of ADU/s
#### Standard FITS Keywords BITPIX = 16 / 16bit
h['BSCALE']=(1.0 , 'Scale factor')
#### FITS keyword related to the detector
h['RON_EST']=(ron_estimate , '[ADU] read noise estimate')
h['NSUBEXPS']=(len(fic) , 'Total number of sub-exposures of 5.5s ')
#h['TMID']= (np.sum(dtimestamp*dfmask)/np.sum(dfmask) , '[s] Flux-weighted mid-exposure time ' )
#h['CMEAN']= ( np.mean(dfmask)/(timestamp[1]-timestamp[0]), '[ADU/s] Average count posemeter' )
if skip_ref == False:
a=refpixcorr(a,oddeven=True)
a=np.float32(a)
if dobias:
# we subtracted the bias from all frames, we need to add it to the intercept
b+=bias
b=np.float32(b)
errslope=np.float32(errslope)
hdu1 = pyfits.PrimaryHDU()
hdu1.header = h
hdu1.header['NEXTEND'] = 4
hdu2 = pyfits.ImageHDU(a)
hdu2.header['UNITS'] = ('ADU/S','Slope of fit, flux vs time')
hdu2.header['EXTNAME'] = ('slope','Slope of fit, flux vs time')
hdu3 = pyfits.ImageHDU(b)
hdu3.header['UNITS'] = ('ADU','Intercept of the pixel/time fit.')
hdu3.header['EXTNAME'] = ('intercept','Intercept of the pixel/time fit.')
hdu4 = pyfits.ImageHDU(errslope)
hdu4.header['UNITS'] = ('ADU/S','Formal error on slope fit')
hdu4.header['EXTNAME'] = ('errslope','Formal error on slope fit')
hdu5 = pyfits.ImageHDU(n)
hdu5.header['UNITS'] = ('Nimages','N readouts below saturation')
hdu5.header['EXTNAME'] = ('count','N readouts below saturation')
new_hdul = pyfits.HDUList([hdu1, hdu2, hdu3, hdu4, hdu5])
# just to avoid an error message with writeto
if os.path.isfile(odometer+'.fits'):
print('file : '+odometer+'.fits exists, we are overwriting it')
os.system('rm '+odometer+'.fits')
new_hdul.writeto(odometer +'.fits', clobber=True)
print('Elapsed time for entire fits2ramp : '+str(float(time.time()-t0))+' s')
| 40.20411 | 225 | 0.665474 |
c90f386866b7264c9826cea39ffcc2b6fd5aaf00 | 394 | py | Python | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 1 | 2019-12-04T10:10:53.000Z | 2019-12-04T10:10:53.000Z | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 14 | 2019-04-07T07:46:07.000Z | 2022-03-11T23:44:31.000Z | blog/urls.py | encukou/Zpetnovazebnik | 0d058fd67049a3d42814b04486bde93bc406fa3b | [
"MIT"
] | 1 | 2019-02-16T09:25:51.000Z | 2019-02-16T09:25:51.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.course_list, name='course_list'),
path('<course_slug>/', views.session_list, name='session_list'),
path('<course_slug>/<session_slug>/', views.session_detail, name='session_detail'),
path('<course_slug>/<session_slug>/<password>/', views.add_comment_to_session, name='add_comment_to_session'),
]
| 35.818182 | 114 | 0.72335 |
c912b5b1a08a02d640553311c19b5c840ef97729 | 4,651 | py | Python | web_app/api_service.py | shayan-taheri/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 23 | 2017-11-29T17:33:30.000Z | 2021-10-15T14:51:12.000Z | web_app/api_service.py | shayan-taheri/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 1 | 2017-10-12T11:23:08.000Z | 2017-10-12T11:23:08.000Z | web_app/api_service.py | isabella232/sql_python_deep_learning | ceb2c41bcb1fed193080f64ba4da018d76166222 | [
"MIT"
] | 16 | 2017-12-21T08:55:09.000Z | 2021-03-21T20:17:40.000Z | from api import app, BAD_PARAM, STATUS_OK, BAD_REQUEST
from flask import request, jsonify, abort, make_response,render_template, json
import sys
from lung_cancer.connection_settings import get_connection_string, TABLE_SCAN_IMAGES, TABLE_GIF, TABLE_MODEL, TABLE_FEATURES, LIGHTGBM_MODEL_NAME, DATABASE_NAME,NUMBER_PATIENTS
from lung_cancer.lung_cancer_utils import get_patients_id, get_patient_id_from_index, select_entry_where_column_equals_value, get_features, get_lightgbm_model, prediction
import pyodbc
import cherrypy
from paste.translogger import TransLogger
# Connection
connection_string = get_connection_string()
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
# Model
model = get_lightgbm_model(TABLE_MODEL, cur, LIGHTGBM_MODEL_NAME)
# Functions
def is_integer(s):
    """Return True if ``s`` can be converted to an ``int``, else False.

    Also catches ``TypeError`` so non-string, non-numeric inputs
    (e.g. ``None`` or a list) report False instead of raising, which is
    what the caller's fallback logic expects.
    """
    try:
        int(s)
        return True
    except (TypeError, ValueError):
        return False
def manage_request_patient_index(patient_request):
    """Map a free-form patient request string to a patient index.

    A substring of either known demo patient name maps to a fixed index;
    a numeric request is clamped to NUMBER_PATIENTS - 1; anything else
    falls back to index 7.
    """
    request_lower = patient_request.lower()
    if request_lower in "anthony embleton":
        return 1
    if request_lower in "ana fernandez":
        return 175
    if not is_integer(patient_request):
        return 7
    index = int(patient_request)
    if index > NUMBER_PATIENTS:
        index = NUMBER_PATIENTS - 1
    return index
if __name__ == "__main__":
run_server()
conn.close() | 33.221429 | 176 | 0.723285 |
c9144a2b1a0cbf40a3d765da71a5f9435588a292 | 335 | py | Python | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | null | null | null | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | null | null | null | 10-blood/scripts/bloodMeasure.py | antl-mipt-ru/get | c914bd16131639e1af4452ae7351f2554ef83ce9 | [
"MIT"
] | 1 | 2021-10-11T16:24:32.000Z | 2021-10-11T16:24:32.000Z | import bloodFunctions as blood
import time
try:
samples = []
blood.initSpiAdc()
start = time.time()
while (time.time() - start) < 60:
samples.append(blood.getAdc())
finish = time.time()
blood.deinitSpiAdc()
blood.save(samples, start, finish)
finally:
print("Blood measure script finished") | 17.631579 | 42 | 0.641791 |
c915f05bb0ce24d1fe5469fea260ce3e99ceb13c | 5,144 | py | Python | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 77 | 2018-11-19T18:38:50.000Z | 2020-11-16T22:49:59.000Z | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 373 | 2018-11-17T16:06:06.000Z | 2020-11-20T22:55:03.000Z | bot/exts/utilities/twemoji.py | thatbirdguythatuknownot/sir-lancebot | 7fd74af261385bdf7d989f459bec4c9b0cb4392a | [
"MIT"
] | 165 | 2018-11-19T04:04:44.000Z | 2020-11-18T17:53:28.000Z | import logging
import re
from typing import Literal, Optional
import discord
from discord.ext import commands
from emoji import UNICODE_EMOJI_ENGLISH, is_emoji
from bot.bot import Bot
from bot.constants import Colours, Roles
from bot.utils.decorators import whitelist_override
from bot.utils.extensions import invoke_help_command
log = logging.getLogger(__name__)
BASE_URLS = {
"png": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/72x72/",
"svg": "https://raw.githubusercontent.com/twitter/twemoji/master/assets/svg/",
}
CODEPOINT_REGEX = re.compile(r"[a-f1-9][a-f0-9]{3,5}$")
def setup(bot: Bot) -> None:
    """Load the Twemoji cog.

    discord.py extension entry point: invoked by the framework when this
    module is loaded as an extension.
    """
    bot.add_cog(Twemoji(bot))
| 34.066225 | 110 | 0.614891 |
c916bd42a9f49b86089b3c70e101b95ec26db97d | 198 | py | Python | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | 1 | 2019-04-27T15:37:04.000Z | 2019-04-27T15:37:04.000Z | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | null | null | null | Lecture 28/Lecture28HWAssignment4.py | AtharvaJoshi21/PythonPOC | 6b95eb5bab7b28e9811e43b39e863faf2ee7565b | [
"MIT"
] | 1 | 2020-08-14T06:57:08.000Z | 2020-08-14T06:57:08.000Z | # WAP to accept a filename from user and print all words starting with capital letters.
if __name__ == "__main__":
main() | 24.75 | 87 | 0.686869 |
c916da29a2d83f2c59eacc745d8499ef2a44d2e6 | 1,215 | py | Python | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 25 | 2017-06-30T15:31:33.000Z | 2021-04-21T20:12:18.000Z | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 34 | 2017-06-07T01:18:17.000Z | 2021-04-24T09:44:00.000Z | tests/python-playground/least_abs_dev_0.py | marcocannici/scs | 799a4f7daed4294cd98c73df71676195e6c63de4 | [
"MIT"
] | 13 | 2017-06-07T01:16:09.000Z | 2021-06-07T09:12:56.000Z | # This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "least_abs_dev_0"
prob = None
opt_val = None
# Variable declarations
import scipy.sparse as sps
np.random.seed(0)
m = 5000
n = 200
A = np.random.randn(m,n);
A = A*sps.diags([1 / np.sqrt(np.sum(A**2, 0))], [0])
b = A.dot(10*np.random.randn(n) + 5*np.random.randn(1))
k = max(m//50, 1)
idx = np.random.randint(0, m, k)
b[idx] += 100*np.random.randn(k)
# Problem construction
x = cp.Variable(n)
v = cp.Variable(1)
prob = cp.Problem(cp.Minimize(cp.norm1(A*x + v*np.ones(m) - b)))
# Problem collection
# Single problem collection
problemDict = {
"problemID" : problemID,
"problem" : prob,
"opt_val" : opt_val
}
problems = [problemDict]
# For debugging individual problems:
if __name__ == "__main__":
printResults(**problems[0])
| 18.409091 | 69 | 0.650206 |
c9188684a1a8b8220b62b9249ea8815fc31f7412 | 2,621 | py | Python | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 4 | 2017-06-15T16:36:01.000Z | 2021-12-25T09:13:22.000Z | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 1 | 2018-09-28T23:32:42.000Z | 2018-09-28T23:32:42.000Z | experimentations/20-climate-data/test-perf.py | Kitware/spark-mpi-experimentation | 9432b63130059fc54843bc5ca6f2f5510e5a4098 | [
"BSD-3-Clause"
] | 6 | 2017-07-22T00:10:00.000Z | 2021-12-25T09:13:11.000Z | from __future__ import print_function
import os
import sys
import time
import gdal
import numpy as np
# -------------------------------------------------------------------------
# Files to process
# -------------------------------------------------------------------------
fileNames = [
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2006.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2007.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2008.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2009.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2010.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2011.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2012.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2013.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2014.tif',
'tasmax_day_BCSD_rcp85_r1i1p1_MRI-CGCM3_2015.tif',
]
basepath = '/data/sebastien/SparkMPI/data/gddp'
# -------------------------------------------------------------------------
# Read file and output (year|month, temp)
# -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def readFileAndCompute(fileName):
    """Compute the mean of all valid pixel values across a GeoTIFF's bands.

    The year is parsed from the trailing ``<year>.tif`` part of the file
    name. Pixel values >= 50000 are treated as no-data and excluded.

    Returns:
        (year, mean): year as a string, mean as a float (0.0 when no
        valid pixel exists, instead of dividing by zero).
    """
    year = fileName.split('_')[-1][:-4]
    print('year', year)
    dataset = gdal.Open('%s/%s' % (basepath, fileName))
    total = 0.0
    count = 0
    for bandId in range(dataset.RasterCount):
        # ReadAsArray returns a numpy array; vectorised masking replaces
        # the original per-pixel Python loop (orders of magnitude faster).
        band = dataset.GetRasterBand(bandId + 1).ReadAsArray()
        valid = band < 50000
        total += float(band[valid].sum())
        count += int(valid.sum())
    # Guard against files containing no valid pixels at all.
    return (year, total / count if count else 0.0)
# -----------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Read timing
# -------------------------------------------------------------------------
t0 = time.time()
for fileName in fileNames:
readDoNothing(fileName)
t1 = time.time()
print('### Total execution time - %s ' % str(t1 - t0))
| 33.177215 | 79 | 0.518123 |
c9195aa10c6d748883a1b2125a3a031fa6170f06 | 1,380 | py | Python | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | deluca/envs/lung/__init__.py | AlexanderJYu/deluca | 9e8b0d84d2eb0a58ff82a951b42881bdb2dc9f00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO
# - interp smh
import jax.numpy as jnp
from deluca import JaxObject
DEFAULT_PRESSURE_RANGE = (5.0, 35.0)
DEFAULT_KEYPOINTS = [1e-8, 1.0, 1.5, 3.0]
__all__ = ["BreathWaveform"]
| 32.857143 | 76 | 0.695652 |
c91a77c07622a3736aa47e0888f81515c8655b66 | 746 | py | Python | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | 9 | 2015-02-05T12:16:47.000Z | 2022-02-04T07:48:23.000Z | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | 1 | 2018-02-11T16:31:36.000Z | 2018-02-11T16:31:36.000Z | ivoire/__init__.py | Julian/Ivoire | af3f4ac77daf9d6c5167ef8a906557cc9d1d0ba7 | [
"MIT"
] | null | null | null | """
Ivoire is an RSpec-like testing framework for Python.
Globals defined in this module:
current_result: Should be set by a runner to an object that has the same
interface as unittest.TestResult. It will be used by every
example that is instantiated to record test results during
the runtime of Ivoire.
__version__: The current version information
"""
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
from ivoire.standalone import Example, describe
from ivoire.manager import ContextManager
__version__ = metadata.version("ivoire")
_manager = ContextManager()
context = _manager.create_context
current_result = None
| 26.642857 | 78 | 0.727882 |
c91fcc058836389aa81c0420f1fedf01f1106ff3 | 1,699 | py | Python | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | null | null | null | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | null | null | null | similarity.py | Blair-Johnson/faceswap | 79b75f7f112acb3bf6b228116facc4d0812d2099 | [
"MIT"
] | 1 | 2021-11-04T08:21:07.000Z | 2021-11-04T08:21:07.000Z | # Blair Johnson 2021
from facenet_pytorch import InceptionResnetV1, MTCNN
import numpy as np
def create_embeddings(images):
    """Compute a FaceNet embedding for each input image.

    Args:
        images: a list of images (anything MTCNN accepts), or a single
            image, which is wrapped into a one-element list.

    Returns:
        A list with one embedding tensor per input image.

    NOTE(review): if MTCNN detects no face, ``extractor(image)`` returns
    None and ``unsqueeze`` will fail -- presumably callers always pass
    images containing a face; confirm.
    """
    # isinstance is the idiomatic type check; type(x) != list also
    # mishandles list subclasses.
    if not isinstance(images, list):
        images = [images]
    extractor = MTCNN()
    encoder = InceptionResnetV1(pretrained='vggface2').eval()
    embeddings = []
    for image in images:
        cropped_img = extractor(image)
        embeddings.append(encoder(cropped_img.unsqueeze(0)))
    return embeddings
def candidate_search(candidates, target):
    """Find the candidate image whose face embedding best matches *target*.

    Args:
        candidates: list of candidate images.
        target: the target image.

    Returns:
        (best_image, best_index): the closest candidate and its index.
    """
    cand_embs = create_embeddings(candidates)
    target_embs = create_embeddings(target)[0]
    target_arr = target_embs.detach().numpy()
    best_loss = np.inf
    best_candidate = -1  # index sentinel (was np.inf, a non-index value)
    for i, embedding in enumerate(cand_embs):
        # Frobenius norm of the embedding difference as the match score.
        loss = np.linalg.norm(target_arr - embedding.detach().numpy(), ord='fro')
        if loss < best_loss:
            best_loss = loss
            best_candidate = i
    # Bug fix: the original returned candidates[i] -- the *last* candidate
    # examined -- rather than the best-scoring one.
    return candidates[best_candidate], best_candidate
# Demo driver: among three hard-coded local face images, report which one
# best matches the target portrait according to candidate_search().
if __name__ == '__main__':
    from PIL import Image
    import matplotlib.pyplot as plt  # only needed for the commented-out imshow below
    test1 = np.array(Image.open('/home/bjohnson/Pictures/fake_face.jpg'))
    test2 = np.array(Image.open('/home/bjohnson/Pictures/old_face.jpg'))
    test3 = np.array(Image.open('/home/bjohnson/Pictures/young_face.jpg'))
    target = np.array(Image.open('/home/bjohnson/Pictures/profile_pic_lake_louise.png'))
    candidates = [test1,test2,test3]
    chosen, index = candidate_search(candidates, target)
    print(index)
    #plt.imshow(candidate_search(candidates, target))
| 29.807018 | 97 | 0.683343 |
c920d8ceac18d8c9ff46fde63a7fa287e05e877b | 6,075 | py | Python | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
] | 4 | 2022-02-13T15:52:18.000Z | 2022-03-26T17:33:13.000Z | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | 1 | 2022-02-13T22:48:09.000Z | 2022-02-13T22:48:09.000Z | opentamp/domains/robot_manipulation_domain/generate_base_prob.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | from IPython import embed as shell
import itertools
import numpy as np
import random
# SEED = 1234
NUM_PROBS = 1
NUM_CLOTH = 4
filename = "probs/base_prob.prob"
GOAL = "(RobotAt baxter robot_end_pose)"
# init Baxter pose
BAXTER_INIT_POSE = [0, 0, 0]
BAXTER_END_POSE = [0, 0, 0]
R_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [0, -0.8436, -0.09, 0.91, 0.043, 1.5, -0.05] # [ 0.1, -1.36681967, -0.23718529, 1.45825713, 0.04779009, 1.48501637, -0.92194262]
L_ARM_INIT = [0, 0, 0, 0, 0, 0, 0] # [-0.6, -1.2513685 , -0.63979997, 1.41307933, -2.9520384, -1.4709618, 2.69274026]
OPEN_GRIPPER = [0.02]
CLOSE_GRIPPER = [0.015]
MONITOR_LEFT = [np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
MONITOR_RIGHT = [-np.pi/4, -np.pi/4, 0, 0, 0, 0, 0]
CLOTH_ROT = [0, 0, 0]
TABLE_GEOM = [1.23/2, 2.45/2, 0.97/2]
TABLE_POS = [1.23/2-0.1, 0, 0.97/2-0.375-0.665]
TABLE_ROT = [0,0,0]
ROBOT_DIST_FROM_TABLE = 0.05
REGION1 = [np.pi/4]
REGION2 = [0]
REGION3 = [-np.pi/4]
REGION4 = [-np.pi/2]
cloth_init_poses = np.ones((NUM_CLOTH, 3)) * 0.615
cloth_init_poses = cloth_init_poses.tolist()
if __name__ == "__main__":
main()
| 41.047297 | 175 | 0.576461 |
c9210c12cb167b3a01782592accbb83cee14ae03 | 2,633 | py | Python | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | 5 | 2021-06-10T21:12:16.000Z | 2022-01-14T05:04:03.000Z | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | null | null | null | tests/views/test_hsva.py | ju-sh/colorviews | b9757dd3a799d68bd89966852f36f06f21e36072 | [
"MIT"
] | null | null | null | import pytest
from colorviews import AlphaColor
def test_vals_getter():
vals = (0.75, 0.45, 0.29, 0.79)
color = AlphaColor.from_hsva(0.75, 0.45, 0.29, 0.79)
assert [round(val, 4) for val in color.hsva.vals] == list(vals)
| 29.920455 | 67 | 0.545765 |
c92170ef42c7d1d4c09bcc11c88becf053c48250 | 2,645 | py | Python | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/__init__.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | import random, logging
from collections import Counter
from flask import Flask, session, request, render_template, jsonify
from app.util import unflatten
from app.fiftycents import FiftyCentsGame
from app.fiftycents import Card
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
app.secret_key = 'peanut'
game = FiftyCentsGame(2)
| 38.897059 | 107 | 0.483554 |
c921d773c35312ecebe3d4b6eaaaef9e999e9c07 | 4,905 | py | Python | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | bluvo_test.py | JanJaapKo/BlUVO | 2a72b06a56069fee5bd118a12b846513096014b1 | [
"MIT"
] | null | null | null | import time
import logging
import pickle
import json
import consolemenu
from generic_lib import georeverse, geolookup
from bluvo_main import BlueLink
from tools.stamps import postOffice
from params import * # p_parameters are read
logging.basicConfig(format='%(asctime)s - %(levelname)-8s - %(filename)-18s - %(message)s', filename='bluvo_test.log',
level=logging.DEBUG)
menuoptions = ['0 exit',"1 Lock", "2 Unlock", "3 Status", "4 Status formatted", "5 Status refresh", "6 location", "7 loop status",
"8 Navigate to", '9 set Charge Limits', '10 get charge schedule', '11 get services', '12 poll car', '13 get stamps', '14 odometer', '15 get park location',
'16 get user info', '17 get monthly report', '18 get monthly report lists']
mymenu = consolemenu.SelectionMenu(menuoptions)
# heartbeatinterval, initsuccess = initialise(p_email, p_password, p_pin, p_vin, p_abrp_token, p_abrp_carmodel, p_WeatherApiKey,
# p_WeatherProvider, p_homelocation, p_forcepollinterval, p_charginginterval,
# p_heartbeatinterval)
bluelink = BlueLink(p_email, p_password, p_pin, p_vin, p_abrp_carmodel, p_abrp_token, p_WeatherApiKey, p_WeatherProvider, p_homelocation)
bluelink.initialise(p_forcepollinterval, p_charginginterval)
if bluelink.initSuccess:
    # Interactive console loop: show the numbered menu, read a choice,
    # dispatch to the matching BlueLink API call.
    #stampie = postOffice("hyundai", False)
    while True:
        for option in menuoptions:
            print(option)
        choice = int(input("Please Select:"))
        print(choice)
        if choice == 0:
            exit()
        if choice == 1:
            bluelink.vehicle.api_set_lock('on')
        if choice == 2:
            bluelink.vehicle.api_set_lock('off')
        if choice == 3:
            print(bluelink.vehicle.api_get_status(False))
        if choice == 4:
            status_record = bluelink.vehicle.api_get_status(False, False)
            for key in status_record:
                print(key + ": " + str(status_record[key]))
        if choice == 5:
            print(bluelink.vehicle.api_get_status(True))
        if choice == 6:
            locatie = bluelink.vehicle.api_get_location()
            if locatie:
                locatie = locatie['gpsDetail']['coord']
                print(georeverse(locatie['lat'], locatie['lon']))
        if choice == 7:
            # Endless polling loop: honour the on-disk force-poll semaphore,
            # poll the car, print a status summary, then sleep one heartbeat.
            while True:
                try:
                    with open('semaphore.pkl', 'rb') as f:
                        manualForcePoll = pickle.load(f)
                except:
                    manualForcePoll = False
                print(manualForcePoll)
                updated, parsedStatus, afstand, googlelocation = bluelink.pollcar(manualForcePoll)
                # Clear the semaphore so the next cycle polls normally.
                with open('semaphore.pkl', 'wb') as f:
                    pickle.dump(False, f)
                if updated:
                    print('afstand van huis, rijrichting, snelheid en km-stand: ', afstand, ' / ',
                          parsedStatus['heading'], '/', parsedStatus['speed'], '/', parsedStatus['odometer'])
                    print(googlelocation)
                    print("range ", parsedStatus['range'], "soc: ", parsedStatus['chargeHV'])
                    if parsedStatus['charging']:
                        print("Laden")
                    if parsedStatus['trunkopen']:
                        print("kofferbak open")
                    if not parsedStatus['locked']:
                        print("deuren van slot")
                    if parsedStatus['dooropenFL']:
                        print("bestuurdersportier open")
                    print("soc12v ", parsedStatus['charge12V'], "status 12V", parsedStatus['status12V'])
                    print("=============")
                time.sleep(bluelink.heartbeatinterval)
        if choice == 8:
            print(bluelink.vehicle.api_set_navigation(geolookup(input("Press Enter address to navigate to..."))))
        if choice == 9:
            invoer = input("Enter maximum for fast and slow charging (space or comma or semicolon or colon seperated)")
            for delim in ',;:':
                invoer = invoer.replace(delim, ' ')
            print(bluelink.vehicle.api_set_chargelimits(invoer.split()[0], invoer.split()[1]))
        if choice == 10:
            print(json.dumps(bluelink.vehicle.api_get_chargeschedule(), indent=4))
        if choice == 11:
            print(bluelink.vehicle.api_get_services())
        if choice == 12:
            print(str(bluelink.pollcar(True)))
        if choice == 13:
            print("feature removed")
        if choice == 14:
            print(bluelink.vehicle.api_get_odometer())
        if choice == 15:
            print(bluelink.vehicle.api_get_parklocation())
        if choice == 16:
            print(bluelink.vehicle.api_get_userinfo())
        if choice == 17:
            print(bluelink.vehicle.api_get_monthlyreport(2021, 5))
        if choice == 18:
            print(bluelink.vehicle.api_get_monthlyreportlist())
        input("Press Enter to continue...")
else:
    logging.error("initialisation failed")
| 50.56701 | 171 | 0.601019 |
c92214401251c6b4745f3ba05c668f2913227e7f | 2,962 | py | Python | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | null | null | null | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | null | null | null | lda/test3/interpret_topics.py | kaiiam/amazon-continuation | 9faaba80235614e6eea3e305c423975f2ec72e3e | [
"MIT"
] | 1 | 2019-05-28T21:49:45.000Z | 2019-05-28T21:49:45.000Z | #!/usr/bin/env python3
"""
Author : kai
Date : 2019-06-26
Purpose: Rock the Casbah
"""
import argparse
import sys
import re
import csv
# --------------------------------------------------
def get_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-a', '--arg', metavar='str', type=str, default='',
                        help='A named string argument')
    parser.add_argument('-i', '--int', metavar='int', type=int, default=0,
                        help='A named integer argument')
    parser.add_argument('-f', '--flag', action='store_true',
                        help='A boolean flag')
    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Write *msg* followed by a newline to standard error."""
    sys.stderr.write('{}\n'.format(msg))
# --------------------------------------------------
def die(msg='Something bad happened'):
    """Report *msg* on stderr via warn(), then exit with status 1."""
    warn(msg)
    # sys.exit(1) is implemented as raising SystemExit(1).
    raise SystemExit(1)
# --------------------------------------------------
def main():
    """Translate LDA topic terms into InterPro IDs with readable names.

    Reads the InterPro ID -> entry-name table and the serialized gensim
    topic string, then writes one row per (topic, coefficient, InterPro
    ID, entry name) to output_topics.tsv.
    """
    args = get_args()  # parsed for CLI parity; values currently unused

    # Build the ID -> ENTRY_NAME lookup from the annotations table.
    intpro_dict = {}
    with open('InterPro_entry_list.tsv') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            intpro_dict[row['ENTRY_AC']] = row['ENTRY_NAME']

    # The topics file is a printed Python list of (topic_id, "terms")
    # pairs; strip quotes and brackets so it can be split textually.
    with open('model_topics.txt', 'r') as fh:
        model_topics = fh.read().replace('\n', '')
    for pattern in ("'", r"\[", r"\]"):
        model_topics = re.sub(pattern, "", model_topics)
    mtl = model_topics.split('), ')

    # Raw string avoids the invalid '\d' escape; compiled once instead of
    # once per topic as in the original.
    id_re = re.compile(r'IPR\d{3}')

    with open('output_topics.tsv', 'w') as f:
        print('Topic\tModel_coefficient\tInterpro_ID\tInterPro_ENTRY_NAME', file=f)
        for entry in mtl:  # renamed from `list`, which shadowed the builtin
            # NOTE: entry[1] only works for single-digit topic ids.
            topic = entry[1]
            c_words = [w for w in entry.split() if id_re.search(w)]
            c_words = [re.sub('"', '', w) for w in c_words]
            for w in c_words:
                # The original called re.sub('\)', '', w) and discarded the
                # result (a no-op); any trailing ')' is removed by the [:9]
                # truncation below, so the call is simply dropped.
                coef, intpro = w.split('*')
                intpro = intpro[:9]
                label = intpro_dict.get(intpro, '')
                print('{}\t{}\t{}\t{}'.format(topic, coef, intpro, label), file=f)
# --------------------------------------------------
# Script entry point.
if __name__ == '__main__':
    main()
| 26.684685 | 83 | 0.497637 |
c924841b1d689ef522dd4926df95b7101d1bb341 | 292 | py | Python | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | app/users/urls.py | ManojKumarMRK/recipe-app-api | f518e91fc335c46eb1034d865256c94bb3e56b32 | [
"MIT"
] | null | null | null | from django.urls import path
from users import views
app_name = 'users'  # URL namespace used by reverse()/redirect lookups
# Account endpoints: user creation, auth-token creation, and (presumably)
# viewing/updating the authenticated user's own profile via 'me/'.
urlpatterns = [
    path('create/',views.CreateUserView.as_view(),name='create'),
    path('token/',views.CreateTokenView.as_view(),name='token'),
    path('me/', views.ManageUserView.as_view(),name='me'),
]
c92510f03e8c86ab8acb7443fa38d2785d4a3bca | 4,200 | py | Python | archive/visualization/network.py | ajrichards/bayesian-examples | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | 2 | 2016-01-27T08:51:23.000Z | 2017-04-17T02:21:34.000Z | archive/visualization/network.py | ajrichards/notebook | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | null | null | null | archive/visualization/network.py | ajrichards/notebook | fbd87c6f1613ea516408e9ebc3c9eff1248246e4 | [
"BSD-3-Clause"
] | null | null | null | import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
def draw_graph(edgeWeights, plotName='network_graph.png'):
    """Render the theme/event network and save it to *plotName*.

    *edgeWeights* maps edge names (e.g. "t1e1") to theme/event
    probabilities. Zero-weight edges are drawn as dashed black lines;
    the rest are colored on a winter colormap scaled by weight.
    """
    endpoints = {"t1e1": ("T1", "E1"), "t1e2": ("T1", "E2"),
                 "t1e6": ("T1", "E6"), "t2e4": ("T2", "E4"),
                 "t2e5": ("T2", "E5"), "t2e6": ("T2", "E6"),
                 "t3e3": ("T3", "E3"), "t3e4": ("T3", "E4"),
                 "t3e5": ("T3", "E5")}
    themes = ["T1", "T2", "T3"]
    events = ["E1", "E2", "E3", "E4", "E5", "E6"]

    G = nx.Graph()
    for node in themes + events:
        G.add_node(node)
    for edgeName, (src, dst) in endpoints.iteritems():
        G.add_edge(src, dst, weight=edgeWeights[edgeName])

    # Fixed layout: themes on the top row, events on the bottom row.
    pos = {"T1": (2, 2), "T2": (3.5, 2), "T3": (5, 2),
           "E1": (1, 1), "E2": (2, 1), "E3": (3, 1),
           "E4": (4, 1), "E5": (5, 1), "E6": (6, 1)}

    # Zero-weight (insignificant) edges get a dashed rendering.
    isEdges = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] == 0.0]

    nodeSize = 2000
    colors = [edge[2]['weight'] for edge in G.edges_iter(data=True)]
    cmap = plt.cm.winter
    fig = plt.figure(figsize=(12, 6))
    fig.suptitle('Word Theme Probabilities', fontsize=14, fontweight='bold')
    ax = fig.add_axes([0.355, 0.0, 0.7, 1.0])
    nx.draw(G, pos, node_size=nodeSize, edge_color=colors, width=4,
            edge_cmap=cmap, edge_vmin=-0.5, edge_vmax=0.5, ax=ax,
            with_labels=True)
    nx.draw_networkx_nodes(G, pos, node_size=nodeSize, nodelist=themes,
                           node_color='#F2F2F2', with_labels=True)
    nx.draw_networkx_nodes(G, pos, node_size=nodeSize, nodelist=events,
                           node_color='#0066FF', with_labels=True)
    nx.draw_networkx_edges(G, pos, edgelist=isEdges, width=1,
                           edge_color='k', style='dashed')

    # Horizontal colorbar for the edge-weight scale.
    ax1 = fig.add_axes([0.03, 0.05, 0.35, 0.14])
    norm = mpl.colors.Normalize(vmin=0.05, vmax=.2)
    cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
                                    orientation='horizontal')

    # Legend panel mapping node labels to their meanings.
    ax2 = fig.add_axes([0.03, 0.25, 0.35, 0.65])  # l, b, w, h
    ax2.set_yticks([])
    ax2.set_xticks([])
    ax2.set_frame_on(True)
    fontSize = 10
    legendLines = [
        (0.9, r"$T1$ = Horrendous IVR"),
        (0.8, r"$T2$ = Mobile Disengagement"),
        (0.7, r"$T3$ = Mobile Users"),
        (0.6, r"$E1$ = agent.transfer->ivr.exit"),
        (0.5, r"$E2$ = agent.assigned->call.transfer"),
        (0.4, r"$E3$ = sureswip.login->view.account.summary"),
        (0.3, r"$E4$ = mobile.exit->mobile.entry"),
        (0.2, r"$E5$ = mobile.exit->journey.exit"),
        (0.1, r"$E6$ = ivr.entry->ivr.proactive.balance"),
    ]
    for ypos, label in legendLines:
        ax2.text(0.1, ypos, label, color='k', fontsize=fontSize,
                 ha="left", va="center")

    plt.savefig(plotName)
if __name__ == "__main__":
filepath = '../word_transition_model/data/transitions_df.csv'
data_dict = get_a_dict(filepath)
summary = data_dict['Just Show Me the Summary']
summary_events = summary[0]
summary_scores = summary[1]
edge_weights = {"t1e1":0.14, "t1e2":0.13, "t1e6":0.12, "t2e4":0.05, "t2e5":0.16, "t2e6":0.0, "t3e3":0.3, "t3e4":0.1, "t3e5":0.04}
draw_graph(edge_weights)
| 44.680851 | 196 | 0.61381 |
c926f1cc84ef2be7db59c1ebc4dd4db9c3aeb3e1 | 332 | py | Python | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | accounting_app/accounting_app/doctype/gl_entry/gl_entry.py | imdadhussain/accounting_app | 0f4b54242d81953c0c3ece3fb098701e86ce0eaf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, BS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.nestedset import get_descendants_of
from frappe.utils import flt
| 25.538462 | 53 | 0.789157 |
c926fbd01b5a51930f76ba3ff40785e357d452a6 | 574 | py | Python | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | null | null | null | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | 5 | 2020-03-13T09:48:40.000Z | 2021-09-22T18:42:22.000Z | main/migrations/0002_auto_20200314_1530.py | kwatog/jumuk | 6234bf18ea0bf1eeb4194ecce23af9b669d4a841 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-14 15:30
from django.db import migrations, models
| 23.916667 | 73 | 0.578397 |
c92cee00e5a3f53b6fcf563376119be5a8fa6b38 | 645 | py | Python | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 14 | 2021-04-05T01:25:12.000Z | 2022-02-17T19:44:28.000Z | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T08:32:49.000Z | 2021-07-05T12:34:57.000Z | fusion/dataset/mnist_svhn/transforms.py | Mrinal18/fusion | 34e563f2e50139385577c3880c5de11f8a73f220 | [
"BSD-3-Clause"
] | 1 | 2022-02-01T21:56:11.000Z | 2022-02-01T21:56:11.000Z | from torch import Tensor
from torchvision import transforms
| 17.916667 | 42 | 0.493023 |
c92dbb28d5fa5849ee22ef3b509bd866ce701e9e | 1,508 | py | Python | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | scripts/previousScripts-2015-12-25/getVariableInfo.py | mistryrakesh/SMTApproxMC | 7c97e10c46c66e52c4e8972259610953c3357695 | [
"MIT"
] | null | null | null | #!/home/rakeshmistry/bin/Python-3.4.3/bin/python3
# @author: rakesh mistry - 'inspire'
# @date: 2015-08-06
import sys
import re
import os
import math
# Function: parseSmt2File
# Function: main
if __name__ == "__main__":
main(sys.argv)
| 25.133333 | 121 | 0.611406 |
c92faeda80f7623d46a23810d5c128754efcada2 | 9,880 | py | Python | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 7 | 2019-08-11T10:31:03.000Z | 2021-03-08T10:07:52.000Z | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 1 | 2020-12-29T02:30:18.000Z | 2021-01-25T02:49:37.000Z | simplified_scrapy/core/spider.py | yiyedata/simplified-scrapy | ccfdc686c53b2da3dac733892d4f184f6293f002 | [
"Apache-2.0"
] | 4 | 2019-10-22T02:14:35.000Z | 2021-05-13T07:01:56.000Z | #!/usr/bin/python
#coding=utf-8
import json, re, logging, time, io, os
import sys
from simplified_scrapy.core.config_helper import Configs
from simplified_scrapy.core.sqlite_cookiestore import SqliteCookieStore
from simplified_scrapy.core.request_helper import requestPost, requestGet, getResponseStr, extractHtml
from simplified_scrapy.core.utils import convertTime2Str, convertStr2Time, printInfo, absoluteUrl
from simplified_scrapy.core.regex_helper import *
from simplified_scrapy.core.sqlite_urlstore import SqliteUrlStore
from simplified_scrapy.core.sqlite_htmlstore import SqliteHtmlStore
from simplified_scrapy.core.obj_store import ObjStore
| 34.666667 | 102 | 0.542611 |
c92fe0a2d25d872fa12d88c6134dd6759ab24310 | 1,457 | py | Python | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-2469.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/evn python
#--coding:utf-8--*--
#Name:10
#Refer:http://www.wooyun.org/bugs/wooyun-2015-0120852/
#Author:xq17
if __name__ == '__main__':
from dummy import *
audit(assign('tianrui_lib','http://218.92.71.5:1085/trebook/')[1]) | 41.628571 | 154 | 0.587509 |
c93112ec790fae5b416d3ab6e0ee349a48489f55 | 49,239 | py | Python | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | 7 | 2021-03-15T08:43:56.000Z | 2022-01-09T11:56:43.000Z | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | null | null | null | FBDParser/charmaps/symbols.py | jonix6/fbdparser | 617a79bf9062092e4fa971bbd66da02cd9d45124 | [
"MIT"
] | 3 | 2021-09-07T09:40:16.000Z | 2022-01-11T10:32:23.000Z | # -*- coding: utf-8 -*-
"A"
symbolsA = UnicodeMap()
_update = symbolsA.update
# Area A1
_update({
0xA140: 0xA140, # 1()
0xA141: 0xA141, # 2()
0xA142: 0xA142, # 3()
0xA143: 0xA143, # 4()
0xA144: 0xA144, # 5()
0xA145: 0xA145, # 6()
0xA146: 0xA146, # 7()
0xA147: 0xA147, # 8()
0xA148: 0xA148, # 9()
0xA149: 0xA149, # 10()
0xA14A: 0xA14A, # 11()
0xA14B: 0xA14B, # 12()
0xA14C: 0x003D, # = =
0xA14D: 0x2212, # =
0xA14E: 0x2215, # =
0xA14F: 0x1D7CE, #
0xA150: 0x1D7CF, #
0xA151: 0x1D7D0, #
0xA152: 0x1D7D1, #
0xA153: 0x1D7D2, #
0xA154: 0x1D7D3, #
0xA155: 0x1D7D4, #
0xA156: 0x1D7D5, #
0xA157: 0x1D7D6, #
0xA158: 0x1D7D7, #
0xA159: 0x2664, #
0xA15A: 0x2667, #
0xA15B: 0x00B6, #
0xA15C: 0x26BE, #
0xA15D: 0x263E, # 1/4 =
0xA15E: 0x263D, # 1/4 =
0xA15F: 0x263A, # =
0xA160: 0x1F31C, # =
0xA161: 0x1F31B, # =
0xA162: 0x3036, #
0xA163: 0x2252, # =
0xA164: 0xA164, # T + S
0xA165: 0x002B, # = +
0xA166: 0x223C, # =
0xA167: 0x00A9, #
0xA168: 0x24D2, #
0xA169: 0x24B8, #
0xA16A: 0x00AE, #
0xA16B: 0x24C7, #
0xA16D: 0x203E, # =
0xA16E: 0x005F, # = _
0xA16F: 0x25E2, #
0xA170: 0x25E3, #
0xA171: 0x25E5, #
0xA172: 0x25E4, #
0xA173: 0x256D, #
0xA174: 0x256E, #
0xA175: 0x2570, #
0xA176: 0x256F, #
0xA177: 0x2550, # =
0xA178: 0x2551, # =
0xA179: 0x2223, # =
0xA17A: 0x2926, #
0xA17B: 0x2924, #
0xA17C: 0x2923, #
0xA17D: 0x293E, #
0xA17E: 0x293F, #
0xA180: 0x21E7, #
0xA181: 0x21E9, #
0xA182: 0xA182, # 0 + 0
0xA183: 0xA183, # 1 + 1
0xA184: 0xA184, # 2 + 2
0xA185: 0xA185, # 3 + 3
0xA186: 0xA186, # 4 + 4
0xA187: 0xA187, # 5 + 5
0xA188: 0xA188, # 6 + 6
0xA189: 0xA189, # 7 + 7
0xA18A: 0xA18A, # 8 + 8
0xA18B: 0xA18B, # 9 + 9
0xA18C: 0xA18C, # 00
0xA18D: 0xA18D, # 11
0xA18E: 0xA18E, # 22
0xA18F: 0xA18F, # 33
0xA190: 0xA190, # 44
0xA191: 0xA191, # 55
0xA192: 0xA192, # 66
0xA193: 0xA193, # 77
0xA194: 0xA194, # 88
0xA195: 0xA195, # 99
0xA196: 0x1F6AD, #
0xA197: 0x1F377, #
0xA198: 0x26A0, #
0xA199: 0x2620, #
0xA19A: 0xA19A, # +
0xA19B: 0x2B4D, #
0xA19C: 0x21B7, #
0xA19D: 0x293A, #
0xA19E: 0x2716, #
0xA19F: 0x003F, # = ?
0xA1A0: 0x0021 # = !
})
# Area A2
_update({
0xA240: 0x231C, #
0xA241: 0x231F, #
0xA242: 0xA242, # empty
0xA243: 0xA243, # empty
0xA244: 0x231D, #
0xA245: 0x231E, #
0xA246: 0xA246, # empty
0xA247: 0xA247, # empty
0xA248: 0xFF1C, #
0xA249: 0xFF1E, #
0xA24A: 0x2AA1, #
0xA24B: 0x2AA2, #
0xA24C: 0xA24C, # vertical
0xA24D: 0xA24D, # vertical
0xA24E: 0x201E, #
0xA24F: 0xA24F, # italic !
0xA250: 0xA250, # italic ?
0xA251: 0xA76C, #
0xA252: 0xA76D, #
0xA253: 0xA253, # reversed
0xA254: 0xA254, # reversed
0xA255: 0xA255, # reversed
0xA256: 0xA256, # reversed
0xA257: 0x203C, # =
0xA258: 0xA258, # italic
0xA259: 0x2047, # =
0xA25A: 0xA25A, # italic
0xA25B: 0x2048, # =
0xA25C: 0xA25C, # italic
0xA25D: 0x2049, # =
0xA25E: 0xA25E, # italic
0xA25F: 0xA25F, # vertical .
0xA260: 0x03D6, # PI =
0xA261: 0x2116, #
0xA262: 0x0142, # l =
0xA263: 0x0131, # I =
0xA264: 0x014B, # eng =
0xA265: 0x0327, # =
0xA266: 0x00BF, # =
0xA267: 0x00A1, # =
0xA268: 0x00D8, # O =
0xA269: 0x00F8, # o =
0xA26A: 0x0087, # =
0xA26B: 0x0086, # =
0xA26C: 0x014A, # ENG =
0xA26D: 0xFB00, # =
0xA26E: 0xFB01, # =
0xA26F: 0xFB02, # =
0xA270: 0xFB03, # =
0xA271: 0xFB04, # =
0xA272: 0x0141, # =
0xA273: 0x00C7, # =
0xA274: 0x00C6, # =
0xA275: 0x00E6, # =
0xA276: 0x008C, # =
0xA277: 0x009C, # =
0xA278: 0x00DF, # =
0xA279: 0x0083, # =
0xA27A: 0x00E5, # =
0xA27B: 0x00E2, # =
0xA27C: 0x00E4, # =
0xA27D: 0x0101, # =
0xA27E: 0x00E1, # =
0xA280: 0x01CE, # =
0xA281: 0x00E0, # =
0xA282: 0x00E3, # =
0xA283: 0x00EB, # =
0xA284: 0x1EBD, # =
0xA285: 0x00EE, # =
0xA286: 0x00EF, # =
0xA287: 0x00F5, # =
0xA288: 0x00F4, # =
0xA289: 0x00F6, # =
0xA28A: 0x00FB, # =
0xA28B: 0x00F1, # =
0xA28C: 0x009A, # =
0xA28D: 0x015D, # =
0xA28E: 0x011D, # =
0xA28F: 0x00FF, # =
0xA290: 0x009E, # =
0xA291: 0x1E91, # =
0xA292: 0x0109, # =
0xA293: 0x00E7, # =
0xA294: 0xA294, #
0xA295: 0x1EBF, # =
0xA296: 0xA296, #
0xA297: 0x1EC1, # =
0xA29A: 0x0307, # =
0xA29B: 0x030A, # =
0xA29C: 0x0303, # =
0xA29D: 0x20F0, # =
0xA29E: 0x0306, # =
0xA29F: 0x002C, # = ,
0xA2A0: 0x0085, # =
0xA2AB: 0x217A, # 11 =
0xA2AC: 0x217B, # 12 =
0xA2AD: 0xA2AD, # 13
0xA2AE: 0xA2AE, # 14
0xA2AF: 0xA2AF, # 15
0xA2B0: 0xA2B0, # 16
0xA2EF: 0xA2EF, # 15
0xA2F0: 0xA2F0, # 16
0xA2FD: 0xA2FD, # 13
0xA2FE: 0xA2FE, # 14
})
# Area A3
_update({
0xA340: 0xA340, # 1()
0xA341: 0xA341, # 2()
0xA342: 0xA342, # 3()
0xA343: 0xA343, # 4()
0xA344: 0xA344, # 5()
0xA345: 0xA345, # 6()
0xA346: 0xA346, # 7()
0xA347: 0xA347, # 8()
0xA348: 0xA348, # 9()
0xA349: 0xA349, # 10()
0xA34A: 0xA34A, # 11()
0xA34B: 0xA34B, # 12()
0xA34C: 0x24FF, # 0 =
0xA34D: 0x2776, # 1 =
0xA34E: 0x2777, # 2 =
0xA34F: 0x2778, # 3 =
0xA350: 0x2779, # 4 =
0xA351: 0x277A, # 5 =
0xA352: 0x277B, # 6 =
0xA353: 0x277C, # 7 =
0xA354: 0x277D, # 8 =
0xA355: 0x277E, # 9 =
0xA356: 0x24B6, # A =
0xA357: 0x24B7, # B =
0xA358: 0x24B8, # C =
0xA359: 0x24B9, # D =
0xA35A: 0x24BA, # E =
0xA35B: 0x24BB, # F =
0xA35C: 0x24BC, # G =
0xA35D: 0x24BD, # H =
0xA35E: 0x24BE, # I =
0xA35F: 0x24BF, # J =
0xA360: 0x1F110, # A =
0xA361: 0x1F111, # B =
0xA362: 0x1F112, # C =
0xA363: 0x1F113, # D =
0xA364: 0x1F114, # E =
0xA365: 0x1F115, # F =
0xA366: 0x1F116, # G =
0xA367: 0x1F117, # H =
0xA368: 0x1F118, # I =
0xA369: 0x1F119, # J =
0xA36A: 0x24D0, # a =
0xA36B: 0x24D1, # b =
0xA36C: 0x24D2, # c =
0xA36D: 0x24D3, # d =
0xA36E: 0x24D4, # e =
0xA36F: 0x24D5, # f =
0xA370: 0x24D6, # g =
0xA371: 0x24D7, # h =
0xA372: 0x24D8, # i =
0xA373: 0x24D9, # j =
0xA374: 0x249C, # a =
0xA375: 0x249D, # b =
0xA376: 0x249E, # c =
0xA377: 0x249F, # d =
0xA378: 0x24A0, # e =
0xA379: 0x24A1, # f =
0xA37A: 0x24A2, # g =
0xA37B: 0x24A3, # h =
0xA37C: 0x24A4, # i =
0xA37D: 0x24A5, # j =
0xA37E: 0x3396, # =
0xA380: 0x3397, #
0xA381: 0x33CB, # =
0xA382: 0x3398, # =
0xA383: 0x33A0, # =
0xA384: 0x33A4, # =
0xA385: 0x33A5, # =
0xA386: 0x33A2, # =
0xA387: 0x33BE, # =
0xA388: 0x33C4, #
0xA389: 0x3383, # =
0xA38A: 0x33C2, #
0xA38B: 0x33D8, #
0xA38C: 0x33CD, #
0xA38D: 0x33D7, #
0xA38E: 0x33DA, #
0xA38F: 0x339C, #
0xA390: 0x339D, #
0xA391: 0x339E, #
0xA392: 0x33CE, # =
0xA393: 0x338E, # =
0xA394: 0x338F, # =
0xA395: 0x33A1, # =
0xA396: 0x33D2, #
0xA397: 0x33D1, #
0xA398: 0x33C4, #
0xA399: 0x33D5, #
0xA39A: 0xAB36, #
0xA39B: 0x2113, #
0xA39C: 0x006D, # m
0xA39D: 0x0078, # x
0xA39E: 0x1EFF, #
0xA39F: 0x0028, # = (
0xA3A0: 0x0029, # = )
})
# Area A4
_update({
0xA440: 0xA440, # BD +
0xA441: 0xA441, # BD +
0xA442: 0xA442, # BD +
0xA443: 0xA443, # BD +
0xA444: 0xA444, # + +
0xA445: 0xA445, # +
0xA446: 0xA446, # + +
0xA447: 0xA447, # +
0xA448: 0x29C8, #
0xA449: 0x1F79C, #
0xA44A: 0xA44A, # +
0xA44B: 0xA44B, # +
0xA44C: 0xA44C, # +
0xA44D: 0x26CB, #
0xA44E: 0x2756, #
0xA44F: 0xA44F, # negative
0xA450: 0xA450, # 5-black-square cross, like
0xA451: 0xA451, # 5-white-square cross, like
0xA452: 0x2795, #
0xA453: 0x271A, #
0xA454: 0x23FA, #
0xA455: 0x2704, #
0xA456: 0x25C9, #
0xA457: 0x2A00, #
0xA458: 0x2740, #
0xA459: 0x273F, #
0xA45A: 0x2668, #
0xA45B: 0x2669, #
0xA45C: 0x266A, #
0xA45D: 0x266C, #
0xA45E: 0x2B57, #
0xA45F: 0x26BE, #
0xA460: 0x260E, #
0xA461: 0x2025, #
0xA462: 0x261C, #
0xA463: 0x261E, #
0xA464: 0x3021, # =
0xA465: 0x3022, # =
0xA466: 0x3023, # =
0xA467: 0x3024, # =
0xA468: 0x3025, # =
0xA469: 0x3026, # =
0xA46A: 0x3027, # =
0xA46B: 0x3028, # =
0xA46C: 0x3029, # =
0xA46D: 0x3038, # =
0xA46E: 0x3039, # =
0xA46F: 0x303A, # =
0xA470: 0x25A2, #
0xA471: 0x00AE, #
0xA472: 0x25CF, #
0xA473: 0x25CB, #
0xA474: 0x25CB, #
0xA475: 0x25CA, #
0xA476: 0xA476, # +
0xA477: 0x2236, #
0xA478: 0xA478, # m/m
0xA479: 0xA479, # c/m
0xA47A: 0xA47A, # d/m
0xA47B: 0x2105, #
0xA47D: 0xA47D, # circled
0xA47E: 0x2122, #
0xA480: 0xAB65, #
0xA481: 0x026E, #
0xA482: 0x02A7, #
0xA483: 0x01EB, #
0xA484: 0x03C5, #
0xA485: 0xA7AC, #
0xA486: 0x1D93, #
0xA487: 0x1D74, #
0xA488: 0x1D92, #
0xA489: 0x1D95, #
0xA48A: 0x02AE, #
0xA48B: 0x1D8B, #
0xA48C: 0x0119, #
0xA48D: 0x01BE, #
0xA48E: 0x1D97, #
0xA48F: 0x0293, #
0xA490: 0xA490, # h
0xA491: 0x0253, #
0xA492: 0x0287, #
0xA493: 0x01AB, #
0xA494: 0x028D, #
0xA495: 0x1D8D, #
0xA496: 0x0269, #
0xA497: 0x025C, #
0xA498: 0x02A5, #
0xA499: 0x019E, #
0xA49A: 0x01AA, #
0xA49B: 0x0250, #
0xA49C: 0x0286, #
0xA49D: 0x01BB, #
0xA49E: 0x00D8, #
0xA4F4: 0xA4F4, # !!!
0xA4F5: 0xA4F5, # italic !!!
0xA4F6: 0x32A3, # =
0xA4F7: 0x329E, # =
0xA4F8: 0x32A4, # =
0xA4F9: 0x32A5, # =
0xA4FA: 0x32A6, # =
0xA4FB: 0x32A7, # =
0xA4FC: 0x32A8, # =
0xA4FD: 0xA4FD, # +
0xA4FE: 0xA4FE, # +
})
# Area A5
_update({
0xA540: 0x0111, #
0xA541: 0x1D80, #
0xA542: 0x1D81, #
0xA543: 0x0252, #
0xA544: 0xA544, # +
0xA545: 0x026B, #
0xA546: 0x1D88, #
0xA547: 0x1D82, #
0xA548: 0x02A6, #
0xA549: 0x025F, #
0xA54A: 0x00FE, #
0xA54B: 0x0257, #
0xA54C: 0xAB67, #
0xA54D: 0x0260, #
0xA54E: 0x0242, #
0xA54F: 0x02AF, #
0xA550: 0xA550, #
0xA551: 0x0241, #
0xA552: 0x025A, #
0xA553: 0x1D8A, #
0xA554: 0x0296, #
0xA555: 0x1D8C, #
0xA556: 0x1D75, #
0xA557: 0x1D6D, #
0xA558: 0x027D, #
0xA559: 0x027A, #
0xA55A: 0x01BA, #
0xA55B: 0xA55B, # turned
0xA55C: 0x0273, #
0xA55D: 0xA795, #
0xA55E: 0x01B0, #
0xA55F: 0x1D85, #
0xA560: 0x0260, #
0xA561: 0x1D86, #
0xA562: 0x0277, #
0xA563: 0x02A4, #
0xA564: 0x02A3, #
0xA565: 0x1D87, #
0xA566: 0x1D7C, #
0xA567: 0x02A8, #
0xA568: 0x1D8F, #
0xA569: 0x029A, #
0xA56A: 0x1D9A, #
0xA56B: 0xA727, #
0xA56C: 0x1D83, #
0xA56D: 0xA56D, # italic
0xA56E: 0x029E, #
0xA56F: 0x0195, #
0xA570: 0x1D76, #
0xA571: 0x027E, #
0xA572: 0x1D8E, #
0xA573: 0x1D89, #
0xA574: 0x027C, #
0xA575: 0x0279, #
0xA576: 0x018D, #
0xA577: 0x03C9, #
0xA578: 0x025D, #
0xA579: 0x03C3, #
0xA57A: 0x027B, #
0xA57B: 0x026D, #
0xA57C: 0x0267, #
0xA57D: 0x025A, #
0xA57E: 0xAB66, #
0xA580: 0x5F02, #
0xA581: 0x28473, #
0xA582: 0x5194, #
0xA583: 0x247A3, #
0xA584: 0x2896D, #
0xA585: 0x5642, #
0xA586: 0x7479, #
0xA587: 0x243B9, #
0xA588: 0x723F, #
0xA589: 0x9D56, #
0xA58A: 0x4D29, #
0xA58B: 0x20779, #
0xA58C: 0x210F1, #
0xA58D: 0x2504C, #
0xA58E: 0x233CC, #
0xA58F: 0x032F, # =
0xA590: 0x0312, # =
0xA591: 0x030D, # =
0xA592: 0x0314, # =
0xA593: 0x0313, # =
0xA594: 0x2F83B, #
0xA595: 0x25EC0, #
0xA596: 0x445B, #
0xA597: 0x21D3E, #
0xA598: 0x0323, # =
0xA599: 0x0325, # =
0xA59A: 0x0331, # =
0xA59B: 0x032A, # =
0xA59C: 0x032C, # =
0xA59D: 0x032B, # =
0xA59E: 0x0329, # =
0xA59F: 0xFF5B, # =
0xA5A0: 0xFF5D, # =
0xA5F7: 0x3016, # =
0xA5F8: 0x3017, # =
0xA5F9: 0x29DB, #
0xA5FA: 0xA5FA, # vertical
0xA5FB: 0x534D, #
0xA5FC: 0xFE47, # =
0xA5FD: 0xFE48, # =
0xA5FE: 0x2571, # =
})
# Area A6
_update({
0xA640: 0x00C5, # =
0xA641: 0x0100, # =
0xA642: 0x00C1, # =
0xA643: 0x01CD, # =
0xA644: 0x00C0, # =
0xA645: 0x00C2, # =
0xA646: 0x00C4, # =
0xA647: 0x00C3, # =
0xA648: 0x0112, # =
0xA649: 0x00C9, # =
0xA64A: 0x011A, # =
0xA64B: 0x00C8, # =
0xA64C: 0x00CA, # =
0xA64D: 0x00CB, # =
0xA64E: 0x1EBC, # =
0xA64F: 0x012A, # =
0xA650: 0x00CD, # =
0xA651: 0x01CF, # =
0xA652: 0x00CC, # =
0xA653: 0x00CE, # =
0xA654: 0x00CF, # =
0xA655: 0x014C, # =
0xA656: 0x00D3, # =
0xA657: 0x01D1, # =
0xA658: 0x00D2, # =
0xA659: 0x00D4, # =
0xA65A: 0x00D6, # =
0xA65B: 0x00D5, # =
0xA65C: 0x016A, # =
0xA65D: 0x00DA, # =
0xA65E: 0x01D3, # =
0xA65F: 0x00D9, # =
0xA660: 0x00DB, # =
0xA661: 0x00DC, # =
0xA662: 0x01D5, # =
0xA663: 0x01D7, # =
0xA664: 0x01D9, # =
0xA665: 0x01DB, # =
0xA666: 0xA666, #
0xA667: 0x0108, # =
0xA668: 0x011C, # =
0xA669: 0x0124, # =
0xA66A: 0x0134, # =
0xA66B: 0x0160, # =
0xA66C: 0x015C, # =
0xA66D: 0x0178, # =
0xA66E: 0x017D, # =
0xA66F: 0x1E90, # =
0xA670: 0x0125, # =
0xA671: 0x0135, # =
0xA672: 0x00D1, # =
0xA673: 0x00E1, #
0xA674: 0x00E9, #
0xA675: 0x00ED, #
0xA676: 0x00F3, #
0xA677: 0x00FA, #
0xA678: 0x2339D, #
0xA679: 0x29F15, #
0xA67A: 0x23293, #
0xA67B: 0x3CA0, #
0xA67C: 0x2F922, #
0xA67D: 0x24271, #
0xA67E: 0x2720F, #
0xA680: 0x00C1, #
0xA681: 0x0403, #
0xA682: 0x00C9, #
0xA683: 0x040C, #
0xA684: 0x00D3, #
0xA685: 0x00FD, #
0xA686: 0xA686, #
0xA687: 0xA687, #
0xA688: 0x04EC, #
0xA689: 0xA689, #
0xA68A: 0xA68A, #
0xA68B: 0xA68B, #
0xA68C: 0xA68C, #
0xA68D: 0xA68D, #
0xA68E: 0x27E1B, #
0xA68F: 0x910B, #
0xA690: 0x29F14, #
0xA691: 0x2A0DF, #
0xA692: 0x20270, #
0xA693: 0x203F1, #
0xA694: 0x211AB, #
0xA695: 0x211E5, #
0xA696: 0x21290, #
0xA697: 0x363E, #
0xA698: 0x212DF, #
0xA699: 0x57D7, #
0xA69A: 0x2165F, #
0xA69B: 0x248C2, #
0xA69C: 0x22288, #
0xA69D: 0x23C62, #
0xA69E: 0x24276, #
0xA69F: 0xFF1A, # =
0xA6A0: 0xFF1B, # =
0xA6B9: 0x2202, # =
0xA6BA: 0x03F5, # =
0xA6BB: 0x03D1, # =
0xA6BC: 0x03D5, # =
0xA6BD: 0x03C6, # =
0xA6BE: 0x03F0, # =
0xA6BF: 0x03F1, # =
0xA6C0: 0x03C2, # =
0xA6D9: 0xFE10, # =
0xA6DA: 0xFE12, # =
0xA6DB: 0xFE11, # =
0xA6DC: 0xFE13, # =
0xA6DD: 0xFE14, # =
0xA6DE: 0xFE15, # =
0xA6DF: 0xFE16, # =
0xA6EC: 0xFE17, # =
0xA6ED: 0xFE18, # =
0xA6F3: 0xFE19, # =
0xA6F6: 0x00B7, # =
0xA6F7: 0xA6F7, # middle
0xA6F8: 0xA6F8, # middle
0xA6F9: 0xA6F9, # middle
0xA6FA: 0xA6FA, # middle
0xA6FB: 0xA6FB, # middle
0xA6FC: 0xA6FC, # middle
0xA6FD: 0xA6FD, # middle
0xA6FE: 0xA6FE #
})
# Area A7
_update({
0xA740: 0x24235, #
0xA741: 0x2431A, #
0xA742: 0x2489B, #
0xA743: 0x4B63, #
0xA744: 0x25581, #
0xA745: 0x25BB0, #
0xA746: 0x7C06, #
0xA747: 0x23388, #
0xA748: 0x26A40, #
0xA749: 0x26F16, #
0xA74A: 0x2717F, #
0xA74B: 0x22A98, #
0xA74C: 0x3005, #
0xA74D: 0x22F7E, #
0xA74E: 0x27BAA, #
0xA74F: 0x20242, #
0xA750: 0x23C5D, #
0xA751: 0x22650, #
0xA752: 0x247EF, #
0xA753: 0x26221, #
0xA754: 0x29A02, #
0xA755: 0x45EA, #
0xA756: 0x26B4C, #
0xA757: 0x26D9F, #
0xA758: 0x26ED8, #
0xA759: 0x359E, #
0xA75A: 0x20E01, #
0xA75B: 0x20F90, #
0xA75C: 0x3A18, #
0xA75D: 0x241A2, #
0xA75E: 0x3B74, #
0xA75F: 0x43F2, #
0xA760: 0x40DA, #
0xA761: 0x3FA6, #
0xA762: 0x24ECA, #
0xA763: 0x28C3E, #
0xA764: 0x28C47, #
0xA765: 0x28C4D, #
0xA766: 0x28C4F, #
0xA767: 0x28C4E, #
0xA768: 0x28C54, #
0xA769: 0x28C53, #
0xA76A: 0x25128, #
0xA76B: 0x251A7, #
0xA76C: 0x45AC, #
0xA76D: 0x26A2D, #
0xA76E: 0x41F2, #
0xA76F: 0x26393, #
0xA770: 0x29F7C, #
0xA771: 0x29F7E, #
0xA772: 0x29F83, #
0xA773: 0x29F87, #
0xA774: 0x29F8C, #
0xA775: 0x27785, #
0xA776: 0x2775E, #
0xA777: 0x28EE7, #
0xA778: 0x290AF, #
0xA779: 0x2070E, #
0xA77A: 0x22AC1, #
0xA77B: 0x20CED, #
0xA77C: 0x3598, #
0xA77D: 0x220C7, #
0xA77E: 0x22B43, #
0xA780: 0x4367, #
0xA781: 0x20CD3, #
0xA782: 0x20CAC, #
0xA783: 0x36E2, #
0xA784: 0x35CE, #
0xA785: 0x3B39, #
0xA786: 0x44EA, #
0xA787: 0x20E96, #
0xA788: 0x20E4C, #
0xA789: 0x35ED, #
0xA78A: 0x20EF9, #
0xA78B: 0x24319, #
0xA78C: 0x267CC, #
0xA78D: 0x28056, #
0xA78E: 0x28840, #
0xA78F: 0x20F90, #
0xA790: 0x21014, #
0xA791: 0x236DC, #
0xA792: 0x28A17, #
0xA793: 0x28879, #
0xA794: 0x4C9E, #
0xA795: 0x20410, #
0xA796: 0x40DF, #
0xA797: 0x210BF, #
0xA798: 0x22E0B, #
0xA799: 0x4312, #
0xA79A: 0x233AB, #
0xA79B: 0x2812E, #
0xA79C: 0x4A31, #
0xA79D: 0x27B48, #
0xA79E: 0x29EAC, #
0xA79F: 0x23822, #
0xA7A0: 0x244CB, #
0xA7C2: 0x0409, # LJE =
0xA7C3: 0x040A, # NJE =
0xA7C4: 0x040F, # DZHE =
0xA7C5: 0x04AE, # =
0xA7C6: 0x0402, # =
0xA7C7: 0x040B, # =
0xA7C8: 0x0474, # =
0xA7C9: 0x0462, # =
0xA7CA: 0x0463, # =
0xA7CB: 0x04E8, # =
0xA7CC: 0x0459, # =
0xA7CD: 0x045A, # =
0xA7CE: 0x045F, # =
0xA7CF: 0x04AF, # =
0xA7F2: 0x00E1, # =
0xA7F3: 0x00E9, # =
0xA7F4: 0xA7F4, #
0xA7F5: 0x00F3, # =
0xA7F6: 0x00FD, # =
0xA7F7: 0xA7F7, #
0xA7F8: 0xA7F8, #
0xA7F9: 0xA7F9, #
0xA7FA: 0xA7FA, #
0xA7FB: 0x0452, # =
0xA7FC: 0x045B, # =
0xA7FD: 0x0475, # =
0xA7FE: 0x04E9 # =
})
# Area A8
_update({
0xA8BC: 0x1E3F, # () =
0xA8C1: 0xA8C1, # +
0xA8C2: 0xA8C2, # +
0xA8C3: 0xA8C3, # +
0xA8C4: 0x4E00, # =
0xA8EA: 0xA8EA, # +
0xA8EB: 0xA8EB, # +
0xA8EC: 0xA8EC, # +
0xA8ED: 0xA8ED, # +
0xA8EE: 0xA8EE, # +
0xA8EF: 0xA8EF, # +
0xA8F0: 0xA8F0, # +
0xA8F1: 0xA8F1, # +
0xA8F2: 0xA8F2, # +
0xA8F3: 0xA8F3, # +
0xA8F4: 0xA8F4, # +
0xA8F5: 0xA8F5, # +
0xA8F6: 0xA8F6, # +
0xA8F7: 0xA8F7, # +
0xA8F8: 0xA8F8, # +
0xA8F9: 0xA8F9, # +
0xA8FA: 0xA8FA, # +
0xA8FB: 0xA8FB, # +
0xA8FC: 0xA8FC, # +
0xA8FD: 0xA8FD, # +
0xA8FE: 0xA8FE # +
})
# Area A9
_update({
0xA9A1: 0xA9A1, #
0xA9A2: 0xA9A2, #
0xA9F0: 0x21E8, # =
0xA9F1: 0x21E6, # =
0xA9F2: 0x2B06, # =
0xA9F3: 0x2B07, # =
0xA9F4: 0x27A1, # =
0xA9F5: 0x2B05, # =
0xA9F6: 0x2B62, # - =
0xA9F7: 0x2B60, # - =
0xA9F8: 0x2B61, # - =
0xA9F9: 0x2B63, # - =
0xA9FA: 0x21C1, # - =
0xA9FB: 0x21BD, # - =
0xA9FC: 0xA9FC, # -
0xA9FD: 0x2195, # - =
0xA9FE: 0x2B65, # - =
})
# Area AA
_update({
0xAAA1: 0xAAA1, # BD
0xAAA2: 0xAAA2, # BD)
0xAAA3: 0xAAA3, # BD
0xAAA4: 0xAAA4, # BD
0xAAA5: 0xAAA5, # BD
0xAAA6: 0xAAA6, # BD
0xAAA7: 0xAAA7, # BD +
0xAAA8: 0xAAA8, # BD +
0xAAA9: 0xAAA9, # BD
0xAAAA: 0xAAAA, # BD
0xAAAB: 0xAAAB, # BD
0xAAAC: 0xAAAC, # BD
0xAAAD: 0xAAAD, # BD
0xAAB0: 0x002C, # = ,
0xAAB1: 0x002E, # = .
0xAAB2: 0x2010, # =
0xAAB3: 0x002A, # = *
0xAAB4: 0x0021, # = !
0xAAB5: 0x2202, # =
0xAAB6: 0x2211, # =
0xAAB7: 0x220F, # =
0xAAB8: 0x2AEE, # =
0xAAB9: 0x2031, # =
0xAABA: 0x227B, # =
0xAABB: 0x227A, # =
0xAABC: 0x2282, # =
0xAABD: 0x2283, # =
0xAABE: 0x225C, # Delta =
0xAABF: 0x00AC, # =
0xAAC0: 0x22CD, #
0xAAC1: 0x2286, # =
0xAAC2: 0x2287, # =
0xAAC3: 0x225C, #
0xAAC4: 0x2243, # =
0xAAC5: 0x2265, # =
0xAAC6: 0x2264, # =
0xAAC7: 0x2214, # =
0xAAC8: 0x2238, # =
0xAAC9: 0x2A30, # =
0xAACA: 0x2271, # =
0xAACB: 0x2270, # =
0xAACC: 0x2AB0, #
0xAACD: 0x2AAF, #
0xAACE: 0x5350, #
0xAACF: 0x212A, # =
0xAAD0: 0x2200, # =
0xAAD1: 0x21D1, #
0xAAD2: 0x21E7, #
0xAAD3: 0x21BE, #
0xAAD4: 0x21D3, #
0xAAD5: 0x21E9, #
0xAAD6: 0x21C3, #
0xAAD7: 0x2935, #
0xAAD8: 0x21E5, #
0xAAD9: 0x22F0, # =
0xAADA: 0x21D4, # =
0xAADB: 0x21C6, #
0xAADC: 0x2194, #
0xAADD: 0x21D2, # =
0xAADE: 0x21E8, #
0xAADF: 0x21C0, #
0xAAE0: 0x27F6, #
0xAAE1: 0x21D0, #
0xAAE2: 0x21E6, #
0xAAE3: 0x21BC, #
0xAAE4: 0x27F5, #
0xAAE5: 0x2196, #
0xAAE6: 0x2199, #
0xAAE7: 0x2198, #
0xAAE8: 0x2197, #
0xAAE9: 0x22D5, # =
0xAAEA: 0x2AC5, # =
0xAAEB: 0x2AC6, # =
0xAAEC: 0x29CB, # =
0xAAED: 0x226B, # =
0xAAEE: 0x226A, # =
0xAAEF: 0x2A72, # =
0xAAF0: 0x22BB, #
0xAAF1: 0x2AE8, # =
0xAAF2: 0x2277, # =
0xAAF3: 0x227D, #
0xAAF4: 0x227C, #
0xAAF5: 0x2109, # =
0xAAF6: 0x2203, # =
0xAAF7: 0x22F1, # =
0xAAF9: 0x2241, #
0xAAFA: 0x2244, #
0xAAFB: 0x2276, #
0xAAFC: 0x2209, # =
0xAAFD: 0x2267, #
0xAAFE: 0x2266 #
})
# Area AB
_update({
0xABA1: 0x224B, #
0xABA2: 0x2262, # =
0xABA3: 0x2251, # =
0xABA4: 0x2284, # =
0xABA5: 0x2285, # =
0xABA6: 0x2259, # =
0xABA7: 0x2205, # =
0xABA8: 0x2207, # =
0xABA9: 0x2A01, # =
0xABAA: 0x2A02, # =
0xABAB: 0x03F9, # =
0xABAC: 0xABAC, # +
0xABAD: 0x263C, #
0xABAE: 0xABAE, # +
0xABAF: 0x2247, # =
0xABB0: 0x2249, # =
0xABB1: 0x2278, # =
0xABB2: 0x22F6, # =
0xABB3: 0x2AFA, # =
0xABB4: 0x2AF9, # =
0xABB5: 0x2245, # =
0xABB6: 0x2267, # =
0xABB7: 0x2250, # =
0xABB8: 0x2266, # =
0xABB9: 0x2A26, # =
0xABBA: 0x2213, # =
0xABBB: 0x233F, #
0xABBC: 0x30FC, # =
0xABBD: 0xABBD, # +
0xABBE: 0x2288, # =
0xABBF: 0x2289, # =
0xABC0: 0x225A, # =
0xABC1: 0x2205, # =
0xABC2: 0x2205, # diagonal
0xABC3: 0x0024, # $
0xABC4: 0x2709, #
0xABC5: 0x272E, #
0xABC6: 0x272F, #
0xABC7: 0x2744, #
0xABC8: 0x211E, # =
0xABC9: 0x1D110, #
0xABCA: 0x2034, # =
0xABCB: 0xABCB, # +
0xABCC: 0x2ACB, # =
0xABCD: 0x2ACC, # =
0xABCE: 0x2A63, #
0xABCF: 0xABCF, # 00 + \
0xABD0: 0xABD0, # 11 + \
0xABD1: 0xABD1, # 22 + \
0xABD2: 0xABD2, # 33 + \
0xABD3: 0xABD3, # 44 + \
0xABD4: 0xABD4, # 55 + \
0xABD5: 0xABD5, # 66 + \
0xABD6: 0xABD6, # 77 + \
0xABD7: 0xABD7, # 88 + \
0xABD8: 0xABD8, # 99 + \
0xABD9: 0x216C, # 50 =
0xABDA: 0x216D, # 100 =
0xABDB: 0x216E, # 500 =
0xABDC: 0x216F, # 1000 =
0xABDD: 0x2295, # =
0xABDE: 0xABDE, # +
0xABDF: 0x2296, # =
0xABE0: 0xABE0, # +
0xABE1: 0x2297, # =
0xABE2: 0x2A38, # =
0xABE3: 0x229C, # =
0xABE4: 0xABE4, # +
0xABE5: 0xABE5, # +
0xABE6: 0xABE6, # +
0xABE7: 0x224A, # =
0xABE8: 0xABE8, # > + >
0xABE9: 0xABE9, # < + <
0xABEA: 0x22DB, # =
0xABEB: 0x22DA, # =
0xABEC: 0x2A8C, # =
0xABED: 0x2A8B, # =
0xABEE: 0x2273, #
0xABEF: 0x2272, #
0xABF0: 0x29A5, #
0xABF1: 0x29A4, #
0xABF2: 0x2660, # =
0xABF3: 0x2394, # =
0xABF4: 0x2B20, # =
0xABF5: 0x23E2, # =
0xABF6: 0x2663, # =
0xABF7: 0x25B1, # =
0xABF8: 0x25AD, # =
0xABF9: 0x25AF, # =
0xABFA: 0x2665, # =
0xABFB: 0x2666, # =
0xABFC: 0x25C1, # =
0xABFD: 0x25BD, # =
0xABFE: 0x25BD # =
})
# Area AC
_update({
0xACA1: 0x25C0, # =
0xACA2: 0x25BC, # =
0xACA3: 0x25B6, # =
0xACA4: 0x25FA, # =
0xACA5: 0x22BF, # =
0xACA6: 0x25B3, #
0xACA7: 0x27C1, #
0xACA8: 0x2BCE, #
0xACA9: 0x2B2F, #
0xACAA: 0xACAA, # +
0xACAB: 0x2B2E, #
0xACAC: 0x2279, # =
0xACAD: 0x1D10B, #
0xACAE: 0x2218, # =
0xACAF: 0xACAF, # vertical
0xACB2: 0xACB2, # F-like symbol
0xACB3: 0x22A6, #
0xACB4: 0x22A7, #
0xACB5: 0x22A8, #
0xACB6: 0x29FA, # =
0xACB7: 0x29FB, # =
0xACB8: 0xACB8, # ++++
0xACB9: 0x291A, #
0xACBA: 0xACBA, # + _
0xACBB: 0xACBB, # + _
0xACBC: 0x2713, # =
0xACBD: 0x22CE, #
0xACBE: 0xACBE, # V + \
0xACBF: 0xACBF, # + | +
0xACC0: 0x224E, # =
0xACC1: 0x224F, # =
0xACC2: 0x23D3, #
0xACC3: 0xACC3, # + _
0xACC4: 0xACC4, # + _ + /
0xACC5: 0x2715, #
0xACC6: 0xACC6, # +
0xACC8: 0xACC8, # +
0xACC9: 0xACC9, # +
0xACCA: 0xACCA, # V
0xACCB: 0xACCB, # V
0xACCC: 0xACCC, # V
0xACCD: 0x2126, #
0xACCE: 0x221D, # =
0xACCF: 0x29A0, # =
0xACD0: 0x2222, # =
0xACD1: 0x2AAC, # =
0xACD2: 0x2239, # =
0xACD3: 0x223A, #
0xACD4: 0x2135, #
0xACD5: 0xACD5, # +
0xACD6: 0xACD6, # + + /
0xACD7: 0x21CC, #
0xACD8: 0x274B, #
0xACD9: 0x2B01, #
0xACDA: 0x2B03, #
0xACDB: 0x2B02, #
0xACDC: 0x2B00, #
0xACDD: 0xACDD, # +
0xACDE: 0xACDE, # +
0xACDF: 0xACDE, # +
0xACE0: 0xACE0, # [ +
0xACE1: 0xACE1, # +
0xACE2: 0xACE2, # +
0xACE3: 0xACE3, # ] +
0xACE4: 0xACE4, # +
0xACE5: 0xACE5, # + +
0xACE6: 0xACE6, # + +
0xACE7: 0xACE7, # + +
0xACE8: 0xACE8, # + +
0xACE9: 0x2233, # =
0xACEA: 0x2232, # =
0xACEB: 0x222C, # =
0xACEC: 0x222F, # =
0xACED: 0x222D, # =
0xACEE: 0x2230, # =
0xACEF: 0x0421, # =
0xACF0: 0x2019, # =
0xACF1: 0x0027, # = '
0xACF2: 0x03A3, # =
0xACF3: 0x03A0, # =
0xACF4: 0x02C7, # =
0xACF5: 0x02CB, # =
0xACF6: 0x02CA, # =
0xACF7: 0x02D9, # =
0xACF8: 0x29F72, #
0xACF9: 0x362D, #
0xACFA: 0x3A52, #
0xACFB: 0x3E74, #
0xACFC: 0x27741, #
0xACFD: 0x30FC, # =
0xACFE: 0x2022 # =
})
# Area AD
_update({
0xADA1: 0x3280, # =
0xADA2: 0x3281, # =
0xADA3: 0x3282, # =
0xADA4: 0x3283, # =
0xADA5: 0x3284, # =
0xADA6: 0x3285, # =
0xADA7: 0x3286, # =
0xADA8: 0x3287, # =
0xADA9: 0x3288, # =
0xADAA: 0xADAA, # +
0xADAB: 0xADAB, # +
0xADAC: 0xADAC, # +
0xADAD: 0xADAD, # +
0xADAE: 0xADAE, # +
0xADAF: 0xADAF, # +
0xADB0: 0xADB0, # +
0xADB1: 0xADB1, # +
0xADB2: 0xADB2, # +
0xADB3: 0xADB3, # +
0xADB4: 0xADB4, # +
0xADB5: 0x24EA, # 0 =
0xADB6: 0x2018, # =
0xADB7: 0x201C, # =
0xADB8: 0x2019, # =
0xADB9: 0x201D, # =
0xADBA: 0x025B, # =
0xADBB: 0x0251, # =
0xADBC: 0x0259, # =
0xADBD: 0x025A, # =
0xADBE: 0x028C, # =
0xADBF: 0x0254, # =
0xADC0: 0x0283, # =
0xADC1: 0x02D1, # =
0xADC2: 0x02D0, # =
0xADC3: 0x0292, # =
0xADC4: 0x0261, # =
0xADC5: 0x03B8, # =
0xADC6: 0x00F0, # =
0xADC7: 0x014B, # =
0xADC8: 0x0264, # =
0xADC9: 0x0258, # =
0xADCA: 0x026A, # =
0xADCB: 0x0268, # =
0xADCC: 0x027F, # =
0xADCD: 0x0285, # =
0xADCE: 0x028A, # =
0xADCF: 0x00F8, # =
0xADD0: 0x0275, # =
0xADD1: 0x026F, # =
0xADD2: 0x028F, # =
0xADD3: 0x0265, # =
0xADD4: 0x0289, # =
0xADD5: 0x0278, # =
0xADD6: 0x0288, # =
0xADD7: 0x0290, # =
0xADD8: 0x0256, # =
0xADD9: 0x0282, # =
0xADDA: 0x0272, # =
0xADDB: 0x0271, # =
0xADDC: 0x03B3, # =
0xADDD: 0x0221, # =
0xADDE: 0x0255, # =
0xADDF: 0x0235, # =
0xADE0: 0x0291, # =
0xADE1: 0x0236, # =
0xADE2: 0x026C, # =
0xADE3: 0x028E, # =
0xADE4: 0x1D84, # =
0xADE5: 0xAB53, # =
0xADE6: 0x0127, # =
0xADE7: 0x0263, # =
0xADE8: 0x0281, # =
0xADE9: 0x0294, # =
0xADEA: 0x0295, # =
0xADEB: 0x0262, # =
0xADEC: 0x0266, # =
0xADED: 0x4C7D, #
0xADEE: 0x24B6D, #
0xADEF: 0x00B8, # =
0xADF0: 0x02DB, # =
0xADF1: 0x04D8, # =
0xADF2: 0x04BA, # =
0xADF3: 0x0496, # =
0xADF4: 0x04A2, # =
0xADF5: 0x2107B, #
0xADF6: 0x2B62C, #
0xADF7: 0x04D9, # =
0xADF8: 0x04BB, # =
0xADF9: 0x0497, # =
0xADFA: 0x04A3, # =
0xADFB: 0x40CE, #
0xADFC: 0x04AF, # =
0xADFD: 0x02CC, # =
0xADFE: 0xff40 # =
})
# Area F8
_update({
0xF8A1: 0x5C2A, #
0xF8A2: 0x97E8, #
0xF8A3: 0x5F67, #
0xF8A4: 0x672E, #
0xF8A5: 0x4EB6, #
0xF8A6: 0x53C6, #
0xF8A7: 0x53C7, #
0xF8A8: 0x8BBB, #
0xF8A9: 0x27BAA, #
0xF8AA: 0x8BEA, #
0xF8AB: 0x8C09, #
0xF8AC: 0x8C1E, #
0xF8AD: 0x5396, #
0xF8AE: 0x9EE1, #
0xF8AF: 0x533D, #
0xF8B0: 0x5232, #
0xF8B1: 0x6706, #
0xF8B2: 0x50F0, #
0xF8B3: 0x4F3B, #
0xF8B4: 0x20242, #
0xF8B5: 0x5092, #
0xF8B6: 0x5072, #
0xF8B7: 0x8129, #
0xF8B8: 0x50DC, #
0xF8B9: 0x90A0, #
0xF8BA: 0x9120, #
0xF8BB: 0x911C, #
0xF8BC: 0x52BB, #
0xF8BD: 0x52F7, #
0xF8BE: 0x6C67, #
0xF8BF: 0x6C9A, #
0xF8C0: 0x6C6D, #
0xF8C1: 0x6D34, #
0xF8C2: 0x6D50, #
0xF8C3: 0x6D49, #
0xF8C4: 0x6DA2, #
0xF8C5: 0x6D65, #
0xF8C6: 0x6DF4, #
0xF8C7: 0x6EEA, #
0xF8C8: 0x6E87, #
0xF8C9: 0x6EC9, #
0xF8CA: 0x6FBC, #
0xF8CB: 0x6017, #
0xF8CC: 0x22650, #
0xF8CD: 0x6097, #
0xF8CE: 0x60B0, #
0xF8CF: 0x60D3, #
0xF8D0: 0x6153, #
0xF8D1: 0x5BAC, #
0xF8D2: 0x5EBC, #
0xF8D3: 0x95EC, #
0xF8D4: 0x95FF, #
0xF8D5: 0x9607, #
0xF8D6: 0x9613, #
0xF8D7: 0x961B, #
0xF8D8: 0x631C, #
0xF8D9: 0x630C, #
0xF8DA: 0x63AF, #
0xF8DB: 0x6412, #
0xF8DC: 0x63F3, #
0xF8DD: 0x6422, #
0xF8DE: 0x5787, #
0xF8DF: 0x57B5, #
0xF8E0: 0x57BD, #
0xF8E1: 0x57FC, #
0xF8E2: 0x56AD, #
0xF8E3: 0x26B4C, #
0xF8E4: 0x8313, #
0xF8E5: 0x8359, #
0xF8E6: 0x82F3, #
0xF8E7: 0x8399, #
0xF8E8: 0x44D6, #
0xF8E9: 0x841A, #
0xF8EA: 0x83D1, #
0xF8EB: 0x84C2, #
0xF8EC: 0x8439, #
0xF8ED: 0x844E, #
0xF8EE: 0x8447, #
0xF8EF: 0x84DA, #
0xF8F0: 0x26D9F, #
0xF8F1: 0x849F, #
0xF8F2: 0x84BB, #
0xF8F3: 0x850A, #
0xF8F4: 0x26ED8, #
0xF8F5: 0x85A2, #
0xF8F6: 0x85B8, #
0xF8F7: 0x85E8, #
0xF8F8: 0x8618, #
0xF8F9: 0x596D, #
0xF8FA: 0x546F, #
0xF8FB: 0x54A5, #
0xF8FC: 0x551D, #
0xF8FD: 0x5536, #
0xF8FE: 0x556F #
})
# Area F9
_update({
0xF9A1: 0x5621, #
0xF9A2: 0x20E01, #
0xF9A3: 0x20F90, #
0xF9A4: 0x360E, #
0xF9A5: 0x56F7, #
0xF9A6: 0x5E21, #
0xF9A7: 0x5E28, #
0xF9A8: 0x5CA8, #
0xF9A9: 0x5CE3, #
0xF9AA: 0x5D5A, #
0xF9AB: 0x5D4E, #
0xF9AC: 0x5D56, #
0xF9AD: 0x5DC2, #
0xF9AE: 0x8852, #
0xF9AF: 0x5FAF, #
0xF9B0: 0x5910, #
0xF9B1: 0x7330, #
0xF9B2: 0x247EF, #
0xF9B3: 0x734F, #
0xF9B4: 0x9964, #
0xF9B5: 0x9973, #
0xF9B6: 0x997E, #
0xF9B7: 0x9982, #
0xF9B8: 0x9989, #
0xF9B9: 0x5C43, #
0xF9BA: 0x5F36, #
0xF9BB: 0x5B56, #
0xF9BC: 0x59EE, #
0xF9BD: 0x5AEA, #
0xF9BE: 0x7ED6, #
0xF9BF: 0x7F0A, #
0xF9C0: 0x7E34, #
0xF9C1: 0x7F1E, #
0xF9C2: 0x26221, #
0xF9C3: 0x9A8E, #
0xF9C4: 0x29A02, #
0xF9C5: 0x9A95, #
0xF9C6: 0x9AA6, #
0xF9C7: 0x659D, #
0xF9C8: 0x241A2, #
0xF9C9: 0x712E, #
0xF9CA: 0x7943, #
0xF9CB: 0x794E, #
0xF9CC: 0x7972, #
0xF9CD: 0x7395, #
0xF9CE: 0x73A0, #
0xF9CF: 0x7399, #
0xF9D0: 0x73B1, #
0xF9D1: 0x73F0, #
0xF9D2: 0x740E, #
0xF9D3: 0x742F, #
0xF9D4: 0x7432, #
0xF9D5: 0x67EE, #
0xF9D6: 0x6812, #
0xF9D7: 0x3B74, #
0xF9D8: 0x6872, #
0xF9D9: 0x68BC, #
0xF9DA: 0x68B9, #
0xF9DB: 0x68C1, #
0xF9DC: 0x696F, #
0xF9DD: 0x69A0, #
0xF9DE: 0x69BE, #
0xF9DF: 0x69E5, #
0xF9E0: 0x6A9E, #
0xF9E1: 0x69DC, #
0xF9E2: 0x6B95, #
0xF9E3: 0x80FE, #
0xF9E4: 0x89F1, #
0xF9E5: 0x74FB, #
0xF9E6: 0x7503, #
0xF9E7: 0x80D4, #
0xF9E8: 0x22F7E, #
0xF9E9: 0x668D, #
0xF9EA: 0x9F12, #
0xF9EB: 0x6F26, #
0xF9EC: 0x8D51, #
0xF9ED: 0x8D52, #
0xF9EE: 0x8D57, #
0xF9EF: 0x7277, #
0xF9F0: 0x7297, #
0xF9F1: 0x23C5D, #
0xF9F2: 0x8090, #
0xF9F3: 0x43F2, #
0xF9F4: 0x6718, #
0xF9F5: 0x8158, #
0xF9F6: 0x81D1, #
0xF9F7: 0x7241, #
0xF9F8: 0x7242, #
0xF9F9: 0x7A85, #
0xF9FA: 0x7A8E, #
0xF9FB: 0x7ABE, #
0xF9FC: 0x75A2, #
0xF9FD: 0x75AD, #
0xF9FE: 0x75CE #
})
# Area FA
_update({
0xFAA1: 0x3FA6, #
0xFAA2: 0x7604, #
0xFAA3: 0x7606, #
0xFAA4: 0x7608, #
0xFAA5: 0x24ECA, #
0xFAA6: 0x88C8, #
0xFAA7: 0x7806, #
0xFAA8: 0x7822, #
0xFAA9: 0x7841, #
0xFAAA: 0x7859, #
0xFAAB: 0x785A, #
0xFAAC: 0x7875, #
0xFAAD: 0x7894, #
0xFAAE: 0x40DA, #
0xFAAF: 0x790C, #
0xFAB0: 0x771C, #
0xFAB1: 0x251A7, #
0xFAB2: 0x7786, #
0xFAB3: 0x778B, #
0xFAB4: 0x7564, #
0xFAB5: 0x756C, #
0xFAB6: 0x756F, #
0xFAB7: 0x76C9, #
0xFAB8: 0x76DD, #
0xFAB9: 0x28C3E, #
0xFABA: 0x497A, #
0xFABB: 0x94D3, #
0xFABC: 0x94E6, #
0xFABD: 0x9575, #
0xFABE: 0x9520, #
0xFABF: 0x9527, #
0xFAC0: 0x28C4F, #
0xFAC1: 0x9543, #
0xFAC2: 0x953D, #
0xFAC3: 0x28C4E, #
0xFAC4: 0x28C54, #
0xFAC5: 0x28C53, #
0xFAC6: 0x9574, #
0xFAC7: 0x79FE, #
0xFAC8: 0x7A16, #
0xFAC9: 0x415F, #
0xFACA: 0x7A5E, #
0xFACB: 0x9E30, #
0xFACC: 0x9E34, #
0xFACD: 0x9E27, #
0xFACE: 0x9E2E, #
0xFACF: 0x9E52, #
0xFAD0: 0x9E53, #
0xFAD1: 0x9E59, #
0xFAD2: 0x9E56, #
0xFAD3: 0x9E61, #
0xFAD4: 0x9E6F, #
0xFAD5: 0x77DE, #
0xFAD6: 0x76B6, #
0xFAD7: 0x7F91, #
0xFAD8: 0x7F93, #
0xFAD9: 0x26393, #
0xFADA: 0x7CA6, #
0xFADB: 0x43AC, #
0xFADC: 0x8030, #
0xFADD: 0x8064, #
0xFADE: 0x8985, #
0xFADF: 0x9892, #
0xFAE0: 0x98A3, #
0xFAE1: 0x8683, #
0xFAE2: 0x86B2, #
0xFAE3: 0x45AC, #
0xFAE4: 0x8705, #
0xFAE5: 0x8730, #
0xFAE6: 0x45EA, #
0xFAE7: 0x8758, #
0xFAE8: 0x7F4D, #
0xFAE9: 0x7B4A, #
0xFAEA: 0x41F2, #
0xFAEB: 0x7BF0, #
0xFAEC: 0x7C09, #
0xFAED: 0x7BEF, #
0xFAEE: 0x7BF2, #
0xFAEF: 0x7C20, #
0xFAF0: 0x26A2D, #
0xFAF1: 0x8C68, #
0xFAF2: 0x8C6D, #
0xFAF3: 0x8DF6, #
0xFAF4: 0x8E04, #
0xFAF5: 0x8E26, #
0xFAF6: 0x8E16, #
0xFAF7: 0x8E27, #
0xFAF8: 0x8E53, #
0xFAF9: 0x8E50, #
0xFAFA: 0x8C90, #
0xFAFB: 0x9702, #
0xFAFC: 0x9F81, #
0xFAFD: 0x9F82, #
0xFAFE: 0x9C7D #
})
# Area FB
_update({
0xFBA1: 0x9C8A, #
0xFBA2: 0x9C80, #
0xFBA3: 0x9C8F, #
0xFBA4: 0x4C9F, #
0xFBA5: 0x9C99, #
0xFBA6: 0x9C97, #
0xFBA7: 0x29F7C, #
0xFBA8: 0x9C96, #
0xFBA9: 0x29F7E, #
0xFBAA: 0x29F83, #
0xFBAB: 0x29F87, #
0xFBAC: 0x9CC1, #
0xFBAD: 0x9CD1, #
0xFBAE: 0x9CDB, #
0xFBAF: 0x9CD2, #
0xFBB0: 0x29F8C, #
0xFBB1: 0x9CE3, #
0xFBB2: 0x977A, #
0xFBB3: 0x97AE, #
0xFBB4: 0x97A8, #
0xFBB5: 0x9B4C, #
0xFBB6: 0x9B10, #
0xFBB7: 0x9B18, #
0xFBB8: 0x9E80, #
0xFBB9: 0x9E95, #
0xFBBA: 0x9E91, #
})
"B"
symbolsB = UnicodeMap()
symbolsB.update({
0x8940: 0x1E37, # =
0x8941: 0x1E43, # =
0x8942: 0x1E47, # =
0x8943: 0x015E, # =
0x8944: 0x015F, # =
0x8945: 0x0162, # =
0x8946: 0x0163, # =
0x94C0: 0x2654, # - =
0x94C1: 0x2655, # - =
0x94C2: 0x2656, # - =
0x94C3: 0x2658, # - =
0x94C4: 0x2657, # - =
0x94C5: 0x2659, # - =
0x94C6: 0x265A, # - =
0x94C7: 0x265B, # - =
0x94C8: 0x265C, # - =
0x94C9: 0x265E, # - =
0x94CA: 0x265D, # - =
0x94CB: 0x265F, # - =
0x94EC: 0x2660, # - =
0x94ED: 0x2665, # - =
0x94EE: 0x2666, # - =
0x94EF: 0x2663, # - =
0x95F1: 0x1FA67, # - =
0x95F2: 0x1FA64, # - =
0x95F3: 0x1FA63, # - =
0x95F4: 0x1FA65, # - =
0x95F5: 0x1FA66, # - =
0x95F6: 0x1FA62, # - =
0x95F7: 0x1FA61, # - =
0x95F8: 0x1FA60, # - =
0x95F9: 0x1FA6B, # - =
0x95FA: 0x1FA6A, # - =
0x95FB: 0x1FA6C, # - =
0x95FC: 0x1FA6D, # - =
0x95FD: 0x1FA68, # - =
0x95FE: 0x1FA69, # - =
0x968F: 0x1D11E, # =
0x97A0: 0x4DC0, # =
0x97A1: 0x4DC1, # =
0x97A2: 0x4DC2, # =
0x97A3: 0x4DC3, # =
0x97A4: 0x4DC4, # =
0x97A5: 0x4DC5, # =
0x97A6: 0x4DC6, # =
0x97A7: 0x4DC7, # =
0x97A8: 0x4DC8, # =
0x97A9: 0x4DC9, # =
0x97AA: 0x4DCA, # =
0x97AB: 0x4DCB, # =
0x97AC: 0x4DCC, # =
0x97AD: 0x4DCD, # =
0x97AE: 0x4DCE, # =
0x97AF: 0x4DCF, # =
0x97B0: 0x4DD0, # =
0x97B1: 0x4DD1, # =
0x97B2: 0x4DD2, # =
0x97B3: 0x4DD3, # =
0x97B4: 0x4DD4, # =
0x97B5: 0x4DD5, # =
0x97B6: 0x4DD6, # =
0x97B7: 0x4DD7, # =
0x97B8: 0x4DD8, # =
0x97B9: 0x4DD9, # =
0x97BA: 0x4DDA, # =
0x97BB: 0x4DDB, # =
0x97BC: 0x4DDC, # =
0x97BD: 0x4DDD, # =
0x97BE: 0x4DDE, # =
0x97BF: 0x4DDF, # =
0x97C0: 0x4DE0, # =
0x97C1: 0x4DE1, # =
0x97C2: 0x4DE2, # =
0x97C3: 0x4DE3, # =
0x97C4: 0x4DE4, # =
0x97C5: 0x4DE5, # =
0x97C6: 0x4DE6, # =
0x97C7: 0x4DE7, # =
0x97C8: 0x4DE8, # =
0x97C9: 0x4DE9, # =
0x97CA: 0x4DEA, # =
0x97CB: 0x4DEB, # =
0x97CC: 0x4DEC, # =
0x97CD: 0x4DED, # =
0x97CE: 0x4DEE, # =
0x97CF: 0x4DEF, # =
0x97D0: 0x4DF0, # =
0x97D1: 0x4DF1, # =
0x97D2: 0x4DF2, # =
0x97D3: 0x4DF3, # =
0x97D4: 0x4DF4, # =
0x97D5: 0x4DF5, # =
0x97D6: 0x4DF6, # =
0x97D7: 0x4DF7, # =
0x97D8: 0x4DF8, # =
0x97D9: 0x4DF9, # =
0x97DA: 0x4DFA, # =
0x97DB: 0x4DFB, # =
0x97DC: 0x4DFC, # =
0x97DD: 0x4DFD, # =
0x97DE: 0x4DFE, # =
0x97DF: 0x4DFF, # =
0x97E0: 0x2630, # =
0x97E1: 0x2637, # =
0x97E2: 0x2633, # =
0x97E3: 0x2634, # =
0x97E4: 0x2635, # =
0x97E5: 0x2632, # =
0x97E6: 0x2636, # =
0x97E7: 0x2631, # =
0x97EF: 0x2A0D, # =
0x97F0: 0x0274, # =
0x97F1: 0x0280, # =
0x97F2: 0x97F2, #
0x97F3: 0x97F3, #
0xA080: 0x00B7, # =
0xA08E: 0x2039, # =
0xA08F: 0x203A, # =
0xA090: 0x00AB, # =
0xA091: 0x00BB, # =
0xBD8A: 0x2201, # =
0xBD8B: 0x2115, # N =
0xBD8C: 0x2124, # Z =
0xBD8D: 0x211A, # Q =
0xBD8E: 0x211D, # R =
0xBD8F: 0x2102, # C =
0xBD90: 0x00AC, # =
0xBD93: 0xBD93, # + \
0xBD94: 0xBD94, # + |
0xBD95: 0x220B, # =
0xBD96: 0x220C, # =
0xBD97: 0xBD97, # + |
0xBD98: 0xBD98, # + \
0xBD99: 0x22FD, # =
0xBD9A: 0xBD9A, # = + \
0xBD9B: 0x1d463 #
})
| 28.744308 | 88 | 0.518268 |
c933cadd6174b03b61565756a1609302c0c6bfc6 | 6,176 | py | Python | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | 2 | 2022-03-26T15:27:31.000Z | 2022-03-28T22:00:32.000Z | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | null | null | null | moona/lifespan/handlers.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | [
"MIT"
] | null | null | null | from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, TypeVar
from pymon import Future, Pipe, cmap, creducel, hof_2, this_async
from pymon.core import returns_future
from moona.lifespan import LifespanContext
LifespanFunc = Callable[[LifespanContext], Future[LifespanContext | None]]
_LifespanHandler = Callable[
[LifespanFunc, LifespanContext], Future[LifespanContext | None]
]
def compose(h1: _LifespanHandler, h2: _LifespanHandler) -> LifespanHandler:
"""Compose 2 `LifespanHandler`s into one.
Args:
h1 (_LifespanHandler): to run first.
h2 (_LifespanHandler): to run second.
Returns:
LifespanHandler: resulting handler.
"""
return LifespanHandler(handler)
A = TypeVar("A")
B = TypeVar("B")
C = TypeVar("C")
def handler(func: _LifespanHandler) -> LifespanHandler:
"""Decorator that converts function to LifespanHandler callable."""
return LifespanHandler(func)
def handle_func(func: LifespanFunc) -> LifespanHandler:
"""Converts `LifespanFunc` to `LifespanHandler`.
Args:
func (LifespanFunc): to convert to `LifespanHandler`.
Returns:
LifespanHandler: result.
"""
return _handler
def handle_func_sync(
func: Callable[[LifespanContext], LifespanContext | None]
) -> LifespanHandler:
"""Converts sync `LifespanFunc` to `LifespanHandler`.
Args:
func (Callable[[LifespanContext], LifespanContext | None]): to convert to
`LifespanHandler`.
Returns:
LifespanHandler: result.
"""
return _handler
def choose(handlers: list[LifespanHandler]) -> LifespanHandler:
"""Iterate though handlers till one would return some `LifespanContext`.
Args:
handlers (list[LifespanHandler]): to iterate through.
Returns:
LifespanHandler: result.
"""
return _handler
def handler1(
func: Callable[[A, LifespanFunc, LifespanContext], Future[LifespanContext | None]]
) -> Callable[[A], LifespanHandler]:
"""Decorator for LifespanHandlers with 1 additional argument.
Makes it "curried".
"""
return wrapper
def handler2(
func: Callable[
[A, B, LifespanFunc, LifespanContext], Future[LifespanContext | None]
]
) -> Callable[[A, B], LifespanHandler]:
"""Decorator for LifespanHandlers with 2 additional arguments.
Makes it "curried".
"""
return wrapper
def handler3(
func: Callable[
[A, B, C, LifespanFunc, LifespanContext], Future[LifespanContext | None]
]
) -> Callable[[A, B, C], LifespanHandler]:
"""Decorator for LifespanHandlers with 1 additional argument.
Makes it "curried".
"""
return wrapper
def skip(_: LifespanContext) -> Future[None]:
"""`LifespanFunc` that skips pipeline by returning `None` instead of context.
Args:
_ (LifespanContext): ctx we don't care of.
Returns:
Future[None]: result.
"""
return Future(this_async(None))
def end(ctx: LifespanContext) -> Future[LifespanContext]:
"""`LifespanFunc` that finishes the pipeline of request handling.
Args:
ctx (LifespanContext): to end.
Returns:
Future[LifespanContext]: ended ctx.
"""
return Future(this_async(ctx))
| 25.841004 | 86 | 0.629858 |
c9340f2d3c1db26d4655357d65aa1d342c92a30f | 4,246 | py | Python | bot/cogs/birthday/birthday.py | Qtopia-Team/luci | 9b7f1966050910d50f04cbd9733d1c77ffbb8cba | [
"MIT"
] | 5 | 2021-04-27T10:50:54.000Z | 2021-08-02T09:11:56.000Z | bot/cogs/birthday/birthday.py | Qtopia-Team/luci | 9b7f1966050910d50f04cbd9733d1c77ffbb8cba | [
"MIT"
] | 2 | 2021-06-17T14:53:13.000Z | 2021-06-19T02:14:36.000Z | bot/cogs/birthday/birthday.py | luciferchase/luci | 91e30520cfc60177b9916d3f3d41678f590ecdfc | [
"MIT"
] | 4 | 2021-06-11T12:02:42.000Z | 2021-06-30T16:56:46.000Z | import discord
from discord.ext import commands
import json
import os
import psycopg2
import pytz
| 35.090909 | 105 | 0.544277 |
c934c6f917f8d18513144569e61a6ad4e232777a | 651 | py | Python | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | 1 | 2022-02-15T14:03:10.000Z | 2022-02-15T14:03:10.000Z | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | null | null | null | apps/main/proc_scraper.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | [
"MIT"
] | null | null | null | #sub processes to scrape using the normal Google scraper
#include libs
import sys
sys.path.insert(0, '..')
from include import *
process1 = threading.Thread(target=scraper)
process2 = threading.Thread(target=save_sources)
process3 = threading.Thread(target=reset_scraper)
process4 = threading.Thread(target=reset_sources)
process1.start()
process2.start()
process3.start()
process4.start()
| 22.448276 | 56 | 0.738863 |
c9359b5500958801527c3395149655f6f66f2d7a | 1,620 | py | Python | ingestion/producer1.py | aspk/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | 1 | 2020-03-03T18:46:15.000Z | 2020-03-03T18:46:15.000Z | ingestion/producer1.py | Keyology/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | null | null | null | ingestion/producer1.py | Keyology/ratsadtarget | e93cd3f71000ec409e79e6e0c873578f0e8fa8b3 | [
"Apache-2.0"
] | 1 | 2020-03-03T18:46:18.000Z | 2020-03-03T18:46:18.000Z | # producer to stream data into kafka
from boto.s3.connection import S3Connection
import datetime
import json
import bz2
from kafka import KafkaProducer
from kafka.errors import KafkaError
import time
import pytz
conn = S3Connection()
key = conn.get_bucket('aspk-reddit-posts').get_key('comments/RC_2017-11.bz2')
producer = KafkaProducer(bootstrap_servers=['10.0.0.5:9092'])
count = 0
decomp = bz2.BZ2Decompressor()
CHUNK_SIZE= 5000*1024
timezone = pytz.timezone("America/Los_Angeles")
start_time = time.time()
while True:
print('in')
chunk = key.read(CHUNK_SIZE)
if not chunk:
break
data = decomp.decompress(chunk).decode()
for i in data.split('\n'):
try:
count+=1
if count%10000==0 and count!=0:
print('rate of kafka producer messages is {}'.format(count/(time.time()-start_time)))
comment = json.loads(i)
reddit_event = {}
reddit_event['post'] = comment['permalink'].split('/')[-3]
reddit_event['subreddit'] = comment['subreddit']
reddit_event['timestamp'] = str(datetime.datetime.fromtimestamp(time.time()))
reddit_event['body'] = comment['body']
reddit_event['author'] = comment['author']
producer.send('reddit-stream-topic', bytes(json.dumps(reddit_event),'utf-8'))
producer.flush()
# to reduce speed use time.sleep(0.01)
#time.sleep(0.001)
except:
print('Incomplete string ... skipping this comment')
#break
| 33.061224 | 105 | 0.608642 |
c9378ebb2e19a75b65829de15453b31293aca652 | 3,060 | py | Python | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 4 | 2020-12-15T15:57:14.000Z | 2020-12-16T21:52:23.000Z | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 2 | 2021-03-15T02:49:56.000Z | 2021-03-27T12:42:38.000Z | src/odin-http/odin/http/models.py | wenshuoliu/odin | 7998ee7541b3de44dd149899168983e964f2b8f7 | [
"Apache-2.0"
] | 5 | 2020-12-15T19:09:00.000Z | 2021-04-21T20:40:38.000Z | #from pydantic import BaseModel as Model
# This gives us backwards compatible API calls
from fastapi_camelcase import CamelModel as Model
from typing import Optional, List
from datetime import date, datetime
| 21.103448 | 80 | 0.693137 |
c9380c3f618a01051fb6b644e3bcd12fce9edfdc | 7,931 | py | Python | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 688 | 2018-06-01T23:43:28.000Z | 2022-03-23T06:37:20.000Z | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 188 | 2018-07-09T23:22:31.000Z | 2021-04-01T07:43:46.000Z | tests/test_data/test_data_core.py | shaoeric/hyperparameter_hunter | 3709d5e97dd23efa0df1b79982ae029789e1af57 | [
"MIT"
] | 100 | 2018-08-28T03:30:47.000Z | 2022-01-25T04:37:11.000Z | ##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.data.data_core import BaseDataChunk, BaseDataset, NullDataChunk
##################################################
# Import Miscellaneous Assets
##################################################
import pandas as pd
import pytest
from unittest import mock
##################################################
# White-Box/Structural Test Fixtures
##################################################
##################################################
# White-Box/Structural Tests
##################################################
##################################################
# `BaseDataChunk` Equality
##################################################
def _update_data_chunk(updates: dict):
chunk = BaseDataChunk(None)
for key, value in updates.items():
if key.startswith("T."):
setattr(chunk.T, key[2:], value)
else:
setattr(chunk, key, value)
return chunk
#################### Test Scenario Data ####################
df_0 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]))
df_1 = pd.DataFrame(dict(a=[1, 2, 3], b=[999, 5, 6]))
df_2 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]), index=["foo", "bar", "baz"])
df_3 = pd.DataFrame(dict(a=[1, 2, 3], c=[4, 5, 6]), index=["foo", "bar", "baz"])
df_4 = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9]))
chunk_data_0 = dict(d=pd.DataFrame())
chunk_data_1 = dict(d=pd.DataFrame(), fold=df_0)
chunk_data_2 = dict(d=pd.DataFrame(), fold=df_1)
chunk_data_3 = dict(d=pd.DataFrame(), fold=df_2)
chunk_data_4 = {"d": pd.DataFrame(), "fold": df_2, "T.fold": df_3}
chunk_data_5 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2}
chunk_data_6 = {"d": pd.DataFrame(), "fold": df_3, "T.fold": df_2, "T.d": df_4}
#################### Inequality Tests ####################
| 40.258883 | 102 | 0.667381 |
c939aef00a062e0b98f7c418e70663b8692f035d | 108 | py | Python | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | sample/sample.py | eaybek/getthat | 3ca34902f773ec6a40a1df0b7dac5845a22cc8e4 | [
"MIT"
] | null | null | null | from getthat import getthat
# from sna.search import Sna
Sna = getthat("sna.search", "Sna")
sna = Sna()
| 12 | 34 | 0.685185 |
c93c9aaedb099246f931a93b0f3660c7f68b5819 | 2,481 | py | Python | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 79 | 2021-10-01T22:29:51.000Z | 2022-03-30T04:19:58.000Z | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 2 | 2021-11-18T19:50:59.000Z | 2022-01-08T00:57:24.000Z | src/models/zeroshot.py | mmatena/wise-ft | 2630c366d252ad32db82ea886f7ab6a752142792 | [
"MIT"
] | 10 | 2021-10-14T18:29:59.000Z | 2022-03-27T12:40:18.000Z | import os
import torch
from tqdm import tqdm
import numpy as np
import clip.clip as clip
import src.templates as templates
import src.datasets as datasets
from src.args import parse_arguments
from src.models.modeling import ClassificationHead, ImageEncoder, ImageClassifier
from src.models.eval import evaluate
if __name__ == '__main__':
args = parse_arguments()
eval(args) | 30.62963 | 94 | 0.694478 |
c93cab934e2e3f25cd7169e11400beb6e6d43570 | 425 | py | Python | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | app/main/__init__.py | csmcallister/beular | 219bcd552c1303eb0557f3ef56d44355a932399e | [
"CNRI-Python"
] | null | null | null | from flask import Blueprint
bp = Blueprint('main', __name__)
from app.main import routes # noqa: F401 | 25 | 74 | 0.665882 |
c94067f14edbfaeef67d40e03949c3cc7bd61802 | 734 | py | Python | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | blog/models.py | sd5682295/course_demo-master-2fe2955bdcb6985c2b48bb3487da5732c395bbc2 | face6e8d4e6cc61c3ef437142b71639393de3bf8 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
| 22.9375 | 65 | 0.76703 |
c94170821cd5e437201c56213668e61ba65bc8e5 | 21,018 | py | Python | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | methcomp/regression.py | daneishdespot/methcomp | 767d85aa56a8fda372847585decca8879ec2ac98 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import statsmodels.api as sm
import math
import numpy as np
__all__ = ["deming", "passingbablok", "linear"]
def deming(method1, method2,
vr=None, sdr=None, bootstrap=1000,
x_label='Method 1', y_label='Method 2', title=None,
CI=0.95, line_reference=True, line_CI=False, legend=True,
color_points='#000000', color_deming='#008bff',
point_kws=None,
square=False, ax=None):
"""Provide a method comparison using Deming regression.
This is an Axis-level function which will draw the Deming plot
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
method1, method2 : array, or list
Values obtained from both methods, preferably provided in a np.array.
vr : float
The assumed known ratio of the (residual) variance of the ys relative to that of the xs.
Defaults to 1.
sdr : float
The assumed known standard deviations. Parameter vr takes precedence if both are given.
Defaults to 1.
bootstrap : int or None
Amount of bootstrap estimates that should be performed to acquire standard errors (and confidence
intervals). If None, no bootstrapping is performed. Defaults to 1000.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the plot. If None is provided, no title will be plotted.
CI : float, optional
The confidence interval employed in Deming line. Defaults to 0.95.
line_reference : bool, optional
If True, a grey reference line at y=x will be plotted in the plot.
Defaults to true.
line_CI : bool, optional
If True, dashed lines will be plotted at the boundaries of the confidence intervals.
Defaults to false.
legend : bool, optional
If True, will provide a legend containing the computed Deming equation.
Defaults to true.
color_points : str, optional
Color of the individual differences that will be plotted.
Color should be provided in format compatible with matplotlib.
color_deming : str, optional
Color of the mean difference line that will be plotted.
Color should be provided in format compatible with matplotlib.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Deming plot.
See Also
-------
Koopmans, T. C. (1937). Linear regression analysis of economic time series. DeErven F. Bohn, Haarlem, Netherlands.
Deming, W. E. (1943). Statistical adjustment of data. Wiley, NY (Dover Publications edition, 1985).
"""
plotter: _Deming = _Deming(method1, method2,
vr, sdr, bootstrap,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_deming,
point_kws)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect('equal')
plotter.plot(ax)
return ax
def passingbablok(method1, method2,
x_label='Method 1', y_label='Method 2', title=None,
CI=0.95, line_reference=True, line_CI=False, legend=True,
color_points='#000000', color_paba='#008bff',
point_kws=None,
square=False, ax=None):
"""Provide a method comparison using Passing-Bablok regression.
This is an Axis-level function which will draw the Passing-Bablok plot
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
method1, method2 : array, or list
Values obtained from both methods, preferably provided in a np.array.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the Passing-Bablok plot. If None is provided, no title will be plotted.
CI : float, optional
The confidence interval employed in the passing-bablok line. Defaults to 0.95.
line_reference : bool, optional
If True, a grey reference line at y=x will be plotted in the plot.
Defaults to true.
line_CI : bool, optional
If True, dashed lines will be plotted at the boundaries of the confidence intervals.
Defaults to false.
legend : bool, optional
If True, will provide a legend containing the computed Passing-Bablok equation.
Defaults to true.
color_points : str, optional
Color of the individual differences that will be plotted.
Color should be provided in format compatible with matplotlib.
color_paba : str, optional
Color of the mean difference line that will be plotted.
Color should be provided in format compatible with matplotlib.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the Passing-Bablok plot.
See Also
-------
Passing H and Bablok W. J Clin Chem Clin Biochem, vol. 21, no. 11, 1983, pp. 709 - 720
"""
plotter: _PassingBablok = _PassingBablok(method1, method2,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_paba,
point_kws)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect('equal')
plotter.plot(ax)
return ax
def linear(method1, method2,
x_label='Method 1', y_label='Method 2', title=None,
CI=0.95, line_reference=True, line_CI=False, legend=True,
color_points='#000000', color_regr='#008bff',
point_kws=None,
square=False, ax=None):
"""Provide a method comparison using simple, linear regression.
This is an Axis-level function which will draw the linear regression plot
onto the current active Axis object unless ``ax`` is provided.
Parameters
----------
method1, method2 : array, or list
Values obtained from both methods, preferably provided in a np.array.
x_label : str, optional
The label which is added to the X-axis. If None is provided, a standard
label will be added.
y_label : str, optional
The label which is added to the Y-axis. If None is provided, a standard
label will be added.
title : str, optional
Title of the linear regression plot. If None is provided, no title will be plotted.
CI : float, optional
The confidence interval employed in the linear regression line. Defaults to 0.95.
line_reference : bool, optional
If True, a grey reference line at y=x will be plotted in the plot.
Defaults to true.
line_CI : bool, optional
If True, dashed lines will be plotted at the boundaries of the confidence intervals.
Defaults to false.
legend : bool, optional
If True, will provide a legend containing the computed Linear regression equation.
Defaults to true.
color_points : str, optional
Color of the individual differences that will be plotted.
Color should be provided in format compatible with matplotlib.
color_paba : str, optional
Color of the mean difference line that will be plotted.
Color should be provided in format compatible with matplotlib.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
point_kws : dict of key, value mappings, optional
Additional keyword arguments for `plt.scatter`.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
Returns
-------
ax : matplotlib Axes
Axes object with the linear regression plot.
See Also
-------
..............
"""
plotter: _Linear = _Linear(method1, method2,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_regr,
point_kws)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect('equal')
plotter.plot(ax)
return ax
| 39.433396 | 118 | 0.591683 |
c9418c993a05d0182f414df4de245fd5f5288aa8 | 1,470 | py | Python | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | setup.py | jmacgrillen/perspective | 6e6e833d8921c54c907dd6314d4bc02ba3a3c0b6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
setup.py
Desscription:
Install the maclib package.
Version:
1 - Inital release
Author:
J.MacGrillen <macgrillen@gmail.com>
Copyright:
Copyright (c) John MacGrillen. All rights reserved.
"""
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
install_requirements = [
"maclib",
"opencv-python",
"numpy",
"Pillow",
"charset-normalizer"
]
def setup_perspective_package() -> None:
"""
Install and configure Perspective for use
"""
setup(
name='Perspective',
version="0.0.1",
description='Analyse images using the range of tools provided',
long_description=long_description,
author='J.MacGrillen',
scripts=[],
packages=find_packages(exclude=['tests*']),
include_package_data=True,
install_requires=install_requirements,
license="MIT License",
python_requires=">= 3.7.*",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
)
if __name__ == "__main__":
setup_perspective_package()
| 25.344828 | 71 | 0.593197 |
c941a3a73b37c420856313d2ddda37d278df3e52 | 1,021 | py | Python | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | 2 | 2019-12-02T22:27:59.000Z | 2019-12-04T07:48:27.000Z | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | null | null | null | 2021/day2.py | MadsPoder/advent-of-code | 4f190e18d24332e21308a7d251c331777b52a5f1 | [
"MIT"
] | null | null | null | # Playing with pattern matching in python 3.10
# Add lambda to parse commands into command and corresponding units
parse_command = lambda x, y: (x, int(y))
# Read puzzle input
with open ('day2.txt') as fp:
commands = [parse_command(*x.strip().split(' ')) for x in fp.readlines()]
horizontal_position = 0
depth = 0
for command in commands:
match command:
case ['forward', units]:
horizontal_position = horizontal_position + units
case ['down', units]:
depth = depth + units
case ['up', units]:
depth = depth - units
# Part 1
print(depth * horizontal_position)
# Part 2
aim = 0
horizontal_position = 0
depth = 0
for command in commands:
match command:
case ['forward', units]:
horizontal_position = horizontal_position + units
depth = depth + (aim * units)
case ['down', units]:
aim = aim + units
case ['up', units]:
aim = aim - units
print(depth * horizontal_position) | 25.525 | 77 | 0.613124 |
c943169325309fd0984d9e08fbc50df17f771916 | 2,159 | py | Python | etl/vector/process_all.py | nismod/oi-risk-vis | a5c7460a8060a797dc844be95d5c23689f42cd17 | [
"MIT"
] | 2 | 2020-09-29T15:52:48.000Z | 2021-03-31T02:58:53.000Z | etl/vector/process_all.py | nismod/oi-risk-vis | a5c7460a8060a797dc844be95d5c23689f42cd17 | [
"MIT"
] | 41 | 2021-05-12T17:12:14.000Z | 2022-03-17T10:49:20.000Z | etl/vector/process_all.py | nismod/infra-risk-vis | 1e5c28cced578d8bd9c78699e9038ecd66f47cf7 | [
"MIT"
] | null | null | null | #!/bin/env python3
from argparse import ArgumentParser
import csv
import os
from pathlib import Path
import subprocess
import sys
this_directory = Path(__file__).parent.resolve()
vector_script_path = this_directory / 'prepare_vector.sh'
if __name__ == '__main__':
parser = ArgumentParser(description='Converts all vector datasets to GeoJSON and then to MBTILES')
parser.add_argument('--raw', type=Path, help='Root of the raw data directory. Assumes a file network_layers.csv exists in the dir.', required=True)
parser.add_argument('--out', type=Path, help='Directory in which to store results of the processing', required=True)
args = parser.parse_args()
process_vector_datasets(args.raw.expanduser().resolve(), args.out.expanduser().resolve())
| 41.519231 | 156 | 0.742937 |
c944a392c3c65b876eac48378aa9aaaa59c4cea9 | 1,688 | py | Python | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | 1 | 2021-11-05T09:45:25.000Z | 2021-11-05T09:45:25.000Z | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | null | null | null | django/week9/main/models.py | yrtby/Alotech-Fullstack-Bootcamp-Patika | e2fd775e2540b8d9698dcb7dc38f84a6d7912e8d | [
"MIT"
] | 3 | 2021-11-07T07:16:30.000Z | 2021-12-07T20:22:59.000Z | from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinLengthValidator
# Create your models here.
| 35.914894 | 83 | 0.708531 |
c947e59db3be68e0dcce4600b6cfeb33b848886c | 375 | py | Python | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | 1 | 2021-09-24T09:12:06.000Z | 2021-09-24T09:12:06.000Z | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | null | null | null | tests/test_dir_dataset.py | gimlidc/igre | bf3425e838cca3d1fa8254a2550ecb44774ee0ef | [
"MIT"
] | null | null | null | import stable.modalities.dir_dataset as dataset
import os.path
| 34.090909 | 99 | 0.706667 |
c949f74729063705c3b6e636bb65a45813ce66bb | 1,118 | py | Python | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | 5 | 2018-03-07T03:54:36.000Z | 2022-01-01T04:43:48.000Z | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | null | null | null | sample/main.py | qjw/flasgger | d43644da1fea6af596ff0e2f11517b578377850f | [
"MIT"
] | 2 | 2021-11-11T08:48:39.000Z | 2022-01-01T04:43:49.000Z | import logging
import jsonschema
from flask import Flask, jsonify
from flask import make_response
from flasgger import Swagger
from sample.config import Config
app = Flask(__name__)
app.config.update(Config or {})
init_logging(app)
Swagger(app)
from sample.api import api
app.register_blueprint(api, url_prefix='/api/v123456')
if __name__=='__main__':
app.run() | 25.409091 | 77 | 0.675313 |
c94abc02ec26c5e120241965ee1760edb37aa362 | 909 | py | Python | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | null | null | null | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | 4 | 2021-07-02T17:49:44.000Z | 2021-09-27T01:06:41.000Z | cuticle_analysis/models/e2e.py | ngngardner/cuticle_analysis | 7ef119d9ee407df0faea63705dcea76d9f42614b | [
"MIT"
] | null | null | null |
import numpy as np
from .cnn import CNN
from .kviews import KViews
from .. import const
| 24.567568 | 66 | 0.567657 |
c94aca271568ab00f3c86f9599a88f50e9eeab3a | 95 | py | Python | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | null | null | null | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | 16 | 2019-02-21T14:12:01.000Z | 2019-03-11T08:00:15.000Z | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.833333 | 34 | 0.768421 |
c94dc603c09e41f347618a870bb8e3d545494ed0 | 61 | py | Python | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | 3 | 2021-04-17T08:34:08.000Z | 2021-04-17T08:57:23.000Z | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | null | null | null | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | null | null | null | import os
os.system('python function_18351015.py > log.txt')
| 20.333333 | 50 | 0.770492 |
c950e89a11e706b3a1a0ba3575143820351f7247 | 3,337 | py | Python | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | import sys, os
import upandas as upd
# Run a single Python script
# For many simple, single file projects, you may find it inconvenient
# to write a complete Dockerfile. In such cases, you can run a Python
# script by using the Python Docker image directly:
#versions to consider: 3 (600+ MB), slim (150 MB) alpine (90 MB)
# $ docker run -it --rm --name my-running-script -v "$PWD":/usr/src/myapp -w /usr/src/myapp python:3 python your-daemon-or-script.py
# $ docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas python:alpine python upandas_test.py
if __name__ == '__main__':
if len(sys.argv) < 2:
print('no testing approach supplied, see...')
sys.exit(1)
env = sys.argv[1]
if env == 'local':
print('Testing locally')
elif env == 'docker':
print('Using docker to test')
ex = os.system(
'docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas '
'python:alpine python upandas_test.py local')
sys.exit(os.WEXITSTATUS(ex))
elif env == 'virtualenv':
raise NotImplementedError
else:
print('Unsupported environment: {}'.format(env))
sys.argv = sys.argv[:1] #strip our settings out
import unittest
import math
skip_pandas_tests = True #TODO: make this explicit in the sys.argv stuff above
try:
import pandas as pd
skip_pandas_tests = False
except:
pass
# Series methods
# ==============
if __name__ == '__main__':
unittest.main()
| 29.530973 | 132 | 0.574768 |
c953f88756774d3e9d070501efa3054134aaa4e2 | 6,555 | py | Python | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from __future__ import annotations
from typing import Literal
from prettyqt import constants, core, gui, widgets
from prettyqt.qt import QtCore, QtWidgets
from prettyqt.utils import InvalidParamError, bidict
# Two-way mapping between human-readable echo-mode names and Qt's QLineEdit enum.
ECHO_MODE = bidict(
    normal=QtWidgets.QLineEdit.EchoMode.Normal,
    no_echo=QtWidgets.QLineEdit.EchoMode.NoEcho,
    password=QtWidgets.QLineEdit.EchoMode.Password,
    echo_on_edit=QtWidgets.QLineEdit.EchoMode.PasswordEchoOnEdit,
)
# Literal type accepted wherever an echo mode may be passed as a string.
EchoModeStr = Literal["normal", "no_echo", "password", "echo_on_edit"]
# Two-way mapping for where an action icon is placed inside the line edit.
ACTION_POSITION = bidict(
    leading=QtWidgets.QLineEdit.ActionPosition.LeadingPosition,
    trailing=QtWidgets.QLineEdit.ActionPosition.TrailingPosition,
)
ActionPositionStr = Literal["leading", "trailing"]
# Re-parent QLineEdit onto prettyqt's Widget mixin so all Qt line edits
# (including subclasses) inherit the prettyqt helper methods.
QtWidgets.QLineEdit.__bases__ = (widgets.Widget,)
if __name__ == "__main__":
app = widgets.app()
widget = LineEdit()
action = widgets.Action(text="hallo", icon="mdi.folder")
widget.add_action(action)
widget.setPlaceholderText("test")
widget.setClearButtonEnabled(True)
# widget.set_regex_validator("[0-9]+")
widget.setFont(gui.Font("Consolas"))
widget.show()
app.main_loop()
| 31.066351 | 89 | 0.653547 |
c9544ffadc07ec885bd33e7c84ffb14a0d5a171b | 555 | py | Python | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | # ------------------------------------
# CODE BOOLA 2015 PYTHON WORKSHOP
# Mike Wu, Jonathan Chang, Kevin Tan
# Puzzle Challenges Number 8
# ------------------------------------
# INSTRUCTIONS:
# Write a function that takes an integer
# as its argument and converts it to a
# string. Return the first character
# of that string.
# EXAMPLE:
# select(12345) => "1"
# select(519) => "5"
# select(2) => "2"
# HINT:
# Use str() to convert an integer to a string.
# Remember that a string can be indexed
# just like a list!
| 21.346154 | 46 | 0.585586 |
c95465582eabaa7004deb1d71c383aba26908941 | 1,086 | py | Python | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
"""
A chi square (X2) statistic is used to investigate whether distributions
of categorical variables differ from one another. Here we consider 3 degrees
of freedom for our system. Plotted against 95% line"""
def _read_nis_values(path):
    """Read one NIS value per line from *path* as floats, skipping blank lines."""
    values = []
    with open(path) as f:
        for line in f:
            stripped = line.strip()
            if stripped:
                # Parse as float: plotting the raw strings would make matplotlib
                # treat the axis as categorical text labels instead of a numeric
                # NIS scale (and plt.ylim(0, 20) below would be meaningless).
                values.append(float(stripped))
    return values


lidar_nis = _read_nis_values('NISvals_laser.txt')
print("Number of LIDAR Measurements :\t", len(lidar_nis))

radar_nis = _read_nis_values('NISvals_radar.txt')
print("Number of RADAR Measurements :\t", len(radar_nis))

# We skip the first row to cut out the unrealistically high NIS value
# from the first measurement. The Kalman filter has not found its groove yet.
lidar_nis = lidar_nis[1:]
radar_nis = radar_nis[1:]

# Horizontal reference line: chi-square 95% threshold for 3 degrees of freedom.
k = [7.815 for _ in range(len(lidar_nis))]

plt.plot(lidar_nis)
plt.plot(k)
plt.title("LIDAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.show()

plt.plot(radar_nis)
plt.plot(k)
plt.title("RADAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.ylim(0, 20)
plt.show()
c95546315e55dfb705f35c46c08aaa6f9bae96a5 | 695 | py | Python | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 50 | 2021-02-07T08:10:28.000Z | 2022-03-25T09:10:26.000Z | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 7 | 2021-07-29T14:58:31.000Z | 2022-02-01T08:02:54.000Z | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 4 | 2021-04-01T16:30:15.000Z | 2022-03-31T17:38:05.000Z | import torch
from offlinerl.utils.exp import select_free_cuda
task = "Hopper-v3"
task_data_type = "low"
task_train_num = 99
seed = 42
device = 'cuda'+":"+str(select_free_cuda()) if torch.cuda.is_available() else 'cpu'
obs_shape = None
act_shape = None
max_action = None
hidden_features = 256
hidden_layers = 2
atoms = 21
advantage_mode = 'mean'
weight_mode = 'exp'
advantage_samples = 4
beta = 1.0
gamma = 0.99
batch_size = 1024
steps_per_epoch = 1000
max_epoch = 200
lr = 1e-4
update_frequency = 100
#tune
params_tune = {
"beta" : {"type" : "continuous", "value": [0.0, 10.0]},
}
#tune
grid_tune = {
"advantage_mode" : ['mean', 'max'],
"weight_mode" : ['exp', 'binary'],
}
| 16.547619 | 83 | 0.680576 |
c9555f153510ab57941a2d63dc997b5c2a9d5575 | 8,325 | py | Python | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | from django.contrib.admin.options import get_content_type_for_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
# log texts that only contain {object}
# Log message templates that interpolate only {object} (the entity the entry
# refers to); keys are the event identifiers stored on a log entry.
LOG_TEXTS_BASIC = {
    "cykel.bike.rent.unlock": _("{object} has been unlocked"),
    "cykel.bike.rent.longterm": _("{object} has been running for a long time"),
    "cykel.bike.forsaken": _("{object} had no rent in some time"),
    "cykel.bike.missing_reporting": _("{object} (missing) reported its status again!"),
    "cykel.tracker.missing_reporting": _(
        "{object} (missing) reported its status again!"
    ),
    "cykel.tracker.missed_checkin": _("{object} missed its periodic checkin"),
}
# Templates that additionally interpolate context values ({station}, {rent},
# {location}, {bike}, {voltage}) besides {object}.
LOG_TEXTS = {
    "cykel.bike.rent.finished.station": _(
        "{object} finished rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.finished.freefloat": _(
        "{object} finished rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.rent.started.station": _(
        "{object} began rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.started.freefloat": _(
        "{object} began rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.tracker.battery.critical": _(
        "{object} (on Bike {bike}) had critical battery voltage {voltage} V"
    ),
    "cykel.bike.tracker.battery.warning": _(
        "{object} (on Bike {bike}) had low battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.critical": _(
        "{object} had critical battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.warning": _("{object} had low battery voltage {voltage} V"),
    "cykel.bike.tracker.missed_checkin": _(
        "{object} (on Bike {bike}) missed its periodic checkin"
    ),
}
| 37.669683 | 87 | 0.538498 |
c9565831d1ae75fe2b15d03a39a78761d5e269d5 | 7,991 | py | Python | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | import unittest
import torch
from torch.nn.functional import binary_cross_entropy as bce, l1_loss
from mlx.od.ssd.utils import (
ObjectDetectionGrid, BoxList, compute_intersection, compute_iou, F1)
if __name__ == '__main__':
unittest.main() | 36.322727 | 79 | 0.511325 |
c956809dc40104300810383514543a84d7e16eb4 | 3,284 | py | Python | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | null | null | null | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | 1 | 2020-10-13T15:44:40.000Z | 2020-10-13T15:44:40.000Z | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | null | null | null | '''
AUTOR: Javier Carracedo
Date: 08/10/2020
Auxiliar class to test methods from WilliamHillURLs.py
'''
import WilliamHillURLs
if __name__ == "__main__":
myVariable = WilliamHillURLs.WilliamHillURLs()
# Print all matches played actually.
for item in myVariable.GetAllMatchsPlayedActually(myVariable.URL_FootballOnDirect):
print(item)
'''
OUTPUT EXAMPLE at 08/10/2020 20:19:29:
Islas Feroe Sub 21 v Espaa Sub 21: 90/1 | 15/2 | 1/40
Dornbirn v St Gallen: 90/1 | 15/2 | 1/40
Corellano v Pea Azagresa: 90/1 | 15/2 | 1/40
Esbjerg v Silkeborg: 90/1 | 15/2 | 1/40
Koge Nord v Ishoj: 90/1 | 15/2 | 1/40
Vasco da Gama Sub 20 v Bangu Sub 20: 90/1 | 15/2 | 1/40
Rangers de Talca v Dep. Valdivia: 90/1 | 15/2 | 1/40
San Marcos v Dep. Santa Cruz: 90/1 | 15/2 | 1/40
Melipilla v Puerto Montt: 90/1 | 15/2 | 1/40
Kray v TuRU Dusseldorf: 90/1 | 15/2 | 1/40
Siegen v Meinerzhagen: 90/1 | 15/2 | 1/40
1. FC M'gladbach v Kleve: 90/1 | 15/2 | 1/40
Waldgirmes v Turkgucu-Friedberg: 90/1 | 15/2 | 1/40
Zamalek v Wadi Degla: 90/1 | 15/2 | 1/40
Elva v Flora B: 90/1 | 15/2 | 1/40
Fujairah FC v Ajman: 90/1 | 15/2 | 1/40
Vanersborg v Ahlafors: 90/1 | 15/2 | 1/40
'''
    # Print the URL of every match currently being played.
    for item in myVariable.GetAllUrlMatches(myVariable.URL_FootballOnDirect):
        print(item)
'''OUTPUT EXAMPLE at 08/10/2020 20:19:29:
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701125/islas-feroe-sub-21--espaa-sub-21
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701988/dornbirn--st-gallen
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18702077/corellano--pea-azagresa
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694620/esbjerg--silkeborg
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18702062/koge-nord--ishoj
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701883/vasco-da-gama-sub-20--bangu-sub-20
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694610/rangers-de-talca--dep-valdivia
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694611/san-marcos--dep-santa-cruz
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694612/melipilla--puerto-montt
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694624/kray--turu-dusseldorf
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694625/siegen--meinerzhagen
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694626/1-fc-mgladbach--kleve
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694627/waldgirmes--turkgucu-friedberg
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18694162/zamalek--wadi-degla
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701762/elva--flora-b
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701661/fujairah-fc--ajman
https://sports.williamhill.es/betting/es-es/ftbol/OB_EV18701852/vanersborg--ahlafors
'''
| 49.014925 | 109 | 0.670524 |
c9570eba69366671540e993ccc63b21a8b23a785 | 3,185 | py | Python | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | import glob
import os
import shutil
import sys
import tarfile
from tempfile import TemporaryDirectory
from ..utils import ERROR
from ..utils import Spinner
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_verbose_argument
from ..utils import box_print
from ..utils import build_app
from ..utils import build_prepare
from ..utils import read_package_configuration
from ..utils import run
| 28.4375 | 85 | 0.674725 |
c957b9e1d84b2cf858f2f0ed59b9eda407c2dff9 | 1,011 | py | Python | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | 1 | 2020-02-29T20:14:32.000Z | 2020-02-29T20:14:32.000Z | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | 5 | 2018-10-24T17:28:48.000Z | 2019-10-22T11:09:19.000Z | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | null | null | null | """
A model class for Sale
"""
# local imports
from app.api.common.utils import dt
from app.api.v2.db_config import conn
from app.api.v2.models.cart import Cart
# Module-level cursor shared by the Sale model for its database operations.
cur = conn.cursor()
| 25.275 | 106 | 0.578635 |
c9582e0280978de265a7060549f58e588eceb72b | 3,306 | py | Python | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | import aiohttp
from bs4 import BeautifulSoup
import asyncio
from dembones.webpage import WebPage
import dembones.urltools as ut
import logging
# Module-level logger following the standard __name__-based convention.
log = logging.getLogger(__name__)
| 38.894118 | 95 | 0.629462 |
c9599538e684b00c1b9eb75ec04458b635c13ae8 | 501 | py | Python | py_tdlib/constructors/input_inline_query_result_video.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/input_inline_query_result_video.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/input_inline_query_result_video.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
| 31.3125 | 60 | 0.688623 |
c959a09cafe37155453fcdb077c647271d246317 | 710 | py | Python | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 10 | 2019-01-19T08:15:05.000Z | 2021-12-02T08:54:50.000Z | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | null | null | null | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 2 | 2019-01-25T21:19:49.000Z | 2019-03-21T11:38:13.000Z | import argparse
import train_args
def get_arg_parser() -> argparse.ArgumentParser:
'''
A set of parameters for evaluation
'''
parser = train_args.get_arg_parser()
parser.add_argument('--load_path', type=str, help='the path of the model to test')
parser.add_argument('--eval_train', action='store_true', help='eval on the train set')
parser.add_argument('--eval_test', action='store_true', help='eval on the test set')
parser.add_argument('--eval_fast', action='store_true', help='eval quickly if implemented and supported (Greedy)')
parser.add_argument('--output_file', type=str, default=None, help='if specified will store the translations in this file')
return parser | 50.714286 | 126 | 0.723944 |
c959fbbb426057adb9170ca9df4b29dd550126f4 | 43,792 | py | Python | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | 1 | 2021-12-16T14:23:46.000Z | 2021-12-16T14:23:46.000Z | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | null | null | null | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | null | null | null | """
Creates a fidelity estimator for any pure state, using randomized Pauli measurement strategy.
Author: Akshay Seshadri
"""
import warnings
import numpy as np
import scipy as sp
from scipy import optimize
import project_root # noqa
from src.optimization.proximal_gradient import minimize_proximal_gradient_nesterov
from src.utilities.qi_utilities import generate_random_state, generate_special_state, generate_Pauli_operator, generate_POVM, embed_hermitian_matrix_real_vector_space
from src.utilities.noise_process import depolarizing_channel
from src.utilities.quantum_measurements import Measurement_Manager
from src.fidelity_estimation import Fidelity_Estimation_Manager
def project_on_box(v, l, u):
    """
    Euclidean projection of the point v onto the axis-aligned box
    C = {x : l <= x <= u}, with the bounds interpreted componentwise.

    The projection is given componentwise by Pi(v)_k = min(max(v_k, l_k), u_k),
    so entries of l and u may be -inf or +inf respectively to leave a
    coordinate unconstrained on that side.
    """
    # np.clip is exactly the minimum(maximum(...)) composition above.
    return np.clip(v, l, u)
def generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes = False):
    """
    Generates the outcomes (index pointing to appropriate POVM element) for a Pauli sampling measurement strategy.

    The strategy involves sampling the non-identity Pauli group elements, measuring them, and only using the
    eigenvalue (either +1 or -1) of the measured outcome.
    The sampling is done as per the probability distribution p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|.
    We represent this procedure by an effective POVM containing two elements.
    If outcome eigenvalue is +1, that corresponds to index 0 of the effective POVM, while eigenvalue -1
    corresponds to index 1 of the effective POVM.
    If flip_outcomes is True, we measure the Paulis, and later flip the measurement outcomes (+1 <-> -1)
    as necessary. If not, we directly measure negative of the Pauli operator.

    Arguments:
        rho           : target state as a density matrix, flattened in row-major style.
        sigma         : state actually "prepared in the lab" as a density matrix, flattened in row-major style.
        R             : total number of sampled Pauli measurement repetitions.
        num_povm_list : number of POVM elements per sampled setting; either 2 (projectors on the
                        +1/-1 eigenspaces) or n = 2**nq (projectors on each eigenvector). A scalar
                        is broadcast to all settings, otherwise a list/tuple/ndarray is expected.
        epsilon_o     : parameter forwarded to Measurement_Manager.perform_measurements
                        (presumably a noise level -- see quantum_measurements for its semantics).
        flip_outcomes : see above.

    Returns:
        List of outcome indices of the effective two-element POVM (0 for eigenvalue +1,
        1 for eigenvalue -1), concatenated over all sampled Pauli settings.
    """
    # dimension of the system; rho is expected to be flattened, but this expression is agnostic to that
    n = int(np.sqrt(rho.size))
    # number of qubits
    nq = int(np.log2(n))
    if 2**nq != n:
        raise ValueError("Pauli measurements possible only in systems of qubits, i.e., the dimension should be a power of 2")
    # ensure that the states are flattened
    rho = rho.ravel()
    sigma = sigma.ravel()
    # index of each Pauli of which weights need to be computed (identity, index 0, excluded)
    pauli_index_list = range(1, 4**nq)
    # find Tr(rho W) for each Pauli operator W (identity excluded); this is only a heuristic weight if rho is not pure
    # these are not the same as Flammia & Liu weights
    # computing each Pauli operator individually (as opposed to computing a list of all Pauli operators at once) is a little slower, but can handle more number of qubits
    pauli_weight_list = [np.real(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in pauli_index_list]
    # phase of each pauli operator (either +1 or -1)
    pauli_phase_list = [np.sign(pauli_weight) for pauli_weight in pauli_weight_list]
    # set of pauli operators along with their phases from which we will sample
    pauli_measurements = list(zip(pauli_index_list, pauli_phase_list))
    # probability distribution with which the Paulis should be sampled
    pauli_sample_prob = np.abs(pauli_weight_list)
    # normalization factor for pauli probability (NF = \sum_i |tr(W_i rho)|)
    NF = np.sum(pauli_sample_prob)
    # normalize the sampling probability
    pauli_sample_prob = pauli_sample_prob / NF
    # the effective POVM for minimax optimal strategy consists of just two POVM elements
    # however, the actual measurements performed are 'R' Pauli measurements which are sampled from the pauli operators
    # np.random.choice doesn't allow list of tuples directly, so indices are sampled instead
    # see https://stackoverflow.com/questions/30821071/how-to-use-numpy-random-choice-in-a-list-of-tuples/55517163
    uniformly_sampled_indices = np.random.choice(len(pauli_measurements), size = int(R), p = pauli_sample_prob)
    pauli_to_measure_with_repetitions = [pauli_measurements[index] for index in uniformly_sampled_indices]
    # unique Pauli measurements to be performed, with phase
    pauli_to_measure = sorted(list(set(pauli_to_measure_with_repetitions)), key = lambda x: x[0])
    # get the number of repetitions to be performed for each unique Pauli measurement (i.e., number of duplicates)
    # histogram bins are the sorted unique indices plus one past-the-end edge
    R_list, _ = np.histogram([pauli_index for (pauli_index, _) in pauli_to_measure_with_repetitions], bins = [pauli_index for (pauli_index, _) in pauli_to_measure] + [pauli_to_measure[-1][0] + 1], density = False)
    # list of number of POVM elements for each (type of) measurement
    # if a number is provided, a list (of integers) is created from it
    if type(num_povm_list) not in [list, tuple, np.ndarray]:
        num_povm_list = [int(num_povm_list)] * len(R_list)
    else:
        num_povm_list = [int(num_povm) for num_povm in num_povm_list]
    # generate POVMs for measurement
    POVM_list = [None] * len(R_list)
    for (count, num_povm) in enumerate(num_povm_list):
        # index of pauli operator to measure, along with the phase
        pauli, phase = pauli_to_measure[count]
        if flip_outcomes:
            # don't include the phase while measuring
            # the phase is incorporated after the measurement outcomes are obtained
            phase = 1
        # generate POVM depending on whether projectors on subspace or projectors on each eigenvector is required
        # note that when n = 2, subspace and eigenbasis projectors match, in which case we give precedence to eigenbasis projection
        # this is because in the next block after measurements are generated, we check if num_povm is n and if that's true include phase
        # but if subspace was used first, then phase would already be included and this would be the same operation twice
        # so we check for eigenbasis projection first
        if num_povm == n:
            # ensure that the supplied Pauli operator is a string composed of 0, 1, 2, 3
            if type(pauli) in [int, np.int64]:
                if pauli > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # make sure pauli is a string (base-4 representation)
                pauli = np.base_repr(pauli, base = 4)
                # pad pauli with 0s on the left so that the total string is of size nq (as we need a Pauli operator acting on nq qubits)
                pauli = pauli.rjust(nq, '0')
            elif type(pauli) == str:
                # get the corresponding integer to validate the range
                pauli_num = np.array(list(pauli), dtype = 'int')
                pauli_num = pauli_num.dot(4**np.arange(len(pauli) - 1, -1, -1))
                if pauli_num > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # pad pauli with 0s on the left so that the total string is of size nq (as we need a Pauli operator acting on nq qubits)
                pauli = pauli.rjust(nq, '0')
            # we take POVM elements as rank 1 projectors on to the (orthonormal) eigenbasis of the Pauli operator specified by 'pauli' string
            # - first create the computational basis POVM and then use the Pauli operator strings to get the POVM in the respective Pauli basis
            computational_basis_POVM = generate_POVM(n = n, num_povm = n, projective = True, pauli = None, flatten = False, isComplex = True, verify = False)
            # - to get Pauli X basis, we can rotate the computational basis using Hadamard
            # - to get Pauli Y basis, we can rotate the computational basis using a matrix similar to Hadamard
            # use a dictionary to make these mappings ('0' = identity, '1' = X, '2' = Y, '3' = Z)
            comp_basis_transform_dict = {'0': np.eye(2, dtype = 'complex128'), '1': np.array([[1., 1.], [1., -1.]], dtype = 'complex128')/np.sqrt(2),\
                                         '2': np.array([[1., 1.], [1.j, -1.j]], dtype = 'complex128')/np.sqrt(2), '3': np.eye(2, dtype = 'complex128')}
            transform_matrix = np.eye(1)
            # pauli contains tensor product of nq 1-qubit Pauli operators, so parse through them to get a unitary mapping computational basis to Pauli eigenbasis
            for ithpauli in pauli:
                transform_matrix = np.kron(transform_matrix, comp_basis_transform_dict[ithpauli])
            # create the POVM by transforming the computational basis to given Pauli basis
            # the phase doesn't matter when projecting on to the eigenbasis; the eigenvalues are +1, -1 or +i, -i, depending on the phase but we can infer that upon measurement
            POVM = [transform_matrix.dot(Ei).dot(np.conj(transform_matrix.T)).ravel() for Ei in computational_basis_POVM]
        elif num_povm == 2:
            # the Pauli operator that needs to be measured (phase included unless flip_outcomes)
            Pauli_operator = phase * generate_Pauli_operator(nq, pauli)[0]
            # if W is the Pauli operator and P_+ and P_- are projectors on to the eigenspaces corresponding to +1 (+i) & -1 (-i) eigenvalues, then
            # l P_+ - l P_- = W, and P_+ + P_- = \id. We can solve for P_+ and P_- from this. l \in {1, i}, depending on the phase.
            # l = 1 or i can be obtained from the phase as sgn(phase) * phase, noting that phase is one of +1, -1, +i or -i
            P_plus = 0.5*(np.eye(n, dtype = 'complex128') + Pauli_operator / (phase * np.sign(phase)))
            P_minus = 0.5*(np.eye(n, dtype = 'complex128') - Pauli_operator / (phase * np.sign(phase)))
            POVM = [P_plus.ravel(), P_minus.ravel()]
        else:
            raise ValueError("Pauli measurements with only 2 or 'n' POVM elements are supported")
        # store the POVM for measurement
        POVM_list[count] = POVM
    # initiate the measurements
    measurement_manager = Measurement_Manager(random_seed = None)
    measurement_manager.n = n
    measurement_manager.N = len(POVM_list)
    measurement_manager.POVM_mat_list = [np.vstack(POVM) for POVM in POVM_list]
    measurement_manager.N_list = [len(POVM) for POVM in POVM_list]
    # perform the measurements on the lab state sigma (R_list repetitions per setting)
    data_list = measurement_manager.perform_measurements(sigma, R_list, epsilon_o, num_sets_outcomes = 1, return_outcomes = True)[0]
    # convert the outcomes of the Pauli measurements to those of the effective POVM
    effective_outcomes = list()
    for (count, data) in enumerate(data_list):
        num_povm = num_povm_list[count]
        pauli_index, phase = pauli_to_measure[count]
        if flip_outcomes:
            # store the actual phase for later use
            actual_phase = int(phase)
            # Paulis were measured without the phase, so do the conversion of outcomes to those of effective POVM with that in mind
            phase = 1
        # for num_povm = 2, there is nothing to do because outcome '0' corresponds to +1 eigenvalue and outcome 1 corresponds to -1 eigenvalue
        # if flip_outcomes is False, then these are also the outcomes for the effective POVM because phase was already accounted for during measurement
        # if flip_outcomes is True, then we will later flip the outcome index (0 <-> 1) to account for the phase
        # for num_povm = n, we need to figure out the eigenvalue corresponding to outcome (an index from 0 to n - 1, pointing to the basis element)
        # we map +1 value to 0 and -1 eigenvalue to 1, which corresponds to the respective indices of elements in the effective POVM
        if num_povm == n:
            # all Paulis have eigenvalues 1, -1, but we are doing projective measurements onto the eigenbasis of Pauli operators
            # so, half of them will have +1 eigenvalue, the other half will have -1 eigenvalue
            # we are mapping the computational basis to the eigenbasis of the Pauli operator to perform the measurement
            # 0 for the ith qubit goes to the +1 eigenvalue eigenstate of the ith Pauli, and
            # 1 for the ith qubit goes to the -1 eigenvalue eigenstate of the ith Pauli
            # the exception is when the ith Pauli is identity, where the eigenstate is as described above but eigenvalue is always +1
            # therefore, we assign an "eigenvalue weight" of 1 to non-identity 1-qubit Paulis (X, Y, Z) and an "eigenvalue weight" of 0 to the 1-qubit identity
            # we then write the nq-qubit Pauli string W as an array of above weights w_1w_2...w_nq, where w_i is the "eigenvalue weight" of the ith Pauli in W
            # then the computational basis state |i_1i_2...i_nq> has the eigenvalue (-1)^(i_1*w_1 + ... + i_nq*w_nq) when it has been transformed to an
            # eigenstate of the Pauli operator W (using the transform_matrix defined in qi_utilities.generate_POVM)
            # however, if the Pauli operator has a non-identity phase, the +1 and -1 eigenvalue are appropriately changed
            # the general expression for eigenvalue takes the form phase * (-1)^(i_1*w_1 + ... + i_nq*w_nq)
            # so given a pauli index (a number from 0 to 4^nq - 1), obtain the array of "eigenvalue weight" representing the Pauli operator as described above
            # for this, convert the pauli index to an array of 0, 1, 2, 3 representing the Pauli operator (using np.base_repr, np.array), then set non-zero elements to 1 (using np.where)
            pauli_eigval_weight = lambda pauli_index: np.where(np.array(list(np.base_repr(pauli_index, base = 4).rjust(nq, '0')), dtype = 'int8') == 0, 0, 1)
            # get array of 0, 1 representing the computational basis element from the index (a number from 0 to 2^nq - 1) of the computational basis
            computational_basis_array = lambda computational_basis_index: np.array(list(np.base_repr(computational_basis_index, base = 2).rjust(nq, '0')), dtype = 'int8')
            # form the eigenvalues from the (computational basis) index of the outcome for each pauli measurement performed
            # to convert the eigenvalue (+1 or -1) to index (0 or 1, respectively), we do the operation (1 - e) / 2, where e is the eigenvalue
            # type-casted to integers because an index is expected for each outcome
            data = [int(np.real( (1 - phase*(-1)**(computational_basis_array(outcome_index).dot(pauli_eigval_weight(pauli_index)))) / 2 )) for outcome_index in data]
        if flip_outcomes and actual_phase == -1:
            # now that we have the data for the effective POVM (without considering the phase), we can flip the outcomes as necessary
            data = [1 - outcome_index for outcome_index in data]
        # include this in the list of outcomes for the effective measurement
        effective_outcomes.extend(data)
    return effective_outcomes
def fidelity_estimation_pauli_random_sampling(target_state = 'random', nq = 2, num_povm_list = 2, R = 100, epsilon = 0.05, risk = None, epsilon_o = 1e-5, noise = True,\
                                              noise_type = 'depolarizing', state_args = None, flip_outcomes = False, tol = 1e-6, random_seed = 1, verify_estimator = False,\
                                              print_result = True, write_to_file = False, dirpath = './Data/Computational/', filename = 'temp'):
    """
    Generates the target_state defined by 'target_state' and state_args, and finds an estimator for fidelity using Juditsky & Nemirovski's approach for a specific measurement scheme
    involving random sampling of Pauli operators.
    The specialized approach allows for computation of the estimator for very large dimensions.
    The random sampling is done as per the probability distribution p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|, where W_i is the ith Pauli operator and rho is the target state.
    This random sampling is accounted for by a single POVM, so number of types of measurement (N) is just one.
    The estimator and the risk only depend on the dimension, the number of repetitions, the confidence level, and the normalization factor NF = \sum_i |tr(W_i rho)|.
    If risk is a number less than 0.5, the number of repetitions of the minimax optimal measurement is chosen so that the risk of the estimator is less than or equal to the given risk.
    The argument R is ignored in this case.
    Checks are not performed to ensure that the given set of generators indeed form generators.
    If verify_estimator is true, the estimator constructed for the special case of randomized Pauli measurement strategy is checked with the general construction
    for Juditsky & Nemirovski's estimator.
    """
    # NOTE(review): noise_type, write_to_file, dirpath and filename are
    # accepted but never used in this function body -- confirm intended.
    # set the random seed once here and nowhere else
    if random_seed:
        np.random.seed(int(random_seed))
    # number of qubits
    nq = int(nq)
    # dimension of the system
    n = int(2**nq)
    ### create the states
    # create the target state from the specified generators
    target_state = str(target_state).lower()
    if target_state in ['ghz', 'w', 'cluster']:
        state_args_dict = {'ghz': {'d': 2, 'M': nq}, 'w': {'nq': nq}, 'cluster': {'nq': nq}}
        rho = generate_special_state(state = target_state, state_args = state_args_dict[target_state], density_matrix = True,\
                                     flatten = True, isComplex = True)
    elif target_state == 'stabilizer':
        generators = state_args['generators']
        # if generators are specified using I, X, Y, Z, convert them to 0, 1, 2, 3
        generators = [g.lower().translate(str.maketrans('ixyz', '0123')) for g in generators]
        rho = generate_special_state(state = 'stabilizer', state_args = {'nq': nq, 'generators': generators}, density_matrix = True, flatten = True, isComplex = True)
    elif target_state == 'random':
        rho = generate_random_state(n = n, pure = True, density_matrix = True, flatten = True, isComplex = True, verify = False, random_seed = None)
    else:
        raise ValueError("Please specify a valid target state. Currently supported arguments are GHZ, W, Cluster, stabilizer and random.")
    # apply noise to the target state to create the actual state ("prepared in the lab")
    if not ((noise is None) or (noise is False)):
        # the target state decoheres due to noise
        # NOTE: type(...) (rather than isinstance) deliberately excludes bool,
        # so the default noise=True falls through to the p = 0.1 branch below
        if type(noise) in [int, float]:
            if not (noise >= 0 and noise <= 1):
                raise ValueError("noise level must be between 0 and 1")
            sigma = depolarizing_channel(rho, p = noise)
        else:
            sigma = depolarizing_channel(rho, p = 0.1)
    else:
        # noise disabled: use an unrelated random mixed state as the "actual" state
        sigma = generate_random_state(n, pure = False, density_matrix = True, flatten = True, isComplex = True, verify = False,\
                                      random_seed = None)
    ### generate the measurement outcomes for the effective (minimax optimal) POVM
    # calculate the normalization factor
    # computing each Pauli operator individulally (as opposed to computing a list of all Pauli operators at once) is a little slower, but can handle more number of qubits
    NF = np.sum([np.abs(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in range(1, 4**nq)])
    # if risk is given, then choose the number of repetitions to achieve that risk (or a slightly lower risk)
    if risk is not None:
        if risk < 0.5:
            R = int(np.ceil(2*np.log(2/epsilon) / np.abs(np.log(1 - (n/NF)**2 * risk**2))))
        else:
            raise ValueError("Only risk < 0.5 can be achieved by choosing appropriate number of repetitions of the minimax optimal measurement.")
    effective_outcomes = generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes)
    ### obtain the fidelity estimator
    PSFEM = Pauli_Sampler_Fidelity_Estimation_Manager(n, R, NF, epsilon, epsilon_o, tol)
    # 'risk' is rebound here to the achieved risk of the constructed estimator
    fidelity_estimator, risk = PSFEM.find_fidelity_estimator()
    # obtain the estimate
    estimate = fidelity_estimator(effective_outcomes)
    # verify the estimator created for the specialized case using the general approach
    if verify_estimator:
        # the effective POVM for the optimal measurement strategy is simply {omega_1 rho + omega_2 Delta_rho, (1 - omega_1) rho + (1 - omega_2) Delta_rho},
        # where omega_1 = (n + NF - 1)/2NF, omega_2 = (NF - 1)/2NF, and Delta_rho = I - rho
        omega1 = 0.5 * (n + NF - 1) / NF
        omega2 = 0.5 * (1 - 1/NF)
        Delta_rho = np.eye(2**nq).ravel() - rho
        POVM_list = [[omega1 * rho + omega2 * Delta_rho, (1 - omega1) * rho + (1 - omega2) * Delta_rho]]
        # Juditsky & Nemirovski estimator
        FEMC = Fidelity_Estimation_Manager_Corrected(R, epsilon, rho, POVM_list, epsilon_o, tol)
        fidelity_estimator_general, risk_general = FEMC.find_fidelity_estimator()
        # matrices at optimum
        sigma_1_opt, sigma_2_opt = embed_hermitian_matrix_real_vector_space(FEMC.sigma_1_opt, reverse = True, flatten = True), embed_hermitian_matrix_real_vector_space(FEMC.sigma_2_opt, reverse = True, flatten = True)
        # constraint at optimum
        constraint_general = np.real(np.sum([np.sqrt((np.conj(Ei).dot(sigma_1_opt) + epsilon_o/2)*(np.conj(Ei).dot(sigma_2_opt) + epsilon_o/2)) / (1 + epsilon_o) for Ei in POVM_list[0]]))
    if print_result:
        print("True fidelity", np.real(np.conj(rho).dot(sigma)))
        print("Estimate", estimate)
        print("Risk", risk)
        print("Repetitions", R)
        # print results from the general approach
        if verify_estimator:
            print("Risk (general)", risk_general)
            print("Constraint (general)", constraint_general, "Lower constraint bound", (epsilon / 2)**(1/R))
    if not verify_estimator:
        return PSFEM
    else:
        return (PSFEM, FEMC)
| 59.258457 | 226 | 0.643999 |
c95c3a9b1e12620c6fdf7ce0fba7e46782237c62 | 2,054 | py | Python | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | 3 | 2018-09-19T11:46:53.000Z | 2018-10-09T04:48:28.000Z | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | null | null | null | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 10:29:52 2018
@author: lin
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the five benchmark datasets (NumPy .npz archives) used by this script.
# NOTE(review): paths are relative to the working directory -- run from the
# project root so "datasets/" resolves.
data1 = np.load("datasets/breast-cancer.npz")
data2 = np.load("datasets/diabetes.npz")
data3 = np.load("datasets/digit.npz")
data4 = np.load("datasets/iris.npz")
data5 = np.load("datasets/wine.npz")
| 25.358025 | 79 | 0.601266 |
c960f97df84624c96f4c85fc91f46edd0a467d9e | 11,996 | py | Python | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | 1 | 2020-01-30T17:59:50.000Z | 2020-01-30T17:59:50.000Z | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | null | null | null | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | null | null | null | # dumpfreeze
# Create MySQL dumps and backup to Amazon Glacier
import os
import logging
import datetime
import click
import uuid
import sqlalchemy as sa
from dumpfreeze import backup as bak
from dumpfreeze import aws
from dumpfreeze import inventorydb
from dumpfreeze import __version__
# module-level logger for this CLI entry point
logger = logging.getLogger(__name__)
# Backup operations
# Archive operations
# NOTE(review): `main`, `backup`, `archive` and `poll_jobs` are click
# commands/groups defined elsewhere in this module (not visible in this
# excerpt) -- confirm before relying on them.
main.add_command(backup)
main.add_command(archive)
main.add_command(poll_jobs, name='poll-jobs')
# invoke the click group; obj={} supplies the initial click context object
main(obj={})
| 29.766749 | 79 | 0.614288 |
c96260912cab6b5833f970ad06a26821cebe5439 | 886 | py | Python | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | import csv
# Parsed command-line arguments; populated in the __main__ block below
FLAGS = None
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='make coordinate.csv for data')
    parser.add_argument('--width', '-w', type=int,
                        required=False,
                        help='input width')
    parser.add_argument('--height', '-t', type=int,
                        required=False,
                        help='input height')
    FLAGS = parser.parse_args()
    # NOTE(review): main() is defined elsewhere in this file (not visible
    # in this excerpt) and presumably reads the module-level FLAGS.
    main()
| 28.580645 | 80 | 0.497743 |
c96277ac68a88dc09c944967b21d05e1368096d4 | 3,546 | py | Python | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | 2 | 2022-02-04T10:47:37.000Z | 2022-03-15T13:03:19.000Z | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | null | null | null | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | 1 | 2022-01-13T15:55:17.000Z | 2022-01-13T15:55:17.000Z | """
Script to create dataframe from serpent bumat files
including all the nuclides.
Zsolt Elter 2019
"""
import json
import os
# Nuclide table read once at import time: keys are ZAID strings used to
# index each bumat inventory, values are the CSV column identifiers
with open ('nuclides.json') as json_file:
    nuclidesDict = json.load(json_file)
#final name of the file
dataFrame='PWR_UOX-MOX_BigDataFrame-SF-GSRC-noReactorType.csv'
def readInventory(filename):
    """Read a Serpent bumat file and return its nuclide inventory.

    The first six lines of the file are header/material-definition lines
    and are skipped; every following line is expected to look like
    "<ZAID>.<lib> <atom density>" (whitespace separated).

    Parameters
    ----------
    filename : str
        Path to the bumat file to be read.

    Returns
    -------
    inventory : dict
        Dictionary storing the inventory. Keys are ZAID identifiers (str,
        with the trailing 4-character library suffix such as ".09c"
        stripped), values are atom densities (str) in b^{-1}cm^{-1}.
    """
    inventory = {}
    # 'with' guarantees the handle is closed even if reading fails
    # (the original open()/readlines()/close() leaked the handle on error)
    with open(filename) as mat:
        matfile = mat.readlines()
    for line in matfile[6:]:
        x = line.strip().split()
        # x[0] is e.g. "92235.09c": drop the trailing 4-char library id
        inventory[x[0][:-4]] = x[1]
    return inventory
# Build and write the CSV header: fixed metadata columns followed by one
# column per nuclide identifier (this also truncates any existing file)
#header of file
dataFrameStr=',BU,CT,IE,fuelType,TOT_SF,TOT_GSRC,TOT_A,TOT_H'
for nuclIDi in nuclidesDict.values():
    dataFrameStr=dataFrameStr+',%s'%nuclIDi #here we add the nuclide identifier to the header!
dataFrameStr=dataFrameStr+'\n'
#header ends
f = open(dataFrame,'w')
f.write(dataFrameStr)
f.close()
#let's open the file linking to the outputs
# NOTE(review): x is one comma-separated log row; from its use below,
# x[0] is a running index, x[1] BU, x[3] IE, x[4] the fuel type and
# x[-1] the path to the bumat output file -- confirm against the log writer.
csv=open('file_log_PWR_UOX-MOX.csv').readlines()
depfileOld=''
for line in csv[1:]:
    x=line.strip().split(',')
    ####SFRATE AND GSRC
    if x[4]=='UOX':
        deppath='/UOX/serpent_files/' #since originally I have not included a link to the _dep.m file, here I had to fix that
        depfileNew='%s/IE%d/BU%d/sPWR_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1])) #and find out from the BIC parameters
    else: #the path to the _dep.m file...
        deppath='/MOX/serpent_files/'
        depfileNew='%s/IE%d/BU%d/sPWR_MOX_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1]))
    if depfileNew != depfileOld: #of course there is one _dep.m file for all the CT's for a given BU-IE, so we keep track what to open. And we only do it once
        #things we grep here are lists!
        TOTSFs=os.popen('grep TOT_SF %s -A 2'%depfileNew).readlines()[2].strip().split() #not the most time efficient greping, but does the job
        TOTGSRCs=os.popen('grep TOT_GSRC %s -A 2'%depfileNew).readlines()[2].strip().split()
        TOTAs=os.popen('grep "TOT_A =" %s -A 2'%depfileNew).readlines()[2].strip().split() #TOT_A in itself matches TOT_ADENS, that is why we need "" around it
        TOTHs=os.popen('grep TOT_H %s -A 2'%depfileNew).readlines()[2].strip().split()
        depfileOld=depfileNew
    else:
        depfileOld=depfileNew
    ####
    inv=readInventory(x[-1]) #extract inventory from the outputfile
    idx=int(x[-1][x[-1].find('bumat')+5:]) #get an index, since we want to know which value from the list to take
    totsf=TOTSFs[idx]
    totgsrc=TOTGSRCs[idx]
    tota=TOTAs[idx]
    toth=TOTHs[idx]
    #we make a big string for the entry, storing all the columns
    newentry=x[0]+','+x[1]+','+x[2]+','+x[3]+','+x[4]+','+totsf+','+totgsrc+','+tota+','+toth
    for nucli in nuclidesDict.keys():
        newentry=newentry+',%s'%(inv[nucli])
    newentry=newentry+'\n'
    #entry is created, so we append
    f = open(dataFrame,'a')
    f.write(newentry)
    f.close()
    #and we print just to see where is the process at.
    if int(x[0])%1000==0:
        print(x[0])
| 35.818182 | 164 | 0.620135 |
c963dca9a730234f66f325086da0df26ded50d93 | 453 | py | Python | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | from mongoengine import disconnect
from waitress import serve
from todolist_backend.server import app, get_configs
from .database import panic_init
from .info import MONGOENGINE_ALIAS
| 22.65 | 52 | 0.743929 |
c964301c7d47d614f521b894d1e55685f398fbd2 | 86 | py | Python | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | a = int(input("a = "))
b = int(input("b = "))
print("{} + {} = {}".format(a, b, a+b)) | 21.5 | 39 | 0.406977 |
c965792691ce7606e38e36d2ae95ee8c42d4351b | 2,953 | py | Python | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | null | null | null | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | 1 | 2022-02-08T22:54:54.000Z | 2022-02-08T22:54:54.000Z | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | null | null | null | # File: archer_views.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
| 38.350649 | 95 | 0.562817 |
c96886f093360dec7c0ce79819456ac3947c46e0 | 12,198 | py | Python | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
from collections import defaultdict
from types import TracebackType
from typing import (
Callable,
DefaultDict,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
Union,
)
# This is a mapping of plugin_name -> PluginError instances
# all PluginErrors get added to this in PluginError.__init__
PLUGIN_ERRORS: DefaultDict[str, List['PluginError']] = defaultdict(list)
# standard tuple type returned from sys.exc_info()
ExcInfoTuple = Tuple[Type[Exception], Exception, Optional[TracebackType]]
# importlib.metadata entered the stdlib in Python 3.8; older interpreters
# use the importlib_metadata backport with the same API
if sys.version_info >= (3, 8):
    from importlib import metadata as importlib_metadata
else:
    import importlib_metadata
# re-exported so code below can reference the class uniformly on either path
Distribution = importlib_metadata.Distribution
def format_exceptions(plugin_name: str, as_html: bool = False):
    """Return formatted tracebacks for all exceptions raised by plugin.
    Parameters
    ----------
    plugin_name : str
        The name of a plugin for which to retrieve tracebacks.
    as_html : bool
        Whether to return the exception string as formatted html,
        defaults to False.
    Returns
    -------
    str
        A formatted string with traceback information for every exception
        raised by ``plugin_name`` during this session.
    """
    _plugin_errors: List[PluginError] = PLUGIN_ERRORS.get(plugin_name)
    if not _plugin_errors:
        return ''
    # local import (presumably to avoid a circular import at module load)
    from napari import __version__
    format_exc_info = get_tb_formatter()
    _linewidth = 80
    # center the "Errors for plugin '<name>'" banner inside an 80-column rule
    _pad = (_linewidth - len(plugin_name) - 18) // 2
    msg = [
        f"{'=' * _pad} Errors for plugin '{plugin_name}' {'=' * _pad}",
        '',
        f'{"napari version": >16}: {__version__}',
    ]
    # package metadata is looked up from the first error's plugin module
    err0 = _plugin_errors[0]
    package_meta = fetch_module_metadata(err0.plugin_module)
    if package_meta:
        msg.extend(
            [
                f'{"plugin package": >16}: {package_meta["package"]}',
                f'{"version": >16}: {package_meta["version"]}',
                f'{"module": >16}: {err0.plugin_module}',
            ]
        )
    msg.append('')
    # one numbered section per recorded error, followed by its traceback
    for n, err in enumerate(_plugin_errors):
        _pad = _linewidth - len(str(err)) - 10
        msg += ['', f'ERROR #{n + 1}: {str(err)} {"-" * _pad}', '']
        msg.append(format_exc_info(err.info(), as_html))
    msg.append('=' * _linewidth)
    return ("<br>" if as_html else "\n").join(msg)
def get_tb_formatter() -> Callable[[ExcInfoTuple, bool], str]:
    """Return a formatter callable that uses IPython VerboseTB if available.
    Imports IPython lazily if available to take advantage of ultratb.VerboseTB.
    If unavailable, cgitb is used instead, but this function overrides a lot of
    the hardcoded cgitb styles and adds error chaining (for exceptions that
    result from other exceptions).
    Returns
    -------
    callable
        A function that accepts a 3-tuple and a boolean ``(exc_info, as_html)``
        and returns a formatted traceback string. The ``exc_info`` tuple is of
        the ``(type, value, traceback)`` format returned by sys.exc_info().
        The ``as_html`` determines whether the traceback is formatted in html
        or plain text.
    """
    try:
        import IPython.core.ultratb
    except ImportError:
        import cgitb
        import traceback
        # cgitb does not support error chaining...
        # see https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
        # this is a workaround
        def cgitb_html(exc: Exception) -> str:
            """Format exception with cgitb.html."""
            info = (type(exc), exc, exc.__traceback__)
            return cgitb.html(info)
    # NOTE(review): format_exc_info is expected to be bound inside both
    # branches above; its definitions are not visible in this excerpt --
    # confirm before relying on this return value.
    return format_exc_info
def fetch_module_metadata(dist: Union[Distribution, str]) -> Dict[str, str]:
    """Attempt to retrieve name, version, contact email & url for a package.

    Parameters
    ----------
    dist : str or Distribution
        Distribution object or name of a distribution. If a string, it must
        match the *name* of the package in the METADATA file... not the name
        of the module.

    Returns
    -------
    package_info : dict
        A dict with metadata about the package.
        Empty if the distribution cannot be found.
    """
    # Fix vs. original docstring: the parameter is named ``dist`` (not
    # ``distname``) and a missing distribution yields {} rather than None.
    if isinstance(dist, Distribution):
        meta = dist.metadata
    else:
        try:
            meta = importlib_metadata.metadata(dist)
        except importlib_metadata.PackageNotFoundError:
            # unknown distribution name: signal "no metadata" with an empty dict
            return {}
    # email/url fall back to the maintainer/download fields when the primary
    # field is absent (meta.get(...) returns None, which is falsy)
    return {
        'package': meta.get('Name', ''),
        'version': meta.get('Version', ''),
        'summary': meta.get('Summary', ''),
        'url': meta.get('Home-page') or meta.get('Download-Url', ''),
        'author': meta.get('Author', ''),
        'email': meta.get('Author-Email') or meta.get('Maintainer-Email', ''),
        'license': meta.get('License', ''),
    }
# Mapping of ANSI SGR codes -> CSS-style attribute fragments; presumably
# used to translate ANSI-colored traceback text into HTML styling
# (the consuming code is not visible in this excerpt).
ANSI_STYLES = {
    1: {"font_weight": "bold"},
    2: {"font_weight": "lighter"},
    3: {"font_weight": "italic"},
    4: {"text_decoration": "underline"},
    5: {"text_decoration": "blink"},
    6: {"text_decoration": "blink"},
    8: {"visibility": "hidden"},
    9: {"text_decoration": "line-through"},
    30: {"color": "black"},
    31: {"color": "red"},
    32: {"color": "green"},
    33: {"color": "yellow"},
    34: {"color": "blue"},
    35: {"color": "magenta"},
    36: {"color": "cyan"},
    37: {"color": "white"},
}
| 33.237057 | 79 | 0.565175 |
c9693a49a18c1714e3e73fb34025f16a983d9fca | 572 | py | Python | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 14 | 2020-04-03T08:18:21.000Z | 2021-11-10T04:39:45.000Z | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 2 | 2021-08-31T20:25:23.000Z | 2021-09-21T14:40:56.000Z | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 1 | 2020-08-27T17:04:29.000Z | 2020-08-27T17:04:29.000Z | import uvicorn
from gql import gql, reference_resolver, query
from stargql import GraphQL
from helper import get_user_by_id, users
type_defs = gql("""
type Query {
me: User
}
type User @key(fields: "id") {
id: ID!
name: String
username: String
}
""")
app = GraphQL(type_defs=type_defs, federation=True)
if __name__ == '__main__':
uvicorn.run(app, port=8082)
| 16.342857 | 51 | 0.687063 |
c96971b273caac5ab991341745cb2d8e72b76d77 | 2,519 | py | Python | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-05-29T08:57:58.000Z | 2022-02-19T07:09:25.000Z | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2021-05-31T10:18:36.000Z | 2022-01-25T11:39:03.000Z | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-05-29T13:27:10.000Z | 2021-05-29T13:27:10.000Z | from __future__ import absolute_import, division, print_function
import stripe
import pytest
# run the coroutine tests in this module under pytest's asyncio support
pytestmark = pytest.mark.asyncio
# fixed Stripe-style object identifiers reused across the tests below
TEST_RESOURCE_ID = "fee_123"
TEST_FEEREFUND_ID = "fr_123"
| 35.985714 | 78 | 0.687574 |
c96a052abb332bba00f134e10d854c779b770b2a | 866 | py | Python | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | 8 | 2020-02-21T22:21:01.000Z | 2022-02-16T05:30:54.000Z | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | null | null | null | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | 3 | 2020-08-05T05:42:35.000Z | 2021-08-30T05:39:51.000Z |
busyschedule() | 30.928571 | 82 | 0.39261 |
c96af4a490471a665152773f8f3b2a90f985672a | 607 | py | Python | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from unittest import TestCase
from fundamentals.backtracking.path_through_grid import PathThroughGrid
| 22.481481 | 82 | 0.507414 |
c96b923ab99cdd18285399edd12e8dfeb03b5f78 | 343 | py | Python | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | null | null | null | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | 63 | 2019-08-25T07:48:54.000Z | 2019-10-18T01:52:29.000Z | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | null | null | null | import sqlite3
import Sources.Parser
# Query and print every row of the Locations table from the game database
conn = sqlite3.connect("Database/vitg.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM Locations")
results = cursor.fetchall()
print(results)
conn.close()
# Exercise the project's command parser on sample words
# NOTE(review): both sample words are empty strings -- likely placeholders
parser = Sources.Parser.Parser()
words = [u"", u""]
for word in words:
    command = parser.getCommand(word)
    print(command)
| 19.055556 | 42 | 0.725948 |
c96d512247f8395a641feee824bc046d0dbdc522 | 7,018 | py | Python | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | 3 | 2019-03-31T12:32:25.000Z | 2020-01-04T20:57:14.000Z | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | null | null | null | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | 1 | 2020-10-24T23:48:15.000Z | 2020-10-24T23:48:15.000Z | import numpy as np
import pandas as pd
import sys
import string
import time
import subprocess
from collections import Counter
import string
import random
#First argument is the gene score distribution that you want to draw from, the second is the type of clusters to generate
#If 'large' only clusters with a large number of shared genes will be simulated
#If 'mixed' one cluster with only a few shared genes will be simulated
# Create the output directory via the shell; the return code is ignored,
# so a pre-existing directory does not stop the script
subprocess.call('mkdir NewSims_nothreshenforced',shell = True)
# Require the two positional command-line arguments described above
if len(sys.argv) < 3:
    sys.exit("Enter the ICD10 code of interest as the first argument, and either 'mixed' or 'large' as the second argument depending on desired number of significant genes in a cluster.")
# NOTE(review): main() is defined elsewhere in this file (not visible in
# this excerpt)
main()
| 43.320988 | 265 | 0.727273 |
c96fe90561c66a9922b3825850ab89dad8c3224a | 7,273 | py | Python | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | """Convenience functions for dictionary access and YAML"""
from sklearn.utils import Bunch
from collections import OrderedDict
from collections.abc import Mapping
import copy
import yaml
# ----------------------------------------------------------------------------
def deep_convert_list_dict(d, skip_list_level=0):
    """In nested dict `d` convert all lists into dictionaries.

    Lists are converted to dicts keyed by the stringified element index
    ('0', '1', ...); dicts are converted in place; strings are returned
    unchanged.

    Args:
        d - nested structure of dicts/lists/strings to convert
        skip_list_level - top-n nested list levels to ignore for
                          dict conversion (those lists stay lists, but
                          their elements are still converted)
    Returns:
        the converted structure (same object for dicts and kept lists,
        a new dict for each converted list)
    """
    if isinstance(d, str):
        return d
    try:
        # dict-like: convert each value in place
        for k, v in d.items():
            d[k] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
    except AttributeError:
        # `d` has no .items(): treat it as a list-like sequence
        if skip_list_level:
            # keep this list level as a list, keep converting below it
            skip_list_level -= 1
            for k, v in enumerate(d):
                d[k] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
        else:
            # replace the list with an index-keyed dict
            # (the original wrapped this loop in a no-op `try/except: raise`;
            # removed here with identical behavior)
            dd = {}
            for k, v in enumerate(d):
                dd[str(k)] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
            return dd
    except TypeError:
        # iteration over d.items() raised TypeError (e.g. a non-iterable
        # leaf hit during the recursion above): leave `d` as-is
        pass
    return d
def xml_dict(xml, skip_list_level=0):
    """Parse `xml` source into a nested dictionary.

    Lists produced by the XML parser are recursively converted to
    index-keyed dicts (pandas.json_normalize treats nested lists
    specially); `skip_list_level` controls how many of the outermost
    list levels are left as plain lists.
    """
    import xmltodict
    parsed = xmltodict.parse(xml, dict_constructor=dict)
    return deep_convert_list_dict(parsed, skip_list_level=skip_list_level)
# ----------------------------------------------------------------------------
# manipulate class objects
def set_class_dict(cls, clsdict):
    """Return a subclass of `cls` whose class dict is extended by `clsdict`."""
    subclass = type(cls.__name__, (cls,), clsdict)
    return subclass
def set_docstr(cls, docstr, **kwargs):
    """Return a subclass of `cls` whose docstring is `docstr`.

    Extra keyword arguments become additional class attributes.
    """
    clsdict = {'__doc__': docstr, **kwargs}
    return set_class_dict(cls, clsdict)
# ----------------------------------------------------------------------------
# working with dict and Bunch
def deep_update(d1, d2):
    """Recursively update mapping `d1` with mapping `d2`, in place.

    :param d1: A dictionary (possibly nested) to be updated.
    :type d1: dict
    :param d2: A dictionary (possibly nested) whose values update d1.
    :type d2: dict
    :return: `d1` updated by `d2`: keys missing from d1 are added, keys
        present in both are overwritten (recursing into nested mappings).
        If either argument is not a mapping, `d2` is returned unchanged.
    :rtype: dict
    """
    if isinstance(d1, Mapping) and isinstance(d2, Mapping):
        for key, value in d2.items():
            d1[key] = deep_update(d1.get(key), value)
        return d1
    return d2
def nested_value(d, keys):
    """Access an element in nested dictionary `d` along the path `keys`."""
    node = d
    for key in keys:
        node = node[key]
    return node
def select_keys(d, keys):
    """Return the items of dict `d` whose keys are listed in `keys`."""
    selected = {}
    for key, value in d.items():
        if key in keys:
            selected[key] = value
    return selected
def merge_dicts(d1, d2):
    """
    Performs a deep_update() of d1 using d2, on a deep copy of `d1`.

    Recursively updates a deep copy of `d1` with `d2`; `d1` itself is
    left unmodified.

    :param d1: A dictionary (possibly nested) to be updated.
    :type d1: dict
    :param d2: A dictionary (possibly nested) which will be used to update d1.
    :type d2: dict
    :return: An updated & deep-copied version of d1, where d2 values were used to update the values
        of d1. Will add d2 keys if not present in d1. If a key does exist in d1, that key's
        value will be overwritten by the d2 value. Works recursively to update nested
        dictionaries.
    :rtype: dict
    """
    # Fix vs. original: a stray second docstring-like string literal that
    # followed the real docstring (a no-op expression statement) was removed
    # and its content merged into the docstring above.
    md = copy.deepcopy(d1)
    return deep_update(md, d2)
def make_Bunch(docstr, *args, **kwargs):
    '''Construct a Bunch whose class docstring is `docstr`.

    Everything after `docstr` is forwarded to the Bunch dict constructor.
    The appeal of a Bunch b over a plain dict is attribute access:
    b.key works in addition to b['key'].

    Example:
        B = make_Bunch("""Container for special custom data""", a=1)
        B.b = 3
        print(B)
        help(B)
    '''
    # TODO: the docstring modification causes issues with pickle
    # serialization. If you might want to use pickle, construct the
    # sklearn.utils.Bunch object directly instead of using this helper.
    documented_cls = set_docstr(Bunch, docstr)
    return documented_cls(*args, **kwargs)
# ----------------------------------------------------------------------------
# YAML functions
def _map_from_ordered_pairs(pairs, MapType=Bunch):
    """Construct a custom dict type (e.g. Bunch) from key/value pairs."""
    # plain dicts preserve insertion order on Python >= 3.6
    mapping = dict(pairs)
    return MapType(**mapping)
def _setup_yaml():
    """Have custom dict types produce standard format YAML output for dicts"""
    # NOTE(review): _dict_representer is defined elsewhere in this module
    # (not visible in this excerpt); it should emit these mapping types as
    # plain YAML maps -- confirm before relying on it.
    yaml.add_multi_representer(OrderedDict, _dict_representer)
    yaml.add_multi_representer(Bunch, _dict_representer)
def yload(datastr, Loader=yaml.SafeLoader, MapType=Bunch, **kwargs):
    """
    Load object from YAML input string or stream
    :param datastr: A string or stream containing YAML formatted text
    :type datastr: str or stream
    :param Loader: The yaml loader object to use, defaults to yaml.SafeLoader
    :type Loader: yaml.Loader Object, optional
    :param MapType: type of dictionary to construct, defaults to Bunch
    :type MapType: type, optional
    :param kwargs: Further keyword args are passed on to yaml.load()
    :return: Python object representation of the YAML string/stream
    :rtype: Specified in MapType parameter
    """
    # delegates to the module's ordered loader (_ordered_load is defined
    # elsewhere in this file and not visible in this excerpt)
    return _ordered_load(datastr, Loader=Loader, MapType=MapType, **kwargs)
def ydump(data, *args, sort_keys=False, **kwargs):
    """Serialize `data` to a YAML string.

    Mapping keys are NOT sorted by default, so OrderedDict/Bunch inputs keep
    their original key ordering in the output.

    :param data: dict or Bunch to serialize
    :param args: forwarded positionally to yaml.dump()
    :param sort_keys: alphabetically sort mapping keys, defaults to False
    :param kwargs: forwarded to yaml.dump()
    :return: YAML string representation of data
    """
    kwargs["sort_keys"] = sort_keys
    return yaml.dump(data, *args, **kwargs)
# Install the custom YAML representers as soon as this module is imported.
_setup_yaml()
| 33.210046 | 99 | 0.641001 |
c9702823a44c14ac03b736bffeea367a229f28da | 6,612 | py | Python | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>15</version>
<name>TS_CMAGENT_SetSessionId</name>
<primitive_test_id/>
<primitive_test_name>CMAgent_SetSessionId</primitive_test_name>
<primitive_test_version>5</primitive_test_version>
<status>FREE</status>
<synopsis>TC_CMAGENT_1 - Set Session ID API Validation</synopsis>
<groups_id>4</groups_id>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMAGENT_1</test_case_id>
<test_objective>To Validate "Set Session ID" Function of CM Agent</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component"
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
CMAgent_SetSessionId
Input
1.sessionId as 0
2.pathname (Device.X_CISCO_COM_CableModem.)
3.override as 0 (This parameter will enable the reading of current session id and check set session id api with value read)
4. priority as 0</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(CMAgent_SetSessionId - func name - "If not exists already"
cmagent - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overrided automically by Test Manager with provided arguments in configure page (TS_CMAGENT_SetSessionId.py)
3.Execute the generated Script(TS_CMAGENT_SetSessionId.py) using excution page of Test Manager GUI
4.cmagentstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named CMAgent_SetSessionId through registered TDK cmagentstub function along with necessary Entry Values as arguments
5.CMAgent_SetSessionId function will call CCSP Base Interface Function named CcspBaseIf_SendcurrentSessionIDSignal, that inturn will call "CcspCcMbi_CurrentSessionIdSignal" along with provided input arguments to assign session id to global value of CM Agent
6.Responses(printf) from TDK Component,Ccsp Library function and cmagentstub would be logged in Agent Console log based on the debug info redirected to agent console
7.cmagentstub will validate the available result (from agent console log and Pointer to instance as non null ) with expected result (Eg:"Session ID assigned Succesfully") and the same is updated in agent console log
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from cmagentstub</automation_approch>
<except_output>CheckPoint 1:
Session ID assigned log from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CMAGENT_SetSessionId</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
# NOTE(review): Python 2 script (bare `print` statements). `<ipaddress>` and
# `<port>` below are placeholders substituted by the TDK Test Manager before
# execution, so this file is a template rather than directly runnable Python.
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmagent","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMAGENT_SetSessionId');
#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS]  : %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
if loadStatusExpected not in loadModuleresult.upper():
    # Abort early: without the cmagent stub loaded no test step can run.
    print "[Failed To Load CM Agent Stub from env TDK Path]"
    print "[Exiting the Script]"
    exit();
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CMAgent_SetSessionId');
#Input Parameters
tdkTestObj.addParameter("pathname","Device.X_CISCO_COM_CableModem.");
tdkTestObj.addParameter("priority",0);
tdkTestObj.addParameter("sessionId",0);
# override=0: per the XML header above, the stub reads the current session id
# and validates the set-session-id API against the value it read.
tdkTestObj.addParameter("override",0);
expectedresult = "SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
    #Set the result status of execution as success
    tdkTestObj.setResultStatus("SUCCESS");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : SUCCESS";
else:
    #Set the result status of execution as failure
    tdkTestObj.setResultStatus("FAILURE");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : FAILURE";
    # NOTE(review): indentation was lost in this copy; this extra details line
    # is assumed to belong to the failure branch -- confirm against upstream.
    print "[TEST EXECUTION RESULT] : %s" %resultDetails ;
obj.unloadModule("cmagent");
| 44.675676 | 259 | 0.73775 |
c970887827dfacb25a04d949c110b21b2a98595f | 492 | py | Python | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | 1 | 2021-06-01T21:57:23.000Z | 2021-06-01T21:57:23.000Z | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | import logging
import yaml
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
| 23.428571 | 66 | 0.632114 |
c97156d460bdc88e5f228d10d1465d45738af933 | 8,536 | py | Python | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | 1 | 2020-08-23T23:12:56.000Z | 2020-08-23T23:12:56.000Z | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | null | null | null | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | 1 | 2020-08-23T23:16:47.000Z | 2020-08-23T23:16:47.000Z | #!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
# 2017, Samantha Klasfeld, the Wagner Lab
# the Perelman School of Medicine, the University of Pennsylvania
# Samantha Klasfeld, 12-21-2017
import argparse
import sys
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="this script takes \
in a 2 tables and performs a \
joins them to create a merged table")
parser.add_argument('left_table', help='left table file name')
parser.add_argument('right_table', help='right table file name')
parser.add_argument('out_table', help='output table file name')
parser.add_argument('-w','--how', help='Type of merge to be performed: \
`left`,`right`,`outer`,`inner`, `antileft`. Default:`inner`',
choices=['left', 'right', 'outer', 'inner', 'antileft'], default='inner')
parser.add_argument('-j','--on', help='Column or index level names \
to join on. These must be found in both DataFrames. If on is None \
and not merging on indexes then this defaults to the intersection \
of the columns in both DataFrames.', nargs='+')
parser.add_argument('-lo','--left_on', help='Column or index level names \
to join on in the left DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ro','--right_on', help='Column or index level names \
to join on in the right DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ml','--merge_left_index', help='Use the index from the left \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-mr','--merge_right_index', help='Use the index from the right \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-or','--order', help='Order the join keys \
lexicographically in the result DataFrame. If False, the \
order of the join keys depends on the join type (how keyword).', \
action='store_true', default=False)
parser.add_argument('-su','--suffixes', help='Tuple of (str,str). Each str is a \
Suffix to apply to overlapping column names in the left and right side, \
respectively. To raise an exception on overlapping columns \
use (False, False). Default:(`_x`,`_y`)', nargs=2)
parser.add_argument('-nl', '--noheader_l', action='store_true', default=False, \
help='Set if `left_table` has no header. If this is set, \
user must also set `colnames_l`')
parser.add_argument('-nr', '--noheader_r', action='store_true', default=False, \
help='Set if `right_table` has no header. If this is set, \
user must also set `colnames_r`')
parser.add_argument('-cl', '--colnames_l', nargs='+', \
help='`If `noheader_l` is set, add column names \
to `left_table`. Otherwise, rename the columns.')
parser.add_argument('-cr', '--colnames_r', nargs='+', \
help='`If `noheader_r` is set, add column names \
to `right_table`. Otherwise, rename the columns.')
parser.add_argument('--left_sep', '-sl', default="\t", \
help='table delimiter of `left_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--right_sep', '-sr', default="\t", \
help='table delimiter of `right_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--out_sep', '-so', default="\t", \
help='table delimiter of `out_table`. By default, \
the out table will be tab-delimited')
parser.add_argument('--left_indexCol', '-il', \
help='Column(s) to use as the row labels of the \
`left_table`, either given as string name or column index.')
parser.add_argument('--right_indexCol', '-ir', \
help='Column(s) to use as the row labels of the \
`right_table`, either given as string name or column index.')
parser.add_argument('-clc','--change_left_cols', nargs='+',
help='list of specific column names you want to change in left table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
parser.add_argument('-crc','--change_right_cols', nargs='+',
help='list of specific column names you want to change in right table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
#parser.add_argument('--header','-H', action='store_true', default=False, \
# help='true if header in table')
args = parser.parse_args()
# 0. Validate mutually-dependent options before touching any files.
# Headerless input requires explicit column names to stand in for the header.
if args.noheader_l and not args.colnames_l:
    sys.exit("Error: If `noheader_l` is set, user must also set `colnames_l`\n")
if args.noheader_r and not args.colnames_r:
    sys.exit("Error: If `noheader_r` is set, user must also set `colnames_r`\n")
# Wholesale renaming (colnames_*) and per-column renaming (change_*_cols)
# are mutually exclusive on each side.
if args.change_left_cols and args.colnames_l:
    sys.exit("Error: Can only set one of these parameters:\n" +
             "\t* change_left_cols\n"+
             "\t* colnames_l\n")
if args.change_right_cols and args.colnames_r:
    sys.exit("Error: Can only set one of these parameters:\n" +
             "\t* change_right_cols\n"+
             "\t* colnames_r\n")
# Join keys must be supplied either via --on or via --left_on/--right_on.
if not args.on:
    if not args.left_on and not args.right_on:
        sys.exit("Error: must set columns to join on.")
# 1. Read input files
# Collect pandas.read_csv keyword arguments for each side, then load.
read_ltable_param={}
read_rtable_param={}
read_ltable_param["sep"]=args.left_sep
read_rtable_param["sep"]=args.right_sep
if args.noheader_l:
    # header=None tells pandas the first row is data, not column names.
    read_ltable_param["header"]=None
if args.noheader_r:
    read_rtable_param["header"]=None
if args.left_indexCol:
    read_ltable_param["index_col"]=args.left_indexCol
if args.right_indexCol:
    read_rtable_param["index_col"]=args.right_indexCol
left_df = pd.read_csv(args.left_table, **read_ltable_param)
right_df = pd.read_csv(args.right_table, **read_rtable_param)
# 2. Change/Update column names of the input tables
# colnames_l/colnames_r replace ALL column names (count must match exactly).
if args.colnames_l:
    if len(left_df.columns) != len(args.colnames_l):
        sys.exit(("ValueError: Length mismatch: Expected axis " +
                  "has %i elements, new values have %i elements") %
                 (len(left_df.columns), len(args.colnames_l)))
    left_df.columns = args.colnames_l
if args.colnames_r:
    if len(right_df.columns) != len(args.colnames_r):
        sys.exit(("ValueError: Length mismatch: Expected axis " +
                  "has %i elements, new values have %i elements") %
                 (len(right_df.columns), len(args.colnames_r)))
    right_df.columns = args.colnames_r
# change_left_cols/change_right_cols rename selected columns only; each entry
# is formatted as "old_name,new_name".
if args.change_left_cols:
    for left_changeCol_param in args.change_left_cols:
        if len(left_changeCol_param.split(",")) != 2:
            sys.exit("ERROR: values set to `change_left_cols` must " +
                     "be in the format [old_col_name],[new_column_name]")
    rename_left_cols = dict(x.split(",") for x in args.change_left_cols)
    left_df = left_df.rename(columns=rename_left_cols)
if args.change_right_cols:
    for right_changeCol_param in args.change_right_cols:
        if len(right_changeCol_param.split(",")) != 2:
            sys.exit("ERROR: values set to `change_right_cols` must " +
                     "be in the format [old_col_name],[new_column_name]")
    rename_right_cols = dict(x.split(",") for x in args.change_right_cols)
    right_df = right_df.rename(columns=rename_right_cols)
# 3. Set merge parameters
# Translate CLI flags into pandas.DataFrame.merge keyword arguments.
merge_param={}
if args.how == "antileft":
    # The anti-join is computed on top of a left join; unmatched rows are
    # selected after the merge further below.
    merge_param['how']="left"
else:
    merge_param['how']=args.how
if args.on:
    merge_param['on']=args.on
if args.left_on:
    merge_param['left_on']=args.left_on
if args.right_on:
    merge_param['right_on']=args.right_on
if args.merge_left_index:
    merge_param['left_index']=args.merge_left_index
if args.merge_right_index:
    merge_param['right_index']=args.merge_right_index
if args.order:
    # Sort the join keys lexicographically in the result frame.
    merge_param['sort']=args.order
if args.suffixes:
    merge_param['suffixes']=args.suffixes
# 4. Perform Merge
# BUGFIX: the previous "antileft" implementation selected
# left_df.loc[merge_df.index], which indexes the LEFT table by the merged
# frame's positional labels and does not identify unmatched rows at all
# (a left join yields a row for every left row, matched or not). A left
# anti-join must keep exactly the left rows with no match in the right
# table; pandas' merge `indicator` column marks those rows explicitly.
if args.how == "antileft":
    merge_param['indicator'] = True
    merged = left_df.merge(right_df, **merge_param)
    # Keep only the unmatched ("left_only") rows, then drop the helper
    # column. Right-side columns are all-NA for these rows.
    merge_df = merged[merged['_merge'] == 'left_only'].drop(columns='_merge')
else:
    merge_df = left_df.merge(right_df, **merge_param)
# 5. Export merged table
out_param={}
out_param["sep"]=args.out_sep
if not args.left_indexCol:
    # No meaningful index was configured, so omit the default RangeIndex.
    out_param["index"]=False
merge_df.to_csv(args.out_table, **out_param)
c971e430652331e744f0b8b0fc1ac07db5704fb9 | 884 | py | Python | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | # parse the input
with open("6-input.txt") as f:
    # One line of comma-separated integer timers (AoC 2021 day 6 input).
    fish = [int(n) for n in f.readline().split(",")]
# Bucket the timers: startcounts[t] = number of fish whose timer equals t (0..8).
startcounts = dict(zip(range(0, 9), [0 for x in range(9)]))
for f in fish:  # NOTE(review): rebinds `f` (the closed file handle) -- harmless here
    startcounts[f] += 1
# Part 1: simulate 80 days.
counts = startcounts
for day in range(80):
    print(day, [counts[v] for v in range(9)])
    # NOTE(review): updatedcounts() is defined elsewhere in this file; it is
    # assumed to return a fresh dict without mutating its argument, since
    # `startcounts` is reused for part 2 below -- confirm.
    counts = updatedcounts(counts)
print("\n\n", sum(counts.values()), "\n\n")
# Part 2: restart from the initial buckets and simulate 256 days.
counts = startcounts
for day in range(256):
    print(day, [counts[v] for v in range(9)])
    counts = updatedcounts(counts)
print("\n\n", sum(counts.values()), "\n\n")
| 25.257143 | 59 | 0.616516 |
c97337433ecaa8303091ad4ba921fe29802304f0 | 3,287 | py | Python | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
#
# presumably consumed by the unittestX harness to mark a standalone run -- confirm
standalone = True
import os, numpy as np
# Set before any imports that may read it, so MPI binding stays disabled.
# NOTE(review): effectiveness depends on import order elsewhere -- confirm.
os.environ['MCVINE_MPI_BINDING'] = 'NONE'
import unittestX as unittest
if __name__ == "__main__": unittest.main()
# End of file
| 35.728261 | 98 | 0.606632 |
c973d138beb4bdeb8b96079770c98d55a9dad08e | 693 | py | Python | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | 5 | 2020-07-10T21:00:28.000Z | 2022-02-23T01:41:01.000Z | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | null | null | null | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | 4 | 2020-09-13T14:31:45.000Z | 2022-03-23T04:06:38.000Z | from ZeroKnowledge import primality
import random
if __name__ == "__main__":
    # Smoke test: apply the one-way permutation to two sample inputs.
    # NOTE(review): `bbs` is defined elsewhere in this file; presumably a
    # callable implementing the Blum-Blum-Shub permutation -- confirm.
    owp = bbs()
    print(owp(70203203))
    print(owp(12389))
| 21 | 66 | 0.685426 |
c9743d63b6769b341831d17f36b94f9161097eb4 | 5,811 | py | Python | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
###############################################################################
# Author: Greg Zynda
# Last Modified: 12/11/2019
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2019, Greg Zynda
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from quicksect import IntervalTree
import logging
from differannotate.constants import FORMAT
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN, format=FORMAT)
def _strand(strand):
return not isinstance(strand, bool)
# Bidirectional strand lookup: symbol -> index ('+' -> 0, '-' -> 1) and back.
strand_dict = {'+':0, '-':1, 0:'+', 1:'-'}
def interval2tuple(interval):
	'''
	Flatten an interval into a tuple: (start, end) followed by any data payload.

	# Usage
	>>> IT = iterit()
	>>> IT.add(0, 10, (0, 0))
	>>> IT.add(5, 15, (1, 1))
	>>> for i in map(interval2tuple, IT.iterintervals()): print i
	(0, 10, 0, 0)
	(5, 15, 1, 1)
	'''
	span = (interval.start, interval.end)
	# Falsy data (None or empty) yields just the bare coordinate pair.
	if not interval.data:
		return span
	return span + tuple(interval.data)
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 30.584211 | 134 | 0.66202 |
c9743e069ad8fe0a795c53358dc5e0951de0d7c7 | 2,113 | py | Python | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | 13 | 2017-08-03T22:30:25.000Z | 2019-01-23T16:32:28.000Z | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTS | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
DOC="""plotCurve
plotCurve is used to create vertical profiles of different lateral ylabel statistics of FEOTS output.
Usage:
plotCurve plot <file> [--out=<out>] [--opts=<opts>] [--scalex=<scalex>] [--xlabel=<xlabel>] [--ylabel=<ylabel>]
Commands:
plot Create a vertical profile plot of the chosen statistics for the given FEOTS output ylabel.
Options:
-h --help Display this help screen
--out=<out> The path to place the output files [default: ./]
--opts=<opts> Comma separated list of plot options. [default: none]
--scalex=<scalex> Amount to scale the x dimension by for the plot (multiplicative). [default: 1.0]
--xlabel=<xlabel> Label for the x-dimension in the plot. [default: x]
--ylabel=<ylabel> Label for the y-dimension in the plot. [default: y]
"""
import numpy as np
from matplotlib import pyplot as plt
from docopt import docopt
import feotsPostProcess as feots
#END parse_cli
#END loadCurve
#END plotCurve
#END main
if __name__ == '__main__':
    # Delegate to main(), defined earlier in this file (see the #END main marker).
    main()
| 26.08642 | 114 | 0.644108 |
c974860e7717afdaa174abddb3959a9916ac8f90 | 6,535 | py | Python | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | 1 | 2020-05-27T03:38:36.000Z | 2020-05-27T03:38:36.000Z | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | null | null | null | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: walkthrough.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='walkthrough.proto',
package='walkthrough',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11walkthrough.proto\x12\x0bwalkthrough\"\x16\n\x05Hello\x12\r\n\x05world\x18\x01 \x01(\t\"\x0e\n\x0c\x41notherHello\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x07\n\x05\x45ventb\x06proto3')
)
_HELLO = _descriptor.Descriptor(
name='Hello',
full_name='walkthrough.Hello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world', full_name='walkthrough.Hello.world', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=56,
)
_ANOTHERHELLO = _descriptor.Descriptor(
name='AnotherHello',
full_name='walkthrough.AnotherHello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=72,
)
_COUNTER = _descriptor.Descriptor(
name='Counter',
full_name='walkthrough.Counter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='walkthrough.Counter.value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=98,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='walkthrough.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='walkthrough.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=129,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='walkthrough.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=138,
)
DESCRIPTOR.message_types_by_name['Hello'] = _HELLO
DESCRIPTOR.message_types_by_name['AnotherHello'] = _ANOTHERHELLO
DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Hello = _reflection.GeneratedProtocolMessageType('Hello', (_message.Message,), dict(
DESCRIPTOR = _HELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Hello)
))
_sym_db.RegisterMessage(Hello)
AnotherHello = _reflection.GeneratedProtocolMessageType('AnotherHello', (_message.Message,), dict(
DESCRIPTOR = _ANOTHERHELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.AnotherHello)
))
_sym_db.RegisterMessage(AnotherHello)
Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
DESCRIPTOR = _COUNTER,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Counter)
))
_sym_db.RegisterMessage(Counter)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Event)
))
_sym_db.RegisterMessage(Event)
# @@protoc_insertion_point(module_scope)
| 28.413043 | 286 | 0.72303 |
c9778ad426ae5b59849224563d916aed7af67c6a | 2,438 | py | Python | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | 3 | 2022-01-03T12:10:41.000Z | 2022-03-21T22:14:51.000Z | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | null | null | null | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | null | null | null | #import pcap
#import dpkt
#import dnet
from collections import defaultdict
from scapy.all import *
from scapy.all import send as ssend
import netifaces
import getopt
import datetime
# Capture in promiscuous mode when sniffing with scapy.
conf.sniff_promisc=True
# Whether the user supplied a pcap file instead of live capture
# (presumably toggled by the CLI-parsing code elsewhere in this file -- confirm).
pcap_specified = False
# Accumulates observed DNS data per key; presumably used to detect conflicting
# (spoofed) responses for the same query -- confirm against main()/handlers.
detection_map = defaultdict(list)
if __name__ == "__main__":
    main()
| 29.731707 | 123 | 0.651354 |
c977bbeabde9764661a77f5cb005a889127439bd | 534 | py | Python | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 9 | 2018-01-15T22:44:24.000Z | 2021-05-28T11:13:03.000Z | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 140 | 2018-01-12T10:07:47.000Z | 2021-08-02T23:03:49.000Z | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 11 | 2018-01-16T19:49:35.000Z | 2022-01-18T16:30:34.000Z | """Detail Yeti's Malware object structure."""
from .entity import Entity
Entity.datatypes[Malware.type] = Malware
| 19.777778 | 50 | 0.683521 |
c978b614564b15ad98ff9be9b231eda20bb8f13d | 6,405 | py | Python | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-05-10T22:19:44.000Z | 2020-07-21T07:28:39.000Z | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 187 | 2018-04-13T17:19:24.000Z | 2020-04-21T00:41:15.000Z | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-07-10T00:14:07.000Z | 2019-07-25T17:59:44.000Z | from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
| 48.157895 | 274 | 0.444653 |
c978cd7b9db932291bd60fddc562ff295cb80fc4 | 192 | py | Python | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | time = eval(input())
qtdtime = [3600, 60, 1]
result = []
for i in qtdtime:
qtd = time // i
result.append(str(qtd))
time -= qtd * i
print(f'{result[0]}:{result[1]}:{result[2]}')
| 16 | 45 | 0.557292 |
c979df9649b375b708736b82938ddd72a6f161b7 | 161 | py | Python | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | from collections import Counter | 32.2 | 45 | 0.714286 |
c97a5d77ecd44aba596f1a6d89d78783ed1f6a39 | 5,458 | py | Python | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | null | null | null | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | 3 | 2020-04-06T19:13:58.000Z | 2020-05-22T22:21:31.000Z | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | null | null | null | import threading
import functools
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
"""
Once an engine is created is is not destroyed until the program itself exits.
Engines are used to produce a new session when a context is entered.
When a context is exited, the session for that context is destroyed.
"""
global_database_context = threading.local()
class BigQueryDatabaseContext(DatabaseContext):
def __init__(self, project='', default_dataset='', **kwargs):
"""
Args:
project (Optional[str]): The project name, defaults to
your credential's default project.
default_dataset (Optional[str]): The default dataset.
This is used in the case where the table has no
dataset referenced in it's __tablename__
**kwargs (kwargs): Keyword arguments are passed to create_engine.
Example:
'bigquery://some-project/some-dataset' '?'
'credentials_path=/some/path/to.json' '&'
'location=some-location' '&'
'arraysize=1000' '&'
'clustering_fields=a,b,c' '&'
'create_disposition=CREATE_IF_NEEDED' '&'
'destination=different-project.different-dataset.table' '&'
'destination_encryption_configuration=some-configuration' '&'
'dry_run=true' '&'
'labels=a:b,c:d' '&'
'maximum_bytes_billed=1000' '&'
'priority=INTERACTIVE' '&'
'schema_update_options=ALLOW_FIELD_ADDITION,ALLOW_FIELD_RELAXATION' '&'
'use_query_cache=true' '&'
'write_disposition=WRITE_APPEND'
These keyword arguments match those in the job configuration:
https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html#google.cloud.bigquery.job.QueryJobConfig
"""
connection_str = 'bigquery://{}/{}'.format(project, default_dataset)
if len(kwargs) > 0:
connection_str += '?'
for k, v in kwargs.items():
connection_str += '{}={}&'.format(k, v)
connection_str = connection_str[:-1]
super(BigQueryDatabaseContext, self).__init__(
connection_str
)
def requires_database_context(f):
"""
Dectorator that causes the function
to throw a DatabaseContextError if the function is called
but a DatabaseContext has not been entered.
"""
return wrapper
| 34.327044 | 177 | 0.622206 |
c97aeafdeaa32ce81d91fe53e55f4082c9dd290e | 444 | py | Python | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | 1 | 2020-12-28T13:58:34.000Z | 2020-12-28T13:58:34.000Z | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | null | null | null | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | null | null | null | import numpy as np
from rover_sates import *
from state_machine import *
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
| 23.368421 | 87 | 0.702703 |
c97ce1f34312b0218b91e4e2faa6b094d0a6ab72 | 188 | py | Python | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | 1 | 2020-10-05T01:09:15.000Z | 2020-10-05T01:09:15.000Z | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | import sys
from loguru import logger
logger.remove()
logger.add(
sys.stdout,
format='{level.icon} {time:YYYY-MM-DD HH:mm:ss} <lvl>{level}\t{message}</lvl>',
colorize=True,
)
| 17.090909 | 83 | 0.664894 |
c97d3cc7b903e622320da5991308503b0ba6a84c | 1,770 | py | Python | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | 1 | 2020-04-13T17:44:28.000Z | 2020-04-13T17:44:28.000Z | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 36.875 | 76 | 0.716949 |
c97d6ba493e05a165ce59471439dfde7e1eb3a10 | 2,953 | py | Python | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | 1 | 2021-09-05T18:12:27.000Z | 2021-09-05T18:12:27.000Z | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | null | null | null | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | null | null | null | import os
from dash import dcc, html
URL_PATH_SEP = '/'
URL_BASE_PATHNAME = os.getenv('REPORT_URL_BASE', URL_PATH_SEP)
if URL_BASE_PATHNAME[-1] != URL_PATH_SEP:
URL_BASE_PATHNAME += URL_PATH_SEP
def make_dash_table(df):
"""Return a dash definition of an HTML table for a Pandas dataframe"""
table = []
for index, row in df.iterrows():
html_row = []
for i in range(len(row)):
html_row.append(html.Td([row[i]]))
table.append(html.Tr(html_row))
return table
| 29.53 | 79 | 0.385371 |
c97e6b1f40a5bb81ae2c559b1a1285a802b08835 | 53 | py | Python | social/backends/ubuntu.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 1,987 | 2015-01-01T16:12:45.000Z | 2022-03-29T14:24:25.000Z | social/backends/ubuntu.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 731 | 2015-01-01T22:55:25.000Z | 2022-03-10T15:07:51.000Z | virtual/lib/python3.6/site-packages/social/backends/ubuntu.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | [
"MIT"
] | 1,082 | 2015-01-01T16:27:26.000Z | 2022-03-22T21:18:33.000Z | from social_core.backends.ubuntu import UbuntuOpenId
| 26.5 | 52 | 0.886792 |
c97f4aad4afc2d34135bd0a531bcabb3725f19f6 | 10,715 | py | Python | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# pylint: disable=3rd-party-module-not-gated
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import tempfile
import shutil
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
mock_open,
patch)
# Import Salt Libs
import salt.states.virt as virt
import salt.utils.files
| 44.832636 | 110 | 0.466729 |