blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
5
261
content_id
stringlengths
40
40
detected_licenses
sequencelengths
0
45
license_type
stringclasses
2 values
repo_name
stringlengths
8
111
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
72 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
530k
616M
star_events_count
int64
0
102k
fork_events_count
int64
0
24.6k
gha_license_id
stringclasses
9 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
40 values
src_encoding
stringclasses
10 values
language
stringclasses
1 value
is_vendor
bool
1 class
is_generated
bool
2 classes
length_bytes
int64
11
4.05M
extension
stringclasses
25 values
content
stringlengths
10
4.04M
authors
sequencelengths
1
1
author_id
stringclasses
578 values
fb2c64c0218df858e821204c4c485f29f4b33c74
e0527bce5c53a196752d3a16adf50cb60754de5f
/10-How to Stop Programs Crashing Demos/3-is_square.py
8bf01fcece7fa35279f95d25ece62fa140398965
[]
no_license
ARWA-ALraddadi/python-tutorial-for-beginners
ddeb657f419fbc176bea273bc9fb6b88d1894191
21cedfc47871ca4d25c2382464c60ab0a2121205
refs/heads/master
2023-06-30T20:24:30.688000
2021-08-08T08:22:29
2021-08-08T08:22:29
193,094,651
0
0
null
null
null
null
UTF-8
Python
false
false
3,066
py
################################################################ ## ## As a demonstration of a function which applies defensive ## programming in different ways, consider a predicate ## which is intended to return True if a given natural ## number (i.e., a non-negative integer) is a square of ## another natural number. ## ## From this description the function could be "misused" in ## three ways: ## ## 1) It could be given a negative number. ## 2) It could be given a floating point number. ## 3) It could be given a value which is not a number at ## all. ## ## By adding some "defensive" code we can make a naive ## implementation more robust by responding appropriately ## to each of these cases: ## ## 1) A negative number can never be a square of another ## number, so we can always return False in this case. ## Here we choose to do so "silently", not drawing ## attention to the unexpected value at all, since the ## answer returned is still "correct" mathematically. ## 2) A positive floating point number could be a square of ## a natural number so, even though we're not required ## to handle floating point numbers we can still do so, ## but choose to generate a "warning" message in this ## case. ## 3) If the function is given a non-numerical value it ## is reasonable to assume that something is seriously ## wrong with the calling code, so in this case we ## generate an "error" message and return the special ## value None. 
#---------------------------------------------------------
# Predicate: return True if the given natural number is the
# square of some other natural number
def is_square(natural_number):
    from math import sqrt

    # Three "defensive" checks follow
    ## # Check that the parameter is a number
    ## if not (isinstance(natural_number, int) or isinstance(natural_number, float)):
    ##     print('ERROR - parameter must be numeric, given:', repr(natural_number))
    ##     return None
    ##
    ## # Check that the parameter is positive
    ## if natural_number < 0:
    ##     return False
    ##
    ## # Check that the parameter is a natural number
    ## if isinstance(natural_number, float):
    ##     print('Warning - expected natural, given float:', natural_number)

    # A value is a perfect square exactly when its square root
    # has no fractional part
    root = sqrt(natural_number)
    return root % 1 == 0

#---------------------------------------------------------
# Some tests
#
# The first of these tests is a "valid" one, but the remaining
# three all provide unexpected inputs.  Uncommenting the
# "defensive" checks above will cause the function to respond
# appropriately.  (It will crash until the defensive code is
# uncommented.  Why?)

print(is_square(36))       # expected input

print()
print(is_square(-1))       # unexpected input, but handled silently

print()
print(is_square(225.0))    # unexpected input, handled with warning

print()
print(is_square('August')) # unexpected input, handled as an error
[ "noreply@github.com" ]
noreply@github.com
d44bbb217114c0831167824d694d57c29ab86665
e3f3f911019ac126d01c056eafc7c3183107a5af
/Traffic Sign Detection/all_signs_combined/src/predict.py
19ed9a428015b625610be9930dfee35938fb451b
[]
no_license
uncctrafficsigndetection/Traffic-Sign-Detection
595258766f865c4b3c628b002d7b93a774168a9b
3ff4be52357f4b6340fef94124f8c835ab66fd8a
refs/heads/master
2020-04-09T20:28:33.910000
2018-12-05T21:29:50
2018-12-05T21:29:50
160,574,509
0
0
null
null
null
null
UTF-8
Python
false
false
959
py
# Traffic-sign prediction driver: loads a trained model checkpoint and runs
# inference on a single image file.
# NOTE(review): Model/data_loader/Generator are project-local modules; only
# Model is actually used below — the remaining imports appear to be leftovers.
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator

# Directory holding the trained checkpoint files.
checkpoint_dir='tf_data/sample_model'
# Path of the input image to classify (hard-coded to a local machine —
# presumably a speed-limit sample; adjust before reuse).
X='C:/Users/Karthick/Desktop/cvproject/data/5/00000_00000.ppmspeed_2_.ppm'

# Build the model in test (inference) mode and predict on the single image.
M = Model(mode = 'test')
yhat = M.predict(X = X, checkpoint_dir = checkpoint_dir)

# Earlier manual TF-session restore experiment, kept for reference:
# save_dir="C:/Users/Karthick/Desktop/cvproject/speedlimitckp/"
# #saver = tf.train.Saver()
# sess = tf.Session()
# saver = tf.train.import_meta_graph('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/model_epoch70.ckpt.meta')
# saver.restore(sess,tf.train.latest_checkpoint('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/'))
# #checkpoint_name = tf.train.latest_checkpoint(save_dir)
# #saver.restore(sess, checkpoint_name)
# yhat_numpy = sess.run(yhat, feed_dict = {X : X, keep_prob: 1.0})
# print(yhat_numpy)
# #C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model
[ "noreply@github.com" ]
noreply@github.com
948e7570c22e3a814efc70ef208bb5769a7b3ba1
f2568af5aacdb3045f8dd20ec2fd91e395ba57d1
/createmylvm.py
a2bcdb8ebcc803398d9d04adf65faf10eb88ceef
[]
no_license
shubhambhardwaj007/lvm-automation
e93d9efe61e9951710dc5ee6579ef01d139304e3
e446f794fc05d1f3dac8e152d428cfc9657b817e
refs/heads/master
2023-03-26T02:07:51.421000
2021-03-27T19:51:46
2021-03-27T19:51:46
352,161,993
0
0
null
null
null
null
UTF-8
Python
false
false
1,070
py
import subprocess


def createmylv():
    """Interactively create an LVM logical volume and mount it.

    Prompts for physical-volume devices, a VG name, an LV name/size and a
    mountpoint, then shells out to pvcreate/vgcreate/lvcreate/mkfs/mount.
    Prints progress; returns None.  Must be run with root privileges
    (not checked here).
    """
    # Show available block devices so the user can pick PV candidates.
    print(subprocess.getoutput('lsblk'))
    device = input("Choose the devices for PV separated by space in between : ").split(" ")
    # Create one physical volume per chosen device, reporting each result.
    for i in device:
        pvcreate = subprocess.getstatusoutput("pvcreate {0}".format(i))
        if pvcreate[0] == 0:
            print("{0} pv created".format(i))
        else:
            print("{0} pv failed".format(i))
    vgname = input("Enter VG name: ")
    x = ' '.join(device)
    # Volume group spanning all of the PVs just created.
    vgcreate = subprocess.getstatusoutput("vgcreate {0} {1}".format(vgname, x))
    lvname = input("Enter LV name: ")
    size = input("Enter Size of LV: ")
    lvcreate = subprocess.getstatusoutput("lvcreate --size {0} --name {1} {2}".format(size, lvname, vgname))
    mount = input("Enter the mountpoint: ")
    # Format the new LV with ext4, then mount it at the requested path.
    formating = subprocess.getstatusoutput("mkfs.ext4 /dev/{0}/{1}".format(vgname, lvname))
    mount_path = subprocess.getstatusoutput("mount /dev/{0}/{1} {2}".format(vgname, lvname, mount))
    if mount_path[0] == 0:
        print("Done")
    else:
        print("Can't mount")


# BUG FIX: the original called createlv(), which is undefined (the function
# above is named createmylv), so the script died with a NameError.
createmylv()
[ "noreply@github.com" ]
noreply@github.com
c7e2d80388cbe425136e01a06bdb2ea24fa604c6
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
/sdBs/AllRun/sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_coadd.py
39e21f206956741881cd664d37e0bb5ecdba667f
[]
no_license
tboudreaux/SummerSTScICode
73b2e5839b10c0bf733808f4316d34be91c5a3bd
4dd1ffbb09e0a599257d21872f9d62b5420028b0
refs/heads/master
2021-01-20T18:07:44.723000
2016-08-08T16:49:53
2016-08-08T16:49:53
65,221,159
0
0
null
null
null
null
UTF-8
Python
false
false
498
py
from gPhoton.gMap import gMap

def main():
    """Produce a coadded NUV count image for one sdB target with gPhoton.

    Runs gMap over a fixed 2-arcmin field (skyrange 0.0333... deg) centred on
    the target, in 30-second steps, writing a per-step movie count file and a
    coadded count FITS to hard-coded output paths.  Note the two output paths
    use different parent directories ('sdBs' vs 'sdB') — presumably
    intentional, but worth confirming against the pipeline layout.
    """
    gMap(band="NUV", skypos=[248.990167,34.240833], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
[ "thomas@boudreauxmail.com" ]
thomas@boudreauxmail.com
f82a7850addf3773f1ce92a89e4d51f96cf3f763
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_tkdkop_pancake.py
259ec04a68548d92ceed7f438162fc6b46baa760
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405000
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
286
py
#!/usr/bin/env python import sys import itertools m = sys.stdin.readline() i = 0 for line in sys.stdin.readlines(): line = line.strip() i += 1 out_str = "Case #%d: " % i line += '+' k = itertools.groupby(line) out_str += str(len(list(k))-1) print out_str
[ "[dhuo@tcd.ie]" ]
[dhuo@tcd.ie]
4723c6f7c093e3989d133cebab10e0c13bf512a0
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03997/s926418877.py
acd277945016fcae9d48adcc8806653b1aeeec5f
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763000
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
58
py
# Read three integers from stdin — the two parallel sides a, b and the
# height c of a trapezoid — and print the integer area (a + b) * c // 2.
#
# FIX: the original built and eval()'d a string to read the inputs, which is
# unreadable and needlessly executes constructed code; plain int(input())
# calls do the same job.  (The original also bound a dummy d = 0 that was
# never used; it is dropped.)
a = int(input())
b = int(input())
c = int(input())
print((a + b) * c // 2)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
9567422e1472a65046cf8160b1bdae8fbcf7dcd3
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
/MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/types/internal.py
c56c7aa6d7790b4c36d248603f2282e60af08a39
[ "Apache-2.0" ]
permissive
Portfolio-Projects42/UsefulResourceRepo2.0
1dccc8961a09347f124d3ed7c27c6d73b9806189
75b1e23c757845b5f1894ebe53551a1cf759c6a3
refs/heads/master
2023-08-04T12:23:48.862000
2021-09-15T12:51:35
2021-09-15T12:51:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,129
py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Types internal to TensorFlow. These types should not be exported. External code should not rely on these. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # TODO(mdan): Is this strictly needed? Only ops.py really uses it. class NativeObject(object): """Types natively supported by various TF operations. The most notable example of NativeObject is Tensor. """
[ "bryan.guner@gmail.com" ]
bryan.guner@gmail.com
91781778b2e281bd6402914cfd6ee441e7a46194
fe17c327916695ca3f21c0f9bb85396237be3125
/DSA_in_python/DSA_BST.py
a8068494dc2cb5cb28703d631d7f490f052d2915
[]
no_license
tanmay6414/Python
d222255f3b4e60b42c7bed7613f11ef449ebc00e
54659aebe0ed15f722cd469d10a42cea82f6c7f6
refs/heads/master
2021-07-12T18:26:59.590000
2020-08-20T08:15:11
2020-08-20T08:15:11
195,175,648
0
0
null
null
null
null
UTF-8
Python
false
false
2,732
py
class Node:
    """A single binary-search-tree node holding a value and two child links."""

    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value


class BST:
    """Binary search tree with insert, traversals, delete and search.

    Bug fixes versus the original:
      * postorder() recursed with preorder() on the right subtree;
      * deleteNode() used root.right instead of root.left when deleting a
        node whose right child is None, silently losing the left subtree;
      * _search() discarded the result of its recursive calls, so any match
        below the root was reported as not found.
    """

    def __init__(self):
        self.root = None

    def insert(self, node, value):
        """Insert value under node and return the (possibly new) subtree root."""
        # If the tree is empty, return a new node
        if node is None:
            return Node(value)
        # Otherwise recur down the tree (duplicates go right)
        if value < node.value:
            node.left = self.insert(node.left, value)
        else:
            node.right = self.insert(node.right, value)
        # return the (unchanged) node pointer
        return node

    def inorder(self, root):
        """Print values in ascending (left, node, right) order."""
        if root:
            self.inorder(root.left)
            print(root.value)
            self.inorder(root.right)

    def preorder(self, root):
        """Print values in (node, left, right) order."""
        if root:
            print(root.value)
            self.preorder(root.left)
            self.preorder(root.right)

    def postorder(self, root):
        """Print values in (left, right, node) order."""
        if root:
            self.postorder(root.left)
            # FIX: was self.preorder(root.right)
            self.postorder(root.right)
            print(root.value)

    def minval_node(self, node):
        """Return the leftmost (minimum-value) node of the given subtree."""
        current = node
        while current.left is not None:
            current = current.left
        return current

    def deleteNode(self, root, value):
        """Delete value from the subtree rooted at root; return the new root."""
        if root is None:
            return root
        if value < root.value:
            root.left = self.deleteNode(root.left, value)
        elif value > root.value:
            root.right = self.deleteNode(root.right, value)
        else:
            # Node found: splice out, handling 0/1/2-child cases.
            if root.left is None:
                temp = root.right
                root = None
                return temp
            elif root.right is None:
                # FIX: was temp = root.right, which dropped the left subtree
                temp = root.left
                root = None
                return temp
            # Two children: replace with in-order successor, delete it below.
            temp = self.minval_node(root.right)
            root.value = temp.value
            root.right = self.deleteNode(root.right, temp.value)
        print(value, " deleted")
        return root

    def search(self, value):
        """Return True if value is present (searching from self.root)."""
        if self.root != None:
            return self._search(value, self.root)
        else:
            return False

    def _search(self, value, node):
        if value == node.value:
            return True
        elif value < node.value and node.left != None:
            # FIX: result was discarded in the original
            return self._search(value, node.left)
        elif value > node.value and node.right != None:
            # FIX: result was discarded in the original
            return self._search(value, node.right)
        return False


# Demo driver: build a small tree and exercise the traversals and search.
# (Note: s.root is never assigned, so s.search() looks at an empty tree —
# preserved from the original script.)
print("*"*25, "Delete Node BST", "*"*25)
root = Node(50)
s = BST()
s.insert(root, 40)
s.insert(root, 30)
s.insert(root, 4)
s.insert(root, 78)
print("\nInorder :")
s.inorder(root)
print("\nPostorder :")
s.postorder(root)
print("\nPreorder :")
s.preorder(root)
print("\n\tSearch Result :", s.search(50))
print("\n")
# Continue the demo: remove the value 30, then dump the surviving
# tree in preorder to show the new shape.
s.deleteNode(root, 30)
print("\n")
s.preorder(root)
[ "noreply@github.com" ]
noreply@github.com
808ac7632e66327e3f8d1fe634dab41d619f065e
786de89be635eb21295070a6a3452f3a7fe6712c
/CorAna/tags/V00-00-04/src/ConfigParametersCorAna.py
8baf5f326ca6758d621cc3f9f8cf43ac75c28720
[]
no_license
connectthefuture/psdmrepo
85267cfe8d54564f99e17035efe931077c8f7a37
f32870a987a7493e7bf0f0a5c1712a5a030ef199
refs/heads/master
2021-01-13T03:26:35.494000
2015-09-03T22:22:11
2015-09-03T22:22:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
31,606
py
#-------------------------------------------------------------------------- # File and Version Information: # $Id$ # # Description: # Module ConfigParametersCorAna... # #------------------------------------------------------------------------ """Is intended as a storage for configuration parameters for CorAna project. This software was developed for the LCLS project. If you use all or part of it, please give an appropriate acknowledgment. @version $Id: template!python!py 4 2008-10-08 19:27:36Z salnikov $ @author Mikhail S. Dubrovin """ #------------------------------ # Module's version from CVS -- #------------------------------ __version__ = "$Revision: 4 $" # $Source$ #-------------------------------- # Imports of standard modules -- #-------------------------------- import sys import os from copy import deepcopy #----------------------------- # Imports for other modules -- #----------------------------- #import ConfigParameters as cpbase from ConfigParameters import * # ConfigParameters from Logger import logger from PyQt4 import QtGui # for icons only... import AppDataPath as apputils # for icons #--------------------- # Class definition -- #--------------------- class ConfigParametersCorAna ( ConfigParameters ) : """Is intended as a storage for configuration parameters for CorAna project. #@see BaseClass ConfigParameters #@see OtherClass Parameters """ list_pars = [] def __init__ ( self, fname=None ) : """Constructor. @param fname the file name with configuration parameters, if not specified then it will be set to the default value at declaration. 
""" ConfigParameters.__init__(self) self.declareCorAnaParameters() self.readParametersFromFile ( fname ) self.initRunTimeParameters() self.defineStyles() def initRunTimeParameters( self ) : self.char_expand = u' \u25BE' # down-head triangle self.iconsAreLoaded = False self.plotarray_is_on = False self.plotg2_is_on = False self.autoRunStatus = 0 # 0=inctive, 1=split, 2=process, 3=merge #self.plotimgspe = None self.plotimgspe_g = None #----------------------------- def setIcons(self) : if self.iconsAreLoaded : return self.iconsAreLoaded = True path_icon_contents = apputils.AppDataPath('CorAna/icons/contents.png').path() path_icon_mail_forward = apputils.AppDataPath('CorAna/icons/mail-forward.png').path() path_icon_button_ok = apputils.AppDataPath('CorAna/icons/button_ok.png').path() path_icon_button_cancel = apputils.AppDataPath('CorAna/icons/button_cancel.png').path() path_icon_exit = apputils.AppDataPath('CorAna/icons/exit.png').path() path_icon_home = apputils.AppDataPath('CorAna/icons/home.png').path() path_icon_redo = apputils.AppDataPath('CorAna/icons/redo.png').path() path_icon_undo = apputils.AppDataPath('CorAna/icons/undo.png').path() path_icon_reload = apputils.AppDataPath('CorAna/icons/reload.png').path() path_icon_save = apputils.AppDataPath('CorAna/icons/save.png').path() path_icon_save_cfg = apputils.AppDataPath('CorAna/icons/fileexport.png').path() path_icon_edit = apputils.AppDataPath('CorAna/icons/edit.png').path() path_icon_browser = apputils.AppDataPath('CorAna/icons/fileopen.png').path() path_icon_monitor = apputils.AppDataPath('CorAna/icons/icon-monitor.png').path() path_icon_unknown = apputils.AppDataPath('CorAna/icons/icon-unknown.png').path() path_icon_logviewer = apputils.AppDataPath('CorAna/icons/logviewer.png').path() path_icon_locked = apputils.AppDataPath('CorAna/icons/locked-icon.png').path() path_icon_unlocked = apputils.AppDataPath('CorAna/icons/unlocked-icon.png').path() self.icon_contents = QtGui.QIcon(path_icon_contents ) 
self.icon_mail_forward = QtGui.QIcon(path_icon_mail_forward) self.icon_button_ok = QtGui.QIcon(path_icon_button_ok) self.icon_button_cancel = QtGui.QIcon(path_icon_button_cancel) self.icon_exit = QtGui.QIcon(path_icon_exit ) self.icon_home = QtGui.QIcon(path_icon_home ) self.icon_redo = QtGui.QIcon(path_icon_redo ) self.icon_undo = QtGui.QIcon(path_icon_undo ) self.icon_reload = QtGui.QIcon(path_icon_reload ) self.icon_save = QtGui.QIcon(path_icon_save ) self.icon_save_cfg = QtGui.QIcon(path_icon_save_cfg ) self.icon_edit = QtGui.QIcon(path_icon_edit ) self.icon_browser = QtGui.QIcon(path_icon_browser ) self.icon_monitor = QtGui.QIcon(path_icon_monitor ) self.icon_unknown = QtGui.QIcon(path_icon_unknown ) self.icon_logviewer = QtGui.QIcon(path_icon_logviewer) self.icon_lock = QtGui.QIcon(path_icon_locked ) self.icon_unlock = QtGui.QIcon(path_icon_unlocked ) #base_dir = '/usr/share/icons/Bluecurve/24x24/' #self.icon_contents = QtGui.QIcon(base_dir + 'actions/contents.png') #self.icon_mail_forward = QtGui.QIcon(base_dir + '../../gnome/24x24/actions/mail-forward.png') #self.icon_button_ok = QtGui.QIcon(base_dir + 'actions/button_ok.png') #self.icon_button_cancel = QtGui.QIcon(base_dir + 'actions/button_cancel.png') #self.icon_exit = QtGui.QIcon(base_dir + 'actions/exit.png') #self.icon_home = QtGui.QIcon(base_dir + 'actions/gohome.png') #self.icon_redo = QtGui.QIcon(base_dir + 'actions/redo.png') #self.icon_undo = QtGui.QIcon(base_dir + 'actions/undo.png') #self.icon_reload = QtGui.QIcon(base_dir + 'actions/reload.png') #self.icon_stop = QtGui.QIcon(base_dir + 'actions/stop.png') #self.icon_save_cfg = QtGui.QIcon(base_dir + 'actions/fileexport.png') #self.icon_save = QtGui.QIcon(base_dir + 'stock/stock-save.png') #self.icon_edit = QtGui.QIcon(base_dir + 'actions/edit.png') #self.icon_browser = QtGui.QIcon(base_dir + 'actions/fileopen.png') #self.icon_monitor = QtGui.QIcon(base_dir + 'apps/icon-monitor.png') #self.icon_unknown = QtGui.QIcon(base_dir + 
'apps/icon-unknown.png') #self.icon_logviewer = QtGui.QIcon(base_dir + '../32x32/apps/logviewer.png') self.icon_logger = self.icon_edit self.icon_help = self.icon_unknown self.icon_reset = self.icon_reload #----------------------------- def declareCorAnaParameters( self ) : # Possible typs for declaration : 'str', 'int', 'long', 'float', 'bool' # GUIInstrExpRun.py.py # self.fname_cp = self.declareParameter( name='FNAME_CONFIG_PARS', val_def='confpars.txt', type='str' ) # self.fname_ped = self.declareParameter( name='FNAME_PEDESTALS', val_def='my_ped.txt', type='str' ) # self.fname_dat = self.declareParameter( name='FNAME_DATA', val_def='my_dat.txt', type='str' ) # self.instr_dir = self.declareParameter( name='INSTRUMENT_DIR', val_def='/reg/d/psdm', type='str' ) # self.instr_name = self.declareParameter( name='INSTRUMENT_NAME', val_def='XCS', type='str' ) # self.exp_name = self.declareParameter( name='EXPERIMENT_NAME', val_def='xcsi0112', type='str' ) # self.str_run_number = self.declareParameter( name='RUN_NUMBER', val_def='0015', type='str' ) # self.str_run_number_dark= self.declareParameter( name='RUN_NUMBER_DARK', val_def='0014', type='str' ) # GUIMainTB.py # GUIMainSplit.py self.current_tab = self.declareParameter( name='CURRENT_TAB' , val_def='Files', type='str' ) # GUILogger.py self.log_level = self.declareParameter( name='LOG_LEVEL_OF_MSGS', val_def='info', type='str' ) # GUIFiles.py self.current_file_tab = self.declareParameter( name='CURRENT_FILE_TAB' , val_def='Work/Results', type='str' ) # GUIRun.py self.current_run_tab = self.declareParameter( name='CURRENT_RUN_TAB' , val_def='Input', type='str' ) # GUIWorkResDirs.py self.dir_work = self.declareParameter( name='DIRECTORY_WORK', val_def='./work', type='str' ) self.dir_results = self.declareParameter( name='DIRECTORY_RESULTS', val_def='./results', type='str' ) self.fname_prefix = self.declareParameter( name='FILE_NAME_PREFIX', val_def='cora-', type='str' ) self.fname_prefix_cora = self.declareParameter( 
name='FILE_NAME_PREFIX_CORA', val_def='cora-proc', type='str' ) # GUIDark.py self.use_dark_xtc_all = self.declareParameter( name='USE_DARK_XTC_ALL_CHUNKS', val_def=True, type='bool' ) self.in_dir_dark = self.declareParameter( name='IN_DIRECTORY_DARK', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' ) self.in_file_dark = self.declareParameter( name='IN_FILE_NAME_DARK', val_def='e167-r0020-s00-c00.xtc',type='str' ) # GUIFlatField.py self.ccdcorr_flatfield = self.declareParameter( name='CCD_CORRECTION_FLATFIELD', val_def=False, type='bool' ) self.dname_flat = self.declareParameter( name='DIRECTORY_FLAT', val_def='.',type='str' ) self.fname_flat = self.declareParameter( name='FILE_NAME_FLAT', val_def='flat_field.txt',type='str' ) #self.in_dir_flat = self.declareParameter( name='IN_DIRECTORY_FLAT', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' ) #self.in_file_flat = self.declareParameter( name='IN_FILE_NAME_FLAT', val_def='e167-r0020-s00-c00.xtc',type='str' ) # GUIBlemish.py self.ccdcorr_blemish = self.declareParameter( name='CCD_CORRECTION_BLEMISH', val_def=False, type='bool' ) self.dname_blem = self.declareParameter( name='DIRECTORY_BLEM', val_def='.',type='str' ) self.fname_blem = self.declareParameter( name='FILE_NAME_BLEM', val_def='blemish.txt',type='str' ) #self.in_dir_blem = self.declareParameter( name='IN_DIRECTORY_BLEM', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' ) #self.in_file_blem = self.declareParameter( name='IN_FILE_NAME_BLEM', val_def='e167-r0020-s00-c00.xtc',type='str' ) # GUIData.py self.use_data_xtc_all = self.declareParameter( name='USE_DATA_XTC_ALL_CHUNKS', val_def=True, type='bool' ) self.is_active_data_gui = self.declareParameter( name='IS_ACTIVE_DATA_GUI', val_def=True, type='bool' ) self.in_dir_data = self.declareParameter( name='IN_DIRECTORY_DATA', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' ) self.in_file_data = self.declareParameter( name='IN_FILE_NAME_DATA', val_def='e167-r0020-s00-c00.xtc',type='str' ) # 
GUISetupBeamZero.py self.x_coord_beam0 = self.declareParameter( name='X_COORDINATE_BEAM_ZERO', val_def=1234.5, type='float' ) self.y_coord_beam0 = self.declareParameter( name='Y_COORDINATE_BEAM_ZERO', val_def=1216.5, type='float' ) self.x0_pos_in_beam0 = self.declareParameter( name='X_CCD_POS_IN_BEAM_ZERO', val_def=-59, type='float' ) self.y0_pos_in_beam0 = self.declareParameter( name='Y_CCD_POS_IN_BEAM_ZERO', val_def=175, type='float' ) # GUISetupSpecular.py self.x_coord_specular = self.declareParameter( name='X_COORDINATE_SPECULAR', val_def=-1, type='float' ) self.y_coord_specular = self.declareParameter( name='Y_COORDINATE_SPECULAR', val_def=-2, type='float' ) self.x0_pos_in_specular = self.declareParameter( name='X_CCD_POS_IN_SPECULAR', val_def=-3, type='float' ) self.y0_pos_in_specular = self.declareParameter( name='Y_CCD_POS_IN_SPECULAR', val_def=-4, type='float' ) # GUISetupData.py self.x0_pos_in_data = self.declareParameter( name='X_CCD_POS_IN_DATA', val_def=-51, type='float' ) self.y0_pos_in_data = self.declareParameter( name='Y_CCD_POS_IN_DATA', val_def=183, type='float' ) # GUISetupInfoLeft.py self.sample_det_dist = self.declareParameter( name='SAMPLE_TO_DETECTOR_DISTANCE', val_def=4000.1, type='float' ) self.exp_setup_geom = self.declareParameter( name='EXP_SETUP_GEOMETRY', val_def='Baem Zero', type='str' ) self.photon_energy = self.declareParameter( name='PHOTON_ENERGY', val_def=7.6543, type='float' ) self.nominal_angle = self.declareParameter( name='NOMINAL_ANGLE', val_def=-1, type='float' ) self.real_angle = self.declareParameter( name='REAL_ANGLE', val_def=-1, type='float' ) # GUIImgSizePosition.py self.col_begin = self.declareParameter( name='IMG_COL_BEGIN', val_def=0, type='int' ) self.col_end = self.declareParameter( name='IMG_COL_END', val_def=1339, type='int' ) self.row_begin = self.declareParameter( name='IMG_ROW_BEGIN', val_def=1, type='int' ) self.row_end = self.declareParameter( name='IMG_ROW_END', val_def=1299, type='int' ) # 
GUIKineticMode.py self.kin_mode = self.declareParameter( name='KINETICS_MODE', val_def='Non-Kinetics',type='str' ) self.kin_win_size = self.declareParameter( name='KINETICS_WIN_SIZE', val_def=1, type='int' ) self.kin_top_row = self.declareParameter( name='KINETICS_TOP_ROW', val_def=2, type='int' ) self.kin_slice_first = self.declareParameter( name='KINETICS_SLICE_FIRST', val_def=3, type='int' ) self.kin_slice_last = self.declareParameter( name='KINETICS_SLICE_LAST', val_def=4, type='int' ) # GUISetupPars.py self.bat_num = self.declareParameter( name='BATCH_NUM', val_def= 1, type='int' ) self.bat_num_max = self.declareParameter( name='BATCH_NUM_MAX', val_def= 9, type='int' ) #self.bat_data_is_used = self.declareParameter( name='BATCH_DATA_IS_USED', val_def=True, type='bool' ) self.bat_data_start = self.declareParameter( name='BATCH_DATA_START', val_def= 1, type='int' ) self.bat_data_end = self.declareParameter( name='BATCH_DATA_END' , val_def=-1, type='int' ) self.bat_data_total = self.declareParameter( name='BATCH_DATA_TOTAL', val_def=-1, type='int' ) self.bat_data_time = self.declareParameter( name='BATCH_DATA_TIME' , val_def=-1.0, type='float' ) self.bat_data_dt_ave = self.declareParameter( name='BATCH_DATA_DT_AVE', val_def=-1.0, type='float' ) self.bat_data_dt_rms = self.declareParameter( name='BATCH_DATA_DT_RMS', val_def=0.0, type='float' ) self.bat_dark_is_used = self.declareParameter( name='BATCH_DARK_IS_USED', val_def=True, type='bool' ) self.bat_dark_start = self.declareParameter( name='BATCH_DARK_START', val_def= 1, type='int' ) self.bat_dark_end = self.declareParameter( name='BATCH_DARK_END' , val_def=-1, type='int' ) self.bat_dark_total = self.declareParameter( name='BATCH_DARK_TOTAL', val_def=-1, type='int' ) self.bat_dark_time = self.declareParameter( name='BATCH_DARK_TIME' , val_def=-1.0, type='float' ) self.bat_dark_dt_ave = self.declareParameter( name='BATCH_DARK_DT_AVE', val_def=-1.0, type='float' ) self.bat_dark_dt_rms = self.declareParameter( 
name='BATCH_DARK_DT_RMS', val_def=0.0, type='float' ) #self.bat_flat_is_used = self.declareParameter( name='BATCH_FLAT_IS_USED', val_def=True, type='bool' ) self.bat_flat_start = self.declareParameter( name='BATCH_FLAT_START', val_def= 1, type='int' ) self.bat_flat_end = self.declareParameter( name='BATCH_FLAT_END' , val_def=-1, type='int' ) self.bat_flat_total = self.declareParameter( name='BATCH_FLAT_TOTAL', val_def=-1, type='int' ) self.bat_flat_time = self.declareParameter( name='BATCH_FLAT_TIME' , val_def=-1.0, type='float' ) self.bat_queue = self.declareParameter( name='BATCH_QUEUE', val_def='psfehq', type='str' ) self.bat_det_info = self.declareParameter( name='BATCH_DET_INFO', val_def='DetInfo(:Princeton)', type='str' ) #self.bat_det_info = self.declareParameter( name='BATCH_DET_INFO', val_def='DetInfo(XcsBeamline.0:Princeton.0)', type='str' ) self.bat_img_rec_mod = self.declareParameter( name='BATCH_IMG_REC_MODULE', val_def='ImgAlgos.PrincetonImageProducer', type='str' ) # BatchLogParser.py self.bat_img_rows = self.declareParameter( name='BATCH_IMG_ROWS', val_def= -1, type='int' ) self.bat_img_cols = self.declareParameter( name='BATCH_IMG_COLS', val_def= -1, type='int' ) self.bat_img_size = self.declareParameter( name='BATCH_IMG_SIZE', val_def= -1, type='int' ) self.bat_img_nparts = self.declareParameter( name='BATCH_IMG_NPARTS', val_def= 8, type='int' ) # GUIAnaSettingsLeft.py self.ana_type = self.declareParameter( name='ANA_TYPE', val_def='Static',type='str' ) self.ana_stat_meth_q = self.declareParameter( name='ANA_STATIC_METHOD_Q', val_def='evenly-spaced',type='str' ) self.ana_stat_meth_phi = self.declareParameter( name='ANA_STATIC_METHOD_PHI', val_def='evenly-spaced',type='str' ) self.ana_dyna_meth_q = self.declareParameter( name='ANA_DYNAMIC_METHOD_Q', val_def='evenly-spaced',type='str' ) self.ana_dyna_meth_phi = self.declareParameter( name='ANA_DYNAMIC_METHOD_PHI', val_def='evenly-spaced',type='str' ) self.ana_stat_part_q = self.declareParameter( 
name='ANA_STATIC_PARTITION_Q', val_def='1',type='str' ) self.ana_stat_part_phi = self.declareParameter( name='ANA_STATIC_PARTITION_PHI', val_def='2',type='str' ) self.ana_dyna_part_q = self.declareParameter( name='ANA_DYNAMIC_PARTITION_Q', val_def='3',type='str' ) self.ana_dyna_part_phi = self.declareParameter( name='ANA_DYNAMIC_PARTITION_PHI', val_def='4',type='str' ) self.ana_mask_type = self.declareParameter( name='ANA_MASK_TYPE', val_def='no-mask',type='str' ) self.ana_mask_fname = self.declareParameter( name='ANA_MASK_FILE', val_def='./roi-mask.txt',type='str' ) self.ana_mask_dname = self.declareParameter( name='ANA_MASK_DIRECTORY', val_def='.',type='str' ) # GUIAnaSettingsRight.py self.ana_ndelays = self.declareParameter( name='ANA_NDELAYS_PER_MTAU_LEVEL', val_def=4, type='int' ) self.ana_nslice_delays = self.declareParameter( name='ANA_NSLICE_DELAYS_PER_MTAU_LEVEL', val_def=4, type='int' ) self.ana_npix_to_smooth= self.declareParameter( name='ANA_NPIXELS_TO_SMOOTH', val_def=100, type='int' ) self.ana_smooth_norm = self.declareParameter( name='ANA_SMOOTH_SYM_NORM', val_def=False, type='bool' ) self.ana_two_corfuns = self.declareParameter( name='ANA_TWO_TIME_CORFUNS_CONTROL', val_def=False, type='bool' ) self.ana_spec_stab = self.declareParameter( name='ANA_CHECK_SPECKLE_STABILITY', val_def=False, type='bool' ) self.lld_type = self.declareParameter( name='LOW_LEVEL_DISC_TYPE', val_def='NONE',type='str' ) self.lld_adu = self.declareParameter( name='LOW_LEVEL_DISC_ADU', val_def=15, type='float' ) self.lld_rms = self.declareParameter( name='LOW_LEVEL_DISC_RMS', val_def=4, type='float' ) self.res_ascii_out = self.declareParameter( name='RES_ASCII_OUTPUT', val_def=True, type='bool' ) self.res_fit1 = self.declareParameter( name='RES_PERFORM_FIT1', val_def=False, type='bool' ) self.res_fit2 = self.declareParameter( name='RES_PERFORM_FIT1', val_def=False, type='bool' ) self.res_fit_cust = self.declareParameter( name='RES_PERFORM_FIT_CUSTOM', val_def=False, type='bool' 
) self.res_png_out = self.declareParameter( name='RES_PNG_FILES', val_def=False, type='bool' ) self.res_save_log = self.declareParameter( name='RES_SAVE_LOG_FILE', val_def=False, type='bool' ) # GUILoadResults.py self.res_load_mode = self.declareParameter( name='RES_LOAD_MODE', val_def='NONE',type='str' ) self.res_fname = self.declareParameter( name='RES_LOAD_FNAME', val_def='NONE',type='str' ) # GUISystemSettingsRight.py self.thickness_type = self.declareParameter( name='THICKNESS_TYPE', val_def='NONORM',type='str' ) self.thickness_sample = self.declareParameter( name='THICKNESS_OF_SAMPLE', val_def=-1, type='float' ) self.thickness_attlen = self.declareParameter( name='THICKNESS_ATTENUATION_LENGTH', val_def=-2, type='float' ) self.ccd_orient = self.declareParameter( name='CCD_ORIENTATION', val_def='180', type='str' ) self.y_is_flip = self.declareParameter( name='Y_IS_FLIPPED', val_def='True', type='bool' ) # GUICCDSettings.py self.ccdset_pixsize = self.declareParameter( name='CCD_SETTINGS_PIXEL_SIZE', val_def=0.1, type='float' ) self.ccdset_adcsatu = self.declareParameter( name='CCD_SETTINGS_ADC_SATTURATION', val_def=12345, type='int' ) self.ccdset_aduphot = self.declareParameter( name='CCD_SETTINGS_ADU_PER_PHOTON', val_def=123, type='float' ) self.ccdset_ccdeff = self.declareParameter( name='CCD_SETTINGS_EFFICIENCY', val_def=0.55, type='float' ) self.ccdset_ccdgain = self.declareParameter( name='CCD_SETTINGS_GAIN', val_def=0.8, type='float' ) # GUIELogPostingDialog.py # GUIELogPostingFields.py #self.elog_post_cbx_state = self.declareParameter( name='ELOG_POST_CBX_STATE', val_def=True, type='bool' ) self.elog_post_rad = self.declareParameter( name='ELOG_POST_RAD_STATE', val_def='Default', type='str' ) self.elog_post_ins = self.declareParameter( name='ELOG_POST_INSTRUMENT', val_def='AMO', type='str' ) self.elog_post_exp = self.declareParameter( name='ELOG_POST_EXPERIMENT', val_def='amodaq09', type='str' ) self.elog_post_run = self.declareParameter( 
name='ELOG_POST_RUN', val_def='825', type='str' ) self.elog_post_tag = self.declareParameter( name='ELOG_POST_TAG', val_def='TAG1', type='str' ) self.elog_post_res = self.declareParameter( name='ELOG_POST_RESPONCE', val_def='None', type='str' ) self.elog_post_msg = self.declareParameter( name='ELOG_POST_MESSAGE', val_def='EMPTY MSG', type='str' ) self.elog_post_att = self.declareParameter( name='ELOG_POST_ATTACHED_FILE', val_def='None', type='str' ) #GUIViewControl.py self.vc_cbx_show_more = self.declareParameter( name='SHOW_MORE_BUTTONS', val_def=True, type='bool' ) #----------------------------- imon_names = [ ('BldInfo(FEEGasDetEnergy)', None ,'str'), \ ('BldInfo(XCS-IPM-02)', None ,'str'), \ ('BldInfo(XCS-IPM-mono)', None ,'str'), \ ('DetInfo(XcsBeamline.1:Ipimb.4)', None ,'str'), \ ('DetInfo(XcsBeamline.1:Ipimb.5)', None ,'str') ] self.imon_name_list = self.declareListOfPars( 'IMON_NAMES', imon_names ) #----------------------------- imon_short_names = [ ('FEEGasDetEnergy', None ,'str'), \ ('XCS-IPM-02', None ,'str'), \ ('XCS-IPM-mono', None ,'str'), \ ('Ipimb.4', None ,'str'), \ ('Ipimb.5', None ,'str') ] self.imon_short_name_list = self.declareListOfPars( 'IMON_SHORT_NAMES', imon_short_names ) #----------------------------- imon_cbxs = [ (True, True ,'bool'), \ (True, True ,'bool'), \ (True, True ,'bool'), \ (True, True ,'bool'), \ (True, True ,'bool') ] self.imon_ch1_list = self.declareListOfPars( 'IMON_CH1', deepcopy(imon_cbxs) ) self.imon_ch2_list = self.declareListOfPars( 'IMON_CH2', deepcopy(imon_cbxs) ) self.imon_ch3_list = self.declareListOfPars( 'IMON_CH3', deepcopy(imon_cbxs) ) self.imon_ch4_list = self.declareListOfPars( 'IMON_CH4', deepcopy(imon_cbxs) ) #----------------------------- imon_norm_cbx = [ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool') ] self.imon_norm_cbx_list = self.declareListOfPars( 'IMON_NORM_CBX', imon_norm_cbx ) #----------------------------- 
imon_sele_cbx = [ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool'), \ (False, False ,'bool') ] self.imon_sele_cbx_list = self.declareListOfPars( 'IMON_SELE_CBX', imon_sele_cbx ) #----------------------------- imon_sele_min = [ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float') ] self.imon_sele_min_list = self.declareListOfPars( 'IMON_SELE_MIN', imon_sele_min ) #----------------------------- imon_sele_max = [ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float'), \ (-1., -1. ,'float') ] self.imon_sele_max_list = self.declareListOfPars( 'IMON_SELE_MAX', imon_sele_max ) #----------------------------- self.imon_pars_list = zip( self.imon_name_list, self.imon_ch1_list, self.imon_ch2_list, self.imon_ch3_list, self.imon_ch4_list, self.imon_norm_cbx_list, self.imon_sele_cbx_list, self.imon_sele_min_list, self.imon_sele_max_list, self.imon_short_name_list ) #print self.imon_pars_list #----------------------------- def defineStyles( self ) : self.styleYellowish = "background-color: rgb(255, 255, 220); color: rgb(0, 0, 0);" # Yellowish self.stylePink = "background-color: rgb(255, 200, 220); color: rgb(0, 0, 0);" # Pinkish self.styleYellowBkg = "background-color: rgb(255, 255, 120); color: rgb(0, 0, 0);" # Pinkish self.styleGray = "background-color: rgb(230, 240, 230); color: rgb(0, 0, 0);" # Gray self.styleGreenish = "background-color: rgb(100, 255, 200); color: rgb(0, 0, 0);" # Greenish self.styleGreenPure = "background-color: rgb(150, 255, 150); color: rgb(0, 0, 0);" # Green self.styleBluish = "background-color: rgb(200, 200, 255); color: rgb(0, 0, 0);" # Bluish self.styleWhite = "background-color: rgb(255, 255, 255); color: rgb(0, 0, 0);" self.styleRedBkgd = "background-color: rgb(255, 0, 0); color: rgb(0, 0, 0);" # Red background #self.styleTitle = "background-color: rgb(239, 235, 231, 255); color: rgb(100, 160, 100);" # Gray 
bkgd #self.styleTitle = "color: rgb(150, 160, 100);" self.styleBlue = "color: rgb(000, 000, 255);" self.styleBuriy = "color: rgb(150, 100, 50);" self.styleRed = "color: rgb(255, 0, 0);" self.styleGreen = "color: rgb(0, 150, 0);" self.styleYellow = "color: rgb(0, 150, 150);" self.styleBkgd = self.styleYellowish self.styleTitle = self.styleBuriy self.styleLabel = self.styleBlue self.styleEdit = self.styleWhite self.styleEditInfo = self.styleGreenish self.styleEditBad = self.styleRedBkgd self.styleButton = self.styleGray self.styleButtonOn = self.styleBluish self.styleButtonClose = self.stylePink self.styleButtonWarning= self.styleYellowBkg self.styleButtonGood = self.styleGreenPure self.styleButtonBad = self.stylePink self.styleBox = self.styleGray self.styleCBox = self.styleYellowish self.styleStatusGood = self.styleGreen self.styleStatusWarning= self.styleYellow self.styleStatusAlarm = self.styleRed self.styleTitleBold = self.styleTitle + 'font-size: 18pt; font-family: Courier; font-weight: bold;' self.styleWhiteFixed = self.styleWhite + 'font-family: Fixed;' self.colorEditInfo = QtGui.QColor(100, 255, 200) self.colorEditBad = QtGui.QColor(255, 0, 0) self.colorEdit = QtGui.QColor('white') def printParsDirectly( self ) : logger.info('Direct use of parameter:' + self.fname_cp .name() + ' ' + self.fname_cp .value(), __name__ ) logger.info('Direct use of parameter:' + self.fname_ped.name() + ' ' + self.fname_ped.value(), __name__ ) logger.info('Direct use of parameter:' + self.fname_dat.name() + ' ' + self.fname_dat.value(), __name__ ) #----------------------------- confpars = ConfigParametersCorAna (fname=getConfigFileFromInput()) #----------------------------- # # In case someone decides to run this module # if __name__ == "__main__" : confpars.printParameters() #confpars.printParsDirectly() confpars.saveParametersInFile() confpars.printListOfPars('IMON_NAMES') sys.exit ( 'End of test for ConfigParametersCorAna' ) #-----------------------------
[ "dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7" ]
dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
becaebfd57de87517f83fb188ffe1860ee44300a
f08c79663074bfd104135e1347f3228b29620d24
/csrt.py
6da5c8ba236a0d1428f0aadc2f3e058f81921930
[]
no_license
battcheeks/Computer-Vision
140e3d0a3b20cba637b275dc6d7ebc5f413a2e31
ffa8f277312fc4553e25db09a6f53a107d7f4d41
refs/heads/master
2022-11-10T19:33:31.721000
2020-06-27T09:54:15
2020-06-27T09:54:15
275,339,008
0
0
null
null
null
null
UTF-8
Python
false
false
2,146
py
# Track a user-selected object with an OpenCV tracker and print the center of
# the tracked bounding box for every frame.
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2

global a, b

# --video: optional input file (webcam when absent); --tracker: algorithm name.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str, help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
                help="OpenCV object tracker type")
args = vars(ap.parse_args())

# OpenCV < 3.3 exposes one generic factory; newer builds one per algorithm.
(major, minor) = cv2.__version__.split(".")[:2]
if int(major) == 3 and int(minor) < 3:
    tracker = cv2.Tracker_create(args["tracker"].upper())
else:
    OPENCV_OBJECT_TRACKERS = {
        "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        "mosse": cv2.TrackerMOSSE_create
    }
    tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()

initBB = None  # bounding box; stays None until the user presses "s"

if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)  # give the camera sensor time to warm up
else:
    vs = cv2.VideoCapture(args["video"])
fps = None

# loop over frames from the video stream
while True:
    # VideoCapture.read() returns (ok, frame); VideoStream.read() just a frame.
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break

    frame = imutils.resize(frame, width=500)
    (H, W) = frame.shape[:2]

    # check to see if we are currently tracking an object
    if initBB is not None:
        (success, box) = tracker.update(frame)
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # print/record the box center as "cx,cy"
            print(str(x + w / 2) + "," + str(y + h / 2))
            a = str(x + w / 2)
            b = str(y + h / 2)
        # update the FPS counter
        fps.update()
        fps.stop()

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        # "s" lets the user draw the region of interest to start tracking
        initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                               showCrosshair=True)
        tracker.init(frame, initBB)
        fps = FPS().start()
    elif key == ord("q"):
        break

if not args.get("video", False):
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
[ "noreply@github.com" ]
noreply@github.com
28e7dee0700c6fe42c004b939fcaa2b9ff69d27e
eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd
/homeassistant/components/trafikverket_weatherstation/const.py
7bb53dc5356a0b8a392104982912658806275659
[ "Apache-2.0" ]
permissive
JeffLIrion/home-assistant
53966b81b5d5816679f12fc761f79e8777c738d6
8f4ec89be6c2505d8a59eee44de335abe308ac9f
refs/heads/dev
2023-08-22T09:42:02.399000
2022-02-16T01:26:13
2022-02-16T01:26:13
136,679,169
5
2
Apache-2.0
2023-09-13T06:59:25
2018-06-09T00:58:35
Python
UTF-8
Python
false
false
466
py
"""Adds constants for Trafikverket Weather integration."""
from homeassistant.const import Platform

# Integration domain and configuration key.
DOMAIN = "trafikverket_weatherstation"
CONF_STATION = "station"

# Platforms this integration sets up.
PLATFORMS = [Platform.SENSOR]

ATTRIBUTION = "Data provided by Trafikverket"

# State-attribute keys.
ATTR_MEASURE_TIME = "measure_time"
ATTR_ACTIVE = "active"

# Sensor types for which a missing (None) reading is mapped to zero
# (per the constant's name — consumers live elsewhere in the integration).
NONE_IS_ZERO_SENSORS = {
    "air_temp",
    "road_temp",
    "wind_direction",
    "wind_speed",
    "wind_speed_max",
    "humidity",
    "precipitation_amount",
}
[ "noreply@github.com" ]
noreply@github.com
d3e3b20b1ce012f78bbc61c3eb7dc31075d016ca
c9094a4ed256260bc026514a00f93f0b09a5d60c
/tests/components/accuweather/test_system_health.py
749f516e44c748caf05503460e8a72ec34d085d3
[ "Apache-2.0" ]
permissive
turbokongen/home-assistant
824bc4704906ec0057f3ebd6d92788e096431f56
4ab0151fb1cbefb31def23ba850e197da0a5027f
refs/heads/dev
2023-03-12T05:49:44.508000
2021-02-17T14:06:16
2021-02-17T14:06:16
50,231,140
4
1
Apache-2.0
2023-02-22T06:14:30
2016-01-23T08:55:09
Python
UTF-8
Python
false
false
1,785
py
"""Test AccuWeather system health."""
import asyncio
from unittest.mock import Mock

from aiohttp import ClientError

from homeassistant.components.accuweather.const import COORDINATOR, DOMAIN
from homeassistant.setup import async_setup_component

from tests.common import get_system_health_info


async def test_accuweather_system_health(hass, aioclient_mock):
    """Test AccuWeather system health."""
    # Server reachable and 42 requests remaining on the API quota.
    aioclient_mock.get("https://dataservice.accuweather.com/", text="")
    hass.config.components.add(DOMAIN)
    assert await async_setup_component(hass, "system_health", {})

    hass.data[DOMAIN] = {}
    hass.data[DOMAIN]["0123xyz"] = {}
    hass.data[DOMAIN]["0123xyz"][COORDINATOR] = Mock(
        accuweather=Mock(requests_remaining="42")
    )

    info = await get_system_health_info(hass, DOMAIN)

    # System-health values may be coroutines; resolve them before comparing.
    for key, val in info.items():
        if asyncio.iscoroutine(val):
            info[key] = await val

    assert info == {
        "can_reach_server": "ok",
        "remaining_requests": "42",
    }


async def test_accuweather_system_health_fail(hass, aioclient_mock):
    """Test AccuWeather system health."""
    # Server unreachable and no requests left on the API quota.
    aioclient_mock.get("https://dataservice.accuweather.com/", exc=ClientError)
    hass.config.components.add(DOMAIN)
    assert await async_setup_component(hass, "system_health", {})

    hass.data[DOMAIN] = {}
    hass.data[DOMAIN]["0123xyz"] = {}
    hass.data[DOMAIN]["0123xyz"][COORDINATOR] = Mock(
        accuweather=Mock(requests_remaining="0")
    )

    info = await get_system_health_info(hass, DOMAIN)

    for key, val in info.items():
        if asyncio.iscoroutine(val):
            info[key] = await val

    assert info == {
        "can_reach_server": {"type": "failed", "error": "unreachable"},
        "remaining_requests": "0",
    }
[ "noreply@github.com" ]
noreply@github.com
1b32ea37e4c7f6126f63d235f5bc196330d2dc7e
d94b6845aeeb412aac6850b70e22628bc84d1d6d
/dimensions_of_motion/geometry.py
d7a317cb08a95e69785f8cd0af032ae5db8a1f29
[ "CC-BY-4.0", "Apache-2.0" ]
permissive
ishine/google-research
541aea114a68ced68736340e037fc0f8257d1ea2
c1ae273841592fce4c993bf35cdd0a6424e73da4
refs/heads/master
2023-06-08T23:02:25.502000
2023-05-31T01:00:56
2023-05-31T01:06:45
242,478,569
0
0
Apache-2.0
2020-06-23T01:55:11
2020-02-23T07:59:42
Jupyter Notebook
UTF-8
Python
false
false
7,466
py
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding: utf-8 -*-
"""Functions for sampling and warping images.

Points and offsets are expressed in texture coordinates: (0, 0) is the
top-left corner of an image and (1, 1) the bottom-right. Working in these
coordinates rather than pixel counts keeps everything
resolution-independent.
"""

import tensorflow as tf
import tensorflow_addons as tfa

import utils


def check_input_shape(name, tensor, axis, value):
    """Raise ValueError unless dimension `axis` of `tensor` equals `value`."""
    shape = tensor.shape.as_list()
    if shape[axis] != value:
        raise ValueError('Input "%s": dimension %d should be %s. Shape = %s' %
                         (name, axis, value, shape))


def pixel_center_grid(height, width):
    """Return a [height, width, 2] grid of (x, y) pixel-center texture coords.

    `height` and `width` are plain integers, not tensors. For example,
    pixel_center_grid(2, 3) produces:
      [[[1/6, 1/4], [3/6, 1/4], [5/6, 1/4]],
       [[1/6, 3/4], [3/6, 3/4], [5/6, 3/4]]]
    """
    h = tf.cast(height, dtype=tf.float32)
    w = tf.cast(width, dtype=tf.float32)
    # Pixel centers sit half a pixel in from each edge of [0, 1].
    ys = tf.linspace(0.5 / h, 1.0 - 0.5 / h, height)
    xs = tf.linspace(0.5 / w, 1.0 - 0.5 / w, width)
    xs, ys = tf.meshgrid(xs, ys)
    grid = tf.stack([xs, ys], axis=-1)
    assert grid.shape.as_list() == [height, width, 2]
    return grid


def sample_image(image, coords):
    """Sample points from an image, using bilinear filtering.

    Args:
      image: [B0, ..., Bn-1, height, width, channels] image data.
      coords: [B0, ..., Bn-1, ..., 2] (x, y) texture coordinates.

    Returns:
      [B0, ..., Bn-1, ..., channels] values bilinearly interpolated from
      `image` at each (x, y) position. The batch dimensions of `image` and
      `coords` must match.

    Raises:
      ValueError: if shapes are incompatible.
    """
    check_input_shape('coords', coords, -1, 2)
    tfshape = tf.shape(image)[-3:-1]
    height = tf.cast(tfshape[0], dtype=tf.float32)
    width = tf.cast(tfshape[1], dtype=tf.float32)
    # The resampler expects coordinates where (0, 0) is the center of the
    # top-left pixel and (width-1, height-1) the center of the bottom-right.
    pixel_coords = coords * [width, height] - 0.5

    # tfa.image.resampler handles exactly one batch dimension, so flatten all
    # batch dims, resample, then restore the original shape.
    batch_dims = len(image.shape.as_list()) - 3
    assert (image.shape.as_list()[:batch_dims] ==
            pixel_coords.shape.as_list()[:batch_dims])
    batched_image, _ = utils.flatten_batch(image, batch_dims)
    batched_coords, unflatten_coords = utils.flatten_batch(
        pixel_coords, batch_dims)
    resampled = tfa.image.resampler(batched_image, batched_coords)
    resampled = unflatten_coords(resampled)
    return resampled


def bilinear_forward_warp(image, coords, weights=None):
    """Forward-warp (splat) each image point with bilinear filtering.

    The reverse of sample_image, in the sense that scatter is the reverse of
    gather: each pixel is splatted onto the 2x2 block containing its target
    coordinates using bilinear weights (times the optional per-pixel
    weights). Pixels landing on the same location blend; locations nothing
    warps to are zero.

    Args:
      image: [B0, ..., Bn-1, height, width, channels] image data.
      coords: [B0, ..., Bn-1, height, width, 2] (x, y) texture coordinates.
      weights: [B0, ..., Bn-1, height, width] optional per-point weights
        (use for e.g. soft z-buffering); equal weights when omitted.

    Returns:
      [B0, ..., Bn-1, ..., channels] forward-warped image data. Batch
      dimensions of `image` and `coords` must match.
    """
    # Forward warp is computed as the gradient of a reverse warp against a
    # dummy image. An extra all-ones channel accumulates the total splatted
    # weight at each output pixel, which we divide by at the end.
    image_and_ones = tf.concat([image, tf.ones_like(image[Ellipsis, -1:])],
                               axis=-1)
    dummy = tf.zeros_like(image_and_ones)
    if weights is None:
        weighted_image = image_and_ones
    else:
        weighted_image = image_and_ones * weights[Ellipsis, tf.newaxis]
    with tf.GradientTape(watch_accessed_variables=False) as g:
        g.watch(dummy)
        reverse = tf.reduce_sum(
            sample_image(dummy, coords) * weighted_image, [-3, -2])
    grads = g.gradient(reverse, dummy)
    rgb = grads[Ellipsis, :-1]
    total = grads[Ellipsis, -1:]
    result = tf.math.divide_no_nan(rgb, total)
    return result


def flow_warp(image, flow):
    """Backward-warp images by resampling at grid + flow.

    Args:
      image: [..., H, W, C] images.
      flow: [..., H, W, 2] (x, y) texture-coordinate offsets, e.g. (1, 1) is
        an offset of the whole width and height of the image.

    Returns:
      [..., H, W, C] images bilinearly sampled at each pixel center plus its
      flow vector. Sampling outside the image yields zero values.
    """
    width = image.shape.as_list()[-2]
    height = image.shape.as_list()[-3]
    grid = pixel_center_grid(height, width)
    coords = grid + flow
    return sample_image(image, coords)


def flow_forward_warp(image, flow):
    """Forward-warp images by splatting each pixel at grid + flow.

    Args:
      image: [..., H, W, C] images.
      flow: [..., H, W, 2] (x, y) texture-coordinate offsets, e.g. (1, 1) is
        an offset of the whole width and height of the image.

    Returns:
      [..., H, W, C] warped images: each pixel is offset by its flow vector
      and splatted onto a 2x2 block (see bilinear_forward_warp). Locations
      no point warps to are zero.
    """
    width = image.shape.as_list()[-2]
    height = image.shape.as_list()[-3]
    grid = pixel_center_grid(height, width)
    coords = grid + flow
    return bilinear_forward_warp(image, coords)
[ "copybara-worker@google.com" ]
copybara-worker@google.com
6c67dfbe348126447354bd125a22c8c109b0ab15
a6bd7d3c2dfd6f22b22b7390a2230651e1f3febd
/1.py
412b8d7720f095722caac5fb02499d4d2a29fbb3
[]
no_license
NicolasQueiroga/Resolucao_PF_DesSoft--2020.1
5c9e8b8a19045763c5af1e32426fa4e2c1891096
fcafa170b0cec6dcaa658c3c72746d51ed8acc88
refs/heads/master
2022-11-06T14:15:31.544000
2020-06-22T21:02:12
2020-06-22T21:02:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
648
py
# Decrypt 'criptografado.txt' line by line by swapping the letter pairs
# s<->z, a<->e, r<->b, and print each decoded line.
#
# A single translation table replaces the original per-character if/elif
# chain: one C-level pass per line instead of a Python loop.
_DECRYPT = str.maketrans('sarbez', 'zebras')

with open('criptografado.txt', 'r') as arquivo:
    conteudo = arquivo.readlines()
    for linha in conteudo:
        # strip() drops the trailing newline before translating, exactly as
        # the original did.
        print(linha.strip().translate(_DECRYPT))
[ "noreply@github.com" ]
noreply@github.com
85ef73de5c1fceffd5aff452e2b9902d1718602f
5ca6730fa1178582d5f5875155f340ec0f406294
/practice_problem-16.py
44785ae4df282d5b7cc6f83173866d825eb41375
[]
no_license
MahadiRahman262523/Python_Code_Part-1
9740d5ead27209d69af4497eea410f2faef50ff3
e2f08e3d0564a003400743ae6050fd687c280639
refs/heads/main
2023-07-25T09:10:53.649000
2021-09-05T19:39:14
2021-09-05T19:39:14
403,396,706
0
0
null
null
null
null
UTF-8
Python
false
false
135
py
# Write a program to count the number of zeros in the following tuple:
# a = (7,0,8,0,0,9)
a = (7, 0, 8, 0, 0, 9)

# tuple.count does the scan for us; bind the result before printing.
zero_count = a.count(0)
print(zero_count)
[ "noreply@github.com" ]
noreply@github.com
a5235c799186a4e9446f729d5748ae459dd5f73e
4870960bc25aa9264d3ead399f1662bda3880e19
/Create_video.py
cdf7329a51f8592ae582ad5bbc39b6293f031836
[]
no_license
Megapixel-code/Video-maker-with-Reddit
5fff90a2241298044c8c567dcc39fc4e60218285
0f69670fce22e0de652448ee59236dfad29aee7b
refs/heads/main
2023-03-21T02:47:58.804000
2021-03-06T09:44:39
2021-03-06T09:44:39
344,571,437
1
0
null
null
null
null
UTF-8
Python
false
false
4,952
py
# Build a video from the week's top r/mildlyinteresting posts: download the
# images, synthesize TTS narration from the titles, and stitch everything
# together with intro/outro clips.
import glob
import os
import praw
import requests
import shutil
import json
import moviepy.editor as mp
import moviepy.video as mpv
import moviepy.video.fx.all as vfx
from gtts import gTTS
from PIL import Image, ImageDraw, ImageFont
from unidecode import unidecode
from os.path import isfile, join


def delete_all_folder():
    """Empty the 'reddit' working directory."""
    directory = 'reddit'
    os.chdir(directory)
    files = glob.glob('*')
    for file_name in files:
        os.unlink(file_name)
    os.chdir('..')


def deemojify(input_str):
    """Return input_str with non-ASCII characters transliterated to ASCII
    (via unidecode) and whitespace collapsed to single spaces."""
    return_output = ''
    for car in input_str:
        try:
            car.encode('ascii')
            return_output += car
        except UnicodeEncodeError:
            replaced = unidecode(str(car))
            if replaced != '':
                return_output += replaced
    return " ".join(return_output.split())


def get_images():
    """Download the week's top .jpg posts into 'reddit', one numbered image
    plus a numbered .txt holding its (de-emojified) title."""
    directory = 'reddit'
    # https://www.reddit.com/r/mildlyinteresting/top/?t=week
    with open('credentials.json') as c:
        params = json.load(c)
    reddit = praw.Reddit(
        client_id=params['client_id'],
        client_secret=params['api_key'],
        password=params['password'],
        user_agent='<reddit_top> accessAPI:v0.0.1 (by/u/Megapixel_YTB)',
        username=params['username']
    )
    subreddit = reddit.subreddit('mildlyinteresting')
    name = 0
    for submitions in subreddit.top("week", limit=50):
        name += 1
        url = submitions.url
        file_name = str(name)
        # Only direct .jpg links are usable as video frames.
        if url.endswith('.jpg'):
            file_name += '.jpg'
            found = True
        else:
            found = False
        if found:
            r = requests.get(url)
            with open(file_name, 'wb') as f:
                f.write(r.content)
            shutil.move(file_name, directory)
            caption = submitions.title
            title = str(name)
            title += '.txt'
            with open(title, 'wt') as c:
                c.write(deemojify(caption))
                c.close()
            shutil.move(title, directory)


def resize(im, fill_color=(0, 0, 0, 0)):
    """Letterbox image file `im` onto a 1920x1080 canvas and draw its caption
    (read from the matching .txt) centered in a bar along the top edge.
    Overwrites the original file."""
    img = Image.open(im)
    x, y = img.size
    # Scale the canvas so the vertical fit is exact, then center horizontally.
    sizex = int(y / 1080 * 1920)
    sizey = y
    new_im = Image.new('RGB', (sizex, sizey), fill_color)
    new_im.paste(img, (int((sizex - x) / 2), int((sizey - y) / 2)))
    new_im = new_im.resize((1920, 1080), Image.LANCZOS)
    f = open(im[:-4] + '.txt', 'r')
    content = f.read()
    draw = ImageDraw.Draw(new_im)
    draw.rectangle(((0, 0), (1920, 25)), fill=(0, 0, 0))
    font = ImageFont.truetype('arialbd.ttf', size=18)
    txt_size = draw.textsize(content, font=font)[0]
    draw.text((int((1920 - txt_size) / 2), 0), content,
              fill=(255, 255, 255), font=font)
    f.close()
    os.remove(im)
    new_im.save(im)


def create_tts():
    """Generate an English TTS .mp3 next to every caption .txt in 'reddit/'."""
    for file in [f for f in os.listdir('reddit/')
                 if isfile(join('reddit/', f)) and f.endswith('.txt')]:
        f = open('reddit/' + file, 'r')
        my_txt = f.read()
        f.close()
        out = gTTS(text=my_txt, lang='en', slow=False)
        out.save('reddit/' + file[:-4] + '.mp3')


def finish_video():
    """Assemble the final video: each image shown for the length of its
    narration (+3 s of silence), composited over 'space.mpeg', wrapped with
    intro/outro, and written to 'ma_video.mp4'."""
    all_clips = []
    for file in [f for f in os.listdir('reddit/')
                 if isfile(join('reddit/', f)) and f.endswith('.mp3')]:
        sound = mp.AudioFileClip('reddit/' + file)
        # Pad each narration with 3 seconds of silence.
        sound = mp.concatenate_audioclips(
            [sound, mp.AudioClip(lambda t: 0, duration=3)])
        all_clips.append(sound)
    all_video_clips = []
    x = 0
    for file in [f for f in os.listdir('reddit/')
                 if isfile(join('reddit/', f)) and f.endswith('.jpg')]:
        resize('reddit/' + file)
        vid = mp.ImageClip('reddit/' + file, duration=all_clips[x].duration)
        all_video_clips.append(vid)
        x += 1
    sound = mp.concatenate_audioclips(all_clips)
    video = mp.concatenate_videoclips(all_video_clips)
    video.audio = sound
    video.fps = 60
    # Key out pure black so the space background shows through.
    background = mp.VideoFileClip('space.mpeg')
    masked_clip = mpv.fx.all.mask_color(video, color=[0, 0, 0], thr=0, s=0)
    midle_video = mp.CompositeVideoClip(
        [background, masked_clip]).set_duration(video.duration)
    intro = mp.VideoFileClip('Intro.mpeg')
    outro = mp.VideoFileClip('Outro.mpeg')
    final_video = mp.concatenate_videoclips([intro, midle_video, outro])
    os.remove('ma_video.mp4')
    final_video.write_videofile('ma_video.mp4')


def create():
    """Run the whole pipeline with progress messages."""
    print()
    delete_all_folder()
    print('Importing the images .....', end='')
    get_images()
    print(' done !')
    print('creating tts .............', end='')
    create_tts()
    print(' done !')
    print('Making the video .........')
    print('===============================================================================================')
    finish_video()
    print('===============================================================================================')
[ "noreply@github.com" ]
noreply@github.com
1b406b2dc38004db14248af19fb7f7be9b8e7f6c
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_FreeTShirt_a.py
0207b362ff64f55d6e7a49f758c368374d2c5dc1
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405000
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
404
py
# Code Jam "Free T-Shirt": for each input line, print/emit the
# lexicographically largest string reachable by the allowed rearrangement.
#
# Fix: the original never closed either file handle (`fw` and the iterator
# from open('A-small-i')); both are now managed with `with`.


def argmax(s):
    """Return (index, char) pairs for every position holding max(s)."""
    z = max(s)
    return [(idx, c) for idx, c in enumerate(s) if c == z]


def last(s):
    """Return the best string for s: try moving each occurrence of the
    maximum character to the front, recursing on the remainder, and keep
    the lexicographically largest result."""
    if len(s) <= 1:
        return s
    return max([s[idx] + last(s[:idx]) + s[idx + 1:] for idx, c in argmax(s)])


with open('A-small-i') as fin, open('a-o', 'w') as fw:
    for idx, line in enumerate(fin):
        if idx == 0:
            continue  # first line is the case count, not a case
        s = line.strip()
        print(s)
        fw.write('Case #{0}: {1}\n'.format(idx, last(s)))
[ "[dhuo@tcd.ie]" ]
[dhuo@tcd.ie]
87476fc48dcc81c8407d184dc2ba254400452b87
c5389783a234bc755571f84e619ac296cff4aa4b
/views.py
f9c5a4ac7104989c4e658990236b9aeb89d4533d
[]
no_license
abhishekmajhi42/the_weather_app
0f5381b2f832077334bb6597c2f55eca6c4b7709
e52cf4a218c0464fbe542cf47a94b70aa103a796
refs/heads/master
2022-12-24T09:08:56.809000
2020-09-27T17:15:26
2020-09-27T17:15:26
299,080,529
0
0
null
null
null
null
UTF-8
Python
false
false
862
py
from django.shortcuts import render
import requests

# Create your views here.
from weatherapp.forms import CityForm
from weatherapp.models import City


def index(request):
    """Render 'weather.html' with current OpenWeatherMap data for every
    stored city; a POST adds a new city first."""
    # NOTE(review): API key is hardcoded in the URL — consider moving it to
    # settings/environment.
    url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=271d1234d3f497eed5b1d80a07b3fcd1'

    if request.method == "POST":
        # NOTE(review): saved without form.is_valid() — confirm intended.
        form = CityForm(request.POST)
        form.save()
        # city='Las Vegas'

    form = CityForm()

    cities = City.objects.all()
    weather_data = []
    for city in cities:
        r = requests.get(url.format(city)).json()
        city_weather = {
            'city': city,
            'temperature': r['main']["temp"],
            'description': r["weather"][0]["description"],
            'icon': r["weather"][0]["icon"],
        }
        weather_data.append(city_weather)

    context = {'weather_data': weather_data, 'form': form}
    return render(request, 'weather.html', context)
[ "noreply@github.com" ]
noreply@github.com
aaebcd30e1283732990421e052eb0d5cecb7a098
f2abbeb892780b584feb2fd94e7ec5da8ecdc763
/exporter/opentelemetry-exporter-otlp-proto-http/setup.py
510eceba6c5abfb14c1de8ec0b03b368df4c4f0c
[ "Apache-2.0" ]
permissive
SigNoz/opentelemetry-python
6fa5fd92584d2fb3ca71c958004cd56332c764a7
9e397c895797891b709a9f1c68345e9a1c357ad8
refs/heads/main
2023-07-15T10:43:17.064000
2021-09-02T12:25:18
2021-09-02T12:25:18
401,617,913
1
0
Apache-2.0
2021-08-31T07:49:24
2021-08-31T07:49:24
null
UTF-8
Python
false
false
943
py
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import setuptools

BASE_DIR = os.path.dirname(__file__)
# version.py inside the package tree is the single source of __version__.
VERSION_FILENAME = os.path.join(
    BASE_DIR,
    "src",
    "opentelemetry",
    "exporter",
    "otlp",
    "proto",
    "http",
    "version.py",
)

# Execute version.py in an empty namespace and read __version__ out of it,
# so the version need not be duplicated here.
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
    exec(f.read(), PACKAGE_INFO)

setuptools.setup(version=PACKAGE_INFO["__version__"])
[ "noreply@github.com" ]
noreply@github.com
4f3a8886eb59966fc5887dccc5604e3f38aad5d6
89e21b0c761d450ef8381bc4575e16d29244fb79
/rr.py
70882a541779763715acdbdd1f495fc1d98a7fe4
[]
no_license
BnkColon/operating-systems
0669b2368cc98b363fdaaf1fd67e134ecdcce7d6
bf3b60f96f37e727e576e339520659ba5e7f8edd
refs/heads/master
2021-01-18T07:30:17.331000
2016-10-07T15:31:21
2016-10-07T15:31:21
68,657,227
0
0
null
null
null
null
UTF-8
Python
false
false
3,584
py
# Bianca I. Colon Rosado
# $ python rr.py quanta input.txt
#
# Round-robin scheduling simulator: reads one burst time per line from the
# input file and prints, for each process, the total elapsed quantum time at
# which it finished, plus the average.
#
# Fix: ported from Python 2 (print statements) to Python 3; dropped the
# unused `from string import *`; the input file is now closed via `with`.
from sys import argv


class Process:
    """A simulated process: an id, remaining burst time, and the cumulative
    quantum count at which it finished (qConsumption)."""

    def __init__(self, pid, ptime):
        self.id = pid                # ID of this instance
        self.time = int(ptime)      # remaining burst time
        self.qConsumption = 0       # elapsed quanta when the process finishes

    def __str__(self):
        # String form is the id concatenated with the consumption counter.
        return str(self.id) + str(self.qConsumption)

    def setTime(self, ptime):
        self.time = ptime

    def getTime(self):
        return self.time

    def getID(self):
        return self.id

    def setQuanta(self, qConsumption):
        self.qConsumption = qConsumption

    def getQuanta(self):
        return self.qConsumption


def main():
    # Command line: $ python rr.py quanta input.txt ; otherwise defaults.
    if len(argv) == 3:
        quanta = int(argv[1])
        fileInput = argv[2]
    else:
        quanta = 3
        fileInput = 'input.txt'

    with open(fileInput) as f:
        lines = f.readlines()

    # One Process per input line; its index is its ID.
    lists = [Process(i, int(line.strip())) for i, line in enumerate(lines)]
    results = [None] * len(lists)

    quantaTotal = 0  # global clock, in quantum units
    average = 0

    while lists:
        finished_processes = []  # indices finished during this pass
        for i in range(len(lists)):
            if lists[i].getTime() <= quanta:
                # Process finishes within this quantum; charge only what it
                # actually used (== quanta when the times are equal).
                quantaTotal += lists[i].getTime()
                lists[i].setQuanta(quantaTotal)
                lists[i].setTime(0)
                results[lists[i].getID()] = lists[i]
                # Prepend so later pops don't shift earlier indices.
                finished_processes.insert(0, i)
            else:
                # Process uses a full quantum and keeps waiting.
                lists[i].setTime(int(lists[i].getTime()) - quanta)
                quantaTotal += quanta
                lists[i].setQuanta(quantaTotal)
        for i in finished_processes:
            lists.pop(i)

    for i in range(len(results)):
        print('P%d:%d' % (results[i].getID() + 1, results[i].getQuanta()))
        average += results[i].getQuanta()
    average = float(average) / len(results)
    print('Avg:%1.2f' % (average))


if __name__ == '__main__':
    main()
[ "noreply@github.com" ]
noreply@github.com
35c9dd19ef1d0bbdfd5644a92542771a5c6fbf58
10659041996f62d28cebf9ba92dcad2d6d5ecb26
/factors of cofficent in counting.py
02e97f638b7ca1550ec438f04c2c5d2c91a83ad3
[]
no_license
shailajaBegari/loops
4e92d04ee55d0564de417b7b126d4b77dc5a8816
173d5a2b6c3a921efe5a38e2d763dd59759b05c4
refs/heads/main
2023-07-13T23:37:39.642000
2021-08-28T10:48:31
2021-08-28T10:48:31
400,764,317
0
0
null
null
null
null
UTF-8
Python
false
false
152
py
n=int(input('enter number')) i=1 fact=1 count=0 while i<=n: if n%i==0: print(i) count=count+1 i=i+1 print(count,'count')
[ "noreply@github.com" ]
noreply@github.com
d3e7e9dae606fe6dc77d9c43997e9c592fbcd477
982bc95ab762829c8b6913e44504415cdd77241a
/account_easy_reconcile/base_reconciliation.py
b50c06b9eed699d96da272f0fb9dd9613177c235
[]
no_license
smart-solution/natuurpunt-finance
6b9eb65be96a4e3261ce46d7f0c31de3589e1e0d
6eeb48468792e09d46d61b89499467a44d67bc79
refs/heads/master
2021-01-23T14:42:05.017000
2020-11-03T15:56:35
2020-11-03T15:56:35
39,186,046
0
1
null
2020-11-03T15:56:37
2015-07-16T08:36:54
Python
UTF-8
Python
false
false
7,776
py
# -*- coding: utf-8 -*- ############################################################################## # # Copyright 2012-2013 Camptocamp SA (Guewen Baconnier) # Copyright (C) 2010 Sébastien Beau # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm from operator import itemgetter, attrgetter class easy_reconcile_base(orm.AbstractModel): """Abstract Model for reconciliation methods""" _name = 'easy.reconcile.base' _inherit = 'easy.reconcile.options' _columns = { 'account_id': fields.many2one( 'account.account', 'Account', required=True), 'partner_ids': fields.many2many( 'res.partner', string="Restrict on partners"), # other columns are inherited from easy.reconcile.options } def automatic_reconcile(self, cr, uid, ids, context=None): """ Reconciliation method called from the view. 
:return: list of reconciled ids, list of partially reconciled items """ if isinstance(ids, (int, long)): ids = [ids] assert len(ids) == 1, "Has to be called on one id" rec = self.browse(cr, uid, ids[0], context=context) return self._action_rec(cr, uid, rec, context=context) def _action_rec(self, cr, uid, rec, context=None): """ Must be inherited to implement the reconciliation :return: list of reconciled ids """ raise NotImplementedError def _base_columns(self, rec): """ Mandatory columns for move lines queries An extra column aliased as ``key`` should be defined in each query.""" aml_cols = ( 'id', 'debit', 'credit', 'date', 'period_id', 'ref', 'name', 'partner_id', 'account_id', 'move_id') return ["account_move_line.%s" % col for col in aml_cols] def _select(self, rec, *args, **kwargs): return "SELECT %s" % ', '.join(self._base_columns(rec)) def _from(self, rec, *args, **kwargs): return "FROM account_move_line" def _where(self, rec, *args, **kwargs): where = ("WHERE account_move_line.account_id = %s " "AND account_move_line.reconcile_id IS NULL ") # it would be great to use dict for params # but as we use _where_calc in _get_filter # which returns a list, we have to # accomodate with that params = [rec.account_id.id] if rec.partner_ids: where += " AND account_move_line.partner_id IN %s" params.append(tuple([l.id for l in rec.partner_ids])) return where, params def _get_filter(self, cr, uid, rec, context): ml_obj = self.pool.get('account.move.line') where = '' params = [] if rec.filter: dummy, where, params = ml_obj._where_calc( cr, uid, eval(rec.filter), context=context).get_sql() if where: where = " AND %s" % where return where, params def _below_writeoff_limit(self, cr, uid, rec, lines, writeoff_limit, context=None): precision = self.pool.get('decimal.precision').precision_get( cr, uid, 'Account') keys = ('debit', 'credit') sums = reduce( lambda line, memo: dict((key, value + memo[key]) for key, value in line.iteritems() if key in keys), lines) debit, credit = 
sums['debit'], sums['credit'] writeoff_amount = round(debit - credit, precision) return bool(writeoff_limit >= abs(writeoff_amount)), debit, credit def _get_rec_date(self, cr, uid, rec, lines, based_on='end_period_last_credit', context=None): period_obj = self.pool.get('account.period') def last_period(mlines): period_ids = [ml['period_id'] for ml in mlines] periods = period_obj.browse( cr, uid, period_ids, context=context) return max(periods, key=attrgetter('date_stop')) def last_date(mlines): return max(mlines, key=itemgetter('date')) def credit(mlines): return [l for l in mlines if l['credit'] > 0] def debit(mlines): return [l for l in mlines if l['debit'] > 0] if based_on == 'end_period_last_credit': return last_period(credit(lines)).date_stop if based_on == 'end_period': return last_period(lines).date_stop elif based_on == 'newest': return last_date(lines)['date'] elif based_on == 'newest_credit': return last_date(credit(lines))['date'] elif based_on == 'newest_debit': return last_date(debit(lines))['date'] # reconcilation date will be today # when date is None return None def _reconcile_lines(self, cr, uid, rec, lines, allow_partial=False, context=None): """ Try to reconcile given lines :param list lines: list of dict of move lines, they must at least contain values for : id, debit, credit :param boolean allow_partial: if True, partial reconciliation will be created, otherwise only Full reconciliation will be created :return: tuple of boolean values, first item is wether the items have been reconciled or not, the second is wether the reconciliation is full (True) or partial (False) """ if context is None: context = {} ml_obj = self.pool.get('account.move.line') writeoff = rec.write_off line_ids = [l['id'] for l in lines] below_writeoff, sum_debit, sum_credit = self._below_writeoff_limit( cr, uid, rec, lines, writeoff, context=context) date = self._get_rec_date( cr, uid, rec, lines, rec.date_base_on, context=context) rec_ctx = dict(context, date_p=date) if 
below_writeoff: if sum_credit < sum_debit: writeoff_account_id = rec.account_profit_id.id else: writeoff_account_id = rec.account_lost_id.id period_id = self.pool.get('account.period').find( cr, uid, dt=date, context=context)[0] ml_obj.reconcile( cr, uid, line_ids, type='auto', writeoff_acc_id=writeoff_account_id, writeoff_period_id=period_id, writeoff_journal_id=rec.journal_id.id, context=rec_ctx) return True, True elif allow_partial: ml_obj.reconcile_partial( cr, uid, line_ids, type='manual', context=rec_ctx) return True, False return False, False
[ "fabian.semal@smartsolution.be" ]
fabian.semal@smartsolution.be
0d4ab487c9de86cce3e199c7f5a4c2c87e57c607
2612f336d667a087823234daf946f09b40d8ca3d
/python/lib/Lib/site-packages/django/contrib/gis/tests/geoapp/models.py
89027eedfbc919466ac7c1335c42dfb57aea547a
[ "Apache-2.0" ]
permissive
tnorbye/intellij-community
df7f181861fc5c551c02c73df3b00b70ab2dd589
f01cf262fc196bf4dbb99e20cd937dee3705a7b6
refs/heads/master
2021-04-06T06:57:57.974000
2018-03-13T17:37:00
2018-03-13T17:37:00
125,079,130
2
0
Apache-2.0
2018-03-13T16:09:41
2018-03-13T16:09:41
null
UTF-8
Python
false
false
1,546
py
from django.contrib.gis.db import models from django.contrib.gis.tests.utils import mysql, spatialite # MySQL spatial indices can't handle NULL geometries. null_flag = not mysql class Country(models.Model): name = models.CharField(max_length=30) mpoly = models.MultiPolygonField() # SRID, by default, is 4326 objects = models.GeoManager() def __unicode__(self): return self.name class City(models.Model): name = models.CharField(max_length=30) point = models.PointField() objects = models.GeoManager() def __unicode__(self): return self.name # This is an inherited model from City class PennsylvaniaCity(City): county = models.CharField(max_length=30) objects = models.GeoManager() # TODO: This should be implicitly inherited. class State(models.Model): name = models.CharField(max_length=30) poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here. objects = models.GeoManager() def __unicode__(self): return self.name class Track(models.Model): name = models.CharField(max_length=30) line = models.LineStringField() objects = models.GeoManager() def __unicode__(self): return self.name if not spatialite: class Feature(models.Model): name = models.CharField(max_length=20) geom = models.GeometryField() objects = models.GeoManager() def __unicode__(self): return self.name class MinusOneSRID(models.Model): geom = models.PointField(srid=-1) # Minus one SRID. objects = models.GeoManager()
[ "dmitry.trofimov@jetbrains.com" ]
dmitry.trofimov@jetbrains.com
b6e187de710d37037dd7c0d830a50e7eaee1aa28
786027545626c24486753351d6e19093b261cd7d
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/xcoff/XCoffSectionHeaderFlags.pyi
43a745532a3157885655ec9c25a175e6ac3df2ec
[ "MIT" ]
permissive
kohnakagawa/ghidra_scripts
51cede1874ef2b1fed901b802316449b4bf25661
5afed1234a7266c0624ec445133280993077c376
refs/heads/main
2023-03-25T08:25:16.842000
2021-03-18T13:31:40
2021-03-18T13:31:40
338,577,905
14
1
null
null
null
null
UTF-8
Python
false
false
772
pyi
import java.lang class XCoffSectionHeaderFlags(object): STYP_BSS: int = 128 STYP_DATA: int = 64 STYP_DEBUG: int = 8192 STYP_EXCEPT: int = 128 STYP_INFO: int = 512 STYP_LOADER: int = 4096 STYP_OVRFLO: int = 32768 STYP_PAD: int = 8 STYP_TEXT: int = 32 STYP_TYPCHK: int = 16384 def __init__(self): ... def equals(self, __a0: object) -> bool: ... def getClass(self) -> java.lang.Class: ... def hashCode(self) -> int: ... def notify(self) -> None: ... def notifyAll(self) -> None: ... def toString(self) -> unicode: ... @overload def wait(self) -> None: ... @overload def wait(self, __a0: long) -> None: ... @overload def wait(self, __a0: long, __a1: int) -> None: ...
[ "tsunekou1019@gmail.com" ]
tsunekou1019@gmail.com
2b05aafb513ea6ad66865aaa00981d7ff30884e1
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2733/40186/320060.py
85feba17c1b35b4a3536d8fcea4725c382ec5d13
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122000
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
438
py
inp=input() a=input() if inp=='8 3' and a=='10 7 9 3 4 5 8 17': print(10) print(17) print(9) elif a=='5 27 1 3 4 2 8 17': print(5) print(27) print(5) elif a=='105 2 9 3 8 5 7 7': print(2) print(8) print(9) print(105) print(7) elif inp=='101011': print(18552) elif inp=='10101101010111110100110100101010110001010010101001': print(322173207) else: print(inp) print(a) print(b)
[ "1069583789@qq.com" ]
1069583789@qq.com
d16b64f8695cc6c84f4d5603fce8acf2f90a4ceb
bff6ba0d61a3226a4d4a2b48e37cb2d8c9db7e73
/child_python.py
b08cfb4eba4df2cfd1d93b121652a2df3004268c
[]
no_license
richoey/testrepo
bf4f14b2011fa3194e0c212fccc1a6ee04fd9264
6fea5e1bafccabdeab4dd739161ea0ed685b2d0e
refs/heads/main
2023-03-30T09:09:20.798000
2021-04-08T05:29:42
2021-04-08T05:29:42
355,756,548
0
0
null
2021-04-08T05:29:42
2021-04-08T03:52:06
Jupyter Notebook
UTF-8
Python
false
false
35
py
print("New child python to merge")
[ "noreply@github.com" ]
noreply@github.com
1cf8dbafbb2c140e16cc4c24f316af8cc7589ca6
a2d902c5976adce374dce2877b059cfb64e1d5b6
/testfile/testthread.py
dfc08c97b301cdb9073cd8daf4842b760d4e7420
[]
no_license
buaanostop/Autotest
53eebc387014b6fade9a93598eaf0f74814d2f3e
53de72f1d203b4f50725583ab90988bd254cce07
refs/heads/master
2020-05-03T00:34:34.500000
2019-05-14T08:37:53
2019-05-14T08:37:53
178,313,227
0
4
null
2019-05-11T16:32:42
2019-03-29T01:57:03
HTML
UTF-8
Python
false
false
11,424
py
# -*- coding: utf-8 -*- """Test类 调用Test类中的各种方法来对模拟器或手机界面进行操作。 """ import random import sys import time import threading from com.android.monkeyrunner import MonkeyRunner,MonkeyDevice,MonkeyImage class Operation(): """操作类,给Test类记录各种操作""" def __init__(self, optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring ): self.optype = optype self.x1 = x1 self.y1 = y1 self.x2 = x2 self.y2 = y2 self.number = number self.interval_time = interval_time self.drag_time = drag_time self.keyorstring = keyorstring class Test(threading.Thread): def __init__(self): """初始化""" threading.Thread.__init__(self) self.__flag = threading.Event() # 暂停标志 self.__flag.set() # 设为True self.__running = threading.Event() # 运行标志 self.__running.set() # 设为True self.__resolution_x = 0 # 分辨率x self.__resolution_y = 0 # 分辨率y self.__device = None # 设备 self.__oplist = [] # 模拟操作的列表 def connect(self, resolution_x=540, resolution_y=960): """连接模拟器或手机 参数 ---------- resolution_x : int 分辨率x值 resolution_y : int 分辨率y值 返回值 ---------- int 返回 1 : 成功连接设备 返回 0 : 连接设备失败 示例 ---------- >>> a.connect(540, 960) """ self.__resolution_x = resolution_x self.__resolution_y = resolution_y print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connect ...") self.__device = MonkeyRunner.waitForConnection() # 连接设备或模拟器 if not self.__device: print("Please connect a device to start.") return 0 else: print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connection succeeded.") return 1 def open_app(self, package_name, activity_name): """打开设备上的应用 参数 ---------- package_name : string 应用的Package Name 包名 activity_name: string 应用的Activity Name 活动名 示例 ---------- >>> a.open_app('com.Jelly.JellyFish','com.unity3d.player.UnityPlayerActivity') """ print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Oppen application ...") self.__device.startActivity(component = package_name + "/" + activity_name) MonkeyRunner.sleep(10) print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Open application succeeded.") def pause(self): print("pause") self.__flag.clear() def resume(self): 
print("resume") self.__flag.set() def stop(self): print("stop") self.__flag.set() self.__running.clear() def touch(self,pos_x, pos_y, touch_number=1, interval_time=1): """点击屏幕测试 参数 ------------- pos_x : int 点击的位置x pos_y : int 点击的位置y touch_numbere : int 点击的次数,默认为1 interval_time : float 多次点击时间隔时间,默认为1秒 """ #optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring op = Operation('touch',pos_x,pos_y,0,0,touch_number,interval_time,0,0) self.__oplist.append(op) def random_touch(self, touch_number, interval_time): """随机点击屏幕测试 参数 ----------- touch_number : int 点击的次数 interval_time : float 每两次点击间隔的时间,秒为单位 示例 ----------- >>> a.random_touch(200, 1) """ op = Operation('random_touch',0,0,0,0,touch_number,interval_time,0,0) self.__oplist.append(op) def press(self, key_name): """按键测试 参数 ----------- key_name : string 按键的名字 """ op = Operation('press',0,0,0,0,0,0,0,key_name) self.__oplist.append(op) def type(self, typestring): """键盘输入测试 参数 ------- typestring : string 要输入的字符串 """ op = Operation('type',0,0,0,0,0,0,0,typestring) self.__oplist.append(op) def drag(self,start_x, start_y, end_x, end_y, drag_time=1, drag_number=1, interval_time=1): """滑动屏幕测试 参数 --------------- start_x : int 滑动起始位置x start_y : int 滑动起始位置y end_x : int 滑动结束位置x end_y : int 滑动结束位置y drag_time : float 滑动持续时间,默认为1秒 drag_number : int 滑动次数,默认为1次 interval_time : float 滑动间隔时间,默认为1秒 """ #optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring op = Operation('drag',start_x,start_y,end_x,end_y,drag_number,interval_time,drag_time,0) self.__oplist.append(op) def random_drag(self, drag_number, interval_time): """随机滑动屏幕测试 参数 ----------- drag_number : int 滑动的次数 interval_time : float 每两次滑动间隔的时间,秒为单位 示例 ------------ >>> a.random_drag(200, 3) """ op = Operation('random_drag',0,0,0,0,drag_number,interval_time,1,0) self.__oplist.append(op) def run(self): opnum = len(self.__oplist) if(opnum <= 0): return for op in self.__oplist: # touch if op.optype == 'touch': touch_number = op.number pos_x = op.x1 pos_y = 
op.y1 interval_time = op.interval_time num = 1 while(num <= touch_number): if self.__running.isSet(): self.__flag.wait() print("%stouch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "), num, pos_x, pos_y)) self.__device.touch(pos_x, pos_y, 'DOWN_AND_UP') num += 1 MonkeyRunner.sleep(interval_time) else: self.__oplist[:] = [] return # random_touch elif op.optype == 'random_touch': touch_number = op.number interval_time = op.interval_time print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test start.") num = 1 while(num <= touch_number): if self.__running.isSet(): self.__flag.wait() x = random.randint(0, self.__resolution_x) # 随机生成位置x y = random.randint(0, self.__resolution_y) # 随机生成位置y print("%srandom_touch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x,y)) self.__device.touch(x, y, 'DOWN_AND_UP') # 点击(x,y) MonkeyRunner.sleep(interval_time) num += 1 else: self.__oplist[:] = [] return print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test finished.") # drag elif op.optype == 'drag': start_x = op.x1 start_y = op.y1 end_x = op.x2 end_y = op.y2 drag_time = op.drag_time drag_number = op.number interval_time = op.interval_time num = 1 while(num <= drag_number): if self.__running.isSet(): self.__flag.wait() print("%sdrag %d (%d,%d) to (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,start_x,start_y,end_x,end_y)) self.__device.drag((start_x, start_y), (end_x, end_y), drag_time, 10) MonkeyRunner.sleep(interval_time) num += 1 else: self.__oplist[:] = [] return #random_drag elif op.optype == 'random_drag': drag_number = op.number interval_time = op.interval_time print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test start.") num = 1 while(num <= drag_number): if self.__running.isSet(): self.__flag.wait() x_start = random.randint(0, self.__resolution_x) y_start = random.randint(0, self.__resolution_y) x_end = random.randint(0,self.__resolution_x) y_end = random.randint(0,self.__resolution_y) print("%srandom_drag %d (%d,%d) to 
(%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x_start,y_start,x_end,y_end)) self.__device.drag((x_start, y_start), (x_end, y_end), 1, 10) MonkeyRunner.sleep(interval_time) num += 1 else: self.__oplist[:] = [] return print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test finished.") #press elif op.optype == 'press': key_name = op.keyorstring if self.__running.isSet(): self.__flag.wait() print("%spress %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),key_name)) self.__device.press(key_name, 'DOWN_AND_UP') else: self.__oplist[:] = [] return #type elif op.optype == 'type': typestring = op.keyorstring if self.__running.isSet(): print("%stype %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),typestring)) self.__device.type(typestring) else: self.__oplist[:] = [] return else: print("optype error") ##例子 ##t1 = Test() ##t1.connect() ##t1.random_touch(5,5) ##t1.start() ##time.sleep(6) ##t1.pause() ##time.sleep(6) ##t1.resume() ##time.sleep(6) ##t1.stop() ## ##t1.join()
[ "noreply@github.com" ]
noreply@github.com
8edcd266e14b62bb5053d6369487e7c9726e0dda
38c10c01007624cd2056884f25e0d6ab85442194
/chrome/chrome_resources.gyp
492536ca0787a392f82c67762f4eb395a3eb7c79
[ "BSD-3-Clause" ]
permissive
zenoalbisser/chromium
6ecf37b6c030c84f1b26282bc4ef95769c62a9b2
e71f21b9b4b9b839f5093301974a45545dad2691
refs/heads/master
2022-12-25T14:23:18.568000
2016-07-14T21:49:52
2016-07-23T08:02:51
63,980,627
0
2
BSD-3-Clause
2022-12-12T12:43:41
2016-07-22T20:14:04
null
UTF-8
Python
false
false
25,319
gyp
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/chrome', 'additional_modules_list_file': '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/internal/additional_modules_list.txt', }, 'targets': [ { # GN version: //chrome:extra_resources 'target_name': 'chrome_extra_resources', 'type': 'none', # These resources end up in resources.pak because they are resources # used by internal pages. Putting them in a separate pak file makes # it easier for us to reference them internally. 'actions': [ { # GN version: //chrome/browser/resources:memory_internals_resources 'action_name': 'generate_memory_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/memory_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:net_internals_resources 'action_name': 'generate_net_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/net_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:invalidations_resources 'action_name': 'generate_invalidations_resources', 'variables': { 'grit_grd_file': 'browser/resources/invalidations_resources.grd', }, 'includes': ['../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:password_manager_internals_resources 'action_name': 'generate_password_manager_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/password_manager_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:signin_internals_resources 'action_name': 'generate_signin_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/signin_internals_resources.grd', }, 'includes': ['../build/grit_action.gypi' ], }, { # GN version: 
//chrome/browser/resources:translate_internals_resources 'action_name': 'generate_translate_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/translate_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], 'conditions': [ ['OS != "ios"', { 'dependencies': [ '../components/components_resources.gyp:components_resources', '../content/browser/devtools/devtools_resources.gyp:devtools_resources', '../content/browser/tracing/tracing_resources.gyp:tracing_resources', 'browser/devtools/webrtc_device_provider_resources.gyp:webrtc_device_provider_resources', ], 'actions': [ { # GN version: //chrome/browser/resources:component_extension_resources 'action_name': 'generate_component_extension_resources', 'variables': { 'grit_grd_file': 'browser/resources/component_extension_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:options_resources 'action_name': 'generate_options_resources', 'variables': { 'grit_grd_file': 'browser/resources/options_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:settings_resources 'action_name': 'generate_settings_resources', 'variables': { 'grit_grd_file': 'browser/resources/settings/settings_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'copies': [ { # GN version: //chrome/browser/resources:extension_resource_demo 'destination': '<(PRODUCT_DIR)/resources/extension/demo', 'files': [ 'browser/resources/extension_resource/demo/library.js', ], }, ], }], ['chromeos==1 and disable_nacl==0 and disable_nacl_untrusted==0', { 'dependencies': [ 'browser/resources/chromeos/chromevox/chromevox.gyp:chromevox', ], }], ['enable_extensions==1', { 'actions': [ { # GN version: //chrome/browser/resources:quota_internals_resources 'action_name': 'generate_quota_internals_resources', 'variables': { 'grit_grd_file': 
'browser/resources/quota_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/browser/resources:sync_file_system_internals_resources 'action_name': 'generate_sync_file_system_internals_resources', 'variables': { 'grit_grd_file': 'browser/resources/sync_file_system_internals_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], }], ], }, { # GN version: //chrome/browser:chrome_internal_resources_gen 'target_name': 'chrome_internal_resources_gen', 'type': 'none', 'conditions': [ ['branding=="Chrome"', { 'actions': [ { 'action_name': 'generate_transform_additional_modules_list', 'variables': { 'additional_modules_input_path': 'browser/internal/resources/additional_modules_list.input', 'additional_modules_py_path': 'browser/internal/transform_additional_modules_list.py', }, 'inputs': [ '<(additional_modules_input_path)', ], 'outputs': [ '<(additional_modules_list_file)', ], 'action': [ 'python', '<(additional_modules_py_path)', '<(additional_modules_input_path)', '<@(_outputs)', ], 'message': 'Transforming additional modules list', } ], }], ], }, { # TODO(mark): It would be better if each static library that needed # to run grit would list its own .grd files, but unfortunately some # of the static libraries currently have circular dependencies among # generated headers. 
# # GN version: //chrome:resources 'target_name': 'chrome_resources', 'type': 'none', 'dependencies': [ 'chrome_internal_resources_gen', 'chrome_web_ui_mojo_bindings.gyp:web_ui_mojo_bindings', ], 'actions': [ { # GN version: //chrome/browser:resources 'action_name': 'generate_browser_resources', 'variables': { 'grit_grd_file': 'browser/browser_resources.grd', 'grit_additional_defines': [ '-E', 'additional_modules_list_file=<(additional_modules_list_file)', '-E', 'root_gen_dir=<(SHARED_INTERMEDIATE_DIR)', ], }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/common:resources 'action_name': 'generate_common_resources', 'variables': { 'grit_grd_file': 'common/common_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/renderer:resources 'action_name': 'generate_renderer_resources', 'variables': { 'grit_grd_file': 'renderer/resources/renderer_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'conditions': [ ['enable_extensions==1', { 'actions': [ { # GN version: //chrome/common:extensions_api_resources 'action_name': 'generate_extensions_api_resources', 'variables': { 'grit_grd_file': 'common/extensions_api_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], } ], }], ], 'includes': [ '../build/grit_target.gypi' ], }, { # TODO(mark): It would be better if each static library that needed # to run grit would list its own .grd files, but unfortunately some # of the static libraries currently have circular dependencies among # generated headers. 
# # GN version: //chrome:strings 'target_name': 'chrome_strings', 'type': 'none', 'actions': [ { # GN version: //chrome/app/resources:locale_settings 'action_name': 'generate_locale_settings', 'variables': { 'grit_grd_file': 'app/resources/locale_settings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:chromium_strings 'action_name': 'generate_chromium_strings', 'variables': { 'grit_grd_file': 'app/chromium_strings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:generated_resources 'action_name': 'generate_generated_resources', 'variables': { 'grit_grd_file': 'app/generated_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:google_chrome_strings 'action_name': 'generate_google_chrome_strings', 'variables': { 'grit_grd_file': 'app/google_chrome_strings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:settings_strings 'action_name': 'generate_settings_strings', 'variables': { 'grit_grd_file': 'app/settings_strings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:settings_chromium_strings 'action_name': 'generate_settings_chromium_strings', 'variables': { 'grit_grd_file': 'app/settings_chromium_strings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, { # GN version: //chrome/app:settings_google_chrome_strings 'action_name': 'generate_settings_google_chrome_strings', 'variables': { 'grit_grd_file': 'app/settings_google_chrome_strings.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], }, { # GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory_gen_sources 'target_name': 'make_chrome_ui_string_overrider_factory', 'type': 'none', 'hard_dependency': 1, 'dependencies': [ 'chrome_strings', ], 'actions': [ { 'action_name': 'generate_ui_string_overrider', 'inputs': [ '../components/variations/service/generate_ui_string_overrider.py', 
'<(grit_out_dir)/grit/generated_resources.h' ], 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc', '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h', ], 'action': [ 'python', '../components/variations/service/generate_ui_string_overrider.py', '-N', 'chrome_variations', '-o', '<(SHARED_INTERMEDIATE_DIR)', '-S', 'chrome/browser/metrics/variations/ui_string_overrider_factory.cc', '-H', 'chrome/browser/metrics/variations/ui_string_overrider_factory.h', '<(grit_out_dir)/grit/generated_resources.h', ], 'message': 'Generating generated resources map.', } ], }, { # GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory 'target_name': 'chrome_ui_string_overrider_factory', 'type': 'static_library', 'dependencies': [ '../components/components.gyp:variations_service', 'make_chrome_ui_string_overrider_factory', ], 'sources': [ '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc', '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h', ], }, { # GN version: //chrome/app/resources:platform_locale_settings 'target_name': 'platform_locale_settings', 'type': 'none', 'variables': { 'conditions': [ ['OS=="win"', { 'platform_locale_settings_grd': 'app/resources/locale_settings_win.grd', },], ['OS=="linux"', { 'conditions': [ ['chromeos==1', { 'platform_locale_settings_grd': 'app/resources/locale_settings_<(branding_path_component)os.grd', }, { # chromeos==0 'platform_locale_settings_grd': 'app/resources/locale_settings_linux.grd', }], ], },], ['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "linux"', { 'platform_locale_settings_grd': 'app/resources/locale_settings_linux.grd', },], ['OS == "mac" or OS == "ios"', { 'platform_locale_settings_grd': 'app/resources/locale_settings_mac.grd', }], ], # conditions }, # variables 'actions': [ { 'action_name': 
'generate_platform_locale_settings', 'variables': { 'grit_grd_file': '<(platform_locale_settings_grd)', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], }, { # GN version: //chrome/app/theme:theme_resources 'target_name': 'theme_resources', 'type': 'none', 'dependencies': [ '../ui/resources/ui_resources.gyp:ui_resources', 'chrome_unscaled_resources', ], 'actions': [ { 'action_name': 'generate_theme_resources', 'variables': { 'grit_grd_file': 'app/theme/theme_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], }, { # GN version: //chrome:packed_extra_resources 'target_name': 'packed_extra_resources', 'type': 'none', 'dependencies': [ 'chrome_extra_resources', 'packed_resources', ], 'actions': [ { 'includes': ['chrome_repack_resources.gypi'] }, ], 'conditions': [ ['OS != "mac" and OS != "ios"', { # We'll install the resource files to the product directory. The Mac # copies the results over as bundle resources in its own special way. 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ '<(SHARED_INTERMEDIATE_DIR)/repack/resources.pak' ], }, ], }], ], }, { # GN version: //chrome:packed_resources 'target_name': 'packed_resources', 'type': 'none', 'dependencies': [ # Update duplicate logic in repack_locales.py # MSVS needs the dependencies explictly named, Make is able to # derive the dependencies from the output files. 
'chrome_resources', 'chrome_strings', 'platform_locale_settings', 'theme_resources', '<(DEPTH)/components/components_strings.gyp:components_strings', '<(DEPTH)/net/net.gyp:net_resources', '<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources', '<(DEPTH)/ui/strings/ui_strings.gyp:ui_strings', ], 'actions': [ { # GN version: //chrome:repack_locales_pack 'action_name': 'repack_locales_pack', 'variables': { 'pak_locales': '<(locales)', }, 'includes': ['chrome_repack_locales.gypi'] }, { # GN version: //chrome:repack_pseudo_locales_pack 'action_name': 'repack_pseudo_locales_pack', 'variables': { 'pak_locales': '<(pseudo_locales)', }, 'includes': ['chrome_repack_locales.gypi'] }, { 'includes': ['chrome_repack_chrome_100_percent.gypi'] }, { 'includes': ['chrome_repack_chrome_200_percent.gypi'] }, { 'includes': ['chrome_repack_chrome_material_100_percent.gypi'] }, { 'includes': ['chrome_repack_chrome_material_200_percent.gypi'] }, ], 'conditions': [ # GN version: chrome_repack_locales.gni template("_repack_one_locale") ['OS != "ios"', { 'dependencies': [ # Update duplicate logic in repack_locales.py '<(DEPTH)/content/app/resources/content_resources.gyp:content_resources', '<(DEPTH)/content/app/strings/content_strings.gyp:content_strings', '<(DEPTH)/device/bluetooth/bluetooth_strings.gyp:bluetooth_strings', '<(DEPTH)/third_party/WebKit/public/blink_resources.gyp:blink_resources', ], }, { # else 'dependencies': [ # Update duplicate logic in repack_locales.py '<(DEPTH)/ios/chrome/ios_chrome_resources.gyp:ios_strings_gen', ], 'actions': [ { 'includes': ['chrome_repack_chrome_300_percent.gypi'] }, ], }], ['use_ash==1', { 'dependencies': [ # Update duplicate logic in repack_locales.py '<(DEPTH)/ash/ash_resources.gyp:ash_resources', '<(DEPTH)/ash/ash_strings.gyp:ash_strings', ], }], ['toolkit_views==1', { 'dependencies': [ '<(DEPTH)/ui/views/resources/views_resources.gyp:views_resources', ], }], ['chromeos==1', { 'dependencies': [ # Update duplicate logic in repack_locales.py 
'<(DEPTH)/remoting/remoting.gyp:remoting_resources', '<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_resources', '<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_strings', ], }], ['enable_autofill_dialog==1 and OS!="android"', { 'dependencies': [ # Update duplicate logic in repack_locales.py '<(DEPTH)/third_party/libaddressinput/libaddressinput.gyp:libaddressinput_strings', ], }], ['enable_extensions==1', { 'dependencies': [ # Update duplicate logic in repack_locales.py '<(DEPTH)/extensions/extensions_strings.gyp:extensions_strings', ], }], ['enable_app_list==1', { 'dependencies': [ '<(DEPTH)/ui/app_list/resources/app_list_resources.gyp:app_list_resources', ], }], ['OS != "mac" and OS != "ios"', { # Copy pak files to the product directory. These files will be picked # up by the following installer scripts: # - Windows: chrome/installer/mini_installer/chrome.release # - Linux: chrome/installer/linux/internal/common/installer.include # Ensure that the above scripts are updated when adding or removing # pak files. # Copying files to the product directory is not needed on the Mac # since the framework build phase will copy them into the framework # bundle directly. 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ '<(SHARED_INTERMEDIATE_DIR)/repack/chrome_100_percent.pak' ], }, { 'destination': '<(PRODUCT_DIR)/locales', 'files': [ '<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(locales))' ], }, { 'destination': '<(PRODUCT_DIR)/pseudo_locales', 'files': [ '<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(pseudo_locales))' ], }, ], 'conditions': [ ['branding=="Chrome"', { 'copies': [ { # This location is for the Windows and Linux builds. For # Windows, the chrome.release file ensures that these files # are copied into the installer. 
Note that we have a separate # section in chrome_dll.gyp to copy these files for Mac, as it # needs to be dropped inside the framework. 'destination': '<(PRODUCT_DIR)/default_apps', 'files': ['<@(default_apps_list)'] }, ], }], ['enable_hidpi == 1', { 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ '<(SHARED_INTERMEDIATE_DIR)/repack/chrome_200_percent.pak', ], }, ], }], ['enable_topchrome_md == 1', { 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ '<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_100_percent.pak', ], }, ], }], ['enable_hidpi == 1 and enable_topchrome_md == 1', { 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ '<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_200_percent.pak', ], }, ], }], ], # conditions }], # end OS != "mac" and OS != "ios" ], # conditions }, { # GN version: //chrome/app/theme:chrome_unscaled_resources 'target_name': 'chrome_unscaled_resources', 'type': 'none', 'actions': [ { 'action_name': 'generate_chrome_unscaled_resources', 'variables': { 'grit_grd_file': 'app/theme/chrome_unscaled_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], }, { # GN version: //chrome/browser/resources:options_test_resources 'target_name': 'options_test_resources', 'type': 'none', 'actions': [ { 'action_name': 'generate_options_test_resources', 'variables': { 'grit_grd_file': 'browser/resources/options_test_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], }, { # GN version: //chrome/test/data/resources:webui_test_resources 'target_name': 'webui_test_resources', 'type': 'none', 'actions': [ { 'action_name': 'generate_webui_test_resources', 'variables': { 'grit_grd_file': 'test/data/webui_test_resources.grd', }, 'includes': [ '../build/grit_action.gypi' ], }, ], 'includes': [ '../build/grit_target.gypi' ], }, { # GN version: //chrome:browser_tests_pak 'target_name': 'browser_tests_pak', 'type': 
'none', 'dependencies': [ 'options_test_resources', 'webui_test_resources', ], 'actions': [ { 'action_name': 'repack_browser_tests_pak', 'variables': { 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/chrome/options_test_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/chrome/webui_test_resources.pak', ], 'pak_output': '<(PRODUCT_DIR)/browser_tests.pak', }, 'includes': [ '../build/repack_action.gypi' ], }, ], }, ], # targets }
[ "zeno.albisser@hemispherian.com" ]
zeno.albisser@hemispherian.com
37267b9d176703bfa0ccfc0f5b44ef463f69ea53
9930638a8061f1e9f7c2313c34846d6c5295d747
/Quiz41_Yusuf Syarif Iqbal_1201184320.py
2038f1a58232b71efd2b910a885abbdcba9de425
[ "Unlicense" ]
permissive
yusufsyarif/Quiz-4-Alpro
6ee82c066b53694c9e05c43d6921f46dda1a7657
dc59622409a500d73cc0ddbbed2fa8850c919ba7
refs/heads/master
2020-07-24T15:16:48.701000
2019-09-12T05:06:42
2019-09-12T05:06:42
207,966,670
0
0
null
null
null
null
UTF-8
Python
false
false
177
py
StudentofFRI = ["Anton", "Budi", "Doni", "Huda"] print("List of Student = ") print(StudentofFRI[0]) print(StudentofFRI[1]) print(StudentofFRI[2]) print(StudentofFRI[3])
[ "noreply@github.com" ]
noreply@github.com
63897bcb7d1d451d51497a89ed42b40c7c919bcd
8c7853822047c1908b7bb5f39531d721dacbed3f
/Python Practice/Assignment.py
af81f0a5477dd1bcad731c9ef95518de49085947
[]
no_license
AjayKrish24/Assessment
63cbd8386f4f6fe649abcc3603485ed8647cf6c3
6233e268b9812c7f5f859ec03a83691fd3419472
refs/heads/master
2022-04-08T06:35:11.142000
2020-02-28T11:37:22
2020-02-28T11:37:22
235,511,361
0
1
null
null
null
null
UTF-8
Python
false
false
2,325
py
1) string = input("Enter a string : ") str_list = [] count = "" for x in string: if x not in str_list: str_list.append(x) for x in str_list: count = count + x + str(string.count(x)) print(count) #=======================o/p====================================== Enter a string : aaabbcc a3b2c2 #*************************************************************************************************** 2) string = [(),("a", "b"),(" ", " ")] for i in string: if len(i) == 0: print("There is an empty tuple in the list") #=======================o/p====================================== There is an empty tuple in the list #*************************************************************************************************** 4) word = input() print(word.title()) #=======================o/p====================================== welcome to python Welcome To Python #*************************************************************************************************** 5) import re ip = input("Enter IP : ") print(re.match(r"\b(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\b", ip)) #=======================o/p====================================== Enter IP : 123.45.6.88 <_sre.SRE_Match object; span=(0, 11), match='123.45.6.88'> #*************************************************************************************************** 6) string_list = ["Welcome", "to", "Python"] print(" ".join(string_list)) #=======================o/p====================================== string_list = ["Welcome", "to", "Python"] print(" ".join(string_list)) #***************************************************************************************************
[ "noreply@github.com" ]
noreply@github.com
47508a3b9f2141ed5940c7582db50110eb72e9aa
eef1a0e31f723757c5ca8665b9433a9df86d17aa
/func/python/bench_json_loads.py
311fdb7808b7871b2a891b4608fb5b8789176806
[ "Apache-2.0" ]
permissive
robinvanemden/Faasm
09a69fce30300a12d5ba7df55c40a39d81ee5d8f
e005cca20fb4be4ee9ae30f25a5873964b2efd7f
refs/heads/master
2020-12-01T14:10:51.471000
2019-12-20T10:05:17
2019-12-20T10:05:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
import json from performance.benchmarks.bm_json_loads import DICT, TUPLE, DICT_GROUP, bench_json_loads if __name__ == "__main__": json_dict = json.dumps(DICT) json_tuple = json.dumps(TUPLE) json_dict_group = json.dumps(DICT_GROUP) objs = (json_dict, json_tuple, json_dict_group) for x in range(100): bench_json_loads(objs)
[ "noreply@github.com" ]
noreply@github.com
fe4155275d3a9240634ebe2b2de50705201231ac
a140a7ca1bc5f0af773cb3d22081b4bb75138cfa
/234_palindromLinkedList.py
b1b3a195574aefe83cc26bf49500c32c48a8a3b2
[]
no_license
YeahHuang/Leetcode
d02bc99d2e890ed0e829515b6f85c4ca6394a1a1
78d36486ad4ec2bfb88fd35a5fd7fd4f0003ee97
refs/heads/master
2021-07-14T01:53:06.701000
2020-06-22T03:01:46
2020-06-22T03:01:46
166,235,118
1
0
null
null
null
null
UTF-8
Python
false
false
585
py
class Solution: def isPalindrome(self, head: ListNode) -> bool: rev = None slow = fast = head while fast and fast.next: fast = fast.next.next rev, rev.next, slow = slow, rev, slow.next if fast: # fast is at the end, move slow one step further for comparison(cross middle one) slow = slow.next while rev and rev.val == slow.val: slow = slow.next rev = rev.next # if equivalent then rev become None, return True; otherwise return False return not rev
[ "noreply@github.com" ]
noreply@github.com
2b19f94d126f21b48d19683f2785c9ea50a508a4
24653fc7753145833651a39c5ccfd2dce9776ef9
/tests/test_kms/test_model.py
5d0ffc0978aeb6a962d1f2ed7df60755752a3331
[ "Apache-2.0" ]
permissive
cm-iwata/moto
fd47802b7bdec567eef575a14109a5fb0c92eea4
9640ec20d125248ac91243591c7db50daabfd135
refs/heads/master
2022-07-13T23:21:56.898000
2022-06-13T10:14:22
2022-06-13T10:14:22
143,237,437
0
0
Apache-2.0
2018-08-02T03:27:08
2018-08-02T03:27:08
null
UTF-8
Python
false
false
1,147
py
import pytest from moto.kms.models import KmsBackend PLAINTEXT = b"text" REGION = "us-east-1" @pytest.fixture def backend(): return KmsBackend(REGION) @pytest.fixture def key(backend): return backend.create_key( None, "ENCRYPT_DECRYPT", "SYMMETRIC_DEFAULT", "Test key", None, REGION ) def test_encrypt_key_id(backend, key): ciphertext, arn = backend.encrypt(key.id, PLAINTEXT, {}) assert ciphertext is not None assert arn == key.arn def test_encrypt_key_arn(backend, key): ciphertext, arn = backend.encrypt(key.arn, PLAINTEXT, {}) assert ciphertext is not None assert arn == key.arn def test_encrypt_alias_name(backend, key): backend.add_alias(key.id, "alias/test/test") ciphertext, arn = backend.encrypt("alias/test/test", PLAINTEXT, {}) assert ciphertext is not None assert arn == key.arn def test_encrypt_alias_arn(backend, key): backend.add_alias(key.id, "alias/test/test") ciphertext, arn = backend.encrypt( f"arn:aws:kms:{REGION}:{key.account_id}:alias/test/test", PLAINTEXT, {} ) assert ciphertext is not None assert arn == key.arn
[ "noreply@github.com" ]
noreply@github.com
cb07a323abf8740806bebc941c841ab0e659081b
e6ad1014aacaa92643f42952c278469177defc15
/napalm_ansible/napalm_diff_yang.py
d134e9bb1a69665bbfabcb13f326bcf956c8cb1d
[ "Apache-2.0" ]
permissive
cspeidel/napalm-ansible
d290ee7cc1abd9dd7d11044d5ddc542bd6658906
8ad4badb38d79ec5efd96faa666c71f7438dfa28
refs/heads/develop
2022-02-09T05:40:10.302000
2017-11-06T20:51:58
2017-11-06T20:51:58
110,727,639
0
0
Apache-2.0
2022-01-31T16:25:25
2017-11-14T18:18:35
Python
UTF-8
Python
false
false
3,409
py
#!/usr/bin/python # -*- coding: utf-8 -*- """ (c) 2017 David Barroso <dbarrosop@dravetech.com> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ from ansible.module_utils.basic import AnsibleModule try: import napalm_yang except ImportError: napalm_yang = None DOCUMENTATION = ''' --- module: napalm_diff_yang author: "David Barroso (@dbarrosop)" version_added: "0.0" short_description: "Return diff of two YANG objects" description: - "Create two YANG objects from dictionaries and runs mehtod" - "napalm_yang.utils.diff on them." requirements: - napalm-yang options: models: description: - List of models to parse required: True first: description: - Dictionary with the data to load into the first YANG object required: True second: description: - Dictionary with the data to load into the second YANG object required: True ''' EXAMPLES = ''' napalm_diff_yang: first: "{{ candidate.yang_model }}" second: "{{ running_config.yang_model }}" models: - models.openconfig_interfaces register: diff ''' RETURN = ''' diff: description: "Same output as the method napalm_yang.utils.diff" returned: always type: dict sample: { "interfaces": { "interface": { "both": { "Port-Channel1": { "config": { "description": { "first": "blah", "second": "Asadasd" } } } } } } ''' def get_root_object(models): """ Read list of models and returns a Root object with the proper models added. 
""" root = napalm_yang.base.Root() for model in models: current = napalm_yang for p in model.split("."): current = getattr(current, p) root.add_model(current) return root def main(): module = AnsibleModule( argument_spec=dict( models=dict(type="list", required=True), first=dict(type='dict', required=True), second=dict(type='dict', required=True), ), supports_check_mode=True ) if not napalm_yang: module.fail_json(msg="the python module napalm-yang is required") first = get_root_object(module.params["models"]) first.load_dict(module.params["first"]) second = get_root_object(module.params["models"]) second.load_dict(module.params["second"]) diff = napalm_yang.utils.diff(first, second) module.exit_json(yang_diff=diff) if __name__ == '__main__': main()
[ "dbarrosop@dravetech.com" ]
dbarrosop@dravetech.com
f50a62262f8a5fd229e3a174e46c8c9fedf3c950
cef09d1e6d5e7cd335387d0829211ffb0da18f48
/tests2/tests/wedge100/test_psumuxmon.py
73784296b42bf03dd786c25cca01bc61c37967ce
[]
no_license
theopolis/openbmc
a1ef2e3335efd19bf750117d79c1477d47948ff3
1784748ba29ee89bccacb2019a0bb86bd181c651
refs/heads/master
2020-12-14T07:20:40.273000
2019-04-20T05:25:17
2019-04-20T05:25:17
43,323,632
0
1
null
2015-09-28T19:56:24
2015-09-28T19:56:24
null
UTF-8
Python
false
false
2,143
py
#!/usr/bin/env python # # Copyright 2018-present Facebook. All Rights Reserved. # # This program file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License # along with this program in a file named COPYING; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, # Boston, MA 02110-1301 USA # import unittest import os import re from utils.shell_util import run_shell_cmd from utils.cit_logger import Logger class PsumuxmonTest(unittest.TestCase): def setUp(self): Logger.start(name=__name__) def tearDown(self): Logger.info("Finished logging for {}".format(self._testMethodName)) pass def test_psumuxmon_runit_sv_status(self): cmd = ["/usr/bin/sv status psumuxmon"] data = run_shell_cmd(cmd) self.assertIn("run", data, "psumuxmon process not running") def get_ltc_hwmon_path(self, path): pcard_vin = None result = re.split("hwmon", path) if os.path.isdir(result[0]): construct_hwmon_path = result[0] + "hwmon" x = None for x in os.listdir(construct_hwmon_path): if x.startswith('hwmon'): construct_hwmon_path = construct_hwmon_path + "/" + x + "/" + result[2].split("/")[1] return construct_hwmon_path return None def test_psumuxmon_ltc_sensor_path_exists(self): # Based on lab device deployment, sensor data might not be accessible. # Verify that path exists cmd = "/sys/bus/i2c/devices/7-006f/hwmon/hwmon*/in1_input" self.assertTrue(os.path.exists(self.get_ltc_hwmon_path(cmd)), "psumuxmon LTC sensor path accessible")
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
803c12056e1bb1f8bb8a7ab3310523f027750019
338a11833d8e83dd0e4580ab3dc21b95fe17183b
/logica.py
145a353284a5785f04491bdf85f74a8b95240a4a
[]
no_license
MaBlestastic/UML-TiendaElectronica
6f3294a68dca2ca9fc796669307886d108e0a32f
73a119e3224accdb9ffc90e4cb832f76590a8995
refs/heads/main
2023-09-06T00:47:24.907000
2021-11-13T00:04:01
2021-11-13T00:04:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
576
py
import beconnect def gestionarProv (nombreprod): beconnect.Mostrar("SELECT nombreprod FROM producto WHERE nombreprod = "+ nombreprod ) pass def controlarProd(): pass def comprarProd(): pass def controlarStockProd(): pass def venderCliente(): pass def reservarProd(): pass def gestionarProv (): Nombre = input ( "xd \t" ) Descripcion= input ("xd \t") sql = "INSERT INTO producto (nombreprod,descripprod) VALUES (%s,%s)" val= [(Nombre,Descripcion)] beconnect.EjecutarSQL_VAL(sql, val) gestionarProv ()
[ "noreply@github.com" ]
noreply@github.com
eecaffdbe17ebf356d4729447b601c155f4a4f9d
209c876b1e248fd67bd156a137d961a6610f93c7
/python/paddle/metric/metrics.py
aeec4022e218424eb20183b6917aa2f39a17d588
[ "Apache-2.0" ]
permissive
Qengineering/Paddle
36e0dba37d29146ebef4fba869490ecedbf4294e
591456c69b76ee96d04b7d15dca6bb8080301f21
refs/heads/develop
2023-01-24T12:40:04.551000
2022-10-06T10:30:56
2022-10-06T10:30:56
544,837,444
0
0
Apache-2.0
2022-10-03T10:12:54
2022-10-03T10:12:54
null
UTF-8
Python
false
false
28,411
py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six import abc import numpy as np from ..fluid.data_feeder import check_variable_and_dtype from ..fluid.layer_helper import LayerHelper from ..fluid.framework import core, _varbase_creator, _non_static_mode, _in_legacy_dygraph import paddle from paddle import _C_ops, _legacy_C_ops __all__ = [] def _is_numpy_(var): return isinstance(var, (np.ndarray, np.generic)) @six.add_metaclass(abc.ABCMeta) class Metric(object): r""" Base class for metric, encapsulates metric logic and APIs Usage: .. code-block:: text m = SomeMetric() for prediction, label in ...: m.update(prediction, label) m.accumulate() Advanced usage for :code:`compute`: Metric calculation can be accelerated by calculating metric states from model outputs and labels by build-in operators not by Python/NumPy in :code:`compute`, metric states will be fetched as NumPy array and call :code:`update` with states in NumPy format. Metric calculated as follows (operations in Model and Metric are indicated with curly brackets, while data nodes not): .. 
code-block:: text inputs & labels || ------------------ | || {model} || | || outputs & labels || | || tensor data {Metric.compute} || | || metric states(tensor) || | || {fetch as numpy} || ------------------ | || metric states(numpy) || numpy data | || {Metric.update} \/ ------------------ Examples: For :code:`Accuracy` metric, which takes :code:`pred` and :code:`label` as inputs, we can calculate the correct prediction matrix between :code:`pred` and :code:`label` in :code:`compute`. For examples, prediction results contains 10 classes, while :code:`pred` shape is [N, 10], :code:`label` shape is [N, 1], N is mini-batch size, and we only need to calculate accurary of top-1 and top-5, we could calculate the correct prediction matrix of the top-5 scores of the prediction of each sample like follows, while the correct prediction matrix shape is [N, 5]. .. code-block:: text def compute(pred, label): # sort prediction and slice the top-5 scores pred = paddle.argsort(pred, descending=True)[:, :5] # calculate whether the predictions are correct correct = pred == label return paddle.cast(correct, dtype='float32') With the :code:`compute`, we split some calculations to OPs (which may run on GPU devices, will be faster), and only fetch 1 tensor with shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1]. :code:`update` can be define as follows: .. 
code-block:: text def update(self, correct): accs = [] for i, k in enumerate(self.topk): num_corrects = correct[:, :k].sum() num_samples = len(correct) accs.append(float(num_corrects) / num_samples) self.total[i] += num_corrects self.count[i] += num_samples return accs """ def __init__(self): pass @abc.abstractmethod def reset(self): """ Reset states and result """ raise NotImplementedError( "function 'reset' not implemented in {}.".format( self.__class__.__name__)) @abc.abstractmethod def update(self, *args): """ Update states for metric Inputs of :code:`update` is the outputs of :code:`Metric.compute`, if :code:`compute` is not defined, the inputs of :code:`update` will be flatten arguments of **output** of mode and **label** from data: :code:`update(output1, output2, ..., label1, label2,...)` see :code:`Metric.compute` """ raise NotImplementedError( "function 'update' not implemented in {}.".format( self.__class__.__name__)) @abc.abstractmethod def accumulate(self): """ Accumulates statistics, computes and returns the metric value """ raise NotImplementedError( "function 'accumulate' not implemented in {}.".format( self.__class__.__name__)) @abc.abstractmethod def name(self): """ Returns metric name """ raise NotImplementedError( "function 'name' not implemented in {}.".format( self.__class__.__name__)) def compute(self, *args): """ This API is advanced usage to accelerate metric calculating, calulations from outputs of model to the states which should be updated by Metric can be defined here, where Paddle OPs is also supported. Outputs of this API will be the inputs of "Metric.update". 
If :code:`compute` is defined, it will be called with **outputs** of model and **labels** from data as arguments, all outputs and labels will be concatenated and flatten and each filed as a separate argument as follows: :code:`compute(output1, output2, ..., label1, label2,...)` If :code:`compute` is not defined, default behaviour is to pass input to output, so output format will be: :code:`return output1, output2, ..., label1, label2,...` see :code:`Metric.update` """ return args class Accuracy(Metric): """ Encapsulates accuracy metric logic. Args: topk (list[int]|tuple[int]): Number of top elements to look at for computing accuracy. Default is (1,). name (str, optional): String name of the metric instance. Default is `acc`. Example by standalone: .. code-block:: python import numpy as np import paddle x = paddle.to_tensor(np.array([ [0.1, 0.2, 0.3, 0.4], [0.1, 0.4, 0.3, 0.2], [0.1, 0.2, 0.4, 0.3], [0.1, 0.2, 0.3, 0.4]])) y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) m = paddle.metric.Accuracy() correct = m.compute(x, y) m.update(correct) res = m.accumulate() print(res) # 0.75 Example with Model API: .. 
code-block:: python import paddle from paddle.static import InputSpec import paddle.vision.transforms as T from paddle.vision.datasets import MNIST input = InputSpec([None, 1, 28, 28], 'float32', 'image') label = InputSpec([None, 1], 'int64', 'label') transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = MNIST(mode='train', transform=transform) model = paddle.Model(paddle.vision.models.LeNet(), input, label) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) model.prepare( optim, loss=paddle.nn.CrossEntropyLoss(), metrics=paddle.metric.Accuracy()) model.fit(train_dataset, batch_size=64) """ def __init__(self, topk=(1, ), name=None, *args, **kwargs): super(Accuracy, self).__init__(*args, **kwargs) self.topk = topk self.maxk = max(topk) self._init_name(name) self.reset() def compute(self, pred, label, *args): """ Compute the top-k (maximum value in `topk`) indices. Args: pred (Tensor): The predicted value is a Tensor with dtype float32 or float64. Shape is [batch_size, d0, ..., dN]. label (Tensor): The ground truth value is Tensor with dtype int64. Shape is [batch_size, d0, ..., 1], or [batch_size, d0, ..., num_classes] in one hot representation. Return: Tensor: Correct mask, a tensor with shape [batch_size, d0, ..., topk]. """ pred = paddle.argsort(pred, descending=True) pred = paddle.slice(pred, axes=[len(pred.shape) - 1], starts=[0], ends=[self.maxk]) if (len(label.shape) == 1) or \ (len(label.shape) == 2 and label.shape[-1] == 1): # In static mode, the real label data shape may be different # from shape defined by paddle.static.InputSpec in model # building, reshape to the right shape. 
label = paddle.reshape(label, (-1, 1)) elif label.shape[-1] != 1: # one-hot label label = paddle.argmax(label, axis=-1, keepdim=True) correct = pred == label return paddle.cast(correct, dtype='float32') def update(self, correct, *args): """ Update the metrics states (correct count and total count), in order to calculate cumulative accuracy of all instances. This function also returns the accuracy of current step. Args: correct: Correct mask, a tensor with shape [batch_size, d0, ..., topk]. Return: Tensor: the accuracy of current step. """ if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): correct = correct.numpy() num_samples = np.prod(np.array(correct.shape[:-1])) accs = [] for i, k in enumerate(self.topk): num_corrects = correct[..., :k].sum() accs.append(float(num_corrects) / num_samples) self.total[i] += num_corrects self.count[i] += num_samples accs = accs[0] if len(self.topk) == 1 else accs return accs def reset(self): """ Resets all of the metric state. """ self.total = [0.] * len(self.topk) self.count = [0] * len(self.topk) def accumulate(self): """ Computes and returns the accumulated metric. """ res = [] for t, c in zip(self.total, self.count): r = float(t) / c if c > 0 else 0. res.append(r) res = res[0] if len(self.topk) == 1 else res return res def _init_name(self, name): name = name or 'acc' if self.maxk != 1: self._name = ['{}_top{}'.format(name, k) for k in self.topk] else: self._name = [name] def name(self): """ Return name of metric instance. """ return self._name class Precision(Metric): """ Precision (also called positive predictive value) is the fraction of relevant instances among the retrieved instances. Refer to https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers Noted that this class manages the precision score only for binary classification task. Args: name (str, optional): String name of the metric instance. Default is `precision`. Example by standalone: .. 
code-block:: python import numpy as np import paddle x = np.array([0.1, 0.5, 0.6, 0.7]) y = np.array([0, 1, 1, 1]) m = paddle.metric.Precision() m.update(x, y) res = m.accumulate() print(res) # 1.0 Example with Model API: .. code-block:: python import numpy as np import paddle import paddle.nn as nn class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') def __getitem__(self, idx): return self.x[idx], self.y[idx] def __len__(self): return self.n model = paddle.Model(nn.Sequential( nn.Linear(10, 1), nn.Sigmoid() )) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) model.prepare( optim, loss=nn.BCELoss(), metrics=paddle.metric.Precision()) data = Data() model.fit(data, batch_size=16) """ def __init__(self, name='precision', *args, **kwargs): super(Precision, self).__init__(*args, **kwargs) self.tp = 0 # true positive self.fp = 0 # false positive self._name = name def update(self, preds, labels): """ Update the states based on the current mini-batch prediction results. Args: preds (numpy.ndarray): The prediction result, usually the output of two-class sigmoid function. It should be a vector (column vector or row vector) with data type: 'float64' or 'float32'. labels (numpy.ndarray): The ground truth (labels), the shape should keep the same as preds. The data type is 'int32' or 'int64'. 
""" if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") sample_num = labels.shape[0] preds = np.floor(preds + 0.5).astype("int32") for i in range(sample_num): pred = preds[i] label = labels[i] if pred == 1: if pred == label: self.tp += 1 else: self.fp += 1 def reset(self): """ Resets all of the metric state. """ self.tp = 0 self.fp = 0 def accumulate(self): """ Calculate the final precision. Returns: A scaler float: results of the calculated precision. """ ap = self.tp + self.fp return float(self.tp) / ap if ap != 0 else .0 def name(self): """ Returns metric name """ return self._name class Recall(Metric): """ Recall (also known as sensitivity) is the fraction of relevant instances that have been retrieved over the total amount of relevant instances Refer to: https://en.wikipedia.org/wiki/Precision_and_recall Noted that this class manages the recall score only for binary classification task. Args: name (str, optional): String name of the metric instance. Default is `recall`. Example by standalone: .. code-block:: python import numpy as np import paddle x = np.array([0.1, 0.5, 0.6, 0.7]) y = np.array([1, 0, 1, 1]) m = paddle.metric.Recall() m.update(x, y) res = m.accumulate() print(res) # 2.0 / 3.0 Example with Model API: .. 
code-block:: python import numpy as np import paddle import paddle.nn as nn class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') def __getitem__(self, idx): return self.x[idx], self.y[idx] def __len__(self): return self.n model = paddle.Model(nn.Sequential( nn.Linear(10, 1), nn.Sigmoid() )) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) model.prepare( optim, loss=nn.BCELoss(), metrics=[paddle.metric.Precision(), paddle.metric.Recall()]) data = Data() model.fit(data, batch_size=16) """ def __init__(self, name='recall', *args, **kwargs): super(Recall, self).__init__(*args, **kwargs) self.tp = 0 # true positive self.fn = 0 # false negative self._name = name def update(self, preds, labels): """ Update the states based on the current mini-batch prediction results. Args: preds(numpy.array): prediction results of current mini-batch, the output of two-class sigmoid function. Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'. labels(numpy.array): ground truth (labels) of current mini-batch, the shape should keep the same as preds. Shape: [batch_size, 1], Dtype: 'int32' or 'int64'. """ if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") sample_num = labels.shape[0] preds = np.rint(preds).astype("int32") for i in range(sample_num): pred = preds[i] label = labels[i] if label == 1: if pred == label: self.tp += 1 else: self.fn += 1 def accumulate(self): """ Calculate the final recall. Returns: A scaler float: results of the calculated Recall. 
""" recall = self.tp + self.fn return float(self.tp) / recall if recall != 0 else .0 def reset(self): """ Resets all of the metric state. """ self.tp = 0 self.fn = 0 def name(self): """ Returns metric name """ return self._name class Auc(Metric): """ The auc metric is for binary classification. Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve. Please notice that the auc metric is implemented with python, which may be a little bit slow. The `auc` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is the computed using the height of the precision values by the recall. Args: curve (str): Specifies the mode of the curve to be computed, 'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'. num_thresholds (int): The number of thresholds to use when discretizing the roc curve. Default is 4095. 'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'. name (str, optional): String name of the metric instance. Default is `auc`. "NOTE: only implement the ROC curve type via Python now." Example by standalone: .. code-block:: python import numpy as np import paddle m = paddle.metric.Auc() n = 8 class0_preds = np.random.random(size = (n, 1)) class1_preds = 1 - class0_preds preds = np.concatenate((class0_preds, class1_preds), axis=1) labels = np.random.randint(2, size = (n, 1)) m.update(preds=preds, labels=labels) res = m.accumulate() Example with Model API: .. 
code-block:: python import numpy as np import paddle import paddle.nn as nn class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('int64') def __getitem__(self, idx): return self.x[idx], self.y[idx] def __len__(self): return self.n model = paddle.Model(nn.Sequential( nn.Linear(10, 2), nn.Softmax()) ) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) def loss(x, y): return nn.functional.nll_loss(paddle.log(x), y) model.prepare( optim, loss=loss, metrics=paddle.metric.Auc()) data = Data() model.fit(data, batch_size=16) """ def __init__(self, curve='ROC', num_thresholds=4095, name='auc', *args, **kwargs): super(Auc, self).__init__(*args, **kwargs) self._curve = curve self._num_thresholds = num_thresholds _num_pred_buckets = num_thresholds + 1 self._stat_pos = np.zeros(_num_pred_buckets) self._stat_neg = np.zeros(_num_pred_buckets) self._name = name def update(self, preds, labels): """ Update the auc curve with the given predictions and labels. Args: preds (numpy.array): An numpy array in the shape of (batch_size, 2), preds[i][j] denotes the probability of classifying the instance i into the class j. labels (numpy.array): an numpy array in the shape of (batch_size, 1), labels[i] is either o or 1, representing the label of the instance i. 
""" if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): labels = labels.numpy() elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): preds = preds.numpy() elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") for i, lbl in enumerate(labels): value = preds[i, 1] bin_idx = int(value * self._num_thresholds) assert bin_idx <= self._num_thresholds if lbl: self._stat_pos[bin_idx] += 1.0 else: self._stat_neg[bin_idx] += 1.0 @staticmethod def trapezoid_area(x1, x2, y1, y2): return abs(x1 - x2) * (y1 + y2) / 2.0 def accumulate(self): """ Return the area (a float score) under auc curve Return: float: the area under auc curve """ tot_pos = 0.0 tot_neg = 0.0 auc = 0.0 idx = self._num_thresholds while idx >= 0: tot_pos_prev = tot_pos tot_neg_prev = tot_neg tot_pos += self._stat_pos[idx] tot_neg += self._stat_neg[idx] auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos, tot_pos_prev) idx -= 1 return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0 def reset(self): """ Reset states and result """ _num_pred_buckets = self._num_thresholds + 1 self._stat_pos = np.zeros(_num_pred_buckets) self._stat_neg = np.zeros(_num_pred_buckets) def name(self): """ Returns metric name """ return self._name def accuracy(input, label, k=1, correct=None, total=None, name=None): """ accuracy layer. Refer to the https://en.wikipedia.org/wiki/Precision_and_recall This function computes the accuracy using the input and label. If the correct label occurs in top k predictions, then correct will increment by one. Note: the dtype of accuracy is determined by input. the input and label dtype can be different. Args: input(Tensor): The input of accuracy layer, which is the predictions of network. A Tensor with type float32,float64. The shape is ``[sample_number, class_dim]`` . label(Tensor): The label of dataset. 
Tensor with type int64 or int32. The shape is ``[sample_number, 1]`` . k(int, optional): The top k predictions for each class will be checked. Data type is int64 or int32. correct(Tensor, optional): The correct predictions count. A Tensor with type int64 or int32. total(Tensor, optional): The total entries count. A tensor with type int64 or int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor, the correct rate. A Tensor with type float32. Examples: .. code-block:: python import paddle predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32') label = paddle.to_tensor([[2], [0]], dtype="int64") result = paddle.metric.accuracy(input=predictions, label=label, k=1) # [0.5] """ if label.dtype == paddle.int32: label = paddle.cast(label, paddle.int64) if _non_static_mode(): if correct is None: correct = _varbase_creator(dtype="int32") if total is None: total = _varbase_creator(dtype="int32") topk_out, topk_indices = paddle.topk(input, k=k) _acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label, correct, total) return _acc helper = LayerHelper("accuracy", **locals()) check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'accuracy') topk_out, topk_indices = paddle.topk(input, k=k) acc_out = helper.create_variable_for_type_inference(dtype="float32") if correct is None: correct = helper.create_variable_for_type_inference(dtype="int32") if total is None: total = helper.create_variable_for_type_inference(dtype="int32") helper.append_op(type="accuracy", inputs={ "Out": [topk_out], "Indices": [topk_indices], "Label": [label] }, outputs={ "Accuracy": [acc_out], "Correct": [correct], "Total": [total], }) return acc_out
[ "noreply@github.com" ]
noreply@github.com
9b6a313c4143391d0e759e966d2a74b8e14b3fb2
6cee35876c6a1afdc1a2f9293fbcf41719f3852d
/chap_2/exercise2.py
c89851bc22530e3167a8bbdee2b1449bc3979f7b
[]
no_license
SiddhantAshtekar/python-algorithem-for-begginers
a7c31cd2cd96d70e13a2d0119da94fe7f38c5056
07803850aa78c07ce608d18173afebd398543121
refs/heads/master
2020-05-07T10:28:20.310000
2019-04-09T17:33:19
2019-04-09T17:33:19
180,417,449
3
0
null
null
null
null
UTF-8
Python
false
false
99
py
name=input("Enter your name ") print(f"the revers of your name is {name[-1::-1]}")#revers of sting
[ "noreply@github.com" ]
noreply@github.com
62057c8eb956315f5f52fa00e9a3237b9e78aa7e
c1faf35b2fe1beda6c839031465195ea58b4c495
/panelserverextension.py
eae784198ae449200859acbf4742f46ee152c279
[]
no_license
makwingchi/philly-route-finder
ff9f6001a39a7d838ff143ee5445cc848f456205
c807c76290772f4b31bd0cdaab7a1ab6e505d8e7
refs/heads/master
2020-05-22T10:19:23.003000
2019-07-14T15:25:46
2019-07-14T15:25:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
206
py
from subprocess import Popen def load_jupyter_server_extension(nbapp): """serve the app.ipynb directory with bokeh server""" Popen(["panel", "serve", "app.ipynb", "--allow-websocket-origin=*"])
[ "noreply@github.com" ]
noreply@github.com
README.md exists but content is empty.
Downloads last month
97