column                                      type            min     max
------------------------------------------  --------------  ------  ------
hexsha                                      stringlengths   40      40
size                                        int64           5       2.06M
ext                                         stringclasses   11 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3       251
max_stars_repo_name                         stringlengths   4       130
max_stars_repo_head_hexsha                  stringlengths   40      78
max_stars_repo_licenses                     listlengths     1       10
max_stars_count                             int64           1       191k
max_stars_repo_stars_event_min_datetime     stringlengths   24      24
max_stars_repo_stars_event_max_datetime     stringlengths   24      24
max_issues_repo_path                        stringlengths   3       251
max_issues_repo_name                        stringlengths   4       130
max_issues_repo_head_hexsha                 stringlengths   40      78
max_issues_repo_licenses                    listlengths     1       10
max_issues_count                            int64           1       116k
max_issues_repo_issues_event_min_datetime   stringlengths   24      24
max_issues_repo_issues_event_max_datetime   stringlengths   24      24
max_forks_repo_path                         stringlengths   3       251
max_forks_repo_name                         stringlengths   4       130
max_forks_repo_head_hexsha                  stringlengths   40      78
max_forks_repo_licenses                     listlengths     1       10
max_forks_count                             int64           1       105k
max_forks_repo_forks_event_min_datetime     stringlengths   24      24
max_forks_repo_forks_event_max_datetime     stringlengths   24      24
content                                     stringlengths   1       1.05M
avg_line_length                             float64         1       1.02M
max_line_length                             int64           3       1.04M
alphanum_fraction                           float64         0       1
dee0ea830b4e14533eb75ccbf58b75a95766df8d
3,369
py
Python
python/soma_workflow/constants.py
denisri/soma-workflow
bc6f2f50d34437e86e850cb0d05ff26b041d560d
[ "CECILL-B" ]
null
null
null
python/soma_workflow/constants.py
denisri/soma-workflow
bc6f2f50d34437e86e850cb0d05ff26b041d560d
[ "CECILL-B" ]
44
2018-10-30T16:57:10.000Z
2022-03-15T10:54:57.000Z
python/soma_workflow/constants.py
populse/soma-workflow
e6d3e3c33ad41107ee3c959adc4832e6edd047f4
[ "CECILL-B" ]
null
null
null
# -*- coding: utf-8 -*-

'''
author: Soizic Laguitton

organization: I2BM, Neurospin, Gif-sur-Yvette, France
organization: CATI, France
organization: IFR 49

License: `CeCILL version 2 <http://www.cecill.info/licences/Licence_CeCILL_V2-en.html>`_
'''

#
# Soma-workflow constants
#

'''
Job status:
'''
NOT_SUBMITTED = "not_submitted"
UNDETERMINED = "undetermined"
QUEUED_ACTIVE = "queued_active"
SYSTEM_ON_HOLD = "system_on_hold"
USER_ON_HOLD = "user_on_hold"
USER_SYSTEM_ON_HOLD = "user_system_on_hold"
RUNNING = "running"
SYSTEM_SUSPENDED = "system_suspended"
USER_SUSPENDED = "user_suspended"
USER_SYSTEM_SUSPENDED = "user_system_suspended"
DONE = "done"
FAILED = "failed"
DELETE_PENDING = "delete_pending"
KILL_PENDING = "kill_pending"
SUBMISSION_PENDING = "submission_pending"
WARNING = "warning"
JOB_STATUS = [NOT_SUBMITTED, UNDETERMINED, QUEUED_ACTIVE, SYSTEM_ON_HOLD,
              USER_ON_HOLD, USER_SYSTEM_ON_HOLD, RUNNING, SYSTEM_SUSPENDED,
              USER_SUSPENDED, USER_SYSTEM_SUSPENDED, DONE, FAILED,
              DELETE_PENDING, KILL_PENDING, SUBMISSION_PENDING, WARNING]

'''
Exit job status:
'''
EXIT_UNDETERMINED = "exit_status_undetermined"
EXIT_ABORTED = "aborted"
EXIT_NOTRUN = "aborted_before_running"
FINISHED_REGULARLY = "finished_regularly"
FINISHED_TERM_SIG = "finished_signal"
FINISHED_UNCLEAR_CONDITIONS = "finished_unclear_condition"
USER_KILLED = "killed_by_user"
JOB_EXIT_STATUS = [EXIT_UNDETERMINED, EXIT_ABORTED, FINISHED_REGULARLY,
                   FINISHED_TERM_SIG, FINISHED_UNCLEAR_CONDITIONS,
                   USER_KILLED, EXIT_NOTRUN]

'''
File transfer status:
'''
FILES_DO_NOT_EXIST = "do not exist"
FILES_ON_CLIENT = "on client side"
FILES_ON_CR = "on computing resource side"
FILES_ON_CLIENT_AND_CR = "on both sides"
TRANSFERING_FROM_CLIENT_TO_CR = "transfering client->cr"
TRANSFERING_FROM_CR_TO_CLIENT = "transfering cr->client"
FILES_UNDER_EDITION = "under edition"
FILE_TRANSFER_STATUS = [FILES_DO_NOT_EXIST, FILES_ON_CLIENT, FILES_ON_CR,
                        FILES_ON_CLIENT_AND_CR, TRANSFERING_FROM_CLIENT_TO_CR,
                        TRANSFERING_FROM_CR_TO_CLIENT, FILES_UNDER_EDITION]

'''
Transfer type
'''
TR_FILE_C_TO_CR = "file transfer form client to cr"
TR_DIR_C_TO_CR = "dir transfer from client to cr"
TR_MFF_C_TO_CR = "multi file format from client to cr"
TR_FILE_CR_TO_C = "file transfer form cr to client"
TR_DIR_CR_TO_C = "dir transfer from cr to client"
TR_MFF_CR_TO_C = "multi file format from cr to client"
TRANSFER_TYPES = [TR_FILE_C_TO_CR, TR_DIR_C_TO_CR, TR_MFF_C_TO_CR,
                  TR_FILE_CR_TO_C, TR_DIR_CR_TO_C, TR_MFF_CR_TO_C]

'''
Workflow status:
'''
WORKFLOW_NOT_STARTED = "worklflow_not_started"
WORKFLOW_IN_PROGRESS = "workflow_in_progress"
WORKFLOW_DONE = "workflow_done"
WORKFLOW_STATUS = [WORKFLOW_NOT_STARTED, WORKFLOW_IN_PROGRESS, WORKFLOW_DONE,
                   DELETE_PENDING, WARNING]
28.310924
88
0.655091
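The constants module above is a flat status vocabulary; a consumer typically just tests membership. A minimal sketch of how it might be used, assuming soma-workflow is importable; the `is_terminal` helper is illustrative, not part of the package:

# Hypothetical consumer of the soma_workflow status vocabulary above.
from soma_workflow.constants import JOB_STATUS, DONE, FAILED, RUNNING

def is_terminal(status):
    # Reject unknown strings before classifying them.
    if status not in JOB_STATUS:
        raise ValueError("unknown job status: %r" % status)
    return status in (DONE, FAILED)

assert is_terminal(DONE)
assert not is_terminal(RUNNING)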
dee4241a76fbf19cf565aab66e0521ce2380cc65
250
py
Python
test/tests/global_and_local.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
1
2015-11-06T03:39:51.000Z
2015-11-06T03:39:51.000Z
test/tests/global_and_local.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
null
null
null
test/tests/global_and_local.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
null
null
null
# expected: fail
# - this particular check isn't implemented yet

# I would have expected this to be valid, but cPython and pypy err out
# saying "name 'x' is local and global"

print "first"
x = 1
print "calling"
f(2)
print x
16.666667
108
0.696
dee46fc1a2825aedf140afa6a83cd03a303bce36
1,980
py
Python
lab4_2/helpers/scanner.py
cinnamonbreakfast/flcd
f9168c1965976e9ae9477ee6b163a026f61acb1b
[ "MIT" ]
null
null
null
lab4_2/helpers/scanner.py
cinnamonbreakfast/flcd
f9168c1965976e9ae9477ee6b163a026f61acb1b
[ "MIT" ]
null
null
null
lab4_2/helpers/scanner.py
cinnamonbreakfast/flcd
f9168c1965976e9ae9477ee6b163a026f61acb1b
[ "MIT" ]
null
null
null
res_words = []
seps = []
ops = []
22.5
74
0.491414
dee4deb771683414d1b0181d259bc1acc86fbf9f
1,101
py
Python
fastspider/item/item.py
coco369/fastspider
464ba47176c005ed97005a79c5c4eee0bf0740b6
[ "MIT" ]
6
2021-08-09T01:35:44.000Z
2022-02-15T08:14:29.000Z
fastspider/item/item.py
coco369/fastspider
464ba47176c005ed97005a79c5c4eee0bf0740b6
[ "MIT" ]
null
null
null
fastspider/item/item.py
coco369/fastspider
464ba47176c005ed97005a79c5c4eee0bf0740b6
[ "MIT" ]
4
2021-08-13T06:41:13.000Z
2021-12-07T15:53:56.000Z
# encoding=utf-8
"""
Auth: coco369
Email: 779598160@qq.com
CreateTime: 2021/07/30

Desc: fastspider, Item
"""
20.018182
84
0.728429
dee8b0a49fcef498a3468a8ea4df153befa037f5
26,370
py
Python
src/third_party/wiredtiger/test/suite/run.py
benety/mongo
203430ac9559f82ca01e3cbb3b0e09149fec0835
[ "Apache-2.0" ]
null
null
null
src/third_party/wiredtiger/test/suite/run.py
benety/mongo
203430ac9559f82ca01e3cbb3b0e09149fec0835
[ "Apache-2.0" ]
null
null
null
src/third_party/wiredtiger/test/suite/run.py
benety/mongo
203430ac9559f82ca01e3cbb3b0e09149fec0835
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # # Public Domain 2014-present MongoDB, Inc. # Public Domain 2008-2014 WiredTiger, Inc. # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # [TEST_TAGS] # ignored_file # [END_TAGS] # # run.py # Command line test runner # from __future__ import print_function import glob, json, os, random, re, sys if sys.version_info[0] <= 2: print('WiredTiger requires Python version 3.0 or above') sys.exit(1) # Set paths suitedir = sys.path[0] wt_disttop = os.path.dirname(os.path.dirname(suitedir)) wt_3rdpartydir = os.path.join(wt_disttop, 'test', '3rdparty') # Check for a local build that contains the wt utility. First check if the # supplied an explicit build directory ('WT_BUILDDIR'), then the current # working directory, and finally in the disttop directory. # This isn't ideal - if a user has multiple builds in a tree we # could pick the wrong one. We also need to account for the fact that there # may be an executable 'wt' file the build directory. env_builddir = os.getenv('WT_BUILDDIR') curdir = os.getcwd() if env_builddir and os.path.isfile(os.path.join(env_builddir, 'wt')): wt_builddir = env_builddir elif os.path.isfile(os.path.join(curdir, 'wt')): wt_builddir = curdir elif os.path.isfile(os.path.join(curdir, 'wt.exe')): wt_builddir = curdir elif os.path.isfile(os.path.join(wt_disttop, 'wt')): wt_builddir = wt_disttop elif os.path.isfile(os.path.join(wt_disttop, 'wt.exe')): wt_builddir = wt_disttop else: print('Unable to find useable WiredTiger build') sys.exit(1) # Cannot import wiredtiger and supporting utils until we set up paths # We want our local tree in front of any installed versions of WiredTiger. # Don't change sys.path[0], it's the dir containing the invoked python script. sys.path.insert(1, os.path.join(wt_builddir, 'lang', 'python')) # Append to a colon separated path in the environment # If we built with libtool, explicitly put its install directory in our library # search path. This only affects library loading for subprocesses, like 'wt'. 
libsdir = os.path.join(wt_builddir, '.libs') if os.path.isdir(libsdir): append_env_path('LD_LIBRARY_PATH', libsdir) if sys.platform == "darwin": append_env_path('DYLD_LIBRARY_PATH', libsdir) # Add all 3rd party directories: some have code in subdirectories for d in os.listdir(wt_3rdpartydir): for subdir in ('lib', 'python', ''): if os.path.exists(os.path.join(wt_3rdpartydir, d, subdir)): sys.path.insert(1, os.path.join(wt_3rdpartydir, d, subdir)) break # unittest will be imported later, near to when it is needed. unittest = None # Find an executable of the given name in the execution path. # Follow a symbolic link, returning the target # Find all instances of a filename under a directory # Show an environment variable if verbose enough. # capture the category (AKA 'subsuite') part of a test name, # e.g. test_util03 -> util reCatname = re.compile(r"test_([^0-9]+)[0-9]*") # Look for a list of the form 0-9,11,15-17. def configRecord(cmap, tup): """ Records this tuple in the config. It is marked as None (appearing as null in json), so it can be easily adjusted in the output file. """ tuplen = len(tup) pos = 0 for name in tup: last = (pos == tuplen - 1) pos += 1 if not name in cmap: if last: cmap[name] = {"run":None} else: cmap[name] = {"run":None, "sub":{}} if not last: cmap = cmap[name]["sub"] def configGet(cmap, tup): """ Answers the question, should we do this test, given this config file? Following the values of the tuple through the map, returning the first non-null value. If all values are null, return True (handles tests that may have been added after the config was generated). """ for name in tup: if not name in cmap: return True run = cmap[name]["run"] if "run" in cmap[name] else None if run != None: return run cmap = cmap[name]["sub"] if "sub" in cmap[name] else {} return True if __name__ == '__main__': # Turn numbers and ranges into test module names preserve = timestamp = debug = dryRun = gdbSub = lldbSub = longtest = zstdtest = ignoreStdout = False removeAtStart = True asan = False parallel = 0 random_sample = 0 batchtotal = batchnum = 0 seed = seedw = seedz = 0 configfile = None configwrite = False dirarg = None scenario = '' verbose = 1 args = sys.argv[1:] testargs = [] hook_names = [] while len(args) > 0: arg = args.pop(0) from unittest import defaultTestLoader as loader # Command line options if arg[0] == '-': option = arg[1:] if option == '-asan': asan = True continue if option == '-batch' or option == 'b': if batchtotal != 0 or len(args) == 0: usage() sys.exit(2) # Batch expects an argument that has int slash int. 
# For example "-b 4/12" try: left, right = args.pop(0).split('/') batchnum = int(left) batchtotal = int(right) except: print('batch argument should be nnn/nnn') usage() sys.exit(2) if batchtotal <= 0 or batchnum < 0 or batchnum >= batchtotal: usage() sys.exit(2) continue if option == '-dir' or option == 'D': if dirarg != None or len(args) == 0: usage() sys.exit(2) dirarg = args.pop(0) continue if option == '-debug' or option == 'd': debug = True continue if option == '-dry-run' or option == 'n': dryRun = True continue if option == '-gdb' or option == 'g': gdbSub = True continue if option == '-lldb': lldbSub = True continue if option == '-help' or option == 'h': usage() sys.exit(0) if option == '-hook': if len(args) == 0: usage() sys.exit(2) hook_names.append(args.pop(0)) continue if option == '-long' or option == 'l': longtest = True continue if option == '-zstd' or option == 'z': zstdtest = True continue if option == '-noremove': removeAtStart = False continue if option == '-random-sample' or option == 'r': if len(args) == 0: usage() sys.exit(2) random_sample = int(args.pop(0)) if random_sample < 2 or random_sample > 1000: usage() sys.exit(2) continue if option == '-parallel' or option == 'j': if parallel != 0 or len(args) == 0: usage() sys.exit(2) parallel = int(args.pop(0)) continue if option == '-preserve' or option == 'p': preserve = True continue if option == '-scenario' or option == 's': if scenario != '' or len(args) == 0: usage() sys.exit(2) scenario = args.pop(0) continue if option == '-timestamp' or option == 't': timestamp = True continue if option == '-verbose' or option == 'v': if len(args) == 0: usage() sys.exit(2) verbose = int(args.pop(0)) if verbose > 3: verbose = 3 if verbose < 0: verbose = 0 continue if option == '--ignore-stdout' or option == 'i': ignoreStdout = True continue if option == '-config' or option == 'c': if configfile != None or len(args) == 0: usage() sys.exit(2) configfile = args.pop(0) continue if option == '-configcreate' or option == 'C': if configfile != None or len(args) == 0: usage() sys.exit(2) configfile = args.pop(0) configwrite = True continue if option == '-randomseed' or option == 'R': seedw = random.randint(1, 0xffffffff) seedz = random.randint(1, 0xffffffff) continue if option == '-seed' or option == 'S': if seed != 0 or len(args) == 0: usage() sys.exit(2) seed = args.pop(0) [seedw, seedz] = seed.split('.') if seedw == 0 or seedz == 0: usage() sys.exit(2) continue print('unknown arg: ' + arg) usage() sys.exit(2) testargs.append(arg) if asan: # To run ASAN, we need to ensure these environment variables are set: # ASAN_SYMBOLIZER_PATH full path to the llvm-symbolizer program # LD_LIBRARY_PATH includes path with wiredtiger shared object # LD_PRELOAD includes the ASAN runtime library # # Note that LD_LIBRARY_PATH has already been set above. The trouble with # simply setting these variables in the Python environment is that it's # too late. LD_LIBRARY_PATH is commonly cached by the shared library # loader at program startup, and that's already been done before Python # begins execution. Likewise, any preloading indicated by LD_PRELOAD # has already been done. # # Our solution is to set the variables as appropriate, and then restart # Python with the same argument list. The shared library loader will # have everything it needs on the second go round. # # Note: If the ASAN stops the program with the error: # Shadow memory range interleaves with an existing memory mapping. # ASan cannot proceed correctly. 
# # try rebuilding with the clang options: # "-mllvm -asan-force-dynamic-shadow=1" # and make sure that clang is used for all compiles. # # We'd like to show this as a message, but there's no good way to # detect this error from here short of capturing/parsing all output # from the test run. ASAN_ENV = "__WT_TEST_SUITE_ASAN" # if set, we've been here before ASAN_SYMBOLIZER_PROG = "llvm-symbolizer" ASAN_SYMBOLIZER_ENV = "ASAN_SYMBOLIZER_PATH" LD_PRELOAD_ENV = "LD_PRELOAD" SO_FILE_NAME = "libclang_rt.asan-x86_64.so" if not os.environ.get(ASAN_ENV): if verbose >= 2: print('Enabling ASAN environment and rerunning python') os.environ[ASAN_ENV] = "1" show_env(verbose, "LD_LIBRARY_PATH") if not os.environ.get(ASAN_SYMBOLIZER_ENV): os.environ[ASAN_SYMBOLIZER_ENV] = which(ASAN_SYMBOLIZER_PROG) if not os.environ.get(ASAN_SYMBOLIZER_ENV): error(ASAN_SYMBOLIZER_ENV, 'symbolizer program not found in PATH') show_env(verbose, ASAN_SYMBOLIZER_ENV) if not os.environ.get(LD_PRELOAD_ENV): symbolizer = follow_symlinks(os.environ[ASAN_SYMBOLIZER_ENV]) bindir = os.path.dirname(symbolizer) sofiles = [] if os.path.basename(bindir) == 'bin': libdir = os.path.join(os.path.dirname(bindir), 'lib') sofiles = find(libdir, SO_FILE_NAME) if len(sofiles) != 1: if len(sofiles) == 0: fmt = 'ASAN shared library file not found.\n' + \ 'Set {} to the file location and rerun.' error(3, SO_FILE_NAME, fmt.format(LD_PRELOAD_ENV)) else: fmt = 'multiple ASAN shared library files found\n' + \ 'under {}, expected just one.\n' + \ 'Set {} to the correct file location and rerun.' error(3, SO_FILE_NAME, fmt.format(libdir, LD_PRELOAD_ENV)) os.environ[LD_PRELOAD_ENV] = sofiles[0] show_env(verbose, LD_PRELOAD_ENV) # Restart python! python = sys.executable os.execl(python, python, *sys.argv) elif verbose >= 2: print('Python restarted for ASAN') # We don't import wttest until after ASAN environment variables are set. import wttest # Use the same version of unittest found by wttest.py unittest = wttest.unittest tests = unittest.TestSuite() from testscenarios.scenarios import generate_scenarios import wthooks hookmgr = wthooks.WiredTigerHookManager(hook_names) # All global variables should be set before any test classes are loaded. # That way, verbose printing can be done at the class definition level. wttest.WiredTigerTestCase.globalSetup(preserve, removeAtStart, timestamp, gdbSub, lldbSub, verbose, wt_builddir, dirarg, longtest, zstdtest, ignoreStdout, seedw, seedz, hookmgr) # Without any tests listed as arguments, do discovery if len(testargs) == 0: if scenario != '': sys.stderr.write( 'run.py: specifying a scenario requires a test name\n') usage() sys.exit(2) from discover import defaultTestLoader as loader suites = loader.discover(suitedir) # If you have an empty Python file, it comes back as an empty entry in suites # and then the sort explodes. Drop empty entries first. Note: this converts # suites to a list, but the sort does that anyway. Also note: there seems to be # no way to count other than iteration; there's a count method but it also # returns zero for test files that contain a test class with no test functions, # and it's not clear that dropping those here is correct. 
suites = [s for s in suites if not isempty(s)] suites = sorted(suites, key=lambda c: str(list(c)[0])) if configfile != None: suites = configApply(suites, configfile, configwrite) tests.addTests(restrictScenario(generate_scenarios(suites), '')) else: for arg in testargs: testsFromArg(tests, loader, arg, scenario) tests = hookmgr.filter_tests(tests) # Shuffle the tests and create a new suite containing every Nth test from # the original suite if random_sample > 0: random_sample_tests = [] for test in tests: random_sample_tests.append(test) random.shuffle(random_sample_tests) tests = unittest.TestSuite(random_sample_tests[::random_sample]) if debug: import pdb pdb.set_trace() if batchtotal != 0: # For test batching, we want to split up all the tests evenly, and # spread out the tests, so each batch contains tests of all kinds. We'd # like to prioritize the lowest scenario numbers first, so if there's a # failure, we won't have to do all X thousand of some test's scenarios # before we see a failure in the next test. To that end, we define a # sort function that sorts by scenario first, and test name second. hugetests = set() all_tests = sorted(tests, key = get_sort_keys) if not longtest: for name in hugetests: print("WARNING: huge test " + name + " has > 1000 scenarios.\n" + "That is only appropriate when using the --long option.\n" + "The number of scenarios for the test should be pruned") # At this point we have an ordered list of all the tests. # Break it into just our batch. tests = unittest.TestSuite(all_tests[batchnum::batchtotal]) if dryRun: for line in tests: print(line) else: result = wttest.runsuite(tests, parallel) sys.exit(0 if result.wasSuccessful() else 1) sys.exit(0)
40.631741
105
0.573834
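One detail worth pulling out of the run.py row above: its `-b n/N` batching splits the sorted test list with a stride slice, `all_tests[batchnum::batchtotal]`, so each batch samples across the whole suite instead of taking a contiguous chunk. A standalone sketch of that idiom; the toy `tests` list is illustrative only:

# Stride-slice batching, as used by run.py's `-b n/N` option.
tests = ["test_%03d" % i for i in range(10)]  # toy stand-in for the suite

batchtotal = 3
batches = [tests[batchnum::batchtotal] for batchnum in range(batchtotal)]

# Each batch interleaves across the suite; together they cover it exactly once.
assert sorted(sum(batches, [])) == tests
print(batches[0])  # ['test_000', 'test_003', 'test_006', 'test_009']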
dee9227b4b6629ca39d002a84205390a69b06f7b
29,997
py
Python
Code/Minner/SumDialog.py
lizhangjie316/Minner
f6aebd51cef981d726b53db8d62d1b1703fe2649
[ "MIT" ]
1
2020-11-05T07:11:33.000Z
2020-11-05T07:11:33.000Z
Code/Minner0827/ui/SumDialog.py
lizhangjie316/Minner
f6aebd51cef981d726b53db8d62d1b1703fe2649
[ "MIT" ]
null
null
null
Code/Minner0827/ui/SumDialog.py
lizhangjie316/Minner
f6aebd51cef981d726b53db8d62d1b1703fe2649
[ "MIT" ]
1
2020-11-05T07:19:44.000Z
2020-11-05T07:19:44.000Z
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'SumDialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.

from PyQt5 import QtCore, QtGui, QtWidgets
from SumAllTable import DataGridAll
43.791241
116
0.657432
deeb28c75145a6bebc3771235fab7a32732db4c0
684
py
Python
models/t_complex_gateway.py
THM-MA/XSDATA-waypoint
dd94442f9d6677c525bf3ebb03c15fec52fa1079
[ "MIT" ]
null
null
null
models/t_complex_gateway.py
THM-MA/XSDATA-waypoint
dd94442f9d6677c525bf3ebb03c15fec52fa1079
[ "MIT" ]
null
null
null
models/t_complex_gateway.py
THM-MA/XSDATA-waypoint
dd94442f9d6677c525bf3ebb03c15fec52fa1079
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field
from typing import Optional

from .t_expression import TExpression
from .t_gateway import TGateway

__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
24.428571
71
0.622807
deebcfc8092fc857d0a9f335ddd1bffc49d0f520
2,371
py
Python
fixture/orm.py
NovikovMA/python_training_mantis
c8de0ec193e2ec644d8053f8e1b7fc1ee8fb1525
[ "Apache-2.0" ]
null
null
null
fixture/orm.py
NovikovMA/python_training_mantis
c8de0ec193e2ec644d8053f8e1b7fc1ee8fb1525
[ "Apache-2.0" ]
null
null
null
fixture/orm.py
NovikovMA/python_training_mantis
c8de0ec193e2ec644d8053f8e1b7fc1ee8fb1525
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
__author__ = 'M.Novikov'

from model.project import Project  # Mantis
from pony.orm import *  #
# from pymysql.converters import decoders  #
59.275
186
0.567693
deecfb2ff8809fa583a186388e95973a391ea0c6
3,577
py
Python
volDB/migrations/0001_initial.py
leg2015/CSCapstone19Volunteers
ae0fcf1e8ce4fafe8578edd0a3943574703046fa
[ "MIT" ]
4
2020-01-13T23:30:34.000Z
2021-03-17T21:23:57.000Z
volDB/migrations/0001_initial.py
leg2015/CSCapstone19Volunteers
ae0fcf1e8ce4fafe8578edd0a3943574703046fa
[ "MIT" ]
5
2020-02-12T03:25:17.000Z
2021-06-10T22:29:16.000Z
volDB/migrations/0001_initial.py
leg2015/CSCapstone19Volunteers
ae0fcf1e8ce4fafe8578edd0a3943574703046fa
[ "MIT" ]
null
null
null
# Generated by Django 2.1.7 on 2019-02-23 18:47

from django.db import migrations, models
import django.db.models.deletion
41.593023
137
0.574224
deedff750596df4bfdfcd2656752ec59911b5e80
2,713
py
Python
crawler/page_fetcher.py
AssisRaphael/PageColector
6753376996f12ee1cced96b89a3e34d6fdf66529
[ "MIT" ]
null
null
null
crawler/page_fetcher.py
AssisRaphael/PageColector
6753376996f12ee1cced96b89a3e34d6fdf66529
[ "MIT" ]
null
null
null
crawler/page_fetcher.py
AssisRaphael/PageColector
6753376996f12ee1cced96b89a3e34d6fdf66529
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup
from threading import Thread
import requests
from urllib.parse import urlparse, urljoin
from urllib import parse
33.085366
109
0.570586
def0c98ea1f503e25e5a4f61d70a095d8ff1d77d
141
py
Python
app/celery_worker.py
cjarv/celery_dev
1c0489ccf456249d5bd8d21da40ebe4572842af6
[ "MIT" ]
null
null
null
app/celery_worker.py
cjarv/celery_dev
1c0489ccf456249d5bd8d21da40ebe4572842af6
[ "MIT" ]
null
null
null
app/celery_worker.py
cjarv/celery_dev
1c0489ccf456249d5bd8d21da40ebe4572842af6
[ "MIT" ]
null
null
null
from factories.celery import create_celery
from factories.application import create_application

celery = create_celery(create_application())
35.25
52
0.87234
def0d455f3332a2d6ded90d585855fcbfa88a92a
2,098
py
Python
simublocks/dialog/importCodeDialog.py
bentoavb/simublocks
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
[ "MIT" ]
2
2020-05-14T12:34:43.000Z
2020-06-11T23:48:09.000Z
simublocks/dialog/importCodeDialog.py
bentoavb/simublocks
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
[ "MIT" ]
null
null
null
simublocks/dialog/importCodeDialog.py
bentoavb/simublocks
9d4a5600b8aecd2d188e9191d78789a1bd725ab8
[ "MIT" ]
1
2020-05-12T07:01:28.000Z
2020-05-12T07:01:28.000Z
# MIT License
#
# Copyright (c) 2020 Anderson Vitor Bento
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import tkinter as tk
from tkinter.scrolledtext import ScrolledText

from simublocks.dialog.dialogTools import dialogTools
38.851852
98
0.704957
def0f90a3cae5abac2b0927d079c001b98668c18
1,365
py
Python
python-socket-mult-thread/server.py
Programmer-Edilson/min-projects
62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e
[ "MIT" ]
1
2021-02-28T17:33:59.000Z
2021-02-28T17:33:59.000Z
python-socket-mult-thread/server.py
Programmer-Edilson/min-projects
62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e
[ "MIT" ]
null
null
null
python-socket-mult-thread/server.py
Programmer-Edilson/min-projects
62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e
[ "MIT" ]
null
null
null
import socket
import os
from _thread import start_new_thread

ip = "localhost"
port = 1234

global number_of_connections
number_of_connections = 0

server = socket.socket()
server.bind((ip, port))
server.listen(5)

engine()
24.375
74
0.621245
def2f40bc3a8f54d1a406e95811076ed0688d708
658
py
Python
delete_unuse_callkit.py
eyolo2021/ios-ui-sdk-set
a8897320c356ddd6dbfe964ef68eb76701759f03
[ "MIT" ]
14
2021-03-06T08:47:30.000Z
2022-02-11T09:42:24.000Z
delete_unuse_callkit.py
eyolo2021/ios-ui-sdk-set
a8897320c356ddd6dbfe964ef68eb76701759f03
[ "MIT" ]
3
2021-03-19T11:12:42.000Z
2021-11-29T14:56:33.000Z
delete_unuse_callkit.py
Zuzi007/ios-ui-sdk-set
2e51added5d697b4d1ab1ba2887ad297b408e7b0
[ "MIT" ]
12
2021-07-02T02:44:52.000Z
2022-03-01T05:15:22.000Z
#coding=utf-8
import os

delete_files = ["RCCall.mm", "RCCXCall.m"]
start_key = "RCCallKit_Delete_Start"
end_key = "RCCallKit_Delete_end"

for root, dirs, files in os.walk("./CallKit"):
    for file in files:
        if file in delete_files:
            print("will delete %s" % file)
            delete_used(os.path.join(root, file))
15.666667
44
0.674772
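The script above calls a `delete_used` helper that the dump truncates; judging by the `RCCallKit_Delete_Start`/`RCCallKit_Delete_end` markers, it strips marked regions from a source file. A hypothetical reconstruction under that assumption — not the project's actual code:

# Hypothetical sketch: drop every line between the start and end markers
# (markers included). Assumes the marker strings defined in the script above;
# the real delete_used() is not shown in the dataset row.
def delete_used(path, start_key="RCCallKit_Delete_Start", end_key="RCCallKit_Delete_end"):
    with open(path, encoding="utf-8") as f:
        lines = f.readlines()
    kept, skipping = [], False
    for line in lines:
        if start_key in line:
            skipping = True
            continue
        if end_key in line:
            skipping = False
            continue
        if not skipping:
            kept.append(line)
    with open(path, "w", encoding="utf-8") as f:
        f.writelines(kept)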
def6b9e9ff86d2545be01b4fc202577ea606b159
525
py
Python
global_setting.py
aixiwang/mqtt_datajs
91091d63f73e64916e6ca3fa5e9279dd361d3c86
[ "BSD-3-Clause" ]
null
null
null
global_setting.py
aixiwang/mqtt_datajs
91091d63f73e64916e6ca3fa5e9279dd361d3c86
[ "BSD-3-Clause" ]
null
null
null
global_setting.py
aixiwang/mqtt_datajs
91091d63f73e64916e6ca3fa5e9279dd361d3c86
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/python
#-*- coding: utf-8 -*-
#---------------------------------------
#
# Copyright(c) Aixi Wang 2014-2015
#---------------------------------------
# v1 -- initial version
#---------------------------------------

#-----------------------
# mail
#-----------------------
global mail_sender,mail_smtpserver,mail_username,mail_password
global mail_enable,mail_to

mail_to = 'xx@xxx.xxx'
mail_username = 'xxx@xxx.xxx'
mail_password = 'xxx'
mail_smtpserver = 'xxx.xxx.xxx'
mail_sender = 'xxx@xxx.xxx'
mail_enable = 1
23.863636
62
0.493333
def712afd23e0562bd689a7a3ab2431ec0fae53a
631
py
Python
Chatbot_Rest/urls.py
chenpocufa/Chatbot_CN
5e13c129c159143610f4dfc99478d401dd5777e6
[ "Apache-2.0" ]
1
2019-08-02T06:09:34.000Z
2019-08-02T06:09:34.000Z
Chatbot_Rest/urls.py
yuxuan2015/Chatbot_CN
1adf1c01d3eced5f0644102bdec9be22705b6f3f
[ "Apache-2.0" ]
null
null
null
Chatbot_Rest/urls.py
yuxuan2015/Chatbot_CN
1adf1c01d3eced5f0644102bdec9be22705b6f3f
[ "Apache-2.0" ]
null
null
null
#-*- coding:utf-8 _*-
"""
@author:charlesXu
@file: urls.py
@desc: url
@time: 2019/05/10
"""

# =============== #
#      apis       #
# =============== #

from django.urls import path

from intent_rest_controller import intent_controller
from entity_extraction_controller import entity_ext_controller
from bot_controller import get_chat_msg
# from time_convert_server import time_convert  #

urlpatterns = [
    path('entity', entity_ext_controller),  #
    path('intent', intent_controller),  #
    path('chat', get_chat_msg),  # chatbot
    path('time_convert', time_convert)  #
]
21.033333
62
0.667195
def7709b7d7d970c7608ad7be378d822d2e33518
14,334
py
Python
tests/frameworks/test_wsgi.py
tirkarthi/python-sensor
9872d146ac00baff2673fde5ba97fdbe596869a4
[ "MIT" ]
61
2017-09-27T02:50:17.000Z
2022-03-22T12:13:37.000Z
tests/frameworks/test_wsgi.py
tirkarthi/python-sensor
9872d146ac00baff2673fde5ba97fdbe596869a4
[ "MIT" ]
82
2017-07-11T13:47:33.000Z
2022-03-22T10:10:38.000Z
tests/frameworks/test_wsgi.py
takeaway/python-sensor
52d6eaa2d6a8e625201bad36ac2448201c4bd63d
[ "MIT" ]
27
2017-09-11T16:22:32.000Z
2022-03-11T17:21:49.000Z
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020

from __future__ import absolute_import

import time
import urllib3
import unittest

import tests.apps.flask_app
from ..helpers import testenv
from instana.singletons import agent, tracer
36.943299
102
0.647133
def7ae196a0259e7e64d4dfd6522b1ee72138646
16,178
py
Python
api/yolo_minimal/utils.py
simonsmh/www
1741545e636540b9eb250840347f091082fe301a
[ "MIT" ]
5
2015-12-19T11:18:54.000Z
2016-08-27T02:21:59.000Z
api/yolo_minimal/utils.py
simonsmh/www
1741545e636540b9eb250840347f091082fe301a
[ "MIT" ]
null
null
null
api/yolo_minimal/utils.py
simonsmh/www
1741545e636540b9eb250840347f091082fe301a
[ "MIT" ]
1
2020-10-30T13:25:33.000Z
2020-10-30T13:25:33.000Z
import math
import os
import random

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (
        (
            torch.min(box1[:, None, 2:], box2[:, 2:])
            - torch.max(box1[:, None, :2], box2[:, :2])
        )
        .clamp(0)
        .prod(2)
    )
    return inter / (
        area1[:, None] + area2 - inter
    )  # iou = inter / (area1 + area2 - inter)


def non_max_suppression(
    prediction,
    conf_thres=0.1,
    iou_thres=0.6,
    multi_label=True,
    classes=None,
    agnostic=False,
):
    """
    Performs Non-Maximum Suppression on inference results
    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)
    """
    # Box constraints
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height

    method = "merge"
    nc = prediction[0].shape[1] - 5  # number of classes
    multi_label &= nc > 1  # multiple labels per box
    output = [None] * len(prediction)
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply conf constraint
        x = x[x[:, 4] > conf_thres]

        # Apply width-height constraint
        x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[..., 5:] *= x[..., 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1)
            x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)

        # Filter by class
        if classes:
            x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]

        # Apply finite constraint
        if not torch.isfinite(x).all():
            x = x[torch.isfinite(x).all(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # if method == 'fast_batch':
        #     x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5] * 0 if agnostic else x[:, 5]  # classes
        boxes, scores = (
            x[:, :4].clone() + c.view(-1, 1) * max_wh,
            x[:, 4],
        )  # boxes (offset by class), scores
        if method == "merge":  # Merge NMS (boxes merged using weighted mean)
            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
            if n < 1e4:
                # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                # weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1)  # box weights
                # weights /= weights.sum(0)  # normalize
                # x[:, :4] = torch.mm(weights.T, x[:, :4])
                weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[
                    None
                ]  # box weights
                x[i, :4] = torch.mm(
                    weights / weights.sum(1, keepdim=True), x[:, :4]
                ).float()  # merged boxes
        elif method == "vision":
            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        elif method == "fast":  # FastNMS from https://github.com/dbolya/yolact
            iou = box_iou(boxes, boxes).triu_(diagonal=1)  # upper triangular iou matrix
            i = iou.max(0)[0] < iou_thres

        output[xi] = x[i]

    return output
34.49467
117
0.52627
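`non_max_suppression` above relies on `box_area` and `xywh2xyxy` helpers that the row truncates. Hedged sketches of both, following the usual YOLO conventions (center-x, center-y, width, height in; corners out) and matching how `box_iou` calls `box_area(box1.t())`; the originals may differ in detail:

import torch

def box_area(box):
    # box: Tensor[4, N] in (x1, y1, x2, y2) layout, matching the .t() call above.
    return (box[2] - box[0]) * (box[3] - box[1])

def xywh2xyxy(x):
    # (cx, cy, w, h) -> (x1, y1, x2, y2)
    y = x.clone()
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y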
def8727d101b934efb5715bc01f3842eeeee3ee3
4,934
py
Python
ec2stack/__init__.py
sureshanaparti/cloudstack-ec2stack
8e07435d3d04357995f2a5d337adef62ecbfdd8d
[ "Apache-2.0" ]
13
2015-05-06T13:38:13.000Z
2021-11-09T21:39:01.000Z
ec2stack/__init__.py
sureshanaparti/cloudstack-ec2stack
8e07435d3d04357995f2a5d337adef62ecbfdd8d
[ "Apache-2.0" ]
3
2015-08-21T17:31:20.000Z
2021-07-07T08:39:11.000Z
ec2stack/__init__.py
sureshanaparti/cloudstack-ec2stack
8e07435d3d04357995f2a5d337adef62ecbfdd8d
[ "Apache-2.0" ]
17
2015-07-24T06:00:59.000Z
2021-11-09T21:38:52.000Z
#!/usr/bin/env python # encoding: utf-8 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """This module creates the flask application. """ import os import sys import argparse from alembic import command from alembic.config import Config as AlembicConfig from flask import Flask from ConfigParser import SafeConfigParser from ec2stack.controllers import * from ec2stack.core import DB from ec2stack.models import User def create_app(settings=None): """ Creates a flask application. @param settings: Settings override object. @return: The flask application. """ app = Flask(__name__) if settings: app.config.from_object(settings) else: args = _generate_args() profile = args.pop('profile') app.config['DEBUG'] = args.pop('debug') config_file = _load_config_file() database_uri = _load_database() _config_from_config_profile(config_file, profile, app) app.config['SQLALCHEMY_DATABASE_URI'] = database_uri DB.init_app(app) default_controller = __import__( 'ec2stack.controllers.' + 'default', None, None, 'DEFAULT' ) default_controller = getattr(default_controller, 'DEFAULT') app.register_blueprint(default_controller) return app def _generate_args(): """ Generate command line arguments for ec2stack-configure. @return: args. """ parser = argparse.ArgumentParser() parser.add_argument( '-p', '--profile', required=False, help='The profile to run ec2stack with, default is initial', default='initial' ) parser.add_argument( '-d', '--debug', required=False, help='Turn debug on for application', default=False ) args = parser.parse_args() return vars(args) def _load_config_file(): """ Checks that the user's configuration file exists and returns its path. @return: The path to the user's configuration file. """ config_file = os.path.join( os.path.expanduser('~'), '.ec2stack/ec2stack.conf' ) if not os.path.exists(config_file): sys.exit('No configuration found, please run ec2stack-configure') return config_file def _config_from_config_profile(config_file, profile, app): """ Configures ec2stack app based on configuration profile. @param config_file: current config file configuration. @param profile: the profile to set the attribute in. 
""" config = SafeConfigParser() config.read(config_file) if not config.has_section(profile): sys.exit('No profile matching ' + profile + ' found in configuration, please run ec2stack-configure -p ' + profile) for attribute in config.options(profile): app.config[attribute.upper()] = config.get(profile, attribute) instance_type_map = {} instance_section = profile + "instancemap" if config.has_section(instance_section): for attribute in config.options(instance_section): instance_type_map[attribute] = config.get( instance_section, attribute) app.config['INSTANCE_TYPE_MAP'] = instance_type_map resource_type_map = {} resource_section = profile + "resourcemap" if config.has_section(resource_section): for attribute in config.options(resource_section): resource_type_map[attribute] = config.get( resource_section, attribute) app.config['RESOURCE_TYPE_MAP '] = resource_type_map def _load_database(): """ Checks that the user's database exists and returns its uri. @return: The uri to the user's database. """ database_file = os.path.join( os.path.expanduser('~'), '.ec2stack/ec2stack.sqlite' ) if not os.path.exists(database_file): directory = os.path.join(os.path.dirname(__file__), '../migrations') config = AlembicConfig(os.path.join( directory, 'alembic.ini' )) config.set_main_option('script_location', directory) command.upgrade(config, 'head', sql=False, tag=None) return 'sqlite:///' + database_file
28.356322
88
0.676125
def8fdb574ef8e8309feae4ee72edbfe9a0a3beb
926
py
Python
schematics_proto3/unset.py
mlga/schematics-proto3
588fe5bc212e203688166638a1c52dfeda931403
[ "MIT" ]
null
null
null
schematics_proto3/unset.py
mlga/schematics-proto3
588fe5bc212e203688166638a1c52dfeda931403
[ "MIT" ]
11
2020-04-09T13:33:54.000Z
2020-08-19T17:38:26.000Z
schematics_proto3/unset.py
mlga/schematics-proto3
588fe5bc212e203688166638a1c52dfeda931403
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*-
"""
Test module docstring.
"""
import threading
from typing import Type

Unset = UnsetType()  # pylint: disable=invalid-name
17.807692
56
0.580994
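The module assigns `Unset = UnsetType()`, but the `UnsetType` class itself is cut from the row. Given the `threading` import, it is presumably a thread-safe singleton sentinel; a hypothetical sketch of that pattern, not the library's actual implementation:

import threading

class UnsetType:
    # Hypothetical: one shared instance, created under a lock so concurrent
    # first instantiations still yield the same object.
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
        return cls._instance

    def __bool__(self):
        return False  # the sentinel is falsy, like None

    def __repr__(self):
        return "Unset"

Unset = UnsetType()
assert UnsetType() is Unset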
def92f706f99835e10af1d0e6310107f2432dbe5
193
py
Python
aws_lambda_powertools/event_handler/__init__.py
nayaverdier/aws-lambda-powertools-python
cd15ee97746356a84c6f196dbd2d26a34ea50411
[ "Apache-2.0", "MIT-0" ]
1,208
2020-05-20T19:06:29.000Z
2022-03-30T14:17:47.000Z
aws_lambda_powertools/event_handler/__init__.py
nayaverdier/aws-lambda-powertools-python
cd15ee97746356a84c6f196dbd2d26a34ea50411
[ "Apache-2.0", "MIT-0" ]
859
2020-05-22T09:59:54.000Z
2022-03-31T08:31:30.000Z
aws_lambda_powertools/event_handler/__init__.py
nayaverdier/aws-lambda-powertools-python
cd15ee97746356a84c6f196dbd2d26a34ea50411
[ "Apache-2.0", "MIT-0" ]
163
2020-05-18T21:08:25.000Z
2022-03-28T12:03:37.000Z
""" Event handler decorators for common Lambda events """ from .api_gateway import ApiGatewayResolver from .appsync import AppSyncResolver __all__ = ["AppSyncResolver", "ApiGatewayResolver"]
21.444444
51
0.797927
def98cf0f4126cdcda2bee2e5c8d96a01bc4937b
1,351
py
Python
solutions/5/guillaume/LookAhead.py
larsbratholm/champs_kaggle
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
[ "MIT" ]
9
2020-08-14T23:11:16.000Z
2021-08-09T16:23:43.000Z
solutions/5/guillaume/LookAhead.py
larsbratholm/champs_kaggle
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
[ "MIT" ]
1
2020-11-19T09:29:14.000Z
2020-11-19T09:29:14.000Z
solutions/5/guillaume/LookAhead.py
larsbratholm/champs_kaggle
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
[ "MIT" ]
2
2020-09-09T02:53:57.000Z
2020-12-06T08:20:52.000Z
import itertools as it

from torch.optim import Optimizer
37.527778
75
0.559585
defcc91baa71d0c94f476ef6cc3d35765b3516a0
2,263
py
Python
addexp.py
Shajm44n/Expense
db3355d4d81d5dd57ceea81b1170724b8893e523
[ "MIT" ]
null
null
null
addexp.py
Shajm44n/Expense
db3355d4d81d5dd57ceea81b1170724b8893e523
[ "MIT" ]
null
null
null
addexp.py
Shajm44n/Expense
db3355d4d81d5dd57ceea81b1170724b8893e523
[ "MIT" ]
null
null
null
from tkinter import *
# import expdate
import mysql.connector

db_connect = mysql.connector.connect(host="localhost", user="root", password="maan", database="expense")
db_cursor = db_connect.cursor()
32.797101
236
0.643836
defd13835bf657af58b23494bf16c9abcbbae2e8
1,217
py
Python
BlackJack/UserInterface/BlackJackUI.py
Kasyx709/BlackJack
a99cd9327e466ed51dadbf4b5407c2370f998b82
[ "MIT" ]
null
null
null
BlackJack/UserInterface/BlackJackUI.py
Kasyx709/BlackJack
a99cd9327e466ed51dadbf4b5407c2370f998b82
[ "MIT" ]
null
null
null
BlackJack/UserInterface/BlackJackUI.py
Kasyx709/BlackJack
a99cd9327e466ed51dadbf4b5407c2370f998b82
[ "MIT" ]
null
null
null
from __future__ import division
from BlackJack.UserInterface import tk
from BlackJack.UserInterface import tkFont
from BlackJack.UserInterface import BlackJackWindows
from BlackJack.UserInterface import SelectGameType
from BlackJack.UserInterface import Helpers
40.566667
92
0.696795
defd4d718f41568b76388eb0230161d0d48bb24e
263
py
Python
winecasino/core/entities/__init__.py
harlov/winecasino
ae29b2c8f75bfd05ad141fd3b596f1db7c103690
[ "MIT" ]
null
null
null
winecasino/core/entities/__init__.py
harlov/winecasino
ae29b2c8f75bfd05ad141fd3b596f1db7c103690
[ "MIT" ]
null
null
null
winecasino/core/entities/__init__.py
harlov/winecasino
ae29b2c8f75bfd05ad141fd3b596f1db7c103690
[ "MIT" ]
null
null
null
from .country import Country
from .game import Game
from .game import Bid
from .user import User
from .grape import Grape
from .wine import Wine
from .base import new_id

__all__ = [
    "new_id",
    "Country",
    "Game",
    "Grape",
    "User",
    "Wine",
]
15.470588
28
0.653992
defdbd583ad5f6b3a08353cba72476c7dbaff00c
295
py
Python
download-deveres/para-execicios-curso-em-video/exe019.py
Hugo-Oliveira-RDO11/meus-deveres
b5e41015e2cb95946262678e82197e5f47d56271
[ "MIT" ]
null
null
null
download-deveres/para-execicios-curso-em-video/exe019.py
Hugo-Oliveira-RDO11/meus-deveres
b5e41015e2cb95946262678e82197e5f47d56271
[ "MIT" ]
null
null
null
download-deveres/para-execicios-curso-em-video/exe019.py
Hugo-Oliveira-RDO11/meus-deveres
b5e41015e2cb95946262678e82197e5f47d56271
[ "MIT" ]
null
null
null
import random

p = str(input('digite o nome do primeiro aluno :'))
s = str(input('o nome do segundo aluno :'))
t = str(input('o nome do terceiro aluno :'))
q = str(input('o nome do quato aluno :'))
lista = [p, s, t, q]
aluno = random.choice(lista)
print('o aluno sorteado foi {}'.format(aluno))
29.5
51
0.657627
defde4b16a7fe68a1c0b7ba26a303a5bb6a695bc
12,389
py
Python
cma-evolve.py
simondlevy/CMA-Gym
ce0056873d42eae2b6769fe22fcf872459694f30
[ "Apache-2.0" ]
null
null
null
cma-evolve.py
simondlevy/CMA-Gym
ce0056873d42eae2b6769fe22fcf872459694f30
[ "Apache-2.0" ]
null
null
null
cma-evolve.py
simondlevy/CMA-Gym
ce0056873d42eae2b6769fe22fcf872459694f30
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3

import gym
import torch
import numpy as np
import multiprocessing as mp
import os
import pickle
import sys
import time
import logging
import cma
import argparse

from torchmodel import StandardFCNet


def fitness_shift(x):
    x = np.asarray(x).flatten()
    ranks = np.empty(len(x))
    ranks[x.argsort()] = np.arange(len(x))
    ranks /= (len(x) - 1)
    ranks -= .5
    return ranks


def train(config, logger):
    task_queue = mp.SimpleQueue()
    result_queue = mp.SimpleQueue()
    stop = mp.Value('i', False)
    stats = SharedStats(config.state_dim)
    normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
    for normalizer in normalizers:
        normalizer.offline_stats.load(stats)
    workers = [CMAWorker(id, normalizers[id], task_queue, result_queue, stop, config)
               for id in range(config.num_workers)]
    for w in workers:
        w.start()

    opt = cma.CMAOptions()
    opt['tolfun'] = -config.target
    opt['popsize'] = config.pop_size
    opt['verb_disp'] = 0
    opt['verb_log'] = 0
    opt['maxiter'] = sys.maxsize
    es = cma.CMAEvolutionStrategy(config.initial_weight, config.sigma, opt)

    total_steps = 0
    initial_time = time.time()
    training_rewards = []
    training_steps = []
    training_timestamps = []
    test_mean, test_std = test(config, config.initial_weight, stats)
    logger.info('total steps %8d, %+4.0f(%+4.0f)' % (total_steps, test_mean, test_std))
    training_rewards.append(test_mean)
    training_steps.append(0)
    training_timestamps.append(0)
    while True:
        solutions = es.ask()
        for id, solution in enumerate(solutions):
            task_queue.put((id, solution))
        while not task_queue.empty():
            continue
        result = []
        while len(result) < len(solutions):
            if result_queue.empty():
                continue
            result.append(result_queue.get())
        result = sorted(result, key=lambda x: x[0])
        total_steps += np.sum([r[2] for r in result])
        cost = [r[1] for r in result]
        best_solution = solutions[np.argmin(cost)]
        elapsed_time = time.time() - initial_time
        test_mean, test_std = test(config, best_solution, stats)
        best = -np.min(cost)
        logger.info('total steps = %8d test = %+4.0f (%4.0f) best = %+4.0f (%+4.0f) elapased time = %4.0f sec' %
                    (total_steps, test_mean, test_std, best, config.target, elapsed_time))
        training_rewards.append(test_mean)
        training_steps.append(total_steps)
        training_timestamps.append(elapsed_time)

        # with open('data/%s-best_solution_%s.bin' % (TAG, config.task), 'wb') as f:  # XXX gets stuck
        #     pickle.dump(solutions[np.argmin(result)], f)

        if best > config.target:
            logger.info('Best score of %f exceeds target %f' % (best, config.target))
            break

        if config.max_steps and total_steps > config.max_steps:
            logger.info('Maximum number of steps exceeded')
            stop.value = True
            break

        cost = fitness_shift(cost)
        es.tell(solutions, cost)
        # es.disp()

        for normalizer in normalizers:
            stats.merge(normalizer.online_stats)
            normalizer.online_stats.zero()
        for normalizer in normalizers:
            normalizer.offline_stats.load(stats)

    stop.value = True
    for w in workers:
        w.join()

    return [training_rewards, training_steps, training_timestamps]


def test(config, solution, stats):
    normalizer = StaticNormalizer(config.state_dim)
    normalizer.offline_stats.load_state_dict(stats.state_dict())
    evaluator = Evaluator(config, normalizer)
    evaluator.model.set_weight(solution)
    rewards = []
    for i in range(config.test_repetitions):
        reward, _ = evaluator.single_run()
        rewards.append(reward)
    return np.mean(rewards), np.std(rewards) / config.repetitions


def multi_runs(task, logger, runs=1):
    if not os.path.exists('log'):
        os.makedirs('log')
    fh = logging.FileHandler('log/%s-%s.txt' % (task.tag, task.task))
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    stats = []
    for run in range(runs):
        logger.info('Run %3d/%3d' % (run + 1, runs))
        stats.append(train(task, logger))
        with open('data/%s-stats-%s.bin' % (task.tag, task.task), 'wb') as f:
            pickle.dump(stats, f)


if __name__ == '__main__':
    main()
33.574526
123
0.600291
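The `fitness_shift` helper above implements rank-based fitness shaping: raw costs are replaced by their ranks rescaled to [-0.5, 0.5], which makes the CMA-ES update invariant to the scale of the reward. A quick worked example of that transform (the function body is copied from the row; the inputs are illustrative):

import numpy as np

def fitness_shift(x):
    # Copied from the row above: map values to ranks in [-0.5, 0.5].
    x = np.asarray(x).flatten()
    ranks = np.empty(len(x))
    ranks[x.argsort()] = np.arange(len(x))
    ranks /= (len(x) - 1)
    ranks -= .5
    return ranks

# Outliers no longer dominate: only the ordering survives.
print(fitness_shift([3.0, -100.0, 7.0]))  # [ 0.  -0.5  0.5]
print(fitness_shift([0.3, -1.0, 0.7]))    # same output, same ordering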
defec38e9abb5a9b6de6de6949355eb8f83f8c74
342
py
Python
src/arcclimate/temperature.py
youworks/arcclimate
62a9eece267e42ccddfc5145e8ee50776470f7bf
[ "MIT" ]
null
null
null
src/arcclimate/temperature.py
youworks/arcclimate
62a9eece267e42ccddfc5145e8ee50776470f7bf
[ "MIT" ]
null
null
null
src/arcclimate/temperature.py
youworks/arcclimate
62a9eece267e42ccddfc5145e8ee50776470f7bf
[ "MIT" ]
1
2022-03-08T01:04:47.000Z
2022-03-08T01:04:47.000Z
""" """ import numpy as np def get_corrected_TMP(TMP: np.ndarray, ele_gap: float) -> np.ndarray: """ Args: TMP (np.ndarray): [] ele_gap (np.ndarray): [m] Returns: np.ndarray: [C] Notes: 0.0065/m """ return TMP + ele_gap * -0.0065
15.545455
69
0.564327
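`get_corrected_TMP` applies the standard atmospheric lapse rate of 0.0065 °C per metre: temperature falls by that amount for each metre of elevation gained, so a positive elevation gap lowers the corrected value. A small worked check (the sample values are illustrative):

import numpy as np

# 20 °C at the reference point, corrected for a site 300 m higher:
# 20 + 300 * -0.0065 = 18.05 °C. A negative gap warms by the same amount.
TMP = np.array([20.0, 20.0])
ele_gap = 300.0
print(TMP + ele_gap * -0.0065)  # [18.05 18.05]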
defeff29d76d14fa0aceaad7cd54a55164f7136c
2,386
py
Python
rastervision/data/label_store/default.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
4
2019-03-11T12:38:15.000Z
2021-04-06T14:57:52.000Z
rastervision/data/label_store/default.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
null
null
null
rastervision/data/label_store/default.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
1
2019-10-29T09:22:09.000Z
2019-10-29T09:22:09.000Z
from abc import (ABC, abstractmethod)
import os

import rastervision as rv
27.425287
86
0.65088
720247461650041909a7ce79fd85da841234a38b
6,425
py
Python
app.py
aniketjana03/TimeSheet
519b3bdad79dedb7210747906bf4b8e24e64691a
[ "Apache-2.0" ]
null
null
null
app.py
aniketjana03/TimeSheet
519b3bdad79dedb7210747906bf4b8e24e64691a
[ "Apache-2.0" ]
null
null
null
app.py
aniketjana03/TimeSheet
519b3bdad79dedb7210747906bf4b8e24e64691a
[ "Apache-2.0" ]
null
null
null
import sys
import os

from flask import Flask, flash, redirect, render_template, request, url_for, session
from flaskext.mysql import MySQL
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_session import Session
from database import Database
from makedb import MakeDB
from helpers import generate_weekID
import pymysql
from boto.s3.connection import S3Connection

# init flask app
app = Flask(__name__)
app.config['TESTING'] = False

# session secret key
app.secret_key = os.environ.get('SECRETKEYFLASK')

# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Password Hashing
bcrypt = Bcrypt(app)

# init MySQL database
mysql = MySQL()
mysql.init_app(app)

# Make database if not EXISTS
MakeDB()

if __name__ == "__main__":
    app.run()
32.125
144
0.598599
7202ced44b536e7785d48d42a3fe09355e98fc12
448
py
Python
guestbook/models.py
Bespolezniy/geek-world
8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
[ "MIT" ]
null
null
null
guestbook/models.py
Bespolezniy/geek-world
8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
[ "MIT" ]
null
null
null
guestbook/models.py
Bespolezniy/geek-world
8fbaf451b4e87e48e73eb289035ec0ea68ea0e68
[ "MIT" ]
null
null
null
from django.db import models

# Create your models here.
37.333333
92
0.694196
72043f3633eddba64964dbbdb6f17d84cf1d6267
34,859
py
Python
PA1/PA1_Q2/P21CS007_VGG16.py
aryachiranjeev/Dependable-AI
750570572c1baaa2590a89c0982e2f71b15b48b9
[ "MIT" ]
null
null
null
PA1/PA1_Q2/P21CS007_VGG16.py
aryachiranjeev/Dependable-AI
750570572c1baaa2590a89c0982e2f71b15b48b9
[ "MIT" ]
null
null
null
PA1/PA1_Q2/P21CS007_VGG16.py
aryachiranjeev/Dependable-AI
750570572c1baaa2590a89c0982e2f71b15b48b9
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # In[2]: import numpy as np import pandas as pd import random import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.keras.layers import Dense,Flatten,GlobalAveragePooling2D,Input,Lambda from tensorflow.keras.models import Model,load_model import tensorflow.keras.backend as K from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications.vgg16 import preprocess_input from sklearn.model_selection import train_test_split from tensorflow.keras.utils import to_categorical from sklearn.metrics import accuracy_score,confusion_matrix from skimage.color import rgb2gray import cv2 from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image # In[110]: # In[150]: # In[112]: # In[113]: # In[151]: test_brute_model_on_gray_scale_test_images(brute_model) # In[115]: #brute model print("/nbrute model/n") brute_model = brute_vgg16() test_brute_model_on_gray_scale_test_images(brute_model) class_accuracy_brute_model = class_wise_accuracy(brute_model) bias_metrics(class_accuracy_brute_model,brute_model) # In[40]: # In[68]: layer_names = ["block5_conv3","block4_conv2"] for l in layer_names: print("layer name:",l) make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg",brute_model,classified="correct",layer_name=l) make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg",brute_model,classified="correct",layer_name=l) make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg",brute_model,classified="incorrect",layer_name=l) make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg",brute_model,classified="incorrect",layer_name =l) # In[161]: layer_names = ["block5_conv3","block4_conv2"] for l in layer_names: print("layer name:",l) img_path1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg" img_path2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg" img_path3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg" img_path4 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg" img1 = cv2.imread(img_path1) cam1 = grad_cam_pp(brute_model, img1,layer_name=l, label_name=labels,category_id=int(img_path1[-5])) plot(img1,cam1) img2 = cv2.imread(img_path2) cam2 = grad_cam_pp(brute_model, img2,layer_name=l, label_name=labels,category_id=int(img_path2[-5])) plot(img2,cam2) img3 = cv2.imread(img_path3) cam3 = grad_cam_pp(brute_model, img3,layer_name=l, label_name=labels,category_id=int(img_path3[-5])) plot(img3,cam3) img4 = cv2.imread(img_path4) cam4 = grad_cam_pp(brute_model, img4,layer_name=l, label_name=labels,category_id=int(img_path4[-5])) plot(img4,cam4) # In[162]: # In[38]: # In[39]: #preporocess model print("\npreporocess model\n") preprocessed_model = preprocessed_data_model() x_test_preprocessed,y_test_preprocessed = preprocess_helper() preprocessed_model1 = tf.keras.models.load_model("/home/euclid/Desktop/Chiranjeev/DAI/vgg16_cifar10_preprocessed_rot_new") class_accuracy_preprocessed_model1 = class_wise_accuracy_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed) bias_metrics_preprocess(class_accuracy_preprocessed_model1,preprocessed_model1, x_test_preprocessed,y_test_preprocessed) 
create_results_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed) # In[118]: # In[119]: #method model print("/nmethod model/n") kl_model = method_model() class_accuracy_kl_model = class_wise_accuracy(kl_model) bias_metrics(class_accuracy_kl_model,kl_model) create_results(kl_model) # In[120]: print("\npreprocessed model\n") class_accuracy_preprocessed = class_wise_accuracy(preprocessed_model) print("each class accuracies preprocessed",class_accuracy_preprocessed) bias_metrics(class_accuracy_preprocessed,preprocessed_model) print("\nmethod model\n") class_accuracy_method = class_wise_accuracy(kl_model) print("each class accuracies mehtod",class_accuracy_method) bias_metrics(class_accuracy_method,kl_model) # In[187]: # In[200]: print("cross entropy loss") filename1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/categorical_cross_entropy/test_gender_across_race_age_y_test_pred2_optimizer2_45.csv" dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename1) print("\nfocal loss") filename2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/focal_loss/test_gender_across_race_age_y_test_pred2_optimizer2_45_focal_loss.csv" dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename2) print("\nLinearsvm") filename3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/svm/test_gender_across_race_age_y_test_pred2_optimizer2_svm.csv" dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename3)
33.16746
235
0.685304
72045094280bf8b19ef8956f47fe38ea87d738b3
1,027
py
Python
notebooks/general.py
transientlunatic/grasshopper
1d3822427970d200341ff9d2823949fb4b27e001
[ "0BSD" ]
3
2020-09-26T01:27:13.000Z
2020-09-30T05:47:42.000Z
notebooks/general.py
transientlunatic/gravpy
1d3822427970d200341ff9d2823949fb4b27e001
[ "0BSD" ]
null
null
null
notebooks/general.py
transientlunatic/gravpy
1d3822427970d200341ff9d2823949fb4b27e001
[ "0BSD" ]
null
null
null
import numpy as np
import astropy.units as u


def snr(signal, detector):
    """
    Calculate the SNR of a signal in a given detector,
    assuming that it has been detected with an optimal filter.
    See e.g. arxiv.org/abs/1408.0740

    Parameters
    ----------
    signal : Source
        A Source object which describes the source producing the
        signal, e.g. a CBC.
    detector : Detector
        A Detector object describing the instrument making the
        observation e.g. aLIGO.

    Returns
    -------
    SNR : float
        The signal-to-noise ratio of the signal in the detector.
    """
    if signal.ncycles():
        ncycles = np.sqrt(2*signal.ncycles(detector.frequencies))
    else:
        ncycles = 1
    noise = detector.psd(detector.frequencies)
    ampli = signal.raw_strain(detector.frequencies) * ncycles
    fraction = 4*(np.abs(ampli)**2 / noise)
    fraction[np.isnan(fraction)] = 0
    return np.sqrt(np.trapz(fraction, x=detector.frequencies, dx=0.01*u.hertz))
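The snr() docstring above describes an optimal-filter SNR: the square root of the integral of 4|h(f)|^2 / S_n(f) over the detector band. A self-contained numerical sketch of that quadrature with invented PSD and strain arrays (the Source and Detector objects belong to the record's own package and are not reproduced here; the numbers are placeholders, not physical):

import numpy as np

freqs = np.linspace(10.0, 1000.0, 500)          # Hz, toy detector band
psd = np.full_like(freqs, 1e-46)                # toy flat one-sided PSD
hf = 1e-23 * (freqs / 100.0) ** (-7.0 / 6.0)    # toy inspiral-like amplitude

integrand = 4.0 * np.abs(hf) ** 2 / psd         # same integrand snr() builds
snr_value = np.sqrt(np.trapz(integrand, x=freqs))
print(snr_value)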
30.205882
79
0.635833
72082ffdc0eb8ab81095d7d094328792a40cbcea
6,898
py
Python
dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
lin-tan/fairness-variance
7f6aee23160707ffe78f429e5d960022ea1c9fe4
[ "BSD-3-Clause" ]
null
null
null
dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
lin-tan/fairness-variance
7f6aee23160707ffe78f429e5d960022ea1c9fe4
[ "BSD-3-Clause" ]
null
null
null
dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py
lin-tan/fairness-variance
7f6aee23160707ffe78f429e5d960022ea1c9fe4
[ "BSD-3-Clause" ]
null
null
null
'''
Script to plot the accuracy and the fairness measures
for different algorithms from the log files
'''
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import os
print(os.getcwd())
import numpy as np

plt.style.use('ggplot')

if __name__ == "__main__":
    gen_all_plots()
36.691489
81
0.621919
72085eb6f35c638ad1743b5ae7bd6a8de18fc6f3
682
py
Python
conqueror/scraper/base_yandex.py
piotrmaslanka/yandex-conqueror
cd0b50a43e25551f91150e0bee4f9cd307e4adce
[ "MIT" ]
12
2022-03-01T22:45:05.000Z
2022-03-16T05:46:24.000Z
conqueror/scraper/base_yandex.py
piotrmaslanka/yandex-conqueror
cd0b50a43e25551f91150e0bee4f9cd307e4adce
[ "MIT" ]
1
2022-03-02T10:18:05.000Z
2022-03-02T11:03:52.000Z
conqueror/scraper/base_yandex.py
piotrmaslanka/yandex-conqueror
cd0b50a43e25551f91150e0bee4f9cd307e4adce
[ "MIT" ]
1
2022-03-02T10:18:35.000Z
2022-03-02T10:18:35.000Z
import requests

from satella.coding.decorators import retry
28.416667
64
0.692082
72097fdf43f5937088d329748fec0dc61447255f
6,142
py
Python
engine/azbatchengine.py
asedighi/azure_realtime_batch
c2cf4c8edc2bbded8377842fcad6370fd35af44e
[ "MIT" ]
3
2020-05-08T16:20:07.000Z
2021-10-06T11:16:10.000Z
engine/azbatchengine.py
asedighi/azure_realtime_batch
c2cf4c8edc2bbded8377842fcad6370fd35af44e
[ "MIT" ]
null
null
null
engine/azbatchengine.py
asedighi/azure_realtime_batch
c2cf4c8edc2bbded8377842fcad6370fd35af44e
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# @author: asedighi

import asyncio
import sys

sys.path.append('.')
sys.path.append('..')
sys.path.append('/mnt/resource/batch/tasks/shared/')
sys.path.append('/mnt/resource/batch/tasks/shared/engine')
sys.path.append('/mnt/resource/batch/tasks/shared/batchwrapper')
sys.path.append('/mnt/resource/batch/tasks/shared/tasks')

from batchwrapper.config import getRandomizer
from batchwrapper.config import AzureCredentials
from batchwrapper.config import ReadConfig
from batchwrapper.config import TaskConfig
from batchwrapper.config import find_file_path

import argparse
import ntpath

from engine.taskengine import task_loop
from subprocess import *
from azure.storage.blob import BlobServiceClient
from azure.servicebus import ServiceBusClient
import os

if __name__ == '__main__':
    print("Starting engine ...")
    #all_input = sys.argv[1:];
    #data_input = ' '.join(all_input[1:])
    #foo = (all_input[0], data_input)
    #print(foo)
    #exit(1)
    engine = AzureBatchEngine()
    engine.do()
29.38756
137
0.667209
720a41d918f83d5bbf26dfd204b04b9dc1b4ac43
1,090
py
Python
j.py
chirag127/Language-Translator-Using-Tkinter-in-Python
c790a0672c770cf703559d99c74ad581643f4d2f
[ "MIT" ]
null
null
null
j.py
chirag127/Language-Translator-Using-Tkinter-in-Python
c790a0672c770cf703559d99c74ad581643f4d2f
[ "MIT" ]
null
null
null
j.py
chirag127/Language-Translator-Using-Tkinter-in-Python
c790a0672c770cf703559d99c74ad581643f4d2f
[ "MIT" ]
null
null
null
import tkinter as tk
import sys

if __name__ == '__main__':
    while True:
        try:
            print("qiaulfskhdnliukf")
            root = tk.Tk()
            t = tk.Text()
            t.pack()
            # create instance of file like object
            pl = PrintLogger(t)
            # replace sys.stdout with our object
            sys.stdout = pl
            # now we can print to stdout or file
            print('hello world')
            print('hello world')
            root.mainloop()
        except:
            print("exception")
24.772727
82
0.542202
720b01f5be1444386ad583c605e2465546f819c4
2,695
py
Python
byteweiser.py
urbanware-org/byteweiser
fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
[ "MIT" ]
3
2017-11-27T00:35:04.000Z
2017-12-13T22:41:31.000Z
byteweiser.py
urbanware-org/byteweiser
fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
[ "MIT" ]
1
2017-03-08T19:04:49.000Z
2017-03-08T19:04:49.000Z
byteweiser.py
urbanware-org/byteweiser
fc90d17b51ead44af53401dc9c8ca5f0efc5e72e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# ============================================================================
# ByteWeiser - Byte comparison and replacement tool
# Main script
# Copyright (C) 2021 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# GitHub: https://github.com/urbanware-org/byteweiser
# GitLab: https://gitlab.com/urbanware-org/byteweiser
# ============================================================================

import os
import sys

if __name__ == "__main__":
    main()

# EOF
34.551282
78
0.562152
720b83b3d481df1e875ae4b17eade77f3a7f0679
9,798
py
Python
scripts/st_dashboard.py
rsmith49/simple-budget-pld
1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
[ "Apache-2.0" ]
1
2022-01-01T14:44:40.000Z
2022-01-01T14:44:40.000Z
scripts/st_dashboard.py
rsmith49/simple-budget-pld
1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
[ "Apache-2.0" ]
null
null
null
scripts/st_dashboard.py
rsmith49/simple-budget-pld
1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743
[ "Apache-2.0" ]
null
null
null
import altair as alt import os import pandas as pd import streamlit as st import sys from datetime import datetime from dateutil.relativedelta import relativedelta from dotenv import load_dotenv from plaid.api_client import ApiClient from plaid.exceptions import ApiException from pathlib import Path from traceback import format_exc from urllib.error import URLError sys.path.append(os.getcwd()) load_dotenv() from src.budget import Budget from src.transactions import get_transactions_df from src.user_modifications import transform_pipeline from src.views import top_vendors EXISTING_TRANSACTIONS_FILE = f"{Path.home()}/.ry-n-shres-budget-app/all_transactions.csv" TRANSACTION_GRACE_BUFFER = relativedelta(days=10) # How far before latest transaction to pull from def write_df(df: pd.DataFrame): """Helper function to st.write a DF with amount stylized to dollars""" st.dataframe( df.style.format({ col_name: "{:,.2f}" for col_name in ["amount", "Total Spent"] }) ) # TODO: Make non-budgeted columns show up on bar chart, just without ticks # TODO: Make all-time a budget period option (figure out what to do about this - maybe it only shows up for one month?) # TODO: Allow you to set custom start date for your budget period (i.e. make your monthly spending start on the 3rd) # TODO: Fix the duplicate charge issue with pending charges def single_inc_spending_summary(df: pd.DataFrame, date_inc_key: str, curr_date: str, is_current: bool = False) -> None: """Creates display for a single date increment Parameters ---------- df Transactions Dataframe date_inc_key The key for date increment (one of week, month, year) curr_date The selected date increment value is_current Whether the date represents the most recent date increment """ budget = Budget(df) curr_df = df[df[date_inc_key] == curr_date] total_spending_str = f"{curr_df['amount'].sum():,.2f}" if budget.budget_plan: show_budget = st.checkbox("Budget View", value=True) total_budget = budget.total_limit(date_inc_key) if budget.budget_plan and show_budget: metric_col1, metric_col2 = st.columns(2) with metric_col1: st.metric(f"Total Spending", total_spending_str) with metric_col2: st.metric(f"Total Budget", f"{total_budget:,.2f}") simple_summary = budget.simple_summary(date_inc_key, curr_date) bar = alt.Chart(simple_summary).mark_bar().encode( y="category", x="spent", tooltip=alt.Tooltip(field="spent", aggregate="sum", type="quantitative"), ).properties( height=alt.Step(60) ) ticks = alt.Chart(simple_summary).mark_tick( color="red", thickness=3, size=60 * 0.9, ).encode( y="category", x="total_budget", tooltip=alt.Tooltip(field="total_budget", aggregate="sum", type="quantitative") ) if is_current: ticks += alt.Chart(simple_summary).mark_tick( color="white", thickness=2, size=60 * 0.9, ).encode( y="category", x="projected_budget", ) st.altair_chart(bar + ticks, use_container_width=True) else: st.metric(f"Total Spending", total_spending_str) chart = alt.Chart(curr_df).mark_bar().encode( x=alt.X("sum(amount)", axis=alt.Axis(title='Spent')), y=alt.Y("category_1", axis=alt.Axis(title="Category")), tooltip=alt.Tooltip(field="amount", aggregate="sum", type="quantitative"), ).properties( height=alt.Step(40), ) st.altair_chart(chart, use_container_width=True) with st.expander("Largest Transactions"): write_df( curr_df[["date", "amount", "name", "category_1", "category_2"]].sort_values( by="amount", ascending=False ) ) def df_for_certain_categories(df: pd.DataFrame) -> pd.DataFrame: """Helper function to get a DF filtered by any user selected categories""" categories = 
st.multiselect( f"Select any categories to only see spending for", options=sorted(df['category_1'].unique()), default=[], ) if len(categories) > 0: bool_key = df['category_1'] == 'NOT_A CATEGORY' for cat in categories: bool_key = bool_key | (df['category_1'] == cat) df = df[bool_key] return df if __name__ == "__main__": main()
33.101351
119
0.610431
720ee96617fe84100cbf9c9517c56d368835bd2c
16,818
py
Python
scripts/devvnet_manager.py
spmckenney/Devv-Core
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
[ "MIT" ]
null
null
null
scripts/devvnet_manager.py
spmckenney/Devv-Core
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
[ "MIT" ]
null
null
null
scripts/devvnet_manager.py
spmckenney/Devv-Core
eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98
[ "MIT" ]
null
null
null
import yaml import argparse import sys import os import subprocess import time def get_nodes(yml_dict, host_index_map): nodes = [] shard_index = yml_dict['shard_index'] try: host_index_map = yml_dict['host_index_map'] print("Using shard's {} for shard {}".format(host_index_map, shard_index)) except: print("Using devvnet host_index_map ({}) for shard {}".format(host_index_map, shard_index)) for proc in yml_dict['process']: try: print("creating {} {} processes".format(len(host_index_map), proc['name'])) for node_index in host_index_map: node = Node(shard_index, node_index, proc['name'], host_index_map[node_index], proc['bind_port']) try: rawsubs = proc['subscribe'] for sub in proc['subscribe']: try: si = sub['shard_index'] except: si = yml_dict['shard_index'] node.add_raw_sub(sub['name'], si, sub['node_index']) except: pass nodes.append(node) except: nodes.append(Node(shard_index, ind, proc['name'], proc['host'], proc['bind_port'])) print("creating a "+proc['name']+" process") return nodes def run_validator(node): # ./devcash --node-index 0 --config ../opt/basic_shard.conf --config ../opt/default_pass.conf --host-list tcp://localhost:56551 --host-list tcp://localhost:56552 --host-list tcp://localhost:57550 --bind-endpoint tcp://*:56550 cmd = [] cmd.append("./devcash") cmd.extend(["--shard-index", str(node.get_shard_index())]) cmd.extend(["--node-index", str(node.get_index())]) cmd.extend(["--num-consensus-threads", "1"]) cmd.extend(["--num-validator-threads", "1"]) cmd.extend(["--config", node.get_config_file()]) cmd.extend(["--config", node.get_password_file()]) cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())]) for sub in node.get_subscriber_list(): cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())]) return cmd def run_announcer(node): # ./announcer --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-announcer.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --bind-endpoint 'tcp://*:50020' --working-dir ../../tmp/working/input/laminar4/ --key-pass password --separate-ops true cmd = [] cmd.append("./pb_announcer") cmd.extend(["--shard-index", str(node.get_shard_index())]) cmd.extend(["--node-index", str(node.get_index())]) cmd.extend(["--config", node.get_config_file()]) cmd.extend(["--config", node.get_password_file()]) cmd.extend(["--mode", node.get_type()]) cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())]) cmd.extend(["--separate-ops", "true"]) cmd.extend(["--start-delay", str(30)]) cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 100)]) return cmd def run_repeater(node): # ./repeater --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-repeater.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --working-dir ../../tmp/working/output/repeater --host-list tcp://localhost:56550 --key-pass password cmd = [] cmd.append("./repeater") cmd.extend(["--shard-index", str(node.get_shard_index())]) cmd.extend(["--node-index", str(node.get_index())]) cmd.extend(["--num-consensus-threads", "1"]) cmd.extend(["--num-validator-threads", "1"]) cmd.extend(["--mode", node.get_type()]) cmd.extend(["--working-dir", node.get_working_dir()]) cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 200)]) for sub in node.get_subscriber_list(): cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())]) return cmd if __name__ == '__main__': parser = argparse.ArgumentParser(description='Launch a devvnet.') parser.add_argument('--logdir', 
action="store", dest='logdir', help='Directory to log output') parser.add_argument('--start-processes', action="store_true", dest='start', default=True, help='Start the processes') parser.add_argument('--hostname', action="store", dest='hostname', default=None, help='Debugging output') parser.add_argument('--debug', action="store_true", dest='start', default=False, help='Debugging output') parser.add_argument('devvnet', action="store", help='YAML file describing the devvnet') args = parser.parse_args() print(args) print("logdir: " + args.logdir) print("start: " + str(args.start)) print("hostname: " + str(args.hostname)) print("devvnet: " + args.devvnet) devvnet = get_devvnet(args.devvnet) d = Devvnet(devvnet) num_nodes = d.get_num_nodes() logfiles = [] cmds = [] for s in d.get_shards(): for n in s.get_nodes(): if args.hostname and (args.hostname != n.get_host()): continue if n.get_name() == 'validator': cmds.append(run_validator(n)) elif n.get_name() == 'repeater': cmds.append(run_repeater(n)) elif n.get_name() == 'announcer': cmds.append(run_announcer(n)) logfiles.append(os.path.join(args.logdir, n.get_name()+"_s"+ str(n.get_shard_index())+"_n"+ str(n.get_index())+"_output.log")) ps = [] for index,cmd in enumerate(cmds): print("Node " + str(index) + ":") print(" Command: ", *cmd) print(" Logfile: ", logfiles[index]) if args.start: with open(logfiles[index], "w") as outfile: ps.append(subprocess.Popen(cmd, stdout=outfile, stderr=outfile)) time.sleep(1.5) if args.start: for p in ps: print("Waiting for nodes ... ctl-c to exit.") p.wait() print("Goodbye.")
33.171598
276
0.576347
72103568b2899de2bb48ee1f49834b293ab3bb81
5,896
py
Python
run_qasm.py
t-imamichi/qiskit-utility
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
[ "Apache-2.0" ]
6
2019-02-27T11:53:18.000Z
2022-03-02T21:28:05.000Z
run_qasm.py
t-imamichi/qiskit-utility
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
[ "Apache-2.0" ]
null
null
null
run_qasm.py
t-imamichi/qiskit-utility
2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b
[ "Apache-2.0" ]
2
2019-05-03T23:52:03.000Z
2020-12-22T12:12:38.000Z
#!/usr/bin/env python
# coding: utf-8

# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.

'''
This tool submits a QASM file to any backend and shows the result.
It requires 'Qconfig.py' to set a token of IBM Quantum Experience.
It supports the following backends:
ibmqx2(5 qubits), ibmqx4(5 qubits), ibmqx5(16 qubits), simulator(32 qubits).
see https://quantumexperience.ng.bluemix.net/qx/devices
for more details of the backends.

Examples:
$ python run_qasm.py -b             # show backend information
$ python run_qasm.py -c             # show remaining credits
$ python run_qasm.py -l 10          # show job list (10 jobs)
$ python run_qasm.py -j (job id)    # show the result of a job
$ python run_qasm.py -q (qasm file) # submit a qasm file
$ python run_qasm.py -z -l 10       # show job list (10 jobs) of qconsole
$ python run_qasm.py -z -d ibmq_20_tokyo -q (qasm file) # submit a qasm file to ibmq_20_tokyo
'''

import json
import time
from argparse import ArgumentParser
from IBMQuantumExperience import IBMQuantumExperience

try:
    import Qconfig
except ImportError:
    raise RuntimeError('You need "Qconfig.py" with a token in the same directory.')

if __name__ == '__main__':
    main()
39.046358
116
0.608887
72118299f37a0a55a9f0a207024fcdd8ae01fcd7
445
py
Python
alembic/versions/175f5441bd46_adding_usrname_column.py
thiere18/fastapi-boilerplate
6760e0e49caa915563d44897262d493b012207c0
[ "MIT" ]
5
2021-12-10T17:35:31.000Z
2021-12-30T18:36:23.000Z
alembic/versions/175f5441bd46_adding_usrname_column.py
thiere18/fastapi-boilerplate
6760e0e49caa915563d44897262d493b012207c0
[ "MIT" ]
1
2021-11-21T13:59:03.000Z
2021-11-21T13:59:03.000Z
alembic/versions/175f5441bd46_adding_usrname_column.py
thiere18/fastapi-boilerplate
6760e0e49caa915563d44897262d493b012207c0
[ "MIT" ]
1
2021-12-07T14:08:12.000Z
2021-12-07T14:08:12.000Z
"""adding usrname column Revision ID: 175f5441bd46 Revises: 186abcf43cae Create Date: 2021-11-20 22:54:04.157131 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '175f5441bd46' down_revision = '186abcf43cae' branch_labels = None depends_on = None
16.481481
79
0.72809
7211ad9fb739bb9a8cf35bb0752773293df5ab6b
2,356
py
Python
api/teams/models.py
wepickheroes/wepickheroes.github.io
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
[ "MIT" ]
3
2018-02-15T20:04:23.000Z
2018-09-29T18:13:55.000Z
api/teams/models.py
wepickheroes/wepickheroes.github.io
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
[ "MIT" ]
5
2018-01-31T02:01:15.000Z
2018-05-11T04:07:32.000Z
api/teams/models.py
prattl/wepickheroes
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
[ "MIT" ]
null
null
null
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models

from nucleus.models import (
    AbstractBaseModel,
    EmailRecord,
    TeamMember,
)

User = get_user_model()

INVITE_TEMPLATE = """Hello,

You've been invited to join a team on push.gg. Click the link below to sign up:

{signup_link}

- Push League
"""
29.45
94
0.639219
7211e7dcde6526670f3ae011a8fd15606f93b81e
1,826
py
Python
tables_io/lazy_modules.py
LSSTDESC/tables_io
1c2f119c928d05d237b1c8509e340d29650ceb8b
[ "MIT" ]
1
2021-08-13T15:41:58.000Z
2021-08-13T15:41:58.000Z
tables_io/lazy_modules.py
LSSTDESC/tables_io
1c2f119c928d05d237b1c8509e340d29650ceb8b
[ "MIT" ]
18
2021-08-12T00:09:36.000Z
2022-02-24T21:11:18.000Z
tables_io/lazy_modules.py
LSSTDESC/tables_io
1c2f119c928d05d237b1c8509e340d29650ceb8b
[ "MIT" ]
null
null
null
""" Lazy loading modules """ import sys import importlib.util def lazyImport(modulename): """ This will allow us to lazy import various modules Parameters ---------- modulename : `str` The name of the module in question Returns ------- module : `importlib.LazyModule` A lazy loader for the module in question """ try: return sys.modules[modulename] except KeyError: spec = importlib.util.find_spec(modulename) if spec is None: print("Can't find module %s" % modulename) return DeferredModuleError(modulename) module = importlib.util.module_from_spec(spec) loader = importlib.util.LazyLoader(spec.loader) # Make module with proper locking and get it inserted into sys.modules. loader.exec_module(module) try: _ = dir(module) except ValueError: pass return module tables = lazyImport('tables') apTable = lazyImport('astropy.table') fits = lazyImport('astropy.io.fits') h5py = lazyImport('h5py') pd = lazyImport('pandas') pq = lazyImport('pyarrow.parquet') HAS_TABLES = tables is not None HAS_PQ = pq is not None HAS_FITS = fits is not None HAS_ASTROPY = apTable is not None HAS_HDF5 = h5py is not None HAS_PANDAS = pd is not None
26.852941
82
0.645126
721392272e51a8013f6d83d05f9c457dc8ce2f53
4,811
py
Python
print_results.py
MicImbriani/Keras-PRBX
ab9dd8196e6f184336f5b30715635670d3586136
[ "CC0-1.0" ]
1
2021-09-18T12:42:28.000Z
2021-09-18T12:42:28.000Z
print_results.py
MicImbriani/SkinLesion-Segm-Classif-UNet-FocusNet-ResNet50
ab9dd8196e6f184336f5b30715635670d3586136
[ "CC0-1.0" ]
null
null
null
print_results.py
MicImbriani/SkinLesion-Segm-Classif-UNet-FocusNet-ResNet50
ab9dd8196e6f184336f5b30715635670d3586136
[ "CC0-1.0" ]
null
null
null
import numpy as np from keras.optimizers import Adam, SGD from tensorflow.keras.metrics import AUC import metrics from networks.unet_nn import unet from networks.unet_res_se_nn import unet_res_se from networks.focus import get_focusnetAlpha from networks.resnet import get_res from data_processing.generate_new_dataset import generate_targets from tensorflow.keras.applications.resnet50 import preprocess_input ########### SEGMENTATION ########### # U-Net model = unet(batch_norm=False) model.load_weights("/var/tmp/mi714/NEW/models/UNET/unet10/unet10_weights.h5") # U-Net BatchNorm # model = unet(batch_norm=True) # model.load_weights("/var/tmp/mi714/NEW/models/UNET_BN/unet_bn10/unet_bn10_weights.h5") # U-Net Res SE # model = unet_res_se() # model.load_weights("/var/tmp/mi714/NEW/models/UNET_RES_SE/unet_res_se10/unet_res_se10_weights.h5") #Focusnet # model = get_focusnetAlpha() # model.load_weights("/var/tmp/mi714/NEW/models/FOCUS/focusnet10/focusnet10_weights.h5") ########### CLASSIFICATION ########### # model = get_res() # Original # model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_OG/resnet_og10/resnet_og10_weights.h5") # U-Net # model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet10/resnet_unet10_weights.h5") # U-Net BatchNorm # model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet_bn10/resnet_unet_bn10_weights.h5") # Res SE U-Net # model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_RES_SE/resnet_unet_res_se10/resnet_unet_res_se10_weights.h5") # FocusNet # model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_FOCUSNET/resnet_focusnet7/resnet_focusnet7_weights.h5") # Data, Masks & Classification target labels # trainData = np.load('/var/tmp/mi714/test_new_npy2/data.npy') # valData = np.load('/var/tmp/mi714/test_new_npy2/dataval.npy') testData = np.load('/var/tmp/mi714/NEW/npy_dataset/datatest.npy') # Segmentation masks # trainMask = np.load('/var/tmp/mi714/test_new_npy2/dataMask.npy') # valMask = np.load('/var/tmp/mi714/test_new_npy2/dataMaskval.npy') testMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMasktest.npy') ########### SEGMENTATION ########### X = testData y = testMask X = X.astype('float32') y /= 255. 
# scale masks to [0, 1] my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07) model.compile(optimizer=my_adam, loss=metrics.focal_loss, metrics=[metrics.dice_coef_loss, metrics.jaccard_coef_loss, metrics.true_positive, metrics.true_negative, ]) score = model.evaluate(X, y, verbose=1) dice_coef_loss = score[1] jac_indx_loss = score[2] true_positive = score[3] true_negative = score[4] print(f""" RESULTS: Dice Coefficient Loss: {dice_coef_loss} Jaccard Index Loss: {jac_indx_loss} True Positive: {true_positive} True Negative: {true_negative} """) ########### CLASSIFICATION ########### # # Classification data # # x_train = np.concatenate((trainData,)*3, axis=-1) # # x_train = preprocess_input(x_train) # # x_val = np.concatenate((valData,)*3, axis=-1) # # x_val = preprocess_input(x_val) # x_test = np.concatenate((testData,)*3, axis=-1) # x_test = preprocess_input(x_test) # # Classification target labels # path = "/var/tmp/mi714/NEW/aug_dataset/" # # y_train = generate_targets(path + "ISIC-2017_Training_Data", # # path + "ISIC-2017_Training_Part3_GroundTruth.csv") # # y_val = generate_targets(path + "ISIC-2017_Validation_Data", # # path + "ISIC-2017_Validation_Part3_GroundTruth.csv") # y_test = generate_targets(path + "ISIC-2017_Test_v2_Data", # path + "ISIC-2017_Test_v2_Part3_GroundTruth.csv") # X = x_test # y = y_test # my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07) # # Compile model and print summary # rocauc = AUC(num_thresholds=200, # curve="ROC", # summation_method="interpolation", # name=None, # dtype=None, # thresholds=None, # multi_label=False, # label_weights=None, # ) # model.compile(loss='categorical_crossentropy', # optimizer=my_adam, # metrics=[metrics.sensitivity, # metrics.specificity, # rocauc, # 'acc' # ]) # score = model.evaluate(X, y, verbose=1) # binary_ce = score[0] # sensitivity = score[1] # specificity = score[2] # rocauc = score[3] # acc = score[4] # print(f""" # RESULTS: # Binary Cross-Entropy Loss: {binary_ce} # Sensitivity: {sensitivity} # Specificity: {specificity} # AUC ROC: {rocauc} # Accuracy: {acc} # """)
29.335366
129
0.675327
72140b20f916fb997edbec8a00bb1402df3614ca
9,466
py
Python
game.py
distortedsignal/bohnanza
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
[ "Apache-2.0" ]
null
null
null
game.py
distortedsignal/bohnanza
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
[ "Apache-2.0" ]
null
null
null
game.py
distortedsignal/bohnanza
dfbcfafbdd07cb924cbbc2adc36db7e51673e546
[ "Apache-2.0" ]
null
null
null
""" An implementation of Bohnanza @author: David Kelley, 2018 """ import random from collections import defaultdict
34.421818
88
0.555145
72155749ca290c85d0fa365110369fcce2862271
1,872
py
Python
pytype/tests/test_calls.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
11
2017-02-12T12:19:50.000Z
2022-03-06T08:56:48.000Z
pytype/tests/test_calls.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
null
null
null
pytype/tests/test_calls.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
2
2017-06-27T14:41:57.000Z
2021-12-05T11:27:33.000Z
"""Tests for calling other functions, and the corresponding checks.""" from pytype import utils from pytype.tests import test_inference if __name__ == "__main__": test_inference.main()
26.742857
73
0.553953
7216c0aa91d2cb7e990847e2823233ead4e36ab3
724
py
Python
test/test_learning_00.py
autodrive/NAIST_DeepLearning
ac2c0512c43f71ea7df68567c5e24e689ac18aea
[ "Apache-2.0" ]
1
2018-09-26T01:52:35.000Z
2018-09-26T01:52:35.000Z
test/test_learning_00.py
autodrive/NAIST_DeepLearning
ac2c0512c43f71ea7df68567c5e24e689ac18aea
[ "Apache-2.0" ]
5
2015-12-31T10:56:43.000Z
2018-11-16T08:57:12.000Z
test/test_learning_00.py
autodrive/NAIST_DeepLearning
ac2c0512c43f71ea7df68567c5e24e689ac18aea
[ "Apache-2.0" ]
1
2018-09-26T01:52:37.000Z
2018-09-26T01:52:37.000Z
import unittest

import lecture1_code00 as dl
from sklearn.datasets.samples_generator import make_blobs
27.846154
97
0.585635
7217f6133fa71477eb286daa69250fadb04142e7
2,389
py
Python
edumediaitem/views_manage.py
shagun30/djambala-2
06f14e3dd237d7ebf535c62172cfe238c3934f4d
[ "BSD-3-Clause" ]
null
null
null
edumediaitem/views_manage.py
shagun30/djambala-2
06f14e3dd237d7ebf535c62172cfe238c3934f4d
[ "BSD-3-Clause" ]
null
null
null
edumediaitem/views_manage.py
shagun30/djambala-2
06f14e3dd237d7ebf535c62172cfe238c3934f4d
[ "BSD-3-Clause" ]
null
null
null
#-*-coding: utf-8 -*-
"""
/dms/edumediaitem/views_manage.py

.. contains the view for the management display of the media package

Django content Management System

Hans Rauch
hans.rauch@gmx.net

The programs of the dms system may be used freely and adapted
to specific needs.

0.01  11.09.2007  Work started
"""

from django.utils.translation import ugettext as _

from dms.queries import get_site_url
from dms.roles import require_permission
from dms.roles import UserEditPerms
from dms.folder.views_manage import do_manage

from dms_ext.extension import *  # override dms functions

# -----------------------------------------------------
38.532258
95
0.577648
721813c43ddcb76146e7ed608cacf427665451b5
414
py
Python
crop_yield_prediction/models/deep_gaussian_process/__init__.py
facebookresearch/Context-Aware-Representation-Crop-Yield-Prediction
9c29459e9521303f40d9d6aaa938da0c23ab4ad8
[ "MIT" ]
12
2020-09-17T21:55:18.000Z
2022-01-14T21:05:23.000Z
crop_yield_prediction/models/deep_gaussian_process/__init__.py
hulaba/Context-Aware-Representation-Crop-Yield-Prediction
9c29459e9521303f40d9d6aaa938da0c23ab4ad8
[ "MIT" ]
null
null
null
crop_yield_prediction/models/deep_gaussian_process/__init__.py
hulaba/Context-Aware-Representation-Crop-Yield-Prediction
9c29459e9521303f40d9d6aaa938da0c23ab4ad8
[ "MIT" ]
5
2020-10-10T10:18:14.000Z
2021-12-21T07:36:27.000Z
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .feature_engineering import get_features_for_deep_gaussian
from .convnet import ConvModel
from .rnn import RNNModel

__all__ = ['get_features_for_deep_gaussian',
           'ConvModel',
           'RNNModel']
29.571429
65
0.748792
7218bf5a5c0b747c4d1438e666c01d7117d13c58
3,546
py
Python
projector_server/projects/views.py
changyang-liu/Projector
4924400f54e0ce823fd488f9cfd0f7e52d2df55f
[ "MIT" ]
null
null
null
projector_server/projects/views.py
changyang-liu/Projector
4924400f54e0ce823fd488f9cfd0f7e52d2df55f
[ "MIT" ]
2
2021-06-10T19:12:07.000Z
2021-09-22T19:00:50.000Z
projector_server/projects/views.py
CS97-Projector/Projector
4924400f54e0ce823fd488f9cfd0f7e52d2df55f
[ "MIT" ]
null
null
null
from django.http import HttpResponse, JsonResponse, Http404
from django.core.exceptions import PermissionDenied
from rest_framework.parsers import JSONParser, FormParser, MultiPartParser
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import generics
from rest_framework.response import Response
from projects.models import Project
from projects.serializers import ProjectSerializer
from projects.permissions import IsOwnerOrReadOnly
from django.contrib.auth.models import User
38.129032
86
0.683305
721a5ce052e7d21ea063652b0a161c21042f7f06
1,089
py
Python
tests/test_muduapiclient.py
hanqingliu/mudu-api-python-client
92541df27a518dad5312b39749dfbb8bd471a6b8
[ "Apache-2.0" ]
null
null
null
tests/test_muduapiclient.py
hanqingliu/mudu-api-python-client
92541df27a518dad5312b39749dfbb8bd471a6b8
[ "Apache-2.0" ]
null
null
null
tests/test_muduapiclient.py
hanqingliu/mudu-api-python-client
92541df27a518dad5312b39749dfbb8bd471a6b8
[ "Apache-2.0" ]
null
null
null
import ddt
import mock
from unittest import TestCase

from muduapiclient.client import MuduApiClient, gen_signed_params
import time
33
91
0.662994
721b25bd54ec37339248810e92f0fd66777d24b1
586
py
Python
Projectreview/Projectreview/urls.py
bestgunman/Gitwaxingproduct
44c5fdd57aaa87765929e12a828d1cd17cfbbc0d
[ "MIT" ]
1
2017-01-30T07:02:51.000Z
2017-01-30T07:02:51.000Z
Projectreview/Projectreview/urls.py
bestgunman/Gitwaxingproduct
44c5fdd57aaa87765929e12a828d1cd17cfbbc0d
[ "MIT" ]
null
null
null
Projectreview/Projectreview/urls.py
bestgunman/Gitwaxingproduct
44c5fdd57aaa87765929e12a828d1cd17cfbbc0d
[ "MIT" ]
null
null
null
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static

from productapp.views import product_list
from reviewapp.views import index

urlpatterns = [
    url(r'^$', index, name='index'),
    url(r'^admin/', admin.site.urls),
    url(r'^review/', include('reviewapp.urls')),
    url(r'^product/', include('productapp.urls')),
    url(r'^account/', include('accountapp.urls')),
]

if settings.DEBUG:
    urlpatterns += static(
        settings.MEDIA_URL,
        document_root=settings.MEDIA_ROOT
    )
30.842105
61
0.711604
721e9bba1e7ea66054b20c27b7571b65855aeaa1
5,970
py
Python
ttt.py
YukkuriC/PyTicTacToe
c38b330faeb956d82b401e5863c4982f725e5dab
[ "MIT" ]
null
null
null
ttt.py
YukkuriC/PyTicTacToe
c38b330faeb956d82b401e5863c4982f725e5dab
[ "MIT" ]
null
null
null
ttt.py
YukkuriC/PyTicTacToe
c38b330faeb956d82b401e5863c4982f725e5dab
[ "MIT" ]
null
null
null
__doc__ = '''

'''

from threading import Thread
from time import process_time

if 'enums':
    OK = 0  #
    ENDGAME = 1  #
    DRAW = 2  #
    INVALID = -1  # /
    CONFILCT = -2  #
    ERROR = -3  #
    TIMEOUT = -4  #


def match(self):
    """
    :
    """
    self.board = Board()
    timeouts = [self.timeout] * 2
    self.timeout_history = []
    for nround in range(9):  #
        plr_idx = nround % 2
        thread_output = {}
        frame = self.board.get_board(plr_idx + 1)
        thr = Thread(target=self._thread_wrap,
                     args=(self.codes[plr_idx], frame, thread_output))  #
        thr.start()
        thr.join(timeouts[plr_idx])  #
        if thr.is_alive():
            return self._get_result(1 - plr_idx, TIMEOUT, '')  #
        timeouts[plr_idx] -= thread_output['dt']
        if timeouts[plr_idx] < 0:
            return self._get_result(1 - plr_idx, TIMEOUT)
        self.timeout_history.append(timeouts.copy())  #
        if thread_output['error']:
            return self._get_result(
                1 - plr_idx,
                ERROR,
                thread_output['error'],
            )  #
        res = self.board.drop(plr_idx + 1, thread_output['result'])
        if res == OK:  #
            continue
        return self._get_result(
            plr_idx if res == ENDGAME else 1 - plr_idx,
            res,
        )
    return self._get_result(None, DRAW)  #


if __name__ == '__main__':
    import codes.dumb_ordered as plr1, codes.dumb_random as plr2
    game = Game([plr1, plr2])
    print(game.match())
25.512821
78
0.469514
721ec82c86e8517afd6fcd583254496b9ad3500f
400
py
Python
cursoemvideo/ex008.py
rafaelsantosmg/cev_python3
2fa2561b46409bebbd6a2020c60aa8f946fe6244
[ "MIT" ]
1
2021-03-22T03:08:41.000Z
2021-03-22T03:08:41.000Z
cursoemvideo/ex008.py
rafaelsantosmg/cev_python3
2fa2561b46409bebbd6a2020c60aa8f946fe6244
[ "MIT" ]
null
null
null
cursoemvideo/ex008.py
rafaelsantosmg/cev_python3
2fa2561b46409bebbd6a2020c60aa8f946fe6244
[ "MIT" ]
null
null
null
"""Escreva um programa que leia o valor em metros e o exiba convertido em centmetros e milmetros""" from utilidadescev.dado import leia_real n = leia_real('Digite a metragem: ') km = n / 1000 hec = n / 100 dam = n / 10 dec = n * 10 cent = n * 100 mil = n * 1000 print(f'{km:.3f}km') print(f'{hec:.2f}hm') print(f'{dam:.1f}dam') print(f'{dec:.0f}dm') print(f'{cent:.0f}cm ') print(f'{mil:.0f}mm')
22.222222
101
0.6525
72207e110b7ba0434449b56ad831fee21813b6dc
1,015
py
Python
Minor Project/Weather GUI/pyowm_helper.py
ComputerScientist-01/Technocolabs-Internship-Project
3675cc6b9a40a885a29b105ec9b29945a1e4620c
[ "MIT" ]
4
2020-07-08T11:32:29.000Z
2021-08-05T02:54:02.000Z
Minor Project/Weather GUI/pyowm_helper.py
ComputerScientist-01/Technocolabs-Internship-Project
3675cc6b9a40a885a29b105ec9b29945a1e4620c
[ "MIT" ]
null
null
null
Minor Project/Weather GUI/pyowm_helper.py
ComputerScientist-01/Technocolabs-Internship-Project
3675cc6b9a40a885a29b105ec9b29945a1e4620c
[ "MIT" ]
null
null
null
import os
import pyowm
from datetime import datetime

from timezone_conversion import gmt_to_eastern

# API_KEY = os.environ['API_KEY']
owm = pyowm.OWM('0833f103dc7c2924da06db624f74565c')
mgr = owm.weather_manager()

if __name__ == '__main__':
    get_temperature()
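The get_temperature() called in the record's main guard is elided. A sketch of what it might do with the pyowm 3.x weather manager created above — the function body, place name, and unit are assumptions, not the record's real implementation:

def get_temperature(place='New York,US'):
    # hypothetical body: fetch the current observation and read the temperature
    observation = mgr.weather_at_place(place)
    temp = observation.weather.temperature('fahrenheit')['temp']
    print(place, temp)
    return temp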
28.194444
63
0.639409
7221d6876591c7703ef947738f8354cdcf1efa5d
82,647
py
Python
MCDR.py
Qltan/MCDR
ad8abfafcbe19dd50f31fc4122faf3eb633be9d5
[ "MIT" ]
null
null
null
MCDR.py
Qltan/MCDR
ad8abfafcbe19dd50f31fc4122faf3eb633be9d5
[ "MIT" ]
null
null
null
MCDR.py
Qltan/MCDR
ad8abfafcbe19dd50f31fc4122faf3eb633be9d5
[ "MIT" ]
null
null
null
import copy import datetime import json import math import multiprocessing import numpy as np import os import pandas as pd import pydotplus import random import re import time from math import * from sklearn import metrics _CLUSTER_DATA = './bike_sharing_data/mydata' RATEDATA = './bike_sharing_data/mydata/' rateName = 'rental_return_rate_cluster_6_month_678_timedelta_5.json' # STATION_STATUS = './station_status_by_id' ########################### # MCTS algorithm def start(availStations, neighbor, lostNums, visitedPath, cumulativeDis, startStation, balanceNum, mutex, realtimeBikes, day, olderNeighbor): print("start running, the process number is %d" % (os.getpid())) mcts = MCTS(availStations) selectedSta = startStation starttime = 0 rateData = getRateData() station_status, totalDocksDict = getStation_status() # visitedPath = [] # cumulativeDis = [] info = {} visitedPath.append(selectedSta) totalLost = 0 print('start station:' + str(selectedSta)) # lostNums = {} isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(selectedSta, selectedSta, starttime, cumulativeDis, rateData, station_status, totalDocksDict, day) lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost) totalLost += lostNums[str(selectedSta)] info['time'] = starttime info['realbikes'] = realbikes realtimeBikes[str(selectedSta)] = info if int(dropNum) > 0: balanceNum[str(selectedSta)] = -int(dropNum) elif int(pickNum) > 0: balanceNum[str(selectedSta)] = int(pickNum) else: balanceNum[str(selectedSta)] = 0 if isRequest: print('sub-process:pid=%d' % os.getpid()) print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum)) print('customer loss:' + str(lostNums[str(selectedSta)])) print('current time:' + str(starttime) + ' min') print('travel distance:') print(cumulativeDis) # bikeSystem.update(selectedSta) availStations.remove(str(selectedSta)) mcts.fileCount = 0 while 1: lastSta = selectedSta info = {} mutex.acquire() if not len(availStations): print('There are no stations need to be balanced') lostNums['totalLost'] = totalLost mutex.release() break selectedSta = mcts.get_action(lastSta, starttime, neighbor, rateData, station_status, totalDocksDict, day, olderNeighbor) mcts.fileCount += 1 print('through station:' + str(selectedSta)) # bikeSystem.update(selectedSta) availStations.remove(str(selectedSta)) mutex.release() visitedPath.append(selectedSta) isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(lastSta, selectedSta, starttime, cumulativeDis, rateData, station_status, totalDocksDict, day) lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost) totalLost += lostNums[str(selectedSta)] info['time'] = starttime info['realbikes'] = realbikes realtimeBikes[str(selectedSta)] = info if int(dropNum) > 0: balanceNum[str(selectedSta)] = -int(dropNum) elif int(pickNum) > 0: balanceNum[str(selectedSta)] = int(pickNum) else: balanceNum[str(selectedSta)] = 0 if isRequest: print('sub-process:pid=%d' % os.getpid()) print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum)) print('customer loss:' + str(lostNums[str(selectedSta)])) print('current time:' + str(starttime) + ' min') print('travel distance:') print(cumulativeDis) if not len(availStations): print('There are no stations need to be balanced') lostNums['totalLost'] = totalLost break print('****************************************************') if __name__ == '__main__': mctsAlgorithm() # noReposition() # 
staticReposition() # nearestNeihborReposition() # nearestNeihborBaseServiceLevelReposition()
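The start() worker above mutates shared containers (availStations, lostNums, balanceNum, realtimeBikes) under a mutex across processes, which implies multiprocessing.Manager state. A minimal sketch of that wiring with invented names, not taken from the record:

import multiprocessing

def worker(shared_list, shared_dict, mutex):
    with mutex:                          # same role as mutex.acquire()/release()
        if shared_list:
            station = shared_list.pop(0)  # claim one station under the lock
            shared_dict[str(station)] = 0

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    stations = manager.list(['72', '79', '82'])
    balance = manager.dict()
    lock = manager.Lock()
    procs = [multiprocessing.Process(target=worker, args=(stations, balance, lock))
             for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()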
42.77795
121
0.527799
7222707469c1717bc369a16b35dc8703f4ba96c7
4,692
py
Python
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
Vinicius-Tanigawa/Undergraduate-Research-Project
e92372f07882484b127d7affe305eeec2238b8a9
[ "MIT" ]
null
null
null
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
Vinicius-Tanigawa/Undergraduate-Research-Project
e92372f07882484b127d7affe305eeec2238b8a9
[ "MIT" ]
null
null
null
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py
Vinicius-Tanigawa/Undergraduate-Research-Project
e92372f07882484b127d7affe305eeec2238b8a9
[ "MIT" ]
null
null
null
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass # Lithium_Ion_LiFePO4_18650.py # # Created: Feb 2020, M. Clarke # Modified: Sep 2021, R. Erhard # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- # suave imports from SUAVE.Core import Units from .Lithium_Ion import Lithium_Ion # package imports import numpy as np ## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
53.931034
136
0.449915
7222d418de71e1a6408735de5ac964a29e9e3865
83,182
py
Python
motor/__init__.py
globocom/motor
a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e
[ "Apache-2.0" ]
null
null
null
motor/__init__.py
globocom/motor
a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e
[ "Apache-2.0" ]
null
null
null
motor/__init__.py
globocom/motor
a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e
[ "Apache-2.0" ]
null
null
null
# Copyright 2011-2012 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Motor, an asynchronous driver for MongoDB and Tornado.""" import collections import functools import inspect import socket import time import warnings import weakref from tornado import ioloop, iostream, gen, stack_context from tornado.concurrent import Future import greenlet import bson import pymongo import pymongo.auth import pymongo.common import pymongo.database import pymongo.errors import pymongo.mongo_client import pymongo.mongo_replica_set_client import pymongo.son_manipulator from pymongo.pool import _closed, SocketInfo import gridfs from pymongo.database import Database from pymongo.collection import Collection from pymongo.cursor import Cursor, _QUERY_OPTIONS from gridfs import grid_file import util __all__ = ['MotorClient', 'MotorReplicaSetClient', 'Op'] version_tuple = (0, 1, '+') version = get_version_string() """Current version of Motor.""" # TODO: ensure we're doing # timeouts as efficiently as possible, test performance hit with timeouts # from registering and cancelling timeouts HAS_SSL = True try: import ssl except ImportError: ssl = None HAS_SSL = False callback_type_error = TypeError("callback must be a callable") def motor_sock_method(method): """Wrap a MotorSocket method to pause the current greenlet and arrange for the greenlet to be resumed when non-blocking I/O has completed. """ return _motor_sock_method def callback_from_future(future): """Return a callback that sets a Future's result or exception""" return callback def asynchronize(motor_class, sync_method, has_write_concern, doc=None): """Decorate `sync_method` so it accepts a callback or returns a Future. The method runs on a child greenlet and calls the callback or resolves the Future when the greenlet completes. :Parameters: - `motor_class`: Motor class being created, e.g. MotorClient. - `sync_method`: Bound method of pymongo Collection, Database, MongoClient, or Cursor - `has_write_concern`: Whether the method accepts getLastError options - `doc`: Optionally override sync_method's docstring """ # This is for the benefit of motor_extensions.py, which needs this info to # generate documentation with Sphinx. method.is_async_method = True method.has_write_concern = has_write_concern name = sync_method.__name__ if name.startswith('__') and not name.endswith("__"): # Mangle, e.g. Cursor.__die -> Cursor._Cursor__die classname = motor_class.__delegate_class__.__name__ name = '_%s%s' % (classname, name) method.pymongo_method_name = name if doc is not None: method.__doc__ = doc return method def check_delegate(obj, attr_name): if not obj.delegate: raise pymongo.errors.InvalidOperation( "Call open() on %s before accessing attribute '%s'" % ( obj.__class__.__name__, attr_name)) DelegateMethod = ReadOnlyProperty """A method on the wrapped PyMongo object that does no I/O and can be called synchronously""" def _delegate_init_args(self): """Override MotorOpenable._delegate_init_args to ensure auto_start_request is False and _pool_class is MotorPool. 
""" kwargs = self._init_kwargs.copy() kwargs['auto_start_request'] = False kwargs['_pool_class'] = functools.partial(MotorPool, self.io_loop) return self._init_args, kwargs class MotorClient(MotorClientBase): __delegate_class__ = pymongo.mongo_client.MongoClient kill_cursors = AsyncCommand() fsync = AsyncCommand() unlock = AsyncCommand() nodes = ReadOnlyProperty() host = ReadOnlyProperty() port = ReadOnlyProperty() _simple_command = AsyncCommand(attr_name='__simple_command') def __init__(self, *args, **kwargs): """Create a new connection to a single MongoDB instance at *host:port*. :meth:`open` or :meth:`open_sync` must be called before using a new MotorClient. No property access is allowed before the connection is opened. MotorClient takes the same constructor arguments as `MongoClient`_, as well as: :Parameters: - `io_loop` (optional): Special :class:`tornado.ioloop.IOLoop` instance to use instead of default .. _MongoClient: http://api.mongodb.org/python/current/api/pymongo/mongo_client.html """ super(MotorClient, self).__init__( None, kwargs.pop('io_loop', None), *args, **kwargs) def _async_get_socket(self, pool): """Return a ``Future`` that will resolve to a socket.""" # MongoClient passes host and port into the pool for each call to # get_socket. return pool.async_get_socket((self.host, self.port)) class MotorReplicaSetClient(MotorClientBase): __delegate_class__ = pymongo.mongo_replica_set_client.MongoReplicaSetClient primary = ReadOnlyProperty() secondaries = ReadOnlyProperty() arbiters = ReadOnlyProperty() hosts = ReadOnlyProperty() seeds = ReadOnlyProperty() close = DelegateMethod() _simple_command = AsyncCommand(attr_name='__simple_command') def __init__(self, *args, **kwargs): """Create a new connection to a MongoDB replica set. :meth:`open` or :meth:`open_sync` must be called before using a new MotorReplicaSetClient. No property access is allowed before the connection is opened. MotorReplicaSetClient takes the same constructor arguments as `MongoReplicaSetClient`_, as well as: :Parameters: - `io_loop` (optional): Special :class:`tornado.ioloop.IOLoop` instance to use instead of default .. _MongoReplicaSetClient: http://api.mongodb.org/python/current/api/pymongo/mongo_replica_set_client.html """ super(MotorReplicaSetClient, self).__init__( None, kwargs.pop('io_loop', None), *args, **kwargs) def open_sync(self): """Synchronous open(), returning self. Under the hood, this method creates a new Tornado IOLoop, runs :meth:`open` on the loop, and deletes the loop when :meth:`open` completes. """ super(MotorReplicaSetClient, self).open_sync() # We need to wait for open_sync() to complete and restore the # original IOLoop before starting the monitor. self.delegate._MongoReplicaSetClient__monitor.start_motor(self.io_loop) return self def open(self, callback=None): """Actually initialize. Takes an optional callback, or returns a Future that resolves to self when opened. :Parameters: - `callback`: Optional function taking parameters (self, error) """ if callback and not callable(callback): raise callback_type_error if callback: super(MotorReplicaSetClient, self)._open(callback=opened) else: future = Future() # The final callback run from inside opened callback = callback_from_future(future) super(MotorReplicaSetClient, self)._open(callback=opened) return future def _async_get_socket(self, pool): """Return a ``Future`` that will resolve to a socket.""" # MongoReplicaSetClient sets pools' host and port when it creates them. 
return pool.async_get_socket() # PyMongo uses a background thread to regularly inspect the replica set and # monitor it for changes. In Motor, use a periodic callback on the IOLoop to # monitor the set. MotorGridIn.set = asynchronize( MotorGridIn, gridfs.GridIn.__setattr__, False, doc=""" Set an arbitrary metadata attribute on the file. Stores value on the server as a key-value pair within the file document once the file is closed. If the file is already closed, calling `set` will immediately update the file document on the server. Metadata set on the file appears as attributes on a :class:`~MotorGridOut` object created from the file. :Parameters: - `name`: Name of the attribute, will be stored as a key in the file document on the server - `value`: Value of the attribute - `callback`: Optional callback to execute once attribute is set. """) def Op(fn, *args, **kwargs): """Obsolete; here for backwards compatibility with Motor 0.1. Op had been necessary for ease-of-use with Tornado 2 and @gen.engine. But Motor 0.2 is built for Tornado 3, @gen.coroutine, and Futures, so motor.Op is deprecated. """ msg = "motor.Op is deprecated, simply call %s and yield its Future." % ( fn.__name__) warnings.warn(msg, DeprecationWarning, stacklevel=2) result = fn(*args, **kwargs) assert isinstance(result, Future) return result
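The asynchronize() docstring above describes running a synchronous method on a child greenlet and resuming it when non-blocking I/O completes. A minimal greenlet sketch of that pause/resume mechanism — not Motor's actual code, just the primitive it builds on:

import greenlet

def blocking_work():
    parent = greenlet.getcurrent().parent
    parent.switch('paused')   # yield control to the parent, as if waiting on I/O
    return 'done'             # delivered to the parent's second switch()

child = greenlet.greenlet(blocking_work)
print(child.switch())         # -> 'paused'
print(child.switch())         # -> 'done'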
36.936945
118
0.608112
72230a4712ff2722d5fd895c22c3d235aabfdf44
3,544
py
Python
del_dupli_in_fasta.py
ba1/BioParsing
8a0257d4765a7bc86fef7688762abbeaaf3cef07
[ "MIT" ]
1
2017-06-19T15:15:26.000Z
2017-06-19T15:15:26.000Z
del_dupli_in_fasta.py
ba1/BioParsing
8a0257d4765a7bc86fef7688762abbeaaf3cef07
[ "MIT" ]
null
null
null
del_dupli_in_fasta.py
ba1/BioParsing
8a0257d4765a7bc86fef7688762abbeaaf3cef07
[ "MIT" ]
null
null
null
'''
Created on Oct 20, 2015

@author: bardya
'''
import os
import argparse
from Bio import SeqIO

if __name__ == '__main__':
    args = parse_args()

    try:
        inputfile = open(args.infilepath.name, 'r')
        outputfile = open(args.outfilepath.name, 'w')
        # if not os.path.basename(args.outfilepath.name) == "basename":
        #     outputfile = open(args.outfilepath.name, 'w')
        # else:
        #     outputfile = open(os.path.join(os.path.dirname(args.outfilepath.name),os.path.basename(args.infilepath.name) + '_consensus.faa'), 'w')
    except:
        print('IOError occurred')

    seqlst, stats_dict = readfasta(args.infilepath.name, keep_flag=args.keep_flag, rename_flag=args.rename_flag)

    printStats(stats_dict)

    writefasta(outputfile, seqlst)
35.79798
165
0.615406
7224268eb003eeb3fc96967b78416eccf0509110
491
py
Python
lclpy/aidfunc/logging.py
nobody1570/lspy
1cf6efbafbbf8ddb54ba7a875e82c562f010edd1
[ "MIT" ]
3
2021-11-27T22:11:38.000Z
2022-02-10T11:42:06.000Z
lclpy/aidfunc/logging.py
nobody1570/lspy
1cf6efbafbbf8ddb54ba7a875e82c562f010edd1
[ "MIT" ]
null
null
null
lclpy/aidfunc/logging.py
nobody1570/lspy
1cf6efbafbbf8ddb54ba7a875e82c562f010edd1
[ "MIT" ]
null
null
null
def log_improvement(value):
    """function to log improvements to the command line.

    Parameters
    ----------
    value : int or float
        The value for the improvement

    """
    print("Improvement : " + str(value))


def log_passed_worse(value):
    """function to log the passing of worse solutions to the command line.

    Parameters
    ----------
    value : int or float
        The value for the improvement

    """
    print("Passed worse: " + str(value))
18.185185
74
0.592668
72244921b06692f1b6e6b261aabc73be9e8ccb0e
897
py
Python
tests/test_as_class_methods.py
pokidovea/immobilus
42f115a13b4aa060b7ed186e81fe56e1a07c4b2d
[ "Apache-2.0" ]
13
2016-11-26T16:13:11.000Z
2021-12-21T11:10:50.000Z
tests/test_as_class_methods.py
pokidovea/immobilus
42f115a13b4aa060b7ed186e81fe56e1a07c4b2d
[ "Apache-2.0" ]
20
2017-03-06T00:50:22.000Z
2019-08-26T20:12:39.000Z
tests/test_as_class_methods.py
pokidovea/immobilus
42f115a13b4aa060b7ed186e81fe56e1a07c4b2d
[ "Apache-2.0" ]
6
2017-08-28T07:23:54.000Z
2021-12-03T13:03:50.000Z
# https://github.com/pokidovea/immobilus/issues/30
from immobilus import immobilus  # noqa
from immobilus.logic import fake_time, fake_localtime, fake_gmtime, fake_strftime, fake_mktime

from datetime import datetime
18.6875
94
0.723523
722517ddb7cf57ba0cdaeeaa839501f03c9155b4
322
py
Python
plotter/plotter.py
kalinkinisaac/modular
301d26ad222a5ef3278aaf251908e0a8537bb58f
[ "MIT" ]
null
null
null
plotter/plotter.py
kalinkinisaac/modular
301d26ad222a5ef3278aaf251908e0a8537bb58f
[ "MIT" ]
null
null
null
plotter/plotter.py
kalinkinisaac/modular
301d26ad222a5ef3278aaf251908e0a8537bb58f
[ "MIT" ]
null
null
null
import abc
23
67
0.63354
72279efb6ba56531335b2f093691a4196e8f4923
2,531
py
Python
ardupilot/Tools/autotest/param_metadata/wikiemit.py
quadrotor-IITKgp/emulate_GPS
3c888d5b27b81fb17e74d995370f64bdb110fb65
[ "MIT" ]
1
2021-07-17T11:37:16.000Z
2021-07-17T11:37:16.000Z
ardupilot/Tools/autotest/param_metadata/wikiemit.py
arl-kgp/emulate_GPS
3c888d5b27b81fb17e74d995370f64bdb110fb65
[ "MIT" ]
null
null
null
ardupilot/Tools/autotest/param_metadata/wikiemit.py
arl-kgp/emulate_GPS
3c888d5b27b81fb17e74d995370f64bdb110fb65
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import re
from param import *
from emit import Emit

# Emit docs in a form acceptable to the APM wiki site
34.671233
121
0.468589
722ad974ef9283199399d93bbd17a334c7d31249
1,038
py
Python
master.py
iAzurel/thepicturesorter
21a3aee26adcfca0838db63be1434f7c49cd9548
[ "MIT" ]
null
null
null
master.py
iAzurel/thepicturesorter
21a3aee26adcfca0838db63be1434f7c49cd9548
[ "MIT" ]
null
null
null
master.py
iAzurel/thepicturesorter
21a3aee26adcfca0838db63be1434f7c49cd9548
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from PIL import Image
import os, os.path
import cv2
import sys

# Detects faces, then returns the number of faces.
# Moves pictures based on detection of faces.

if __name__ == "__main__":
    main()
23.590909
76
0.716763
722c1e45a1768734b80ecc9c4958f9182d017de1
1,241
py
Python
src/si/supervised/Ensemble.py
pg42862/Sistemas_Inteligentes_para_a_MBIONF
5dd5b487da5b3c6a0989274598c911cc639138a3
[ "Apache-2.0" ]
null
null
null
src/si/supervised/Ensemble.py
pg42862/Sistemas_Inteligentes_para_a_MBIONF
5dd5b487da5b3c6a0989274598c911cc639138a3
[ "Apache-2.0" ]
null
null
null
src/si/supervised/Ensemble.py
pg42862/Sistemas_Inteligentes_para_a_MBIONF
5dd5b487da5b3c6a0989274598c911cc639138a3
[ "Apache-2.0" ]
1
2021-11-15T16:15:50.000Z
2021-11-15T16:15:50.000Z
from.Model import Model import numpy as np
32.657895
140
0.65834
722cd36e985871c8a2f5b1f558365071a7104a73
11,915
py
Python
pygto900.py
marissakotze/timDIMM
dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f
[ "BSD-3-Clause" ]
1
2021-06-06T15:26:36.000Z
2021-06-06T15:26:36.000Z
pygto900.py
marissakotze/timDIMM
dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f
[ "BSD-3-Clause" ]
null
null
null
pygto900.py
marissakotze/timDIMM
dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f
[ "BSD-3-Clause" ]
3
2015-07-29T15:16:35.000Z
2017-12-01T13:02:36.000Z
#!/usr/bin/env python

"""pygto900 contains commands to interface with an astro-physics gto900 mount """

import serial
import io
import sys
import string
import math
import time
from binutils import *
from datetime import datetime
from astropy.coordinates import Angle


def status(g):
    """Check the values for the telescope"""
    ra = g.ra()
    dec = g.dec()
    lst = g.lst()
    ha = Angle('%s hour' % lst) - Angle('%s hour' % ra)
    alt = g.alt()
    az = g.az()
    a = Angle(alt, unit='degree')
    z = airmass(a)
    p = g.pier()
    return ra, dec, ha, lst, alt, az, z, p


def slew(g, ra, dec, niter=100):
    """Slew to a location

    Parameters
    ----------
    ra: astropy.coordinates.Angle
        Right Ascension of source
    dec: astropy.coordinates.Angle
        Declination of source
    niter: int
        Number of loops to attempt if monitoring progress
    """
    g.command_ra(ra.hms[0], ra.hms[1], ra.hms[2])
    g.command_dec(dec.dms[0], dec.dms[1], dec.dms[2])
    g.slew()
    for i in range(niter):
        try:
            r = Angle(g.ra(), unit='hour')
            d = Angle(g.dec(), unit='degree')
        except Exception,e:
            print e
            continue
        dist = ((r.degree - ra.degree)**2 + (d.degree-dec.degree)**2)**0.5
        if dist < 1.0/60.0:
            print 'Done Slewing'
            return
        else:
            print '%5.2f degrees to go until target' % dist
    return


def init(g):
    """Initialize the telescope"""
    print "Initializing mount...."
    g.startup()
    return


def nudge(g, mdir):
    """Nudge the telescope in one direction"""
    g.set_center_rate(10)
    g.move(mdir)
    time.sleep(1)
    g.halt(mdir)
    time.sleep(1)


def usage():
    """Print the usage string"""
    usage_str = """
Usage for pygto900:

python pygto900 [init/status/log/move/nudge/slew/sync/park/help] [optional]
"""
    print usage_str


if __name__ == '__main__':
    if len(sys.argv) < 2:
        usage()
        exit()

    task = sys.argv[1].lower()

    if task in ['help']:
        usage()
        exit()

    with GTO900() as g:
        if task == 'status':
            results = status(g)
            output = "At RA = %s, Dec = %s, HA = %s, LST = %s, Alt = %s, Az = %s, secz = %.2f, on the %s side of the pier" % results
            print output
        elif task == 'log':
            results = status(g)
            print '%s %s %s %s %s %s %.2f %s' % results
        elif task == 'init':
            init(g)
        elif task == 'park':
            g.park_mode()
        elif task == 'park_off':
            g.park_off()
        elif task == 'sync':
            g.sync()
        elif task == 'move':
            g.move(sys.argv[2])
        elif task == 'nudge':
            nudge(g, sys.argv[2])
        elif task == 'slew':
            ra = Angle(sys.argv[2], unit='hour')
            dec = Angle(sys.argv[3], unit='degree')
            slew(g, ra, dec)
        elif task == 'help':
            usage()
        else:
            usage()

    #y=GTO900()
    #print y.ra(), y.dec()
    #y.lst(), y.get_local_time(), y.get_local_date()
    #print y.get_UTC_offset(), y.get_longitude(), y.get_latitude()
    #print y.command_ra(12,01,34)
    #print y.command_dec(-37,01,34)
    #print y.slew()
    #print y.ra(), y.dec()
    #print y.alt(), y.az()
    #print y.move('e')
    #print y.alt(), y.az()
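slew() above polls until the flat-sky distance sqrt(dRA^2 + dDec^2) drops below one arcminute. The same convergence test isolated as a Python 3 sketch (note the record's formula ignores the cos(dec) compression of RA; it is kept as-is here, and the coordinates are illustrative):

from astropy.coordinates import Angle

def on_target(ra_str, dec_str, ra_goal, dec_goal, tol_deg=1.0 / 60.0):
    # same flat-sky distance as slew(); tol_deg is one arcminute in degrees
    r = Angle(ra_str, unit='hour')
    d = Angle(dec_str, unit='degree')
    dist = ((r.degree - ra_goal.degree) ** 2 + (d.degree - dec_goal.degree) ** 2) ** 0.5
    return dist < tol_deg

print(on_target('05:35:17', '-05:23:28',
                Angle('05:35:17', unit='hour'),
                Angle('-05:23:28', unit='degree')))  # True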
24.56701
130
0.544943
7230fd2e2774f3460096d023d321613a2a314e63
2,850
py
Python
webscripts/plotlygraphs.py
KathrynDH/DataDashboard
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
[ "MIT" ]
null
null
null
webscripts/plotlygraphs.py
KathrynDH/DataDashboard
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
[ "MIT" ]
null
null
null
webscripts/plotlygraphs.py
KathrynDH/DataDashboard
1bf61497480f778a1c7cc9ce9fc7fb48b3067606
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 15:56:55 2021

@author: Kathryn Haske

Create plotly graphs for webpage
"""

import pandas as pd
import plotly.graph_objs as go


def line_graph(x_list, df, name_col, y_cols, chart_title, x_label, y_label):
    """
    Function to create plotly line graph

    Args:
        x_list (list): graph x values
        df (Pandas DataFrame): dataframe to use for series and y-values
        name_col (string): df column to use for series names
        y_cols (int or slice object): df column numbers to use for y-values
        chart_title (string): title for chart
        x_label (string): label for x-axis
        y_label (string): label for y-axis

    Returns:
        dictionary for plotly line graph
    """
    graph = []
    for index, row in df.iterrows():
        graph.append(go.Scatter(
            x=x_list,
            y=row.tolist()[y_cols],
            mode='lines',
            name=row[name_col]
        ))
    graph_layout = dict(
        title=chart_title,
        xaxis=dict(title=x_label),
        yaxis=dict(title=y_label),
    )
    return dict(data=graph, layout=graph_layout)


def scatter_plot(x_vals, y_vals, names, chart_title, x_label, y_label):
    """
    Function to create plotly scatter plot

    Args:
        x_vals (list): graph x values
        y_vals (list): graph y values
        names (list of strings): title for each marker
        chart_title (string): title for chart
        x_label (string): label for x-axis
        y_label (string): label for y-axis

    Returns:
        dictionary for plotly scatter plot
    """
    graph = [go.Scatter(
        x=x_vals,
        y=y_vals,
        mode='markers',
        text=names,
        marker=dict(
            color=y_vals,  # set color equal to a variable
            colorscale='Viridis'  # plotly colorscale
        )
    )]
    graph_layout = dict(
        title=chart_title,
        xaxis=dict(title=x_label),
        yaxis=dict(title=y_label),
    )
    return dict(data=graph, layout=graph_layout)


def bar_chart(x_vals, y_vals, chart_title, x_label, y_label):
    """
    Function to create plotly bar graph

    Args:
        x_vals (list): graph x values
        y_vals (list): graph y values
        chart_title (string): title for chart
        x_label (string): label for x-axis
        y_label (string): label for y-axis

    Returns:
        dictionary for plotly bar graph
    """
    graph = [go.Bar(
        x=x_vals,
        y=y_vals
    )]
    graph_layout = dict(
        title=chart_title,
        xaxis=dict(title=x_label),
        yaxis=dict(title=y_label),
    )
    return dict(data=graph, layout=graph_layout)
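# Editor's sketch (not from the source file): one way these helpers are
# typically consumed by a web view -- the bar_chart call uses the real
# signature above; the JSON step uses plotly's PlotlyJSONEncoder, which is a
# standard plotly utility, so a figure dict can be handed to plotly.js.
import json
from plotly.utils import PlotlyJSONEncoder

figure = bar_chart(['a', 'b'], [1, 3], 'Demo', 'category', 'count')
figure_json = json.dumps([figure], cls=PlotlyJSONEncoder)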
27.403846
76
0.567018
72314feeba462045a5c4c66db5b70dc7ce89e3a1
2,505
py
Python
jsl/experimental/seql/agents/bfgs_agent.py
AdrienCorenflos/JSL
8a3ba27179a2bd90207214fccb81df884b05c3d0
[ "MIT" ]
null
null
null
jsl/experimental/seql/agents/bfgs_agent.py
AdrienCorenflos/JSL
8a3ba27179a2bd90207214fccb81df884b05c3d0
[ "MIT" ]
null
null
null
jsl/experimental/seql/agents/bfgs_agent.py
AdrienCorenflos/JSL
8a3ba27179a2bd90207214fccb81df884b05c3d0
[ "MIT" ]
null
null
null
import jax.numpy as jnp
from jax import vmap
from jax.scipy.optimize import minimize

import chex
import typing_extensions
from typing import Any, NamedTuple

import warnings

from jsl.experimental.seql.agents.agent_utils import Memory
from jsl.experimental.seql.agents.base import Agent
from jsl.experimental.seql.utils import posterior_noise, mse

Params = Any


def bfgs_agent(objective_fn: ObjectiveFn = mse,
               model_fn: ModelFn = lambda mu, x: x @ mu,
               obs_noise: float = 0.01,
               buffer_size: int = jnp.inf,
               threshold: int = 1):
    assert threshold <= buffer_size
    memory = Memory(buffer_size)

    return Agent(init_state, update, predict)
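# Editor's sketch (an assumption, not the source): the elided update step of
# this agent presumably minimizes objective_fn with jax's BFGS.  A minimal,
# self-contained version of that idea for a linear model_fn:
import jax.numpy as jnp
from jax.scipy.optimize import minimize

def fit_bfgs_sketch(X, y, mu0):
    # mean-squared error of the linear model mu -> X @ mu, as in model_fn above
    def objective(mu):
        return jnp.mean((X @ mu - y) ** 2)
    result = minimize(objective, mu0, method="BFGS")
    return result.x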
26.09375
68
0.578842
72320fd783db7905693b184e50b586992cf4d02b
2,379
py
Python
abusech/urlhaus.py
threatlead/abusech
6c62f51f773cb17ac6943d87fb697ce1e9dae049
[ "MIT" ]
null
null
null
abusech/urlhaus.py
threatlead/abusech
6c62f51f773cb17ac6943d87fb697ce1e9dae049
[ "MIT" ]
null
null
null
abusech/urlhaus.py
threatlead/abusech
6c62f51f773cb17ac6943d87fb697ce1e9dae049
[ "MIT" ]
null
null
null
from .abusech import AbuseCh
from collections import namedtuple
from datetime import datetime
43.254545
121
0.584279
7232736c521560e15f88e41ebeb5b1e597203059
114
py
Python
ufmt/tests/__init__.py
pmeier/ufmt
29385731d3399d065968921b7502d321acf6faef
[ "MIT" ]
null
null
null
ufmt/tests/__init__.py
pmeier/ufmt
29385731d3399d065968921b7502d321acf6faef
[ "MIT" ]
null
null
null
ufmt/tests/__init__.py
pmeier/ufmt
29385731d3399d065968921b7502d321acf6faef
[ "MIT" ]
null
null
null
# Copyright 2021 John Reese
# Licensed under the MIT license

from .cli import CliTest
from .core import CoreTest
19
32
0.789474
7233257f2eb3efc2be88861adf3e83bb76f78498
442
py
Python
tests/components/eafm/conftest.py
erogleva/core
994ae09f69afe772150a698953c0d7386a745de2
[ "Apache-2.0" ]
2
2021-05-19T19:05:08.000Z
2021-06-06T06:51:05.000Z
tests/components/eafm/conftest.py
erogleva/core
994ae09f69afe772150a698953c0d7386a745de2
[ "Apache-2.0" ]
56
2020-08-03T07:30:54.000Z
2022-03-31T06:02:04.000Z
tests/components/eafm/conftest.py
erogleva/core
994ae09f69afe772150a698953c0d7386a745de2
[ "Apache-2.0" ]
2
2020-12-25T16:31:22.000Z
2020-12-30T20:53:56.000Z
"""eafm fixtures.""" import pytest from tests.async_mock import patch
22.1
84
0.719457
7233678cd98a3bf61296f7c1aa2006b01024a6ac
5,894
py
Python
thorbanks/checks.py
Jyrno42/django-thorbanks
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
[ "BSD-3-Clause" ]
6
2015-06-15T12:47:05.000Z
2019-04-24T01:32:12.000Z
thorbanks/checks.py
Jyrno42/django-thorbanks
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
[ "BSD-3-Clause" ]
13
2015-12-23T14:29:26.000Z
2021-02-18T18:35:56.000Z
thorbanks/checks.py
Jyrno42/django-thorbanks
a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1
[ "BSD-3-Clause" ]
3
2016-08-08T10:35:39.000Z
2020-12-29T23:10:55.000Z
import os

from django.conf import settings
from django.core.checks import Error, register

from thorbanks.settings import configure, parse_banklinks
35.293413
119
0.449779
72349a7b999fcd7724c457b5d3ee54f95ec82969
36,028
py
Python
otk/trains.py
draustin/otk
c6e91423ec79b85b380ee9385f6d27c91f92503d
[ "MIT" ]
7
2020-05-17T14:26:42.000Z
2022-02-14T04:52:54.000Z
otk/trains.py
uamhforever/otk
c6e91423ec79b85b380ee9385f6d27c91f92503d
[ "MIT" ]
17
2020-04-10T22:50:00.000Z
2020-06-18T04:54:19.000Z
otk/trains.py
uamhforever/otk
c6e91423ec79b85b380ee9385f6d27c91f92503d
[ "MIT" ]
1
2022-02-14T04:52:45.000Z
2022-02-14T04:52:45.000Z
"""Defining and analysing axisymmetric optical systems.""" import itertools from functools import singledispatch from dataclasses import dataclass from abc import ABC, abstractmethod from typing import Sequence, Tuple, Mapping import numpy as np import scipy.optimize from . import abcd, paraxial, functions, ri from .functions import calc_sphere_sag # TODO Make Interface composition of Surface and refractive indeices. class Surface(ABC): """An axisymmetric surface between two media. TODO define radius. Is sag constant outside this i.e. is sag(rho) = sag(radius) for rho > radius?""" roc: float radius: float class ConicSurface(Surface): # TODO move to rt1 # def make_profile(self): # return rt1.ConicProfile(self.roc, self.kappa, self.alphas) def calc_sag(self, rho, derivative: bool = False): """Calculate sag of surface. Positive ROC means positive sag. Args: rho: Distance from center. derivative: If True, tuple of sag and its derivative is returned. """ sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, False) if derivative: grad_sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, True) return sag, grad_sag else: return sag # TODO make radius of each segment the outer radius rather than the incremental radius. class SingletSequence: def __init__(self, singlets: Sequence[Singlet], spaces: Sequence[float], n_external: ri.Index = ri.vacuum): """A sequence of singlets in a homogeneous medium with external and internal spaces. The first and last spaces must be included i.e. len(spaces) = len(singlets) + 1. """ assert len(spaces) == len(singlets) + 1 self.singlets = tuple(singlets) self.spaces = tuple(spaces) self.n_external = n_external self.center_length = sum(self.spaces) + sum(s.thickness for s in self.singlets)
39.634763
199
0.601865
723547959ebc4a91f17440d870c4a23f152e86d1
4,705
py
Python
rm_protection/rm_p.py
https-waldoww90-wadewilson-com/rm-protection
4dcc678fa687373fb4439c5c4409f7649e653084
[ "MIT" ]
490
2017-02-03T14:15:50.000Z
2022-03-31T02:57:20.000Z
rm_protection/rm_p.py
https-waldoww90-wadewilson-com/rm-protection
4dcc678fa687373fb4439c5c4409f7649e653084
[ "MIT" ]
8
2017-02-03T16:13:53.000Z
2017-05-28T05:20:45.000Z
rm_protection/rm_p.py
alanzchen/rm-protection
4dcc678fa687373fb4439c5c4409f7649e653084
[ "MIT" ]
41
2017-02-04T15:13:26.000Z
2021-12-19T08:58:38.000Z
from sys import argv, exit
from os.path import expanduser as expu, expandvars as expv
from os.path import basename, dirname, abspath, isdir, exists
from subprocess import Popen, PIPE
from builtins import input

from rm_protection.config import Config

c = Config()
evaledpaths = []


if __name__ == "__main__":
    rm()
30.953947
120
0.530287
72381b6de058125b33932e8f4cd988e19b104ff7
6,856
py
Python
src/text_normalizer/tokenization/_tokenize.py
arkataev/text_normalizer
a99326e31012157980d014c9730ac94bd1d18c1d
[ "MIT" ]
null
null
null
src/text_normalizer/tokenization/_tokenize.py
arkataev/text_normalizer
a99326e31012157980d014c9730ac94bd1d18c1d
[ "MIT" ]
null
null
null
src/text_normalizer/tokenization/_tokenize.py
arkataev/text_normalizer
a99326e31012157980d014c9730ac94bd1d18c1d
[ "MIT" ]
null
null
null
""" """ import logging import re import string from enum import IntEnum from functools import lru_cache from typing import Tuple, Iterator from nltk.corpus import stopwords from nltk.tokenize import ToktokTokenizer from nltk.tokenize.api import TokenizerI from ..config import RegexConfigType, PipelineConfigType, load_regex_conf, load_conf __all__ = [ 'sent_tokenize', 'TokTok', 'token_type', 'to_token', 'TokenType', 'iTokenTuple', 'russian_stopwords', 'replace_bigrams', 'KILO_POSTFIX', 'init_cache', 'cache_clear', 'get_tokenizer' ] logger = logging.getLogger('rtn') # , "" (e.g. 5, 5 ) KILO_POSTFIX = '%' russian_stopwords = stopwords.words("russian") _spaces = string.whitespace _punct = set(f'{string.punctuation}{"=#-``"}{string.whitespace}') _isolating_punct = {'"', "'", '{', '}', '[', ']', '(', ')', '', ''} _synonyms = load_conf(PipelineConfigType.SYNONIMS) _regex_time = load_regex_conf(RegexConfigType.TIME) def sent_tokenize(sentence: str, tokenizer: TokenizerI) -> Iterator[iTokenTuple]: """ :param sentence: :param tokenizer: NLTK-TokenizerI """ return map(to_token, tokenizer.tokenize(sentence)) def token_type(token_string: str) -> TokenType: """ """ if not token_string: return TokenType.NONE if token_string in _spaces: # "in" works faster then calling a method ' '.isspace() return TokenType.SPACE elif token_string in _isolating_punct: return TokenType.PUNKT_ISO elif token_string in _punct: return TokenType.PUNKT elif token_string.isnumeric(): return TokenType.NUM rextype = get_regex_type() type_ = rextype(token_string) if type_ is not TokenType.NONE: return type_ return TokenType.TXT def to_token(token_string: str) -> iTokenTuple: """ >>> to_token('.') ('.', TokenType.PUNKT) >>> to_token('1') ('1', TokenType.NUM) >>> to_token('hello@gmail.com') ('hello@gmail.com', TokenType.EMAIL) :param token_string: """ return token_string, token_type(token_string) def replace_bigrams(tokens: Iterator[iTokenTuple]) -> Iterator[iTokenTuple]: """ . " " "-", . >>> from text_normalizer.tokenization import replace_bigrams >>> replace_bigrams(iter(['', TokenType.TXT), ('', TokenType.TXT)])) ('-', TokenType.TXT) """ crnt = None buffer = [] for token, _type in tokens: crnt, prev = token, crnt synonym = _synonyms.get(f'{crnt}', crnt) if prev: bigram = _synonyms.get(f'{prev} {crnt}') if bigram: buffer[-1] = (bigram, _type) continue buffer.append((synonym, _type)) yield from buffer def init_cache(): get_regex_type() get_tokenizer() logger.debug('Cache initiated') def cache_clear(): get_regex_type.cache_clear() get_tokenizer.cache_clear() logger.debug('Cache cleared')
27.534137
107
0.641774
723871eadc62b9694db68243f51e537122c22e01
299
py
Python
tests/data/test_to_array.py
maki-nage/rxsci
64c9956752cbdd4c65aa9f054b6b28318a056625
[ "MIT" ]
3
2021-05-03T13:40:46.000Z
2022-03-06T07:59:30.000Z
tests/data/test_to_array.py
maki-nage/rxsci
64c9956752cbdd4c65aa9f054b6b28318a056625
[ "MIT" ]
9
2020-10-22T21:08:10.000Z
2021-08-05T09:01:26.000Z
tests/data/test_to_array.py
maki-nage/rxsci
64c9956752cbdd4c65aa9f054b6b28318a056625
[ "MIT" ]
2
2021-01-05T16:48:54.000Z
2021-08-07T12:51:01.000Z
from array import array

import rx
import rxsci as rs
17.588235
54
0.595318
7239365caa1436583482800c75a7cb1d2a4fbe35
18,942
py
Python
pi/los.py
Coding-Badly/Little-Oven
3d1178f495aea1180e25bddbb4f139d8e37e6a65
[ "Apache-2.0" ]
null
null
null
pi/los.py
Coding-Badly/Little-Oven
3d1178f495aea1180e25bddbb4f139d8e37e6a65
[ "Apache-2.0" ]
null
null
null
pi/los.py
Coding-Badly/Little-Oven
3d1178f495aea1180e25bddbb4f139d8e37e6a65
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
"""=============================================================================
  los for Little-Oven.

  los (Little Oven Setup) prepares a Raspberry Pi for Little-Oven development.

  This module does the actual work.  los (no extension) is a bash script that
  creates a service that runs this code.  Running the following puts the whole
  mess in motion...

  curl -s "https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi/los" | bash
  journalctl -u los.service

  ----------------------------------------------------------------------------
  Copyright 2019 Brian Cook (aka Coding-Badly)

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
============================================================================="""
import grp
import json
import os
import pathlib
import pwd
import requests
import stat
import subprocess
import time
import uuid


def wall(text):
    subprocess.run(['wall', text], check=True)


def wall_and_print(text, step=None):
    if step is not None:
        text = 'Step #{}: {}'.format(int(step), text)
    wall(text)
    print(text)


def update_then_upgrade():
    time.sleep(5.0)
    wall('Update the APT package list.')
    subprocess.run(['apt-get', '-y', 'update'], check=True)
    wall('Upgrade APT packages.')
    subprocess.run(['apt-get', '-y', 'upgrade'], check=True)


def simple_get(source_url, destination_path):
    r = requests.get(source_url, stream=True)
    r.raise_for_status()
    with destination_path.open('wb') as f:
        for chunk in r.iter_content(64*1024):
            f.write(chunk)


def check_global_config():
    global global_config
    if path_los_json.exists():
        with path_los_json.open() as f:
            global_config = json.load(f)
    else:
        global_config = dict()


csm = CurrentStepManager()

path_los_json = pathlib.Path('los.json')
check_global_config()

MODE_EXECUTABLE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH

need_reboot = False
go_again = True
while go_again:
    go_again = False
    if csm.get_current_step() == 1:
        wall_and_print('Ensure the operating system is up-to-date.', csm.get_current_step())
        update_then_upgrade()
        need_reboot = True
        csm.increment_current_step()
    elif csm.get_current_step() == 2:
        wall_and_print('Install Git.', csm.get_current_step())
        subprocess.run(['apt-get','-y','install','git'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 3:
        wall_and_print('Install Python development.', csm.get_current_step())
        subprocess.run(['apt-get','-y','install','python3-dev'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 4:
        wall_and_print('Ensure the operating system is up-to-date again.', csm.get_current_step())
        update_then_upgrade()
        need_reboot = True
        csm.increment_current_step()
    elif csm.get_current_step() == 5:
        wall_and_print('Install pip.', csm.get_current_step())
        path_get_pip = pathlib.Path('get-pip.py')
        simple_get('https://bootstrap.pypa.io/get-pip.py', path_get_pip)
        subprocess.run(['python3',str(path_get_pip)], check=True)
        path_get_pip.unlink()
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 6:
        wall_and_print('Install Python modules required by this module.', csm.get_current_step())
        subprocess.run(['pip','install','xkcdpass'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 7:
        wall_and_print('Get the global configuration file.', csm.get_current_step())
        base_url = os.environ.get('LOS_BASE_URL', 'https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi')
        get_this = base_url + '/' + 'los.json'
        try:
            simple_get(get_this, path_los_json)
        except requests.exceptions.HTTPError:
            pass
        check_global_config()
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 8:
        wall_and_print('Set the password using the https://xkcd.com/936/ technique.', csm.get_current_step())
        from xkcdpass import xkcd_password as xp
        wordfile = xp.locate_wordfile()
        mywords = xp.generate_wordlist(wordfile=wordfile, min_length=5, max_length=8)
        new_password = xp.generate_xkcdpassword(mywords, delimiter=',', numwords=3)
        wall_and_print('  The new password is...')
        wall_and_print('    {}'.format(new_password))
        # fix: Send the new password to a repository.
        new_password = 'whatever'  # rmv
        pi_new_password = ('pi:' + new_password).encode('ascii')
        subprocess.run("chpasswd", input=pi_new_password, check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 9:
        wall_and_print('Change the hostname.', csm.get_current_step())
        path_hostname = pathlib.Path('/etc/hostname')
        path_hostname.write_text('Little-Oven\n')
        subprocess.run(['sed','-i',"s/raspberrypi/Little-Oven/",'/etc/hosts'], check=True)
        need_reboot = True
        csm.increment_current_step()
    elif csm.get_current_step() == 10:
        wall_and_print('Change the timezone.', csm.get_current_step())
        # Why localtime has to be removed...
        # https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1554806
        # date "+%Z %z"
        pathlib.Path('/etc/timezone').write_text('America/Chicago\n')
        pathlib.Path('/etc/localtime').unlink()
        subprocess.run(['dpkg-reconfigure','-f','noninteractive','tzdata'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 11:
        wall_and_print('Change the keyboard layout.', csm.get_current_step())
        # debconf-get-selections | grep keyboard-configuration
        # The top entry is suspect.  "gb" was the value after changing
        # keyboards using dpkg-reconfigure.
        keyboard_conf = """
keyboard-configuration\tkeyboard-configuration/xkb-keymap\tselect\tus
keyboard-configuration\tkeyboard-configuration/layoutcode\tstring\tus
keyboard-configuration\tkeyboard-configuration/layout\tselect\tEnglish (US)
keyboard-configuration\tkeyboard-configuration/variant\tselect\tEnglish (US)
""".encode("ascii")
        subprocess.run("debconf-set-selections", input=keyboard_conf, check=True)
        subprocess.run(['dpkg-reconfigure','-f','noninteractive','keyboard-configuration'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 12:
        wall_and_print('Change the locale.', csm.get_current_step())
        # locale
        locale_conf = """
locales\tlocales/locales_to_be_generated\tmultiselect\ten_US.UTF-8 UTF-8
locales\tlocales/default_environment_locale\tselect\ten_US.UTF-8
""".encode("ascii")
        subprocess.run("debconf-set-selections", input=locale_conf, check=True)
        subprocess.run(['sed','-i',"s/^# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/",'/etc/locale.gen'], check=True)
        subprocess.run(['dpkg-reconfigure','-f','noninteractive','locales'], check=True)
        subprocess.run(['update-locale','LANG=en_US.UTF-8'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 13:
        wall_and_print('Configure Git.', csm.get_current_step())
        this_mac = format(uuid.getnode(), 'X')
        config_by_this_mac = global_config.get(this_mac, None)
        config_github = config_by_this_mac.get('github', None) if config_by_this_mac else None
        if config_github:
            # Set basic Git configuration.
            git_user_name = config_github.get('user.name', 'Git User Name Goes Here')
            git_user_email = config_github.get('user.email', 'whomever@dallasmakerspace.org')
            git_core_editor = config_github.get('core.editor', 'nano')
            subprocess.run(['git','config','--system','user.name',git_user_name], check=True)
            subprocess.run(['git','config','--system','user.email',git_user_email], check=True)
            subprocess.run(['git','config','--system','core.editor',git_core_editor], check=True)
            # Ensure the .ssh directory exists.
            path_dot_ssh = pathlib.Path('/home/pi/.ssh')
            # https://superuser.com/questions/215504/permissions-on-private-key-in-ssh-folder
            dm = DirectoryMaker()
            dm.mkdir(path_dot_ssh)
            # Add a Github section to the .ssh/config file.
            path_ssh_config = path_dot_ssh / 'config'
            with path_ssh_config.open('at') as f:
                f.write('Host github.com\n')
                f.write('  User git\n')
                f.write('  Hostname github.com\n')
                f.write('  PreferredAuthentications publickey\n')
                f.write('  IdentityFile ~/.ssh/github/id_rsa\n')
            dm.chown(path_ssh_config)
            # Create a github subdirectory for the Github key pair.
            path_github = path_dot_ssh / 'github'
            dm.mkdir(path_github)
            # Generate the Github key pair.
            path_id_rsa = path_github / 'id_rsa'
            # ssh-keygen -t rsa -C "arduino.tiny@gmail.com" -b 1024 -N '' -f ~/.ssh/github/id_rsa
            subprocess.run(['ssh-keygen','-t','rsa','-C',git_user_email,'-b','4096','-N','','-f',str(path_id_rsa)], check=True)
            dm.chown(path_id_rsa)
            dm.chown(path_id_rsa.with_suffix('.pub'))
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 14:
        # wall_and_print('Install PiFace Digital 2 packages from GitHub.', csm.get_current_step())
        # # Common
        # subprocess.run(['git','clone','git://github.com/piface/pifacecommon.git','/home/pi/python-things/pifacecommon'], check=True)
        # subprocess.run(['python3','/home/pi/python-things/pifacecommon/setup.py','install'], cwd='/home/pi/python-things/pifacecommon/', check=True)
        # #subprocess.run(['rm','-rf','/home/pi/python-things/pifacecommon'], check=True)
        # # Digital I/O
        # subprocess.run(['git','clone','git://github.com/piface/pifacedigitalio.git','/home/pi/python-things/pifacedigitalio'], check=True)
        # subprocess.run(['python3','/home/pi/python-things/pifacedigitalio/setup.py','install'], cwd='/home/pi/python-things/pifacedigitalio/', check=True)
        # #subprocess.run(['rm','-rf','/home/pi/python-things/pifacedigitalio'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 15:
        # wall_and_print('Install python-dispatch package from GitHub.', csm.get_current_step())
        # subprocess.run(['git','clone','https://github.com/Coding-Badly/python-dispatch.git','/home/pi/python-things/python-dispatch'], check=True)
        # subprocess.run(['python3','/home/pi/python-things/python-dispatch/setup.py','install'], cwd='/home/pi/python-things/python-dispatch/', check=True)
        # #subprocess.run(['rm','-rf','/home/pi/python-dispatch'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 16:
        wall_and_print('Clone the Little Oven.', csm.get_current_step())
        # git clone git@github.com:Coding-Badly/Little-Oven.git /home/pi/Little-Oven
        # git clone https://github.com/Coding-Badly/Little-Oven.git /home/pi/Little-Oven
        subprocess.run(['git','clone','https://github.com/Coding-Badly/Little-Oven.git','/home/pi/Little-Oven'], check=True)
        try:
            subprocess.run(['git','checkout','-t','remotes/origin/master'], cwd='/home/pi/Little-Oven', stderr=subprocess.PIPE, check=True)
        except subprocess.CalledProcessError as exc:
            if not "already exists" in exc.stderr.decode("utf-8"):
                raise
        # Change the remote url to use ssh.
        # git remote set-url origin git@github.com:Coding-Badly/Little-Oven.git
        subprocess.run(['git','remote','set-url','origin','git@github.com:Coding-Badly/Little-Oven.git'], cwd='/home/pi/Little-Oven', check=True)
        # Use pip to install dependencies.
        path_requirements = pathlib.Path('/home/pi/Little-Oven/requirements.txt')
        if path_requirements.exists():
            subprocess.run(['pip','install','-U','-r',str(path_requirements)], check=True)
        # Fix ownership of the Little-Oven repository.
        subprocess.run(['chown','-R','pi:pi','/home/pi/Little-Oven'], check=True)
        # Prepare the cache directory.
        dm = DirectoryMaker(default_final_mode=0o755)
        path_cache = pathlib.Path('/var/cache/Rowdy Dog Software/Little-Oven/pans')
        dm.mkdir(path_cache, parents=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 17:
        # wall_and_print('Install PiFace Digital 2 initialization service.', csm.get_current_step())
        # subprocess.run(['cp','/home/pi/Little-Oven/pi/init_PiFace_Digital_2.service','/etc/systemd/system/init_PiFace_Digital_2.service'], check=True)
        # subprocess.run(['systemctl','enable','init_PiFace_Digital_2.service'], check=True)
        # need_reboot = True
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 18:
        wall_and_print('Configure Rust to be easily installed.', csm.get_current_step())
        # Download rustup.sh to a common location and make it Read + Execute
        # for everyone.  Writable for the owner (root).
        path_rustup_sh = pathlib.Path('/usr/local/bin/rustup.sh')
        simple_get('https://sh.rustup.rs', path_rustup_sh)
        path_rustup_sh.chmod(MODE_EXECUTABLE)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 19:
        wall_and_print('Install FUSE (support for VeraCrypt).', csm.get_current_step())
        subprocess.run(['apt-get','-y','install','fuse'], check=True)
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 20:
        wall_and_print('Configure VeraCrypt to be easily installed.', csm.get_current_step())
        # Prepare a directory for the VeraCrypt files.
        dm = DirectoryMaker(default_final_mode=0o755)
        path_temp = pathlib.Path('./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW')
        dm.mkdir(path_temp, parents=True)
        # Download the install script
        path_tar_bz2 = path_temp / 'veracrypt-setup.tar.bz2'
        simple_get('https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2', path_tar_bz2)
        # Extract the contents
        subprocess.run(['tar','xvfj',str(path_tar_bz2),'-C',str(path_temp)], check=True)
        path_src = path_temp / 'veracrypt-1.21-setup-console-armv7'
        path_dst = pathlib.Path('/usr/local/bin/veracrypt-setup')
        # Copy the console setup to a location on the PATH
        subprocess.run(['cp',str(path_src),str(path_dst)], check=True)
        # Remove the temporary directory
        subprocess.run(['rm','-rf',str(path_temp)], check=True)
        # Run the install script
        #subprocess.run(['bash',str(path_setup),'--quiet'], check=True)
        # mkdir veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
        # wget --output-document=./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2
        # tar xvfj ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 -C ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
        # ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --check
        # ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --quiet
        # rm -rf veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
        go_again = True
        csm.increment_current_step()
    elif csm.get_current_step() == 21:
        wall_and_print('Check for Rust and VeraCrypt after login.', csm.get_current_step())
        # Write the following to /etc/profile.d/check_for_rust_and_veracrypt.sh and make it
        # executable.
        check_for_rust_and_veracrypt = """#!/bin/bash
if [ ! -e $HOME/.cargo ]; then
    rustup.sh -y
fi
if ! command -v veracrypt; then
    veracrypt-setup
fi
"""
        path_check_for = pathlib.Path('/etc/profile.d/check_for_rust_and_veracrypt.sh')
        path_check_for.write_text(check_for_rust_and_veracrypt)
        path_check_for.chmod(MODE_EXECUTABLE)
        go_again = True
        csm.increment_current_step()
    #elif csm.get_current_step() == 20:
    #    wall_and_print('One last reboot for good measure.', csm.get_current_step())
    #    need_reboot = True
    #    csm.increment_current_step()
    # fix: Configure Little-Oven to automatically run on boot.
    else:
        wall_and_print('Little-Oven installed.  Disabling the los service.')
        subprocess.run(['systemctl','disable','los.service'], check=True)

if need_reboot:
    wall_and_print('REBOOT!')
    time.sleep(5.0)
    subprocess.run(['reboot'], check=True)
50.244032
184
0.658853
72395e4c87b9f1d8044b25a00bbab7ea6fe4633a
337
py
Python
tests/graphql/readme_forum/readme_forum_permissions/models.py
karlosss/simple_api
03f87035c648f161d5e7a59b24f4e04bd34399f1
[ "MIT" ]
2
2020-11-13T14:00:06.000Z
2020-12-19T11:50:22.000Z
tests/graphql/readme_forum/readme_forum_permissions/models.py
ladal1/simple_api
1b5d560476bccad9f68a7331d092dbdb68c48bf7
[ "MIT" ]
5
2021-02-04T14:27:43.000Z
2021-06-04T23:22:24.000Z
tests/graphql/readme_forum/readme_forum_permissions/models.py
ladal1/simple_api
1b5d560476bccad9f68a7331d092dbdb68c48bf7
[ "MIT" ]
1
2021-01-06T13:54:38.000Z
2021-01-06T13:54:38.000Z
from django.contrib.auth.models import User
from django.db.models import Model, CharField, TextField, ForeignKey, CASCADE
28.083333
77
0.712166
723b9095a8d15e2c9c1b3f5d5be4c81a6f6e858e
2,304
py
Python
streamlit_app.py
fhebal/nlp-medical-notes
f1fed9e34ba47da14220b5719f28c1e720302f45
[ "MIT" ]
null
null
null
streamlit_app.py
fhebal/nlp-medical-notes
f1fed9e34ba47da14220b5719f28c1e720302f45
[ "MIT" ]
null
null
null
streamlit_app.py
fhebal/nlp-medical-notes
f1fed9e34ba47da14220b5719f28c1e720302f45
[ "MIT" ]
null
null
null
import streamlit as st
import yaml
from load_css import local_css
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
import numpy as np
from random import sample
import os

local_css("style.css")

prediction_key = {
    0: 'Gastroenterology',
    1: 'Neurology',
    2: 'Orthopedic',
    3: 'Radiology',
    4: 'Urology'
}

# Load model from file
model = tf.keras.models.load_model('/home/muody/saved_model/my_model', compile=False)

# load data
if st.button("New Text Sample"):
    main()
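# Editor's sketch: main() is defined elsewhere in this app and is not shown
# here.  A hypothetical minimal version consistent with the names above --
# the sample note and the model's input signature are assumptions:
def main_sketch():
    note = "Patient presents with flank pain..."   # hypothetical sample note
    probs = model.predict([note])                  # assumes the SavedModel accepts raw strings
    st.write(prediction_key[int(np.argmax(probs))])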
29.538462
126
0.647569
723d7e8a6d6158d63e1b5536dbcf3fd946d29dec
2,440
py
Python
tests/test_functions.py
aerial-defence/pytak
e20c2dedfee88489bf21ad931970c2cb982d72ed
[ "Apache-2.0" ]
null
null
null
tests/test_functions.py
aerial-defence/pytak
e20c2dedfee88489bf21ad931970c2cb982d72ed
[ "Apache-2.0" ]
null
null
null
tests/test_functions.py
aerial-defence/pytak
e20c2dedfee88489bf21ad931970c2cb982d72ed
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Python Team Awareness Kit (PyTAK) Module Tests."""

import asyncio
import urllib

import pytest

import pytak

__author__ = 'Greg Albrecht W2GMD <oss@undef.net>'
__copyright__ = 'Copyright 2022 Greg Albrecht'
__license__ = 'Apache License, Version 2.0'
29.047619
73
0.702049
723d802339794483e5614abac1a27413e8db4aa8
1,054
py
Python
tests/test_aoc_day_02.py
ladokp/AdventOfCode2021
03f8b9f8579ae562d5f2784a131370a32ed19f8b
[ "BSD-2-Clause" ]
null
null
null
tests/test_aoc_day_02.py
ladokp/AdventOfCode2021
03f8b9f8579ae562d5f2784a131370a32ed19f8b
[ "BSD-2-Clause" ]
null
null
null
tests/test_aoc_day_02.py
ladokp/AdventOfCode2021
03f8b9f8579ae562d5f2784a131370a32ed19f8b
[ "BSD-2-Clause" ]
null
null
null
# test_aoc_day_02.py

import pytest
import solution.aoc_day_02 as aoc


def test_parse_test_solution(test_solution):
    """Test that input is parsed properly"""
    assert test_solution.data == [
        ("forward", 5),
        ("down", 5),
        ("forward", 8),
        ("up", 3),
        ("down", 8),
        ("forward", 2),
    ]


def test_part1_test_solution(test_solution):
    """Test part 1 on example input"""
    assert test_solution.part1() == 150


def test_part2_test_solution(test_solution):
    """Test part 2 on example input"""
    assert test_solution.part2() == 900


def test_part1_exercise_solution(exercise_solution):
    """Test part 1 on exercise_solution input"""
    assert exercise_solution.part1() == 1383564


def test_part2_exercise_solution(exercise_solution):
    """Test part 2 on exercise_solution input"""
    assert exercise_solution.part2() == 1488311643
22.425532
52
0.683112
723e3c60c657572c4703c5d71bdcbccb656fe914
18,265
py
Python
src/elora/elora.py
morelandjs/elora
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
[ "MIT" ]
1
2021-07-26T20:36:32.000Z
2021-07-26T20:36:32.000Z
src/elora/elora.py
morelandjs/elora
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
[ "MIT" ]
null
null
null
src/elora/elora.py
morelandjs/elora
e902c40d66b0bf95a8d2374afa0cc165b87c9b82
[ "MIT" ]
null
null
null
from operator import add, sub

import numpy as np
from scipy.stats import norm
36.750503
80
0.594854
723f049018f7dbaf5f55c465cf88ce5aa8c8ec4d
48
py
Python
atcoder/abc179/a.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
atcoder/abc179/a.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
atcoder/abc179/a.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
s = input()
print(s + 's' if s[-1] != 's' else s + 'es')
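# Editor's sketch of the same rule as a function, with sample cases: the task
# appends "s" unless the word already ends in "s", in which case it appends "es".
def pluralize(s):
    return s + 'es' if s.endswith('s') else s + 's'

assert pluralize('apple') == 'apples'
assert pluralize('bus') == 'buses'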
24
38
0.520833
723fcadfa719088f86b59d8093c6f9655d115794
48,147
py
Python
steady_cell_phenotype/poly.py
knappa/steadycellphenotype
b033f01ebc1fa062d310296f19f2f11b484cb557
[ "MIT" ]
1
2021-12-13T22:20:19.000Z
2021-12-13T22:20:19.000Z
steady_cell_phenotype/poly.py
knappa/steadycellphenotype
b033f01ebc1fa062d310296f19f2f11b484cb557
[ "MIT" ]
5
2021-04-07T01:47:19.000Z
2021-11-17T01:46:19.000Z
steady_cell_phenotype/poly.py
knappa/steadycellphenotype
b033f01ebc1fa062d310296f19f2f11b484cb557
[ "MIT" ]
null
null
null
from __future__ import annotations

import operator
from enum import Enum
from itertools import product
from typing import Dict, Union

import numpy as np


####################################################################################################


def h(x, fx):
    """helper function as in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24"""
    fx = fx % 3
    x = x % 3
    if fx > x:
        return x + 1
    elif fx < x:
        return x - 1
    else:
        return x


####################################################################################################
# monomial and sparse polynomial classes. These should be faster than the sympy versions due to
# their reduced scope.
####################################################################################################

####################################################################################################


def rename_helper(expression: Union[Expression, int], name_dict: Dict[str, str]):
    if is_integer(expression):
        return expression
    else:
        return expression.rename_variables(name_dict=name_dict)


####################################################################################################
# actions on expressions, suitable for conversion to polynomial form. Not best for simulator.
####################################################################################################

####################################################################################################

    __rmul__ = __mul__

    def as_poly(self):
        """converts this monomial to a polynomial with only one term"""
        return Mod3Poly({self: 1})

    __repr__ = __str__

    # def as_sympy(self):
    #     # sympy empty product is 1, consistent with power_dict
    #     return sympy.prod([sympy.Symbol(var, integer=True) ** pow
    #                        for var, pow in self._power_dict.items()])
    # # Fun fact: sympy doesn't recognize Symbol(var) and Symbol(var, integer=True) to be the same


####################################################################################################

    def eval(self, variable_dict):
        """evaluates the polynomial. variable_dict is expected to be a dict containing
        str:Expression or Monomial:Expression pairs. The latter are constrained to be of
        single-variable type."""
        if type(variable_dict) != dict:
            raise Exception("Mod3Poly.eval is not defined on this input")
        accumulator = Mod3Poly.zero()
        for monomial, coeff in self.coeff_dict.items():
            accumulator += coeff * monomial.eval(variable_dict)
        return accumulator

    def get_variable_set(self):
        """return a set containing all variables which occur in this polynomial"""
        var_set = set()
        for monomial in self.coeff_dict:
            var_set = var_set.union(monomial.get_variable_set())
        return var_set

    def __clear_zero_monomials(self):
        """purge unneeded data"""
        self.coeff_dict = {monomial: self.coeff_dict[monomial]
                           for monomial in self.coeff_dict
                           if self.coeff_dict[monomial] != 0}
        # assure at least one entry
        if len(self.coeff_dict) == 0:
            self.coeff_dict = {Monomial.unit(): 0}

    __radd__ = __add__
    __rmul__ = __mul__
    __repr__ = __str__

    # def as_sympy(self):
    #     return sum([coeff * expr.as_sympy() for expr, coeff in self.coeff_dict.items()])
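# Editor's note: h() nudges a ternary (mod-3) state one step toward its target
# value fx, which is how the cited PLoS update scheme damps state changes.
# Worked cases:
assert h(0, 2) == 1   # moves up one step toward 2
assert h(2, 0) == 1   # moves down one step toward 0
assert h(1, 1) == 1   # already at the target: unchanged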
38.985425
117
0.577897
72404631e2e0ae2fb28f9c18c6b107f7f88a83f4
23,165
py
Python
django_tidb/features.py
killuminatixhr/django-tidb
8de093dd7242fc70a5b9b5240711bef00722ff03
[ "Apache-2.0" ]
17
2021-07-30T17:02:53.000Z
2021-12-10T02:28:59.000Z
django_tidb/features.py
killuminatixhr/django-tidb
8de093dd7242fc70a5b9b5240711bef00722ff03
[ "Apache-2.0" ]
7
2021-08-02T09:56:27.000Z
2022-03-23T03:36:07.000Z
django_tidb/features.py
killuminatixhr/django-tidb
8de093dd7242fc70a5b9b5240711bef00722ff03
[ "Apache-2.0" ]
6
2021-07-30T10:04:15.000Z
2022-03-29T05:44:37.000Z
# Copyright 2021 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import operator

from django.db.backends.mysql.features import (
    DatabaseFeatures as MysqlDatabaseFeatures,
)
from django.utils.functional import cached_property
55.286396
118
0.699763
72404d3d39210b175e825c5b94b9e21a7e2698f1
421
py
Python
src/combine_npy.py
hongli-ma/RNANetMotif
34b4de443ec7edb59f4e4e06b17686543c438366
[ "MIT" ]
null
null
null
src/combine_npy.py
hongli-ma/RNANetMotif
34b4de443ec7edb59f4e4e06b17686543c438366
[ "MIT" ]
null
null
null
src/combine_npy.py
hongli-ma/RNANetMotif
34b4de443ec7edb59f4e4e06b17686543c438366
[ "MIT" ]
null
null
null
import numpy as np
import sys
import glob

rbp = sys.argv[1]
kmer = sys.argv[2]

pfile_list = glob.glob("result_VDM3_" + rbp + "_positive_" + kmer + "_*.npy")
pfile1 = np.load(pfile_list[0])
psha = np.shape(pfile1)
pmatrix = np.zeros(psha)
for pfile in pfile_list:
    file = np.load(pfile)
    # file = np.fromfile(pfile, dtype=np.float32)
    pmatrix += file
np.save("positive_" + rbp + "_vdm3_nopaircontrol_distance_matrix_" + kmer + "mer.npy", pmatrix)
23.388889
86
0.750594
7241a2c99b3dfd4732a6af0ad6cf19b2e1c6a517
1,238
py
Python
fgvcdata/__init__.py
catalys1/fgvc-data-pytorch
e2666d011c71308c4975776fbc41e947424f0723
[ "MIT" ]
4
2020-07-05T10:19:20.000Z
2021-09-15T08:22:36.000Z
fgvcdata/__init__.py
catalys1/fgvc-data-pytorch
e2666d011c71308c4975776fbc41e947424f0723
[ "MIT" ]
1
2020-11-13T22:01:47.000Z
2020-11-13T22:01:47.000Z
fgvcdata/__init__.py
catalys1/fgvc-data-pytorch
e2666d011c71308c4975776fbc41e947424f0723
[ "MIT" ]
null
null
null
'''A common interface to FGVC datasets.

Currently supported datasets are
 - CUB Birds
 - CUB Birds with expert labels
 - NA Birds
 - Stanford Cars
 - Stanford Dogs
 - Oxford Flowers
 - Oxford FGVC Aircraft
 - Tsinghua Dogs

Datasets are constructed and used following the pytorch
data.utils.data.Dataset paradigm, and have the signature

    fgvcdata.Dataset(root='path/to/data/'[,transform[,target_transform[,train]]])

`root` is the path to the base folder for the dataset. Additionally, `root`
can end in `/train` or `/test`, to indicate whether to use train or test data
-- even if the root folder does not contain `train` or `test` subfolders. The
use of training or test data can also be specified through the use of the
`train` flag (the path extension on `root` takes precedence).

`transform` and `target_transform` are optional callables that preprocess
data and targets respectively. It is common to use the torchvision.transforms
module for this.
'''
from .birds import *
from .cars import *
from .dogs import *
from .aircraft import *
from .flowers import *
from .icub import *

IMAGENET_STATS = ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

datasets = []
for f in [birds, icub, cars, dogs, aircraft, flowers]:
    datasets += f.__all__
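# Editor's sketch of the documented signature, assuming the birds module
# exports a CUB dataset class (the exported names are not visible in this
# excerpt, so treat CUB as a placeholder):
from torchvision import transforms
import fgvcdata

tf = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(*fgvcdata.IMAGENET_STATS),  # unpacks to (mean, std)
])
data = fgvcdata.CUB(root='/path/to/data/train', transform=tf)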
29.47619
79
0.747981
72422e4892bc1b2767ffe8812f2e9e5e44e84b64
10,491
py
Python
app/auth/api.py
Anti-Counter021/Anti-YouTube-back-end
eca9b26b4a1feb7e516c0164e86c5d6444af8db5
[ "MIT" ]
null
null
null
app/auth/api.py
Anti-Counter021/Anti-YouTube-back-end
eca9b26b4a1feb7e516c0164e86c5d6444af8db5
[ "MIT" ]
null
null
null
app/auth/api.py
Anti-Counter021/Anti-YouTube-back-end
eca9b26b4a1feb7e516c0164e86c5d6444af8db5
[ "MIT" ]
null
null
null
from typing import List

from celery.result import AsyncResult
from fastapi import APIRouter, status, Depends, Form, UploadFile, File, Request, WebSocket
from fastapi.responses import RedirectResponse

from app.auth import service
from app.auth.models import User
from app.auth.permission import is_active
from app.auth.schemas import (
    RegisterUser,
    VerificationUUID,
    Tokens,
    RefreshToken,
    AccessToken,
    Password,
    ChangeUserDataResponse,
    ChangeUserData,
    Channel,
    ChangePassword,
    Tasks,
)
from app.config import oauth
from app.db import async_session
from app.schemas import Message
from app.videos.schemas import GetVideo, SubscriptionsVideos

auth_router = APIRouter()
29.304469
96
0.691354
7242536c3707c16822eadee50c71c7b05cdd3796
7,768
py
Python
concourse/steps/scan_container_images.py
jia-jerry/cc-utils
01322d2acb7343c92138dcf0b6ac913b276525bc
[ "Apache-2.0" ]
null
null
null
concourse/steps/scan_container_images.py
jia-jerry/cc-utils
01322d2acb7343c92138dcf0b6ac913b276525bc
[ "Apache-2.0" ]
null
null
null
concourse/steps/scan_container_images.py
jia-jerry/cc-utils
01322d2acb7343c92138dcf0b6ac913b276525bc
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import textwrap
import typing

import tabulate

import clamav.util
import mailutil

from concourse.model.traits.image_scan import Notify
from product.model import ComponentName, UploadResult


def protecode_results_table(protecode_cfg, upload_results: typing.Iterable[UploadResult]):
    table = tabulate.tabulate(
        map(result_to_tuple, upload_results),
        headers=('Component Name', 'Greatest CVE', 'Container Image Reference'),
        tablefmt='html',
    )
    return table
36.299065
99
0.660788
72430bcb51d12558e07e88c7e1a6d221c05d6f85
647
py
Python
py/cv/video.py
YodaEmbedding/experiments
567c6a1c18fac2d951fe2af54aaa4917b7d529d2
[ "MIT" ]
null
null
null
py/cv/video.py
YodaEmbedding/experiments
567c6a1c18fac2d951fe2af54aaa4917b7d529d2
[ "MIT" ]
null
null
null
py/cv/video.py
YodaEmbedding/experiments
567c6a1c18fac2d951fe2af54aaa4917b7d529d2
[ "MIT" ]
null
null
null
import cv2
import numpy as np

height = 500
width = 700

gray = np.zeros((height, width), dtype=np.uint8)

# fourcc = cv2.VideoWriter_fourcc(*"MJPG")
# filename = "output.avi"
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
filename = "output.mp4"

writer = cv2.VideoWriter(
    filename, fourcc, fps=30, frameSize=(width, height), isColor=False
)
# NOTE isColor doesn't seem to influence resulting file size

xs = np.arange(width // 10)
ys = np.arange(height // 10)
locations = np.dstack(np.meshgrid(ys, xs)).reshape(-1, 2)

for y, x in locations:
    gray[y, x] = 255
    # gray_3c = cv2.merge([gray, gray, gray])
    writer.write(gray)

writer.release()
24.884615
70
0.689335
72450375a565716f2e2d7e0a06b152a00332048e
1,062
py
Python
po/loginpage.py
imzengyang/datadrivertestexample
a37520c2f5f0ae6dfbcaaa371586ba7e98540537
[ "MIT" ]
1
2018-06-03T05:31:46.000Z
2018-06-03T05:31:46.000Z
po/loginpage.py
imzengyang/datadrivertestexample
a37520c2f5f0ae6dfbcaaa371586ba7e98540537
[ "MIT" ]
null
null
null
po/loginpage.py
imzengyang/datadrivertestexample
a37520c2f5f0ae6dfbcaaa371586ba7e98540537
[ "MIT" ]
null
null
null
from po.base import BasePage
from po.base import InvalidPageException
33.1875
100
0.733522
724561c601c848d5d6d0e629507abb99ee03ff0a
1,329
py
Python
app.py
jesseokeya/linkedin-scraper
6b9d5af5167c8c936db63a855a9885728efbfeb5
[ "MIT" ]
null
null
null
app.py
jesseokeya/linkedin-scraper
6b9d5af5167c8c936db63a855a9885728efbfeb5
[ "MIT" ]
1
2020-01-04T19:33:58.000Z
2021-09-07T15:03:03.000Z
app.py
jesseokeya/linkedin-scraper
6b9d5af5167c8c936db63a855a9885728efbfeb5
[ "MIT" ]
1
2021-12-02T06:51:46.000Z
2021-12-02T06:51:46.000Z
from lib import Scrape
from typing import List
from os import environ

main()
27.122449
74
0.678706
724593364a3fe88c699961a3b8ddb8f17f617e15
100
py
Python
Loops/for_in.py
1302580MK/Udemy_Python
c7aef0645ae15a954c2356ba96288deaa087fb32
[ "MIT" ]
null
null
null
Loops/for_in.py
1302580MK/Udemy_Python
c7aef0645ae15a954c2356ba96288deaa087fb32
[ "MIT" ]
null
null
null
Loops/for_in.py
1302580MK/Udemy_Python
c7aef0645ae15a954c2356ba96288deaa087fb32
[ "MIT" ]
null
null
null
var1 = "hello world" # left what you get from the right for character in var1: print(character)
20
34
0.72
7247650bb946b4cd8155dc4709b0e70976c42ea4
546
py
Python
forum/migrations/0003_auto_20190307_1825.py
AdityaJ42/DJ-Comps-Book-Exchange
12bba45f016e1b708477c642c2595b7f15e3dcfc
[ "MIT" ]
null
null
null
forum/migrations/0003_auto_20190307_1825.py
AdityaJ42/DJ-Comps-Book-Exchange
12bba45f016e1b708477c642c2595b7f15e3dcfc
[ "MIT" ]
null
null
null
forum/migrations/0003_auto_20190307_1825.py
AdityaJ42/DJ-Comps-Book-Exchange
12bba45f016e1b708477c642c2595b7f15e3dcfc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-03-07 12:55
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
24.818182
125
0.64652
724a427f96e1eeeba039df150a14e4acaeb34725
495
py
Python
datahub/event/migrations/0018_move_to_metadata_trade_agreement.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
6
2019-12-02T16:11:24.000Z
2022-03-18T10:02:02.000Z
datahub/event/migrations/0018_move_to_metadata_trade_agreement.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
1,696
2019-10-31T14:08:37.000Z
2022-03-29T12:35:57.000Z
datahub/event/migrations/0018_move_to_metadata_trade_agreement.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
9
2019-11-22T12:42:03.000Z
2021-09-03T14:25:05.000Z
# Generated by Django 3.1.7 on 2021-04-08 11:12

from django.db import migrations, models
24.75
83
0.638384
724a51915fb64beb06bbeb6fa5488524f3f99f3f
16,905
py
Python
pbs/scripts/update_burn_dataOLD.py
jawaidm/pbs
87f5c535c976d6a5eccbfbbf2073589b6e366d04
[ "Apache-2.0" ]
null
null
null
pbs/scripts/update_burn_dataOLD.py
jawaidm/pbs
87f5c535c976d6a5eccbfbbf2073589b6e366d04
[ "Apache-2.0" ]
12
2019-10-22T23:16:38.000Z
2022-03-11T23:17:45.000Z
pbs/scripts/update_burn_dataOLD.py
jawaidm/pbs
87f5c535c976d6a5eccbfbbf2073589b6e366d04
[ "Apache-2.0" ]
5
2019-12-19T06:18:42.000Z
2022-01-07T01:16:18.000Z
'''
----------------------------------------------------------------------------------------
This script will update the Prescribed Burn System's ePFP data according to txt input
files found in the relevant scripts folder.
It requires user input of the Corporate Executive Approval date, which it will then use
to set PREVIOUS_YEAR, TARGET_YEAR, DATE_APPROVED and DATE_APPROVED_TO variables used by
relevant functions in the script.
Sample output below:

Please enter the date that the Burn Program was approved by Corporate Executive (dd/mm/yyyy): 03/07/2019
Script will run with the following details:
 - Previous Year: 2018/2019
 - Target Year: 2019/2020
 - Script Data Folder: pbs/scripts/eofy_data/2019
Do you wish to continue [y/n]? y
Updating financial year and setting planning status modified date for carry over currently approved ePFPs from 2018/2019.
Total prescriptions in query: 331
Financial year for ABC_123(2013/2014) is not 2018/2019 or already in 2019/2020.
Updated financial year and set planning status modified date for 330 carry over currently approved ePFPs
Applying corporate approval and setting planning status modified date for ePFPs currently seeking approval in 2019/2020.
Total prescriptions in query: 51
Applied corporate approval and set planning status modified date for 51 ePFPs that were seeking approval
Updating financial year only selected ePFPs from 2018/2019
Total prescriptions in query: 330
Financial year for ABC_123(2013/2014) is not 2018/2019.
Updated financial year only for 0 ePFPs
329 records already in 2019/2020
Updating priority for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated priority for 412 ePFPs (expected 412)
Updating area for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated area for 412 ePFPs (expected 412)
Updating perimeters for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated perimeter for 412 ePFPs (expected 412)
Updating overall rationale for selected ePFPs in 2019/2020
Financial year for ABC_123(2013/2014) is not 2019/2020.
Updated rationale for 168 ePFPs (expected 168)
----------------------------------------------------------------------------------------
'''
import os
import sys

import confy
from django.db import transaction
from django.core.wsgi import get_wsgi_application
from decimal import Decimal
import csv
from datetime import datetime, date
import pytz

application = get_wsgi_application()  # This is so models get loaded.

try:
    confy.read_environment_file('.env')
except:
    print('ERROR: Script must be run from PROJECT BASE_DIR')
    exit()

proj_path = os.getcwd()
sys.path.append(proj_path)
os.chdir(proj_path)

# ----------------------------------------------------------------------------------------
# Script starts here
# ----------------------------------------------------------------------------------------
from pbs.prescription.models import Prescription

if __name__ == "__main__":
    try:
        SCRIPT_FOLDER = 'pbs/scripts'
        DATE_APPROVED_INPUT = raw_input("Please enter the date that the Burn Program was approved "
                                        "by Corporate Executive (dd/mm/yyyy): ")
        DATE_APPROVED = datetime.strptime(DATE_APPROVED_INPUT, '%d/%m/%Y').replace(tzinfo=pytz.UTC)
        if DATE_APPROVED.month != 7 or DATE_APPROVED.year != date.today().year:
            print('Can only run this script in July of the current year')
            sys.exit()
        DATE_APPROVED_TO = date(DATE_APPROVED.year, 9, 30)
        PREVIOUS_YEAR = '{}/{}'.format(DATE_APPROVED.year - 1, DATE_APPROVED.year)
        TARGET_YEAR = '{}/{}'.format(DATE_APPROVED.year, DATE_APPROVED.year + 1)
        SCRIPT_DATA_FOLDER = '{}/eofy_data/{}'.format(SCRIPT_FOLDER, TARGET_YEAR.split('/')[0])
    except BaseException:
        print('Error')
        sys.exit()

    print('\nScript will run with the following details:')
    print(' - Previous Year: {}'.format(PREVIOUS_YEAR))
    print(' - Target Year: {}'.format(TARGET_YEAR))
    print(' - Script Data Folder: {}/'.format(SCRIPT_DATA_FOLDER))
    CONTINUE_INPUT = raw_input("Do you wish to continue [y/n]? ")
    if CONTINUE_INPUT == 'y':
        try:
            with transaction.atomic():
                corp_approved_carryover_ids = read_ids('{}/corp_approved_carryover.txt'.format(SCRIPT_DATA_FOLDER))
                carryover_currently_approved(corp_approved_carryover_ids)

                seeking_approval_ids = read_ids('{}/approve_seeking_approval.txt'.format(SCRIPT_DATA_FOLDER))
                update_seeking_approval(seeking_approval_ids)

                update_financial_year_ids = read_ids('{}/financial_year_only.txt'.format(SCRIPT_DATA_FOLDER))
                update_financial_year(update_financial_year_ids)

                burn_priority_tuples = read_id_tuples('{}/burn_priority.txt'.format(SCRIPT_DATA_FOLDER))
                update_burn_priority(burn_priority_tuples)

                burn_area_tuples = read_id_tuples('{}/burn_areas.txt'.format(SCRIPT_DATA_FOLDER))
                update_burn_areas(burn_area_tuples)

                burn_perimeter_tuples = read_id_tuples('{}/burn_perimeters.txt'.format(SCRIPT_DATA_FOLDER))
                update_burn_perimeters(burn_perimeter_tuples)

                overall_rationale_tuples = read_id_tuples_pipe_separated('{}/overall_rationales.txt'
                                                                         .format(SCRIPT_DATA_FOLDER))
                update_overall_rationales(overall_rationale_tuples)
        except BaseException:
            print('Error')
    else:
        sys.exit()
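# Editor's sketch: read_ids / read_id_tuples are defined in an elided part of
# this script.  A plausible minimal shape, assuming one burn id per line in
# the txt inputs -- the name and format are guesses:
def read_ids_sketch(path):
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]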
47.089136
125
0.644898
724b92184d8f2e9819e55008805cce856be796bd
4,012
py
Python
learnware/algorithm/anomaly_detect/iforest.py
marvinren/aiops_gaussian_learnware
47683546d6648a38bb71988c33f959cf7308376f
[ "Apache-2.0" ]
null
null
null
learnware/algorithm/anomaly_detect/iforest.py
marvinren/aiops_gaussian_learnware
47683546d6648a38bb71988c33f959cf7308376f
[ "Apache-2.0" ]
null
null
null
learnware/algorithm/anomaly_detect/iforest.py
marvinren/aiops_gaussian_learnware
47683546d6648a38bb71988c33f959cf7308376f
[ "Apache-2.0" ]
null
null
null
import numpy as np
from scipy.stats import binom
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from scipy.special import erf

from learnware.algorithm.anomaly_detect.base import BaseAnomalyDetect
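# Editor's sketch: the detector class body is elided here.  The imports
# suggest an IsolationForest whose raw scores are rescaled and mapped to
# probabilities via erf -- a minimal, assumption-labelled version of that idea:
import numpy as np
from scipy.special import erf
from sklearn.ensemble import IsolationForest

def iforest_scores_sketch(X):
    clf = IsolationForest(random_state=0).fit(X)
    raw = -clf.decision_function(X)            # higher = more anomalous
    z = (raw - raw.mean()) / (raw.std() or 1.0)
    return erf(z / np.sqrt(2)).clip(0, 1)      # unify scores into [0, 1]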
37.148148
79
0.598704
724cda3b3a14ff18ab5608878c35ee486f9afa69
217
py
Python
noxfile.py
rshnn/Practical-RL
f7688e224a342c7f67478f2c4cd6bb7b1a122205
[ "MIT" ]
3
2022-02-14T17:59:56.000Z
2022-02-15T10:08:43.000Z
noxfile.py
rshnn/Practical-RL
f7688e224a342c7f67478f2c4cd6bb7b1a122205
[ "MIT" ]
21
2021-11-02T21:35:26.000Z
2022-01-17T18:50:42.000Z
noxfile.py
rshnn/Practical-RL
f7688e224a342c7f67478f2c4cd6bb7b1a122205
[ "MIT" ]
2
2021-11-24T15:25:17.000Z
2022-02-14T19:04:56.000Z
from nox import session
24.111111
74
0.686636
724da380c925fd0be608bd11f30b6d426eb5746d
27
py
Python
megnet/data/__init__.py
abdalazizrashid/megnet
8ad0fca246465bd57d66392f790c5310c610dfff
[ "BSD-3-Clause" ]
367
2018-12-13T14:49:00.000Z
2022-03-31T10:17:04.000Z
megnet/data/__init__.py
kdmsit/MEGNet
4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1
[ "MIT" ]
162
2019-02-08T20:38:12.000Z
2022-03-31T21:13:06.000Z
megnet/data/__init__.py
kdmsit/MEGNet
4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1
[ "MIT" ]
119
2018-12-17T10:16:12.000Z
2022-03-31T17:26:57.000Z
""" Data manipulations """
6.75
18
0.62963