hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
75b2e04cb5f586ec15b752e5cc06367509fd6133 | 1,004 | py | Python | RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 3 | 2018-08-23T13:40:24.000Z | 2019-12-05T21:16:03.000Z | RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
from RecoLocalCalo.HGCalRecProducers.hgcalLayerClusters_cfi import hgcalLayerClusters as hgcalLayerClusters_
from RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import dEdX, HGCalRecHit
from RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi import HGCalUncalibRecHit
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import fC_per_ele, hgceeDigitizer, hgchebackDigitizer
hgcalLayerClusters = hgcalLayerClusters_.clone()
hgcalLayerClusters.timeOffset = hgceeDigitizer.tofDelay
hgcalLayerClusters.plugin.dEdXweights = cms.vdouble(dEdX.weights)
hgcalLayerClusters.plugin.fcPerMip = cms.vdouble(HGCalUncalibRecHit.HGCEEConfig.fCPerMIP)
hgcalLayerClusters.plugin.thicknessCorrection = cms.vdouble(HGCalRecHit.thicknessCorrection)
hgcalLayerClusters.plugin.fcPerEle = cms.double(fC_per_ele)
hgcalLayerClusters.plugin.noises = cms.PSet(refToPSet_ = cms.string('HGCAL_noises'))
hgcalLayerClusters.plugin.noiseMip = hgchebackDigitizer.digiCfg.noise_MIP
| 50.2 | 110 | 0.878486 |
75b2efb0dac87ecec2330f57bb9b5abeb2ef6c62 | 1,705 | py | Python | modules/AzureBridge/main.py | open-edge-insights/eii-azure-bridge | 346da9d56be78c6e06a470dfbaf808d568427679 | [
"MIT"
] | null | null | null | modules/AzureBridge/main.py | open-edge-insights/eii-azure-bridge | 346da9d56be78c6e06a470dfbaf808d568427679 | [
"MIT"
] | null | null | null | modules/AzureBridge/main.py | open-edge-insights/eii-azure-bridge | 346da9d56be78c6e06a470dfbaf808d568427679 | [
"MIT"
] | 2 | 2022-02-07T09:05:54.000Z | 2022-03-17T04:32:50.000Z | # Copyright (c) 2020 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""EII Message Bus Azure Edge Runtime Bridge
"""
import asyncio
import traceback as tb
from eab.bridge_state import BridgeState
def main():
"""Main method.
"""
bs = None
try:
bs = BridgeState.get_instance()
loop = asyncio.get_event_loop()
loop.run_forever()
except Exception as e:
print(f'[ERROR] {e}\n{tb.format_exc()}')
raise
finally:
if bs is not None:
# Fully stop the bridge
bs.stop()
# Clean up asyncio
loop.stop()
loop.close()
if __name__ == "__main__":
main()
| 34.1 | 78 | 0.70088 |
75b2fe433461c1164efd99a7fb0d0c61b5a14512 | 8,033 | py | Python | src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py | spaceone-dev/plugin-google-cloud-inven-collector | 3e103412e7598ee9fa5f68b6241a831a40e8b9bc | [
"Apache-2.0"
] | null | null | null | import logging
import time
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.bigquery.sql_workspace import SQLWorkspaceConnector
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service import BigQueryWorkSpace, SQLWorkSpaceResource, \
SQLWorkSpaceResponse, ProjectModel
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service_type import CLOUD_SERVICE_TYPES
from datetime import datetime
_LOGGER = logging.getLogger(__name__)
| 42.957219 | 119 | 0.578115 |
75b72dca2e43b5612d13506d6b92693bca1eea41 | 192 | py | Python | 1_Python/Aulas/Aula13a.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | 1_Python/Aulas/Aula13a.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | 1_Python/Aulas/Aula13a.py | guilhermebaos/Curso-em-Video-Python | 0e67f6f59fa3216889bd2dde4a26b532c7c545fd | [
"MIT"
] | null | null | null | for a in range(0,6):
print('Ol!', a)
print('Parei. \n')
for b in range(6, 0, -1):
print('Ol1', b)
print('Parei. \n')
for c in range(0, 6, 2):
print('Ol!', c)
print('Parei. \n')
| 19.2 | 25 | 0.53125 |
75b763c3212f1f5ddcadc048b167842b24fdff2e | 1,732 | py | Python | worker_zeromq/resource.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | worker_zeromq/resource.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | worker_zeromq/resource.py | espang/projects | 3a4d93592bc3427a6abd8d2170081155862754a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 09:11:06 2016
@author: eikes
"""
import ConfigParser
from components import Component
from result import VariableResult
_config = ConfigParser.ConfigParser()
_config.read('scenario.cfg')
_section = 'MySection'
_results = 'results'
LP_FILE_PATH = _config.get(_section, 'lp')
TRC_FILE_PATH = _config.get(_section, 'trc')
QUANTITY = _config.getint(_section, 'quantity')
COMPONENTS = [ _create_comp(i) for i in range(1, QUANTITY+1) ]
RESULTS = _create_results()
SIMULATIONS = _config.getint(_section, 'simulations')
WORKER = _config.getint(_section, 'worker')
S_VALUE = float(1.5855e+07)
| 27.0625 | 90 | 0.663972 |
75b8a1f71cb2c99f52c326ad6e518a675e652f84 | 466 | py | Python | sub-array-algorithm-frustated-coders.py | annukamat/My-Competitive-Journey | adb13a5723483cde13e5f3859b3a7ad840b86c97 | [
"MIT"
] | 7 | 2018-11-08T11:39:27.000Z | 2020-09-10T17:50:57.000Z | sub-array-algorithm-frustated-coders.py | annukamat/My-Competitive-Journey | adb13a5723483cde13e5f3859b3a7ad840b86c97 | [
"MIT"
] | null | null | null | sub-array-algorithm-frustated-coders.py | annukamat/My-Competitive-Journey | adb13a5723483cde13e5f3859b3a7ad840b86c97 | [
"MIT"
] | 2 | 2019-09-16T14:34:03.000Z | 2019-10-12T19:24:00.000Z | ncoders = int(input("enter no. of coders : "))
l=map(int,input().split(" "))
sl=[]
l = sorted(list(l))
top = 1
for rotator in range(1,ncoders):
sl = l[:rotator]
if(top != ncoders):
if(max(sl) < l[top]):
l[l.index(max(sl))] = 0
top = top +1
elif(max(sl) == l[top]):
l[l.index(max(sl[:len(sl)-1]))] = 0
top = top+1
else:
break
print(l)
print(sum(l))
| 18.64 | 47 | 0.44206 |
75ba91add5ced077993a147299ed8098ccb69a59 | 8,081 | py | Python | source/soca/cluster_web_ui/api/v1/dcv/image.py | cfsnate/scale-out-computing-on-aws | 1cc316e988dca3200811ff5527a088a1706901e5 | [
"Apache-2.0"
] | 77 | 2019-11-14T22:54:48.000Z | 2022-02-09T06:06:39.000Z | source/soca/cluster_web_ui/api/v1/dcv/image.py | cfsnate/scale-out-computing-on-aws | 1cc316e988dca3200811ff5527a088a1706901e5 | [
"Apache-2.0"
] | 47 | 2020-01-15T18:51:32.000Z | 2022-03-08T19:46:39.000Z | source/soca/cluster_web_ui/api/v1/dcv/image.py | cfsnate/scale-out-computing-on-aws | 1cc316e988dca3200811ff5527a088a1706901e5 | [
"Apache-2.0"
] | 50 | 2019-11-14T22:51:28.000Z | 2022-03-14T22:49:53.000Z | ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import config
from flask_restful import Resource, reqparse
import logging
from decorators import admin_api, restricted_api, private_api
import botocore
import datetime
from models import db, AmiList
import boto3
import errors
from sqlalchemy import exc
from sqlalchemy.exc import SQLAlchemyError
logger = logging.getLogger("api")
session = boto3.session.Session()
aws_region = session.region_name
ec2_client = boto3.client('ec2', aws_region, config=config.boto_extra_config())
| 45.655367 | 205 | 0.516891 |
75bb6e08d53656c02653379a24d3bf7833708bba | 807 | py | Python | Day 5/python/main.py | BenBMoore/leetcode-challenges | 97359abbeb24daf8cc33fe2bf1d5748ac824aab4 | [
"MIT"
] | null | null | null | Day 5/python/main.py | BenBMoore/leetcode-challenges | 97359abbeb24daf8cc33fe2bf1d5748ac824aab4 | [
"MIT"
] | null | null | null | Day 5/python/main.py | BenBMoore/leetcode-challenges | 97359abbeb24daf8cc33fe2bf1d5748ac824aab4 | [
"MIT"
] | null | null | null | import argparse
from typing import List
if __name__ == "__main__":
main()
| 27.827586 | 85 | 0.619579 |
75bdd147dbc8647c0747f11af9d4431656daa233 | 947 | py | Python | ex2.py | timwuu/AnaPoker | 7cb125c4639a5cd557a6b45c92b5793dcc39def8 | [
"MIT"
] | null | null | null | ex2.py | timwuu/AnaPoker | 7cb125c4639a5cd557a6b45c92b5793dcc39def8 | [
"MIT"
] | null | null | null | ex2.py | timwuu/AnaPoker | 7cb125c4639a5cd557a6b45c92b5793dcc39def8 | [
"MIT"
] | null | null | null | import calcWinRate as cwr
k= 10000 # simulate k times
# --- example 0 ---
# --- 1-draw straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28] #K,J,8
pp( player_a, player_b, table_cards, k)
# --- straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs three of kind
player_a = [51,43] #AQ
player_b = [47,46] #KK
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs two pairs
player_a = [51,43] #AQ
player_b = [47,39] #KJs
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
| 22.023256 | 93 | 0.62302 |
75bf78052e28e2d4673d9f69709a11b7958bfff3 | 1,085 | py | Python | Utils/Permission.py | koi312500/Koi_Bot_Discord | 9d7a70f42cdb1110e6382125ade39d3aec21b3b9 | [
"MIT"
] | null | null | null | Utils/Permission.py | koi312500/Koi_Bot_Discord | 9d7a70f42cdb1110e6382125ade39d3aec21b3b9 | [
"MIT"
] | 1 | 2021-06-23T01:16:36.000Z | 2021-06-23T01:16:36.000Z | Utils/Permission.py | koi312500/Koi_Bot_Discord | 9d7a70f42cdb1110e6382125ade39d3aec21b3b9 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from Utils.UserClass import UserClass as User
permission_message = ["Guest [Permission Level : 0]", "User [Permission Level : 1]", "Developer [Permission Level : 2]", "Owner [Permission Level : 3]"] | 57.105263 | 161 | 0.682028 |
75bfcbaef981a9d2b8f3eecff56d9741a7a40637 | 436 | py | Python | 10.py | seanmanson/euler | b01418cf44c1113a0c574b5158aa5b89d725cca2 | [
"MIT"
] | null | null | null | 10.py | seanmanson/euler | b01418cf44c1113a0c574b5158aa5b89d725cca2 | [
"MIT"
] | null | null | null | 10.py | seanmanson/euler | b01418cf44c1113a0c574b5158aa5b89d725cca2 | [
"MIT"
] | null | null | null | import math
test = []
sumPrimes = 2
for i in range(3, 2000000, 2):
if not testPrime(i):
continue
sumPrimes+=i
if (i % 10000 == 1):
print("progress : ", i, sumPrimes)
print (sumPrimes)
| 18.166667 | 42 | 0.538991 |
75c33edb1fb71d6cd1c893b5ce0674035ed9e6dd | 37,403 | py | Python | clangelscript.py | gwihlidal/Clangelscript | e83f77d78bf57c25f67922b65aad2f8e74ce2699 | [
"MIT"
] | 1 | 2019-06-21T06:37:16.000Z | 2019-06-21T06:37:16.000Z | clangelscript.py | gwihlidal/clangelscript | e83f77d78bf57c25f67922b65aad2f8e74ce2699 | [
"MIT"
] | null | null | null | clangelscript.py | gwihlidal/clangelscript | e83f77d78bf57c25f67922b65aad2f8e74ce2699 | [
"MIT"
] | null | null | null | import sys
import re
import json
import os.path
import copy
from mako.template import Template
from clang import cindex
configfile = "clangelscript.json"
f = open(configfile)
data = f.read()
data = re.sub(r"//[^n]*n", "\n", data)
config = json.loads(data)
f.close()
if "ObjectTypes" in config:
arr = config["ObjectTypes"]
config["ObjectTypes"] = {}
for name in arr:
config["ObjectTypes"][re.compile(name)] = arr[name]
fir = get("FileIncludeRegex", None)
fer = get("FileExcludeRegex", None)
mir = get("MethodIncludeRegex", None)
mer = get("MethodExcludeRegex", None)
oir = get("ObjectIncludeRegex", None)
oer = get("ObjectExcludeRegex", None)
mfir = get("FieldIncludeRegex", None)
mfer = get("FieldExcludeRegex", None)
generic_regex = get("GenericWrapperRegex", None)
maahr = get("MethodArgumentAutoHandleRegex", None)
mrahr = get("MethodReturnAutoHandleRegex", None)
fir = re.compile(fir) if fir else fir
fer = re.compile(fer) if fer else fer
mir = re.compile(mir) if mir else mir
mer = re.compile(mer) if mer else mer
oir = re.compile(oir) if oir else oir
oer = re.compile(oer) if oer else oer
mfir = re.compile(mfir) if mfir else mfir
mfer = re.compile(mfer) if mfer else mfer
maahr = re.compile(maahr) if maahr else maahr
mrahr = re.compile(mrahr) if mrahr else mrahr
generic_regex = re.compile(generic_regex) if generic_regex else generic_regex
verbose = get("Verbose", False)
doassert = get("Assert", True)
keep_unknowns = get("KeepUnknowns", False)
output_filename = get("OutputFile", None)
funcname = get("FunctionName", "registerScripting")
generic_wrappers = []
index = cindex.Index.create()
clang_args = get("ClangArguments", [])
#clang_args.insert(0, "-I%s/clang/include" % os.path.dirname(os.path.abspath(__file__)))
new_args = []
for arg in clang_args:
new_args.append(arg.replace("${ConfigFilePath}", os.path.dirname(os.path.abspath(configfile))))
clang_args = new_args
tu = index.parse(None, clang_args, [], 13)
warn_count = 0
objecttype_scoreboard = {}
typedef = {}
as_builtins = {
"unsigned long": "uint64",
"unsigned int": "uint",
"unsigned short": "uint16",
"unsigned char": "uint8",
"long": "int64",
"void": "void",
"double": "double",
"float": "float",
"char": "int8",
"short": "int16",
"int": "int",
"long": "int64",
"bool": "bool"
}
operatornamedict = {
"-operator": "opNeg",
"~operator": "opCom",
"++operator": "opPreInc",
"--operator": "opPreDec",
"operator==": "opEquals",
#"operator!=": "opEquals",
"operator<": "opCmp",
# "operator<=": "opCmp",
# "operator>": "opCmp",
# "operator>=": "opCmp",
"operator++": "opPostInc",
"operator--": "opPostDec",
"operator+": "opAdd",
"operator-": "opSub",
"operator*": "opMul",
"operator/": "opDiv",
"operator%": "opMod",
"operator&": "opAnd",
"operator|": "opOr",
"operator^": "opXor",
"operator<<": "opShl",
"operator>>": "opShr",
"operator>>>": "opUShr",
"operator[]": "opIndex",
"operator=": "opAssign",
"operator+=": "opAddAssign",
"operator-=": "opSubAssign",
"operator*=": "opMulAssign",
"operator/=": "opDivAssign",
"operator%=": "opModAssign",
"operator&=": "opAndAssign",
"operator|=": "opOrAssign",
"operator^=": "opXorAssign",
"operator<<=": "opShlAssign",
"operator>>=": "opShrAssign",
"operator>>>=": "opUShrAssign",
}
objectindex = 0
typedefs = []
enums = []
objecttypes = {}
functions = []
objectmethods = []
objectfields = []
includes = []
behaviours = []
# Removes usage of object types that are used both as a reference and a value type
functions = unknown_filter(functions)
objectmethods = unknown_filter(objectmethods)
behaviours = unknown_filter(behaviours)
walk(tu.cursor)
# File processed, do some post processing
remove_ref_val_mismatches()
if not keep_unknowns:
remove_unknowns()
remove_duplicates()
remove_reference_destructors()
remove_pure_virtual_constructors()
if output_filename != None:
output_filename = output_filename.replace("${this_file_path}", os.path.dirname(os.path.abspath(configfile)))
ot = [objecttypes[o] for o in objecttypes]
ot.sort(cmp=lambda a, b: cmp(a.index, b.index))
for diag in tu.diagnostics:
logWarning("clang had the following to say: %s" % (diag.spelling))
objectTypeStrings = []
for o in ot:
objectTypeStrings.append(o.get_register_string())
typeDefStrings = []
for o in typedefs:
typeDefStrings.append(o.get_register_string())
functionStrings = []
for o in functions:
functionStrings.append(o.get_register_string())
behaviourStrings = []
for o in behaviours:
behaviourStrings.append(o.get_register_string())
objectMethodStrings = []
for o in objectmethods:
objectMethodStrings.append(o.get_register_string())
objectFieldStrings = []
for o in objectfields:
objectFieldStrings.append(o.get_register_string())
tpl = Template(filename='ScriptBind.mako')
rendered = tpl.render(
genericWrappers=generic_wrappers,
funcName=funcname,
includes=includes,
objectTypes=objectTypeStrings,
typeDefs=typeDefStrings,
hashDefines=_assert("engine->RegisterEnum(\"HASH_DEFINES\")"),
enums="",
functions=functionStrings,
behaviours=behaviourStrings,
objectMethods=objectMethodStrings,
objectFields=objectFieldStrings)
with open(output_filename, "w") as f:
f.write(rendered)
sys.stderr.write("Finished with %d warnings\n" % warn_count)
| 35.252592 | 221 | 0.558859 |
75c4b53c5b63ac8649c46b0cdcaee35a32ddb87c | 573 | py | Python | www/test.py | Lneedy/pyBlobDemo | 19ff1d9b5478f62bbc7f510bffa81adc7915a73b | [
"MIT"
] | null | null | null | www/test.py | Lneedy/pyBlobDemo | 19ff1d9b5478f62bbc7f510bffa81adc7915a73b | [
"MIT"
] | null | null | null | www/test.py | Lneedy/pyBlobDemo | 19ff1d9b5478f62bbc7f510bffa81adc7915a73b | [
"MIT"
] | null | null | null | '''
demo
>> mysql -u root -p < schema.sql
'''
import orm
from models import User, Blog, Comment
import asyncio
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(test(loop))
print('Test finished')
loop.close() | 23.875 | 105 | 0.65445 |
75c58beec52cc06cb6843a182d38d84b973164ec | 1,358 | py | Python | serializers_test/avro_avg.py | lioritan/Side-Projects | 647bdbf0d3b71ea113739fb7ad2b299aea28c653 | [
"MIT"
] | null | null | null | serializers_test/avro_avg.py | lioritan/Side-Projects | 647bdbf0d3b71ea113739fb7ad2b299aea28c653 | [
"MIT"
] | null | null | null | serializers_test/avro_avg.py | lioritan/Side-Projects | 647bdbf0d3b71ea113739fb7ad2b299aea28c653 | [
"MIT"
] | null | null | null | import avro.schema
import json
import fastavro
SCHEMA = {
"namespace": "avg_obj",
"type": "record",
"name": "Meme",
"fields": [
{"name": "user", "type": {
"type": "record",
"name": "PostUser",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "first_name", "type": ["null", "string"], "default": "null"},
{"name": "last_name", "type": ["null", "string"], "default": "null"},
{"name": "user_type", "type": ["null",
{"type": "enum",
"name": "UserType",
"symbols": ["FREE", "REGULAR", "PREMIUM"]
}], "default": "null"},
]}},
{"name": "title", "type": ["null", "string"], "default": "null"},
{"name": "content", "type": ["null", "bytes"], "default": "null"},
{"name": "top_string", "type": ["null", "string"], "default": "null"},
{"name": "botom_string", "type": ["null", "string"], "default": "null"},
{"name": "likes", "type": ["null", "long"], "default": 0},
{"name": "hates", "type": ["null", "long"], "default": 0},
]
}
avro_schema = fastavro.parse_schema(SCHEMA)
| 38.8 | 89 | 0.401325 |
75c61bdb0e5516f5f220ea06ae4eb78827a719a4 | 210 | py | Python | naloga002.py | pzi-si/pzi-src-2 | 819069db98873becf8c8ff93bb1e8fb9dca3036c | [
"CC0-1.0"
] | null | null | null | naloga002.py | pzi-si/pzi-src-2 | 819069db98873becf8c8ff93bb1e8fb9dca3036c | [
"CC0-1.0"
] | null | null | null | naloga002.py | pzi-si/pzi-src-2 | 819069db98873becf8c8ff93bb1e8fb9dca3036c | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Program, ki vas vpraa po imenu, nato pa vas pozdravi. """
# povpraamo po imenu
ime = input("Kako ti je ime? ")
# pozdravimo
print(f"Pozdravljen_a, {ime}!") | 21 | 62 | 0.642857 |
75ca90abf615365ec5eda2bc92c9c7ddc159748c | 3,699 | py | Python | cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py | keetsky/tensorflow_learn | 77205434c2e3d70d482a756f5f679622d10f49b2 | [
"Apache-2.0"
] | null | null | null | cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py | keetsky/tensorflow_learn | 77205434c2e3d70d482a756f5f679622d10f49b2 | [
"Apache-2.0"
] | null | null | null | cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py | keetsky/tensorflow_learn | 77205434c2e3d70d482a756f5f679622d10f49b2 | [
"Apache-2.0"
] | null | null | null | '''
# Linear Regression: understanding loss function in linear regression
#----------------------------------
#
# This function shows how to use Tensorflow to
# solve linear regression.
# y = Ax + b
#
# We will use the iris data, specifically:
# y = Sepal Length
# x = Petal Width
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
#%%
#L2 Loss
ops.reset_default_graph()
sess=tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris=datasets.load_iris()
x_vals=np.array([x[3] for x in iris.data])
y_vals=np.array([y[0] for y in iris.data])
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data=tf.placeholder(shape=[None,1],dtype=tf.float32)
y_=tf.placeholder(shape=[None,1], dtype=tf.float32)
#create variable for linear regression
A=tf.Variable(tf.random_normal(shape=[1,1]))
b=tf.Variable(tf.random_normal(shape=[1,1]))
#declare model operations
y=tf.add(tf.matmul(x_data,A),b)
#declare loss functions (1/2/m) (y_-y)^2
loss=tf.reduce_mean(tf.square(y_- y))
#Declare optimizer
op=tf.train.GradientDescentOptimizer(0.4)
train_step=op.minimize(loss)
#initialize variables
init=tf.global_variables_initializer()
sess.run(init)
#training loop
loss_vec_l2=[]
for i in range(100):
rand_index=np.random.choice(len(x_vals),size=batch_size)#len(x_vals)25
rand_x=np.transpose([x_vals[rand_index]])
rand_y=np.transpose([y_vals[rand_index]])
sess.run(train_step,feed_dict={x_data:rand_x,y_:rand_y})
temp_loss=sess.run(loss,feed_dict={x_data:rand_x,y_:rand_y})
loss_vec_l2.append(temp_loss)
if (i+1)%25==0:
print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
print('Loss = ' + str(temp_loss))
#%%
#L1 Loss
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
# Declare batch size and number of iterations
batch_size = 25
learning_rate = 0.4 # Will not converge with learning rate at 0.4
iterations = 100
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss functions
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))
# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)
# Declare optimizers
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)
# Training loop
loss_vec_l1 = []
for i in range(iterations):
rand_index = np.random.choice(len(x_vals), size=batch_size)
rand_x = np.transpose([x_vals[rand_index]])
rand_y = np.transpose([y_vals[rand_index]])
sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec_l1.append(temp_loss_l1)
if (i+1)%25==0:
print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
#%%
#plot loss over time(steps)
plt.plot(loss_vec_l1, 'k-', label='L1 Loss')
plt.plot(loss_vec_l2, 'r--', label='L2 Loss')
plt.title('L1 and L2 Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('L1 Loss')
plt.legend(loc='upper right')
plt.show()
| 30.073171 | 92 | 0.711544 |
75caae991e7575297539a0a5755bf9b4493ee335 | 3,258 | py | Python | pysh.py | tri-llionaire/tri-llionaire.github.io | 5134d3ec0ff1e3b7eab469ea05300b505895212f | [
"MIT"
] | 1 | 2018-04-24T14:53:23.000Z | 2018-04-24T14:53:23.000Z | pysh.py | tri-llionaire/tri-llionaire.github.io | 5134d3ec0ff1e3b7eab469ea05300b505895212f | [
"MIT"
] | null | null | null | pysh.py | tri-llionaire/tri-llionaire.github.io | 5134d3ec0ff1e3b7eab469ea05300b505895212f | [
"MIT"
] | 1 | 2018-08-25T21:15:07.000Z | 2018-08-25T21:15:07.000Z | #pysh: shell in python
import sys
cmdlist = ['start','exit','cd','md','ls','pd','cf','cl']
convert = []
waiting = 0
print 'pysh 1.0.5 19.03.11 #6. type start to enter, exit to leave.'
paths = ['pysh/']
direct = 'pysh/'
added = []
entered = raw_input(': ')
if entered == 'start':
while entered != ['exit']:
entered = raw_input('{} '.format(direct))
entered = entered.split()
for x in entered:
if x in cmdlist:
if waiting == 0:
if x == 'ls':
for i in paths:
if i.startswith(direct) and len(i) > len(direct):
temp = len(direct)
splitted = i[temp:].split('/')
if len(splitted) > 1 and (splitted[0] + '/') not in added:
print splitted[0] + '/'
added.append(splitted[0] + '/')
elif len(splitted) < 2 and splitted[0] not in added:
print splitted[0]
added.append(splitted[0])
else:
pass
else:
pass
elif x == 'pd':
print direct
elif x == 'cd':
waiting = 1
elif x == 'md':
waiting = 2
elif x == 'cf':
waiting = 3
elif x == 'start':
print 'already in pysh'
elif x == 'cl':
sys.stdout.write('\x1b[2J\x1b[H')
else:
break
else:
print 'pysh: consecutive cmd {}'.format(x)
else:
if waiting == 1:
if x == '..':
direct = direct[:-1].rsplit('/',1)[0] + '/'
else:
if direct + x + '/' in paths:
direct = direct + x + '/'
elif x.endswith('/'):
if direct + x in paths:
direct = direct + x
else:
print 'pysh: directory \'{}\' not found'.format(x)
else:
print 'pysh: can\'t cd to file \'{}\''.format(x)
waiting = 0
elif waiting == 2:
if x.endswith('/'):
paths.append(direct + x)
else:
paths.append(direct + x + '/')
waiting = 0
elif waiting == 3:
if x.endswith('/'):
paths.append(direct + x - '/')
else:
paths.append(direct + x)
waiting = 0
else:
print 'pysh: {} not found.'.format(x)
break
else:
print 'startup: {} not found'.format(entered)
| 40.222222 | 90 | 0.329343 |
75cd39985df9ba1fac685c50b84e7d3ed1571cd1 | 3,813 | py | Python | Scripts/IDA_SyncDecompiledFuncs.py | THEONLYDarkShadow/alive_reversing | 680d87088023f2d5f2a40c42d6543809281374fb | [
"MIT"
] | 208 | 2018-06-06T13:14:03.000Z | 2022-03-30T02:21:27.000Z | Scripts/IDA_SyncDecompiledFuncs.py | THEONLYDarkShadow/alive_reversing | 680d87088023f2d5f2a40c42d6543809281374fb | [
"MIT"
] | 537 | 2018-06-06T16:50:45.000Z | 2022-03-31T16:41:15.000Z | Scripts/IDA_SyncDecompiledFuncs.py | THEONLYDarkShadow/alive_reversing | 680d87088023f2d5f2a40c42d6543809281374fb | [
"MIT"
] | 42 | 2018-06-06T00:40:08.000Z | 2022-03-23T08:38:55.000Z | from idautils import *
from idaapi import *
from idc import *
import urllib2
if __name__ == '__main__':
main()
| 30.504 | 193 | 0.620771 |
75cdec8d921818ac60703e7cb57923284eb229e2 | 2,499 | py | Python | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 29.05814 | 77 | 0.605442 |
75d0eb05faa1f187e229cf597a3a8352882ca242 | 2,888 | py | Python | tests/tests_tabu.py | Antash696/VRP | 386b84adbe34be37aabc1e638515ce722849a952 | [
"MIT"
] | 33 | 2017-10-18T01:18:27.000Z | 2021-10-04T14:17:52.000Z | tests/tests_tabu.py | dj-boy/VRP | 386b84adbe34be37aabc1e638515ce722849a952 | [
"MIT"
] | 1 | 2020-12-21T01:59:21.000Z | 2020-12-21T01:59:21.000Z | tests/tests_tabu.py | dj-boy/VRP | 386b84adbe34be37aabc1e638515ce722849a952 | [
"MIT"
] | 19 | 2017-06-26T15:02:00.000Z | 2022-03-31T08:44:20.000Z | import unittest
from code import instance as i
from code import datamapping as dm
from code import greedyfirst as gf
from code import algorithm as a
from code import baseobjects as bo
from code import tabu
if __name__ == "__main__":
unittest.main()
| 39.027027 | 146 | 0.674169 |
75d24f00bd3d394aa053d2de0806888649ac3eca | 381 | py | Python | hedge_hog/metric/__init__.py | otivedani/hedge_hog | 62026e63b6bdc72cc4f0c984136712e6ee090f68 | [
"MIT"
] | null | null | null | hedge_hog/metric/__init__.py | otivedani/hedge_hog | 62026e63b6bdc72cc4f0c984136712e6ee090f68 | [
"MIT"
] | null | null | null | hedge_hog/metric/__init__.py | otivedani/hedge_hog | 62026e63b6bdc72cc4f0c984136712e6ee090f68 | [
"MIT"
] | null | null | null | """
examples on scikit-image :
call :
from skimage.feature import blob_dog, blob_log, blob_doh
structure :
skimage
feature
__init__.py (from .blob import blob_dog, blob_log, blob_doh)
blob.py (contains blob_dog, blob_log, blob_doh)
conclusion :
module imported because it was defined in module dir
"""
from .timemeter import timemeter | 20.052632 | 69 | 0.692913 |
75d35dd0f6991525937dfc89b52855e73f47aaa9 | 1,941 | py | Python | Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py | quguiliang/Machine-Learning-with-Spark-Second-Edition | 0ba131e6c15a3de97609c6cb5d976806ccc14f09 | [
"MIT"
] | 112 | 2017-05-13T15:44:29.000Z | 2022-02-19T20:14:14.000Z | Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py | tophua/Machine-Learning-with-Spark-Second-Edition | 0d93e992f6c79d55ad5cdcab735dbe6674143974 | [
"MIT"
] | 1 | 2017-05-25T00:10:43.000Z | 2017-05-25T00:10:43.000Z | Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py | tophua/Machine-Learning-with-Spark-Second-Edition | 0d93e992f6c79d55ad5cdcab735dbe6674143974 | [
"MIT"
] | 115 | 2017-05-06T10:49:00.000Z | 2022-03-08T07:48:54.000Z | import numpy as np
from com.sparksamples.util import get_records
from com.sparksamples.util import get_mapping
from com.sparksamples.util import extract_features
from com.sparksamples.util import extract_label
from com.sparksamples.util import extract_features_dt
#from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.tree import GradientBoostedTrees
from pyspark.mllib.regression import LabeledPoint
from com.sparksamples.util import squared_log_error
__author__ = 'Rajdeep Dua'
| 41.297872 | 107 | 0.714065 |
75d4809609a0cd8b60448ab7ac5fccbe7bba640b | 5,010 | py | Python | maze.py | vcxsd/muck-builder | 12c1defbb816395a119da1992c1352d614d5507b | [
"MIT"
] | null | null | null | maze.py | vcxsd/muck-builder | 12c1defbb816395a119da1992c1352d614d5507b | [
"MIT"
] | null | null | null | maze.py | vcxsd/muck-builder | 12c1defbb816395a119da1992c1352d614d5507b | [
"MIT"
] | null | null | null |
import random
import yaml
wallMaker = Grammar({
'wallMat': [ 'stone', 'rock', 'wood', 'paper', 'earth', 'crystal', 'leafy vagueness', 'sand', 'skin', 'bark', 'foliage', 'needles', 'delicate tiles', 'agate', 'quartz', 'glass', 'iron', 'copper' ],
'wallCond': [ 'dark', 'heavy', 'slick', 'moss-clung', 'twisted', 'fluted', 'greenish', 'dark', 'hot', 'lumpy', 'unsteady', 'slippery', 'geometrically flanged', 'sigil-eaten', 'consuming', 'blue', 'reddish', 'translucent', 'ultramarine', 'sky-blue', 'delicate pink', 'fuligin' ],
'walls': [ 'walls of [wallMat] close in; the way is [width].',
'[wallCond] walls of [wallMat] close in.',
'the walls are [wallCond] [wallMat]... the tunnels, [width].',
'all around, [wallCond] [wallMat].',
'all around, [wallMat].',
'there\'s [wallMat] everywhere here.',
'there\'s [wallMat] everywhere here. it\'s [wallCond].',
'[wallCond] [wallMat] all around.',
'the walls are made of [wallMat] here.',
'this place is built entirely of [wallMat].',
'it\'s very [wallCond] here.',
'[width], [wallCond].',
'[wallMat].',
'[wallCond].'],
'width': [ 'suffocatingly close', 'echoing', 'massive', 'wide', 'barely large enough to pass crawling', 'thin and straight', 'tall and narrow', 'tiny', 'spacious', 'vast' ],
'door': [ 'door', 'hatch', 'gate', 'opening', 'incision', 'grating', 'well', 'oubliette', 'tunnel', 'arch' ],
'doorMat': [ 'rock', 'oaken', 'papery', 'crystal', 'glass', 'iron', 'silver' ],
'hidden': [ 'half-hidden', 'in plain view', 'almost impossible to spot', 'staring you in the face', 'which can only be found by touch' ]
})
if __name__ == '__main__':
linkNames = [ "[N]orth;north;n", "[S]outh;south;s", "[E]ast;east;e", "[W]est;west;w", "[U]p;up;u" ]
project = { "projectName": "maze", "rooms": { } }
roomCount = 25
for i in range(0, roomCount):
desc = wallMaker.parse("[walls]\n\na [doorMat] [!door], [hidden].")
door = wallMaker.saved.pop( )
ID = "room-" + i.__str__()
project["rooms"][ ID ] = { "NAME": "Maze" }
project["rooms"][ ID ][ "LINKS" ] = { }
project["rooms"][ ID ][ "_/de" ] = desc
project["rooms"][ ID ][ "POSTSCRIPT" ] = { "BUILD": [ "@set here=D", "@tel here=#63" ] }
# Each room shall have 2-3 links to other random rooms. Don't try to be consistent.
ln = linkNames.copy( )
random.shuffle(ln)
for i in range( 0, random.choice([ 2, 3, 3, 3, 3, 4, 4, 4 ]) ):
project["rooms"][ ID ][ "LINKS" ][ "room-" + random.choice( range(0, roomCount) ).__str__() ] = {
"NAME": ln.pop( ),
"succ": "You force your way through the " + door + ".",
"osucc": "forces their way through the " + door + ".",
"odrop": "emerges through an obscure way from some other part of the maze." }
with open("maze.gen.yaml", "w") as fh:
fh.write( yaml.dump( project ) )
print( "write: maze.gen.yaml (probably.)" )
| 35.531915 | 282 | 0.48982 |
75d6d6a2674761306d18d16bf5fb2a0d2ba911d3 | 543 | py | Python | kratos/apps/trigger/views.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | 1 | 2020-11-30T09:53:40.000Z | 2020-11-30T09:53:40.000Z | kratos/apps/trigger/views.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | kratos/apps/trigger/views.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from kratos.apps.trigger import models, serializers
| 30.166667 | 92 | 0.762431 |
75d7637d4de985450afeaf8267ea59deab8e6e61 | 478 | py | Python | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | [
"MIT"
] | 1 | 2021-11-17T10:04:30.000Z | 2021-11-17T10:04:30.000Z | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | [
"MIT"
] | null | null | null | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | [
"MIT"
] | null | null | null | from FileLoader import FileLoader
tests = [
"non_existing_file.csv",
"empty_file.csv",
"../data/athlete_events.csv",
]
if __name__=="__main__":
for test in tests:
print(f"==> TESTING {test}")
fl = FileLoader()
print(f"\n=> Loading file")
df = fl.load(test)
print(f"\n=> Display first 3 rows")
fl.display(df, 3)
print(f"\n=> Display lasts 3 rows")
fl.display(df, -3)
input("====>\n\n") | 20.782609 | 43 | 0.541841 |
75d9805219c61b5aa264d0f163f779ea93a814b4 | 492 | py | Python | lib/watchlists.py | nickcamel/IgApi | 19717cb8f3aea88adf060d8dad4762f8cd81e584 | [
"MIT"
] | 1 | 2021-10-02T00:30:17.000Z | 2021-10-02T00:30:17.000Z | lib/watchlists.py | nickcamel/IgApi | 19717cb8f3aea88adf060d8dad4762f8cd81e584 | [
"MIT"
] | null | null | null | lib/watchlists.py | nickcamel/IgApi | 19717cb8f3aea88adf060d8dad4762f8cd81e584 | [
"MIT"
] | null | null | null | # REF: https://labs.ig.com/rest-trading-api-reference
| 18.923077 | 53 | 0.422764 |
75d9f90c2f975ea4a2ae0b3fd9a26571c68ea1e6 | 21,236 | py | Python | MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py | adoebley/Griffin_analyses | 94a8246b45c3ebbf255cffaa60b97e7e05d5de78 | [
"BSD-3-Clause-Clear"
] | 6 | 2021-10-05T10:32:32.000Z | 2022-03-03T15:38:38.000Z | MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py | adoebley/Griffin_analyses | 94a8246b45c3ebbf255cffaa60b97e7e05d5de78 | [
"BSD-3-Clause-Clear"
] | null | null | null | MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py | adoebley/Griffin_analyses | 94a8246b45c3ebbf255cffaa60b97e7e05d5de78 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-11-03T07:19:16.000Z | 2021-11-03T07:19:16.000Z | #!/usr/bin/env python
# coding: utf-8
#AL - the above code is new for the griffin paper version
#modified print commands for python3
# Analyze all possible things from BAM-file
import sys
import argparse
from subprocess import call
import numpy
import scipy
import scipy.stats
import os.path
import os
import glob
# Parse command line arguments ###################################################################################
parser = argparse.ArgumentParser(description='Analyze epigenetic traces in cfDNA')
parser.add_argument('-b','--bam', dest='bam_file',
help='BAM file',required=True)
parser.add_argument('-o','--output', dest='name',
help='Output name for files and directory',required=True)
parser.add_argument('-cov','--mean-coverage', dest='mean_coverage',
help='Mean coverage along the genome [default:1]',default=1,type=float)
parser.add_argument('-ylimit','--plot-y-limit', dest='ylimit',
help='Plotting until this limit on y-axis [default:1.5]',default=1.5,type=float)
parser.add_argument('-norm-file','--normalize-file', dest='norm_log2',
help='Normalize by local copynumber from this file')
parser.add_argument('-calccov','--calculate-mean-coverage', dest='calc_cov',
help='Specify whether genome read depths should be calculated',action="store_true")
parser.add_argument('-hg38','--hg38', dest='hg38',
help='Use hg38 coordinates [default: hg19]',action="store_true")
parser.add_argument('-a','--analysis', dest='analysis',
help='Specify type of analysis (all|enhancer|histone|tf|ctcf|...)',required=True)
parser.add_argument('-tf','--trans-factor', dest='tf',
help='Specify transcription factor for VirChip data')
args = parser.parse_args()
####################################################################################################
# setup structure
print ("Setup structure") # AL mod
if not os.path.isdir(args.name):
os.mkdir(args.name)
####################################################################################################
# get genomewide coverage from bedtools genomecoverage
if args.calc_cov:
#AL added if/else
if os.path.isfile(args.name.rstrip("/")+"/"+args.name+".coverage"):
print('cov already complete')
else:
#AL tabbed over this section to add it to the if/else statement but did not change it
print ("Calc avg. coverage") # AL mod
OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".coverage","w")
if args.hg38:
call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg38.chrom_sizes.txt"],stdout=OUTPUT)
else:
call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg19.chrom_sizes.txt"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".short_coverage","w")
call(["./Scripts/get_avg_coverage.py",args.name.rstrip("/")+"/"+args.name+".coverage"],stdout=OUTPUT)
OUTPUT.close()
#end AL edits
INPUT = open(args.name.rstrip("/")+"/"+args.name+".short_coverage","r")
avg_coverage = 1
for line in INPUT.readlines():
chrom,cov = line.rstrip().split("\t")
if chrom == "genome":
avg_coverage = cov
INPUT.close()
else:
print ("Skipping genomewide-coverage calculation using mean coverage: "+str(args.mean_coverage)) # AL mod
avg_coverage = args.mean_coverage
####################################################################################################
# print statistics:
print ("Write Logs") # AL mod
OUT=open(args.name.rstrip("/")+"/log.txt","w")
OUT.write("BAM:\t"+args.bam_file+"\n")
OUT.write("Norm File:\t"+args.norm_log2+"\n")
OUT.write("cov:\t"+str(avg_coverage)+"\n")
OUT.write("analysis:\t"+args.analysis+"\n")
OUT.close()
####################################################################################################
# get chromosome coverage from output of bedtools genomecoverage
####################################################################################################
# CTCF analysis
#########################################################################
#########################################################################
#########################################################################
# TSS
####################################################################################################
# AndrogenReceptor
####################################################################################################
# Check for binding sites proximal and distal to Transcription start sites
####################################################################################################
if args.analysis == "all":
ctcf(args,avg_coverage)
tf_gtrd_chip_only(args,avg_coverage)
tss(args,avg_coverage)
elif args.analysis == "tss":
tss(args,avg_coverage)
elif args.analysis == "androgen":
androgen(args,avg_coverage)
elif args.analysis == "ctcf":
ctcf(args,avg_coverage)
elif args.analysis == "tf_gtrd":
tf_gtrd(args,avg_coverage)
elif args.analysis == "tf_gtrd_1000sites":
tf_gtrd_1000sites(args,avg_coverage)
else:
print ("Unknown analysis type") # AL mod
print (" Use any of:") # AL mod
print (" -) all") # AL mod
print (" -) ctcf") # AL mod
print (" -) androgen") # AL mod
print (" -) tf_gtrd") # AL mod
print (" -) tf_gtrd_1000sites") # AL mod
print (" -) tf_tss") # AL mod
| 71.743243 | 290 | 0.638915 |
75da6f1e542a2683ea21908b7c192b05e4167dbd | 1,094 | py | Python | Day 3/login-registrasi.py | adamsaparudin/python-datascience | 1b4164bb8a091f88def950f07108fe023737399c | [
"MIT"
] | null | null | null | Day 3/login-registrasi.py | adamsaparudin/python-datascience | 1b4164bb8a091f88def950f07108fe023737399c | [
"MIT"
] | null | null | null | Day 3/login-registrasi.py | adamsaparudin/python-datascience | 1b4164bb8a091f88def950f07108fe023737399c | [
"MIT"
] | null | null | null | import sys
main() | 27.35 | 76 | 0.576782 |
75db1e4b6ac368d1004f97e5c6edf9221b06b01a | 7,631 | py | Python | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 18 | 2015-03-12T17:42:44.000Z | 2021-12-27T10:32:22.000Z | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 13 | 2016-01-27T10:19:07.000Z | 2021-12-13T20:24:36.000Z | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 6 | 2016-11-28T15:41:29.000Z | 2022-01-08T11:16:48.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# this repo overrides ferenda.sources.legal.se.Regeringen to work
# against old downloaded
import re
import codecs
# from urllib.parse import urljoin
from rdflib import URIRef
from rdflib.namespace import SKOS
from ferenda.sources.legal.se import Regeringen, RPUBL
from ferenda.sources.legal.se.direktiv import DirRegeringen
from ferenda.sources.legal.se.sou import SOURegeringen
from ferenda.sources.legal.se.ds import Ds
from ferenda.sources.legal.se.propositioner import PropRegeringen
from ferenda.compat import urljoin
from . import SameAs
| 40.375661 | 135 | 0.576727 |
75de5bb6fec2ff1e86bf17bc2c0b9b36441cdf30 | 211 | py | Python | reverse_geocode_test.py | falcaopetri/trajectory-data | 7f81343086ccd00d3d9f52899a7032d987fc0a66 | [
"MIT"
] | 1 | 2019-05-21T15:52:28.000Z | 2019-05-21T15:52:28.000Z | reverse_geocode_test.py | falcaopetri/trajectory-data | 7f81343086ccd00d3d9f52899a7032d987fc0a66 | [
"MIT"
] | null | null | null | reverse_geocode_test.py | falcaopetri/trajectory-data | 7f81343086ccd00d3d9f52899a7032d987fc0a66 | [
"MIT"
] | 1 | 2020-08-18T14:38:52.000Z | 2020-08-18T14:38:52.000Z | import reverse_geocode
reverse_geocode.search([(35.6963860567411,139.686436661882)])
reverse_geocode.search([(-33.8236171057086,151.021885871887)])
reverse_geocode.search([(47.3111740195794,8.52681624913163)])
| 35.166667 | 62 | 0.824645 |
75e1a861f6479f18e30cc2201832823b09eb3ea9 | 803 | py | Python | src/data.py | voschezang/ABM | 523fcf30000057e73ba93f5a500d8896c945a35f | [
"MIT"
] | null | null | null | src/data.py | voschezang/ABM | 523fcf30000057e73ba93f5a500d8896c945a35f | [
"MIT"
] | null | null | null | src/data.py | voschezang/ABM | 523fcf30000057e73ba93f5a500d8896c945a35f | [
"MIT"
] | null | null | null | from mesa.datacollection import DataCollector
### datacollection functions
def density(model):
"""Density: number of cars per unit length of road."""
return len(model.schedule.agents) / model.space.length
def flow(model):
"""Flow: number of cars passing a reference point per unit of time."""
# get the flow in the current timestep
flow_in_timestep = model.data.flow
# reset flow counter
model.data.flow = 0
return flow_in_timestep / model.space.n_lanes
| 25.903226 | 74 | 0.674969 |
75e2178969612f0c7284d059eb5edd0c7915d7e5 | 2,850 | py | Python | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | [
"Apache-2.0"
] | null | null | null | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | [
"Apache-2.0"
] | null | null | null | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | [
"Apache-2.0"
] | null | null | null | import pymysql
import logging
from lambda_assistant.handlers.event_handler import EventHandler
from lambda_assistant.errors import *
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| 35.185185 | 135 | 0.565965 |
75e66488020f917b36c64a0fe8d0a2a1ac18f43c | 1,280 | py | Python | yatube/posts/models.py | PabloKor/Yatube | 5c835e9d66a29e95781f08a87a102ec017fbc91b | [
"MIT"
] | null | null | null | yatube/posts/models.py | PabloKor/Yatube | 5c835e9d66a29e95781f08a87a102ec017fbc91b | [
"MIT"
] | null | null | null | yatube/posts/models.py | PabloKor/Yatube | 5c835e9d66a29e95781f08a87a102ec017fbc91b | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = get_user_model()
| 34.594595 | 87 | 0.670313 |
75e695aeba900a9af2ded444426e995f02d6bb1e | 1,508 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension AMD.blend_minmax_factor
This module customises the behaviour of the
OpenGL.raw.GL.AMD.blend_minmax_factor to provide a more
Python-friendly API
Overview (from the spec)
The EXT_blend_minmax extension extended the GL's blending functionality
to allow the blending equation to be specified by the application. That
extension introduced the MIN_EXT and MAX_EXT blend equations, which caused the
result of the blend equation to become the minimum or maximum of the source
color and destination color, respectively.
The MIN_EXT and MAX_EXT blend equations, however, do not include the source
or destination blend factors in the arguments to the min and max functions.
This extension provides two new blend equations that produce the minimum
or maximum of the products of the source color and source factor, and the
destination color and destination factor.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/blend_minmax_factor.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.blend_minmax_factor import *
from OpenGL.raw.GL.AMD.blend_minmax_factor import _EXTENSION_NAME
def glInitBlendMinmaxFactorAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | 40.756757 | 79 | 0.812334 |
75e72372c73d69ec71d6ae230b03dd3710c4e2a3 | 2,506 | py | Python | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | [
"MIT"
] | null | null | null | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | [
"MIT"
] | null | null | null | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | [
"MIT"
] | null | null | null | """This example demonstrates the basics on building complete forms using campos.
It creates several fields, marking some of them as required and adding some
custom validation.
Finally fields are added to a CreationForm which have several buttons and a
custom callback connected to one of them. After added, some related fields
are grouped.
"""
__author__ = 'Juan Manuel Bermdez Cabrera'
if __name__ == '__main__':
import os
import sys
# set gui api to use
os.environ['QT_API'] = 'pyside'
from qtpy.QtWidgets import QMessageBox, QApplication
import campos
# set global settings for validation type and label positions
campos.Validation.set_current('instant')
campos.Labelling.set_current('top')
app = QApplication(sys.argv)
dialog = create_form()
sys.exit(dialog.exec_())
| 33.413333 | 80 | 0.634876 |
75e9882a624cfcf705ab7744f64aca22cda52bfb | 8,452 | py | Python | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | [
"MIT"
] | null | null | null | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | [
"MIT"
] | null | null | null | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | [
"MIT"
] | null | null | null | from flask import Flask, request
from structs import *
import json
import numpy as np
import sys
import random, time
app = Flask(__name__)
dx=0
dy=0
def deserialize_map(serialized_map):
"""
Fonction utilitaire pour comprendre la map
"""
serialized_map = serialized_map[1:]
rows = serialized_map.split('[')
column = rows[0].split('{')
deserialized_map = [[Tile() for x in range(40)] for y in range(40)]
for i in range(len(rows) - 1):
column = rows[i + 1].split('{')
for j in range(len(column) - 1):
infos = column[j + 1].split(',')
end_index = infos[2].find('}')
content = int(infos[0])
x = int(infos[1])
y = int(infos[2][:end_index])
deserialized_map[i][j] = Tile(content, x, y)
return deserialized_map
#customs
'''
def searchg(x,y,grid,target, at):
if grid[x][y] == target:
at.append([x,y]) #found
return True
elif grid[x][y] == 1 or grid[x][y] == 3:
return False #wall or lava
elif grid[x][y] == 9:
return False #been here
at.append([x,y])
grid[x][y] == 9
if ((x<len(grid)-1 and search(x+1,y,grid,target, at))
or (y > 0 and search(x, y-1,grid,target, at))
or (x > 0 and search(x-1,y,grid,target, at))
or (y < len(grid)-1 and search(x, y+1,grid,target, at))):
return True
return False
'''
def bot():
"""
Main de votre bot.
"""
map_json = request.form["map"]
# Player info
encoded_map = map_json.encode()
map_json = json.loads(encoded_map)
p = map_json["Player"]
pos = p["Position"]
x = pos["X"]
y = pos["Y"]
house = p["HouseLocation"]
player = Player(p["Health"], p["MaxHealth"], Point(x,y),
Point(house["X"], house["Y"]), p["Score"],
p["CarriedResources"], p["CarryingCapacity"])
# Map
serialized_map = map_json["CustomSerializedMap"]
deserialized_map=deserialize_map(serialized_map)
transposed=np.transpose(deserialized_map)
targets = findTargets(deserialized_map, player)
visual(transposed[:20][::-1],x,y)
otherPlayers = []
'''
#print(map_json)
for player_dict in map_json["OtherPlayers"]:
#print(player_dict)
for player_name in player_dict.keys():
player_info = player_dict[player_name]
#print('---------')
#print(player_info)
#print('---------')
p_pos = player_info["Position"]
player_info = PlayerInfo(player_info["Health"],
player_info["MaxHealth"],
Point(p_pos["X"], p_pos["Y"]))
otherPlayers.append({player_name: player_info })
'''
# return decision
#targets =
tTargets = []
for target in targets[0]:#+targets[1]:
tTargets.append([distance([x,y],[target.X,target.Y]),target])
sortedTargets = sorted(tTargets, key=lambda x:x[0])
tEnemies = []
for enemy in otherPlayers:
tEnemies.append([distance([x,y],[enemy.X,enemy.Y]),enemy])
sortedEnemies = sorted(tEnemies, key=lambda x:x[0])
dx,dy=0,0
for i,line in enumerate(deserialized_map):
for j,tile in enumerate(line):
if tile.X==x and tile.Y==y:
dx = x-i
dy = y-j
#return decide(player, sortedEnemies, sortedTargets, deserialized_map)
print(player.__dict__,player.Position.__dict__)
return search_next(player, sortedTargets[0][1], deserialized_map,dx,dy)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3000)
| 30.959707 | 172 | 0.565192 |
75ecad4259bc7591e4a570004208ede9470250fd | 92 | py | Python | src/__init__.py | jmknoll/ig-autofriend | 8de322b59c13346d21d6b11775cbad51b4e4920f | [
"MIT"
] | null | null | null | src/__init__.py | jmknoll/ig-autofriend | 8de322b59c13346d21d6b11775cbad51b4e4920f | [
"MIT"
] | null | null | null | src/__init__.py | jmknoll/ig-autofriend | 8de322b59c13346d21d6b11775cbad51b4e4920f | [
"MIT"
] | null | null | null | from InstaFriend import InstaFriend
friend = InstaFriend('bonesaw')
friend.say_something() | 18.4 | 35 | 0.815217 |
75ee6ab2f29331c5f95dba4b6e05f4612d407042 | 3,004 | py | Python | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue-pipeline | 360fa432a006f5e197a5b22d72cced7d6735d222 | [
"MIT"
] | 8 | 2019-08-02T09:48:40.000Z | 2019-12-20T14:06:58.000Z | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue | 17dcf7f1977f953fbaf35c60aa166aaa1413fdd2 | [
"MIT"
] | 329 | 2020-02-18T07:43:08.000Z | 2021-04-23T10:45:33.000Z | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue-pipeline | 360fa432a006f5e197a5b22d72cced7d6735d222 | [
"MIT"
] | 1 | 2019-08-22T11:44:34.000Z | 2019-08-22T11:44:34.000Z | import datetime as dt
import os
import attr
| 29.165049 | 78 | 0.508655 |
75eea75e6047e89d14c6be50606878240e707caf | 41 | py | Python | tests/components/mqtt_json/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/mqtt_json/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/mqtt_json/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the mqtt_json component."""
| 20.5 | 40 | 0.707317 |
75efb0136dccedb3a2615588ac4efa68a29d7748 | 414 | py | Python | wxpy/utils/__init__.py | frkhit/bl_wxpy | b03bc63d51592d32ee218ef6fd1022df6ef75069 | [
"MIT"
] | 3 | 2019-06-24T02:19:19.000Z | 2021-02-14T05:27:16.000Z | wxpy/utils/__init__.py | frkhit/bl_wxpy | b03bc63d51592d32ee218ef6fd1022df6ef75069 | [
"MIT"
] | null | null | null | wxpy/utils/__init__.py | frkhit/bl_wxpy | b03bc63d51592d32ee218ef6fd1022df6ef75069 | [
"MIT"
] | 1 | 2021-02-08T03:50:05.000Z | 2021-02-08T03:50:05.000Z | from .console import embed, shell_entry
from .misc import decode_webwx_emoji, enhance_connection, ensure_list, get_raw_dict, get_receiver, \
get_text_without_at_bot, get_username, match_attributes, match_name, match_text, new_local_msg_id, repr_message, \
smart_map, start_new_thread
from .puid_map import PuidMap
from .tools import detect_freq_limit, dont_raise_response_error, ensure_one, mutual_friends
| 59.142857 | 118 | 0.84058 |
75f1634c6e371274f9060f7f9a480ee9c930fa89 | 1,082 | py | Python | userbot/plugins/hpdiwali.py | yu9ohde/Marshmellow | 145c90470701c972ab458483ac1b9320d1a44e8e | [
"MIT"
] | 2 | 2020-12-06T03:46:08.000Z | 2022-02-19T20:34:52.000Z | userbot/plugins/hpdiwali.py | pro-boy/Marshmello | 4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c | [
"MIT"
] | 4 | 2020-11-07T07:39:51.000Z | 2020-11-10T03:46:41.000Z | userbot/plugins/hpdiwali.py | pro-boy/Marshmello | 4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c | [
"MIT"
] | 9 | 2020-11-28T11:30:44.000Z | 2021-06-01T07:11:57.000Z | # Plugin made by Dark cobra
# For Dark cobra
# Made by Shivam Patel(Team Cobra)
# Kang with credits..
import random
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import admin_cmd
from telethon import events, types, functions, utils
import asyncio
choser('hpdiwali', 'a929138153_by_Shivam_Patel_1_anim')
| 28.473684 | 134 | 0.621072 |
75f227cf59ba67118be0d4f419b2d0cc15fd93df | 1,024 | py | Python | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | [
"MIT"
] | 3 | 2015-12-31T11:19:50.000Z | 2017-11-30T03:14:56.000Z | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | [
"MIT"
] | null | null | null | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
maximum = 0.0
selected = 0.0
results = []
for line in sys.stdin.readlines()[5:]:
line = line.strip()
if len(line) == 0:
continue
(inst, actual, predicted, error) = line.split()
results.append([inst, actual, predicted, error])
predicted = float(predicted)
if predicted > maximum:
maximum = predicted
selected = float(actual)
by_predicted = sorted(results, key=lambda entry: float(entry[2]))
by_predicted.reverse()
by_actual = sorted(results, key=lambda entry: float(entry[1]))
by_actual.reverse()
best_of_actuals = float(by_actual[0][1])
sys.stdout.write('Best of Actuals: %f\n' % best_of_actuals)
sys.stdout.write('Maximum Prediction: %s\n' %
str([x[2] for x in by_predicted[0:5]]))
sys.stdout.write('Selected Actual: %s\n' %
str([x[1] for x in by_predicted[0:5]]))
sys.stdout.write('Percentages: %s\n' %
str([float(x[1])/best_of_actuals for x in by_predicted[0:5]]))
| 26.947368 | 79 | 0.630859 |
75f3476923aa5142454f8d9f4ed05a21bd8875d9 | 941 | py | Python | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | [
"MIT"
] | null | null | null | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | [
"MIT"
] | 1 | 2022-01-26T12:51:32.000Z | 2022-01-26T12:51:32.000Z | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | [
"MIT"
] | 1 | 2022-01-26T12:42:24.000Z | 2022-01-26T12:42:24.000Z | '''Logging module for symtuner library
Logging module for symtuner library. All loggings in symtuner library use this module.
'''
import logging as _logging
_LOGGER = None
def get_logger():
'''Get a logger.
Get a singleton `Logger`. If `Logger` not defined make one and return. If `get_logger` called
previously, returns a `Logger` object created previously.
Returns:
A `Logger` object.
'''
global _LOGGER
if not _LOGGER:
_LOGGER = _logging.getLogger('symtuner')
if not _logging.getLogger().handlers:
formatter = _logging.Formatter(fmt='%(asctime)s symtuner [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
stderr_handler = _logging.StreamHandler()
stderr_handler.setFormatter(formatter)
_LOGGER.addHandler(stderr_handler)
_LOGGER.setLevel('INFO')
return _LOGGER
| 28.515152 | 98 | 0.631243 |
75f57c3ebdfa5b1c58a1a40cbcfe56a933e80e69 | 3,326 | py | Python | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | [
"MIT"
] | 292 | 2018-07-17T01:11:53.000Z | 2022-03-31T13:06:50.000Z | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | [
"MIT"
] | 18 | 2018-08-02T13:33:06.000Z | 2022-01-26T15:54:27.000Z | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | [
"MIT"
] | 87 | 2018-07-17T18:02:09.000Z | 2021-12-19T08:21:57.000Z | import os
import sys
import cv2
import argparse
import numpy as np
import torch
from torch import nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import settings
from dataset import TestDataset
from model import RESCAN
from cal_ssim import SSIM
logger = settings.logger
torch.cuda.manual_seed_all(66)
torch.manual_seed(66)
torch.cuda.set_device(settings.device_id)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default='latest')
args = parser.parse_args(sys.argv[1:])
run_test(args.model)
| 28.672414 | 81 | 0.613049 |
75f5ca0e1019fe3f64db390c86a601c2f8792420 | 6,371 | py | Python | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | [
"MIT"
] | null | null | null | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | [
"MIT"
] | null | null | null | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | [
"MIT"
] | null | null | null | # Online mode selection for FastEMRIWaveforms Packages
# Copyright (C) 2020 Michael L. Katz, Alvin J.K. Chua, Niels Warburton, Scott A. Hughes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from few.utils.citations import *
# check for cupy
try:
import cupy as xp
except (ImportError, ModuleNotFoundError) as e:
import numpy as xp
| 38.149701 | 95 | 0.622822 |
75f7e09f370f1d9746b214f2177f7c3fe2f5c339 | 81 | py | Python | coreapi/models/__init__.py | recentfahim/smartbusinessbd | 61a74ae629f2c6e2317c41da23476c8780446e84 | [
"Apache-2.0"
] | null | null | null | coreapi/models/__init__.py | recentfahim/smartbusinessbd | 61a74ae629f2c6e2317c41da23476c8780446e84 | [
"Apache-2.0"
] | null | null | null | coreapi/models/__init__.py | recentfahim/smartbusinessbd | 61a74ae629f2c6e2317c41da23476c8780446e84 | [
"Apache-2.0"
] | null | null | null | from .city import City
from .company import Company
from .country import Country
| 20.25 | 28 | 0.814815 |
75f9dd3819053deb8e0d3dbd4dc28b348322030d | 2,113 | py | Python | shared-data/python/opentrons_shared_data/deck/dev_types.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | shared-data/python/opentrons_shared_data/deck/dev_types.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | shared-data/python/opentrons_shared_data/deck/dev_types.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | """
opentrons_shared_data.deck.dev_types: types for deck defs
This should only be imported if typing.TYPE_CHECKING is True
"""
from typing import Any, Dict, List, NewType, Union
from typing_extensions import Literal, TypedDict
from ..module.dev_types import ModuleType
DeckSchemaVersion3 = Literal[3]
DeckSchemaVersion2 = Literal[2]
DeckSchemaVersion1 = Literal[1]
DeckSchema = NewType("DeckSchema", Dict[str, Any])
RobotModel = Union[Literal["OT-2 Standard"], Literal["OT-3 Standard"]]
Fixture = Union[
FixedLabwareBySlot, FixedLabwareByPosition, FixedVolumeBySlot, FixedVolumeByPosition
]
DeckDefinition = DeckDefinitionV3
| 19.564815 | 88 | 0.730715 |
75f9e56ae6c6a091caa3997bff09abbf8201e9db | 2,803 | py | Python | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | [
"MIT"
] | 9 | 2021-11-04T16:53:04.000Z | 2022-03-28T10:27:44.000Z | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | [
"MIT"
] | null | null | null | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
defaultcfg = {
11 : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13 : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
16 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
19 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
} | 32.593023 | 108 | 0.567249 |
75faae9bc5c91a63ded3c9f4f2e51213df5e1730 | 11,555 | py | Python | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | faradaym/Lantern | 536e48da79ee374527c669f77ad9e0a0776a0bb8 | [
"BSD-3-Clause"
] | 158 | 2018-03-28T21:58:07.000Z | 2022-02-22T00:49:46.000Z | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | douxiansheng/Lantern | f453de532da638c1f467953b32bbe49a3dedfa45 | [
"BSD-3-Clause"
] | 35 | 2018-09-03T21:27:15.000Z | 2019-05-11T02:17:49.000Z | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | douxiansheng/Lantern | f453de532da638c1f467953b32bbe49a3dedfa45 | [
"BSD-3-Clause"
] | 36 | 2017-06-30T00:28:59.000Z | 2022-01-24T12:20:42.000Z | """
Preprocessing script for Stanford Sentiment Treebank data.
"""
import os
import glob
#
# Trees and tree loading
#
def load_trees(dirpath):
const_trees, dep_trees, toks = [], [], []
with open(os.path.join(dirpath, 'parents.txt')) as parentsfile, \
open(os.path.join(dirpath, 'dparents.txt')) as dparentsfile, \
open(os.path.join(dirpath, 'sents.txt')) as toksfile:
parents, dparents = [], []
for line in parentsfile:
parents.append(map(int, line.split()))
for line in dparentsfile:
dparents.append(map(int, line.split()))
for line in toksfile:
toks.append(line.strip().split())
for i in xrange(len(toks)):
const_trees.append(load_constituency_tree(parents[i], toks[i]))
dep_trees.append(load_dependency_tree(dparents[i]))
return const_trees, dep_trees, toks
def load_constituency_tree(parents, words):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
word_idx = 0
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
word = words[word_idx]
word_idx += 1
while True:
tree = ConstTree()
parent = parents[idx] - 1
tree.word, tree.parent, tree.idx = word, parent, idx
word = None
if prev is not None:
if tree.left is None:
tree.left = prev
else:
tree.right = prev
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
if trees[parent].left is None:
trees[parent].left = tree
else:
trees[parent].right = tree
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
def load_dependency_tree(parents):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
while True:
tree = DepTree()
parent = parents[idx] - 1
# node is not in tree
if parent == -2:
break
tree.parent, tree.idx = parent, idx
if prev is not None:
tree.children.append(prev)
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
trees[parent].children.append(tree)
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
#
# Various utilities
#
if __name__ == '__main__':
print('=' * 80)
print('Preprocessing Stanford Sentiment Treebank')
print('=' * 80)
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(base_dir, 'data')
lib_dir = os.path.join(base_dir, 'lib')
sst_dir = os.path.join(data_dir, 'sst')
train_dir = os.path.join(sst_dir, 'train')
dev_dir = os.path.join(sst_dir, 'dev')
test_dir = os.path.join(sst_dir, 'test')
make_dirs([train_dir, dev_dir, test_dir])
# produce train/dev/test splits
split(sst_dir, train_dir, dev_dir, test_dir)
sent_paths = glob.glob(os.path.join(sst_dir, '*/sents.txt'))
# produce dependency parses
classpath = ':'.join([
lib_dir,
os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'),
os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')])
for filepath in sent_paths:
dependency_parse(filepath, cp=classpath, tokenize=False)
# get vocabulary
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab.txt'))
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab-cased.txt'), lowercase=False)
# write sentiment labels for nodes in trees
dictionary = load_dictionary(sst_dir)
write_labels(train_dir, dictionary)
write_labels(dev_dir, dictionary)
write_labels(test_dir, dictionary)
| 32.457865 | 87 | 0.539766 |
75fc997c30736fa87f40fddc061010fa3c1f2c9f | 12,703 | py | Python | models/relevance/relevance_google_net.py | sanglee/XAI-threshold-calibration | 24ddd5213b02d4fb919bca191392fe8b1a30aa88 | [
"Apache-2.0"
] | null | null | null | models/relevance/relevance_google_net.py | sanglee/XAI-threshold-calibration | 24ddd5213b02d4fb919bca191392fe8b1a30aa88 | [
"Apache-2.0"
] | null | null | null | models/relevance/relevance_google_net.py | sanglee/XAI-threshold-calibration | 24ddd5213b02d4fb919bca191392fe8b1a30aa88 | [
"Apache-2.0"
] | null | null | null | import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import torch.utils.model_zoo as model_zoo
from typing import Optional, Tuple, List, Callable, Any
from modules.layers import *
__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]
model_urls = {
# GoogLeNet ported from TensorFlow
'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
'aux_logits1': Optional[Tensor]}
# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs
def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet":
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
return model
return GoogLeNet(**kwargs)
| 32.994805 | 119 | 0.581831 |
75fe3189c125c3919b270ac5067d6ecc73d03252 | 375 | py | Python | University Codesprint Contest/seperaenos.py | ukirderohit/Python-Hacker-rank-solutions | de3b60b00d864c15a452977225b33ead19c878a5 | [
"MIT"
] | null | null | null | University Codesprint Contest/seperaenos.py | ukirderohit/Python-Hacker-rank-solutions | de3b60b00d864c15a452977225b33ead19c878a5 | [
"MIT"
] | null | null | null | University Codesprint Contest/seperaenos.py | ukirderohit/Python-Hacker-rank-solutions | de3b60b00d864c15a452977225b33ead19c878a5 | [
"MIT"
] | null | null | null | q = int(raw_input().strip())
for a0 in xrange(q):
s=raw_input().strip()
# if s.startswith('0'):
# print "No"
# print s.find('1')
# print s.rfind(s,a0,a0-1)
# posof1 = s.find('1')
digits = [str(x) for x in str(s)]
print digits
for digit in len(digits):
if digits[digit]-digits[digit-1] == 1:
print "yes"
| 26.785714 | 46 | 0.512 |
75fe4ffed842895823f432c3592116337d923fac | 8,457 | py | Python | polyglotdb/client/client.py | michaelhaaf/PolyglotDB | 7640212c7062cf44ae911081241ce83a26ced2eb | [
"MIT"
] | 25 | 2016-01-28T20:47:07.000Z | 2021-11-29T16:13:07.000Z | polyglotdb/client/client.py | michaelhaaf/PolyglotDB | 7640212c7062cf44ae911081241ce83a26ced2eb | [
"MIT"
] | 120 | 2016-04-07T17:55:09.000Z | 2022-03-24T18:30:10.000Z | polyglotdb/client/client.py | PhonologicalCorpusTools/PolyglotDB | 7640212c7062cf44ae911081241ce83a26ced2eb | [
"MIT"
] | 10 | 2015-12-03T20:06:58.000Z | 2021-02-11T03:02:48.000Z | import requests
from ..exceptions import ClientError
| 33.295276 | 108 | 0.537898 |
75fefd40d863da1697a3900b9bc8d32e531394bf | 2,745 | py | Python | python/plotCSV.py | lrquad/LoboScripts | 04d2de79d2d83e781e3f4a3de2531dc48e4013a6 | [
"MIT"
] | null | null | null | python/plotCSV.py | lrquad/LoboScripts | 04d2de79d2d83e781e3f4a3de2531dc48e4013a6 | [
"MIT"
] | null | null | null | python/plotCSV.py | lrquad/LoboScripts | 04d2de79d2d83e781e3f4a3de2531dc48e4013a6 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from matplotlib import rcParams
import matplotlib.patches as patches
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 20
rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
rcParams['axes.linewidth'] = 2
hfont = {'fontname': 'Times New Roman'}
folderpath = "./testdata/"
if __name__ == "__main__":
data = loadData(folderpath+"ttmath_error.csv")
labels = loadlabel(folderpath+"h_list.csv")
plotData(data, labels,["ttmath","FD","CD","CFD"])
| 30.5 | 151 | 0.665938 |
2f01f5d13c019c855d7b51b2b4f48b63f6f7275b | 12,327 | py | Python | wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 26 | 2018-12-17T13:45:25.000Z | 2022-01-16T20:00:04.000Z | wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 4 | 2019-01-03T12:08:52.000Z | 2021-12-02T05:21:13.000Z | wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 8 | 2019-01-24T08:22:06.000Z | 2022-02-07T11:37:00.000Z | # Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from ._vscf_error import vscf_error_t
from ._vscf_raw_public_key import vscf_raw_public_key_t
from ._vscf_raw_private_key import vscf_raw_private_key_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
| 49.705645 | 124 | 0.736108 |
2f028c07302e47df287d4dc5d37f771ec2181806 | 30,394 | py | Python | tb_api_client/swagger_client/apis/user_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | 5 | 2017-11-27T15:48:16.000Z | 2020-09-21T04:18:47.000Z | tb_api_client/swagger_client/apis/user_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | null | null | null | tb_api_client/swagger_client/apis/user_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | 6 | 2018-01-14T17:23:46.000Z | 2019-06-24T13:38:54.000Z | # coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>.
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
| 40.471372 | 149 | 0.560012 |
2f03da5c972d890701aa5588b07be7bd754ca560 | 5,268 | py | Python | bulk-image-optimizer.py | carzam87/python-bulk-image-optimizer | 1e9e9396de84de3651b963fc3b8b569893296dde | [
"MIT"
] | 8 | 2020-01-28T10:33:28.000Z | 2022-01-28T12:51:50.000Z | bulk-image-optimizer.py | carzam87/python-bulk-image-optimizer | 1e9e9396de84de3651b963fc3b8b569893296dde | [
"MIT"
] | null | null | null | bulk-image-optimizer.py | carzam87/python-bulk-image-optimizer | 1e9e9396de84de3651b963fc3b8b569893296dde | [
"MIT"
] | 5 | 2020-09-29T08:26:35.000Z | 2021-11-15T20:07:20.000Z | import os
import subprocess
from pathlib import Path
from PIL import Image
import errno
import time
from re import search
CONVERT_PNG_TO_JPG = False
TOTAL_ORIGINAL = 0
TOTAL_COMPRESSED = 0
TOTAL_GAIN = 0
TOTAL_FILES = 0
QUALITY = 85
if __name__ == '__main__':
start_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + r"input"
# ask if .pgn images should automatically converted to .jpg
CONVERT_PNG_TO_JPG = input('Would you like to convert .png images to .jpg? (y/n): ') == 'y'
TOTAL_GAIN = 0
compress(start_path)
print("---------------------------------------------------------------------------------------------")
print('-------------------------------------------SUMMARY-------------------------------------------')
print('Files: ' + f'{TOTAL_FILES}')
print(
"Original: " + f'{TOTAL_ORIGINAL:,.2f}' + " megabytes || " + "New Size: " + f'{TOTAL_COMPRESSED:,.2f}' +
" megabytes" + " || Gain: " + f'{TOTAL_GAIN:,.2f}' + " megabytes ~" + f'{(TOTAL_GAIN / TOTAL_ORIGINAL) * 100:,.2f}'
+ "% reduction")
| 44.644068 | 123 | 0.44609 |
2f03ebf048e5859cb54e5897517da48e3b0f38d0 | 16,968 | py | Python | interpretdl/interpreter/lime.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | 1 | 2021-03-11T02:38:51.000Z | 2021-03-11T02:38:51.000Z | interpretdl/interpreter/lime.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | null | null | null | interpretdl/interpreter/lime.py | Tyihou/InterpretDL | df8894f8703634df4bfcbdcc495a3d12b220028c | [
"Apache-2.0"
] | null | null | null | import os
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
from ..data_processor.readers import preprocess_image, read_image, restore_image
from ..data_processor.visualizer import show_important_parts, visualize_image, save_image
from ..common.paddle_utils import init_checkpoint, to_lodtensor
from ._lime_base import LimeBase
from .abc_interpreter import Interpreter
| 39.277778 | 184 | 0.552393 |
2f042e06fed341e6137967c14ffb3b319a432271 | 2,106 | py | Python | opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | null | null | null | opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | 4 | 2020-11-27T16:27:58.000Z | 2020-12-28T23:00:08.000Z | opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | null | null | null | from opendc.models.portfolio import Portfolio
from opendc.models.scenario import Scenario
from opendc.models.topology import Topology
from opendc.util.rest import Response
def POST(request):
"""Add a new Scenario for this Portfolio."""
request.check_required_parameters(path={'portfolioId': 'string'},
body={
'scenario': {
'name': 'string',
'trace': {
'traceId': 'string',
'loadSamplingFraction': 'float',
},
'topology': {
'topologyId': 'string',
},
'operational': {
'failuresEnabled': 'bool',
'performanceInterferenceEnabled': 'bool',
'schedulerName': 'string',
},
}
})
portfolio = Portfolio.from_id(request.params_path['portfolioId'])
portfolio.check_exists()
portfolio.check_user_access(request.google_id, True)
scenario = Scenario(request.params_body['scenario'])
topology = Topology.from_id(scenario.obj['topology']['topologyId'])
topology.check_exists()
topology.check_user_access(request.google_id, True)
scenario.set_property('portfolioId', portfolio.get_id())
scenario.set_property('simulation', {'state': 'QUEUED'})
scenario.set_property('topology.topologyId', topology.get_id())
scenario.insert()
portfolio.obj['scenarioIds'].append(scenario.get_id())
portfolio.update()
return Response(200, 'Successfully added Scenario.', scenario.obj)
| 42.12 | 91 | 0.45679 |
f92e0d9330578dc947fa3c0cecc40a9523ecca24 | 1,906 | py | Python | Python/pymd/md/core/box.py | ryanlopezzzz/ABPTutorial | 923fa89f1959cd71b28ecf4628ecfbfce6a6206c | [
"MIT"
] | 8 | 2020-05-05T00:41:50.000Z | 2021-11-04T20:54:43.000Z | Python/pymd/md/core/box.py | ryanlopezzzz/ABPTutorial | 923fa89f1959cd71b28ecf4628ecfbfce6a6206c | [
"MIT"
] | null | null | null | Python/pymd/md/core/box.py | ryanlopezzzz/ABPTutorial | 923fa89f1959cd71b28ecf4628ecfbfce6a6206c | [
"MIT"
] | 5 | 2020-05-04T16:37:13.000Z | 2021-08-18T07:53:58.000Z | # Copyright 2020 Rastko Sknepnek, University of Dundee, r.skepnek@dundee.ac.uk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Class handling the simulation box | 45.380952 | 114 | 0.693599 |
f92f4eea713aeec6532cc3eed5da737cef8d020e | 884 | py | Python | dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | [
"MIT"
] | 4,897 | 2015-07-12T17:52:02.000Z | 2022-03-31T16:07:01.000Z | dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | [
"MIT"
] | 337 | 2015-07-12T17:14:35.000Z | 2022-03-05T17:27:24.000Z | dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | [
"MIT"
] | 365 | 2015-07-20T07:51:11.000Z | 2022-02-22T05:00:56.000Z | # -*- coding: utf-8 -*-
# vim:se fenc=utf8 noet:
from __future__ import (unicode_literals, division, absolute_import, print_function)
try:
import vim
except ImportError:
vim = {}
from powerline.bindings.vim import (vim_get_func, buffer_name)
from powerline.theme import requires_segment_info
| 30.482759 | 84 | 0.777149 |
f93060fe13dca91fd46628410cdb2477c1e8f235 | 2,844 | py | Python | app/api/auth.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | app/api/auth.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | app/api/auth.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | from fastapi.security import OAuth2PasswordRequestForm
from fastapi import APIRouter, Depends, HTTPException, status
from datetime import timedelta
from starlette.responses import JSONResponse
from db.crud.users import blacklist_token
from db.session import get_db
from core import security
from core.auth import authenticate_user, get_current_active_user, sign_up_new_user
auth_router = r = APIRouter()
| 33.857143 | 130 | 0.654008 |
f9306dd6abfdca80dd6982ef5b08247263dd7576 | 5,530 | py | Python | src/gui_occluder/custom/sr_occluder.py | hgiesel/anki-multiple-choice | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | [
"MIT"
] | 5 | 2019-12-26T08:08:52.000Z | 2021-11-21T03:34:27.000Z | src/gui_occluder/custom/sr_occluder.py | hgiesel/anki-set-randomizer | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | [
"MIT"
] | 84 | 2019-08-01T20:36:17.000Z | 2019-10-26T16:16:33.000Z | src/gui_occluder/custom/sr_occluder.py | hgiesel/anki_set_randomizer | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | [
"MIT"
] | null | null | null | import os
import enum
from aqt.qt import QDialog, QGraphicsScene, QGraphicsRectItem, QGraphicsEllipseItem, QApplication
from aqt.qt import Qt, QPen, QGraphicsItem, QPixmap, QRectF, QPainter
from aqt.qt import QPointF, QBrush, QColor, QPainterPath, QIcon, QSize, QPalette
from aqt.utils import showInfo
from ..sr_occluder_ui import Ui_SROccluder
from .sr_rect import SRRect
from .sr_occlusion_view.py import SROcclusionView
from .sr_occlusion_scene.py import SROcclusionScene
| 34.5625 | 97 | 0.674141 |
f930eb0037c9a1f7c847f03ac1f6289fad3453d4 | 13,371 | py | Python | gen3config/config.py | uc-cdis/gen3config | fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c | [
"Apache-2.0"
] | null | null | null | gen3config/config.py | uc-cdis/gen3config | fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c | [
"Apache-2.0"
] | null | null | null | gen3config/config.py | uc-cdis/gen3config | fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c | [
"Apache-2.0"
] | null | null | null | """
Configuration class for handling configs with a given default.
If you need custom functionality or need to apply post_processing to parsed config,
simply extend this class.
Example:
```
class FenceConfig(Config):
def __init__(self, *args, **kwargs):
super(FenceConfig, self).__init__(*args, **kwargs)
def post_process(self):
# allow authlib traffic on http for development if enabled. By default
# it requires https.
#
# NOTE: use when fence will be deployed in such a way that fence will
# only receive traffic from internal clients, and can safely use HTTP
if (
self._configs.get("AUTHLIB_INSECURE_TRANSPORT")
and "AUTHLIB_INSECURE_TRANSPORT" not in os.environ
):
os.environ["AUTHLIB_INSECURE_TRANSPORT"] = "true"
# if we're mocking storage, ignore the storage backends provided
# since they'll cause errors if misconfigured
if self._configs.get("MOCK_STORAGE", False):
self._configs["STORAGE_CREDENTIALS"] = {}
cirrus.config.config.update(**self._configs.get("CIRRUS_CFG", {}))
```
Recommended use:
- Create a `config-default.yaml` and `config.py` in the top-level folder your app
- Inside `config-default.yaml` add keys and reasonable default values
- Inside `config.py`, create a class that inherits from this Config class
- See above example
- Add a final line to your `config.py` that instantiates your custom class:
- Ensure that you provide the default config path
- If placed in same directory as `config.py` you can use something like:
```
default_cfg_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "config-default.yaml"
)
config = FenceConfig(default_cfg_path)
```
- Import your instaniated object whenever you need to get configuration
- Example: `from fence.config import config`
- Load in application configuration during init of your app
- Example: `config.load('path/to/fence-config.yaml')`
- Now you can safely access anything that was in your `config-default.yaml` from this
object as if it were a dictionary
- Example: `storage_creds = config["STORAGE_CREDENTIALS"]`
- Example: `if config["SOME_BOOLEAN"]: ...`
- Example: `nested_value = config["TOP_LEVEL"]["nested"]
- And of course you can import that into any file you want and will have access to
keys/values
- Example: `from fence.config import config`
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import glob
from yaml import safe_load as yaml_load
from yaml.scanner import ScannerError
from jinja2 import Template, TemplateSyntaxError
import six
from cdislogging import get_logger
from gen3config.errors import NotFoundError, ParsingError
logger = get_logger(__name__, log_level="info")
def nested_render(cfg, fully_rendered_cfgs, replacements):
"""
Template render the provided cfg by recurisevly replacing {{var}}'s which values
from the current "namespace".
The nested config is treated like nested namespaces where the inner variables
are only available in current block and further nested blocks.
Said the opposite way: the namespace with available vars that can be used
includes the current block's vars and parent block vars.
This means that you can do replacements for top-level
(global namespaced) config vars anywhere, but you can only use inner configs within
that block or further nested blocks.
An example is worth a thousand words:
---------------------------------------------------------------------------------
fence-config.yaml
--------------------------------------------------------------------------------
BASE_URL: 'http://localhost/user'
OPENID_CONNECT:
fence:
api_base_url: 'http://other_fence/user'
client_kwargs:
redirect_uri: '{{BASE_URL}}/login/fence/login'
authorize_url: '{{api_base_url}}/oauth2/authorize'
THIS_WONT_WORK: '{{api_base_url}}/test'
--------------------------------------------------------------------------------
"redirect_uri" will become "http://localhost/user/login/fence/login"
- BASE_URL is in the global namespace so it can be used in this nested cfg
"authorize_url" will become "http://other_fence/user/oauth2/authorize"
- api_base_url is in the current namespace, so it is available
"THIS_WONT_WORK" will become "/test"
- Why? api_base_url is not in the current namespace and so we cannot use that
as a replacement. the configuration (instead of failing) will replace with
an empty string
Args:
cfg (TYPE): Description
fully_rendered_cfgs (TYPE): Description
replacements (TYPE): Description
Returns:
dict: Configurations with template vars replaced
"""
if isinstance(cfg, dict):
for key, value in six.iteritems(cfg):
replacements.update(cfg)
fully_rendered_cfgs[key] = {}
fully_rendered_cfgs[key] = nested_render(
value,
fully_rendered_cfgs=fully_rendered_cfgs[key],
replacements=replacements,
)
# new namespace, remove current vars (no longer available as replacements)
for old_cfg, value in six.iteritems(cfg):
replacements.pop(old_cfg, None)
return fully_rendered_cfgs
else:
# it's not a dict, so lets try to render it. But only if it's
# truthy (which means there's actually something to replace)
if cfg:
try:
t = Template(str(cfg))
rendered_value = t.render(**replacements)
except TemplateSyntaxError:
rendered_value = cfg
try:
cfg = yaml_load(rendered_value)
except ScannerError:
# it's not loading into yaml, so let's assume it's a string with special
# chars such as: {}[],&*#?|:-<>=!%@\)
#
# in YAML, we have to "quote" a string with special chars.
#
# since yaml_load isn't loading from a file, we need to wrap the Python
# str in actual quotes.
cfg = yaml_load('"{}"'.format(rendered_value))
return cfg
def get_config_path(search_folders, file_name="*config.yaml"):
    """
    Return the path of a single configuration file ending in config.yaml
    from one of the search folders.

    NOTE: Will return the first match it finds. If multiple are found,
    this will error out.
    """
    # A falsy file_name (e.g. None) falls back to the default pattern.
    pattern = file_name or "*config.yaml"
    # Gather every glob match across all of the search folders, in order.
    matches = [
        match
        for folder in search_folders
        for match in glob.glob(os.path.join(folder, pattern))
    ]
    if not matches:
        raise NotFoundError(
            "Could not find config file {}. Searched in the following locations: "
            "{}".format(pattern, str(search_folders))
        )
    if len(matches) > 1:
        raise IOError(
            "Multiple config.yaml files found: {}. Please specify which "
            "configuration to use by providing `config_path` instead of "
            "`search_folders` to Config.load(). Alternatively, ensure that only a "
            "single valid *config.yaml exists in the search folders: {}.".format(
                str(matches), search_folders
            )
        )
    return matches[0]
| 35.943548 | 99 | 0.615362 |
f931949a583110bdf77e537bf67ef0dfdd9aeae4 | 8,150 | py | Python | src/ReinforcementLearning/Modules/carlaUtils.py | B-C-WANG/ReinforcementLearningInAutoPilot | 8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc | [
"MIT"
] | 27 | 2019-05-14T01:06:05.000Z | 2022-03-06T03:12:40.000Z | src/ReinforcementLearning/Modules/carlaUtils.py | B-C-WANG/ReinforcementLearningInAutoPilot | 8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc | [
"MIT"
] | null | null | null | src/ReinforcementLearning/Modules/carlaUtils.py | B-C-WANG/ReinforcementLearningInAutoPilot | 8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc | [
"MIT"
] | 10 | 2020-01-20T09:39:51.000Z | 2022-03-31T18:30:53.000Z | # coding:utf-8
# Type: Public
import numpy as np
import common.Math as cMath
import math
| 40.346535 | 141 | 0.640613 |
f932dbe3d5afcee0aae3f946f59a3b66e3f2fb59 | 2,413 | py | Python | models.py | abhishekyana/CycleGANs-PyTorch | ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a | [
"MIT"
] | 12 | 2019-07-27T09:54:57.000Z | 2021-04-23T23:34:25.000Z | models.py | abhishekyana/CycleGANs-PyTorch | ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a | [
"MIT"
] | 5 | 2020-11-13T15:40:12.000Z | 2022-03-11T23:53:51.000Z | models.py | abhishekyana/CycleGANs-PyTorch | ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a | [
"MIT"
] | 2 | 2021-03-11T10:45:33.000Z | 2021-04-23T23:34:29.000Z | import torch.nn as nn
import torch.nn.functional as F
| 30.544304 | 100 | 0.642768 |
f933130df8fe669a9af1c9efd51088775e210fbc | 99 | py | Python | stable_baselines3/bear/__init__.py | mjyoo2/stable-baselines3 | ef7a580219df6d977b56fb99e503890bd5211195 | [
"MIT"
] | null | null | null | stable_baselines3/bear/__init__.py | mjyoo2/stable-baselines3 | ef7a580219df6d977b56fb99e503890bd5211195 | [
"MIT"
] | null | null | null | stable_baselines3/bear/__init__.py | mjyoo2/stable-baselines3 | ef7a580219df6d977b56fb99e503890bd5211195 | [
"MIT"
] | null | null | null | from stable_baselines3.bear.policies import BearPolicy
from stable_baselines3.bear.bear import BEAR | 49.5 | 54 | 0.888889 |
f9347e37b52fec0692880a203b911075b279ecba | 5,194 | py | Python | file-io-and-other-io/modfile/test_ip_gen.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | file-io-and-other-io/modfile/test_ip_gen.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | file-io-and-other-io/modfile/test_ip_gen.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
This is written by Zhiyang Ong to modify text (non-binary) files.
Synopsis:
Script to modify text (non-binary) files.
Revision History:
1) November 11, 2014. Initial working version.
The MIT License (MIT)
Copyright (c) <2014> <Zhiyang Ong>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n"
"""
# Import packages and functions from the Python Standard Library.
#from os import listdir, system
from os import system
#from os.path import isfile, join, splitext
#from os.subprocess import call
#import subprocess
# ============================================================
"""
Create an output file object.
Assume that the specified filename does not belong to an important file.
Assume that the specified file can be overwritten.
"""
f_object = open("input-file.txt", "w");
# Lists to generate data for the input test file.
# List of universities that are good in EDA.
universities = ["Berkeley", "Stanford", "MIT", "UT Austin", "Carnegie Mellon", "Georgia Tech", "Columbia", "Northwestern", "Purdue", "UCSD", "UCLA"]
# List of other universities in EDA.
other_unis = ["UIUC", "Brown", "Boston University", "UC Irvine", "UC Riverside", "UCSB", "USC", "University of Minnesota at Twin Cities", "Utah", "University of Wisconsin-Madison"]
# List of VLSI topics.
vlsi_topics = ["RTL design", "TLM design", "processor design", "SRAM design", "DRAM design", "low-power VLSI design", "decoder design", "DFM", "VLSI verification", "VLSI design flow", "NoC", "asynchronous VLSI design", "VLSI architecture", "digitally-assisted analog IC design", "VLSI signal processing", "microarchitecture"]
# List of EDA topics.
eda_topics = ["model checking", "equivalence checking", "high-level synthesis", "hardware/software partitioning", "hardware-accelerated emulation", "logic synthesis", "RTL synthesis", "static timing analysis", "statistical STA", "power optimization", "DVFS", "logic simulation", "fault saimulation", "ATPG", "DFT", "BIST", "memory compiler", "gate sizing", "threshold voltage assignment", "buffer insertion", "crosstalk analysis", "signal integrity analysis", "noise analysis", "thermal analysis", "floorplanning", "partitioning", "detailed placement", "detailed routing", "global placement", "global routing", "clock network synthesis", "power and ground routing", "layout compaction", "layout extraction", "parasitic extraction", "interconnect modeling", "design rule check", "layout versus schematic check", "electric rule check", "computational lithography", "optical proximity correction", "resolution enhancement technologies", "mask data preparation", "circuit simulation"]
# Lists of numbers to be fixed.
list_of_hundreds = range(1500, 5000, 100)
list_of_10s = range(1234560, 1234767, 10)
# References:
# http://eecs_ece-and-cs.quora.com/Choosing-a-Graduate-Program-in-VLSI-Design-Related-Areas-Things-to-Consider
# http://www.quora.com/What-are-the-best-VLSI-CAD-research-groups-in-US-universities
# Write text to the input test file.
#f_object.write("Ciao Mondo")
# Pointer to currently enumerated index of EDA topics.
# NOTE(review): the loop below advances this index twice per record while
# ptr < len(universities), then once per record afterwards, so each
# university is paired with two consecutive (number, topic) couples.
ptr = 0
# ============================================================
# Generate test data for the test input file.
# Enumerate all universities that are good in EDA.
for gd_uni in universities:
    #temp_str = "%S %S %S", gd_uni, eda_topics[ptr], eda_topics[ptr+1]
    # First half of the record: "<university>; <hundreds number>; <topic>".
    temp_str = gd_uni + "; " + str(list_of_hundreds[ptr]) + "; " + eda_topics[ptr]
    ptr = ptr + 1
    # Second half: "; <tens number>; <topic>." and the record terminator.
    temp_str = temp_str + "; " + str(list_of_10s[ptr]) + "; " + eda_topics[ptr] + ".\n"
    if ptr < len(universities):
        ptr = ptr + 1
    f_object.write(temp_str)
# Append one fixed, known record (a deterministic case for the parser tests).
temp_str = "Stanford" + "; " + "326748027" + "; " + "statistical STA"
temp_str = temp_str + "; " + "7289" + "; " + "hardware-accelerated emulation" + ".\n"
f_object.write(temp_str)
# ============================================================
# Close the file object
f_object.close()
f934993945194bcd3e81f89c7b932f03bda5ad14 | 8,771 | py | Python | aux_lib.py | paulokuriki/dcmtag2table | e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f | [
"Apache-2.0"
] | null | null | null | aux_lib.py | paulokuriki/dcmtag2table | e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f | [
"Apache-2.0"
] | null | null | null | aux_lib.py | paulokuriki/dcmtag2table | e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f | [
"Apache-2.0"
] | null | null | null | import pydicom
from tqdm import tqdm
import pandas as pd
import os
import time
import glob
import numpy as np
from pydicom import _dicom_dict as dc
from constants import *
import string
# Sentinel returned by _derived_tag_value when the tag is not a pseudo-tag
# (using a sentinel keeps a legitimate None result distinguishable).
_NOT_A_PSEUDO_TAG = object()


def _derived_tag_value(dataset, tag):
    """
    Resolve one pseudo-tag (a requested column name that is not a real DICOM
    keyword) for a single dataset and return its value.

    Returns _NOT_A_PSEUDO_TAG when *tag* is not a known pseudo-tag, so the
    caller can fall back to treating it as a numeric DICOM tag.
    """
    if tag == 'IOP_Plane':
        # Acquisition plane derived from the image orientation cosines.
        return IOP_Plane(dataset.get('ImageOrientationPatient'))
    if tag == "Primary":
        try:
            image_type = ' '.join(dataset.get('ImageType'))
        except Exception:
            image_type = ''
        return search_words_in_serie(image_type, PRIMARY)
    # (word list, exclusion list or None) searched in the SeriesDescription.
    word_lists = {
        "Gad": (GAD, GAD_EXCLUSION),
        "T1": (T1, FLAIR + T2),
        "T2": (T2, None),
        "FLAIR": (FLAIR, T1),
        "SWI": (SWI, None),
        "FIESTA": (FIESTA, None),
        "TOF": (TOF, None),
        "DWI": (DWI, DWI_EXCLUSION),
        "Angio": (ANGIO, None),
        "MPR": (MPR, None),
        "Others": (OTHERS, None),
    }
    if tag in word_lists:
        words, exclusions = word_lists[tag]
        series_description = dataset.get('SeriesDescription')
        if exclusions is None:
            return search_words_in_serie(series_description, words)
        return search_words_in_serie(series_description, words, exclusions)
    return _NOT_A_PSEUDO_TAG


def dcmtag2df(folder: str, list_of_tags: list):
    """
    # Create a Pandas DataFrame with the <list_of_tags> DICOM tags
    # from the DICOM files in <folder>

    # Parameters:
    #    folder (str): folder to be recursively walked looking for DICOM files.
    #    list_of_tags (list of strings): list of DICOM tags with no whitespaces.

    # Returns:
    #    df (DataFrame): table of DICOM tags from the files in folder,
    #    or None when the folder is invalid or holds no readable DICOM files.
    """
    # Copy so the caller's list is not mutated by the "Filename" insert below.
    list_of_tags = list_of_tags.copy()
    table = []

    # checks if folder exists
    if not os.path.isdir(folder):
        print(f'{folder} is not a valid folder.')
        return None

    # joins ** to the folder name for using at the glob function
    print("Searching files recursively...")
    search_folder = os.path.join(folder, '**')
    try:
        filelist = glob.glob(search_folder, recursive=True)
        print(f"{len(filelist)} files/folders found ")
    except Exception as e:
        print(e)
        return None

    print("Reading files...")
    for _f in tqdm(filelist):
        try:
            dataset = pydicom.dcmread(_f, stop_before_pixels=True)
            items = [_f]
            for _tag in list_of_tags:
                if _tag in dataset:
                    element = dataset.data_element(_tag)
                    # BUG FIX: the original referenced `tag_number` here,
                    # which was undefined (or stale from a previous tag) and
                    # raised NameError, silently dropping the whole file.
                    items.append(str(element.value) if element is not None else "NaN")
                    continue
                value = _derived_tag_value(dataset, _tag)
                if value is _NOT_A_PSEUDO_TAG:
                    # checks if a tag number was informed
                    tag_number = tag_number_to_base_16(_tag)
                    if tag_number in dataset and dataset[tag_number] is not None:
                        value = str(dataset[tag_number].value)
                    else:
                        value = "NaN"
                items.append(value)
            table.append(items)
        except (FileNotFoundError, PermissionError):
            # Unreadable path: skip it silently (best-effort scan).
            pass
        except Exception:
            # Not a parsable DICOM file (or a directory): skip it silently.
            pass

    if len(table) == 0:
        print(f'0 DICOM files found at folder: {folder}')
        return None

    list_of_tags.insert(0, "Filename")
    # Transpose the row-per-file table into one value list per column.
    columns = list(map(list, zip(*table)))
    df = pd.DataFrame({tag: col for tag, col in zip(list_of_tags, columns)})
    time.sleep(2)
    print("Finished.")
    return df
def IOP_Plane(IOP: list) -> str:
    """
    Return the anatomical plane ("SAG", "COR" or "AXI") encoded by an
    ImageOrientationPatient value, or "UNK" when it cannot be determined.

    IOP holds six direction cosines (row vector then column vector); the
    cross product of the two 3-vectors is the slice normal, and a unit
    component of the rounded normal identifies the plane:

    ['1', '0', '0', '0', '0', '-1'] you are dealing with Coronal plane view
    ['0', '1', '0', '0', '0', '-1'] you are dealing with Sagittal plane view
    ['1', '0', '0', '0', '1', '0'] you are dealing with Axial plane view
    """
    try:
        # float() first so the documented string form also works
        # (round() on a str raises TypeError, which used to yield "UNK").
        cosines = [round(float(x)) for x in IOP]
        normal = [abs(c) for c in np.cross(cosines[0:3], cosines[3:6])]
    except Exception:  # was a bare `except:`; any malformed/missing IOP -> UNK
        return "UNK"
    if normal[0] == 1:
        return "SAG"
    if normal[1] == 1:
        return "COR"
    if normal[2] == 1:
        return "AXI"
    return "UNK"
| 37.165254 | 115 | 0.551248 |
f9368f4ecdf91a5437237dc760bad64780ffdbe1 | 430 | py | Python | eve_swagger/__init__.py | Annakan/eve-swagger | 34b91a335e0e2c471fc552800751e9d702a7f260 | [
"BSD-3-Clause"
] | null | null | null | eve_swagger/__init__.py | Annakan/eve-swagger | 34b91a335e0e2c471fc552800751e9d702a7f260 | [
"BSD-3-Clause"
] | null | null | null | eve_swagger/__init__.py | Annakan/eve-swagger | 34b91a335e0e2c471fc552800751e9d702a7f260 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
eve-swagger
~~~~~~~~~~~
swagger.io extension for Eve-powered REST APIs.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # noqa: F401
from .swagger import swagger, add_documentation # noqa
INFO = 'SWAGGER_INFO'
HOST = 'SWAGGER_HOST'
| 23.888889 | 55 | 0.676744 |
f936f234615d9bcf0427f9ba2eb780c873f4aa17 | 9,005 | py | Python | 2018_Epoch_Spectra.py | chunders/EpochAnalysis | b5d83d9608692e3bf5f9947bb3627e04a54a312f | [
"MIT"
] | null | null | null | 2018_Epoch_Spectra.py | chunders/EpochAnalysis | b5d83d9608692e3bf5f9947bb3627e04a54a312f | [
"MIT"
] | null | null | null | 2018_Epoch_Spectra.py | chunders/EpochAnalysis | b5d83d9608692e3bf5f9947bb3627e04a54a312f | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
_
/ | | __ _ __ _
/ | / |_||_|| ||
/ | / | |\ | ||_
/____ |__/\ . | | \|_|\_|
__________________________ .
Created on Wed May 30 15:34:05 2018
@author: chrisunderwood
To compare the outputted Electron spectrums,
as part of a parameter scan
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
#==============================================================================
# A function that replicates os.walk with a max depth level
#==============================================================================
#==============================================================================
# Creates a list of the folders of interest
#==============================================================================
def listFolders(mainDir):
    """Return (paths, names) numpy arrays for every run folder under mainDir.

    Each returned path points at that run's 'Dist_evo/' subfolder; the name
    is the run folder's own name (third-from-last path component).
    """
    # walklevel yields mainDir itself first -- drop it with the [1:] slice.
    subFolders = [entry[0] + '/' + 'Dist_evo/' for entry in walklevel(mainDir)][1:]
    names = [path.split('/')[-3] for path in subFolders]
    return np.array(subFolders), np.array(names)
def nearposn(array, value):
    """Return the index of the element of *array* nearest to *value*."""
    return np.abs(array - value).argmin()
def subplotPerSpectra(data, Crop):
    """
    Draw one stacked subplot per loaded spectrum in *data*, sharing the x axis.

    data : list of 2-column arrays; column 0 = electron momentum px,
           column 1 = intensity (# of electrons).
    Crop : (low, high) energy window in MeV used to choose the axis limits.

    Relies on the module-level globals `folderNames` (subplot titles) and
    `plot_MeV` (MeV vs Joule x axis).
    """
    sns.set_palette(sns.color_palette("Set1", len(folderNames)))
    sns.set_context("talk")
    sns.set_style('darkgrid')
    fig, axes = plt.subplots(nrows = len(data), sharex = True, figsize = (7,8))
    for d, names, ax in zip(data, folderNames, axes):
        yLims = [1e50, 0]
        px = d[:,0]
        # Kinetic energy from momentum: E = px^2 / (2 m_e), m_e = 9.11e-31 kg.
        Energy_J = (px ** 2) / (2 * 9.11e-31)
        Energy_eV = Energy_J / 1.6e-19
        Energy_MeV = Energy_eV * 1e-6
        # Indices of the requested crop window on the energy axis.
        xlow = nearposn(Energy_MeV, Crop[0])
        xhigh = nearposn(Energy_MeV, Crop[1])
# print xlow, xhigh
# xlow = 50; xhigh = 400
        intensity = d[:,1]
        # Track the y range over the cropped window only.
        cropI = intensity[xlow:xhigh]
        if cropI.min() < yLims[0]:
            yLims[0] = cropI.min()
        if cropI.max() > yLims[1]:
            yLims[1] = cropI.max()
# print fp
        if plot_MeV:
            xAxis = Energy_MeV
        else:
            xAxis = Energy_J
        ax.plot(xAxis, intensity)
        # NOTE(review): strips the folder name's first character for the
        # title -- assumes names like "B<translation>"; confirm the scheme.
        ax.set_title('Blade Translation ' + names[1:] + 'mm')
        ax.set_ylim(yLims)
# ax.set_ylabel('Intensity (# of electrons)')
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useOffset=False)
    if plot_MeV:
        plt.xlabel('Electron Energy (MeV)')
    else:
        plt.xlabel('Electron Energy (J)')
# plt.ylabel('Intensity (# of electrons)')
    # One shared y label on the figure instead of per-axes labels.
    fig.text(0.02, 0.5, 'Intensity (# of electrons)', ha='center', va='center', rotation='vertical')
#==============================================================================
# Apply the plotting limits
#==============================================================================
#plt.xlim([-1e-14, 1e-13])
#plt.yscale('log')
#
# if logPlot:
# plt.ylim([yLims[1]/1e5, yLims[1]])
# plt.yscale('log')
# else:
# plt.ylim(yLims)
#
    # NOTE(review): xAxis/xlow/xhigh leak from the last loop iteration, so
    # the shared x limits come from the last spectrum's energy axis.
    plt.xlim([xAxis[xlow],xAxis[xhigh]])
    plt.legend()
def createPlotOfAll_e_spectra(folderPaths, folderNames, Crop_X, Crop_Y = False):
    """
    Load 'Electron_Spectrum.txt' from every folder in *folderPaths* and
    overlay all spectra on one figure.

    Crop_X : (low, high) energy window in MeV; sets the x limits and, when
             Crop_Y is False, the automatic y range.
    Crop_Y : False for an automatic y range, or an explicit (ymin, ymax).

    Returns the list of loaded 2-column arrays (px, intensity).
    Relies on the module-level globals `plot_MeV` and `logPlot`.
    """
    sns.set_palette(sns.color_palette("Set1", len(folderNames)))
    sns.set_context("talk")
    sns.set_style('darkgrid')
    yLims = [1e50, 0]
    data = []
    plt.figure(figsize = (10,7))
    for fp, names in zip(folderPaths, folderNames):
        fp += 'Electron_Spectrum.txt'
        try:
            #Assuming that the first row is currently px
            d = np.loadtxt(fp)
            data.append(d)
            px = d[:,0]
            # Kinetic energy from momentum: E = px^2 / (2 m_e).
            Energy_J = (px ** 2) / (2 * 9.11e-31)
            Energy_eV = Energy_J / 1.6e-19
            Energy_MeV = Energy_eV * 1e-6
            xlow = nearposn(Energy_MeV, Crop_X[0])
            xhigh = nearposn(Energy_MeV, Crop_X[1])
# print xlow, xhigh
# xlow = 50; xhigh = 400
            intensity = d[:,1]
            if not Crop_Y:
                # Auto y range: track min/max over the cropped window only.
                cropI = intensity[xlow:xhigh]
                if cropI.min() < yLims[0]:
                    yLims[0] = cropI.min()
                if cropI.max() > yLims[1]:
                    yLims[1] = cropI.max()
            else:
                yLims = Crop_Y
# print fp
            if plot_MeV:
                xAxis = Energy_MeV
            else:
                xAxis = Energy_J
            plt.plot(xAxis, intensity, label = names)
            plt.xlim([xAxis[xlow],xAxis[xhigh]])
        except:
            # Missing/badly formatted spectrum file: report it and carry on.
            print 'Error Reading File'
            print '    ' + fp
    if plot_MeV:
        plt.xlabel('Electron Energy (MeV)')
    else:
        plt.xlabel('Electron Energy (J)')
    plt.ylabel('Intensity (# of electrons)')
#==============================================================================
# Apply the plotting limits
#==============================================================================
#plt.xlim([-1e-14, 1e-13])
#plt.yscale('log')
#
    if logPlot:
        # Log scale: show five decades below the peak intensity.
        plt.ylim([yLims[1]/1e5, yLims[1]])
        plt.yscale('log')
    else:
        plt.ylim(yLims)
    plt.legend()
    # NOTE(review): xAxis/xlow/xhigh/Energy_MeV leak from the last iteration,
    # so these reports refer to the last spectrum read.
    print 'Crop corresponds to: ', [xAxis[xlow],xAxis[xhigh]], ' MeV'
    print 'Range of inputed data is: ', Energy_MeV[0], Energy_MeV[-1]
    return data
# Data roots: external drive and Google Drive copies of the simulation runs.
hdrive = '/Volumes/CIDU_passport/2018_Epoch_vega_1/'
gdrive = '/Volumes/GoogleDrive/My Drive/'
gdrive += '2018_Epoch_vega_1/'
# Previously analysed campaigns, kept for quick switching. `fileSplice` is
# the slice of each folder name that holds its sortable run number.
#hdrive += '0601_Gaus_for_wavebreak/'
#fileSplice = [8,None]
#hdrive += '0607_Intensity_Scan/'
#fileSplice = [1,-11]
#hdrive += '0612_profileScan/'
#fileSplice = [2,None]
#hdrive = gdrive + '0711_highRes_selfInjection/'
#fileSplice = [-4,None]
#hdrive = gdrive + '0721_HR_Jump/'
#fileSplice = [-4,None]
hdrive = hdrive + '1010_SlurmJob/'
fileSplice = [10,12]
#hdrive = gdrive + '1018_vega1_Jump/'
#fileSplice = [2,None]
folderPaths, folderNames = listFolders(hdrive)
# Plot options: linear y axis, energies in MeV.
logPlot = False
plot_MeV = True
#==============================================================================
# Search for the set of folders to look at!
#==============================================================================
starts = ''
#starts = ''
fins = 'FS'
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].endswith(fins)]
Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts)]
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts) and folderNames[i].endswith('23')]
#Modify the both arrays to just be the ones of interest
# Fancy-index the numpy arrays down to the selected runs.
folderPaths = folderPaths[Index_to_save]
folderNames = folderNames[Index_to_save]
print folderNames
#==============================================================================
# Crop the axis to the interesting data
#==============================================================================
Energy_Crop = [1, 5] # In MeV
IntensityCrop = [0, 0.5e8]
#==============================================================================
# Slice name for number to sort by
#==============================================================================
# Extract the numeric part of each folder name and sort the runs by it.
Num = []
for f in folderNames:
    Num.append(float(f[fileSplice[0]:fileSplice[1]]))
print Num
sort = sorted(zip(Num, folderNames, folderPaths))
folderNames = [x[1] for x in sort]
folderPaths = [x[2] for x in sort]
print 'Sorted'
print folderNames
#folderNames = folderNames[:-1]
# Overlay plot of every spectrum, saved next to the data.
data = createPlotOfAll_e_spectra(folderPaths, folderNames, Energy_Crop, IntensityCrop)
plt.savefig(hdrive + 'Electron_spectrum.png')
plt.show()
#data = data[:4]
# Second figure: one subplot per run.
subplotPerSpectra(data, Energy_Crop)
plt.tight_layout()
plt.savefig(hdrive + 'Electron_spectrums_in_subplot.png', dpi = 300)
| 31.596491 | 125 | 0.4804 |
f937303cbe2bd1ca99e6bfd681984ef1eb1f4844 | 35 | py | Python | first-homework.py | Hexotical/Astr119 | 34a638d29f33c8fde9245cd7c5869bf3f9e7366b | [
"MIT"
] | null | null | null | first-homework.py | Hexotical/Astr119 | 34a638d29f33c8fde9245cd7c5869bf3f9e7366b | [
"MIT"
] | 2 | 2020-10-01T18:51:01.000Z | 2020-10-06T14:15:37.000Z | first-homework.py | Hexotical/astr-119 | 34a638d29f33c8fde9245cd7c5869bf3f9e7366b | [
"MIT"
] | null | null | null | print("Lukas Ho, pronouns: he/him") | 35 | 35 | 0.714286 |
f93933ebd7cddbd101cc7daf0772e4787528a6a9 | 2,965 | py | Python | server/swagger_server/models/people_patch.py | fabric-testbed/fabric-core-api | 8ce79fd16e1020271487967743a89b7a2346bf45 | [
"MIT"
] | null | null | null | server/swagger_server/models/people_patch.py | fabric-testbed/fabric-core-api | 8ce79fd16e1020271487967743a89b7a2346bf45 | [
"MIT"
] | null | null | null | server/swagger_server/models/people_patch.py | fabric-testbed/fabric-core-api | 8ce79fd16e1020271487967743a89b7a2346bf45 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.preferences import Preferences # noqa: F401,E501
from swagger_server import util
| 25.560345 | 101 | 0.601349 |
f9393f537340aad0fcc03fb7b4478b7455578c86 | 14,649 | py | Python | Code/src/models/optim/DMSAD_trainer.py | antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection | e84c28ce4dd28671d39752a7d21c674e05fcb495 | [
"MIT"
] | 8 | 2021-02-19T17:30:00.000Z | 2022-02-21T05:55:06.000Z | Code/src/models/optim/DMSAD_trainer.py | antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection | e84c28ce4dd28671d39752a7d21c674e05fcb495 | [
"MIT"
] | 1 | 2021-05-03T14:04:53.000Z | 2021-05-03T14:48:01.000Z | Code/src/models/optim/DMSAD_trainer.py | antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection | e84c28ce4dd28671d39752a7d21c674e05fcb495 | [
"MIT"
] | 5 | 2021-02-18T22:43:40.000Z | 2021-05-03T14:01:49.000Z | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from src.models.optim.Loss_Functions import DMSADLoss
from src.utils.utils import print_progessbar
| 42.708455 | 123 | 0.558263 |
f93b09d7873482279865a3e138f9e289b66d1ef0 | 7,600 | py | Python | escher/tests/test_plots.py | phantomas1234/escher | 47f3291beefd7cc90207755c717e83f385262956 | [
"MIT"
] | null | null | null | escher/tests/test_plots.py | phantomas1234/escher | 47f3291beefd7cc90207755c717e83f385262956 | [
"MIT"
] | null | null | null | escher/tests/test_plots.py | phantomas1234/escher | 47f3291beefd7cc90207755c717e83f385262956 | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals
from escher import __schema_version__
import escher.server
from escher import Builder, get_cache_dir, clear_cache
from escher.plots import (_load_resource, local_index, server_index,
model_json_for_name, map_json_for_name)
from escher.urls import get_url
import os
import sys
from os.path import join
import json
from pytest import raises, mark
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
# Py2/py3 compatibility: a single name for the native text type.
# Compare version_info (a tuple) rather than the version string: the
# original `sys.version < '3'` lexicographic comparison misorders versions
# once the major number has more than one digit.
if sys.version_info[0] < 3:
    unicode_type = unicode  # noqa: F821 -- `unicode` only exists on Python 2
else:
    unicode_type = str
# cache
# server
# model and maps
def test_model_json_for_name(tmpdir):
    """model_json_for_name reads a cached organism/model file from cache_dir."""
    organism_dir = tmpdir.mkdir('models').mkdir('Escherichia coli')
    organism_dir.join('iJO1366.json').write('"temp"')
    result = model_json_for_name('iJO1366', cache_dir=str(tmpdir))
    assert result == '"temp"'
# helper functions
def test__load_resource(tmpdir):
    """_load_resource accepts raw JSON or a file path, and rejects non-JSON files."""
    assert _load_resource('{"r": "val"}', 'name') == '{"r": "val"}'
    directory = os.path.abspath(os.path.dirname(__file__))
    assert _load_resource(join(directory, 'example.json'), 'name').strip() == '{"r": "val"}'
    with raises(ValueError) as err:
        p = join(str(tmpdir), 'dummy')
        with open(p, 'w') as f:
            f.write('dummy')
        _load_resource(p, 'name')
    # BUG FIX: `err.value` is the exception object itself, so the original
    # `'...' in err.value` raised TypeError; compare against its message.
    assert 'not a valid json file' in str(err.value)
| 37.073171 | 133 | 0.644342 |
f93b74e758fc59e8cc9ffa0d3c99de08f971b204 | 656 | py | Python | setup.py | HiteshSachdev/casualty | 7d3878bea7bc503a3cc5eb6046aa658608164e0f | [
"MIT"
] | 14 | 2018-10-07T12:05:24.000Z | 2022-03-01T01:58:21.000Z | setup.py | treebohotels/corelated-logs | 13926c97a473bc63c7b18e22870d1760089f30d1 | [
"MIT"
] | 6 | 2018-10-07T09:07:59.000Z | 2019-06-08T09:23:45.000Z | setup.py | treebohotels/corelated-logs | 13926c97a473bc63c7b18e22870d1760089f30d1 | [
"MIT"
] | 2 | 2019-01-23T06:14:31.000Z | 2021-06-21T04:02:26.000Z | from setuptools import find_packages, setup
setup(
name="casualty",
version="0.1.9",
packages=find_packages(exclude=["tests"]),
install_requires=[
"structlog==18.2.0",
"wrapt==1.10.11",
"pre-commit-hooks==1.4.0",
"mock==2.0.0",
"pytest==3.8.2",
"pytest-mock==1.10.0",
"pytest-cov"
],
url="",
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
license="MIT",
author="Sohit Kumar",
author_email="sumitk002@gmail.com",
test_suite="tests",
description="A python library to generate co-relation id and bind it to headers in outgoing request",
)
| 26.24 | 105 | 0.568598 |
f93db1c837037edf147a1adf0e6c511aadcb0960 | 5,321 | py | Python | isaactest/tests/user_progress_access.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | null | null | null | isaactest/tests/user_progress_access.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | 1 | 2016-01-15T11:28:06.000Z | 2016-01-25T17:09:18.000Z | isaactest/tests/user_progress_access.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | 1 | 2019-05-14T16:53:49.000Z | 2019-05-14T16:53:49.000Z | import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import submit_login_form, assert_logged_in
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element, wait_for_invisible_xpath
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException
__all__ = ["user_progress_access"]
#####
# Test : Access Users Progress Page
#####
| 47.088496 | 141 | 0.64236 |
f93dfe9bacfa4bd9cb38fd01bfa6466399547497 | 423 | py | Python | insert_loc_code.py | dspshin/house-bot | 1e2755abae114c3284d7d95d81c40fadb0ab9b43 | [
"MIT"
] | null | null | null | insert_loc_code.py | dspshin/house-bot | 1e2755abae114c3284d7d95d81c40fadb0ab9b43 | [
"MIT"
] | null | null | null | insert_loc_code.py | dspshin/house-bot | 1e2755abae114c3284d7d95d81c40fadb0ab9b43 | [
"MIT"
] | null | null | null | import re
import sqlite3
conn = sqlite3.connect('loc.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS location(loc text PRIMARY KEY, code text)')
conn.commit()
f = open('loc_code.txt')
for d in f.readlines():
data = re.sub(r'\s{2}', '|', d.strip()).split('|')
print data[1].strip(), data[0]
c.execute('INSERT INTO location VALUES ("%s", "%s")'%(data[1].strip(), data[0]))
conn.commit()
f.close() | 24.882353 | 84 | 0.63357 |
f94108d55467d7dc2d4d0a83034f5df29403a946 | 33,479 | py | Python | python/valkka/nv/valkka_nv.py | xiaoxoxin/valkka-nv | 48b8fd5b1293c6e4f96f4798e6d327e209b83bce | [
"WTFPL"
] | 1 | 2021-03-03T13:25:22.000Z | 2021-03-03T13:25:22.000Z | python/valkka/nv/valkka_nv.py | xiaoxoxin/valkka-nv | 48b8fd5b1293c6e4f96f4798e6d327e209b83bce | [
"WTFPL"
] | null | null | null | python/valkka/nv/valkka_nv.py | xiaoxoxin/valkka-nv | 48b8fd5b1293c6e4f96f4798e6d327e209b83bce | [
"WTFPL"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
_valkka_nv = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
_valkka_nv = swig_import_helper()
del swig_import_helper
else:
import _valkka_nv
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
try:
_object = object
_newclass = 1
except __builtin__.Exception:
_newclass = 0
from valkka import core
FrameFilter_swigregister = _valkka_nv.FrameFilter_swigregister
FrameFilter_swigregister(FrameFilter)
DummyFrameFilter_swigregister = _valkka_nv.DummyFrameFilter_swigregister
DummyFrameFilter_swigregister(DummyFrameFilter)
InfoFrameFilter_swigregister = _valkka_nv.InfoFrameFilter_swigregister
InfoFrameFilter_swigregister(InfoFrameFilter)
BriefInfoFrameFilter_swigregister = _valkka_nv.BriefInfoFrameFilter_swigregister
BriefInfoFrameFilter_swigregister(BriefInfoFrameFilter)
ThreadSafeFrameFilter_swigregister = _valkka_nv.ThreadSafeFrameFilter_swigregister
ThreadSafeFrameFilter_swigregister(ThreadSafeFrameFilter)
ForkFrameFilter_swigregister = _valkka_nv.ForkFrameFilter_swigregister
ForkFrameFilter_swigregister(ForkFrameFilter)
ForkFrameFilter3_swigregister = _valkka_nv.ForkFrameFilter3_swigregister
ForkFrameFilter3_swigregister(ForkFrameFilter3)
ForkFrameFilterN_swigregister = _valkka_nv.ForkFrameFilterN_swigregister
ForkFrameFilterN_swigregister(ForkFrameFilterN)
SlotFrameFilter_swigregister = _valkka_nv.SlotFrameFilter_swigregister
SlotFrameFilter_swigregister(SlotFrameFilter)
PassSlotFrameFilter_swigregister = _valkka_nv.PassSlotFrameFilter_swigregister
PassSlotFrameFilter_swigregister(PassSlotFrameFilter)
DumpFrameFilter_swigregister = _valkka_nv.DumpFrameFilter_swigregister
DumpFrameFilter_swigregister(DumpFrameFilter)
CountFrameFilter_swigregister = _valkka_nv.CountFrameFilter_swigregister
CountFrameFilter_swigregister(CountFrameFilter)
TimestampFrameFilter_swigregister = _valkka_nv.TimestampFrameFilter_swigregister
TimestampFrameFilter_swigregister(TimestampFrameFilter)
TimestampFrameFilter2_swigregister = _valkka_nv.TimestampFrameFilter2_swigregister
TimestampFrameFilter2_swigregister(TimestampFrameFilter2)
DummyTimestampFrameFilter_swigregister = _valkka_nv.DummyTimestampFrameFilter_swigregister
DummyTimestampFrameFilter_swigregister(DummyTimestampFrameFilter)
RepeatH264ParsFrameFilter_swigregister = _valkka_nv.RepeatH264ParsFrameFilter_swigregister
RepeatH264ParsFrameFilter_swigregister(RepeatH264ParsFrameFilter)
GateFrameFilter_swigregister = _valkka_nv.GateFrameFilter_swigregister
GateFrameFilter_swigregister(GateFrameFilter)
SwitchFrameFilter_swigregister = _valkka_nv.SwitchFrameFilter_swigregister
SwitchFrameFilter_swigregister(SwitchFrameFilter)
CachingGateFrameFilter_swigregister = _valkka_nv.CachingGateFrameFilter_swigregister
CachingGateFrameFilter_swigregister(CachingGateFrameFilter)
SetSlotFrameFilter_swigregister = _valkka_nv.SetSlotFrameFilter_swigregister
SetSlotFrameFilter_swigregister(SetSlotFrameFilter)
TimeIntervalFrameFilter_swigregister = _valkka_nv.TimeIntervalFrameFilter_swigregister
TimeIntervalFrameFilter_swigregister(TimeIntervalFrameFilter)
FifoFrameFilter_swigregister = _valkka_nv.FifoFrameFilter_swigregister
FifoFrameFilter_swigregister(FifoFrameFilter)
BlockingFifoFrameFilter_swigregister = _valkka_nv.BlockingFifoFrameFilter_swigregister
BlockingFifoFrameFilter_swigregister(BlockingFifoFrameFilter)
SwScaleFrameFilter_swigregister = _valkka_nv.SwScaleFrameFilter_swigregister
SwScaleFrameFilter_swigregister(SwScaleFrameFilter)
Thread_swigregister = _valkka_nv.Thread_swigregister
Thread_swigregister(Thread)
FrameFifoContext_swigregister = _valkka_nv.FrameFifoContext_swigregister
FrameFifoContext_swigregister(FrameFifoContext)
DecoderThread_swigregister = _valkka_nv.DecoderThread_swigregister
DecoderThread_swigregister(DecoderThread)
NVcuInit = _valkka_nv.NVcuInit
NVgetDevices = _valkka_nv.NVgetDevices
NVThread_swigregister = _valkka_nv.NVThread_swigregister
NVThread_swigregister(NVThread)
# This file is compatible with both classic and new-style classes.
| 41.179582 | 138 | 0.729532 |
f94195b0e745d91852d2ea4775d406dd9acd653a | 3,336 | py | Python | mcenter_client/tests/mcenter_server_api/controllers/users_controller.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 7 | 2019-04-08T02:31:55.000Z | 2021-11-15T14:40:49.000Z | mcenter_client/tests/mcenter_server_api/controllers/users_controller.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 31 | 2019-02-22T22:23:26.000Z | 2021-08-02T17:17:06.000Z | mcenter_client/tests/mcenter_server_api/controllers/users_controller.py | lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | [
"Apache-2.0"
] | 8 | 2019-03-15T23:46:08.000Z | 2020-02-06T09:16:02.000Z | import connexion
import six
import flask
import copy
import os
import base64
import time
from mcenter_server_api.models.inline_response200 import InlineResponse200 # noqa: E501
from mcenter_server_api.models.inline_response2001 import InlineResponse2001 # noqa: E501
from mcenter_server_api.models.user import User # noqa: E501
from mcenter_server_api import util
from . import base
# In-memory user store keyed by user id, seeded with a default admin account.
# NOTE(review): passwords are kept in plain text — acceptable only because this
# module is a mock API server used by tests.
users = dict(AdminID=dict(username='admin', password='admin',
                          createdBy='admin', created=0, id='AdminID'))
def auth_login_post(body):  # noqa: E501
    """Authenticate a user against the in-memory store.

    Aborts with HTTP 403 when the username is unknown or the password does
    not match; otherwise opens a session and returns its token.

    :param body: dict with ``username`` and ``password`` fields
    :rtype: InlineResponse200
    """
    account = _finduserbyname(body['username'])
    # Short-circuit keeps the password comparison from running on a miss.
    if account is None or account['password'] != body['password']:
        flask.abort(403)
    return dict(token=base.add_session(account))
def auth_validate_post(body):  # noqa: E501
    """Validate an authentication token.

    Unimplemented connexion scaffold stub: it ignores *body* and always
    returns the placeholder string instead of an InlineResponse2001.

    :param body: request payload (expected to carry the bearer token)
    :rtype: str (scaffold placeholder)
    """
    return 'do some magic!'
def me_get():  # noqa: E501
    """Return the sanitized profile of the currently authenticated user.

    :rtype: User
    """
    session = base.check_session()
    current_user = session['user']
    return _cleanuser(current_user)
def users_get():  # noqa: E501
    """Return the sanitized records of every registered user.

    :rtype: List[User]
    """
    base.check_session()
    return [_cleanuser(record) for record in users.values()]
def users_post(body):  # noqa: E501
    """Register a new user.

    Aborts with HTTP 500 when the username is already taken or the password
    is empty; otherwise records the new user and returns its sanitized form.

    :param body: user detail description
    :rtype: User
    """
    session = base.check_session()
    # Duplicate username check runs first, matching the original ordering.
    if _finduserbyname(body['username']) is not None or not body['password']:
        flask.abort(500)
    base.finish_creation(body, session, users)
    return _cleanuser(body)
def users_user_id_delete(userId):  # noqa: E501
    """Deregister a user.

    Aborts with HTTP 500 when a user attempts to delete their own account;
    otherwise removes the record from the store.

    :param userId: user identifier
    :rtype: None
    """
    session = base.check_session()
    target = _finduser(userId)
    # A user may not delete themselves.
    if target['username'] == session['user']['username']:
        flask.abort(500)
    del users[userId]
def users_user_id_get(userId):  # noqa: E501
    """Return the sanitized detail record of one user.

    :param userId: user identifier
    :rtype: User
    """
    base.check_session()
    target = _finduser(userId)
    return _cleanuser(target)
def users_user_id_put(userId, body):  # noqa: E501
    """Update a user's mutable fields.

    Copies every non-None field from *body* onto the stored record except
    the immutable ``id``, ``created`` and ``createdBy`` columns.

    :param userId: user identifier
    :param body: update user object
    :rtype: User
    """
    base.check_session()
    target = _finduser(userId)
    protected = ('id', 'created', 'createdBy')
    for field, value in body.items():
        if field not in protected and value is not None:
            target[field] = value
    return _cleanuser(target)
| 20.096386 | 90 | 0.627098 |
f941abe12e92f9a9d99898da1845f80024a4bf16 | 105 | py | Python | dash_react_json_schema_form/_imports_.py | dabble-of-devops-bioanalyze/dash_react_json_schema_form | f8b8826e6798efca1a7f603aa73b9e054056dc9a | [
"Apache-2.0"
] | null | null | null | dash_react_json_schema_form/_imports_.py | dabble-of-devops-bioanalyze/dash_react_json_schema_form | f8b8826e6798efca1a7f603aa73b9e054056dc9a | [
"Apache-2.0"
] | null | null | null | dash_react_json_schema_form/_imports_.py | dabble-of-devops-bioanalyze/dash_react_json_schema_form | f8b8826e6798efca1a7f603aa73b9e054056dc9a | [
"Apache-2.0"
] | null | null | null | from .DashReactJsonSchemaForm import DashReactJsonSchemaForm
__all__ = [
"DashReactJsonSchemaForm"
] | 21 | 60 | 0.819048 |
f94563e81861f76b57c556bc8928617eb8ac0410 | 19,471 | py | Python | symbol.py | LizhengMathAi/symbol_FEM | a2679ff90cfffa40316e33102be1a802e210768a | [
"Apache-2.0"
] | 1 | 2021-02-07T00:53:51.000Z | 2021-02-07T00:53:51.000Z | symbol.py | LizhengMathAi/symbol_FEM | a2679ff90cfffa40316e33102be1a802e210768a | [
"Apache-2.0"
] | null | null | null | symbol.py | LizhengMathAi/symbol_FEM | a2679ff90cfffa40316e33102be1a802e210768a | [
"Apache-2.0"
] | null | null | null | from functools import reduce
import numpy as np
from sparse import SparseTensor
def directional_derivative(self, c, order=1):
    """
    Contract the ``order``-th partial derivatives of every entry of this
    PolynomialArray with the coefficient tensor ``c``.

    +----------+-----------------+---------------------------+
    | item     | data type       | shape                     |
    +----------+-----------------+---------------------------+
    | c        | numpy.ndarray   | self.shape + [ND] * order |
    | order    | int             | []                        |
    | return   | PolynomialArray | self.shape                |
    +----------+-----------------+---------------------------+
    return: \\sum_{ij...} c_{ij...}^{uv...} \\frac{\\partial^ self_{uv...}}{\partial \lambda_i \partial \lambda_j ...}
    """
    # ni: max number of monomial terms over all entries; dim: number of variables.
    ni = max([p.coeff.__len__() for p in self.array])
    dim = self.array[0].n_elements
    # Zero-pad every entry to ni terms so they stack into dense arrays.
    coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
    coeff = np.stack(coeff, axis=1)  # shape = [NI, ?]
    # NOTE(review): np.int is removed in NumPy >= 1.24 — replace with int/np.int_.
    indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), dim), dtype=np.int)], axis=0) for p in self.array]
    indices = np.stack(indices, axis=2)  # shape = [NI, ND, ?]
    # Apply the power rule once per derivative order: multiply by the current
    # exponent and decrement it (clamped at 0) along a fresh leading axis.
    for axis in range(order):
        axes = [axis + 1] + [i for i in range(axis + 3) if i != axis + 1]
        coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=axes)
        axes = list(range(1, axis + 2)) + [axis + 3]
        indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=np.int), axis=axes)
        indices = np.maximum(indices, 0)
    # Align c with the derivative axes, then contract term-by-term.
    c = np.reshape(c, newshape=[-1, 1] + [dim] * order)
    c = np.transpose(c, axes=list(range(2, order + 2)) + [1, 0])  # shape = [ND] * order + [1] + [?]
    coeff = np.reshape((c * coeff), newshape=(dim ** order * ni, -1))  # shape = [ND] * order + [NI] + [?]
    indices = np.reshape(indices, newshape=(dim ** order * ni, dim, -1))  # shape = [ND] * order + [NI] + [ND] + [?]
    return PolynomialArray([Polynomial(coeff[:, i], indices[:, :, i], merge=True) for i in range(coeff.shape[-1])], shape=self.shape)
def integral(self, dim, determinant):
    """
    Integrate every entry over a simplex via the barycentric monomial formula.
    Working correctly in triangulation grid only!

                                              \\Pi_i \\alpha_i!
    \\int_K \\Pi_i \\lambda_i^{\\alpha_i} dx = ------------------------ * determinant
                                            (dim + \\Sum_i \\alpha_i)!
    """
    # ni: max number of monomial terms; nd: number of barycentric coordinates.
    ni = max([p.coeff.__len__() for p in self.array])
    nd = self.array[0].n_elements
    # Zero-pad all entries to a common term count so they stack densely.
    coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
    coeff = np.stack(coeff, axis=1)  # shape = [NI, ?]
    # NOTE(review): np.int is removed in NumPy >= 1.24 — replace with int/np.int_.
    indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), nd), dtype=np.int)], axis=0) for p in self.array]
    indices = np.stack(indices, axis=2)  # shape = [NI, ND, ?]
    degree = np.max(indices)
    # Elementwise factorials \alpha_i! built as products max(alpha-i, 1).
    if degree == 0:
        numerator = np.ones_like(indices)  # shape = [NI, ND, ?]
    else:
        numerator = reduce_prod([np.maximum(indices - i, 1) for i in range(degree)])  # shape = [NI, ND, ?]
    numerator = np.prod(numerator, axis=1)  # shape = [NI, ?]
    # (dim + \Sum_i \alpha_i)! via the same clamped-product trick.
    denominator = np.sum(indices, axis=1) + dim  # shape = [NI, ?]
    denominator = reduce_prod([np.maximum(denominator - i, 1) for i in range(degree + dim)])  # shape = [NI, ?]
    return np.reshape(np.sum(coeff * numerator / denominator, axis=0), newshape=self.shape) * determinant
def unit_test():
    """Numeric self-checks for Polynomial / PolynomialArray.

    Prints the max absolute error of each identity; every printed value
    should be (close to) zero.  Uses random inputs, so this is a smoke
    test rather than a deterministic regression test.
    """
    np.set_printoptions(precision=2)
    x = np.random.rand(4, 3)
    const_array = np.random.rand(8, 7)
    # item 6, degree 2, elements 3
    poly = Polynomial(coeff=np.random.rand(6), indices=np.random.randint(0, 3, size=(6, 3)))
    polys_1 = [Polynomial(coeff=np.random.rand(5), indices=np.random.randint(0, 5, size=(5, 3))) for _ in range(56)]
    polys_1 = PolynomialArray(polys_1, [8, 7])
    polys_2 = [Polynomial(coeff=np.random.rand(4), indices=np.random.randint(0, 5, size=(4, 3))) for i in range(56)]
    polys_2 = PolynomialArray(polys_2, [8, 7])
    polys_3 = [Polynomial(coeff=np.random.rand(3), indices=np.random.randint(0, 5, size=(3, 3))) for i in range(7*8*9)]
    polys_3 = PolynomialArray(polys_3, [9, 8, 7])
    # four fundamental rules (+, -, * against scalars, Polynomial, ndarray, PolynomialArray)
    print("polys_1(x) + np.pi - (polys_1 + np.pi)(x):")
    print(np.max(np.abs(polys_1(x) + np.pi - (polys_1 + np.pi)(x))))
    print("polys_1(x) + poly(x) - (polys_1 + poly)(x):")
    print(np.max(np.abs(polys_1(x) + np.reshape(poly(x), (-1, 1, 1)) - (polys_1 + poly)(x))))
    print("polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x):")
    print(np.max(np.abs(polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x))))
    print("polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x):")
    print(np.max(np.abs(polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x))))
    print("polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x):")
    print(np.max(np.abs(polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x))))
    print("polys_1(x) - np.pi - (polys_1 - np.pi)(x):")
    print(np.max(np.abs(polys_1(x) - np.pi - (polys_1 - np.pi)(x))))
    print("polys_1(x) - poly(x) - (polys_1 - poly)(x):")
    print(np.max(np.abs(polys_1(x) - np.reshape(poly(x), (-1, 1, 1)) - (polys_1 - poly)(x))))
    print("polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x):")
    print(np.max(np.abs(polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x))))
    print("polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x):")
    print(np.max(np.abs(polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x))))
    print("polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x):")
    print(np.max(np.abs(polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x))))
    print("polys_1(x) * np.pi - (polys_1 * np.pi)(x):")
    print(np.max(np.abs(polys_1(x) * np.pi - (polys_1 * np.pi)(x))))
    print("polys_1(x) * poly(x) - (polys_1 * poly)(x):")
    print(np.max(np.abs(polys_1(x) * np.reshape(poly(x), (-1, 1, 1)) - (polys_1 * poly)(x))))
    print("polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x):")
    print(np.max(np.abs(polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x))))
    print("polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x):")
    print(np.max(np.abs(polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x))))
    print("polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x):")
    print(np.max(np.abs(polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x))))
    # check reshape
    print(np.max(np.abs(polys_1.reshape(shape=[2, 4, 7])(x) - np.reshape(polys_1(x), newshape=(-1, 2, 4, 7)))))
    # check concat
    print("PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=1):")
    print(np.max(np.abs(PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=2))))
    # check sum
    print(np.max(np.abs(polys_3.sum(axis=0, keep_dim=True)(x) - np.sum(polys_3(x), axis=0 + 1, keepdims=True))))
    print(np.max(np.abs(polys_3.sum(axis=1, keep_dim=True)(x) - np.sum(polys_3(x), axis=1 + 1, keepdims=True))))
    print(np.max(np.abs(polys_3.sum(axis=2, keep_dim=True)(x) - np.sum(polys_3(x), axis=2 + 1, keepdims=True))))
    # check integral (hand-computed reference values for two fixed polynomials)
    poly_1 = Polynomial(
        coeff=np.array([
            1,
            3,
        ]),
        indices=np.array([
            [1, 2, 3, 4],
            [1, 1, 1, 1],
        ])
    )
    poly_2 = Polynomial(
        coeff=np.array([
            2,
            4,
        ]),
        indices=np.array([
            [4, 3, 2, 1],
            [0, 0, 0, 0],
        ])
    )
    poly = PolynomialArray(array=[poly_1, poly_2], shape=(2, ))
    ans_1 = 0.5 * 1 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
    ans_1 += 0.5 * 3 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 8)))
    ans_2 = 2 * 2 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
    ans_2 += 2 * 4 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 4)))
    print(poly.integral(dim=3, determinant=np.array([0.5, 2])) - np.array([ans_1, ans_2]))
    # check derivative
    poly = poly.derivative(order=1)
    print(poly[0, 1])
    # check derivative in Polynomial
    c = np.random.rand(3, 3)
    coeff = np.random.randint(100, size=(4, )) / 100
    indices = np.random.randint(10, size=(4, 3))
    poly = Polynomial(coeff, indices)
    type_1 = (poly.derivative(order=2) * c).sum(axis=0).sum(axis=0)
    type_2 = poly.directional_derivative(c, order=2)
    error = type_1 - type_2
    error = Polynomial(error.coeff, error.indices, merge=True)
    print("error:", error)
    # check derivative in PolynomialArray
    poly = PolynomialArray([poly, poly+1, poly-1, poly*2], shape=(2, 2))
    c = np.random.rand(2, 2, 3, 3)
    type_1 = (poly.derivative(order=2) * c).sum(axis=2).sum(axis=2)
    type_2 = poly.directional_derivative(c, order=2)
    for item in (type_1 - type_2).array:
        item = Polynomial(item.coeff, item.indices, merge=True)
        print("error:", item)
if __name__ == "__main__":
    # Run the numeric self-checks when executed as a script (no-op on import).
    unit_test()
| 49.544529 | 137 | 0.558472 |
f9466d3c2d2932494116e2cb70d044cef50ea795 | 266 | py | Python | pollbot/helper/display/management.py | 3wille/ultimate-poll-bot | 7a99659df463a891b20a1ab424665cd84d4242b4 | [
"MIT"
] | null | null | null | pollbot/helper/display/management.py | 3wille/ultimate-poll-bot | 7a99659df463a891b20a1ab424665cd84d4242b4 | [
"MIT"
] | null | null | null | pollbot/helper/display/management.py | 3wille/ultimate-poll-bot | 7a99659df463a891b20a1ab424665cd84d4242b4 | [
"MIT"
] | null | null | null | """The poll management text."""
from .poll import get_poll_text
def get_poll_management_text(session, poll, show_warning=False):
    """Return the text for a poll's management interface.

    Thin wrapper: delegates directly to ``get_poll_text`` with the same
    arguments.
    """
    return get_poll_text(session, poll, show_warning)
| 26.6 | 64 | 0.74812 |
f9469121eeab103831a2110844d01c4c5cbbd7f5 | 354 | py | Python | codewof/programming/migrations/0009_auto_20200417_0013.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | 3 | 2019-08-29T04:11:22.000Z | 2021-06-22T16:05:51.000Z | codewof/programming/migrations/0009_auto_20200417_0013.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | 265 | 2019-05-30T03:51:46.000Z | 2022-03-31T01:05:12.000Z | codewof/programming/migrations/0009_auto_20200417_0013.py | samuelsandri/codewof | c9b8b378c06b15a0c42ae863b8f46581de04fdfc | [
"MIT"
] | 7 | 2019-06-29T12:13:37.000Z | 2021-09-06T06:49:14.000Z | # Generated by Django 2.2.3 on 2020-04-16 12:13
from django.db import migrations
| 19.666667 | 51 | 0.59887 |
f946e2fce4d695420e4afffc8e580dcd4dade5ec | 273 | py | Python | diy_programs/diy_9_csv_module.py | bhalajin/blueprints | 7ad1d7860aafbb4c333de9efbbb7e546ed43c569 | [
"MIT"
] | null | null | null | diy_programs/diy_9_csv_module.py | bhalajin/blueprints | 7ad1d7860aafbb4c333de9efbbb7e546ed43c569 | [
"MIT"
] | null | null | null | diy_programs/diy_9_csv_module.py | bhalajin/blueprints | 7ad1d7860aafbb4c333de9efbbb7e546ed43c569 | [
"MIT"
] | null | null | null | import csv
a = [[1,2,3], [4,5,6]]
with open('test.csv', 'w', newline='') as testfile:
csvwriter = csv.writer(testfile)
for row in a:
csvwriter.writerow(row)
with open('test.csv', 'r') as testfile:
csvreader = csv.reader(testfile)
for row in csvreader:
print(row) | 21 | 51 | 0.663004 |
f948dbae262921813e79d529b722c0b66116eaf6 | 543 | py | Python | sourceFiles/ex027_LerNomeMostraUltimo.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | sourceFiles/ex027_LerNomeMostraUltimo.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | sourceFiles/ex027_LerNomeMostraUltimo.py | mcleber/Aulas_Python | bd224b593fcf907d54c8a2b92eb3afa88d327171 | [
"MIT"
] | null | null | null | '''
Faa um programa que leia o nome completo de uma pessoa, mostrando em seguida o primeiro e o ltimo
nome separadamente.
Ex.: Ana Maria de Souza
primeiro = Ana
ltimo = Souza
'''
# Read a full name, then print the first and last names separately.
n = str(input('Digite seu nome completo: ')).strip()
nome = n.split() # split() partitions the string into a list starting at index 0
print('Muito prazer em te conhecer!')
print('Seu primeiro nome {}'.format(nome[0]))
print('Seu segundo nome {}'.format(nome[1])) # index 1 picks the second name
# NOTE(review): accented characters in these literals look mojibake-damaged
# (encoding loss); restore them from the original file, not from this copy.
print('Seu ltimo nome {}'.format(nome[len(nome)-1]))
| 38.785714 | 100 | 0.696133 |
f949b7feca2216ed779a38104fad871de931f5cd | 1,715 | py | Python | hknweb/forms.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | hknweb/forms.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | hknweb/forms.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import (
UserCreationForm,
SetPasswordForm,
)
from hknweb.models import User, Profile
| 24.855072 | 77 | 0.593586 |
f94e553843e7ec006e6711f29cd3c8bedc298b1e | 18,184 | py | Python | pfstats.py | altinukshini/pfstats | 90137cdfdc7c5ae72b782c3fc113d56231e2667d | [
"MIT"
] | 18 | 2017-09-03T19:59:08.000Z | 2022-02-02T11:59:48.000Z | pfstats.py | altinukshini/pfstats | 90137cdfdc7c5ae72b782c3fc113d56231e2667d | [
"MIT"
] | 3 | 2018-04-23T14:09:47.000Z | 2020-09-30T10:26:16.000Z | pfstats.py | altinukshini/pfstats | 90137cdfdc7c5ae72b782c3fc113d56231e2667d | [
"MIT"
] | 14 | 2017-09-03T19:59:10.000Z | 2022-03-15T12:19:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Postfix mail log parser and filter.
This script filters and parses Postfix logs based on provided filter parameters.
Example:
To use this script type 'python pfstats.py -h'. Below is an example
    that filters a postfix log file (even gzipped) based on date,
sender of the email and email status::
$ python pfstats.py -d 'Jul 26' -t 'bounced' -s 'info@altinukshini.com'
Todo:
* Filter and parse logs from a year ago
* Add receiver filter
* Maybe provide from-to date filtering option
"""
# Script metadata (conventional module "dunder" attributes).
__author__ = "Altin Ukshini"
__copyright__ = "Copyright (c) 2017, Altin Ukshini"
__license__ = "MIT License"
__version__ = "1.0"
__maintainer__ = "Altin Ukshini"
__email__ = "altin.ukshini@gmail.com"
__status__ = "Production"
import re
import os
import sys
import gzip
import time
import argparse
import datetime
from random import randint
from argparse import RawTextHelpFormatter
from collections import defaultdict
########################################################
# Config
########################################################
# Default postfix log locations; overridable at runtime via --log / --log-dir.
default_log_file = r'/var/log/postfix/mail.log'
default_log_dir = r'/var/log/postfix/' # Must end with slash '/'
########################################################
# Predefined variables
########################################################
# ---- Module-level working state (mutated by the filter functions below) ----
sender_lines = []            # cleanup lines matching the sender/subject filter
status_lines = []            # delivery lines matching a single --type filter
status_lines_by_type = {'bounced' : [], 'deferred' : [], 'sent' : [], 'rejected' : []}
status_types = ['bounced', 'deferred', 'sent', 'rejected']
file_random_no = randint(100000, 999990)  # suffix shared by generated result files
generated_results = defaultdict(dict)     # per-status counters filled while processing
working_dir = os.getcwd() + '/'
start_time = time.time()

# Postfix/syslog date format: single-digit days are padded with an extra
# space, e.g. "Jul 26" but "Jul  2" (the --date help documents this).
date = datetime.datetime.now()
date_today_year = date.strftime("%Y")
date_today_month = date.strftime("%b")
date_today_day = date.strftime("%d").lstrip('0')
date_today = date_today_month + " " + date_today_day

if int(date_today_day) < 10:
    # BUG FIX: the original branch rebuilt the identical single-space string,
    # so the default date filter could never match syslog lines on days 1-9.
    # Syslog pads single-digit days with two spaces.
    date_today = date_today_month + "  " + date_today_day
########################################################
# Functions
########################################################
def get_receiver(line):
    """Return the recipient address parsed from a Postfix log line.

    Extracts the value inside ``to=<...>``.  Raises AttributeError when the
    line carries no such field (``re.search`` returns ``None``).
    """
    match = re.search(r'to=<(.*?)>', line)
    return match.group(1)
def get_sender(line):
    """Return the envelope sender parsed from a Postfix log line.

    Extracts the value inside ``from=<...>``.  Raises AttributeError when the
    line carries no such field (``re.search`` returns ``None``).
    """
    match = re.search(r'from=<(.*?)>', line)
    return match.group(1)
def get_email_subject(line):
    """Return the Subject header parsed from a Postfix cleanup log line.

    Takes the text between ``Subject: `` and the first whitespace-separated
    ``from`` token.  Raises AttributeError when no match exists.
    """
    match = re.search(r'Subject: (.*?)\sfrom', line)
    return match.group(1)
def get_email_status(line):
    """Return the delivery status token (sent/bounced/deferred/...) of a line.

    Takes the text between ``status=`` and the next whitespace character.
    Raises AttributeError when no match exists.
    """
    match = re.search(r'status=(.*?)\s', line)
    return match.group(1)
def get_host_message(line, status):
    """Return the delivery-agent message following ``status=<status> `` .

    :param line: a Postfix delivery log line
    :param status: the status token expected on the line (e.g. ``sent``)
    :raises AttributeError: when the line has no ``status=<status>`` field

    Fix: *status* is interpolated into the pattern, so it is escaped with
    ``re.escape`` to keep any regex metacharacters literal.
    """
    message = re.search('status=' + re.escape(status) + ' (.*)', line)
    return message.group(1)
def get_message_id(line):
    """Return the Postfix queue ID of a log line.

    The queue ID is the sixth whitespace-separated token (after the
    timestamp, host and daemon fields) with its trailing colon removed.
    """
    tokens = line.split()
    return tokens[5].replace(":", "")
def get_line_date(line):
    """Return the 'Mon DD' date prefix (first two tokens) of a log line.

    Fix: the original split the line twice and wrapped an already-str token
    in ``str()``; split once and join the first two tokens.
    """
    tokens = line.split()
    return tokens[0] + " " + tokens[1]
def check_sender_line(line):
    """True when *line* is a cleanup record carrying sender and subject.

    Such lines contain all three markers: ``cleanup``, ``from=`` and
    ``Subject``.
    """
    return all(marker in line for marker in ('cleanup', 'from=', 'Subject'))
def filter_line_sender_subject(line):
    """Append *line* to ``sender_lines`` when it passes the CLI filters.

    A filter that was not supplied (``None``) is treated as a match, so the
    line is kept when every supplied filter (``args.sender``,
    ``args.message``) occurs in it.
    """
    global args, sender_lines
    sender_ok = args.sender is None or args.sender in line
    message_ok = args.message is None or args.message in line
    if sender_ok and message_ok:
        sender_lines.append(line)
def filter_line(line):
    """Route one raw log line into the appropriate module-level bucket.

    Cleanup (sender/subject) lines go through the sender filter; delivery
    lines (those carrying ``to=`` and ``dsn=``) are collected either into
    ``status_lines`` when a single --type was requested, or into
    ``status_lines_by_type`` keyed by the parsed status otherwise.
    """
    global sender_lines, status_lines, status_lines_by_type, status_types
    if check_sender_line(line):
        filter_line_sender_subject(line)
        return
    has_delivery_fields = 'to=' in line and 'dsn=' in line
    if args.type in status_types:
        # Single-status mode: keep only lines carrying the requested status.
        if has_delivery_fields and ('status=' + args.type) in line:
            status_lines.append(line)
    elif has_delivery_fields and 'status=' in line:
        # All-statuses mode: bucket by the status parsed from the line.
        status = get_email_status(line)
        if status in status_types:
            status_lines_by_type[status].append(line)
def check_if_gz(file_name):
    """True when *file_name* names a gzip-compressed log (``.gz`` suffix)."""
    return file_name.endswith(('.gz',))
def filter_log_file(log_file):
    """Stream *log_file* (plain or gzip) through the line filters.

    Lines containing the module-level ``date_filter`` string are passed to
    ``filter_line``; everything else is skipped.

    Fixes over the original:
    - removed a leftover debug ``print(line)`` that dumped every gz line to
      stdout before the date filter was even applied;
    - stopped rebinding the *log_file* parameter to the open file handle;
    - dropped the redundant ``close()`` call on a handle already closed by
      the ``with`` block.
    """
    global date_today, date_filter
    # gzip.open in 'rt' mode and open in 'rt' mode both yield text lines,
    # so one unified loop covers both branches.
    opener = gzip.open if check_if_gz(log_file) else open
    with opener(log_file, 'rt') as handle:
        for line in handle:
            if date_filter in line:
                filter_line(line)
def process_line(sender_line, status_lines, status_type, file):
    """Join one cleanup (sender) line with its delivery lines; emit CSV rows.

    Matches *sender_line*'s queue ID against every line in *status_lines*;
    each match produces one delimiter-separated row written to *file*
    (columns: date, sender, receiver, message_id, subject, host_message)
    and bumps the module-level ``generated_results[status_type]`` counter.
    The *file* parameter shadows the ``file`` builtin name — kept for
    interface compatibility.
    """
    global args, generated_results
    # Fields shared by every row generated from this sender line.
    message_id = get_message_id(sender_line)
    sender = get_sender(sender_line)
    subject = get_email_subject(sender_line)
    for status_line in status_lines:
        if message_id in status_line:
            receiver = get_receiver(status_line)
            host_message = get_host_message(status_line, status_type)
            line_date = get_line_date(status_line)
            # NOTE(review): generated_results is declared as defaultdict(dict);
            # "+= 1" only works if generated_results[status_type] is set to an
            # int elsewhere (presumably in __main__) — confirm before reuse.
            generated_results[status_type] += 1
            file.write(
                line_date + args.output_delimiter +
                sender + args.output_delimiter +
                receiver + args.output_delimiter +
                message_id + args.output_delimiter +
                subject + args.output_delimiter +
                host_message + "\n")
def write_file_header(file):
    """Write the delimiter-separated column header row to *file*.

    Columns match the rows emitted by ``process_line``.  The *file*
    parameter name shadows the builtin but is kept for compatibility.
    """
    global args
    columns = ("date", "sender", "receiver", "message_id", "subject",
               "host_message")
    file.write(args.output_delimiter.join(columns) + "\n")
def date_filter_formated(date_filter):
    """Parse a 'Mon D' filter string into a datetime in the current year.

    (Name spelled "formated" upstream; kept for interface compatibility.)
    """
    year_prefix = datetime.datetime.now().strftime('%Y ')
    return datetime.datetime.strptime(year_prefix + date_filter, '%Y %b %d')
def date_filter_int(date_filter):
    """Return the filter date as an integer in YYYYMMDD form."""
    parsed = date_filter_formated(date_filter)
    return int(parsed.strftime('%Y%m%d'))
def get_files_in_log_dir(default_log_dir):
    """Return the plain-file names inside *default_log_dir*.

    Exits the program (``sys.exit``) when the directory holds no regular
    files, matching the original behavior.
    """
    all_log_files = []
    for entry in os.listdir(default_log_dir):
        if os.path.isfile(os.path.join(default_log_dir, entry)):
            all_log_files.append(entry)
    if not all_log_files:
        sys.exit("Default log directory has no files in it!")
    return all_log_files
def generate_files_to_check(date_filter):
    """Build the rotated-log file names expected to hold the filter date.

    Yesterday's traffic lives partly in the current (uncompressed) rotation
    and partly in yesterday's ``.gz``; older dates live in that day's and the
    following day's ``.gz`` files.  Returns an empty list for today/future.
    """
    now = datetime.datetime.now()
    today_int = int(now.strftime('%Y%m%d'))
    yesterday_int = int((now - datetime.timedelta(days=1)).strftime('%Y%m%d'))
    wanted_date = date_filter_formated(date_filter)
    wanted_int = date_filter_int(date_filter)
    day_after_wanted = wanted_date + datetime.timedelta(days=1)
    if wanted_int < today_int and wanted_int == yesterday_int:
        return [
            'mail.log-' + now.strftime('%Y%m%d'),
            'mail.log-' + wanted_date.strftime('%Y%m%d') + '.gz',
        ]
    if wanted_int < today_int and wanted_int < yesterday_int:
        return [
            'mail.log-' + wanted_date.strftime('%Y%m%d') + '.gz',
            'mail.log-' + day_after_wanted.strftime('%Y%m%d') + '.gz',
        ]
    return []
def populate_temp_log_file(file_name, temp_log_file):
    """Append every line of *file_name* (plain or gzip) to *temp_log_file*.

    The gz check is done inline on the ``.gz`` suffix; both branches yield
    text lines, which are copied verbatim to the destination handle.
    """
    if file_name.endswith('.gz'):
        source = gzip.open(file_name, 'rt')
    else:
        source = open(file_name, 'r')
    try:
        for line in source:
            temp_log_file.write(line)
    finally:
        source.close()
def generate_working_log(date_filter):
    """Combine the rotated logs covering *date_filter* into one temp file.

    Looks up which rotated files should exist for the date, concatenates the
    ones actually present in ``args.log_dir`` into
    ``<working_dir>/temp-YYYYMMDD.log``, and points the module-level
    ``log_file`` at the result.  Missing files are reported to stdout.
    """
    global args, log_file, working_dir
    available = get_files_in_log_dir(args.log_dir)
    wanted = generate_files_to_check(date_filter)
    stamp = date_filter_formated(date_filter).strftime('%Y%m%d')
    temp_path = working_dir + 'temp-' + stamp + '.log'
    with open(temp_path, 'w') as temp_log_file:
        for name in wanted:
            if name in available:
                populate_temp_log_file(args.log_dir + name, temp_log_file)
            else:
                print("File not found: " + name)
    log_file = temp_path
def print_results(results):
    """Return void
    Prints the end results of the file processing
    """
    global args, file_random_no
    print("\n************************* RESULTS *************************\n")
    if not results:
        print('Results could not be printed')
    else:
        total = 0
        for status, count in results.items():
            total += count
            # The 'sent' row historically carries one extra space after the
            # colon; every other status uses a bare ":\t".
            separator = ": \t" if status == 'sent' else ":\t"
            print(status + separator + str(count) + "\t\t"
                  + status + "-" + str(file_random_no)
                  + "." + args.output_filetype)
        print("\n-----\nTotal:\t\t" + str(total))
    print("\n***********************************************************")
if __name__ == "__main__":
    ########################################################
    # Argument(s) Parser
    ########################################################
    parser = argparse.ArgumentParser(description='Filter and parse Postfix log files.', formatter_class=RawTextHelpFormatter)
    parser.add_argument('-d', '--date',
                        dest='date',
                        default=date_today,
                        metavar='',
                        help='''Specify different date. Default is current date.\nFormat: Jan 20 (note one space) &
Jan 2 (note two spaces).\nDefault is todays date: ''' + date_today + '\n\n')
    parser.add_argument('-t', '--type',
                        dest='type',
                        default='all',
                        metavar='',
                        help='Type of email status: bounced, sent, rejected, deferred.\nDefault is all.\n\n')
    parser.add_argument('-s', '--sender',
                        dest='sender',
                        metavar='',
                        help='Specify senders address in order to query logs matching this parameter\n\n')
    parser.add_argument('-m', '--message',
                        dest='message',
                        metavar='',
                        help='''Postfix default log format must be changed for this option to work.
Add subject message in logs, and then you can use this option to query\nthose emails with specific subject message.\n\n''')
    parser.add_argument('-l', '--log',
                        dest='log',
                        default=default_log_file,
                        metavar='',
                        help='Specify the log file you want to use.\nDefault is: ' + default_log_file + '\n\n')
    parser.add_argument('--log-dir',
                        dest='log_dir',
                        default=default_log_dir,
                        metavar='',
                        help='Specify the log directory.\nDefault is: ' + default_log_dir + '\n\n')
    parser.add_argument('--output-directory',
                        dest='output_directory',
                        default=working_dir,
                        metavar='',
                        help='Specify the generated file(s) directory.\nDefault is current working directory: ' + working_dir + '\n\n')
    parser.add_argument('--output-delimiter',
                        dest='output_delimiter',
                        default=';',
                        metavar='',
                        help='Specify the generated output delimiter.\nDefault is ";"\n\n')
    parser.add_argument('--output-filetype',
                        dest='output_filetype',
                        default='csv',
                        metavar='',
                        help='Specify the generated output file type.\nDefault is "csv"\n\n')
    args = parser.parse_args()
    ## Validate arguments
    log_file = default_log_file
    date_filter = date_today
    # Check if provided parameters are valid
    if os.path.isfile(args.log) is not True:
        parser.error('Provided log file does not exist: ' + args.log)
    if args.output_directory != working_dir and args.output_directory.endswith('/') is not True:
        parser.error('Generated output file(s) directory must end with slash "/"')
    if args.log_dir != default_log_dir and args.log_dir.endswith('/') is not True:
        parser.error('Log directory must end with slash "/"')
    if os.path.exists(args.output_directory) is not True:
        parser.error('Generated output file(s) directory does not exist: ' + args.output_directory)
    if os.path.exists(args.log_dir) is not True:
        parser.error('This log directory does not exist in this system: ' + args.log_dir + '\nMaybe provide a different log dir with --log-dir')
    # If date provided, change date filter to the provided one
    if args.date != date_filter:
        date_filter = args.date
    # If log provided, change default log file to provided one
    if args.log != log_file:
        log_file = args.log
    ########################################################
    # Execution / Log parsing and filtering
    ########################################################
    # Check if provided date is valid (reject anything after today)
    if int(date_filter_formated(date_filter).strftime('%Y%m%d')) > int(datetime.datetime.now().strftime('%Y%m%d')):
        sys.exit("Provided date format is wrong or higher than today's date!")
    # In case the date filter is provided, and it is different from today,
    # it means that we will have to generate a temp log which contains
    # combined logs from default log dir (gzip logrotated files included)
    if date_filter != date_today and log_file == default_log_file:
        generate_working_log(date_filter)
    # Start filtering log file based on provided filters
    filter_log_file(log_file)
    # If there were no senders/filter matches, exit
    if not sender_lines:
        sys.exit("No matching lines found to be processed with provided filters in log file (" + log_file + "). Exiting...")
    # Start parsing
    # If message status type provided, filter only those messages
    if args.type in status_types:
        generated_results[args.type] = 0
        with open(args.output_directory + args.type + '-' \
                + str(file_random_no) + '.' \
                + args.output_filetype, 'w') as generated_file:
            write_file_header(generated_file)
            for sender_line in sender_lines:
                process_line(sender_line, status_lines, args.type, generated_file)
            # NOTE(review): close() is redundant inside the with block;
            # the context manager already closes the file.
            generated_file.close()
    # Else, filter all status types (bounced, sent, rejected, deferred)
    else:
        for status_type in status_types:
            generated_results[status_type] = 0
            with open(args.output_directory + status_type + '-' \
                    + str(file_random_no) + '.' \
                    + args.output_filetype, 'w') as generated_file:
                write_file_header(generated_file)
                for sender_line in sender_lines:
                    process_line(sender_line, status_lines_by_type[status_type], \
                                 status_type, generated_file)
                # NOTE(review): redundant close() inside with block (see above).
                generated_file.close()
    # Generate and print results
    print_results(generated_results)
    # start_time is presumably set at module import time — total wall-clock runtime.
    print("--- %s seconds ---" % (time.time() - start_time))
| 31.460208 | 144 | 0.592444 |
f951cf837ee7d78498aad48b843418086e875c47 | 1,524 | py | Python | test/atw_config_auto.py | lichengwu/python_tools | 3ebf70e6a6f6689ce2b615bed1500b8817f0b82a | [
"Apache-2.0"
] | null | null | null | test/atw_config_auto.py | lichengwu/python_tools | 3ebf70e6a6f6689ce2b615bed1500b8817f0b82a | [
"Apache-2.0"
] | null | null | null | test/atw_config_auto.py | lichengwu/python_tools | 3ebf70e6a6f6689ce2b615bed1500b8817f0b82a | [
"Apache-2.0"
] | null | null | null | __author__ = 'lichengwu'
if __name__ == "__main__":
    # Hostname / IP pairs, one per line, tab-separated. The \t inside the
    # triple-quoted string is an escape sequence, so split("\t") below works.
    config = """atw7b010100054060.et2\t10.100.54.60
atw8b010100054070.et2\t10.100.54.70
atw5b010100054057.et2\t10.100.54.57
atw6b010100054058.et2\t10.100.54.58
atw5b010179212040.s.et2\t10.179.212.40
atw6b010179213116.s.et2\t10.179.213.116
atw7b010179213117.s.et2\t10.179.213.117
atw8b010179213164.s.et2\t10.179.213.164"""
    # Emit one INSERT statement per host (Python 2 print statements).
    # get_note() and get_groups() are presumably defined elsewhere in this
    # module — not visible in this chunk; confirm before running.
    for line in config.split("\n"):
        pair = line.split("\t")
        host = pair[0].strip()
        ip = pair[1].strip()
        # Sharding marker = 4th character of the hostname.
        # NOTE(review): for every host listed above host[3] is a digit, so
        # the 'm' branch below looks unreachable with this config — confirm.
        sharding = host[3]
        if sharding == 'm':
            print "insert into atw_server_config(gmt_create, gmt_modified, server_ip, biz_type, note, group_list, start_mode) values(now(), now(), '%s', 3, '%s', '0', 2);" % (
                ip, get_note(host))
        else:
            print "insert into atw_server_config(gmt_create, gmt_modified, server_ip, biz_type, note, group_list, start_mode) values(now(), now(), '%s', 3, '%s', '%s', 2);" % (
                ip, get_note(host),get_groups(sharding))
| 30.48 | 176 | 0.571522 |
f9564d9454e04c5d07bedcb3655d9efe0ca449c7 | 133 | py | Python | compound_types/built_ins/lists.py | vahndi/compound-types | cda4f49651b4bfbcd9fe199de276be472620cfad | [
"MIT"
] | null | null | null | compound_types/built_ins/lists.py | vahndi/compound-types | cda4f49651b4bfbcd9fe199de276be472620cfad | [
"MIT"
] | null | null | null | compound_types/built_ins/lists.py | vahndi/compound-types | cda4f49651b4bfbcd9fe199de276be472620cfad | [
"MIT"
] | null | null | null | from typing import List
# Type aliases for homogeneous lists of common built-in element types.
BoolList = List[bool]
DictList = List[dict]
FloatList = List[float]
IntList = List[int]
StrList = List[str]
| 16.625 | 23 | 0.736842 |
f956a3d5495345885097a51ce9c2704ddca7f850 | 3,396 | py | Python | Sketches/TG/soc2007/shard_final/BranchShard.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/TG/soc2007/shard_final/BranchShard.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/TG/soc2007/shard_final/BranchShard.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Shard import *
| 39.488372 | 107 | 0.608952 |
f9570198e7a5f622e1af77b862f79e6f0ce39380 | 486 | py | Python | infrastructure/crypto_ml/agent/SimpleAgent.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | 1 | 2020-02-18T00:38:16.000Z | 2020-02-18T00:38:16.000Z | infrastructure/crypto_ml/agent/SimpleAgent.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | null | null | null | infrastructure/crypto_ml/agent/SimpleAgent.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | 1 | 2020-02-18T00:39:12.000Z | 2020-02-18T00:39:12.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 UWATC. All rights reserved.
#
# Use of this source code is governed by an MIT license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/MIT
from .AgentTemplate import AgentTemplate | 32.4 | 76 | 0.748971 |
f9576382290725337f6455bafa4ade3618c4bd12 | 8,349 | py | Python | pod.py | ddh0/pod | 5c630f609db6d4e2d6704874144faf9fe64ee15b | [
"MIT"
] | 1 | 2020-11-20T16:35:07.000Z | 2020-11-20T16:35:07.000Z | pod.py | ddh0/pod | 5c630f609db6d4e2d6704874144faf9fe64ee15b | [
"MIT"
] | null | null | null | pod.py | ddh0/pod | 5c630f609db6d4e2d6704874144faf9fe64ee15b | [
"MIT"
] | null | null | null | # Program that downloads all episodes of a podcast
# Features
# -- Functions: add, remove, update
# - Run the file to update without having to use python interpreter
# - Download all episodes of a podcast, put into the correct folder
# - Tag each file with metadata from the feed and the stored config
import os
import sys
import pickle
import requests
import datetime
import feedparser
import subprocess
STORAGE_DIR = "C:\\Users\\Dylan\\Python\\pod\\storage\\"
LOGFILE = "C:\\Users\\Dylan\\Python\\pod\\log.txt"
FFMPEG_PATH = "C:\\Users\\Dylan\\Python\\pod\\ffmpeg.exe"
TEMP_DIR = "C:\\Users\\Dylan\\AppData\\Local\\Temp\\"
debug = False
def log(text):
    """For internal use. Easily log events.

    Appends an ISO-8601-timestamped line to LOGFILE. When the module-level
    `debug` flag is True, also echoes the entry to stdout.
    To display these events onscreen as they occur, set pod.debug = True."""
    if debug:
        print("--debug: " + text)
    # Append mode so successive runs never truncate the log.
    # Fix: the handle was previously bound as `log`, shadowing this very
    # function inside its own body — renamed to avoid the confusion.
    with open(LOGFILE, 'a') as logfile:
        logfile.write(datetime.datetime.now().isoformat() + ': ' + str(text) + '\n')
def add():
    """Creates a stored configuration file for the given feed, "*.pod", so that
    the feed can be checked quickly without having to specify the URL or metadata again."""
    # Prompts appear in the exact positional order the Podcast constructor
    # expects, so the answers can be splatted straight into it.
    prompts = (
        "Podcast name: ",
        "Feed URL: ",
        "Storage dir: ",
        "Prefix: ",
        "Album: ",
        "Artist: ",
        "Release year: ",
        "Album art URL: ",
    )
    podcast_obj = Podcast(*(input(prompt) for prompt in prompts))
    # Persist the config next to the other *.pod files, keyed by podcast name.
    config_path = STORAGE_DIR + podcast_obj.name + '.pod'
    with open(config_path, 'wb') as config_file:
        pickle.dump(podcast_obj, config_file)
def remove():
    """Removes the configuration file associated with the given podcast.

    Prompts for the podcast name on stdin; prints a notice if no matching
    config file exists.
    """
    name = input("Name of podcast to remove: ")
    # Fix (EAFP): attempt the removal and handle absence, avoiding the
    # exists()-then-remove() race of the previous check-then-act version.
    try:
        os.remove(STORAGE_DIR + name + '.pod')
    except FileNotFoundError:
        print('-- %s does not exist' % name)
def update():
    """Checks for new entries from all feeds, download and tag new episodes.

    For every stored *.pod config: parse its RSS feed, ensure the storage
    directory and cover art exist, then download, tag (via ffmpeg) and file
    any episode not already present on disk.
    """
    # For each stored podcast config
    for file in os.listdir(STORAGE_DIR):
        with open(STORAGE_DIR + file, 'rb') as f:
            podcast_obj = pickle.load(f)
        log("Updating podcast: %s" % podcast_obj.name)
        print('Updating "%s":' % podcast_obj.name)
        # Get feed
        feed = feedparser.parse(podcast_obj.feed)
        length = len(feed.entries)
        # Create storage dir if it does not exist
        if not os.path.exists(podcast_obj.storage_dir):
            os.mkdir(podcast_obj.storage_dir)
        # Download image if it does not exist
        image_path = podcast_obj.storage_dir + podcast_obj.prefix + "_Album_Art.png"
        if not os.path.exists(image_path):
            print("Downloading podcast cover art...")
            log("Downloading image")
            response = requests.get(podcast_obj.art)
            with open(image_path, 'wb') as imgfile:
                imgfile.write(response.content)
        # Set podcast-specific metadata
        # image_path set above, title set per-episode
        album = podcast_obj.album
        artist = podcast_obj.artist
        year = podcast_obj.year
        # Get episodes from feed in chronological order (feeds list
        # newest-first, hence the reversed index loop).
        for i in range(length-1, -1, -1):
            # Get current episode number
            ep_num = length - i
            display_prefix = podcast_obj.prefix + "_" + str(ep_num).zfill(3)
            # Get episode title
            title = feed.entries[i].title
            # Get episode URL: walk the entry's links until one looks like
            # an audio enclosure (.mp3/.wav/.m4a).
            episode_url = ""  # Variables for
            x = 0  # the while loop
            skip_this_item = False
            while ('.mp3' not in episode_url and
                   '.wav' not in episode_url and
                   '.m4a' not in episode_url):
                try:
                    episode_url = feed.entries[i]['links'][x]['href']
                except:
                    # Ran out of links without finding audio.
                    skip_this_item = True
                    break
                log("episode_url: %s" % episode_url)
                x += 1
            if ".mp3" in episode_url:
                ext = ".mp3"
            if ".wav" in episode_url:
                ext = ".wav"
            if ".m4a" in episode_url:
                ext = ".m4a"
            # NOTE(review): if skip_this_item is True on the very first
            # episode, `ext` is never assigned and the xpath line below
            # raises NameError before the skip check runs — confirm.
            # Get full episode destination path
            # xpath is the temporary file as it was downloaded with only the name changed
            # path is the final file
            xpath = TEMP_DIR + display_prefix + "X" + ext
            path = podcast_obj.storage_dir + display_prefix + ext
            # Skip this episode if already downloaded
            if os.path.exists(path):
                continue
            if skip_this_item:
                print(display_prefix + ": Skipped due to file extension (item likely not audio)")
                log(display_prefix + ": Skipped due to file extension (item likely not audio)")
                skip_this_item = False
                continue
            # Show which episode is in progress
            print(display_prefix + ': Downloading...')
            log('In progress: %s' % display_prefix)
            # Download episode (browser-like UA; some hosts refuse default UAs)
            HEADER_STRING = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'}
            response = requests.get(episode_url, headers=HEADER_STRING)
            # Fail if size is less than 1MB
            if sys.getsizeof(response.content) < 1000000: # If size is less than 1MB
                log("FATAL ERROR: response.content = %s bytes" % sys.getsizeof(response))
                raise IOError("-- response.content was only %s bytes" % sys.getsizeof(response.content))
            # Fail upon bad HTTP status code
            if not response.ok:
                log("FATAL ERROR: Bad response: status code %s" % response.status_code)
                raise ConnectionError("-- Response not ok, status code %s" % response.status_code)
            # Write mp3 data to file
            # Since this is done after the download is complete, interruptions will only break episodes
            # if they occur during the file being written to disk. If the script is interrupted during download,
            # the script will simply restart the download of the interrupted episode on the next run.
            with open(xpath, 'wb') as f:
                f.write(response.content)
            # Write correct metadata to clean file
            # Force using ID3v2.3 tags for best results
            # Only fatal errors will be displayed
            print(display_prefix + ": Writing correct metadata...")
            log("Writing metadata")
            subprocess.run([FFMPEG_PATH, "-i", xpath, "-i", image_path, "-map", "0:0", "-map", "1:0", "-codec", "copy",
                            "-id3v2_version", "3", "-metadata:s:v", 'title="Album cover"', "-metadata:s:v", 'comment="Cover (front)"',
                            "-metadata", "track=" + str(ep_num),
                            "-metadata", "title=" + title,
                            "-metadata", "album=" + album,
                            "-metadata", "album_artist=" + artist,
                            "-metadata", "artist=" + artist,
                            "-metadata", "year=" + year,
                            "-metadata", "genre=Podcast",
                            "-loglevel", "fatal", path])
            # Delete temporary file
            os.remove(xpath)
            log("Download complete: %s" % path)
        log("Update complete.")
        print("Files located in the following folder: %s" % podcast_obj.storage_dir)
if __name__ == '__main__':
    # Script entry point: refresh all stored podcasts.
    update()
| 39.947368 | 177 | 0.548928 |
f9577ac9ab9b2574ecfc469b539a86e4c283b783 | 1,954 | py | Python | threading_ext/RecordingThread.py | Antoine-BL/chess-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | 2 | 2019-08-21T15:52:29.000Z | 2021-09-11T23:07:17.000Z | threading_ext/RecordingThread.py | Antoine-BL/chess-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | 5 | 2020-09-25T23:15:31.000Z | 2022-02-10T00:07:33.000Z | threading_ext/RecordingThread.py | Antoine-BL/EuroTruck-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | null | null | null | import time
import numpy as np
import cv2
from mss import mss
from threading_ext.GameRecorder import GameRecorder
from threading_ext.PausableThread import PausableThread
| 33.689655 | 96 | 0.619754 |
f957da3a4215ef9104b40d885730febc525fd16f | 638 | py | Python | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | [
"Beerware"
] | 2 | 2021-11-11T12:14:35.000Z | 2021-12-07T15:03:41.000Z | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | [
"Beerware"
] | null | null | null | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | [
"Beerware"
] | null | null | null | #! /usr/bin/python3
import threading
import socket
from test import create_upd_clients
from client import multicast_receive
if __name__ == '__main__':
    try:
        # NOTE(review): test_multicast_receive is not defined or imported in
        # this file (the import above brings in `multicast_receive`), so this
        # likely raises NameError, which is then printed below — confirm the
        # intended callee.
        test_multicast_receive()
    except Exception as e:
        print(e)
    finally:
        # Keep the console window open until the user acknowledges.
        input('Press [ENTER]...')
f958f1f208280fca2b61c5a648551399de305a52 | 2,135 | py | Python | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | [
"Apache-2.0"
] | 7 | 2018-06-26T13:09:12.000Z | 2020-07-15T18:18:38.000Z | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | [
"Apache-2.0"
] | null | null | null | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | [
"Apache-2.0"
] | 1 | 2018-08-30T19:51:08.000Z | 2018-08-30T19:51:08.000Z | from tools import *
from model import *
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
# Build the dataset and a shuffled DataLoader over it.
name_dataset = NamesDataset('data/names/names_train_new.csv')
dataloader = DataLoader(name_dataset, batch_size=32, shuffle=True, num_workers=0)
# NOTE(review): name_data, chars and max_name_len are not defined in this
# chunk — presumably star-imported from tools/model above; confirm.
charcnn = CharCNN(n_classes=len(set(name_data['label'])), vocab_size=len(chars), max_seq_length=max_name_len)
# NOTE(review): criterion is created but never used here — train() may
# define its own loss; confirm.
criterion = nn.CrossEntropyLoss()
from tqdm import tqdm_notebook
# Train for 100 epochs, then persist the whole model object.
loss_history, loss_history_avg = train(charcnn, dataloader, 100)
torch.save(charcnn, 'charcnn.pth')
| 32.348485 | 109 | 0.635597 |
f95969e5274454c89e1f512e9e3893dfdf0ca196 | 737 | py | Python | automated_logging/migrations/0019_auto_20210504_1247.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | null | null | null | automated_logging/migrations/0019_auto_20210504_1247.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | null | null | null | automated_logging/migrations/0019_auto_20210504_1247.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2021-05-04 03:47
from django.db import migrations, models
| 30.708333 | 139 | 0.662144 |
f95b45ce076430bae5232cdd5ec93fdf00431354 | 2,037 | py | Python | libdiscid/tests/common.py | phw/python-libdiscid | fac3ca94057c7da2857af2fd7bd099f726a02869 | [
"MIT"
] | null | null | null | libdiscid/tests/common.py | phw/python-libdiscid | fac3ca94057c7da2857af2fd7bd099f726a02869 | [
"MIT"
] | null | null | null | libdiscid/tests/common.py | phw/python-libdiscid | fac3ca94057c7da2857af2fd7bd099f726a02869 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013 Sebastian Ramacher <sebastian+dev@ramacher.at>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Tests for the libdiscid module
"""
from __future__ import unicode_literals
| 31.338462 | 79 | 0.716249 |
f95b8e23ac103c21bff72619bd1a14be401e08f2 | 161 | py | Python | alexa_skill_boilerplate/__init__.py | variable/alexa_skill_boilerplate | c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc | [
"MIT"
] | null | null | null | alexa_skill_boilerplate/__init__.py | variable/alexa_skill_boilerplate | c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc | [
"MIT"
] | null | null | null | alexa_skill_boilerplate/__init__.py | variable/alexa_skill_boilerplate | c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Alexa Skill Boilerplate."""
__author__ = """James Lin"""
__email__ = 'james@lin.net.nz'
__version__ = '0.1.0'
| 20.125 | 52 | 0.639752 |
f95ba865fff759b92ca23cecc5920a5a1660850c | 1,881 | py | Python | train_teachers.py | offthewallace/DP_CNN | e7f4607cbb890a348d088b515c4aa7093fadb878 | [
"MIT"
] | 9 | 2018-02-28T06:09:23.000Z | 2022-03-15T13:42:47.000Z | train_teachers.py | offthewallace/DP_CNN | e7f4607cbb890a348d088b515c4aa7093fadb878 | [
"MIT"
] | null | null | null | train_teachers.py | offthewallace/DP_CNN | e7f4607cbb890a348d088b515c4aa7093fadb878 | [
"MIT"
] | 4 | 2018-01-21T06:42:10.000Z | 2020-08-17T09:07:42.000Z | #Author: Wallace He
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import keras
from keras.models import Sequential
from keras.models import model_from_json
from keras.models import load_model
import partition
import train_CNN
def train_teacher(nb_teachers, teacher_id):
    """
    This function trains a single teacher model with responds teacher's ID among an ensemble of nb_teachers
    models for the dataset specified.
    The model will be save in directory.
    :param nb_teachers: total number of teachers in the ensemble
    :param teacher_id: id of the teacher being trained
    :return: True if everything went well
    """
    # Load the dataset
    X_train, X_test, y_train, y_test = train_CNN.get_dataset()
    # Retrieve subset of data for this teacher (disjoint partition of the
    # training set, indexed by teacher_id)
    data, labels = partition.partition_dataset(X_train,
                                               y_train,
                                               nb_teachers,
                                               teacher_id)
    print("Length of training data: " + str(len(labels)))
    # Define teacher checkpoint filename and full path
    filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'
    filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'
    # Perform teacher training need to modify
    # Create teacher model
    model, opt = train_CNN.create_six_conv_layer(data.shape[1:])
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model, hist = train_CNN.training(model, data, X_test, labels, y_test, filename, data_augmentation=True)
    # Serialize the architecture to JSON.
    # NOTE(review): "model.json" is a fixed name, so training several teachers
    # in sequence overwrites the same file each time — confirm whether the
    # JSON should be teacher-specific like the weight files above.
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(filename2)
    print("Saved model to disk")
    return True
| 31.35 | 105 | 0.698033 |
f95de109f7f76174c635351d3c9d2f28ebfb7d06 | 3,651 | py | Python | descartes_rpa/fetch/descartes.py | reactome/descartes | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | [
"Apache-2.0"
] | 2 | 2021-08-02T18:09:07.000Z | 2022-01-18T08:29:59.000Z | descartes_rpa/fetch/descartes.py | reactome/descartes | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | [
"Apache-2.0"
] | 5 | 2021-06-22T22:27:23.000Z | 2021-08-04T02:04:09.000Z | descartes_rpa/fetch/descartes.py | reactome/descartes_rpa | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | [
"Apache-2.0"
] | null | null | null | import requests
import shutil
import pandas as pd
from typing import Dict, List
def fetch_descartes_human_tissue(out_file: str, verbose: bool = True) -> None:
    """Function to fetch Loom Single-Cell tissue data from
    Descartes human database.

    Args:
        out_file: Output file that is going to store .loom data
        verbose: If True (default), print statements about download

    Raises:
        requests.HTTPError: If the server answers with an error status code.

    Examples:
        >>> fetch_descartes_human_tissue("Human_Tissue.loom")
    """
    url = (
        "https://shendure-web.gs.washington.edu/content/members/cao1025/"
        "public/FCA_RNA_supp_files/scanpy_cells_all/"
        "Human_RNA_processed.loom"
    )
    if verbose:
        print("Downloading Human Single-Cell data from Descartes database")
        print(f"data url: {url}")
    with requests.get(url, stream=True, timeout=60) as data:
        # Fix: fail fast on HTTP errors instead of silently writing an
        # error page into the .loom output file.
        data.raise_for_status()
        with open(out_file, 'wb') as out:
            # Stream the raw socket to disk without buffering the whole
            # (large) file in memory.
            shutil.copyfileobj(data.raw, out)
    if verbose:
        print(f"Downloaded data to {out_file}")
def fetch_descartes_by_tissue(
    list_tissues: List[str],
    out_dir: str,
    verbose: bool = True
) -> None:
    """Function to fetch Loom Single-Cell tissue data from
    Descartes human database by choosing which tissues will be donwloaded.

    Args:
        list_tissues: List of tissues names to be downloaded.
        out_dir: Output directory that is going to store .loom data.
        verbose: If True (default), print statements about download.

    Raises:
        requests.HTTPError: If the server answers with an error status code
            for any requested tissue.

    Examples:
        >>> fetch_descartes_by_tissue(
                list_tissues=["Thymus", "Hearth"]
                out_dir="data"
            )
    """
    base_url = (
        "https://shendure-web.gs.washington.edu/content/members/cao1025/"
        "public/FCA_RNA_supp_files/scanpy_cells_by_tissue"
    )
    for tissue in list_tissues:
        url = f"{base_url}/{tissue}_processed.loom"
        if verbose:
            print((
                f"Downloading {tissue} tissue Human Single-Cell data "
                "from Descartes database"
            ))
            print(f"data url: {url}")
        file_name = f"{out_dir}/{tissue}_data.loom"
        with requests.get(url, stream=True, timeout=60) as data:
            # Fix: fail fast on HTTP errors (e.g. a misspelled tissue name
            # yielding 404) instead of writing an error page to disk.
            data.raise_for_status()
            with open(file_name, 'wb') as out:
                # Stream to disk without holding the whole file in memory.
                shutil.copyfileobj(data.raw, out)
        if verbose:
            print(f"Downloaded {file_name} to {out_dir}")
def fetch_de_genes_for_cell_type(
    verbose: bool = False
) -> Dict[str, List[str]]:
    """Function to fetch Differentially Expressed (DE) genes from Descartes
    Human Atlas from 77 Main Cell types found in 15 Organs.

    Args:
        verbose: If True (default), print statements about download

    Returns:
        Dictionary mapping each main cell type to its differentially
        expressed genes. Example: {
            "Acinar cells": ["MIR1302-11", "FAM138A", ...],
            "Myeloid cells": ["CU459201.1", "OR4G4P", ...] ...
        }
    """
    url = (
        "https://atlas.fredhutch.org/data/bbi/descartes/human_gtex/"
        "downloads/data_summarize_fetus_data/DE_gene_77_main_cell_type.csv"
    )
    if verbose:
        print((
            "Downloading Human Single-Cell Differentially Expressed"
            "genes for 77 Main Cell types found in 15 Organs."
        ))
        print(f"data url: {url}")
    de_df = pd.read_csv(url)
    de_mapping = {}
    # Fix: group once (O(rows)) instead of re-filtering the whole frame for
    # each cell type (O(types * rows)). sort=False keeps first-appearance
    # order, matching the previous unique()-based iteration; `cell_type`
    # also avoids shadowing the `type` builtin.
    for cell_type, group in de_df.groupby("max.cluster", sort=False):
        # Gene ids in the CSV are quoted (e.g. "'FAM138A'"); strip quotes.
        de_mapping[cell_type] = [
            gene.replace("'", "") for gene in group["gene_id"].tolist()
        ]
    return de_mapping
| 30.940678 | 78 | 0.621747 |