hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae93028834095132f0d185515f7eb82644b3d574 | 478 | py | Python | heap/1.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | heap/1.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | heap/1.py | miiiingi/algorithmstudy | 75eaf97e2c41d7edf32eb4a57d4d7685c9218aba | [
"MIT"
] | null | null | null | import heapq
answer = solution([1,2,3,9,10,12], 1000)
print(answer) | 28.117647 | 45 | 0.523013 |
ae98e871311e8fb43a781a04df1f457c09b8bd46 | 587 | py | Python | x_rebirth_station_calculator/station_data/modules/valley_forge.py | Phipsz/XRebirthStationCalculator | ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835 | [
"MIT"
] | 1 | 2016-04-17T11:00:22.000Z | 2016-04-17T11:00:22.000Z | x_rebirth_station_calculator/station_data/modules/valley_forge.py | Phipsz/XRebirthStationCalculator | ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835 | [
"MIT"
] | null | null | null | x_rebirth_station_calculator/station_data/modules/valley_forge.py | Phipsz/XRebirthStationCalculator | ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835 | [
"MIT"
] | null | null | null | from x_rebirth_station_calculator.station_data.station_base import Module
from x_rebirth_station_calculator.station_data.station_base import Production
from x_rebirth_station_calculator.station_data.station_base import Consumption
from x_rebirth_station_calculator.station_data import wares
names = {'L044': 'Valley Forge',
'L049': 'Talschmiede'}
productions = {'al': [Production(wares.Wheat, 5400.0)]}
consumptions = {'al': [Consumption(wares.EnergyCells, 600),
Consumption(wares.Water, 3000)]}
ValleyForge = Module(names, productions, consumptions)
| 39.133333 | 78 | 0.775128 |
ae999dae84b2cf7e73c7d8ac63967bb8d105893f | 652 | py | Python | migrations/versions/e424d03ba260_.py | danielSbastos/gistified | 96a8b61df4dbe54cc2e808734976c969e024976b | [
"MIT"
] | null | null | null | migrations/versions/e424d03ba260_.py | danielSbastos/gistified | 96a8b61df4dbe54cc2e808734976c969e024976b | [
"MIT"
] | null | null | null | migrations/versions/e424d03ba260_.py | danielSbastos/gistified | 96a8b61df4dbe54cc2e808734976c969e024976b | [
"MIT"
] | null | null | null | """empty message
Revision ID: e424d03ba260
Revises: ace8d095a26b
Create Date: 2017-10-12 11:25:11.775853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e424d03ba260'
down_revision = 'ace8d095a26b'
branch_labels = None
depends_on = None
| 22.482759 | 81 | 0.68865 |
ae9c101bafb94a97148b51abc123ec5cc959835d | 428 | py | Python | backend/setup.py | erik/smiegel | 34e9d132e241f2db4e96e84295588cd11d7c0164 | [
"MIT"
] | null | null | null | backend/setup.py | erik/smiegel | 34e9d132e241f2db4e96e84295588cd11d7c0164 | [
"MIT"
] | null | null | null | backend/setup.py | erik/smiegel | 34e9d132e241f2db4e96e84295588cd11d7c0164 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='smiegel',
version='0.0',
long_description=__doc__,
packages=['smiegel'],
include_package_data=True,
author='Erik Price',
description='Self hosted SMS mirroring service',
license='MIT',
install_requires=open('requirements.txt').readlines(),
entry_points={
'console_scripts': [
'smiegel = smiegel.__main__:main'
],
}
)
| 22.526316 | 58 | 0.640187 |
ae9ca2472d73373711675aa4fb19922a4e4088ab | 1,558 | py | Python | buycoins/ngnt.py | Youngestdev/buycoins-python | fa17600cfa92278d1c7f80f0a860e3ba7b5bc3b0 | [
"MIT"
] | 46 | 2021-02-06T07:29:22.000Z | 2022-01-28T06:52:18.000Z | buycoins/ngnt.py | Youngestdev/buycoins-python | fa17600cfa92278d1c7f80f0a860e3ba7b5bc3b0 | [
"MIT"
] | 1 | 2021-04-05T12:40:38.000Z | 2021-04-09T18:46:20.000Z | buycoins/ngnt.py | Youngestdev/buycoins-python | fa17600cfa92278d1c7f80f0a860e3ba7b5bc3b0 | [
"MIT"
] | 5 | 2021-02-06T08:02:19.000Z | 2022-02-18T12:46:26.000Z | from buycoins.client import BuyCoinsClient
from buycoins.exceptions import AccountError, ClientError, ServerError
from buycoins.exceptions.utils import check_response
| 35.409091 | 89 | 0.596919 |
ae9d77bf011601ea6bcbab318779c48b7e9a439f | 1,510 | py | Python | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training_status/signals.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | null | null | null | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training_status/signals.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | null | null | null | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training_status/signals.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | null | null | null | """App Signals
"""
import logging
from django.db.models.signals import post_save
from django.dispatch import receiver
from vision_on_edge.azure_training_status.models import TrainingStatus
from vision_on_edge.notifications.models import Notification
logger = logging.getLogger(__name__)
| 33.555556 | 75 | 0.654305 |
ae9e92c6d74c509eb9f3ed8c37b24f34f450e293 | 2,526 | py | Python | brilleaux_flask/brilleaux.py | digirati-co-uk/brilleaux | 5061d96e60239380c052f70dd12c4bec830e80db | [
"MIT"
] | null | null | null | brilleaux_flask/brilleaux.py | digirati-co-uk/brilleaux | 5061d96e60239380c052f70dd12c4bec830e80db | [
"MIT"
] | null | null | null | brilleaux_flask/brilleaux.py | digirati-co-uk/brilleaux | 5061d96e60239380c052f70dd12c4bec830e80db | [
"MIT"
] | null | null | null | import json
import brilleaux_settings
import flask
from flask_caching import Cache
from flask_cors import CORS
import logging
import sys
from pyelucidate.pyelucidate import async_items_by_container, format_results, mirador_oa
app = flask.Flask(__name__)
CORS(app)
cache = Cache(
app, config={"CACHE_TYPE": "filesystem", "CACHE_DIR": "./", "CACHE_THRESHOLD": 500}
)
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
)
app.run(threaded=True, debug=True, port=5000, host="0.0.0.0")
| 32.805195 | 88 | 0.644497 |
8818024dcc74585344f5c0d78dbccf72a3196b14 | 1,253 | py | Python | src/aioros_master/master.py | mgrrx/aioros_master | 24e74e851d6a00fb6517d053d78c02ed8b8bede2 | [
"Apache-2.0"
] | 1 | 2020-09-01T07:29:21.000Z | 2020-09-01T07:29:21.000Z | src/aioros_master/master.py | mgrrx/aioros_master | 24e74e851d6a00fb6517d053d78c02ed8b8bede2 | [
"Apache-2.0"
] | null | null | null | src/aioros_master/master.py | mgrrx/aioros_master | 24e74e851d6a00fb6517d053d78c02ed8b8bede2 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from aiohttp.web import AppRunner
# TODO fix import
from aioros.graph_resource import get_local_address
from .master_api_server import start_server
from .param_cache import ParamCache
from .registration_manager import RegistrationManager
| 28.477273 | 72 | 0.656824 |
881919bc42fa47660473b0dc049df1de113f468a | 1,344 | py | Python | models/agent.py | AfrinAyesha/data-service | 09d4aa45b1f8b0340646739fb0cf17966541af9d | [
"MIT"
] | null | null | null | models/agent.py | AfrinAyesha/data-service | 09d4aa45b1f8b0340646739fb0cf17966541af9d | [
"MIT"
] | null | null | null | models/agent.py | AfrinAyesha/data-service | 09d4aa45b1f8b0340646739fb0cf17966541af9d | [
"MIT"
] | null | null | null | from db import db
| 30.545455 | 123 | 0.638393 |
881a66b8c5d567be876f01b26aae1838a9ef3f6f | 421 | py | Python | oa/migrations/0004_auto_20171023_1747.py | htwenhe/DJOA | 3c2d384a983e42dedfd72561353ecf9370a02115 | [
"MIT"
] | 1 | 2017-10-31T02:37:37.000Z | 2017-10-31T02:37:37.000Z | oa/migrations/0004_auto_20171023_1747.py | htwenhe/whoa | 3c2d384a983e42dedfd72561353ecf9370a02115 | [
"MIT"
] | 1 | 2017-10-31T01:56:58.000Z | 2017-10-31T01:57:03.000Z | oa/migrations/0004_auto_20171023_1747.py | htwenhe/whoa | 3c2d384a983e42dedfd72561353ecf9370a02115 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-23 09:47
from __future__ import unicode_literals
from django.db import migrations
| 20.047619 | 48 | 0.598575 |
881a7473f1b598f91ff5fb6ca636049be01b75ae | 836 | py | Python | DateVersioning.py | EinarArnason/DateVersioning | 8e5a5e89218eed6324f71c9377d7342a0fd60cbd | [
"MIT"
] | null | null | null | DateVersioning.py | EinarArnason/DateVersioning | 8e5a5e89218eed6324f71c9377d7342a0fd60cbd | [
"MIT"
] | null | null | null | DateVersioning.py | EinarArnason/DateVersioning | 8e5a5e89218eed6324f71c9377d7342a0fd60cbd | [
"MIT"
] | null | null | null | import time
import subprocess
import sys
import logging
if __name__ == "__main__":
try:
print(generate(**dict(arg.split("=") for arg in sys.argv[1:])))
except GitDirectoryError as e:
logging.error("%s %s", "[DateVersioning]", e)
| 25.333333 | 71 | 0.600478 |
881a898ae26445fd0e94d07ff062d0f6af611593 | 520 | py | Python | src/cli_report.py | dmitryvodop/vk_likechecker | 3673ecf7548b3374aa5082bc69b7db1669f2f9c2 | [
"MIT"
] | null | null | null | src/cli_report.py | dmitryvodop/vk_likechecker | 3673ecf7548b3374aa5082bc69b7db1669f2f9c2 | [
"MIT"
] | null | null | null | src/cli_report.py | dmitryvodop/vk_likechecker | 3673ecf7548b3374aa5082bc69b7db1669f2f9c2 | [
"MIT"
] | null | null | null | MAX_CONSOLE_LINE_LENGTH = 79
| 34.666667 | 108 | 0.607692 |
881aadb872501d08df8bad8897f3a02a5ed64924 | 5,138 | py | Python | s3splitmerge/merge.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | s3splitmerge/merge.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | s3splitmerge/merge.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import typing
import pandas as pd
import smart_open
import awswrangler as wr
from .helpers import (
check_enumeration_s3_key_string,
get_key_size_all_objects,
group_s3_objects_no_larger_than,
)
from .options import ZFILL
def merge_parquet(boto3_session,
                  source_uri_list: typing.List[str],
                  target_bucket: str,
                  target_key: str) -> typing.Tuple[str, str]:
    """Merge several parquet objects on S3 into one parquet object.

    Each source uri is read into a DataFrame, the frames are concatenated
    row-wise, and the result is written to ``s3://{target_bucket}/{target_key}``.

    .. note::

        Parquet handling goes through the awswrangler API, which only accepts
        a boto3 session (not a low-level s3 client).

    :return: the ``(bucket, key)`` pair of the merged object.
    """
    frames = [
        wr.s3.read_parquet(uri, boto3_session=boto3_session)
        for uri in source_uri_list
    ]
    merged = pd.concat(frames, axis=0)
    wr.s3.to_parquet(
        df=merged,
        path=f"s3://{target_bucket}/{target_key}",
        boto3_session=boto3_session,
    )
    return target_bucket, target_key
def merge_parquet_by_prefix(boto3_session,
                            source_bucket,
                            source_key_prefix,
                            target_bucket,
                            target_key,
                            target_size,
                            zfill: int = ZFILL) -> typing.List[typing.Tuple[str, str]]:
    """Merge every parquet object under ``source_key_prefix`` into a sequence
    of parquet objects of roughly ``target_size`` bytes each.

    ``target_key`` must be an enumeration pattern (contains ``{i}``); each
    output object receives a zero-padded, 1-based sequence number.

    :return: the list of ``(bucket, key)`` pairs that were written.
    """
    check_enumeration_s3_key_string(target_key)
    s3_client = boto3_session.client("s3")

    # Inspect the input objects, then bucket them into groups whose combined
    # size stays at or below the requested output size.
    key_sizes = get_key_size_all_objects(
        s3_client=s3_client,
        bucket=source_bucket,
        prefix=source_key_prefix,
    )
    groups = group_s3_objects_no_larger_than(
        key_and_size_list=key_sizes,
        max_size=target_size,
    )

    written = []
    for seq, group_keys in enumerate(groups, start=1):
        merged_location = merge_parquet(
            boto3_session=boto3_session,
            source_uri_list=[
                f"s3://{source_bucket}/{s3_key}"
                for s3_key in group_keys
            ],
            target_bucket=target_bucket,
            target_key=target_key.format(i=str(seq).zfill(zfill)),
        )
        written.append(merged_location)
    return written
| 29.528736 | 87 | 0.610743 |
881b4859fbf99cdf056286c05c45307fee24239c | 5,316 | py | Python | maraboupy/test/test_query.py | yuvaljacoby/Marabou-1 | 553b780ef2e2cfe349b3954adc433a27af37a50f | [
"BSD-3-Clause"
] | null | null | null | maraboupy/test/test_query.py | yuvaljacoby/Marabou-1 | 553b780ef2e2cfe349b3954adc433a27af37a50f | [
"BSD-3-Clause"
] | null | null | null | maraboupy/test/test_query.py | yuvaljacoby/Marabou-1 | 553b780ef2e2cfe349b3954adc433a27af37a50f | [
"BSD-3-Clause"
] | 1 | 2021-06-29T06:54:29.000Z | 2021-06-29T06:54:29.000Z | # Supress warnings caused by tensorflow
import warnings
warnings.filterwarnings('ignore', category = DeprecationWarning)
warnings.filterwarnings('ignore', category = PendingDeprecationWarning)
import pytest
from .. import Marabou
import numpy as np
import os
# Global settings
TOL = 1e-4 # Tolerance for Marabou evaluations
ONNX_FILE = "../../resources/onnx/fc1.onnx" # File for test onnx network
ACAS_FILE = "../../resources/nnet/acasxu/ACASXU_experimental_v2a_1_1.nnet" # File for test nnet network
def test_sat_query(tmpdir):
    """
    Test that a query generated from Maraboupy can be saved and loaded correctly and return sat.
    """
    network = load_onnx_network()

    # Constrain the second output to be at least 70, which is satisfiable for
    # inputs in [-10, 10].
    outputVars = network.outputVars.flatten()
    outputVar = outputVars[1]
    minOutputValue = 70.0
    network.setLowerBound(outputVar, minOutputValue)

    # Save this query to a temporary file, and reload the query
    queryFile = tmpdir.mkdir("query").join("query.txt").strpath
    network.saveQuery(queryFile)
    ipq = Marabou.load_query(queryFile)

    # Solve the query loaded from the file and compare to the solution of the
    # original query. The result should be the same regardless of verbosity
    # options used, or if a file redirect is used.
    tempFile = tmpdir.mkdir("redirect").join("marabouRedirect.log").strpath
    opt = Marabou.createOptions(verbosity = 0)
    # Bug fix: 'opt' was created but never passed to the solvers, so the
    # verbosity setting had no effect; pass it like the sibling tests do.
    vals_net, _ = network.solve(filename = tempFile, options = opt)
    vals_ipq, _ = Marabou.solve_query(ipq, filename = tempFile, options = opt)

    # The two value dictionaries should have the same number of variables,
    # the same keys, and the values assigned should be within some tolerance
    # of each other.
    assert len(vals_net) == len(vals_ipq)
    for k in vals_net:
        assert k in vals_ipq
        assert np.abs(vals_ipq[k] - vals_net[k]) < TOL
def test_unsat_query(tmpdir):
    """
    Test that a query generated from Maraboupy can be saved and loaded correctly and return unsat.
    """
    network = load_onnx_network()

    # Require the first output (which approximates |x0| + |x1| for inputs in
    # [-10, 10]) to reach at least 2000 — impossible, so the query is unsat.
    first_output = network.outputVars.flatten()[0]
    network.setLowerBound(first_output, 2000.0)

    # Round-trip the query through a temporary file.
    query_file = tmpdir.mkdir("query").join("query.txt").strpath
    network.saveQuery(query_file)
    reloaded_query = Marabou.load_query(query_file)

    # Solve both the original and the reloaded query.
    options = Marabou.createOptions(verbosity = 0)
    network_vals, network_stats = network.solve(options = options)
    reloaded_vals, reloaded_stats = Marabou.solve_query(reloaded_query, options = options)

    # Unsat: no assignment is returned, and neither run timed out.
    assert len(network_vals) == 0
    assert len(reloaded_vals) == 0
    assert not network_stats.hasTimedOut()
    assert not reloaded_stats.hasTimedOut()
def test_to_query(tmpdir):
    """
    Test that a query generated from Maraboupy can be saved and loaded correctly and return timeout.
    This query is expected to be UNSAT but is currently unsolveable within one second.
    If future improvements allow the query to be solved within a second, then this test will need to be updated.
    """
    network = load_acas_network()

    # Bound the first output high enough that the solver cannot finish in time.
    bounded_output = network.outputVars.flatten()[0]
    network.setLowerBound(bounded_output, 1500.0)

    # Round-trip the query through a temporary file.
    query_file = tmpdir.mkdir("query").join("query.txt").strpath
    network.saveQuery(query_file)
    reloaded_query = Marabou.load_query(query_file)

    # Solve both with a 1-second budget; values are irrelevant here.
    options = Marabou.createOptions(verbosity = 0, timeoutInSeconds = 1)
    _, network_stats = network.solve(options = options)
    _, reloaded_stats = Marabou.solve_query(reloaded_query, options = options)

    # Both the original and the reloaded query should hit the timeout.
    assert network_stats.hasTimedOut()
    assert reloaded_stats.hasTimedOut()
def load_onnx_network():
    """
    Load the small test network fc1.onnx (two inputs, two outputs) and bound
    both inputs to [-10.0, 10.0].

    The network was trained such that the first output approximates the sum of
    the absolute values of the inputs, while the second output approximates the
    sum of the squares of the inputs, for inputs in that range.
    """
    network = Marabou.read_onnx(
        os.path.join(os.path.dirname(__file__), ONNX_FILE))

    # [0][0] because the first dimension is the batch size.
    input_vars = network.inputVars[0][0]
    for var in (input_vars[0], input_vars[1]):
        network.setLowerBound(var, -10.0)
        network.setUpperBound(var, 10.0)
    return network
def load_acas_network():
    """
    Load one of the ACAS Xu networks. It is larger than fc1.onnx, which makes
    it a better test case for exercising timeouts.
    """
    nnet_path = os.path.join(os.path.dirname(__file__), ACAS_FILE)
    return Marabou.read_nnet(nnet_path, normalize=True)
| 40.892308 | 112 | 0.702784 |
881f656efbfa94bdb3489d35c6e705e30fa814e3 | 2,703 | py | Python | blog/migrations/0006_auto_20220424_1910.py | moboroboo/training-site | 053628c9ce131c3d88c621b837bf67fdd3c59cf2 | [
"MIT"
] | null | null | null | blog/migrations/0006_auto_20220424_1910.py | moboroboo/training-site | 053628c9ce131c3d88c621b837bf67fdd3c59cf2 | [
"MIT"
] | null | null | null | blog/migrations/0006_auto_20220424_1910.py | moboroboo/training-site | 053628c9ce131c3d88c621b837bf67fdd3c59cf2 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-04-24 14:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_jalali.db.models
| 35.565789 | 146 | 0.593045 |
88203bc1014682e00a112fd76476f7cca8c80dfe | 6,825 | py | Python | module4-acid-and-database-scalability-tradeoffs/titanic_queries.py | imdeja/DS-Unit-3-Sprint-2-SQL-and-Databases | 100546c4c8acdecd3361661705f373a2bcd3e7c9 | [
"MIT"
] | null | null | null | module4-acid-and-database-scalability-tradeoffs/titanic_queries.py | imdeja/DS-Unit-3-Sprint-2-SQL-and-Databases | 100546c4c8acdecd3361661705f373a2bcd3e7c9 | [
"MIT"
] | null | null | null | module4-acid-and-database-scalability-tradeoffs/titanic_queries.py | imdeja/DS-Unit-3-Sprint-2-SQL-and-Databases | 100546c4c8acdecd3361661705f373a2bcd3e7c9 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
import pandas as pd
import psycopg2
from psycopg2.extras import execute_values
import json
import numpy as np

# Credentials come from a .env file so they never live in source control.
load_dotenv()

DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")

conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
curs = conn.cursor()

# The three ticket classes on the Titanic.
PCLASSES = (1, 2, 3)
# (survived flag in the table, verb used in the report output)
SURVIVAL = ((0, "died"), (1, "survived"))


def _scalar(query):
    """Execute *query* (one row, one column) and return its single value.

    All queries below are built from the hard-coded constants above, so string
    formatting is safe here; use parameterized queries for any external input.
    """
    curs.execute(query)
    return curs.fetchone()[0]


def _avg_by_class(column, label):
    """Print the per-class average of *column*, phrased with *label*."""
    for pclass in PCLASSES:
        print(f"The average {label} in class {pclass} was",
              _scalar(f"SELECT avg({column}) FROM passengers WHERE pclass = {pclass}"))


def _avg_by_survival(column, label):
    """Print the average of *column* for nonsurvivors then survivors."""
    for flag, verb in SURVIVAL:
        print(f"The average {label} who {verb} was",
              _scalar(f"SELECT avg({column}) FROM passengers WHERE survived = {flag}"))


# - How many passengers survived, and how many died?
print(_scalar("SELECT count(survived) FROM passengers WHERE survived = 0"),
      "passengers died.")
print(_scalar("SELECT count(survived) FROM passengers WHERE survived = 1"),
      "passengers survived.")

# - How many passengers were in each class?
for pclass in PCLASSES:
    print("There were",
          _scalar(f"SELECT count(pclass) FROM passengers WHERE pclass = {pclass}"),
          f"passengers in class {pclass}.")

# - How many passengers survived/died within each class?
for pclass in PCLASSES:
    for flag, verb in SURVIVAL:
        print("There were",
              _scalar(f"SELECT count(pclass) FROM passengers "
                      f"WHERE survived = {flag} AND pclass = {pclass}"),
              f"passengers who {verb} in class {pclass}.")

# - What was the average age of survivors vs nonsurvivors?
_avg_by_survival("age", "age of passengers")

# - What was the average age of each passenger class?
_avg_by_class("age", "age of passengers")

# - What was the average fare by passenger class? By survival?
_avg_by_class("fare", "fare of passengers")
_avg_by_survival("fare", "fare of passengers")

# - How many siblings/spouses aboard on average, by passenger class? By survival?
_avg_by_class("sib_spouse_count", "siblings/spouses aboard")
_avg_by_survival("sib_spouse_count", "siblings/spouses aboard of passengers")

# - How many parents/children aboard on average, by passenger class? By survival?
_avg_by_class("parent_child_count", "parents/children aboard")
_avg_by_survival("parent_child_count", "parents/children aboard of passengers")

# - Do any passengers have the same name?
# Bug fix: the original query (count(DISTINCT name) with a table-wide HAVING
# and no GROUP BY) could never detect duplicates, and the script then printed
# a hard-coded "all different" conclusion. Group by name instead and count the
# names that occur more than once.
duplicate_names = _scalar(
    "SELECT count(*) FROM "
    "(SELECT name FROM passengers GROUP BY name HAVING count(*) > 1) AS dups"
)
if duplicate_names == 0:
    print("All passengers have a different name.")
else:
    print(duplicate_names, "names are shared by more than one passenger.")

# (Bonus! Hard, may require pulling and processing with Python) How many married
#couples were aboard the Titanic? Assume that two people (one `Mr.` and one
#`Mrs.`) with the same last name and with at least 1 sibling/spouse aboard are
#a married couple. | 42.12963 | 89 | 0.74989 |
88209768adc40766b4eb56ddccfe9cdaf5de8651 | 2,530 | py | Python | flask-example/data-server/A.py | YJDoc2/Kerberos-Examples | cc5bb8fcbec52ab84e6f0e88d0843f8c564f0689 | [
"MIT"
] | null | null | null | flask-example/data-server/A.py | YJDoc2/Kerberos-Examples | cc5bb8fcbec52ab84e6f0e88d0843f8c564f0689 | [
"MIT"
] | null | null | null | flask-example/data-server/A.py | YJDoc2/Kerberos-Examples | cc5bb8fcbec52ab84e6f0e88d0843f8c564f0689 | [
"MIT"
] | null | null | null | import json
from flask import Flask, render_template, redirect, Response, jsonify,request
from flask_cors import CORS
from Kerberos import Server,Server_Error
app = Flask(__name__, static_folder='./static', static_url_path='/')
cors = CORS(app)
#! This server uses distinct routes for different type of requests
#? We build our Kerberos server (not the HTTP server) from the ticket generated by the TGS,
#? copied from there and saved in the Tickets folder here.
server = Server.make_server_from_db('A',check_rand=True)
#* The mock database
book_data = ['Gravitation','Clean Code']
app.run(host='0.0.0.0', port='5001', debug=True) | 38.923077 | 110 | 0.679447 |
8820ecc0654f8927cee2ae38d218e22ba45c5793 | 3,050 | py | Python | scripts/computeDice.py | STORM-IRIT/pcednet-supp | 68d2a2a62bfb7b450bf241c2251ee3bb99d18c7e | [
"CC-BY-3.0"
] | 7 | 2022-01-28T14:59:11.000Z | 2022-03-17T05:09:28.000Z | scripts/computeDice.py | STORM-IRIT/pcednet-supp | 68d2a2a62bfb7b450bf241c2251ee3bb99d18c7e | [
"CC-BY-3.0"
] | 4 | 2021-11-18T13:50:21.000Z | 2022-02-25T15:10:06.000Z | scripts/computeDice.py | STORM-IRIT/pcednet-supp | 68d2a2a62bfb7b450bf241c2251ee3bb99d18c7e | [
"CC-BY-3.0"
] | null | null | null | import sys, glob
from os import listdir, remove
from os.path import dirname, join, isfile, abspath
from io import StringIO
import numpy as np
import utilsmodule as um
script_path = dirname(abspath(__file__))
datasetPath = join(script_path,"data/")
e = 'shrec'
### Compute the dice coefficient used in Table 1,
# E Moscoso Thompson, G Arvanitis, K Moustakas, N Hoang-Xuan, E R Nguyen, et al..
# SHREC19track: Feature Curve Extraction on Triangle Meshes.
# 12th EG Workshop 3D Object Retrieval 2019,May 2019, Gnes, Italy.
print (" Processing experiment " + e)
# Fields loaded from the file
input_file_fields = ['Precision', 'Recall', 'MCC', 'TP', 'FP', 'TN', 'FN']
# Expected range for the fields (used to compute the histogram bins)
input_fields_range = [(0,1), (0,1), (-1,1), (0,1), (0,1), (0,1), (0,1)]
input_fields_bins = []
# Functions used to summarize a field for the whole dataset
input_fied_summary = {
"median": lambda buf: np.nanmedian(buf),
"mean": lambda buf: np.nanmean(buf)
}
experimentPath = join(datasetPath, e)
experimentFile = join(script_path,"../assets/js/data_" + e + ".js")
approaches = [f for f in listdir(experimentPath) if isfile(join(experimentPath, f))]
# Data loaded from the file
rawdata = dict()
# Number of samples (3D models) used in this experiment
nbsamples = 0
# Load data
for a in approaches:
if a.endswith(".txt"):
aname = a[:-4]
apath = join(experimentPath,a)
# Load and skip comments, empty lines
lines = [item.split() for item in tuple(open(apath, 'r')) if not item[0].startswith('#') or item == '']
nbsamples = len(lines)
# Current layout: lines[lineid][columnid]
# Reshape so we have columns[columnid][lineid]
rawdata[aname] = np.swapaxes( lines, 0, 1 )
# Convert array of str to numpy array of numbers
converter = lambda x:np.fromstring(', '.join(x) , dtype = np.float, sep =', ' )
rawdata[aname] = list(map(converter,rawdata[aname]))
print (" Loaded methods " + str(rawdata.keys()))
for method, data in rawdata.items():
precision = data[0]
recall = data[1]
tp = data[3]
fp = data[4]
tn = data[5]
fn = data[6]
# Compute dice
dice = (2.*tp) / (2.*tp + fn + fp)
#dice = data[2]
data.append(dice)
# Now print the latex table header
for method, data in rawdata.items():
print (method + " & ", end = '')
print("\\\\ \n \hline")
# Find max value per model
maxid = []
for i in range (0,nbsamples):
vmax = 0.
mmax = 0
m = 0
for method, data in rawdata.items():
if data[7][i] > vmax:
vmax = data[7][i]
mmax = m
m = m+1
maxid.append(mmax)
# Now print the latex table content
for i in range (0,nbsamples):
m = 0
for method, data in rawdata.items():
# print ( str(data[:-1][i]) + " & " )
valstr = "{:.2f}".format(data[7][i])
if maxid[i] == m:
valstr = "\\textbf{" + valstr + "}"
print ( valstr + " & " , end = '')
m = m+1
print("\\\\ \n \hline")
| 26.99115 | 107 | 0.615738 |
8821e5692a8f5f25d979cb717b556c74dc17abc9 | 23 | py | Python | pyfirebase/__init__.py | andela-cnnadi/python-fire | 11868007a7ff7fec45ed87cec18466e351cdb5ab | [
"MIT"
] | 14 | 2016-08-31T06:24:33.000Z | 2019-12-12T11:23:21.000Z | pyfirebase/__init__.py | andela-cnnadi/python-fire | 11868007a7ff7fec45ed87cec18466e351cdb5ab | [
"MIT"
] | 2 | 2016-09-16T12:40:51.000Z | 2016-12-27T06:26:39.000Z | pyfirebase/__init__.py | andela-cnnadi/python-fire | 11868007a7ff7fec45ed87cec18466e351cdb5ab | [
"MIT"
] | 5 | 2016-08-30T21:16:32.000Z | 2020-11-05T20:39:52.000Z | from firebase import *
| 11.5 | 22 | 0.782609 |
8823f9acf9979c7b6037b4ffb5d08ae416a7a660 | 1,094 | py | Python | plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/integrations/mlflow_example.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/integrations/mlflow_example.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/integrations/mlflow_example.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | import logging
from random import randint, random
from mlflow import (
active_run,
end_run,
get_tracking_uri,
log_metric,
log_param,
start_run,
)
from mlflow.tracking import MlflowClient
from dbnd import task
logger = logging.getLogger(__name__)
#
# from dbnd_task
# @task
# def mlflow_example():
# pass
if __name__ == "__main__":
    # NOTE(review): mlflow_example is only present as commented-out code
    # above, so running this module directly raises NameError unless the
    # function is defined elsewhere -- confirm before use.
    mlflow_example()
| 20.259259 | 75 | 0.662706 |
88256ef9dd2f406213232667e9ac4bf44aae2ebc | 312 | py | Python | src/models/functions/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 3 | 2022-02-03T13:25:12.000Z | 2022-02-04T16:12:23.000Z | src/models/functions/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | null | null | null | src/models/functions/__init__.py | takedarts/skipresnet | d6f1e16042f8433a287355009e17e4e5768ad319 | [
"MIT"
] | 1 | 2022-02-04T12:28:02.000Z | 2022-02-04T12:28:02.000Z | from .channelpad import channelpad
from .conv2d_same import conv2d_same
from .padding import get_same_padding, pad_same
from .shakedrop import shakedrop
from .sigaug import signal_augment
from .sigmoid import h_sigmoid
from .stack import adjusted_concat, adjusted_stack
from .swish import h_swish, swish
| 34.666667 | 51 | 0.826923 |
8826f802253e79fbdf200b9f603f2a1bd96164e1 | 2,673 | py | Python | notes/conditionals/if_blocks.py | mcorley-gba/IntroCS21-22 | a823e17f2cb618be0e67468cb15f48873ae85152 | [
"MIT"
] | null | null | null | notes/conditionals/if_blocks.py | mcorley-gba/IntroCS21-22 | a823e17f2cb618be0e67468cb15f48873ae85152 | [
"MIT"
] | null | null | null | notes/conditionals/if_blocks.py | mcorley-gba/IntroCS21-22 | a823e17f2cb618be0e67468cb15f48873ae85152 | [
"MIT"
] | null | null | null | #Conditional Tests HW - Due Monday
# 13 Tests --> 1 True and 1 False for each
#If Statements
#Simplest structure of an if statement:
# if conditional_test:
#     do something <-- Instructions/commands
#my_age = 13
#if my_age >= 18:
#    print("You are old enough to vote.")
#    print("Are you registered to vote?")
#Unindent!
#Indentation plays the same role for if-statements
#as it did for 'for' loops. Anything indented will be
#executed whenever the conditional test is true. Anything
#indented will be skipped whenever the conditional test is
#false.
#USE CAUTION - Don't forget to un-indent when you are finished
#with your if-block.
#Often we want one action if the conditional test is True,
#but a different action whenever it is False.
my_age = 33
if my_age >= 18:
    print("You are old enough to vote.")
    print("Are you registered to vote?")
else: #Catches any instance in which the above test fails
    print("You are not old enough to vote.")
    print("Please register to vote when you turn 18.")
#The if-else structure works very well in situations in which Python
#needs to always execute one of two possible actions.
#In a simple if-else block, exactly one of the two will always be executed.
#if-elif-else Chain
#Python will only execute one block in an if-elif-else chain.
#As soon as one test passes, Python executes that block
#and skips the rest (even if they might also be true).
#Example: Admission to a theme park:
#Four price levels:
#Under 4 --> Free
#between 4 and 18 --> $25
#18 to 65 --> $40
#65 and older --> $20
age = 66
if age < 4:
    price = 0
elif age < 18: #elif = else+if --> if the above test(s) is(are) false,
               #try this test next
    price = 25
elif age < 65:
    price = 40
#We can have more than one elif statement
elif age >= 65:
    price = 20
#The catch-all 'else' statement is no longer needed.
#If you have a definite condition for the last block of an if-elif-else,
#use an elif statement with a definite conditional test. If you don't have a
#definite condition in mind for the last layer of an if-elif-else block,
#else works fine (unless you don't really need it).
print(f"Your admission cost is ${price}")
#Think about the structure of your if-elif-else blocks,
#especially when the tests overlap.
#The purpose of the above code was to determine the cost for the user.
#Multiple conditions.
#Independent if statements (no elif/else): every test is evaluated,
#so several of them can run for the same input.
requested_toppings = ['mushrooms','extra cheese']
if 'mushrooms' in requested_toppings:
    print("Adding mushrooms.")
if 'pepperoni' in requested_toppings:
    print("Adding pepperoni")
if 'extra cheese' in requested_toppings:
    print("Adding extra cheese")
print("Finished making pizza!")
| 29.373626 | 76 | 0.716423 |
8828b21c1d7aa3ef1f1b5b77da67057776db662c | 3,798 | py | Python | make_histogram.py | hijinks/python-bcet | 3e2fac66c82fb3f1c02e8e19153f5e3e97f57aca | [
"MIT"
] | null | null | null | make_histogram.py | hijinks/python-bcet | 3e2fac66c82fb3f1c02e8e19153f5e3e97f57aca | [
"MIT"
] | null | null | null | make_histogram.py | hijinks/python-bcet | 3e2fac66c82fb3f1c02e8e19153f5e3e97f57aca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# BCET Workflow
#
# Builds smoothed, normalised per-band value histograms for two sets of
# LANDSAT rasters (raw digital numbers vs. BCET contrast-stretched output)
# and writes them to a two-panel figure.
#
# Usage: make_histogram.py <bcet_dir> <no_bcet_dir> <output_figure>
__author__ = 'Sam Brooke'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Sam Brooke'
__email__ = "sbrooke@tuta.io"
import os
import georasters as gr
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser
import fnmatch
import re
# scipy.interpolate.spline was removed in SciPy 1.2; make_interp_spline is
# the supported replacement (cubic by default, like the old spline()).
from scipy.interpolate import make_interp_spline
# Line colours and legend labels per LANDSAT band (parsed from file names)
colours = {
    'B1':'lightblue',
    'B2':'blue',
    'B3':'green',
    'B4':'red',
    'B5':'firebrick',
    'B6':'grey',
    'B7':'k'
}
band_labels = {
    'B1':'Band 1 - Ultra Blue',
    'B2':'Band 2 - Blue',
    'B3':'Band 3 - Green',
    'B4':'Band 4 - Red',
    'B5':'Band 5 - NIR',
    'B6':'Band 6 - SWIR 1',
    'B7':'Band 7 - SWIR 2'
}
def find_tifs(directory):
    """Return paths of all *.tif files below *directory* (recursive)."""
    matches = []
    for root, dirnames, filenames in os.walk(directory):
        for filename in fnmatch.filter(filenames, '*.tif'):
            matches.append(os.path.join(root, filename))
    return matches
def plot_band_histograms(ax, rasters, bins):
    """Plot a smoothed, normalised value histogram for each band raster.

    The band name (B1..B7) is parsed from the file name and selects the
    line colour and legend label. Frequencies are normalised by the total
    pixel count, and the binned histogram is smoothed with a cubic spline
    evaluated at 300 points.
    """
    for raster in rasters:
        base = os.path.basename(raster)
        band_name = re.search(r"B[0-9]+", base).group()
        data = gr.from_file(raster)  # GeoRaster object
        samples = data.raster.ravel()
        hist, edges = np.histogram(samples, bins=bins)
        hist_norm = hist.astype(float) / len(samples)
        centers = (edges[:-1] + edges[1:]) / 2
        smooth_x = np.linspace(centers.min(), centers.max(), 300)
        smooth_y = make_interp_spline(centers, hist_norm)(smooth_x)
        ax.plot(smooth_x, smooth_y, color=colours[band_name],
                label=band_labels[band_name])
parser = OptionParser()
(options, args) = parser.parse_args()
# args[0] for bcet_directory
# args[1] for no_bcet_directory
# args[2] for the output figure path
bcet_directory = args[0] if os.path.isdir(args[0]) else False
no_bcet_directory = args[1] if os.path.isdir(args[1]) else False
output = args[2]
bcet_matches = find_tifs(bcet_directory)
print(bcet_matches)
no_bcet_matches = find_tifs(no_bcet_directory)
print(no_bcet_matches)
# Two stacked panels: raw digital numbers on top, BCET output below
fig, axarr = plt.subplots(2, sharex=False)
fig.set_size_inches(25.0 / 2.54, 20.0 / 2.54)  # 25cm x 20cm
plot_band_histograms(axarr[0], no_bcet_matches, bins=50)  # raw DN range
plot_band_histograms(axarr[1], bcet_matches, bins=25)     # BCET output is 0-255
axarr[0].set_xlim([0, 25000])
axarr[1].set_xlim([0, 255])
axarr[0].set_ylim([0, 0.5])
axarr[1].set_ylim([0, 0.5])
axarr[0].set_xlabel('R')
axarr[1].set_xlabel('R*')
axarr[0].set_ylabel('f')
axarr[1].set_ylabel('f')
axarr[0].set_title('LANDSAT (White Mountains ROI) 2014-02-25 Unmodified Histogram')
axarr[1].set_title('LANDSAT (White Mountains ROI) 2014-02-25 BCET Histogram')
axarr[0].legend()
axarr[1].legend()
# Honour the requested output path (previously read into `output` but the
# figure was always written to the hard-coded 'histograms.pdf').
plt.savefig(output)
| 27.926471 | 120 | 0.700105 |
882c6ccf86c14fc6738ccff54229e3586e042456 | 1,989 | py | Python | subsurfaceCollabor8/frame_utils.py | digitalcollaboration-collabor8/subsurfaceSampleClient | f5009f3c7740d718392c0a9a2ec6179a51fd28cf | [
"Apache-2.0"
] | null | null | null | subsurfaceCollabor8/frame_utils.py | digitalcollaboration-collabor8/subsurfaceSampleClient | f5009f3c7740d718392c0a9a2ec6179a51fd28cf | [
"Apache-2.0"
] | 3 | 2021-03-08T08:32:39.000Z | 2021-07-29T08:56:49.000Z | subsurfaceCollabor8/frame_utils.py | digitalcollaboration-collabor8/subsurfaceSampleClient | f5009f3c7740d718392c0a9a2ec6179a51fd28cf | [
"Apache-2.0"
] | null | null | null | from pandas import DataFrame
import os
def frame_to_csv(frame:DataFrame,output_file:str,decimal_format=',',
    float_format=None,date_format=None,quote_char='"',no_data_repr='',sep=';'):
    """
    Write a pandas dataframe to *output_file* as CSV.

    Parameters
    ----------
    output_file -> path (or writable buffer) to write to
    decimal_format -> decimal separator to use, default ","
    float_format -> format mask to use for floats, default None
    date_format -> format mask for dates, default None
    quote_char -> string quote char, default '"'
    no_data_repr -> how to represent empty columns, default ''
    sep -> field separator, default ';'
    """
    csv_options = dict(
        decimal=decimal_format,
        float_format=float_format,
        date_format=date_format,
        quotechar=quote_char,
        na_rep=no_data_repr,
        sep=sep,
    )
    frame.to_csv(output_file, **csv_options)
def frame_to_csv_str(frame:DataFrame,decimal_format=',',
    float_format=None,date_format=None,quote_char='"',no_data_repr='',sep=';'):
    """
    Return a pandas dataframe serialised as a CSV-formatted string.

    Parameters
    ----------
    decimal_format -> decimal separator to use, default ","
    float_format -> format mask to use for floats, default None
    date_format -> format mask for dates, default None
    quote_char -> string quote char, default '"'
    no_data_repr -> how to represent empty columns, default ''
    sep -> field separator, default ';'
    """
    csv_options = dict(
        decimal=decimal_format,
        float_format=float_format,
        date_format=date_format,
        quotechar=quote_char,
        na_rep=no_data_repr,
        sep=sep,
    )
    # Passing None as the target makes to_csv return the text instead of
    # writing to a file.
    return frame.to_csv(None, **csv_options)
def frame_to_excel(frame:DataFrame,output_file:str,
    float_format=None,no_data_rep='',sheetName='Sheet1'):
    """
    Write a pandas data frame to an Excel file.

    Parameters
    ----------
    output_file -> path to file to write to
    float_format -> format mask for floats e.g. '%.2f' formats to 2 decimals, default None
    no_data_rep -> how empty columns should be represented, default ''
    sheetName -> worksheet name, default 'Sheet1'
    """
    frame.to_excel(
        output_file,
        sheet_name=sheetName,
        float_format=float_format,
        na_rep=no_data_rep,
    )
| 33.15 | 94 | 0.712418 |
882fe548d37c7d24cd165d77437c6ae5e4632773 | 1,562 | py | Python | integrationtest/vm/ha/test_ui_stop.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 1 | 2021-03-21T12:41:11.000Z | 2021-03-21T12:41:11.000Z | integrationtest/vm/ha/test_ui_stop.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/ha/test_ui_stop.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 1 | 2017-05-19T06:40:40.000Z | 2017-05-19T06:40:40.000Z | '''
Integration Test for HA mode with UI stop on one node.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
node_ip = None
#Will be called only if exception happens in test().
| 37.190476 | 97 | 0.733675 |
8832c2e7a770e80b777ea2950239b133e7919983 | 4,736 | py | Python | scope/device/andor/lowlevel.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 1 | 2017-11-10T17:23:11.000Z | 2017-11-10T17:23:11.000Z | scope/device/andor/lowlevel.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 5 | 2018-08-01T03:05:35.000Z | 2018-11-29T22:11:25.000Z | scope/device/andor/lowlevel.py | drew-sinha/rpc-scope | 268864097b5b7d123a842f216adc446ec6b32d01 | [
"MIT"
] | 3 | 2016-05-25T18:58:35.000Z | 2018-11-29T23:40:45.000Z | # This code is licensed under the MIT License (see LICENSE file for details)
import ctypes
import atexit
# import all the autogenerated functions and definitions
# note: also pulls in common which provides AndorError and several other constants
from . import wrapper
from .wrapper import *
# Provided for reference purposes, the FeatureStrings list contains all the "feature strings"
# listed in the Andor SDK documentation. The value given for the Feature argument to functions
# provided by this module should be a string appearing in this list.
FeatureStrings = [
'AccumulateCount', # zyla only
'AcquisitionStart',
'AcquisitionStop',
'AOIBinning',
'AOIHBin',
'AOIHeight',
'AOILeft',
'AOIStride',
'AOITop',
'AOIVBin',
'AOIWidth',
'AuxiliaryOutSource',
'Baseline',
'BitDepth',
'BufferOverflowEvent',
'BytesPerPixel',
'CameraAcquiring',
'CameraFamily', # sona only
'CameraModel',
'CameraName',
'CameraPresent',
'CycleMode',
'DeviceCount', # system
'ElectronicShutteringMode',
'EventEnable',
'EventsMissedEvent',
'EventSelector',
'ExposureTime',
'ExposureEndEvent',
'ExposureStartEvent',
'ExternalTriggerDelay',
'FanSpeed',
'FirmwareVersion',
'FrameCount',
'FrameRate',
'FullAOIControl',
'GainMode', # sona only
'ImageSizeBytes',
'InterfaceType',
'IOInvert',
'IOSelector',
'LogLevel', # system
'LUTIndex',
'LUTValue',
'MaxInterfaceTransferRate',
'MetadataEnable',
'MetadataFrame',
'MetadataTimestamp',
'Overlap',
'PixelEncoding',
'PixelHeight',
'PixelReadoutRate',
'PixelWidth',
'ReadoutTime',
'RollingShutterGlobalClear', # zyla only
'RowNExposureEndEvent',
'RowNExposureStartEvent',
'RowReadTime',
'SensorCooling',
'SensorHeight',
'SensorTemperature',
'SensorWidth',
'SerialNumber',
'SimplePreAmpGainControl', # deprecated on sona
'SoftwareTrigger',
'SoftwareVersion', # system
'SpuriousNoiseFilter',
'StaticBlemishCorrection', # zyla only
'TemperatureControl',
'TemperatureStatus',
'TimestampClock',
'TimestampClockFrequency',
'TimestampClockReset',
'TriggerMode',
'VerticallyCenterAOI'
]
_AT_HANDLE_SYSTEM = 1
def initialize():
    """Initialize the andor libraries.

    Returns a ``(camera_name, software_version)`` tuple.

    NOTE(review): relies on module-level helpers (_init_core_lib,
    _init_util_lib, _init_camera, _string_for_handle) defined elsewhere in
    this module; presumably they load the Andor core/util shared libraries
    and open the camera handle -- confirm against the full source.
    """
    _init_core_lib()
    _init_util_lib()
    camera_name = _init_camera()
    software_version = _string_for_handle(_AT_HANDLE_SYSTEM, 'SoftwareVersion')
    return camera_name, software_version
88398ae2c4128d5752a93cafe6efa48eb9858180 | 3,718 | py | Python | tests/integration/questionnaire/test_questionnaire_save_sign_out.py | uk-gov-mirror/ONSdigital.eq-survey-runner | b3a67a82347d024177f7fa6bf05499f47ece7ea5 | [
"MIT"
] | 27 | 2015-10-02T17:27:54.000Z | 2021-04-05T12:39:16.000Z | tests/integration/questionnaire/test_questionnaire_save_sign_out.py | uk-gov-mirror/ONSdigital.eq-survey-runner | b3a67a82347d024177f7fa6bf05499f47ece7ea5 | [
"MIT"
] | 1,836 | 2015-09-16T09:59:03.000Z | 2022-03-30T14:27:06.000Z | tests/integration/questionnaire/test_questionnaire_save_sign_out.py | uk-gov-mirror/ONSdigital.eq-survey-runner | b3a67a82347d024177f7fa6bf05499f47ece7ea5 | [
"MIT"
] | 20 | 2016-09-09T16:56:12.000Z | 2021-11-12T06:09:27.000Z | from app.validation.error_messages import error_messages
from tests.integration.integration_test_case import IntegrationTestCase
| 35.75 | 149 | 0.655998 |
883ab54b46f93c6809f1bedd1cd71a0ee4774d4e | 16,479 | py | Python | model/UniGNN.py | czc567/UniGNN | bbb061f393b847ff6c7c20cab9e1ecb8f1c3eb96 | [
"MIT"
] | null | null | null | model/UniGNN.py | czc567/UniGNN | bbb061f393b847ff6c7c20cab9e1ecb8f1c3eb96 | [
"MIT"
] | null | null | null | model/UniGNN.py | czc567/UniGNN | bbb061f393b847ff6c7c20cab9e1ecb8f1c3eb96 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn, torch.nn.functional as F
from torch.nn.parameter import Parameter
import math
from torch_scatter import scatter
from torch_geometric.utils import softmax
# NOTE: can not tell which implementation is better statistically
def normalize_l2(X):
    """Scale every row of ``X`` to unit L2 norm; all-zero rows stay zero.

    The norms are computed on a detached view, so gradients flow only
    through the final multiplication by ``X``. Returns a new tensor.
    """
    norms = X.detach().norm(dim=1, keepdim=True)
    inv = norms.reciprocal()
    # Rows with norm 0 produce inf reciprocals; map those to a 0 scale.
    inv[torch.isinf(inv)] = 0.
    return X * inv
# v1: X -> XW -> AXW -> norm
# v1: X -> XW -> AXW -> norm
# v1: X -> XW -> AXW -> norm
# v2: X -> AX -> norm -> AXW
__all_convs__ = {
'UniGAT': UniGATConv,
'UniGCN': UniGCNConv,
'UniGCN2': UniGCNConv2,
'UniGIN': UniGINConv,
'UniSAGE': UniSAGEConv,
}
| 32.311765 | 141 | 0.550155 |
883dc5b6a2c7b8f6d8eeeaa196713dc1735f14e3 | 23,515 | py | Python | emolog_pc/emolog/emotool/main.py | alon/emolog | ed6e9e30a46ffc04282527ee73aa3bb8605e2dc9 | [
"MIT"
] | null | null | null | emolog_pc/emolog/emotool/main.py | alon/emolog | ed6e9e30a46ffc04282527ee73aa3bb8605e2dc9 | [
"MIT"
] | 2 | 2019-01-29T15:27:34.000Z | 2021-03-06T20:00:16.000Z | emolog_pc/emolog/emotool/main.py | alon/emolog | ed6e9e30a46ffc04282527ee73aa3bb8605e2dc9 | [
"MIT"
] | 1 | 2019-01-03T18:44:54.000Z | 2019-01-03T18:44:54.000Z | #!/bin/env python3
# import os
# os.environ['PYTHONASYNCIODEBUG'] = '1'
# import logging
# logging.getLogger('asyncio').setLevel(logging.DEBUG)
from datetime import datetime
import traceback
import atexit
import argparse
import os
from os import path
import sys
import logging
from struct import pack
import random
from time import time, sleep, perf_counter
from socket import socket
from configparser import ConfigParser
from shutil import which
from asyncio import sleep, Protocol, get_event_loop, Task
from pickle import dumps
import csv
from ..consts import BUILD_TIMESTAMP_VARNAME
from ..util import version, resolve, create_process, kill_all_processes, gcd
from ..util import verbose as util_verbose
from ..lib import AckTimeout, ClientProtocolMixin, SamplerSample
from ..varsfile import merge_vars_from_file_and_list
from ..dwarfutil import read_elf_variables
logger = logging.getLogger()
module_dir = os.path.dirname(os.path.realpath(__file__))
pc_dir = os.path.join(module_dir, '..', '..', '..', 'examples', 'pc_platform')
pc_executable = os.path.join(pc_dir, 'pc')
def start_serial_process(serialurl, baudrate, hw_flow_control, port):
    """
    Block until serial2tcp is ready to accept a connection

    Launches the serial2tcp bridge as a subprocess forwarding the serial
    device at *serialurl* (at *baudrate*) to TCP *port*; '-r' enables
    hardware flow control.

    NOTE(review): create_python_process_cmdline / create_process are defined
    elsewhere in this module or package; despite the summary above, nothing
    here visibly waits for the listener to become ready -- confirm.
    """
    serial2tcp_cmd = create_python_process_cmdline('serial2tcp.py')
    if hw_flow_control is True:
        serial2tcp_cmd += ['-r']
    serial2tcp_cmd += ' -b {} -p {} -P {}'.format(baudrate, serialurl, port).split()
    serial_subprocess = create_process(serial2tcp_cmd)
    return serial_subprocess
args = None
if sys.platform == 'win32':
try_getch_message = "Press any key to stop capture early..."
try_getch = windows_try_getch
else:
try_getch_message = "Press Ctrl-C to stop capture early..."
def bandwidth_calc(args, variables):
    """
    :param variables: list of dictionaries
    :return: average baud rate (considering 8 data bits, 1 start & stop bits)

    Each variable dict is expected to carry 'period_ticks' and 'size'
    (bytes per sample) keys -- confirm against the callers. The estimate
    adds one sampler-packet header per tick (an upper bound) to the
    per-variable payload rates, then multiplies the byte rate by 10 to
    account for 8N1 serial framing.
    """
    packets_per_second = args.ticks_per_second # simplification: assume a packet every tick (upper bound)
    header_average = packets_per_second * SamplerSample.empty_size()
    payload_average = sum(args.ticks_per_second / v['period_ticks'] * v['size'] for v in variables)
    return (header_average + payload_average) * 10
CONFIG_FILE_NAME = 'local_machine_config.ini'
def reasonable_timestamp_ms(timestamp):
    """
    Check that *timestamp* looks like a plausible build time in milliseconds.

    A plausible timestamp is strictly positive and within roughly 100 years
    of the epoch; a random value read from target memory is therefore very
    unlikely to be accepted, which allows a clearer error message upstream.

    Fix: the original accepted negative values (it only tested ``!= 0`` plus
    an upper bound); a negative ms-since-epoch is never a valid build time.
    """
    # ~100 years in milliseconds (leap years ignored, as before)
    max_ms = 1000 * 3600 * 24 * 365 * 100
    return 0 < timestamp < max_ms
def check_timestamp(params, elf_variables):
    """Verify the build timestamp read from the target matches the ELF file.

    :param params: values read back from the embedded target; must contain
        BUILD_TIMESTAMP_VARNAME
    :param elf_variables: variable records parsed from the ELF; the timestamp
        entry must have a non-zero address and an init value
    :raises SystemExit: on any missing, malformed or mismatching timestamp
    """
    if BUILD_TIMESTAMP_VARNAME not in params:
        logger.error('timestamp not received from target')
        raise SystemExit
    read_value = int(params[BUILD_TIMESTAMP_VARNAME])
    if BUILD_TIMESTAMP_VARNAME not in elf_variables:
        logger.error('Timestamp variable not in ELF file. Did you add a pre-build step to generate it?')
        raise SystemExit
    elf_var = elf_variables[BUILD_TIMESTAMP_VARNAME]
    elf_value = elf_var['init_value']
    if elf_value is None or elf_var['address'] == 0:
        logger.error('Bad timestamp variable in ELF: init value = {value}, address = {address}'.format(value=elf_value, address=elf_var["address"]))
        raise SystemExit
    elf_value = int(elf_variables[BUILD_TIMESTAMP_VARNAME]['init_value'])
    if read_value != elf_value:
        # An implausible value usually means the target has no timestamp
        # variable at all (arbitrary memory was read), so report that case
        # separately before comparing older/newer.
        if not reasonable_timestamp_ms(read_value):
            logger.error("Build timestamp mismatch: the embedded target probably doesn't contain a timestamp variable")
            raise SystemExit
        if read_value < elf_value:
            logger.error('Build timestamp mismatch: target build timestamp is older than ELF')
        else:
            logger.error('Build timestamp mismatch: target build timestamp is newer than ELF')
        raise SystemExit
    print("Timestamp verified: ELF file and embedded target match")
if __name__ == '__main__':
main()
| 39.991497 | 180 | 0.671444 |
8840790f6010bda703bcb00bd31eb62706621ff5 | 799 | py | Python | Workflows/pyOpenMS/OpenSwathFeatureXMLToTSV_basics.py | Leon-Bichmann/Tutorials | c9183f98b162833cbca59e6d71a5ae3cc4375b31 | [
"BSD-3-Clause"
] | null | null | null | Workflows/pyOpenMS/OpenSwathFeatureXMLToTSV_basics.py | Leon-Bichmann/Tutorials | c9183f98b162833cbca59e6d71a5ae3cc4375b31 | [
"BSD-3-Clause"
] | null | null | null | Workflows/pyOpenMS/OpenSwathFeatureXMLToTSV_basics.py | Leon-Bichmann/Tutorials | c9183f98b162833cbca59e6d71a5ae3cc4375b31 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def main(options):
    # test parameter handling
    # NOTE(review): Python 2 print statement -- this script targets Python 2.
    # Placeholder body: echoes the parsed arguments instead of performing the
    # featureXML -> mProphet TSV conversion the header describes.
    print options.infile, options.traml_in, options.outfile
def handle_args():
    # Parse command line arguments and return the argparse namespace with
    # `infile` (featureXML input), `traml_in` (TraML transitions input) and
    # `outfile` (TSV output). All three are optional flags at parse time.
    import argparse
    usage = ""
    usage += "\nOpenSwathFeatureXMLToTSV Converts a featureXML to a mProphet tsv."
    parser = argparse.ArgumentParser(description = usage )
    parser.add_argument('-in', dest='infile', help = 'An input file containing features [featureXML]')
    parser.add_argument('-tr', dest='traml_in', help='An input file containing the transitions [TraML]')
    parser.add_argument('-out', dest='outfile', help='Output mProphet TSV file [tsv]')
    args = parser.parse_args(sys.argv[1:])
    return args
if __name__ == '__main__':
options = handle_args()
main(options)
| 799 | 799 | 0.68836 |
88409ac26b662efb26f26a13bba8f1ae10c3260d | 487 | py | Python | test.py | exc4l/kanjigrid | 9e7dc0dadb578fc7ee4129aca5abf0a3767bc6dd | [
"MIT"
] | 1 | 2021-03-23T14:10:59.000Z | 2021-03-23T14:10:59.000Z | test.py | exc4l/kanjigrid | 9e7dc0dadb578fc7ee4129aca5abf0a3767bc6dd | [
"MIT"
] | null | null | null | test.py | exc4l/kanjigrid | 9e7dc0dadb578fc7ee4129aca5abf0a3767bc6dd | [
"MIT"
] | null | null | null | import kanjigrid
gridder = kanjigrid.Gridder("Kanji", 40, "Header", 52)
grading = kanjigrid.Jouyou()
with open("test.txt", "r", encoding="utf-8") as f:
data = f.read()
gridder.feed_text(data)
grid = gridder.make_grid(grading, outside_of_grading=True, stats=True, bar_graph=True)
grid.save("test.png")
if "" in grading.get_all_in_grading():
print("")
if "" in grading.get_all_in_grading():
print("")
if "" in grading.get_all_in_grading():
print(" as replacement") | 28.647059 | 86 | 0.702259 |
8843966a1736b059d72b2035589a76126f469706 | 12,738 | py | Python | spectrl/rl/ars_discrete.py | luigiberducci/dirl | 5f7997aea20dfb7347ebdee66de9bea4e6cd3c62 | [
"MIT"
] | 6 | 2021-11-11T00:29:18.000Z | 2022-03-18T13:56:51.000Z | spectrl/rl/ars_discrete.py | luigiberducci/dirl | 5f7997aea20dfb7347ebdee66de9bea4e6cd3c62 | [
"MIT"
] | null | null | null | spectrl/rl/ars_discrete.py | luigiberducci/dirl | 5f7997aea20dfb7347ebdee66de9bea4e6cd3c62 | [
"MIT"
] | 4 | 2021-11-26T03:11:02.000Z | 2022-01-13T02:32:29.000Z | import torch
import numpy as np
import time
from spectrl.util.rl import get_rollout, test_policy
def ars(env, nn_policy, params):
'''
Run augmented random search.
Parameters:
env: gym.Env (state is expected to be a pair (np.array, int))
Also expected to provide cum_reward() function.
nn_policy: NNPolicy
params: ARSParams
'''
best_policy = nn_policy
best_success_rate = 0
best_reward = -1e9
log_info = []
num_steps = 0
start_time = time.time()
# Step 1: Save original policy
nn_policy_orig = nn_policy
# Step 2: Initialize state distribution estimates
mu_sum = np.zeros(nn_policy.params.state_dim)
sigma_sq_sum = np.ones(nn_policy.params.state_dim) * 1e-5
n_states = 0
# Step 3: Training iterations
for i in range(params.n_iters):
# Step 3a: Sample deltas
deltas = []
for _ in range(params.n_samples):
# i) Sample delta
delta = _sample_delta(nn_policy)
# ii) Construct perturbed policies
nn_policy_plus = _get_delta_policy(
nn_policy, delta, params.delta_std)
nn_policy_minus = _get_delta_policy(
nn_policy, delta, -params.delta_std)
# iii) Get rollouts
sarss_plus = get_rollout(env, nn_policy_plus, False)
sarss_minus = get_rollout(env, nn_policy_minus, False)
num_steps += (len(sarss_plus) + len(sarss_minus))
# iv) Estimate cumulative rewards
r_plus = env.cum_reward(
np.array([state for state, _, _, _ in sarss_plus]))
r_minus = env.cum_reward(
np.array([state for state, _, _, _ in sarss_minus]))
# v) Save delta
deltas.append((delta, r_plus, r_minus))
# v) Update estimates of normalization parameters
states = np.array([nn_policy.get_input(state)
for state, _, _, _ in sarss_plus + sarss_minus])
mu_sum += np.sum(states)
sigma_sq_sum += np.sum(np.square(states))
n_states += len(states)
# Step 3b: Sort deltas
deltas.sort(key=lambda delta: -max(delta[1], delta[2]))
deltas = deltas[:params.n_top_samples]
# Step 3c: Compute the sum of the deltas weighted by their reward differences
delta_sum = [torch.zeros(delta_cur.shape)
for delta_cur in deltas[0][0]]
for j in range(params.n_top_samples):
# i) Unpack values
delta, r_plus, r_minus = deltas[j]
# ii) Add delta to the sum
for k in range(len(delta_sum)):
delta_sum[k] += (r_plus - r_minus) * delta[k]
# Step 3d: Compute standard deviation of rewards
sigma_r = np.std([delta[1] for delta in deltas] +
[delta[2] for delta in deltas])
# Step 3e: Compute step length
delta_step = [(params.lr * params.delta_std / (params.n_top_samples * sigma_r + 1e-8))
* delta_sum_cur
for delta_sum_cur in delta_sum]
# Step 3f: Update policy weights
nn_policy = _get_delta_policy(nn_policy, delta_step, 1.0)
# Step 3g: Update normalization parameters
nn_policy.mu = mu_sum / n_states
nn_policy.sigma_inv = 1.0 / np.sqrt((sigma_sq_sum / n_states))
# Step 3h: Logging
if i % params.log_interval == 0:
exp_cum_reward, success_rate = test_policy(env, nn_policy, 100, use_cum_reward=True)
current_time = time.time() - start_time
print('\nSteps taken after iteration {}: {}'.format(i, num_steps))
print('Reward after iteration {}: {}'.format(i, exp_cum_reward))
print('Success rate after iteration {}: {}'.format(i, success_rate))
print('Time after iteration {}: {} mins'.format(i, current_time/60))
log_info.append([num_steps, current_time/60, exp_cum_reward, success_rate])
# save best policy
if success_rate > best_success_rate or (success_rate == best_success_rate
and exp_cum_reward >= best_reward):
best_policy = nn_policy
best_success_rate = success_rate
best_reward = exp_cum_reward
if success_rate > 80 and exp_cum_reward > 0:
params.lr = max(params.lr/2, params.min_lr)
nn_policy = best_policy
# Step 4: Copy new weights and normalization parameters to original policy
for param, param_orig in zip(nn_policy.parameters(), nn_policy_orig.parameters()):
param_orig.data.copy_(param.data)
nn_policy_orig.mu = nn_policy.mu
nn_policy_orig.sigma_inv = nn_policy.sigma_inv
return log_info
def _sample_delta(nn_policy):
'''
Construct random perturbations to neural network parameters.
nn_policy: NNPolicy or NNPolicySimple
Returns: [torch.tensor] (list of torch tensors that is the same shape as nn_policy.parameters())
'''
delta = []
for param in nn_policy.parameters():
delta.append(torch.normal(torch.zeros(param.shape, dtype=torch.float)))
return delta
def _get_delta_policy(nn_policy, delta, sign):
    '''
    Construct the policy perturbed by the given delta

    Parameters:
        nn_policy: NNPolicy or NNPolicySimple
        delta: [torch.tensor] (list of torch tensors with same shape as nn_policy.parameters())
        sign: float -- direction/scale applied to the delta

    Returns: NNPolicy or NNPolicySimple
    '''
    # Instantiate a fresh policy of the same architecture. NNPolicySimple is
    # checked first -- the isinstance order matters if the classes are related.
    if isinstance(nn_policy, NNPolicySimple):
        perturbed = NNPolicySimple(nn_policy.params)
    elif isinstance(nn_policy, NNPolicy):
        perturbed = NNPolicy(nn_policy.params)
    else:
        raise Exception("Unrecognized neural network architecture")

    # Carry over the input-normalization statistics unchanged.
    perturbed.mu = nn_policy.mu
    perturbed.sigma_inv = nn_policy.sigma_inv

    # weights <- original weights + sign * delta, element-wise per tensor.
    for src, dst, step in zip(nn_policy.parameters(), perturbed.parameters(), delta):
        dst.data.copy_(src.data + sign * step)
    return perturbed
| 33.87766 | 100 | 0.622861 |
8844d83df31129aa57478e21727d0b2f1ba309a4 | 640 | py | Python | backends/c-scpu/config.py | guoshzhao/antares | 30a6338dd6ce4100922cf26ec515e615b449f76a | [
"MIT"
] | null | null | null | backends/c-scpu/config.py | guoshzhao/antares | 30a6338dd6ce4100922cf26ec515e615b449f76a | [
"MIT"
] | null | null | null | backends/c-scpu/config.py | guoshzhao/antares | 30a6338dd6ce4100922cf26ec515e615b449f76a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import subprocess
| 22.857143 | 111 | 0.673438 |
884592962cd70a31da5127626d57fbccde00c157 | 408 | py | Python | grab-exceptions.py | Plexical/pymod.me | b3bf1a8c6e15fa02d1b58a8f296ba60aae60d18a | [
"MIT"
] | 1 | 2016-06-13T19:17:03.000Z | 2016-06-13T19:17:03.000Z | grab-exceptions.py | Plexical/pymod.me | b3bf1a8c6e15fa02d1b58a8f296ba60aae60d18a | [
"MIT"
] | 3 | 2017-05-10T08:29:20.000Z | 2018-02-07T20:57:16.000Z | grab-exceptions.py | Plexical/pydoc.me | b3bf1a8c6e15fa02d1b58a8f296ba60aae60d18a | [
"MIT"
] | null | null | null | import sys
from pymod import index
from pymod.index import modules
from pymod.mappings import url
out = lambda s: sys.stdout.write(s)
out('{ ')
dom = index.domof('https://docs.python.org/2/library/exceptions.html')
for el in (el for el in dom.findAll('a', {'class': 'headerlink'})
if '-' not in el.attrs['href']):
out("'{}', ".format(el.attrs['href'].split('#exceptions.')[1]))
out('}\n')
| 25.5 | 70 | 0.644608 |
8845d03ee4e193d770ba1a3bdc365691fd17435f | 878 | py | Python | src/10_reactive/db.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | src/10_reactive/db.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | src/10_reactive/db.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | import sqlite3
from collections import namedtuple
from functional import seq
with sqlite3.connect(':memory:') as conn:
conn.execute('CREATE TABLE user (id INT, name TEXT)')
conn.commit()
User = namedtuple('User', 'id name')
seq([(1, 'pedro'), (2, 'fritz')]).to_sqlite3(
conn, 'INSERT INTO user (id, name) VALUES (?, ?)')
seq([(3, 'sam'), (4, 'stan')]).to_sqlite3(conn, 'user')
seq([User(name='tom', id=5), User(name='keiga', id=6)]).to_sqlite3(conn, 'user')
seq([dict(name='david', id=7), User(name='jordan', id=8)]
).to_sqlite3(conn, 'user')
print(list(conn.execute('SELECT * FROM user')))
# [
# (1, 'pedro'), (2, 'fritz'),
# (3, 'sam'), (4, 'stan'),
# (5, 'tom'), (6, 'keiga'),
# (7, 'david'), (8, 'jordan')
# ]
users = seq.sqlite3(conn, 'SELECT * FROM user').to_list()
print(users)
| 31.357143 | 84 | 0.555809 |
88466faa8fda4f9d6ebd884891fe47a03fd3b5e7 | 298 | py | Python | run_wrf/configs/test/config_test_jobsched.py | matzegoebel/run_wrf | cbbbb0ec818416d9b24698d369f70ad9ba8cb801 | [
"BSD-2-Clause"
] | 2 | 2021-01-15T11:05:37.000Z | 2021-01-15T11:05:39.000Z | run_wrf/configs/test/config_test_jobsched.py | matzegoebel/run_wrf | cbbbb0ec818416d9b24698d369f70ad9ba8cb801 | [
"BSD-2-Clause"
] | null | null | null | run_wrf/configs/test/config_test_jobsched.py | matzegoebel/run_wrf | cbbbb0ec818416d9b24698d369f70ad9ba8cb801 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Settings for launch_jobs.py
Test settings for automated tests.
To test run with job scheduler
@author: Matthias Gbel
"""
from run_wrf.configs.test.config_test import *
from copy import deepcopy
params = deepcopy(params)
params["vmem"] = 500
| 16.555556 | 46 | 0.728188 |
8846dbf904bdc1c8ef7bcf560fdf92013cf493ce | 362 | py | Python | Chapter 02/Chap02_Example2.9.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 02/Chap02_Example2.9.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 02/Chap02_Example2.9.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | s1 = "I am a beginner in python \nI will study the concepts to be familiar with this language.\nIt is a very user friendly language"
print("The long string is: \n" + s1) # -- L1
s2 = """The long string is:
I am a beginner in python
I will study the concepts to be familiar with this language.
It is a very user friendly language"""
print(s2) # -- L2
| 40.222222 | 133 | 0.69337 |
884a95c1571d9440eebd2ee5b658dea1bc3ebe28 | 5,177 | py | Python | requirements/docutils-0.18/setup.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | null | null | null | requirements/docutils-0.18/setup.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | null | null | null | requirements/docutils-0.18/setup.py | QuentinTournier40/AnimationFreeCAD | 8eaff8356ec68b948a721b83a6888b652278db8a | [
"Apache-2.0"
] | 1 | 2022-02-03T08:03:30.000Z | 2022-02-03T08:03:30.000Z | #!/usr/bin/env python
# $Id: setup.py 8864 2021-10-26 11:46:55Z grubert $
# Copyright: This file has been placed in the public domain.
from __future__ import print_function
import glob
import os
import sys
try:
from setuptools import setup
except ImportError:
print('Error: The "setuptools" module, which is required for the')
print(' installation of Docutils, could not be found.\n')
print(' You may install it with `python -m pip install setuptools`')
print(' or from a package called "python-setuptools" (or similar)')
print(' using your system\'s package manager.\n')
print(' Alternatively, install a release from PyPi with')
print(' `python -m pip install docutils`.')
sys.exit(1)
package_data = {
'name': 'docutils',
'description': 'Docutils -- Python Documentation Utilities',
'long_description': """\
Docutils is a modular system for processing documentation
into useful formats, such as HTML, XML, and LaTeX. For
input Docutils supports reStructuredText, an easy-to-read,
what-you-see-is-what-you-get plaintext markup syntax.""", # wrap at col 60
'url': 'http://docutils.sourceforge.net/',
'version': '0.18',
'author': 'David Goodger',
'author_email': 'goodger@python.org',
'maintainer': 'docutils-develop list',
'maintainer_email': 'docutils-develop@lists.sourceforge.net',
'license': 'public domain, Python, 2-Clause BSD, GPL 3 (see COPYING.txt)',
'platforms': 'OS-independent',
'python_requires': '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
'include_package_data': True,
'exclude_package_data': {"": ["docutils.conf"]},
'package_dir': {
'docutils': 'docutils',
'docutils.tools': 'tools'
},
'packages': [
'docutils',
'docutils.languages',
'docutils.parsers',
'docutils.parsers.rst',
'docutils.parsers.rst.directives',
'docutils.parsers.rst.languages',
'docutils.readers',
'docutils.transforms',
'docutils.utils',
'docutils.utils.math',
'docutils.writers',
'docutils.writers.html4css1',
'docutils.writers.html5_polyglot',
'docutils.writers.pep_html',
'docutils.writers.s5_html',
'docutils.writers.latex2e',
'docutils.writers.xetex',
'docutils.writers.odf_odt',
],
'scripts': [
'tools/rst2html.py',
'tools/rst2html4.py',
'tools/rst2html5.py',
'tools/rst2s5.py',
'tools/rst2latex.py',
'tools/rst2xetex.py',
'tools/rst2man.py',
'tools/rst2xml.py',
'tools/rst2pseudoxml.py',
'tools/rstpep2html.py',
'tools/rst2odt.py',
'tools/rst2odt_prepstyles.py',
],
'classifiers': [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Other Audience',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: Public Domain',
'License :: OSI Approved :: Python Software Foundation License',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing',
'Natural Language :: English', # main/default language, keep first
'Natural Language :: Afrikaans',
'Natural Language :: Arabic',
'Natural Language :: Catalan',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: Czech',
'Natural Language :: Danish',
'Natural Language :: Dutch',
'Natural Language :: Esperanto',
'Natural Language :: Finnish',
'Natural Language :: French',
'Natural Language :: Galician',
'Natural Language :: German',
'Natural Language :: Hebrew',
'Natural Language :: Italian',
'Natural Language :: Japanese',
'Natural Language :: Korean',
'Natural Language :: Latvian',
'Natural Language :: Lithuanian',
'Natural Language :: Persian',
'Natural Language :: Polish',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Russian',
'Natural Language :: Slovak',
'Natural Language :: Spanish',
'Natural Language :: Swedish',
],
}
"""Distutils setup parameters."""
if __name__ == '__main__':
do_setup()
| 35.703448 | 78 | 0.603631 |
884b8595b246a25d1c4c0a76969e4887169352b3 | 3,171 | py | Python | tests/plugins/remove/test_rm_cli.py | jtpavlock/moe | 6f053c8c53f92686013657bda676b00f97edd230 | [
"MIT"
] | 14 | 2021-09-04T11:42:18.000Z | 2022-02-04T05:11:46.000Z | tests/plugins/remove/test_rm_cli.py | jtpavlock/Moe | 6f053c8c53f92686013657bda676b00f97edd230 | [
"MIT"
] | 56 | 2021-05-26T00:00:46.000Z | 2021-08-08T17:14:31.000Z | tests/plugins/remove/test_rm_cli.py | jtpavlock/moe | 6f053c8c53f92686013657bda676b00f97edd230 | [
"MIT"
] | 1 | 2021-07-22T21:55:21.000Z | 2021-07-22T21:55:21.000Z | """Tests the ``remove`` plugin."""
from unittest.mock import patch
import pytest
import moe
class TestCommand:
"""Test the `remove` command."""
def test_track(self, mock_track, mock_query, mock_rm, tmp_rm_config):
"""Tracks are removed from the database with valid query."""
cli_args = ["remove", "*"]
mock_query.return_value = [mock_track]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="track")
mock_rm.assert_called_once_with(mock_track)
def test_album(self, mock_album, mock_query, mock_rm, tmp_rm_config):
"""Albums are removed from the database with valid query."""
cli_args = ["remove", "-a", "*"]
mock_query.return_value = [mock_album]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="album")
mock_rm.assert_called_once_with(mock_album)
def test_extra(self, mock_extra, mock_query, mock_rm, tmp_rm_config):
"""Extras are removed from the database with valid query."""
cli_args = ["remove", "-e", "*"]
mock_query.return_value = [mock_extra]
moe.cli.main(cli_args, tmp_rm_config)
mock_query.assert_called_once_with("*", query_type="extra")
mock_rm.assert_called_once_with(mock_extra)
def test_multiple_items(
self, mock_track_factory, mock_query, mock_rm, tmp_rm_config
):
"""All items returned from the query are removed."""
cli_args = ["remove", "*"]
mock_tracks = [mock_track_factory(), mock_track_factory()]
mock_query.return_value = mock_tracks
moe.cli.main(cli_args, tmp_rm_config)
for mock_track in mock_tracks:
mock_rm.assert_any_call(mock_track)
assert mock_rm.call_count == 2
def test_exit_code(self, mock_query, mock_rm, tmp_rm_config):
"""Return a non-zero exit code if no items are removed."""
cli_args = ["remove", "*"]
mock_query.return_value = []
with pytest.raises(SystemExit) as error:
moe.cli.main(cli_args, tmp_rm_config)
assert error.value.code != 0
mock_rm.assert_not_called()
| 33.03125 | 84 | 0.666667 |
884fed80e4bfbef3b30a1a62c33621b37d15f8a9 | 2,089 | py | Python | include/PyBool_builder.py | tyler-utah/PBL | 842a3fab949528bc03ee03d1f802e4163604d0f5 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2015-04-03T10:40:35.000Z | 2021-02-18T21:54:54.000Z | include/PyBool_builder.py | tyler-utah/PBL | 842a3fab949528bc03ee03d1f802e4163604d0f5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-08-06T18:46:35.000Z | 2017-08-07T14:01:23.000Z | include/PyBool_builder.py | tyler-utah/PBL | 842a3fab949528bc03ee03d1f802e4163604d0f5 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2015-10-09T05:42:04.000Z | 2022-03-24T19:07:19.000Z | #Tyler Sorensen
#February 15, 2012
#University of Utah
#PyBool_builder.py
#The interface to build recursive style boolean expressions
#See README.txt for more information
def mk_const_expr(val):
"""
returns a constant expression of value VAL
VAL should be of type boolean
"""
return {"type" : "const",
"value": val }
def mk_var_expr(name):
"""
returns a variable expression of name NAME
where NAME is a string
"""
return {"type" : "var" ,
"name" : (name, 0)}
def mk_neg_expr(expr):
"""
returns a negated expression where EXPR
is the expression to be negated
"""
return {"type" : "neg",
"expr" : expr }
def mk_and_expr(expr1, expr2):
"""
returns an and expression
of the form (EXPR1 /\ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "and" ,
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_or_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 \/ EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "or" ,
"expr1" : expr1 ,
"expr2" : expr2 }
#NOT NEEDED
def mk_impl_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 -> EXPR2)
where EXPR1 and EXPR2 are expressions
NOTE: Order of expr1 and expr2 matters here
"""
return {"type" : "impl",
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_eqv_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 <=> EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "eqv" ,
"expr1" : expr1 ,
"expr2" : expr2 }
def mk_xor_expr(expr1, expr2):
"""
returns an or expression
of the form (EXPR1 XOR EXPR2)
where EXPR1 and EXPR2 are expressions
"""
return {"type" : "xor" ,
"expr1" : expr1 ,
"expr2" : expr2 }
| 23.47191 | 59 | 0.560555 |
8850e891db7232fd18b38c698cdc17d7011b2602 | 2,595 | py | Python | spotify-monitor.py | tmessi/bins | 6fdd738ee44b5dc68849ddb6056667c4170dd603 | [
"MIT"
] | null | null | null | spotify-monitor.py | tmessi/bins | 6fdd738ee44b5dc68849ddb6056667c4170dd603 | [
"MIT"
] | null | null | null | spotify-monitor.py | tmessi/bins | 6fdd738ee44b5dc68849ddb6056667c4170dd603 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
'''
A simple script to get the playback status of spotify.
This script needs ``dbus-python`` for spotify communication
To run simply::
./spotify-monitor.py <command>
Where command is one of the following::
``playback``
``playing``
'''
# pylint: disable=W0703
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import sys
def get_pandora_status(command):
'''
Get status for pithos/pandora
'''
try:
bus = dbus.SessionBus()
pithos_object = bus.get_object("net.kevinmehall.Pithos",
"/net/kevinmehall/Pithos")
pithos = dbus.Interface(pithos_object, "net.kevinmehall.Pithos")
if command == 'playback':
res = 'Playing' if pithos.IsPlaying() else 'Paused'
elif command == 'playing':
info = dict((str(k), str(v)) for k, v in pithos.GetCurrentSong().items())
res = '{0} - {1}'.format(info['title'], info['artist'])
except dbus.exceptions.DBusException:
res = None
return res
def get_status(command):
'''
Get the status.
command
The command to query spofity with.
Returns the status from spotify.
'''
try:
bus_loop = DBusGMainLoop(set_as_default=True)
session_bus = dbus.SessionBus(mainloop=bus_loop)
spotify_bus = session_bus.get_object('org.mpris.MediaPlayer2.spotify',
'/org/mpris/MediaPlayer2')
spotify = dbus.Interface(spotify_bus,
'org.freedesktop.DBus.Properties')
if command == 'playback':
res = spotify.Get('org.mpris.MediaPlayer2.Player',
'PlaybackStatus')
elif command == 'playing':
meta = spotify.Get('org.mpris.MediaPlayer2.Player',
'Metadata')
artist = meta['xesam:artist'][0].encode('utf-8')
title = meta['xesam:title'].encode('utf-8')
res = '{0} - {1}'.format(title, artist)
except Exception:
res = 'Not Playing'
return res
def main(arg):
'''
Pass the arg to spotify.
'''
if arg == 'playback':
res = get_pandora_status(arg)
if not res or res == 'Not Playing':
res = get_status(arg)
print res
elif arg == 'playing':
res = get_pandora_status(arg)
if not res:
res = get_status(arg)
print res
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1])
else:
exit(101)
| 27.315789 | 85 | 0.563391 |
8851d7b5290203c0b1a5e1b0bf63d3352db40abc | 787 | py | Python | tests/file-test/test-pyro.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | tests/file-test/test-pyro.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | tests/file-test/test-pyro.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | import sys
sys.path.insert(0, '../Pyro4-4.17')
import Pyro4
from time import clock
"""
log = open('pyro.log', 'w')
times = []
proxy = Pyro4.Proxy("PYRO:example.service@localhost:54642")
for i in range(100) :
local = []
begin = clock()
for files in proxy.getFiles(proxy.getcwd()) :
for file in files :
local.append(file)
end = clock()
times.append(end - begin)
log.write(str(end - begin) + "\n")
log.write("Average: " + str(reduce(lambda x, y: x+y, times)/len(times)))
"""
proxy = Pyro4.Proxy("PYRO:service@smarmy-pirate.cs.utexas.edu:9975")
begin = clock()
for files in proxy.getFiles(proxy.getcwd()) :
for file in files :
log = open('p' + file, 'w')
log.write(proxy.getFile(file))
end = clock()
print str(end - begin)
| 24.59375 | 72 | 0.617535 |
88521be531a73b3f205941d7145e1d213b76932c | 117 | py | Python | tests/test_controllers/test_demo.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 6 | 2015-01-28T05:59:08.000Z | 2018-01-09T07:48:57.000Z | tests/test_controllers/test_demo.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 2 | 2020-05-09T16:36:43.000Z | 2020-05-09T16:52:35.000Z | tests/test_controllers/test_demo.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 1 | 2016-01-13T07:19:44.000Z | 2016-01-13T07:19:44.000Z | from nose.tools import assert_equal
from tests.fixtures import WebTest
| 16.714286 | 35 | 0.811966 |
8852b79b5bfc54d623d2ee6424607b818660de24 | 1,358 | py | Python | evaluation/hpt/sizes/plot.py | cucl-srg/Measuring-Burstiness | b9024bf606362d5587773a0c5b892fcb97a3d577 | [
"Apache-2.0"
] | 1 | 2022-03-21T02:26:27.000Z | 2022-03-21T02:26:27.000Z | evaluation/hpt/sizes/plot.py | cucl-srg/Measuring-Burstiness | b9024bf606362d5587773a0c5b892fcb97a3d577 | [
"Apache-2.0"
] | null | null | null | evaluation/hpt/sizes/plot.py | cucl-srg/Measuring-Burstiness | b9024bf606362d5587773a0c5b892fcb97a3d577 | [
"Apache-2.0"
] | 1 | 2020-08-10T16:46:05.000Z | 2020-08-10T16:46:05.000Z | import sys
import matplotlib
import numpy as np
# Avoid errors when running on headless servers.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if len(sys.argv) != 6:
print "Usage plot.py <data file port 1> <min size> <step size> <max size> <num packets sent>"
sys.exit(1)
width = 20
data_file = sys.argv[1]
min_rate = int(sys.argv[2])
step_size = int(sys.argv[3])
max_rate = int(sys.argv[4])
num_packets_sent = int(sys.argv[5])
x_data = np.arange(min_rate, max_rate + step_size, step_size)
y_data = []
error = []
with open(data_file, 'r') as f:
for data in f.readlines():
if len(data.split(' ')) == 1:
y_data.append(int(data))
error = None
else:
values = []
for value in data.split(' '):
values.append(int(value))
y_data.append(np.mean(values))
error.append(np.std(values))
dropped_counts = []
for data in y_data:
dropped_counts.append(num_packets_sent - data)
plt.title('Number of drops by one port with different sized packets')
plt.xlabel('Packet size (Bytes)')
plt.ylabel('Packets')
plt.bar(x_data, y_data, width, color='blue', label="Number Captured", y_err=error)
plt.bar(x_data, dropped_counts, width, color='red', bottom=y_data, label="Number Dropped")
plt.legend()
plt.savefig('dropped_packets.eps', format='eps')
| 28.893617 | 97 | 0.658321 |
8853e415ffd0f52c5a2f8419a9bf5ebfef325883 | 2,678 | py | Python | examples/pytorch/dtgrnn/dataloading.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 9,516 | 2018-12-08T22:11:31.000Z | 2022-03-31T13:04:33.000Z | examples/pytorch/dtgrnn/dataloading.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 2,494 | 2018-12-08T22:43:00.000Z | 2022-03-31T21:16:27.000Z | examples/pytorch/dtgrnn/dataloading.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 2,529 | 2018-12-08T22:56:14.000Z | 2022-03-31T13:07:41.000Z | import os
import ssl
from six.moves import urllib
import torch
import numpy as np
import dgl
from torch.utils.data import Dataset, DataLoader
def PEMS_BAYGraphDataset():
if not os.path.exists('data/graph_bay.bin'):
if not os.path.exists('data'):
os.mkdir('data')
download_file('graph_bay.bin')
g, _ = dgl.load_graphs('data/graph_bay.bin')
return g[0]
| 28.795699 | 78 | 0.639283 |
88551bbfcb08a1b53c119a389c90207f6e61b6cd | 1,552 | py | Python | django_boto/tests.py | degerli/django-boto | 930863b75c0f26eb10090a6802e16e1cf127b588 | [
"MIT"
] | 54 | 2015-02-09T14:25:56.000Z | 2021-09-03T21:11:29.000Z | django_boto/tests.py | degerli/django-boto | 930863b75c0f26eb10090a6802e16e1cf127b588 | [
"MIT"
] | 12 | 2015-01-10T06:39:56.000Z | 2019-06-19T19:36:40.000Z | django_boto/tests.py | degerli/django-boto | 930863b75c0f26eb10090a6802e16e1cf127b588 | [
"MIT"
] | 18 | 2015-01-09T20:06:38.000Z | 2019-02-22T12:33:44.000Z | # -*- coding: utf-8 -*-
import string
import random
import logging
import urllib2
from os import path
from django.test import TestCase
from django.core.files.base import ContentFile
from s3 import upload
from s3.storage import S3Storage
from settings import BOTO_S3_BUCKET
logger = logging.getLogger(__name__)
local_path = path.realpath(path.dirname(__file__))
| 23.164179 | 74 | 0.661727 |
8857049e6802ce1ea80b578b8e1834b184a88a8c | 4,060 | py | Python | src/roswire/ros1/bag/player.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | 4 | 2019-09-22T18:38:33.000Z | 2021-04-02T01:37:10.000Z | src/roswire/ros1/bag/player.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | 208 | 2019-03-27T18:34:39.000Z | 2021-07-26T20:36:07.000Z | src/roswire/ros1/bag/player.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# http://wiki.ros.org/Bags/Format/2.0
__all__ = ("BagPlayer",)
import subprocess
import threading
from types import TracebackType
from typing import Optional, Type
import dockerblade
from loguru import logger
from ... import exceptions
def finished(self) -> bool:
"""Checks whether playback has completed."""
p = self._process
return p.finished if p else False
def wait(self, time_limit: Optional[float] = None) -> None:
"""Blocks until playback has finished.
Parameters
----------
time_limit: Optional[float] = None
an optional time limit.
Raises
------
PlayerTimeout:
if playback did not finish within the provided timeout.
PlayerFailure:
if an unexpected occurred during playback.
"""
assert self._process
try:
self._process.wait(time_limit)
retcode = self._process.returncode
assert retcode is not None
if retcode != 0:
out = "\n".join(self._process.stream) # type: ignore
raise exceptions.PlayerFailure(retcode, out)
except subprocess.TimeoutExpired as error:
raise exceptions.PlayerTimeout from error
def start(self) -> None:
"""Starts playback from the bag.
Raises
------
PlayerAlreadyStarted:
if the player has already started.
"""
logger.debug("starting bag playback")
with self.__lock:
if self.__started:
raise exceptions.PlayerAlreadyStarted
self.__started = True
command: str = f"rosbag play -q {self.__fn_container}"
self._process = self.__shell.popen(
command, stdout=False, stderr=False
)
logger.debug("started bag playback")
def stop(self) -> None:
"""Stops playback from the bag.
Raises
------
PlayerAlreadyStopped:
if the player has already been stopped.
"""
logger.debug("stopping bag playback")
with self.__lock:
if self.__stopped:
raise exceptions.PlayerAlreadyStopped
if not self.__started:
raise exceptions.PlayerNotStarted
assert self._process
self._process.kill()
out = "\n".join(list(self._process.stream)) # type: ignore
logger.debug("player output:\n%s", out)
self._process = None
if self.__delete_file_after_use:
self.__files.remove(self.__fn_container)
self.__stopped = True
logger.debug("stopped bag playback")
| 29.852941 | 71 | 0.578818 |
885a5fd1a5967b9a6efcaa58fe258ea5b945e757 | 3,120 | py | Python | flask_vgavro_utils/decorators.py | vgavro/flask-vgavro-utils | 01b7caa0241a6b606c228081eea169e51a0d1337 | [
"BSD-2-Clause"
] | null | null | null | flask_vgavro_utils/decorators.py | vgavro/flask-vgavro-utils | 01b7caa0241a6b606c228081eea169e51a0d1337 | [
"BSD-2-Clause"
] | null | null | null | flask_vgavro_utils/decorators.py | vgavro/flask-vgavro-utils | 01b7caa0241a6b606c228081eea169e51a0d1337 | [
"BSD-2-Clause"
] | null | null | null | from functools import wraps
from flask import request, make_response
from .exceptions import ApiError
from .schemas import create_schema, ma_version_lt_300b7
def response_headers(headers={}):
"""
This decorator adds the headers passed in to the response
"""
# http://flask.pocoo.org/snippets/100/
return decorator
| 31.515152 | 96 | 0.605769 |
885c26a7ab9bb55b6a5e53e5c4ace165bde1c509 | 1,447 | py | Python | manolo/urls.py | rmaceissoft/django-manolo | 7a447b6b06a5a9a0bbc3ed5daf754721a48bbd76 | [
"BSD-3-Clause"
] | null | null | null | manolo/urls.py | rmaceissoft/django-manolo | 7a447b6b06a5a9a0bbc3ed5daf754721a48bbd76 | [
"BSD-3-Clause"
] | null | null | null | manolo/urls.py | rmaceissoft/django-manolo | 7a447b6b06a5a9a0bbc3ed5daf754721a48bbd76 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.urls import path
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from drf_yasg.views import get_schema_view
admin.autodiscover()
from visitors import views
from api.views import schema_view
urlpatterns = [
path('administramelo/', admin.site.urls),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^search_date/$', views.search_date),
url(r'^search/', views.search, name='search_view'),
url(r'^api/', include('api.urls')),
url(r'^docs(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^docs/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redocs/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
url(r'^statistics/$', views.statistics, name='statistics'),
url(r'^statistics_api/$', views.statistics_api),
url(r'^about/', views.about, name='about'),
path('', include('visitors.urls')),
url(r'^cazador/', include('cazador.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| 34.452381 | 105 | 0.704216 |
885c8e23ca6cf334dab1eb1260245a09eefae5fc | 2,043 | py | Python | samples/frontend/datagator/rest/decorators.py | liuyu81/datagator-contrib | 813529e211f680732bd1dc9568f5b4f2bdcacdcc | [
"Apache-2.0"
] | 2 | 2015-02-20T02:50:07.000Z | 2017-05-02T19:26:42.000Z | samples/frontend/datagator/rest/decorators.py | liuyu81/datagator-contrib | 813529e211f680732bd1dc9568f5b4f2bdcacdcc | [
"Apache-2.0"
] | null | null | null | samples/frontend/datagator/rest/decorators.py | liuyu81/datagator-contrib | 813529e211f680732bd1dc9568f5b4f2bdcacdcc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
datagator.rest.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2015 by `University of Denver <http://pardee.du.edu/>`_
:license: Apache 2.0, see LICENSE for more details.
"""
import base64
from django.contrib.auth import authenticate, login
from django.core.exceptions import SuspiciousOperation
__all__ = ['with_authentication', ]
| 29.608696 | 78 | 0.600587 |
885d21f84b46886e1d6a64a22aa7df5e772efb4b | 2,836 | py | Python | src/ui.py | ajstensland/slyther | 176e901120032b94427d634e647cb8940b019f14 | [
"MIT"
] | 9 | 2019-10-15T17:24:09.000Z | 2021-11-07T20:23:43.000Z | src/ui.py | ajstensland/slyther | 176e901120032b94427d634e647cb8940b019f14 | [
"MIT"
] | 21 | 2019-10-17T15:42:04.000Z | 2019-11-14T20:43:55.000Z | src/ui.py | ajstensland/slyther | 176e901120032b94427d634e647cb8940b019f14 | [
"MIT"
] | 4 | 2019-10-27T13:54:03.000Z | 2021-07-09T12:54:02.000Z | from getpass import getpass
import socket
COLORS = {"green" : "\33[92m",
"red" : "\33[91m",
"yellow" : "\33[93m",
"endc" : "\33[0m" }
def print_green(msg):
"""Prints msg in green text."""
print("{0}{1}{2}".format(COLORS["green"], msg, COLORS["endc"]))
def print_yellow(msg):
"""Prints msg in yellow text."""
print("{0}{1}{2}".format(COLORS["yellow"], msg, COLORS["endc"]))
def print_red(msg):
"""Prints msg in red text."""
print("{0}{1}{2}".format(COLORS["red"], msg, COLORS["endc"]))
def print_banner():
"""Prints the slyther entry banner."""
print_green("///////////////////")
print_green("// s l y t h e r //")
print_green("///////////////////")
def getpass_handled(prompt):
"""Wrapper for getpass() that handles KeyboardInterrupts."""
try:
return getpass(prompt)
except KeyboardInterrupt:
print_red("\nAborting...")
exit()
def confirm(prompt):
"""Displays the prompt, only returns true with input 'Y' or 'y'."""
confirmation = input(COLORS["yellow"] + prompt + COLORS["endc"]).lower()
return confirmation == "y"
def input_default(prompt, default):
"""Displays the prompt, returns input (default if user enters nothing)."""
response = input("{} [{}]: ".format(prompt, default))
return response if response else default
def get_ip():
"""Prompts the user for and returns a valid IP address string."""
while True:
ip = input("IP: ")
# Check if the ip has 3 "."s. inet_aton does not verify this
if len(ip.split(".")) != 4:
print_red("\nInvalid IP address. Please try again.")
continue
# Check if input creates a valid ip
try:
socket.inet_aton(ip)
except socket.error:
print_red("\nInvalid IP address. Please try again.")
continue
return ip
def get_recipient(contacts):
"""
Prompts a user for a contact. If a valid one is not provided, the user may
create a new one.
Args:
contacts: The contacts dictionary to select from.
Returns:
The contact ID of a valid contact.
"""
while True:
recipient = input("Contact Name: ")
for contact_id in contacts:
if recipient == contacts[contact_id]["name"]:
return contact_id
print_red("Contact not recognized.")
def get_command(commands):
"""Prompts for a command, and returns when the user has chosen a valid one."""
while True:
command = input("> ").lower()
if command in commands:
return command
else:
print_red("Invalid command. Please try again.")
| 26.259259 | 82 | 0.575811 |
885d4d75f5722e68e64b97b960999165b69c5ecc | 1,244 | py | Python | src/data processing - clinical notes and structured data/step5_note_level_tagging.py | arjun-parthi/SSRI-Project | 62f610a594e5849ccf0f3c25cd6adcd63888ec2a | [
"MIT"
] | 2 | 2019-02-12T00:37:37.000Z | 2021-03-25T05:40:06.000Z | src/data processing - clinical notes and structured data/step5_note_level_tagging.py | arjun-parthi/SSRI-Project | 62f610a594e5849ccf0f3c25cd6adcd63888ec2a | [
"MIT"
] | null | null | null | src/data processing - clinical notes and structured data/step5_note_level_tagging.py | arjun-parthi/SSRI-Project | 62f610a594e5849ccf0f3c25cd6adcd63888ec2a | [
"MIT"
] | 1 | 2021-03-25T05:40:17.000Z | 2021-03-25T05:40:17.000Z | import pandas as pd
import numpy as np
from collections import Counter
data = pd.read_csv('out/negex_all.txt', sep="\t", header=None)
print(data.shape)
data.columns = ['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE','TEXT_SNIPPET','lower_text','STATUS']
df = data.groupby(['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE'])['STATUS'].apply(','.join).reset_index()
df_text = data.groupby(['PAT_DEID','NOTE_DEID','NOTE_DATE','ENCOUNTER_DATE','NOTE_CODE'])['TEXT_SNIPPET'].apply(' ##### '.join).reset_index()
df_text_required = df_text[['NOTE_DEID','TEXT_SNIPPET']]
df_fin = pd.merge(df, df_text_required, on='NOTE_DEID', how='inner')
df1 = df_fin.copy()
df1 = majority_rule('STATUS','STATUS_FINAL')
print(df1.shape)
df2 = pd.merge(df1, df_text_required, on='NOTE_DEID', how='inner')
df2.to_pickle("out/annotated_note_all.pkl")
| 34.555556 | 141 | 0.676045 |
885d8583c03a1a8044c9ab014f78fb40213a58b5 | 821 | py | Python | app.py | shaungarwood/co-voter-db | bcbc0d46459cc9913ed318b32b284a4139c75b74 | [
"MIT"
] | null | null | null | app.py | shaungarwood/co-voter-db | bcbc0d46459cc9913ed318b32b284a4139c75b74 | [
"MIT"
] | null | null | null | app.py | shaungarwood/co-voter-db | bcbc0d46459cc9913ed318b32b284a4139c75b74 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
from flask import jsonify
from os import environ
import query
app = Flask(__name__)
if 'MONGODB_HOST' in environ:
mongodb_host = environ['MONGODB_HOST']
else:
mongodb_host = "localhost"
if 'MONGODB_PORT' in environ:
mongodb_port = environ['MONGODB_PORT']
else:
mongodb_port = "27017"
vr = query.VoterRecords(mongodb_host, mongodb_port)
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| 21.605263 | 52 | 0.686967 |
885e29d199b2465852d40fecbeb6ea0388645c61 | 972 | py | Python | stack/tests/m_decoded_string_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | stack/tests/m_decoded_string_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | stack/tests/m_decoded_string_test.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | from stack.m_decoded_string import DecodeString
| 24.3 | 66 | 0.595679 |
885efaccc06cb190fd2ac00927d90705f3efe0f0 | 534 | py | Python | tests/uranium/plot_rho.py | rc/dftatom | fe479fd27a7e0f77c6a88a1949996406ec935ac2 | [
"MIT"
] | 40 | 2015-03-19T16:00:14.000Z | 2021-12-29T03:06:10.000Z | tests/uranium/plot_rho.py | rc/dftatom | fe479fd27a7e0f77c6a88a1949996406ec935ac2 | [
"MIT"
] | 6 | 2015-06-23T21:59:56.000Z | 2019-09-04T20:36:51.000Z | tests/uranium/plot_rho.py | rc/dftatom | fe479fd27a7e0f77c6a88a1949996406ec935ac2 | [
"MIT"
] | 15 | 2015-06-07T15:14:49.000Z | 2021-12-05T07:03:24.000Z | from pylab import plot, show, legend
from numpy import array
from h5py import File
data = File("data.h5")
iter = 2
R = array(data["/%04d/R" % iter])
rho = array(data["/%04d/rho" % iter])
Vtot = array(data["/%04d/V_tot" % iter])
Zeff = -Vtot * R
#for i in range(1, 19):
# P = array(data["/%04d/P%04d" % (iter, i)])
# plot(R, P, label="P%04d" % i)
for i in range(1, 11):
iter = i
R = array(data["/%04d/R" % iter])
rho = array(data["/%04d/rho" % iter])
plot(R, rho*R**2, label="iter=%d" % iter)
legend()
show()
| 22.25 | 47 | 0.571161 |
88601c88f30dcd435b8268d9578887c317aa8098 | 379 | py | Python | src/account/migrations/0002_address_default_add.py | amaan2398/EcommerceWebsiteDjango | 5173d13d60c1276c957161ce0ea37b8de5acddf4 | [
"MIT"
] | 1 | 2020-08-11T01:17:36.000Z | 2020-08-11T01:17:36.000Z | src/account/migrations/0002_address_default_add.py | amaan2398/EcommerceWebsiteDjango | 5173d13d60c1276c957161ce0ea37b8de5acddf4 | [
"MIT"
] | null | null | null | src/account/migrations/0002_address_default_add.py | amaan2398/EcommerceWebsiteDjango | 5173d13d60c1276c957161ce0ea37b8de5acddf4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-18 13:17
from django.db import migrations, models
| 19.947368 | 52 | 0.593668 |
886191b83cc6306a7a234ebf3e4730d225e73536 | 692 | py | Python | 100-clean_web_static.py | cbarros7/AirBnB_clone_v2 | b25d8facc07ac5be2092a9f6214d1ef8c32ce60e | [
"MIT"
] | null | null | null | 100-clean_web_static.py | cbarros7/AirBnB_clone_v2 | b25d8facc07ac5be2092a9f6214d1ef8c32ce60e | [
"MIT"
] | null | null | null | 100-clean_web_static.py | cbarros7/AirBnB_clone_v2 | b25d8facc07ac5be2092a9f6214d1ef8c32ce60e | [
"MIT"
] | 1 | 2021-08-11T05:20:27.000Z | 2021-08-11T05:20:27.000Z | #!/usr/bin/python3
# Fabfile to delete out-of-date archives.
import os
from fabric.api import *
env.hosts = ['104.196.116.233', '54.165.130.77']
def do_clean(number=0):
"""Delete out-of-date archives.
"""
number = 1 if int(number) == 0 else int(number)
archives = sorted(os.listdir("versions"))
[archives.pop() for i in range(number)]
with lcd("versions"):
[local("rm ./{}".format(a)) for a in archives]
with cd("/data/web_static/releases"):
archives = run("ls -tr").split()
archives = [a for a in archives if "web_static_" in a]
[archives.pop() for i in range(number)]
[run("rm -rf ./{}".format(a)) for a in archives]
| 28.833333 | 62 | 0.606936 |
88627cf7ecface35fcb049861351f30b77fd4c4c | 173 | py | Python | tfrec/utils/__init__.py | Praful932/Tf-Rec | fe0e08d3621da911149a95d8a701e434dfa61161 | [
"MIT"
] | 18 | 2020-12-22T04:16:54.000Z | 2022-03-23T08:49:16.000Z | tfrec/utils/__init__.py | Praful932/Tf-Rec | fe0e08d3621da911149a95d8a701e434dfa61161 | [
"MIT"
] | 1 | 2021-05-11T12:28:07.000Z | 2022-03-16T17:33:03.000Z | tfrec/utils/__init__.py | Praful932/Tf-Rec | fe0e08d3621da911149a95d8a701e434dfa61161 | [
"MIT"
] | 2 | 2021-04-26T10:29:44.000Z | 2021-07-01T03:31:31.000Z | from tfrec.utils.model_utils import cross_validate
from tfrec.utils.model_utils import preprocess_and_split
__all__ = [
'cross_validate',
'preprocess_and_split',
]
| 21.625 | 56 | 0.791908 |
8863590d6524676746195e9a24531f9c96bd95d5 | 17,316 | py | Python | dask/threaded.py | eriknw/dask | f654b47a61cbbddaf5d2f4d1a3e6e07373b86709 | [
"BSD-3-Clause"
] | null | null | null | dask/threaded.py | eriknw/dask | f654b47a61cbbddaf5d2f4d1a3e6e07373b86709 | [
"BSD-3-Clause"
] | null | null | null | dask/threaded.py | eriknw/dask | f654b47a61cbbddaf5d2f4d1a3e6e07373b86709 | [
"BSD-3-Clause"
] | null | null | null | """
A threaded shared-memory scheduler for dask graphs.
This code is experimental and fairly ugly. It should probably be rewritten
before anyone really depends on it. It is very stateful and error-prone.
That being said, it is decently fast.
State
=====
Many functions pass around a ``state`` variable that holds the current state of
the computation. This variable consists of several other dictionaries and
sets, explained below.
Constant state
--------------
1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x
2. dependents: {a: [x, y]} a must run before x or y
Changing state
--------------
### Data
1. cache: available concrete data. {key: actual-data}
2. released: data that we've seen, used, and released because it is no longer
needed
### Jobs
1. ready: A set of ready-to-run tasks
1. running: A set of tasks currently in execution
2. finished: A set of finished tasks
3. waiting: which tasks are still waiting on others :: {key: {keys}}
Real-time equivalent of dependencies
4. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}
Real-time equivalent of dependents
Example
-------
>>> import pprint
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': set(['z']),
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
Optimizations
=============
We build this scheduler with out-of-core array operations in mind. To this end
we have encoded some particular optimizations.
Compute to release data
-----------------------
When we choose a new task to execute we often have many options. Policies at
this stage are cheap and can significantly impact performance. One could
imagine policies that expose parallelism, drive towards a paticular output,
etc.. Our current policy is the compute tasks that free up data resources.
See the functions ``choose_task`` and ``score`` for more information
Inlining computations
---------------------
We hold on to intermediate computations either in memory or on disk.
For very cheap computations that may emit new copies of the data, like
``np.transpose`` or possibly even ``x + 1`` we choose not to store these as
separate pieces of data / tasks. Instead we combine them with the computations
that require them. This may result in repeated computation but saves
significantly on space and computation complexity.
See the function ``inline`` for more information.
"""
from .core import istask, flatten, reverse_dict, get_dependencies, ishashable
from .utils import deepmap
from operator import add
from toolz import concat, partial
from multiprocessing.pool import ThreadPool
from .compatibility import Queue
from threading import Lock
import psutil
DEBUG = False
def start_state_from_dask(dsk, cache=None):
""" Start state from a dask
Example
-------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> import pprint
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': set(['z']),
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
"""
if cache is None:
cache = dict()
for k, v in dsk.items():
if not istask(v):
cache[k] = v
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
waiting = dict((k, v.copy()) for k, v in dependencies.items() if v)
dependents = reverse_dict(dependencies)
for a in cache:
for b in dependents[a]:
waiting[b].remove(a)
waiting_data = dict((k, v.copy()) for k, v in dependents.items() if v)
ready = set([k for k, v in waiting.items() if not v])
waiting = dict((k, v) for k, v in waiting.items() if v)
state = {'dependencies': dependencies,
'dependents': dependents,
'waiting': waiting,
'waiting_data': waiting_data,
'cache': cache,
'ready': ready,
'running': set(),
'finished': set(),
'released': set()}
return state
'''
Running tasks
-------------
When we execute tasks we both
1. Perform the actual work of collecting the appropriate data and calling the function
2. Manage administrative state to coordinate with the scheduler
'''
def _execute_task(arg, cache, dsk=None):
""" Do the actual work of collecting data and executing a function
Examples
--------
>>> cache = {'x': 1, 'y': 2}
Compute tasks against a cache
>>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner
2
>>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation
3
Also grab data from cache
>>> _execute_task('x', cache)
1
Support nested lists
>>> list(_execute_task(['x', 'y'], cache))
[1, 2]
>>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))
[[1, 2], [2, 1]]
>>> _execute_task('foo', cache) # Passes through on non-keys
'foo'
"""
dsk = dsk or dict()
if isinstance(arg, list):
return (_execute_task(a, cache) for a in arg)
elif istask(arg):
func, args = arg[0], arg[1:]
args2 = [_execute_task(a, cache, dsk=dsk) for a in args]
return func(*args2)
elif not ishashable(arg):
return arg
elif arg in cache:
return cache[arg]
elif arg in dsk:
raise ValueError("Premature deletion of data. Key: %s" % str(arg))
else:
return arg
def execute_task(dsk, key, state, queue, results, lock):
"""
Compute task and handle all administration
See also:
_execute_task - actually execute task
"""
try:
task = dsk[key]
result = _execute_task(task, state['cache'], dsk=dsk)
with lock:
finish_task(dsk, key, result, state, results)
result = key, task, result, None
except Exception as e:
import sys
exc_type, exc_value, exc_traceback = sys.exc_info()
result = key, task, e, exc_traceback
queue.put(result)
return
def finish_task(dsk, key, result, state, results):
"""
Update executation state after a task finishes
Mutates. This should run atomically (with a lock).
"""
state['cache'][key] = result
if key in state['ready']:
state['ready'].remove(key)
for dep in state['dependents'][key]:
s = state['waiting'][dep]
s.remove(key)
if not s:
del state['waiting'][dep]
state['ready'].add(dep)
for dep in state['dependencies'][key]:
if dep in state['waiting_data']:
s = state['waiting_data'][dep]
s.remove(key)
if not s and dep not in results:
if DEBUG:
from chest.core import nbytes
print("Key: %s\tDep: %s\t NBytes: %.2f\t Release" % (key, dep,
sum(map(nbytes, state['cache'].values()) / 1e6)))
assert dep in state['cache']
release_data(dep, state)
assert dep not in state['cache']
elif dep in state['cache'] and dep not in results:
release_data(dep, state)
state['finished'].add(key)
state['running'].remove(key)
return state
def release_data(key, state):
""" Remove data from temporary storage
See Also
finish_task
"""
if key in state['waiting_data']:
assert not state['waiting_data'][key]
del state['waiting_data'][key]
state['released'].add(key)
del state['cache'][key]
def nested_get(ind, coll, lazy=False):
""" Get nested index from collection
Examples
--------
>>> nested_get(1, 'abc')
'b'
>>> nested_get([1, 0], 'abc')
('b', 'a')
>>> nested_get([[1, 0], [0, 1]], 'abc')
(('b', 'a'), ('a', 'b'))
"""
if isinstance(ind, list):
if lazy:
return (nested_get(i, coll, lazy=lazy) for i in ind)
else:
return tuple([nested_get(i, coll, lazy=lazy) for i in ind])
return seq
else:
return coll[ind]
'''
Task Selection
--------------
We often have a choice among many tasks to run next. This choice is both
cheap and can significantly impact performance.
Here we choose tasks that immediately free data resources.
'''
def score(key, state):
""" Prefer to run tasks that remove need to hold on to data """
deps = state['dependencies'][key]
wait = state['waiting_data']
return sum([1./len(wait[dep])**2 for dep in deps])
def choose_task(state, score=score):
"""
Select a task that maximizes scoring function
Default scoring function selects tasks that free up the maximum number of
resources.
E.g. for ready tasks a, b with dependencies:
{a: {x, y},
b: {x, w}}
and for data w, x, y, z waiting on the following tasks
{w: {b, c}
x: {a, b, c},
y: {a}}
We choose task a because it will completely free up resource y and
partially free up resource x. Task b only partially frees up resources x
and w and completely frees none so it is given a lower score.
See also:
score
"""
return max(state['ready'], key=partial(score, state=state))
'''
Inlining
--------
We join small cheap tasks on to others to avoid the creation of intermediaries.
'''
def inline(dsk, fast_functions=None):
""" Inline cheap functions into larger operations
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline(dsk, [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
result = dict((k, expand_value(dsk, fast_functions, k))
for k, v in dsk.items()
if not dependents[k]
or not istask(v)
or not isfast(v[0]))
return result
def expand_key(dsk, fast, key):
"""
>>> dsk = {'out': (sum, ['i', 'd']),
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> expand_key(dsk, [inc], 'd')
'd'
>>> expand_key(dsk, [inc], 'i') # doctest: +SKIP
(inc, 'x')
>>> expand_key(dsk, [inc], ['i', 'd']) # doctest: +SKIP
[(inc, 'x'), 'd']
"""
if isinstance(key, list):
return [expand_key(dsk, fast, item) for item in key]
if not ishashable(key):
return key
if (key in dsk and istask(dsk[key]) and isfast(dsk[key][0])):
task = dsk[key]
return (task[0],) + tuple([expand_key(dsk, fast, k) for k in task[1:]])
else:
return key
def expand_value(dsk, fast, key):
"""
>>> dsk = {'out': (sum, ['i', 'd']),
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> expand_value(dsk, [inc], 'd') # doctest: +SKIP
(double, 'y')
>>> expand_value(dsk, [inc], 'i') # doctest: +SKIP
(inc, 'x')
>>> expand_value(dsk, [inc], 'out') # doctest: +SKIP
(sum, [(inc, 'x'), 'd'])
"""
task = dsk[key]
if not istask(task):
return task
func, args = task[0], task[1:]
return (func,) + tuple([expand_key(dsk, fast, arg) for arg in args])
'''
`get`
-----
The main function of the scheduler. Get is the main entry point.
'''
def get(dsk, result, nthreads=psutil.NUM_CPUS, cache=None, debug_counts=None, **kwargs):
""" Threaded cached implementation of dask.get
Parameters
----------
dsk: dict
A dask dictionary specifying a workflow
result: key or list of keys
Keys corresponding to desired data
nthreads: integer of thread count
The number of threads to use in the ThreadPool that will actually execute tasks
cache: dict-like (optional)
Temporary storage of results
debug_counts: integer or None
This integer tells how often the scheduler should dump debugging info
Examples
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> get(dsk, 'w')
4
>>> get(dsk, ['w', 'y'])
(4, 2)
"""
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = set([result])
results = set(result_flat)
pool = ThreadPool(nthreads)
state = start_state_from_dask(dsk, cache=cache)
queue = Queue()
#lock for state dict updates
#When a task completes, we need to update several things in the state dict.
#To make sure the scheduler is in a safe state at all times, the state dict
# needs to be updated by only one thread at a time.
lock = Lock()
tick = [0]
if not state['ready']:
raise ValueError("Found no accessible jobs in dask")
def fire_task():
""" Fire off a task to the thread pool """
# Update heartbeat
tick[0] += 1
# Emit visualization if called for
if debug_counts and tick[0] % debug_counts == 0:
visualize(dsk, state, filename='dask_%03d' % tick[0])
# Choose a good task to compute
key = choose_task(state)
state['ready'].remove(key)
state['running'].add(key)
# Submit
pool.apply_async(execute_task, args=[dsk, key, state, queue, results,
lock])
try:
# Seed initial tasks into the thread pool
with lock:
while state['ready'] and len(state['running']) < nthreads:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while state['waiting'] or state['ready'] or state['running']:
key, finished_task, res, tb = queue.get()
if isinstance(res, Exception):
import traceback
traceback.print_tb(tb)
raise res
with lock:
while state['ready'] and len(state['running']) < nthreads:
fire_task()
finally:
# Clean up thread pool
pool.close()
pool.join()
# Final reporting
while not queue.empty():
key, finished_task, res, tb = queue.get()
# print("Finished %s" % str(finished_task))
if debug_counts:
visualize(dsk, state, filename='dask_end')
return nested_get(result, state['cache'])
'''
Debugging
---------
The threaded nature of this project presents challenging to normal unit-test
and debug workflows. Visualization of the execution state has value.
Our main mechanism is a visualization of the execution state as colors on our
normal dot graphs (see dot module).
'''
def visualize(dsk, state, filename='dask'):
""" Visualize state of compputation as dot graph """
from dask.dot import dot_graph, write_networkx_to_dot
g = state_to_networkx(dsk, state)
write_networkx_to_dot(g, filename=filename)
def state_to_networkx(dsk, state):
""" Convert state to networkx for visualization
See Also:
visualize
"""
from .dot import to_networkx
data, func = color_nodes(dsk, state)
return to_networkx(dsk, data_attributes=data, function_attributes=func)
| 28.340426 | 88 | 0.569416 |
8863c6f90ec7586192322c6a14b72934c7e25e1f | 495 | py | Python | ToDo/todoapp/migrations/0003_auto_20200901_1842.py | UTKx/vigilant-octo-waffle | 77beec55877bb12ebb4d2db5a8f673c32cfe69de | [
"MIT"
] | 1 | 2020-09-04T22:07:23.000Z | 2020-09-04T22:07:23.000Z | ToDo/todoapp/migrations/0003_auto_20200901_1842.py | UTKx/vigilant-octo-waffle | 77beec55877bb12ebb4d2db5a8f673c32cfe69de | [
"MIT"
] | null | null | null | ToDo/todoapp/migrations/0003_auto_20200901_1842.py | UTKx/vigilant-octo-waffle | 77beec55877bb12ebb4d2db5a8f673c32cfe69de | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-01 18:42
from django.db import migrations, models
import django.db.models.deletion
| 24.75 | 126 | 0.646465 |
886441bce8027fa3056e6171153d3d8c51ba7d04 | 360 | py | Python | yachter/courses/management/commands/courses_export_static.py | rcoup/yachter | 8a73af1b5205f194000a6eac5974b3751d5d00f5 | [
"Apache-2.0"
] | 1 | 2015-10-09T11:07:32.000Z | 2015-10-09T11:07:32.000Z | yachter/courses/management/commands/courses_export_static.py | rcoup/yachter | 8a73af1b5205f194000a6eac5974b3751d5d00f5 | [
"Apache-2.0"
] | null | null | null | yachter/courses/management/commands/courses_export_static.py | rcoup/yachter | 8a73af1b5205f194000a6eac5974b3751d5d00f5 | [
"Apache-2.0"
] | null | null | null | from django.core.management.base import LabelCommand
from yachter.courses.utils import export_static_html
| 30 | 72 | 0.75 |
8864cc357b1ab216b6ae36ff17356348ff1a4bee | 6,163 | py | Python | deprecated/test_01_job_cli.py | cloudmesh/cloudmesh-queue | 8a299c8a4915916c9214d4b9e681da4a1b36bfd4 | [
"Apache-2.0"
] | null | null | null | deprecated/test_01_job_cli.py | cloudmesh/cloudmesh-queue | 8a299c8a4915916c9214d4b9e681da4a1b36bfd4 | [
"Apache-2.0"
] | 12 | 2020-12-18T09:57:49.000Z | 2020-12-28T12:34:15.000Z | deprecated/test_01_job_cli.py | cloudmesh/cloudmesh-queue | 8a299c8a4915916c9214d4b9e681da4a1b36bfd4 | [
"Apache-2.0"
] | null | null | null | ###############################################################
# cms set host='juliet.futuresystems.org'
# cms set user=$USER
#
# pytest -v --capture=no tests/test_01_job_cli.py
# pytest -v tests/test_01_job_cli.py
# pytest -v --capture=no tests/test_01_job_cli.py::TestJob::<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.Shell import Shell
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.util import HEADING
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.variables import Variables
from cloudmesh.configuration.Configuration import Configuration
from textwrap import dedent
from cloudmesh.common.util import path_expand
import oyaml as yaml
import re
import time
import getpass
Benchmark.debug()
variables = Variables()
print(variables)
variables["jobset"] = path_expand("./a.yaml")
configured_jobset = variables["jobset"]
remote_host_ip = variables['host'] or 'juliet.futuresystems.org'
remote_host_user = variables['user'] or getpass.getuser()
| 28.013636 | 80 | 0.577154 |
88663926b411e82cb276e8ee0d40df6d2b4d5fe4 | 3,370 | py | Python | source/pysqlizer-cli.py | slafi/pysqlizer | 871ad922d42fd99a59dd33091ea3eaa4406542b4 | [
"MIT"
] | null | null | null | source/pysqlizer-cli.py | slafi/pysqlizer | 871ad922d42fd99a59dd33091ea3eaa4406542b4 | [
"MIT"
] | null | null | null | source/pysqlizer-cli.py | slafi/pysqlizer | 871ad922d42fd99a59dd33091ea3eaa4406542b4 | [
"MIT"
] | 1 | 2020-01-05T05:36:58.000Z | 2020-01-05T05:36:58.000Z | import argparse
import time
from pathlib import Path
from logger import get_logger
from csv_reader import CSVReader
from utils import infer_type, clear_console
from sql_generator import SQLGenerator
if __name__ == "__main__":
## Clear console
clear_console()
## get logger
logger = get_logger('pysqlizer')
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='', help='Input CSV filename', metavar='infile', required=True)
parser.add_argument('-o', '--output', type=str, default='', help='Output SQL filename', metavar='outfile')
parser.add_argument('-t', '--table_name', type=str, default='', help='SQL table name', metavar='tname')
parser.add_argument('-d', '--db_name', type=str, default='', help='SQL database name', metavar='dbname')
parser.add_argument('-s', '--delimiter', type=str, default='', help='CSV file delimiter', metavar='delimiter')
parser.add_argument('-v', '--version', help='Show the program version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
#print(args)
logger.info('Starting PySQLizer...')
# Get arguments
input_file = args.input
output_file = args.output
table_name = args.table_name
database_name = args.db_name
delimiter = args.delimiter if args.delimiter else ','
## Check input file (type, existence and extension)
infile = Path(input_file)
if infile.is_dir():
logger.error('The file {} is a directory!'.format(input_file))
quit()
if not infile.exists():
logger.debug('The file {} does not exist!'.format(input_file))
quit()
if not infile.suffix.lower() == '.csv':
logger.error('The extension of the file {} is not CSV!'.format(input_file))
quit()
if output_file == '':
output_file = infile.stem
if table_name == '':
table_name = 'tname'
try:
logger.info('Reading CSV file: {}'.format(input_file))
start_time = time.perf_counter()
## Create CSV reader instance
csv_reader = CSVReader(input_file)
csv_reader.read_file(delimiter=delimiter)
csv_reader.extract_header_fields()
csv_reader.check_data_sanity()
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
logger.info('Generating SQL instructions...')
start_time = time.perf_counter()
## Create SQL generator instance
sql_generator = SQLGenerator()
table_query = sql_generator.create_sql_table(table_name=table_name, columns=csv_reader.keys, db_name=database_name)
insert_query = sql_generator.insert_data(tablename=table_name, columns=csv_reader.keys, data=csv_reader.data)
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
logger.info('Saving SQL file: {}'.format(output_file + '.sql'))
start_time = time.perf_counter()
sql_generator.save_sql_file(filename=output_file, table_structure_query=table_query, insert_query=insert_query)
end_time = time.perf_counter()
logger.info('Elapsed time: {}s'.format(end_time-start_time))
except Exception as e:
logger.error('{}'.format(e.args))
| 35.104167 | 123 | 0.663205 |
88672f1ee1e8b7ba396e5278ca480986acfefed4 | 854 | py | Python | MergeIntervals56.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | MergeIntervals56.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | MergeIntervals56.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | # Definition for an interval.
solution = Solution()
ans = solution.merge([Interval(1, 4), Interval(2, 3)])
for i in ans:
print(i.start, i.end)
| 24.4 | 54 | 0.529274 |
88681b8ce61bdcea470b1b26564a91d9e24035aa | 221 | py | Python | Training/mangement_system.py | Orleanslindsay/Python_Programming | dacc08090e9ebf9eb43aec127ee3e2e3cdcb4f55 | [
"MIT"
] | 1 | 2021-08-16T10:25:01.000Z | 2021-08-16T10:25:01.000Z | Training/mangement_system.py | Orleanslindsay/Python_Programming | dacc08090e9ebf9eb43aec127ee3e2e3cdcb4f55 | [
"MIT"
] | null | null | null | Training/mangement_system.py | Orleanslindsay/Python_Programming | dacc08090e9ebf9eb43aec127ee3e2e3cdcb4f55 | [
"MIT"
] | null | null | null | from tkinter import *
import mariadb
root = Tk()
root.title('SCHOOL MANAGEMENT')
root.geometry("900x700")
counter=2
for i in range(1,20):
label=Entry(root).grid(row=counter,column=0)
counter += 2
root.mainloop() | 13 | 45 | 0.710407 |
88688860861603e2b3b947fdc9d58f769c86e31d | 2,742 | py | Python | FlopyAdapter/MtPackages/SftAdapter.py | inowas/InowasFlopyAdapter | 43ddf223778693ea5e7651d7a55bef56deff0ad5 | [
"MIT"
] | null | null | null | FlopyAdapter/MtPackages/SftAdapter.py | inowas/InowasFlopyAdapter | 43ddf223778693ea5e7651d7a55bef56deff0ad5 | [
"MIT"
] | null | null | null | FlopyAdapter/MtPackages/SftAdapter.py | inowas/InowasFlopyAdapter | 43ddf223778693ea5e7651d7a55bef56deff0ad5 | [
"MIT"
] | 1 | 2020-09-27T23:26:14.000Z | 2020-09-27T23:26:14.000Z | import flopy.mt3d as mt
| 26.882353 | 67 | 0.486871 |
8869c83d02e1a922baaa9130b61848763de1897f | 2,089 | py | Python | src/payoff_landscape.py | khozzy/phd | 9a05572a6960d948320669c51e0c80bb9d037d4a | [
"CC-BY-4.0"
] | null | null | null | src/payoff_landscape.py | khozzy/phd | 9a05572a6960d948320669c51e0c80bb9d037d4a | [
"CC-BY-4.0"
] | null | null | null | src/payoff_landscape.py | khozzy/phd | 9a05572a6960d948320669c51e0c80bb9d037d4a | [
"CC-BY-4.0"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from collections import namedtuple
from typing import Dict
from src.visualization import diminishing_reward_colors, PLOT_DPI
StateAction = namedtuple('StateAction', 'id state action')
| 34.245902 | 100 | 0.691719 |
886c6ba2396b2e58f84b93dfe9961e27c379d6bf | 520 | py | Python | fly/ModelStart.py | cheburakshu/fly | d452af4b83e4cb0f8d0094bf1e0c1b407d39bdf5 | [
"Apache-2.0"
] | null | null | null | fly/ModelStart.py | cheburakshu/fly | d452af4b83e4cb0f8d0094bf1e0c1b407d39bdf5 | [
"Apache-2.0"
] | null | null | null | fly/ModelStart.py | cheburakshu/fly | d452af4b83e4cb0f8d0094bf1e0c1b407d39bdf5 | [
"Apache-2.0"
] | null | null | null | import time
import sys
import threading
import asyncio
# fly
from .ModelBootstrap import ModelBootstrap
from . import ModelManager
#runForEver = threading.Event()
# Expects a .conf for the model. It should be availble in config folder
#modelConf='calculator.conf' #sys.argv[1]
#bootstrap(modelConf)
# This will wait forever.
#
#runForEver.wait()
| 21.666667 | 72 | 0.725 |
886c941fa641d07a3da73aedf1058de8f4d4b127 | 569 | py | Python | newss.py | krishnansuki/daily-news | 3b03ea4bcd0aed8ddf69d91128bfce1f3d9192c0 | [
"Apache-2.0"
] | 1 | 2020-08-01T04:04:34.000Z | 2020-08-01T04:04:34.000Z | newss.py | krishnansuki/daily-news | 3b03ea4bcd0aed8ddf69d91128bfce1f3d9192c0 | [
"Apache-2.0"
] | null | null | null | newss.py | krishnansuki/daily-news | 3b03ea4bcd0aed8ddf69d91128bfce1f3d9192c0 | [
"Apache-2.0"
] | null | null | null | import feedparser
allheadlines = []
newsurls={'googlenews': 'https://news.google.com/news/rss/?h1=ta&ned=us&gl=IN',}# I used IN in this line for indian news instead of that you can use your capital's
for key, url in newsurls.items():
allheadlines.extend(getHeadLines(url))
for h in allheadlines:
print(h)
| 35.5625 | 172 | 0.692443 |
886e4ad951e13066662fa39167df5cb479e0a992 | 664 | py | Python | features/steps/public_pages.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 6 | 2021-05-12T08:40:36.000Z | 2022-01-25T08:31:06.000Z | features/steps/public_pages.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 14 | 2021-05-12T09:03:08.000Z | 2021-06-10T13:18:52.000Z | features/steps/public_pages.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 1 | 2021-05-14T20:54:15.000Z | 2021-05-14T20:54:15.000Z | from behave import when, then
from application.models import Member
| 22.896552 | 77 | 0.709337 |
886fdb38b86a90cbac81f513da517e4152656447 | 2,824 | py | Python | examples/05_fields.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | 3 | 2016-05-30T06:45:29.000Z | 2017-08-30T19:58:56.000Z | examples/05_fields.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | null | null | null | examples/05_fields.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | 5 | 2016-12-13T02:05:31.000Z | 2018-03-23T07:11:30.000Z | """
Displaying the fields in an xy cross section of the sphere (x polarized light, z-propagating)
"""
import numpy as np
import matplotlib.pyplot as plt
import miepy
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
Ag = miepy.materials. Ag()
# calculate scattering coefficients, 800 nm illumination
radius = 200e-9 # 200 nm radius
lmax = 5 # Use up to 5 multipoles
sphere = miepy.single_mie_sphere(radius, Ag, 800e-9, lmax)
# create discretized xy plane
x = np.linspace(-2*radius,2*radius,100)
y = np.linspace(-2*radius,2*radius,100)
z = np.array([radius*0.0])
X,Y,Z = np.meshgrid(x,y,z, indexing='xy')
R = (X**2 + Y**2 + Z**2)**0.5
THETA = np.arccos(Z/R)
PHI = np.arctan2(Y,X)
# electric and magnetic field functions
E_func = sphere.E_field(index=0)
E = E_func(R,THETA,PHI).squeeze()
IE = np.sum(np.abs(E)**2, axis=0)
H_func = sphere.H_field(index=0)
H = H_func(R,THETA,PHI).squeeze()
IH = np.sum(np.abs(H)**2, axis=0)
# plot results
fig,axes = plt.subplots(ncols=2, figsize=plt.figaspect(1/2.7))
for i,ax in enumerate(axes):
plt.subplot(ax)
I = IE if i == 0 else IH
plt.pcolormesh(np.squeeze(X)*1e9,np.squeeze(Y)*1e9, I, shading="gouraud", cmap=cm.viridis)
plt.colorbar(label='field intensity')
THETA = np.squeeze(THETA)
PHI = np.squeeze(PHI)
for i,ax in enumerate(axes):
F = E if i == 0 else H
Fx = F[0]*np.sin(THETA)*np.cos(PHI) + F[1]*np.cos(THETA)*np.cos(PHI) - F[2]*np.sin(PHI)
Fy = F[0]*np.sin(THETA)*np.sin(PHI) + F[1]*np.cos(THETA)*np.sin(PHI) + F[2]*np.cos(PHI)
step=10
ax.streamplot(np.squeeze(X)*1e9, np.squeeze(Y)*1e9, np.real(Fx), np.real(Fy), color='white', linewidth=1.0)
for ax in axes:
ax.set(xlim=[-2*radius*1e9, 2*radius*1e9], ylim=[-2*radius*1e9, 2*radius*1e9],
aspect='equal', xlabel="X (nm)", ylabel="Y (nm)")
axes[0].set_title("Electric Field")
axes[1].set_title("Magnetic Field")
plt.show()
# theta = np.linspace(0,np.pi,50)
# phi = np.linspace(0,2*np.pi,50)
# r = np.array([10000])
# R,THETA,PHI = np.meshgrid(r,theta,phi)
# X = R*np.sin(THETA)*np.cos(PHI)
# Y = R*np.sin(THETA)*np.sin(PHI)
# Z = R*np.cos(THETA)
# X = X.squeeze()
# Y = Y.squeeze()
# Z = Z.squeeze()
# E = E_func(R,THETA,PHI)
# I = np.sum(np.abs(E)**2, axis=0)
# I = np.squeeze(I)
# I -= np.min(I)
# I /= np.max(I)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# shape = X.shape
# C = np.zeros((shape[0], shape[1], 4))
# cmap_3d = cm.viridis
# for i in range(shape[0]):
# for j in range(shape[1]):
# C[i,j,:] = cmap_3d(I[i,j])
# surf = ax.plot_surface(X*1e9, Y*1e9, Z*1e9, rstride=1, cstride=1,shade=False, facecolors=C,linewidth=.0, edgecolors='#000000', antialiased=False)
# m = cm.ScalarMappable(cmap=cmap_3d)
# m.set_array(I)
# plt.colorbar(m)
# surf.set_edgecolor('k')
# ax.set_xlabel('X')
| 29.726316 | 147 | 0.645892 |
887136302539945d1d8fc0fd52d9556bdb55e9ef | 13,616 | py | Python | pya2a/models.py | LvanWissen/pya2a | d8a7848ba408850aedd79d18ad2816524499f528 | [
"MIT"
] | null | null | null | pya2a/models.py | LvanWissen/pya2a | d8a7848ba408850aedd79d18ad2816524499f528 | [
"MIT"
] | null | null | null | pya2a/models.py | LvanWissen/pya2a | d8a7848ba408850aedd79d18ad2816524499f528 | [
"MIT"
] | null | null | null | import datetime
import dateutil.parser
import xml
import xml.etree.ElementTree
from pya2a.utils import parseRemark
| 32.809639 | 107 | 0.563234 |
887318ae1de3947a937b8a9fff9e751422b6ec84 | 4,059 | py | Python | python/quantization.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 31 | 2016-01-27T07:03:25.000Z | 2022-02-25T07:59:11.000Z | python/quantization.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2015-01-26T01:27:21.000Z | 2015-01-30T16:16:30.000Z | python/quantization.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 | 2017-02-07T04:17:56.000Z | 2020-06-12T05:01:31.000Z | import numpy as np
min, max = -0.8777435, 0.57090986
M = np.asmatrix([
[0.02355068, -0.50542802, 0.16642167, -0.44872788, -0.05130898, 0.13320047, 0.41464597, -0.55703336, 0.52567458, 0.23784444, 0.15049535, 0.16599870, -0.28757980, 0.22277315, 0.56460077, -0.70838273, -0.61990398, -0.39724344, -0.09969769, 0.45835119, 0.02840372, 0.09637213, 0.04063996, -0.16667950, -0.68209213, -0.09524837, 0.27514741, 0.02957204, -0.11251312, -0.43414843],
[-0.31239739, -0.13213386, -0.59719753, -0.16117097, 0.29835659, -0.21633907, -0.55013347, -0.22406115, -0.47912723, -0.08179668, 0.46718585, 0.38543564, -0.49470344, -0.35172677, -0.23060481, -0.39899889, -0.18135746, -0.54352880, -0.28287631, -0.05576789, 0.20255803, 0.18899839, 0.36582524, 0.43294433, 0.21794824, -0.62954980, -0.52842420, 0.00261285, 0.23226254, 0.27430296],
[-0.12496945, 0.27272177, 0.09565081, -0.19869098, 0.40514281, 0.30038768, -0.13575996, -0.01735646, 0.31392211, -0.34690821, -0.26467761, 0.27735108, 0.25757775, 0.56070799, 0.48236406, -0.16126287, -0.56543708, -0.52047604, 0.31337339, 0.31964961, -0.19712290, 0.29141095, 0.25103137, -0.49437916, -0.00175839, -0.39314604, -0.46974984, -0.24069642, -0.07134162, 0.38584659],
[-0.22494942, -0.23908727, -0.14118181, 0.25917593, -0.46544874, 0.21652603, 0.11955780, -0.08858330, 0.11210553, 0.15425776, 0.35051644, 0.12857421, -0.31161663, -0.10459967, 0.28051424, 0.35245281, 0.21058421, -0.38336727, -0.53721315, -0.45408809, 0.17018577, 0.37464410, 0.25320616, -0.50858176, 0.03510477, 0.28646398, -0.49693882, 0.31466347, 0.34066224, 0.39151987],
[-0.24122262, -0.18464386, -0.50166339, -0.06581594, 0.23343681, -0.28764677, -0.28263095, 0.47374201, -0.14122090, 0.41170570, -0.27171388, -0.76247406, -0.43367779, -0.41885039, -0.58815128, 0.16303478, -0.15360811, 0.40358800, 0.28507465, 0.11577206, -0.05193469, 0.10712312, 0.37356687, 0.17525157, -0.61338550, 0.28956139, 0.04172062, 0.19050168, -0.36498675, -0.48431775],
[0.20951799, -0.57114357, 0.16709965, 0.28986153, -0.48571789, 0.17514014, 0.42663154, -0.58854365, -0.49951825, -0.69118619, -0.12997085, 0.20892869, -0.27441102, 0.25154045, 0.33150116, 0.22571780, 0.00198699, -0.21132891, 0.54626226, -0.39937377, 0.09991331, 0.16465400, -0.31479383, 0.19637901, 0.27371463, -0.35296553, 0.32819411, 0.33079246, 0.09111243, -0.15263695],
[0.23110701, -0.82688808, 0.35345000, -0.63799143, 0.10259465, -0.67562747, 0.06791017, -0.55785728, 0.11328468, 0.03148035, 0.06963930, -0.40473521, 0.15695126, 0.10480986, 0.06786098, 0.05529213, -0.06358500, 0.39808711, -0.46259707, -0.41053730, 0.23919414, 0.06440434, -0.55259717, 0.17278855, -0.26870996, -0.59644037, -0.20437278, -0.15572956, -0.62037915, 0.20436110],
[0.43668377, 0.03184615, -0.79770166, 0.30957624, -0.29246098, 0.41470772, -0.39726156, 0.08003121, 0.32232824, 0.18267424, -0.46286914, -0.52988207, 0.40305007, 0.43693665, 0.57090986, -0.71393168, 0.16701773, -0.01028878, 0.03239791, -0.39907083, 0.20838976, 0.25748143, 0.24718748, -0.05084279, -0.52348840, -0.07115566, -0.33007148, 0.18890919, 0.40487564, 0.28275076],
[0.00545317, 0.05541809, -0.29821581, -0.69852740, 0.23890208, -0.58182591, 0.37835562, -0.12874492, -0.24086623, -0.18621640, 0.20001458, -0.55234039, 0.40093267, 0.19279823, -0.56214923, -0.12595257, -0.13790886, 0.04751531, -0.31666499, 0.33546147, 0.19133377, 0.01450487, -0.69050521, -0.15352796, 0.31702802, 0.13524684, 0.08716883, 0.35998338, 0.36140910, -0.18685688],
[0.13561521, 0.09853959, 0.23551922, -0.37978131, -0.26070073, 0.43132550, -0.10494933, 0.07914228, 0.04663205, -0.41666678, 0.16825140, 0.51182604, 0.13776678, -0.68972874, -0.72430468, -0.10668162, 0.29812980, -0.13480635, -0.66627938, 0.01717626, -0.11104345, 0.31376141, 0.39751169, -0.19769318, -0.28220543, 0.13042673, 0.42700538, 0.08965667, 0.18087055, -0.87774348],
])
S = (max - min)/127.0
result = np.clip(np.ceil(M/S).astype(int), -128, 127).tolist()
print(
'\n'.join(
' '.join(str(e) for e in row)
for row in result
)
)
| 156.115385 | 384 | 0.700172 |
8873474612b143bfb3dcc0cc047dd90038304a53 | 3,391 | py | Python | Modulo_01/Aula09-ManipulandoStrings.py | rodrigojackal/Python-Guanabara | 8bfbbbd7b4549b9235d9b6fcb3a0c3d00eb384f2 | [
"MIT"
] | null | null | null | Modulo_01/Aula09-ManipulandoStrings.py | rodrigojackal/Python-Guanabara | 8bfbbbd7b4549b9235d9b6fcb3a0c3d00eb384f2 | [
"MIT"
] | null | null | null | Modulo_01/Aula09-ManipulandoStrings.py | rodrigojackal/Python-Guanabara | 8bfbbbd7b4549b9235d9b6fcb3a0c3d00eb384f2 | [
"MIT"
] | null | null | null | # Aula 09 - Manipulando de cadeias de texto (Strings)
"""
Tcnica de Fatiamento
Frase = Curso em Video Python
Frase [9]: letra especfica
Frase [9:13]: Vai pegar do 9 ao 12 (menos um no final)
Frase [9:21:2]: Pula de 2 em 2
Frase [:5]: Ir comear no primeiro caracter e terminar no 4 (excluindo o nmero 5)
Frase [15:]: Indiquei o nicio at o final
Frase [9::3]: Comea no 9 e vai at o final, porm pulando de 3 em 3
# Anlise
len(frase): Ir ler o tamanho da frase e mostrar a quantidade de caracter.
frase.count('o'): Conta quantos caracteres escolhidos tem na frase.
frase.count('o',0,13): Fazendo uma contagem do 0 ao 12 e informar quantos caracteres tem neste conjunto.
frase.find('deo'): Neste ponto ir mostrar qual a posio esta a frase.
frase.find('Android'): Sinal que ele ir retornar menos -1, dizendo que o string no existe.
'Curso' in frase: Mostra se existe ou no a string na variavel.
# Transformao
frase.replace('Python','Android'): Ir substituir a frase encontrada com a frase escolhida.
frase.upper(): Ir converter tudo para maiscula.
frase.lower(): Ir converter tudo para minscula.
frase.capitalize(): Converte apenas a primeira letra altera para mascula e o resto ficaria em minscula.
frase.title(): Converte a primeira letra da frase para maiscula.
frase.strip(): Remove espaos inteis da string.
frase.rstrip(): Remove espaos inteis da string a direita.
frase.lstrip(): Remove espaos inteis da string a esquerda.
# Diviso
frase.split(procurar as funes): A frase ser dividida com base nos espaos da string em lista.
# Juno
'-'.join(frase): Ir juntar as frase que foram feito de lista acima para transformar em string nica
com - ao invs do espao.
# Dica
Para escrever um texto grande sem precisar colocar vrios prints, coloque tudo dentro de um comentrio.
Para forar a atualizao da frase ser preciso:
frase = 'Curso em Vdeo Python'
frase = frase.replace('Python','Android')
print(frase)
print("""
Github: http://github.com/rodrigojackal
Twitter: @RodrigoJackal
Skype: rodrigo.jackal
Linkedin: https://www.linkedin.com/in/rodrigo-ferreira-santos-andrade/
""")
"""
# Desafios
"""
Desafio 022 - Analisador de Texto: Crie um programa que leia o nome completo de uma pessoa e mostre:
O nome com todas as letras maisculas
O nome com todas as letras minsculas
Quantas letras ao todo (sem considerar espaos)
Quantas letras tem o primeiro nome.
Desafio 023 - Separando digitos de um nmero: Faa um programa que leia um nmero de 0 a 9999
e mostre na tela cada um dos dgitos separados.
Ex: Digite um nmero: 1834
Unidade: 4
Dezena: 3
Centena: 8
Milhar: 1
Desafio 024 - Verificando as primeiras letras de um texto: Crie um programa que leia o nome de uma cidade e diga
se ela comea ou no com o nome "SANTO"
Desafio 025 - Procurando uma string dentro de outra: Crie um programa que leia o nome de uma pessoa e diga
se ela tem "SILVA" no nome.
Desafio 026 - Primeira e ltima ocorrncia de uma string: Faa um programa que leia uma frase pelo teclado
e mostre:
Quantas vezes aparece a letra "A".
Em que posio ela aparece a primeira vez.
Em que posio ela aparece a ltima vez.
Desafio 027 - Primeiro e ltimo nome de uma pessoa: Faa um programa que leia o nome completo de uma pessoa,
mostrando em seguida o primeiro e o ltimo nome separadamente.
Ex: Ana Maria de Souza
Primeiro: Ana
ltimo: Souza
"""
| 33.245098 | 112 | 0.760248 |
88735f42e1881dd628983cb3ca2947d11d481c02 | 2,575 | py | Python | {{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/models/train.py | vasinkd/cookiecutter-data-science | 3b1be4b701198b73e9701af498381a2c55da9fe6 | [
"MIT"
] | 2 | 2020-03-26T22:06:11.000Z | 2021-04-02T08:52:38.000Z | {{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/models/train.py | vasinkd/cookiecutter-data-science | 3b1be4b701198b73e9701af498381a2c55da9fe6 | [
"MIT"
] | 1 | 2019-10-19T13:36:33.000Z | 2019-10-19T13:36:33.000Z | {{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/models/train.py | vasinkd/cookiecutter-data-science | 3b1be4b701198b73e9701af498381a2c55da9fe6 | [
"MIT"
] | 8 | 2019-10-17T20:32:07.000Z | 2022-03-10T15:28:49.000Z | import optuna
from {{cookiecutter.repo_name}}.utils import check_args_num, \
read_config, set_random_seed, str_hash, file_hash
from {{cookiecutter.repo_name}}.settings import optuna_db_path
def get_objective(config):
    """
    more on optuna objectives:
    https://optuna.readthedocs.io/en/stable/faq.html

    Stub: should build and return a callable ``objective(trial)`` from the
    given config, for use with ``study.optimize`` in the __main__ block.
    """
    raise NotImplementedError
def check_descr_unique(data_descr, data_hash):
    """
    raises if database contains a row with the same data description
    but different data hash

    Stub: guards against reusing one human-readable data description for
    two different datasets (datasets are identified by their content hash).
    """
    raise NotImplementedError
def create_predictor():
    """
    Creates a predictor object using inference stages and model object
    """
    # Stub: the returned predictor is serialized and timed with
    # measure_inference_time in the __main__ block below.
    raise NotImplementedError
def measure_inference_time(predictor):
    """
    Measures and returns the inference time of the given predictor
    (stored as the study's "inference_time" user attribute by the
    training script).
    """
    raise NotImplementedError
if __name__ == "__main__":
    # CLI layout: config file, training data (X, y), then output paths for
    # the best model, the predictor, the metric value and the study name.
    _, config_file, X_file, y_file, best_model_path, predictor_file, \
        metrics_file, study_name_file = check_args_num(8)
    set_random_seed()
    # The study identity is derived from the data content plus the algorithm
    # name, so re-running on identical data resumes the same optuna study.
    data_hash = str_hash(file_hash(X_file) + file_hash(y_file))
    config = read_config(config_file)
    objective_name = config.get('algo_name')
    study_name = str_hash(data_hash + objective_name)
    # NOTE(review): read_inp_file and write_output are not imported above --
    # confirm they are brought into scope elsewhere in this module.
    X = read_inp_file(X_file)
    y = read_inp_file(y_file)
    objective = get_objective(config)
    sampler = optuna.samplers.TPESampler(seed=None)
    study = optuna.create_study(optuna_db_path, study_name=study_name,
                                sampler=sampler, load_if_exists=True)
    data_descr = config.get('data_descr')
    check_descr_unique(data_descr, data_hash)
    study.set_user_attr("data_description", data_descr)
    study.set_user_attr("data_hash", data_hash)
    study.set_user_attr("algo_name", objective_name)
    try:
        study.optimize(objective, n_trials=config.get('n_trials'))
    except KeyboardInterrupt:
        # Allow stopping the search early; results so far are still written.
        pass
    # NOTE(review): '{:6f}' is width-6 fixed point; '{:.6f}' (six decimal
    # places) may have been intended -- confirm before changing.
    write_output('{:6f}\n'.format(study.best_value), metrics_file)
    write_output('{}\n'.format(study_name), study_name_file)
    # Persist model/predictor only when this run reproduced the study's best
    # value (within the configured metric precision).
    if (study.best_value is not None) and (objective.best_result is not None) \
            and ((objective.best_result - study.best_value)
                 < config['metric_precision']):
        write_output(objective.best_model, best_model_path)
        predictor = create_predictor()
        write_output(predictor, predictor_file)
        inf_time = measure_inference_time(predictor)
        study.set_user_attr("inference_time", inf_time)
88737c6f1857632bec14f3d69ee844444dd65d17 | 2,221 | py | Python | musiker_fille_bot.py | pranay414/musiker_fille_bot | 55d87a3bfdbaf8b99b5ca86c6f7a433cd6280d42 | [
"MIT"
] | null | null | null | musiker_fille_bot.py | pranay414/musiker_fille_bot | 55d87a3bfdbaf8b99b5ca86c6f7a433cd6280d42 | [
"MIT"
] | 1 | 2017-12-24T11:18:07.000Z | 2017-12-25T20:29:18.000Z | musiker_fille_bot.py | pranay414/musiker_fille_bot | 55d87a3bfdbaf8b99b5ca86c6f7a433cd6280d42 | [
"MIT"
] | null | null | null | # - *- coding: utf- 8 - *-
""" Bot to suggest music from Spotify based on your mood.
"""
import spotipy, os
from spotipy.oauth2 import SpotifyClientCredentials
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
#from access_token import AUTH_TOKEN, CLIENT_ID, CLIENT_SECRET
# Intialise spotipy
client_credentials_manager = SpotifyClientCredentials(client_id=os.environ['CLIENT_ID'], client_secret=os.environ['CLIENT_SECRET'])
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# Define command handlers. They usually take two arguments bot and update
# In case of error handler they recieve TelegramError object in error
def main():
    """Start the bot: register command handlers and poll Telegram for updates."""
    # Create event handler and pass it your bot's token
    updater = Updater(os.environ['AUTH_TOKEN'])
    # Get dispatcher to register handlers
    dispatcher = updater.dispatcher
    print("Bot started!")
    # On different commands - answer in Telegram
    # NOTE(review): the callbacks start/help/new/sorry are not defined in this
    # part of the file -- confirm they exist elsewhere in the module.
    dispatcher.add_handler(CommandHandler('start', start))
    dispatcher.add_handler(CommandHandler('help', help))
    dispatcher.add_handler(CommandHandler('new', new))
    # dispatcher.add_handler(CommandHandler(''))
    # On non-command i.e message - echo the message in telegram
    dispatcher.add_handler(MessageHandler(Filters.text, sorry))
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C (blocks until the process is signalled)
    updater.idle()
if __name__ == '__main__':
    main()
| 38.964912 | 185 | 0.722647 |
8875919d1f2a6e03d1eb055a54e6b7d341bfcdca | 1,544 | py | Python | tracker/utils/projects.py | dti-research/tracker | f2384c0c7b631aa9efd39bf606cda8b85187fcc6 | [
"BSD-3-Clause"
] | 1 | 2019-07-25T18:02:37.000Z | 2019-07-25T18:02:37.000Z | tracker/utils/projects.py | dti-research/tracker | f2384c0c7b631aa9efd39bf606cda8b85187fcc6 | [
"BSD-3-Clause"
] | 10 | 2019-08-29T12:27:35.000Z | 2020-01-04T18:40:48.000Z | tracker/utils/projects.py | dti-research/tracker | f2384c0c7b631aa9efd39bf606cda8b85187fcc6 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019, Danish Technological Institute.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
""" Utility code to locate tracker projects
"""
from tracker.tracker_file import TrackerFile
from tracker.utils import cli
from tracker.utils import config
def get_project_names():
    """Searches for Tracker projects at the Tracker home configuration file

    Returns:
        <list> -- List of project names
    """
    projects = TrackerFile().get("projects", {})
    if not projects:
        return []
    # Each entry is a one-item mapping {project_name: project_config};
    # its first (only) key is the project's name.
    return [next(iter(entry)) for entry in projects]
| 21.746479 | 75 | 0.609456 |
8875c6d752de20720e19e0bcf7801a8392c5567b | 220 | py | Python | int/Lib/site-packages/hn/__init__.py | yoniv/shellHN-main | 767e78f54403ebd3193ee9ada8672cfc06705967 | [
"MIT"
] | null | null | null | int/Lib/site-packages/hn/__init__.py | yoniv/shellHN-main | 767e78f54403ebd3193ee9ada8672cfc06705967 | [
"MIT"
] | null | null | null | int/Lib/site-packages/hn/__init__.py | yoniv/shellHN-main | 767e78f54403ebd3193ee9ada8672cfc06705967 | [
"MIT"
] | 1 | 2021-09-22T10:41:46.000Z | 2021-09-22T10:41:46.000Z | """
Python API for Hacker News.
@author Karan Goel
@email karan@goel.im
"""
__title__ = 'hackernews'
__author__ = 'Karan Goel'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Karan Goel'
from .hn import HN, Story
| 15.714286 | 43 | 0.722727 |
887679321d473fff8303fdde842a58ecb25c93a7 | 57 | py | Python | training/constants.py | vlad-danaila/deep_hiv_ab_pred | 651e9174cef9d5f27de328536aec5ebea3af8f3d | [
"MIT"
] | null | null | null | training/constants.py | vlad-danaila/deep_hiv_ab_pred | 651e9174cef9d5f27de328536aec5ebea3af8f3d | [
"MIT"
] | null | null | null | training/constants.py | vlad-danaila/deep_hiv_ab_pred | 651e9174cef9d5f27de328536aec5ebea3af8f3d | [
"MIT"
] | null | null | null | ACCURACY = 0
MATTHEWS_CORRELATION_COEFFICIENT = 1
AUC = 2 | 19 | 36 | 0.807018 |
8876c3bace11ab590dd97932baa4aa09e457abf7 | 2,580 | py | Python | day06.py | AnthonyFloyd/2017-AdventOfCode-Python | ef66ed25fef416f1f5f269810e6039cab53dc6d0 | [
"MIT"
] | null | null | null | day06.py | AnthonyFloyd/2017-AdventOfCode-Python | ef66ed25fef416f1f5f269810e6039cab53dc6d0 | [
"MIT"
] | null | null | null | day06.py | AnthonyFloyd/2017-AdventOfCode-Python | ef66ed25fef416f1f5f269810e6039cab53dc6d0 | [
"MIT"
] | null | null | null | '''
Advent of Code 2017
Day 6: Memory Reallocation
'''
import unittest
# Worked example from the puzzle statement:
# (bank configuration string, expected cycles until repeat, expected loop size)
TEST_BANKS = ('0 2 7 0', 5, 4)
# Puzzle input: sixteen memory banks.
INPUT_BANKS = '0 5 10 0 11 14 13 4 11 8 8 7 1 4 12 11'
def findInfiniteLoop(memoryBanks):
    '''
    Finds the number of redistribution cycles required to detect an infinite
    loop with the given start condition.

    memoryBanks is a list of integers, one per memory bank, giving the number
    of blocks held by that bank.  The list is mutated in place while the
    blocks are redistributed.

    Returns a tuple (iterations until a repeated configuration, loop size).
    '''
    nIterations = 0
    nBanks = len(memoryBanks)

    # Map each configuration (as a space-joined string) to the iteration at
    # which it was first seen.  A dict makes both repeat detection and loop
    # sizing O(1) per cycle, instead of a linear scan over a history list.
    seenAt = {' '.join(str(i) for i in memoryBanks): 0}

    while True:
        # Find the memory bank with the largest quantity (ties resolve to
        # the lowest index, because list.index returns the first match).
        maximumItems = max(memoryBanks)
        index = memoryBanks.index(maximumItems)

        # Redistribute: empty the chosen bank and deal its blocks out one at
        # a time, wrapping around the banks.
        nIterations += 1
        memoryBanks[index] = 0
        for _ in range(maximumItems):
            index += 1
            if index == nBanks:
                index = 0
            memoryBanks[index] += 1

        # Check whether the new configuration has been seen before.
        currentState = ' '.join(str(i) for i in memoryBanks)
        if currentState in seenAt:
            return (nIterations, nIterations - seenAt[currentState])
        seenAt[currentState] = nIterations
# Script entry point: solve both puzzle parts for INPUT_BANKS.
if __name__ == '__main__':
    print('Advent of Code\nDay 6: Memory Reallocation\n')
    (iterations, loopSize) = findInfiniteLoop([int(i) for i in INPUT_BANKS.strip().split()])
    print('Part 1: {0:d} iterations to infinite loop'.format(iterations))
    print('Part 2: The loop is {0:d} iterations'.format(loopSize))
887a62af70424662df05268a24baf2a7aafc6529 | 1,757 | py | Python | iucas/utils.py | rysdyk/django-iucas | d534800c6a1fc6cf3ea5e3f1c0d9bc0dc7a2b4db | [
"BSD-3-Clause"
] | null | null | null | iucas/utils.py | rysdyk/django-iucas | d534800c6a1fc6cf3ea5e3f1c0d9bc0dc7a2b4db | [
"BSD-3-Clause"
] | null | null | null | iucas/utils.py | rysdyk/django-iucas | d534800c6a1fc6cf3ea5e3f1c0d9bc0dc7a2b4db | [
"BSD-3-Clause"
] | 1 | 2020-01-16T20:25:52.000Z | 2020-01-16T20:25:52.000Z | """
Utility Methods for Authenticating against and using Indiana University CAS.
"""
import httplib2
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
def validate_cas_ticket(casticket, casurl):
    """
    Takes a CAS Ticket and makes the out of bound GET request to
    cas.iu.edu to verify the ticket.

    Returns:
        The lines of the validation response body.  Per get_cas_username
        below, the first line is 'yes'/'no' and the second line is the
        username on success.

    NOTE(review): ``casticket`` is never placed in ``validate_url`` even
    though the CAS validate endpoint normally needs the ticket as a query
    parameter -- confirm against the IU CAS documentation.
    """
    validate_url = 'https://%s/cas/validate?cassvc=IU&casurl=%s' % \
        (settings.CAS_HOST, casurl,)
    # Use the configured CA bundle for TLS verification when one is set.
    if hasattr(settings, 'CAS_HTTP_CERT'):
        h = httplib2.Http(ca_certs=settings.CAS_HTTP_CERT)
    else:
        h = httplib2.Http()
    # resp (status/headers) is intentionally unused; only the body matters.
    resp, content = h.request(validate_url,"GET")
    return content.splitlines()
def get_cas_username(casticket, casurl):
    """
    Validates the given casticket and casurl and returns the username of the
    logged in user. If the user is not logged in returns None
    """
    lines = validate_cas_ticket(casticket, casurl)
    # A successful validation is exactly two lines: 'yes' then the username.
    if len(lines) != 2:
        return None
    verdict, username = lines
    return username if verdict == 'yes' else None
| 30.293103 | 76 | 0.632328 |
887a99f77ebc5982239f9bc71d68f9e4f2afc02f | 20,460 | py | Python | blousebrothers/confs/views.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | 1 | 2022-01-27T11:58:10.000Z | 2022-01-27T11:58:10.000Z | blousebrothers/confs/views.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | 5 | 2021-03-19T00:01:54.000Z | 2022-03-11T23:46:21.000Z | blousebrothers/confs/views.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.http import JsonResponse
from decimal import Decimal
from datetime import datetime, timedelta
import re
import logging
from disqusapi import DisqusAPI
from django.contrib import messages
from django.apps import apps
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from djng.views.mixins import JSONResponseMixin, allow_remote_invocation
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import (
DetailView,
ListView,
UpdateView,
CreateView,
FormView,
DeleteView,
TemplateView,
)
from django.conf import settings
import blousebrothers.classifier as cl
from blousebrothers.tools import get_disqus_sso
from blousebrothers.auth import (
BBConferencierReqMixin,
ConferenceWritePermissionMixin,
ConferenceReadPermissionMixin,
TestPermissionMixin,
BBLoginRequiredMixin,
)
from blousebrothers.tools import analyse_conf, get_full_url
from blousebrothers.confs.utils import get_or_create_product
from blousebrothers.users.charts import MonthlyLineChart
from blousebrothers.users.models import User
from .models import (
Conference,
Question,
Answer,
AnswerImage,
ConferenceImage,
QuestionImage,
QuestionExplainationImage,
Item,
Test,
TestAnswer,
)
from .forms import ConferenceForm, ConferenceFinalForm, RefundForm, ConferenceFormSimple
logger = logging.getLogger(__name__)
Product = apps.get_model('catalogue', 'Product')
def ajax_switch_correction(request):
    """
    AJAX endpoint: enable or disable the availability of the correction for
    one of the caller's conferences, driven by the 'state' and 'conf_id'
    GET parameters.
    """
    # The client posts the literal strings 'true'/'false'.
    enabled = request.GET['state'] == 'true'
    conference = request.user.created_confs.get(id=request.GET['conf_id'])
    conference.correction_dispo = enabled
    conference.save()
    return JsonResponse({'success': True})
def ajax_switch_for_sale(request):
    """
    AJAX endpoint: mark one of the caller's conferences as for sale (or
    not), driven by the 'state' and 'conf_id' GET parameters.
    """
    # The client posts the literal strings 'true'/'false'.
    enabled = request.GET['state'] == 'true'
    conference = request.user.created_confs.get(id=request.GET['conf_id'])
    conference.for_sale = enabled
    conference.save()
    return JsonResponse({'success': True})
| 36.34103 | 115 | 0.62219 |
887ad1b6a09fd7cd401ad5b4a47a80e80503fdb2 | 3,177 | py | Python | software/robotClass.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
] | null | null | null | software/robotClass.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
] | 3 | 2018-02-05T23:21:02.000Z | 2018-05-03T02:58:50.000Z | software/robotClass.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
] | null | null | null | import serial
import string
import math
from itertools import chain
| 27.868421 | 95 | 0.678942 |
887cf7708e8f994e5fe9bde9079bf51b3a0cab82 | 4,054 | py | Python | hijackednode/logger.py | Luxter77/Hijacked-Node | 378cbb4201e891e81107b22ba568b3564b2e4197 | [
"BSD-3-Clause"
] | null | null | null | hijackednode/logger.py | Luxter77/Hijacked-Node | 378cbb4201e891e81107b22ba568b3564b2e4197 | [
"BSD-3-Clause"
] | null | null | null | hijackednode/logger.py | Luxter77/Hijacked-Node | 378cbb4201e891e81107b22ba568b3564b2e4197 | [
"BSD-3-Clause"
] | null | null | null | from discord import TextChannel, User
from discord.ext.commands import Bot
from .configuration import CONF0
from tqdm.asyncio import tqdm
# class LogMe:
# """This is a complicated logger I came up with.\n
# Feel free to insult me whilst readding it."""
# _std = {
# "LS": "|-----------------Log_ START-------------------|",
# "ES": "|-----------------ERR_ START-------------------|",
# "EE": "|------------------ERR_ END--------------------|",
# "LE": "|------------------Log_ END--------------------|",
# "!?": "Some unprintable error happened...",
# "!!": "Ah for fucks sake something went horribly wrong!",
# }
# def __init__(self, bot: Bot, config: CONF0):
# self.LogAdmin = set([bot.get_user(Admin) for Admin in config.LogAdmin])
# self.LogChan = set([bot.get_channel(Chan) for Chan in config.LogChan])
# async def __call__(self, st, err_: bool = False, tq: bool = True):
# if err_:
# print(self._std["ES"]) if (tq) else tqdm.write(self._std["ES"])
# print(st) if (tq) else tqdm.write(st)
# try:
# with self.bot.get_channel(self.LogChan) as chan:
# await chan.send()
# if self.LogAdmin:
# await chan.send(
# " ".join([str(admin.mention) for admin in self.LogAdmin])
# )
# await chan.send(st)
# await chan.send(self._std["EE"])
# except Exception:
# try:
# with self.bot.get_channel(self.debug) as chan:
# await chan.send(self._std["ES"])
# try:
# if self.LogAdmin:
# await chan.send(
# " ".join(
# [str(admin.mention) for admin in self.LogAdmin]
# )
# )
# await chan.send(str(st))
# except Exception:
# if self.LogAdmin:
# await chan.send(
# " ".join(
# [str(admin.mention) for admin in self.LogAdmin]
# )
# )
# await chan.send("Some unprintable error happened...")
# await chan.send(self._std["EE"])
# except Exception:
# _std = "Ah for hugs sake something went horribly wrong! AGAIN"
# print(_std) if (tq) else tqdm.write(_std)
# print(self._std["EE"]) if (tq) else tqdm.write(self._std["EE"])
# else:
# print(st) if (tq) else tqdm.write(st)
# try:
# with self.bot.get_channel(self.debug) as chan:
# await chan.send(st)
# except Exception:
# try:
# with self.bot.get_channel(self.debug) as chan:
# try:
# try:
# await chan.send(st)
# except Exception:
# await chan.send(str(type(st)))
# await chan.send(str(st))
# except Exception:
# await chan.send(self._std["!?"])
# except Exception:
# await self(self._std["!!"], True)
# def add_LogChan(self, Chan: TextChannel) -> None:
# self.Logchan.add(Chan)
# def del_LogChan(self, Chan: TextChannel) -> None:
# self.Logchan.remove(Chan)
# def add_LogAdmin(self, Admin: User) -> None:
# self.LogAdmin.add(Admin)
# def del_LogAdmin(self, Admin: User) -> None:
# self.LogAdmin.remove(Admin)
| 45.044444 | 89 | 0.415886 |
887d7ad21774f9d78fa33b58dec3b6e2af7b8b30 | 13,930 | py | Python | tests/test_api.py | vsoch/django-oci | e60b2d0501ddd45f6ca3596b126180bebb2e6903 | [
"Apache-2.0"
] | 5 | 2020-03-24T23:45:28.000Z | 2021-11-26T03:31:05.000Z | tests/test_api.py | vsoch/django-oci | e60b2d0501ddd45f6ca3596b126180bebb2e6903 | [
"Apache-2.0"
] | 14 | 2020-04-02T17:13:28.000Z | 2020-12-29T12:36:38.000Z | tests/test_api.py | vsoch/django-oci | e60b2d0501ddd45f6ca3596b126180bebb2e6903 | [
"Apache-2.0"
] | null | null | null | """
test_django-oci api
-------------------
Tests for `django-oci` api.
"""
from django.urls import reverse
from django.contrib.auth.models import User
from django_oci import settings
from rest_framework import status
from rest_framework.test import APITestCase
from django.test.utils import override_settings
from time import sleep
from unittest import skipIf
import subprocess
import requests
import hashlib
import base64
import json
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
# Boolean from environment that determines authentication required variable
auth_regex = re.compile('(\w+)[:=] ?"?([^"]+)"?')
# Important: user needs to be created globally to be seen
user, _ = User.objects.get_or_create(username="dinosaur")
token = str(user.auth_token)
def calculate_digest(blob):
    """Return the sha256 hex digest of *blob* (the raw bytes of a response body)."""
    return hashlib.sha256(blob).hexdigest()
def get_auth_header(username, password):
    """Build a HTTP Basic ``Authorization`` header for the given credentials.

    django oci requires the user token as the password here; the Basic
    header is then exchanged for a longer-lived bearer token.
    """
    credentials = f"{username}:{password}".encode("utf-8")
    encoded = base64.b64encode(credentials).decode("utf-8")
    return {"Authorization": f"Basic {encoded}"}
def get_authentication_headers(response):
    """Given a requests.Response, assert that it has status code 401 and
    provides the Www-Authenticate header that can be parsed for the request

    Performs the full token dance: parses realm/service/scope out of
    Www-Authenticate, requests a bearer token using the module-level test
    user's Basic credentials, and returns the resulting header dict
    {"Authorization": "Bearer <token>"}.
    """
    assert response.status_code == 401
    assert "Www-Authenticate" in response.headers
    # auth_regex, user and token are module-level fixtures defined above.
    matches = dict(auth_regex.findall(response.headers["Www-Authenticate"]))
    for key in ["scope", "realm", "service"]:
        assert key in matches
    # Prepare authentication headers and get token
    headers = get_auth_header(user.username, token)
    url = "%s?service=%s&scope=%s" % (
        matches["realm"],
        matches["service"],
        matches["scope"],
    )
    # With proper headers should be 200
    auth_response = requests.get(url, headers=headers)
    assert auth_response.status_code == 200
    body = auth_response.json()
    # Make sure we have the expected fields
    for key in ["token", "expires_in", "issued_at"]:
        assert key in body
    # Formulate new auth header
    return {"Authorization": "Bearer %s" % body["token"]}
def get_manifest(config_digest, layer_digest):
    """A dummy image manifest with a config and single image layer.

    The supplied digests are filled into the config entry and the single
    layer entry; the manifest is returned serialized as a JSON string.
    """
    manifest = {
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "size": 7023,
            "digest": config_digest,
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "size": 32654,
                "digest": layer_digest,
            }
        ],
        "annotations": {"com.example.key1": "peas", "com.example.key2": "carrots"},
    }
    return json.dumps(manifest)
def add_url_prefix(download_url):
    """Prefix a relative download URL with the local test server address.

    Absolute URLs (anything already starting with "http") pass through
    unchanged.
    """
    if download_url.startswith("http"):
        return download_url
    return "http://127.0.0.1:8000%s" % download_url
| 37.245989 | 87 | 0.618808 |
887d7f78ede177237d678a89bcd14f2af84d31d3 | 1,492 | py | Python | LCSTPlotter.py | edwinstorres/LCST-Plotter | 1afbd251cc395461498e902069e90bb14e66b013 | [
"MIT"
] | null | null | null | LCSTPlotter.py | edwinstorres/LCST-Plotter | 1afbd251cc395461498e902069e90bb14e66b013 | [
"MIT"
] | null | null | null | LCSTPlotter.py | edwinstorres/LCST-Plotter | 1afbd251cc395461498e902069e90bb14e66b013 | [
"MIT"
] | null | null | null | #LCST Plotter
#Author: ESTC
import numpy
import streamlit
import matplotlib.pyplot as plt
import pandas
launch_app()
if datafile is not None:
load_data(datafile)
make_plot(x1a,x1b,T,cation,anion)
| 32.434783 | 81 | 0.661528 |
887dda35242cbfb0d65a1b78e9d2c415c3d774ec | 13,039 | py | Python | hello.py | zarqabiqbal/RTDA-Real-Time-Data-Analysis-ML-Project- | 0659191afa6a8802647f46d0dc4f85f2044639e5 | [
"Apache-2.0"
] | null | null | null | hello.py | zarqabiqbal/RTDA-Real-Time-Data-Analysis-ML-Project- | 0659191afa6a8802647f46d0dc4f85f2044639e5 | [
"Apache-2.0"
] | null | null | null | hello.py | zarqabiqbal/RTDA-Real-Time-Data-Analysis-ML-Project- | 0659191afa6a8802647f46d0dc4f85f2044639e5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from flask import Flask, render_template, app, url_for,request
import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
from textblob import TextBlob
import re
import pandas as pa
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
import time
import itertools
app=Flask(__name__)
if __name__ == '__main__':
app.run("127.0.0.1",5000,debug=True)
| 37.90407 | 190 | 0.638162 |
887ec99957073c1e4d84bfa70941e65fc56ac5fc | 529 | py | Python | utils.py | Ji-Xinyou/DIP-proj-DepthEstimation | 5432c14ce1d0cdc9b8b6ab0a273678ffbe6086bd | [
"MIT"
] | null | null | null | utils.py | Ji-Xinyou/DIP-proj-DepthEstimation | 5432c14ce1d0cdc9b8b6ab0a273678ffbe6086bd | [
"MIT"
] | null | null | null | utils.py | Ji-Xinyou/DIP-proj-DepthEstimation | 5432c14ce1d0cdc9b8b6ab0a273678ffbe6086bd | [
"MIT"
] | null | null | null | import torch
def save_param(model, pth_path):
    '''
    Serialize the model's learnable parameters to disk.

    Args:
        model: the model whose parameters should be written out
        pth_path: destination path of the .pth checkpoint file
    '''
    # Only the state dict is persisted, not the module object itself.
    state = model.state_dict()
    torch.save(state, pth_path)
def load_param(model, pth_path):
    '''
    Restore previously saved parameters into the model.

    Args:
        model: the model that should receive the parameters
        pth_path: path of the .pth checkpoint file to read
    '''
    # The checkpoint must contain a state dict matching the model's keys.
    state = torch.load(pth_path)
    model.load_state_dict(state)
8883af6b6c6f6d7fd863836a0ab018f4af35d11b | 1,348 | py | Python | prob-77.py | tushargayan2324/Project_Euler | 874accc918e23337510056d7140cd85a1656dd3e | [
"MIT"
] | null | null | null | prob-77.py | tushargayan2324/Project_Euler | 874accc918e23337510056d7140cd85a1656dd3e | [
"MIT"
] | null | null | null | prob-77.py | tushargayan2324/Project_Euler | 874accc918e23337510056d7140cd85a1656dd3e | [
"MIT"
] | null | null | null | #Project Euler Problem-77
#Author Tushar Gayan
#Multinomial Theorem
import math
import numpy as np
'''prime_list = []
i = 1
while len(prime_list)<200:
if prime_check(i) == True:
prime_list.append(i)
i +=1
print(prime_list)
m = 1
for i in prime_list:
m *= np.poly1d(mod_list(i,30))
#print(i)
print(np.poly1d(m))
#for i in range(480):
# print(m[i])
print(m.c)'''
# Scan upward for the first integer whose partition count exceeds 5000.
i = 1
# NOTE(review): ``partition`` is defined elsewhere in this file (stripped
# from this view); for Project Euler 77 it presumably counts the ways to
# write ``i`` as a sum of primes -- confirm against its definition.
while partition(i) < 5000:
    i += 1
print partition(i), i  # Python 2 print statement: count, then the answer
| 18.985915 | 57 | 0.5 |
888504477ef926e05cac253422a2f5fcc1a109ea | 4,031 | py | Python | main.py | sun624/Dogecoin_musk | 6dc48f03275321d29bb1ab131ecd14626bcc5170 | [
"MIT"
] | null | null | null | main.py | sun624/Dogecoin_musk | 6dc48f03275321d29bb1ab131ecd14626bcc5170 | [
"MIT"
] | null | null | null | main.py | sun624/Dogecoin_musk | 6dc48f03275321d29bb1ab131ecd14626bcc5170 | [
"MIT"
] | null | null | null | #! usr/bin/env python3
from os import times
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import requests
import pandas as pd
import json
import datetime
import time
import math
from twitter import get_coin_tweets_dates
#beautifulsoup cannot scrape dynamically changing webpages.
#Instead we use third party library called Selenium and webdrivers.
"""
draw coin price fluctuation with Elon's tweet
"""
if __name__ == '__main__':
    # Script entry point; ``main`` is defined elsewhere in this module.
    main()
| 29.210145 | 170 | 0.690896 |
88858e6eec8ef3e573592e88fd8baa705aa1f430 | 1,264 | py | Python | 064_minimum_path_sum.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 064_minimum_path_sum.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 064_minimum_path_sum.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | """
64. Minimum Path Sum
Given a m x n grid filled with non-negative numbers,
find a path from top left to bottom right
which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
http://www.tangjikai.com/algorithms/leetcode-64-minimum-path-sum
Dynamic Programming
We can use an two-dimensional array
to record the minimum sum at each position of grid,
finally return the last element as output.
"""
| 29.395349 | 75 | 0.530854 |
8886118689d4c63bf084bbb40abe034f4a2125d5 | 12,507 | py | Python | pants-plugins/structured/subsystems/r_distribution.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | pants-plugins/structured/subsystems/r_distribution.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | pants-plugins/structured/subsystems/r_distribution.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import subprocess
import sys
from contextlib import contextmanager
from abc import abstractproperty
from pants.binaries.binary_util import BinaryUtil
from pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult
from pants.fs.archive import TGZ
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import environment_as, temporary_file_path
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
from pants.util.strutil import ensure_binary
logger = logging.getLogger(__name__)
def __init__(self, binary_util, r_version, modules_git_ref, tools_cache_dir,
resolver_cache_dir, chroot_cache_dir):
self._binary_util = binary_util
self._r_version = r_version
self.modules_git_ref = modules_git_ref
self.tools_cache_dir = tools_cache_dir
self.resolver_cache_dir = resolver_cache_dir
self.chroot_cache_dir = chroot_cache_dir
def _unpack_distribution(self, supportdir, r_version, output_filename):
  """Download the R distribution tarball and extract it next to itself.

  :param supportdir: support directory passed to the binary selector.
  :param r_version: version of the distribution to fetch.
  :param output_filename: name of the tarball binary to select.
  :returns: path of the ``unpacked`` directory holding the extracted tree.
  """
  logger.debug('unpacking R distribution, version: %s', r_version)
  tarball = self._binary_util.select_binary(
    supportdir=supportdir, version=r_version, name=output_filename)
  logger.debug('Tarball for %s(%s): %s', supportdir, r_version, tarball)
  # Extract into an 'unpacked' directory alongside the downloaded tarball;
  # concurrency_safe=True guards against concurrent extractions.
  destination = os.path.join(os.path.dirname(tarball), 'unpacked')
  TGZ.extract(tarball, destination, concurrency_safe=True)
  return destination
# R snippet template: evaluate {initial_input}, then snapshot the whole
# workspace to {save_file_path} (safe=FALSE).
R_SAVE_IMAGE_BOILERPLATE = """{initial_input}
save.image(file='{save_file_path}', safe=FALSE)
"""
# Conventional file name of an R workspace image.
RDATA_FILE_NAME = '.Rdata'
# Matches lines containing only whitespace.
BLANK_LINE_REGEX = re.compile('^\s*$')
# Matches dotted numeric version strings such as '3.4.1'.
VALID_VERSION_REGEX = re.compile('^[0-9]+(\.[0-9]+)*$')
# R snippet template: after evaluating {libs_input}, print the name of every
# package installed under the {libs_joined} library locations, one per line.
R_LIST_PACKAGES_BOILERPLATE = """{libs_input}
cat(installed.packages(lib.loc={libs_joined})[,'Package'], sep='\\n')
"""
# R_INSTALL_SOURCE_PACKAGE_BOILERPLATE = """???"""
# def gen_source_install_input(self, source_dir, outdir):
# return self.R_INSTALL_SOURCE_PACKAGE_BOILERPLATE.format(
# expr="devtools::install_local('{}', lib='{}')".format(
# source_dir, outdir),
# outdir=outdir,
# )
# def install_source_package(self, context, source_dir, pkg_cache_dir):
# source_input = self.gen_source_install_input(source_dir, pkg_cache_dir)
# self.invoke_rscript(context, source_input).stdout.split('\n')
| 33.352 | 93 | 0.691773 |
88864f3fa8092982651eaeda9dbe085e135b834a | 5,121 | py | Python | src/test.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | 8 | 2021-12-02T02:25:55.000Z | 2022-03-18T23:41:42.000Z | src/test.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | null | null | null | src/test.py | yliuhz/PMAW | 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | [
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
import numpy as np
import torch
from torch import nn
if __name__=='__main__':
    # NOTE(review): ``convmodel`` and ``BatchNorm`` are defined elsewhere
    # in this file -- confirm their definitions before relying on the
    # hard-coded expected values below.
    model = convmodel()
    # Fill every parameter with 0.1 so gradient values are reproducible.
    for m in model.parameters():
        m.data.fill_(0.1)
    # criterion = nn.CrossEntropyLoss()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
    model.train()
    # Batch of 8 all-ones 3x10x10 images with all-ones scalar targets.
    images = torch.ones(8, 3, 10, 10)
    targets = torch.ones(8, dtype=torch.float)
    output = model(images)
    print(output.shape)
    # torch.Size([8, 20])
    loss = criterion(output.view(-1,), targets)
    print(model.conv1.weight.grad)
    # None -- no backward pass has run yet.
    loss.backward()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([-0.0782, -0.0842, -0.0782])
    print(model.conv1.weight[0][0][0])
    # tensor([0.1000, 0.1000, 0.1000], grad_fn=<SelectBackward>)
    # Weights are still at their initial 0.1 before the optimizer step.
    optimizer.step()
    print(model.conv1.weight[0][0][0])
    # tensor([0.1782, 0.1842, 0.1782], grad_fn=<SelectBackward>)
    # With learning rate 1.0 the SGD update is exactly (weight - grad).
    optimizer.zero_grad()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([0., 0., 0.])
    # zero_grad() resets gradients to zero (not back to None).
    print('>>>test for bn<<<')
    bn = nn.BatchNorm2d(2)
    aa = torch.randn(2,2,1,1)
    bb = bn(aa)
    print('aa=', aa)
    print('bb=', bb)
    # Compare the built-in BatchNorm2d against the hand-rolled BatchNorm
    # defined elsewhere in this file.
    cc = BatchNorm(2, 4)(aa)
    print('cc=', cc)
    shape = (1, 2, 1, 1)
    # Manual batch norm (no eps): normalise over batch and spatial dims.
    mean = aa.mean(dim=(0,2,3), keepdim=True)
    dd = (aa - mean) / torch.sqrt(((aa-mean)**2).mean(dim=(0,2,3), keepdim=True))
    print('dd=', dd)
| 35.075342 | 81 | 0.610037 |
8887cdf2cc8ae9604a5a9ce44664b255c6cabd67 | 64 | py | Python | hanlp/datasets/ner/__init__.py | v-smwang/HanLP | 98db7a649110fca4307acbd6a26f2b5bb1159efc | [
"Apache-2.0"
] | 27,208 | 2015-03-27T10:25:45.000Z | 2022-03-31T13:26:32.000Z | hanlp/datasets/ner/__init__.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | 1,674 | 2015-03-30T06:36:44.000Z | 2022-03-16T01:52:56.000Z | hanlp/datasets/ner/__init__.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | 7,710 | 2015-03-27T08:07:57.000Z | 2022-03-31T14:57:23.000Z | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-06 15:32 | 21.333333 | 24 | 0.59375 |
888a09b848fdd84015221d8d652297a6bccb8e05 | 563 | py | Python | portfolio/2011_krakDK/krak/items.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/2011_krakDK/krak/items.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/2011_krakDK/krak/items.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | # Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
| 23.458333 | 48 | 0.657194 |
888a79727132fd019b0db67bf3741b80a00a7a59 | 29,630 | py | Python | src/mau/parsers/main_parser.py | Project-Mau/mau | 193d16633c1573227debf4517ebcaf07add24979 | [
"MIT"
] | 28 | 2021-02-22T18:46:52.000Z | 2022-02-21T15:14:05.000Z | src/mau/parsers/main_parser.py | Project-Mau/mau | 193d16633c1573227debf4517ebcaf07add24979 | [
"MIT"
] | 5 | 2021-02-23T09:56:13.000Z | 2022-03-13T09:47:42.000Z | src/mau/parsers/main_parser.py | Project-Mau/mau | 193d16633c1573227debf4517ebcaf07add24979 | [
"MIT"
] | 2 | 2021-02-23T09:11:45.000Z | 2021-03-13T11:08:21.000Z | import re
import copy
from mau.lexers.base_lexer import TokenTypes, Token
from mau.lexers.main_lexer import MainLexer
from mau.parsers.base_parser import (
BaseParser,
TokenError,
ConfigurationError,
parser,
)
from mau.parsers.text_parser import TextParser
from mau.parsers.arguments_parser import ArgumentsParser
from mau.parsers.preprocess_variables_parser import PreprocessVariablesParser
from mau.parsers.nodes import (
HorizontalRuleNode,
TextNode,
BlockNode,
ContentNode,
ContentImageNode,
CommandNode,
HeaderNode,
ListNode,
ListItemNode,
ParagraphNode,
TocNode,
TocEntryNode,
FootnotesNode,
)
def header_anchor(text, level):
    """
    Return a sanitised anchor for a header.

    ``level`` is accepted for interface compatibility but is not used
    in the sanitisation itself.
    """
    # Lowercase everything, then keep only runs of letters, digits,
    # dashes, dots, and spaces.
    lowered = text.lower()
    kept = "".join(re.findall("[a-z0-9-\\. ]+", lowered))
    # Collapse whitespace runs into single dashes.
    return "-".join(kept.split())
# The MainParser is in charge of parsing
# the whole input, calling other parsers
# to manage single paragraphs or other
# things like variables.
def _parse_content_image(self, uri, title):
    # Parse and store a content image written as
    #
    # [alt_text, classes]
    # << image:uri
    #
    # where alt_text is the fallback text used when the image cannot be
    # loaded and classes is a comma-separated list of classes.
    # Map positional attributes onto named ones, defaulting both to None.
    self.argsparser.set_names_and_defaults(
        ["alt_text", "classes"], {"alt_text": None, "classes": None}
    )
    args, kwargs = self.argsparser.get_arguments_and_reset()
    alt_text = kwargs.pop("alt_text")
    raw_classes = kwargs.pop("classes")
    # A non-empty classes string becomes a list; falsy values pass through.
    class_list = raw_classes.split(",") if raw_classes else raw_classes
    node = ContentImageNode(
        uri=uri,
        alt_text=alt_text,
        classes=class_list,
        title=title,
        kwargs=kwargs,
    )
    self._save(node)
def _parse_standard_content(self, content_type, uri, title):
    # Fallback for content types without a dedicated handler.
    # content_type is accepted for interface compatibility but the
    # generic node only records uri/title plus the raw attributes.
    args, kwargs = self.argsparser.get_arguments_and_reset()
    node = ContentNode(uri=uri, title=title, args=args, kwargs=kwargs)
    self._save(node)
| 32.136659 | 104 | 0.583463 |
888b41cc12274148e790e361bed90e406da76010 | 3,344 | py | Python | stereomag/nets.py | MandyMY/stereo-magnification | c18fa484484597dfa653f317459a503d9bf8d933 | [
"Apache-2.0"
] | null | null | null | stereomag/nets.py | MandyMY/stereo-magnification | c18fa484484597dfa653f317459a503d9bf8d933 | [
"Apache-2.0"
] | null | null | null | stereomag/nets.py | MandyMY/stereo-magnification | c18fa484484597dfa653f317459a503d9bf8d933 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for multiplane image (MPI) prediction networks.
"""
from __future__ import division
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
#from tensorflow.contrib import slim
import tf_slim as slim
def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False):
  """Network definition for multiplane image (MPI) inference.

  Args:
    inputs: stack of input images [batch, height, width, input_channels]
    num_outputs: number of output channels
    ngf: number of features for the first conv layer
    vscope: variable scope
    reuse_weights: whether to reuse weights (for weight sharing)
  Returns:
    pred: network output at the same spatial resolution as the inputs.
  """
  with tf.variable_scope(vscope, reuse=reuse_weights):
    # Layer normalization is applied to every conv / transposed conv below
    # via arg_scope; the final prediction layer opts out explicitly.
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm):
      # Encoder: three stride-2 stages take the input to 1/8 resolution
      # while widening the feature channels.
      cnv1_1 = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1)
      cnv1_2 = slim.conv2d(cnv1_1, ngf * 2, [3, 3], scope='conv1_2', stride=2)
      cnv2_1 = slim.conv2d(cnv1_2, ngf * 2, [3, 3], scope='conv2_1', stride=1)
      cnv2_2 = slim.conv2d(cnv2_1, ngf * 4, [3, 3], scope='conv2_2', stride=2)
      cnv3_1 = slim.conv2d(cnv2_2, ngf * 4, [3, 3], scope='conv3_1', stride=1)
      cnv3_2 = slim.conv2d(cnv3_1, ngf * 4, [3, 3], scope='conv3_2', stride=1)
      cnv3_3 = slim.conv2d(cnv3_2, ngf * 8, [3, 3], scope='conv3_3', stride=2)
      # Dilated (rate=2) convolutions enlarge the receptive field at the
      # coarsest resolution without further downsampling.
      cnv4_1 = slim.conv2d(
          cnv3_3, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2)
      cnv4_2 = slim.conv2d(
          cnv4_1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2)
      cnv4_3 = slim.conv2d(
          cnv4_2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2)
      # Adding skips
      # Decoder: transposed convs upsample back to full resolution,
      # concatenating the matching encoder activations at each scale.
      skip = tf.concat([cnv4_3, cnv3_3], axis=3)
      cnv6_1 = slim.conv2d_transpose(
          skip, ngf * 4, [4, 4], scope='conv6_1', stride=2)
      cnv6_2 = slim.conv2d(cnv6_1, ngf * 4, [3, 3], scope='conv6_2', stride=1)
      cnv6_3 = slim.conv2d(cnv6_2, ngf * 4, [3, 3], scope='conv6_3', stride=1)
      skip = tf.concat([cnv6_3, cnv2_2], axis=3)
      cnv7_1 = slim.conv2d_transpose(
          skip, ngf * 2, [4, 4], scope='conv7_1', stride=2)
      cnv7_2 = slim.conv2d(cnv7_1, ngf * 2, [3, 3], scope='conv7_2', stride=1)
      skip = tf.concat([cnv7_2, cnv1_2], axis=3)
      cnv8_1 = slim.conv2d_transpose(
          skip, ngf, [4, 4], scope='conv8_1', stride=2)
      cnv8_2 = slim.conv2d(cnv8_1, ngf, [3, 3], scope='conv8_2', stride=1)
      feat = cnv8_2
      # Final 1x1 conv maps to num_outputs channels; tanh bounds outputs
      # to [-1, 1] and no normalizer is applied to the prediction layer.
      pred = slim.conv2d(
          feat,
          num_outputs, [1, 1],
          stride=1,
          activation_fn=tf.nn.tanh,
          normalizer_fn=None,
          scope='color_pred')
      return pred
| 39.809524 | 78 | 0.650718 |
888c285859f9179b927cbdc06da726b52d44b5cf | 3,731 | py | Python | tests/test_init.py | ashb/freedesktop-icons | 10737b499bff9a22c853aa20822215c8e059a737 | [
"MIT"
] | 1 | 2021-06-02T11:11:50.000Z | 2021-06-02T11:11:50.000Z | tests/test_init.py | ashb/freedesktop-icons | 10737b499bff9a22c853aa20822215c8e059a737 | [
"MIT"
] | null | null | null | tests/test_init.py | ashb/freedesktop-icons | 10737b499bff9a22c853aa20822215c8e059a737 | [
"MIT"
] | null | null | null | from pathlib import Path
from unittest import mock
import pytest
from freedesktop_icons import Icon, Theme, lookup, lookup_fallback, theme_search_dirs
| 35.198113 | 100 | 0.729027 |
888d0174a06f5d771e461f6d3b086646f76a87f5 | 569 | py | Python | src/sweetrpg_library_api/application/__init__.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | src/sweetrpg_library_api/application/__init__.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | 33 | 2021-09-18T23:52:05.000Z | 2022-03-30T12:25:49.000Z | src/sweetrpg_library_api/application/__init__.py | sweetrpg/library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""
"""
import os
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sweetrpg_library_api.application import constants
# Initialise Sentry error reporting at import time. SENTRY_DSN is
# required (a missing variable raises KeyError on import); the
# environment label falls back to 'Unknown' when unset or empty.
sentry_sdk.init(dsn=os.environ[constants.SENTRY_DSN],
                traces_sample_rate=0.2,  # sample 20% of transactions for tracing
                environment=os.environ.get(constants.SENTRY_ENV) or 'Unknown',
                integrations=[
                    FlaskIntegration(), RedisIntegration(),
                ])
| 28.45 | 78 | 0.680141 |
888eea6317cde6023d0d320a6a78866a20795e44 | 13,674 | py | Python | isochrones_old.py | timothydmorton/fpp-old | 6a2175d4bd9648b61c244c7463148632f36de631 | [
"MIT"
] | null | null | null | isochrones_old.py | timothydmorton/fpp-old | 6a2175d4bd9648b61c244c7463148632f36de631 | [
"MIT"
] | null | null | null | isochrones_old.py | timothydmorton/fpp-old | 6a2175d4bd9648b61c244c7463148632f36de631 | [
"MIT"
] | null | null | null | """
Compiles stellar model isochrones into an easy-to-access format.
"""
from numpy import *
from scipy.interpolate import LinearNDInterpolator as interpnd
from consts import *
import os,sys,re
import scipy.optimize
#try:
# import pymc as pm
#except:
# print 'isochrones: pymc not loaded! MCMC will not work'
import numpy.random as rand
import atpy
DATAFOLDER = os.environ['ASTROUTIL_DATADIR'] #'/Users/tdm/Dropbox/astroutil/data'
def write_all_dartmouth_to_fits(fehs=arange(-1,0.51,0.1)):
    """Convert Dartmouth isochrone grids to FITS, one metallicity at a time.

    Calls ``dartmouth_to_fits`` (defined elsewhere in this module) for each
    value of ``fehs``, printing the metallicity first as a progress marker.
    Any exception raised by the conversion propagates to the caller.

    Parameters
    ----------
    fehs : iterable of float
        [Fe/H] metallicity values to convert; defaults to -1.0 .. 0.5
        in steps of 0.1.
    """
    for feh in fehs:
        print(feh)  # parenthesised form works under both Python 2 and 3
        # The original wrapped this call in ``try: ... except: raise`` with
        # an unreachable ``pass`` after the raise; that is equivalent to no
        # handler at all, so the dead wrapper has been removed.
        dartmouth_to_fits(feh)
| 34.270677 | 152 | 0.52311 |