hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5529bf458e89cd64db669da48d12a440dd5e7310 | 14,095 | py | Python | tests/grammar/test_data_type.py | Daniihh/sqlpyparser | aad1d613c02d4f8fa6b833c060a683cf7e194b1c | [
"MIT"
] | 28 | 2016-02-13T10:20:21.000Z | 2022-03-10T02:41:58.000Z | tests/grammar/test_data_type.py | Daniihh/sqlpyparser | aad1d613c02d4f8fa6b833c060a683cf7e194b1c | [
"MIT"
] | 22 | 2016-02-15T15:55:09.000Z | 2017-09-12T13:49:17.000Z | tests/grammar/test_data_type.py | Daniihh/sqlpyparser | aad1d613c02d4f8fa6b833c060a683cf7e194b1c | [
"MIT"
] | 16 | 2016-02-15T16:41:23.000Z | 2021-05-18T04:51:52.000Z | # -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pyparsing
from mysqlparse.grammar.data_type import data_type_syntax
| 40.386819 | 122 | 0.57602 |
5529c5dbc7514236bc8611211cfb848e2618a841 | 2,615 | py | Python | bayarea_urbansim/data_regeneration/export_to_h5.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | bayarea_urbansim/data_regeneration/export_to_h5.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | bayarea_urbansim/data_regeneration/export_to_h5.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from spandex import TableLoader
import pandas.io.sql as sql
loader = TableLoader()
def db_to_df(query):
    """Run *query* against the loader's database and return the result
    set as a pandas DataFrame."""
    connection = loader.database._connection
    return sql.read_frame(query, connection)
## Export to HDF5 - get path to output file
h5_path = loader.get_path('out/regeneration/summaries/bayarea_v3.h5')  ## Path to the output file

# Buildings ---------------------------------------------------------------
buildings = db_to_df('select * from building').set_index('building_id')
if 'id' in buildings.columns:
    del buildings['id']

# Translation from development_type_id to the building_type_id expected by
# UrbanSim; any development type not listed falls back to 0.
# NOTE: the original hand-written assignments listed 13 -> 8 twice; the
# duplicate is collapsed here (same value, no behaviour change).
DEV_TYPE_TO_BUILDING_TYPE = {
    1: 1,
    2: 3,
    5: 12,
    7: 10,
    9: 5,
    10: 4,
    13: 8,
    14: 7,
    15: 9,
    17: 6,
    24: 16,
}
# Vectorised lookup replaces the chained-indexing assignments (which are a
# pandas SettingWithCopy hazard).
buildings['building_type_id'] = (
    buildings.development_type_id.map(DEV_TYPE_TO_BUILDING_TYPE)
    .fillna(0)
    .astype(int)
)

# Parcels -----------------------------------------------------------------
parcels = db_to_df('select * from parcel').set_index('parcel_id')
parcels['shape_area'] = parcels.acres * 4046.86  # acres -> square metres
for _col in ('id', 'geom', 'centroid'):
    if _col in parcels.columns:
        del parcels[_col]

# Jobs --------------------------------------------------------------------
jobs = db_to_df('select * from jobs').set_index('job_id')
if 'id' in jobs.columns:
    del jobs['id']

# Households --------------------------------------------------------------
hh = db_to_df('select * from households').set_index('household_id')
if 'id' in hh.columns:
    del hh['id']
hh = hh.rename(columns={'hinc': 'income'})
for col in hh.columns:
    hh[col] = hh[col].astype('int32')

# Zones -------------------------------------------------------------------
zones_path = loader.get_path('juris/reg/zones/zones.csv')
zones = pd.read_csv(zones_path).set_index('zone_id')

# Putting tables in the HDF5 file -----------------------------------------
store = pd.HDFStore(h5_path)
try:
    store['parcels'] = parcels        # http://urbansim.org/Documentation/Parcel/ParcelTable
    store['buildings'] = buildings    # http://urbansim.org/Documentation/Parcel/BuildingsTable
    store['households'] = hh          # http://urbansim.org/Documentation/Parcel/HouseholdsTable
    store['jobs'] = jobs              # http://urbansim.org/Documentation/Parcel/JobsTable
    store['zones'] = zones            # http://urbansim.org/Documentation/Parcel/ZonesTable
finally:
    # Close the store even if a write fails so the HDF5 file is not left
    # open/locked.
    store.close()
552b355ab9a4608d3f4dc4d7df2c3b24e79e210d | 7,060 | py | Python | minder_utils/visualisation/feature_engineering.py | alexcapstick/minder_utils | 3bb9380b7796b5dd5b995ce1839ea6a94321021d | [
"MIT"
] | null | null | null | minder_utils/visualisation/feature_engineering.py | alexcapstick/minder_utils | 3bb9380b7796b5dd5b995ce1839ea6a94321021d | [
"MIT"
] | null | null | null | minder_utils/visualisation/feature_engineering.py | alexcapstick/minder_utils | 3bb9380b7796b5dd5b995ce1839ea6a94321021d | [
"MIT"
] | 1 | 2022-03-16T11:10:43.000Z | 2022-03-16T11:10:43.000Z | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import pandas as pd
from minder_utils.formatting.label import label_by_week, label_dataframe
from minder_utils.feature_engineering import Feature_engineer
from minder_utils.feature_engineering.calculation import *
from minder_utils.util import formatting_plots
from minder_utils.formatting import Formatting
# Feature engineer backed by the default Formatting data source.
fe = Feature_engineer(Formatting())
sns.set()
# Attribute of ``fe`` analysed by the ``__main__`` section below.
att = 'bathroom_night'
# Human-readable figure titles for the supported attributes.
figure_title = {
    'bathroom_night': 'Bathroom activity during the night',
    'bathroom_daytime': 'Bathroom activity during the day',
}
# Placeholder for restricting plots to a single patient (empty = all).
patient_id = ''
def visualise_data_time_lineplot(time_array, values_array, name, fill_either_side_array=None, fig = None, ax = None):
    '''
    Plot ``values_array`` against ``time_array`` as a line, optionally with
    a shaded +/- band around it.

    Parameters
    ----------
    time_array : array-like
        x-axis values (times).
    values_array : array-like
        y-axis values to plot.
    name : str
        Currently unused; kept for signature parity with the other
        ``visualise_data_time_*`` helpers.
    fill_either_side_array : array-like, optional
        When given, the band ``values_array +/- fill_either_side_array`` is
        shaded around the line.
    fig, ax : matplotlib figure / axes, optional
        Existing objects to draw on.  A new pair is created when ``ax`` is
        None; note that if ``ax`` is supplied without ``fig``, the returned
        ``fig`` is None.

    Returns
    -------
    fig, ax
    '''
    if ax is None:
        fig, ax = plt.subplots(1,1,figsize = (10,6))
    ax.plot(time_array, values_array)
    if not fill_either_side_array is None:
        ax.fill_between(time_array,
                        y1=values_array-fill_either_side_array,
                        y2=values_array+fill_either_side_array,
                        alpha = 0.3)
    return fig, ax
def visualise_data_time_heatmap(data_plot, name, fig = None, ax = None):
    '''
    Plot a day-by-time-of-day heatmap of aggregated activity.

    Parameters
    ----------
    data_plot : pandas.DataFrame
        Columns are the days and the index holds the aggregated times of
        day (parseable by ``pd.to_datetime``).
    name : str
        Label for the colour bar.
    fig, ax : matplotlib figure / axes, optional
        Existing objects to draw on.  A new pair is created when ``ax`` is
        None; note that if ``ax`` is supplied without ``fig``, the returned
        ``fig`` is None.

    Returns
    -------
    fig, ax
    '''
    if ax is None:
        # BUGFIX: the new axes were previously bound to an unused ``axes``
        # name, and ``ax`` was never forwarded to seaborn, so an axes
        # passed in by the caller was silently ignored and the heatmap
        # landed on whatever the current axes happened to be.
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    ax = sns.heatmap(data_plot.values, cmap='Blues',
                     cbar_kws={'label': name}, ax=ax)
    ax.invert_yaxis()
    # Tick every 90th day and every 3rd time bin; the +0.5 centres the
    # label on the heatmap cell.
    x_tick_loc = np.arange(0, data_plot.shape[1], 90)
    ax.set_xticks(x_tick_loc + 0.5)
    ax.set_xticklabels(data_plot.columns.astype(str)[x_tick_loc].values)
    y_tick_loc = np.arange(0, data_plot.shape[0], 3)
    ax.set_yticks(y_tick_loc + 0.5)
    ax.set_yticklabels([pd.to_datetime(time).strftime("%H:%M")
                        for time in data_plot.index.values[y_tick_loc]],
                       rotation=0)
    ax.set_xlabel('Day')
    ax.set_ylabel('Time of Day')
    return fig, ax
def visualise_activity_daily_data(fe):
    '''
    Box-plot the daily-aggregated activity values for each location,
    split by the UTI label.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.
    '''
    daily = fe.activity_specific_agg(agg='daily', load_smaller_aggs=True)
    daily = label_dataframe(daily, days_either_side=0)
    daily = daily.rename(columns={'valid': 'UTI Label'})
    daily['Feature'] = daily['location'].map(fe.info)

    sns.set_theme('talk')

    fig_list, axes_list = [], []
    for loc in daily['location'].unique():
        subset = daily[daily['location'].isin([loc])]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=subset, x='value', y='Feature',
                         hue='UTI Label', ax=ax, showfliers=False)
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title('{}'.format(fe.info[loc]))
        ax.set_xlabel('Value')
        fig_list.append(fig)
        axes_list.append(ax)
    return fig_list, axes_list
def visualise_activity_weekly_data(fe):
    '''
    Box-plot the weekly-aggregated activity values for each location,
    split by the UTI label.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.
    '''
    weekly = fe.activity_specific_agg(agg='weekly', load_smaller_aggs=True)
    weekly = label_by_week(weekly)
    weekly = weekly.rename(columns={'valid': 'UTI Label'})
    weekly['Feature'] = weekly['location'].map(fe.info)

    sns.set_theme('talk')

    figures = []
    axes_out = []
    for location in weekly['location'].unique():
        selection = weekly[weekly['location'].isin([location])]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=selection, x='value', y='Feature',
                         hue='UTI Label', ax=ax, showfliers=False)
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title('{}'.format(fe.info[location]))
        ax.set_xlabel('Value')
        figures.append(fig)
        axes_out.append(ax)
    return figures, axes_out
def visualise_activity_evently_data(fe):
    '''
    Box-plot the event-level ('evently') activity values for each
    location, split by the UTI label.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.
    '''
    events = fe.activity_specific_agg(agg='evently', load_smaller_aggs=True)
    events = label_dataframe(events, days_either_side=0)
    events = events.rename(columns={'valid': 'UTI Label'})
    events['Feature'] = events['location'].map(fe.info)
    sns.set_theme('talk')

    figs = []
    axs = []
    for loc in events['location'].unique():
        frame = events[events['location'].isin([loc])]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=frame, x='value', y='Feature',
                         hue='UTI Label', ax=ax, showfliers=False)
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title('{}'.format(fe.info[loc]))
        ax.set_xlabel('Value')
        figs.append(fig)
        axs.append(ax)
    return figs, axs
if __name__ == '__main__':
    # Week-on-week Kolmogorov-Smirnov comparison of the selected attribute.
    # ``weekly_compare`` and ``kolmogorov_smirnov`` come from the star
    # import of ``minder_utils.feature_engineering.calculation`` above.
    results = weekly_compare(getattr(fe, att), kolmogorov_smirnov)
    df = label_by_week(getattr(fe, att))
    # NOTE(review): ``visualise_weekly_data``,
    # ``visualise_weekly_statistical_analysis`` and
    # ``visualise_body_temperature`` are not defined in this module and it
    # is not evident they are provided by the star import -- confirm they
    # exist, otherwise this script raises NameError here.
    visualise_weekly_data(df)
    visualise_weekly_statistical_analysis(df)
    visualise_body_temperature(label_by_week(fe.body_temperature))
| 28.699187 | 125 | 0.657507 |
552c410668701cd1585658195d593e1b5751e350 | 442 | py | Python | code-everyday-challenge/n159_cyclically_rotate.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n159_cyclically_rotate.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n159_cyclically_rotate.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null |
# https://practice.geeksforgeeks.org/problems/cyclically-rotate-an-array-by-one2614/1
# Given an array, rotate the array by one position in clock-wise direction.
# Input:
# N = 5
# A[] = {1, 2, 3, 4, 5}
# Output:
# 5 1 2 3 4
def rotate_cycle(arr):
    """Rotate *arr* by one position in the clockwise direction: the last
    element moves to the front.  Returns a new list; empty and
    single-element inputs come back unchanged.

    BUGFIX: the original script called ``rotate_cycle`` without defining
    it anywhere, so it raised NameError; the implementation is supplied
    here.
    """
    if not arr:
        return arr
    return [arr[-1]] + list(arr[:-1])


if __name__ == "__main__":
    a = [1, 2, 3, 4, 5]
    print(rotate_cycle(a))  # expected: [5, 1, 2, 3, 4]
552d7c8af23d30920337cc95fa4d7065705c0c5f | 10,800 | py | Python | adamw_optimizer.py | pwldj/Bio_XLNet_CRF | 536053e9d74abdb2ee56000a8a779ffc1c0dd0fc | [
"Apache-2.0"
] | null | null | null | adamw_optimizer.py | pwldj/Bio_XLNet_CRF | 536053e9d74abdb2ee56000a8a779ffc1c0dd0fc | [
"Apache-2.0"
] | 2 | 2022-03-07T07:27:13.000Z | 2022-03-07T07:27:15.000Z | adamw_optimizer.py | pwldj/MTL-BioNER | 3fb336f517346daeec6a716fa6a657a421754bdb | [
"Apache-2.0"
] | 1 | 2021-05-05T08:42:53.000Z | 2021-05-05T08:42:53.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamw for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
| 44.444444 | 85 | 0.64463 |
552db8b8886012305a174d08f78e6a22fd0ea206 | 38 | py | Python | tests/test_e2e.py | sasakalaba/drone-strike | 92e1aa9a79347f2fdc336529b584206aa20e72d3 | [
"Unlicense"
] | null | null | null | tests/test_e2e.py | sasakalaba/drone-strike | 92e1aa9a79347f2fdc336529b584206aa20e72d3 | [
"Unlicense"
] | null | null | null | tests/test_e2e.py | sasakalaba/drone-strike | 92e1aa9a79347f2fdc336529b584206aa20e72d3 | [
"Unlicense"
] | null | null | null | from .base import BaseTestCase
pass
| 7.6 | 30 | 0.789474 |
552fdd4ea7856ad8f238ffba4056d7b666e1d19e | 1,559 | py | Python | backend/breach/helpers/injector.py | Cancelll/rupture | cd87481717b39de2654659b7ff436500e28a0600 | [
"MIT"
] | 184 | 2016-03-31T04:19:42.000Z | 2021-11-26T21:37:12.000Z | backend/breach/helpers/injector.py | Cancelll/rupture | cd87481717b39de2654659b7ff436500e28a0600 | [
"MIT"
] | 212 | 2016-03-31T04:32:06.000Z | 2017-02-26T09:34:47.000Z | backend/breach/helpers/injector.py | Cancelll/rupture | cd87481717b39de2654659b7ff436500e28a0600 | [
"MIT"
] | 38 | 2016-03-31T09:09:44.000Z | 2021-11-26T21:37:13.000Z | from backend.settings import BASE_DIR
import os
import subprocess
import stat
# Absolute path of the repository root (one level above the backend's
# BASE_DIR).
rupture_dir = os.path.abspath(os.path.join(BASE_DIR, os.pardir))
# The ``client`` directory shipped at the repository root.
client_dir = os.path.join(rupture_dir, 'client')
| 25.557377 | 84 | 0.645285 |
5530fb74fc5655f0d169fed9774ccb03f4699d79 | 952 | py | Python | wagtail_client/utils.py | girleffect/core-integration-demo | c37a0d5183d16bec6245a41e12dd90691ffa7138 | [
"BSD-3-Clause"
] | null | null | null | wagtail_client/utils.py | girleffect/core-integration-demo | c37a0d5183d16bec6245a41e12dd90691ffa7138 | [
"BSD-3-Clause"
] | 19 | 2018-02-06T08:56:24.000Z | 2018-09-11T08:05:24.000Z | wagtail_client/utils.py | girleffect/core-integration-demo | c37a0d5183d16bec6245a41e12dd90691ffa7138 | [
"BSD-3-Clause"
] | 2 | 2018-05-25T09:44:03.000Z | 2021-08-18T12:07:47.000Z | from urllib.parse import urlencode
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
def provider_logout_url(request):
    """
    This function is used to construct a logout URL that can be used to log
    the user out of the Identity Provider (Authentication Service).

    :param request: the current Django request.
    :return: the provider's logout URL including the post-logout redirect
        (and the id_token hint when one is stored in the session).
    """
    current_site = get_current_site(request)
    if not hasattr(current_site, "oidcsettings"):
        raise RuntimeError(f"Site {current_site} has no settings configured.")

    query = {
        "post_logout_redirect_uri": current_site.oidcsettings.wagtail_redirect_url
    }
    # The OIDC_STORE_ID_TOKEN setting must be set to true if we want to be
    # able to read the token from the session.
    if "oidc_id_token" in request.session:
        query["id_token_hint"] = request.session["oidc_id_token"]

    return settings.OIDC_OP_LOGOUT_URL + "?" + urlencode(query, doseq=True)
| 34 | 91 | 0.722689 |
553261313f73826b4fd76c66eae4be0cde9803af | 978 | py | Python | connectToProteusFromMongo.py | erentts/Ignite-Greenhouse | 328730399328936332b5c6f3f8dcd18bf56369b9 | [
"MIT"
] | 4 | 2021-02-22T21:19:28.000Z | 2021-05-03T14:19:18.000Z | connectToProteusFromMongo.py | erentts/Ignite-Greenhouse | 328730399328936332b5c6f3f8dcd18bf56369b9 | [
"MIT"
] | null | null | null | connectToProteusFromMongo.py | erentts/Ignite-Greenhouse | 328730399328936332b5c6f3f8dcd18bf56369b9 | [
"MIT"
] | null | null | null | import pymongo
import dns
import serial
from pymongo import MongoClient
import struct
# NOTE(review): the MongoDB connection string is empty -- presumably the
# real URI was stripped before committing; supply it via configuration.
cluster = MongoClient("")
# Serial link to the (Proteus-simulated) greenhouse controller on COM1.
serialPort = serial.Serial(port= "COM1", baudrate=9600 ,bytesize =8 , timeout =None, parity='N',stopbits=1)
# NOTE(review): "<greenHouse>" looks like a placeholder database name --
# confirm against the deployed cluster.
db=cluster["<greenHouse>"]
collection = db["greenhouses"]
# Each time a line arrives on the serial port, look up the stored target
# temperature for "SERA 1" and write it back to the device as ASCII bytes.
while serialPort.readline():
    results = collection.find({"greenHouseName" : "SERA 1" })
    # NOTE(review): if the query matches no documents, ``targetTemperature``
    # is never bound and the next line raises NameError.
    for result in results:
        targetTemperature = abs(int(result.get("targetTemperature")))
    # declaring an integer value
    int_val = targetTemperature
    # converting to string
    str_val = str(targetTemperature)
    # converting string to bytes
    byte_val = str_val.encode()
    serialPort.write(byte_val)
    getterThree = collection.update_one({"greenHouseName" : "SERA 1"},{"$set":{"targetTemperature" : targetTemperature }})
| 31.548387 | 145 | 0.702454 |
55333cbb250a399b054018a193b9449274e24d7c | 837 | py | Python | website_sale_cache/__manifest__.py | factorlibre/website-addons | 9a0c7a238e2b6030d57f7a08d48816b4f2431524 | [
"MIT"
] | 1 | 2020-03-01T03:04:21.000Z | 2020-03-01T03:04:21.000Z | website_sale_cache/__manifest__.py | factorlibre/website-addons | 9a0c7a238e2b6030d57f7a08d48816b4f2431524 | [
"MIT"
] | null | null | null | website_sale_cache/__manifest__.py | factorlibre/website-addons | 9a0c7a238e2b6030d57f7a08d48816b4f2431524 | [
"MIT"
] | 3 | 2019-07-29T20:23:16.000Z | 2021-01-07T20:51:24.000Z | # Copyright 2017 Artyom Losev
# Copyright 2018 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
# Odoo addon manifest for the "E-commerce Category Cache" module.
{
    "name": """E-commerce Category Cache""",
    "summary": """Use this module to greatly accelerate the loading of a page with a large number of product categories""",
    "category": "Website",
    "images": ["images/websale_cache.png"],
    "version": "13.0.1.0.1",
    "author": "IT-Projects LLC, Artyom Losev",
    "support": "apps@itpp.dev",
    "website": "https://www.it-projects.info",
    "license": "Other OSI approved licence",  # MIT
    "price": 25.00,
    "currency": "EUR",
    "depends": ["website_sale", "website", "base_action_rule"],
    "data": ["views.xml", "data/ir_action_server.xml", "data/base_action_rules.xml"],
    # Marked not installable, so Odoo will not offer this module for install.
    "installable": False,
}
5537fd0769af5384988d439a528247d706c25d2b | 848 | py | Python | lumin/utils/mod_ver.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 43 | 2019-02-11T16:16:42.000Z | 2021-12-13T15:35:20.000Z | lumin/utils/mod_ver.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 48 | 2020-05-21T02:40:50.000Z | 2021-08-10T11:07:08.000Z | lumin/utils/mod_ver.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 14 | 2019-05-02T15:09:41.000Z | 2022-01-12T21:13:34.000Z | import pkg_resources
__all__ = []
| 56.533333 | 160 | 0.602594 |
553885dd25affc404a552785fdb6d4e6392000ff | 18,526 | py | Python | pysac/mhs_atmosphere/mhs_model/flux_tubes.py | SolarDrew/pysac | 9fd86dd03966b7e7f90653a47a2ccca7964c83bc | [
"BSD-2-Clause"
] | null | null | null | pysac/mhs_atmosphere/mhs_model/flux_tubes.py | SolarDrew/pysac | 9fd86dd03966b7e7f90653a47a2ccca7964c83bc | [
"BSD-2-Clause"
] | null | null | null | pysac/mhs_atmosphere/mhs_model/flux_tubes.py | SolarDrew/pysac | 9fd86dd03966b7e7f90653a47a2ccca7964c83bc | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 11:37:39 2014
@author: sm1fg
Construct the magnetic network and generate the adjustments to the
non-magnetic atmosphere for mhs equilibrium.
"""
import os
import warnings
import numpy as np
import astropy.units as u
from scipy.interpolate import RectBivariateSpline
#============================================================================
# locate flux tubes and footpoint strength
#============================================================================
def get_flux_tubes(
                model_pars,
                coords,
                option_pars
                ):
    """ Obtain an array of x,y coordinates and corresponding vertical
    component value for the photospheric magnetic field.

    Parameters
    ----------
    model_pars : dict
        Model parameters; ``nftubes`` sets the number of flux tubes and
        ``model`` may select the 'drewtube' configuration.
    coords : dict
        Domain coordinates (not used here; kept for a uniform signature).
    option_pars : dict
        Boolean switches selecting which published configuration to build.

    Returns
    -------
    xi, yi, Si : astropy Quantities
        Footpoint x/y locations (Mm) and axial field strengths (T or kG)
        for each flux tube.
    """
    # Default: all footpoints at the origin with the total strength shared
    # equally between the tubes.
    if model_pars['nftubes'] == 0:
        xi, yi, Si = [[0.]]*u.Mm, [[0.]]*u.Mm, [[0.0]]*u.T # x,y,Bz(r=0,z=0)
    else:
        xi, yi, Si = (
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.1/model_pars['nftubes']]] * model_pars['nftubes'],
                unit=u.T),
        )
    # parameters for matching Mumford,Fedun,Erdelyi 2014
    # NOTE(review): 'l_sunspot' is a stand-alone ``if``; when it is the
    # only option set, the elif-chain below still falls through to the
    # ``else`` and raises ValueError -- confirm this is intended.
    if option_pars['l_sunspot']:
        Si = [[0.5]]*u.T # 128.5mT SI units
    # parameters for matching Mumford,Fedun,Erdelyi 2014
    if option_pars['l_mfe']:
        Si = [[0.1436]]*u.T # 128.5mT SI units
    elif option_pars['l_drewmod']:
        Si = [[0.012]] * u.T
        #Si = [[0.005]] * u.T
        #Si = [[0.05]] * u.T
    elif model_pars['model'] == 'drewtube':
        Si = [[2.7]] * u.kG
        #Si = [[0.001]] * u.T
    # parameters for matching Gent,Fedun,Mumford,Erdelyi 2014
    elif option_pars['l_single']:
        Si = [[0.1]]*u.T # 100mT SI units
    # parameters for matching Gent,Fedun,Erdelyi 2014 flux tube pair
    elif option_pars['l_tube_pair']:
        xi, yi, Si = (
            u.Quantity([
                [ 1.0],
                [ 1.0],
                [-0.95],
                [-1.05]
            ], unit=u.Mm),
            u.Quantity([
                [ 0.00],
                [ 0.00],
                [ .15],
                [-0.15]
            ], unit=u.Mm),
            u.Quantity([
                [ 50e-3],
                [ 50e-3],
                [ 50e-3],
                [ 50e-3]
            ], unit=u.T)
        )# 50mT SI
    # parameters for matching Gent,Fedun,Erdelyi 2014 twisted flux tubes
    elif option_pars['l_multi_twist']:
        """xi, yi, Si = (
            u.Quantity([
                [ 0.34],
                [ 0.07],
                [ .14],
                [-0.31]
            ], unit=u.Mm),
            u.Quantity([
                [ 0.20],
                [ 0.33],
                [ 0.04],
                [-0.34]
            ], unit=u.Mm),
            u.Quantity([
                [ 50e-3],
                [ 50e-3],
                [ 50e-3],
                [ 50e-3]
            ], unit=u.T)
        )# 50mT SI"""
        xi, yi, Si = (u.Quantity([[0.34], [0.07], [0.14], [-0.31]], unit=u.Mm),
                      u.Quantity([[0.2], [0.33], [0.04], [-0.34]], unit=u.Mm),
                      u.Quantity([[50e-3], [50e-3], [50e-3], [50e-3]], unit=u.T))
    elif option_pars['l_multi_netwk']:
        # Five clusters of three tubes, each cluster centred on (x1, y1)
        # and then jittered randomly by up to +/-0.5 Mm.
        xi, yi, Si = (
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.5/model_pars['nftubes']]] * model_pars['nftubes'],
                unit=u.T),
        )
        x1 = [-1.75, -0.75, 1.25, 1.00, -0.75]
        y1 = [-1.00, 0.50, 0.50, -1.50, 1.70]
        xi[ : 3] += x1[0] * u.Mm
        xi[3 : 6] += x1[1] * u.Mm
        xi[6 : 9] += x1[2] * u.Mm
        xi[9 :12] += x1[3] * u.Mm
        xi[12:15] += x1[4] * u.Mm
        yi[ : 3] += y1[0] * u.Mm
        yi[3 : 6] += y1[1] * u.Mm
        yi[6 : 9] += y1[2] * u.Mm
        yi[9 :12] += y1[3] * u.Mm
        yi[12:15] += y1[4] * u.Mm
        for xj in xi:
            xj += np.random.uniform(-0.5,0.5) * u.Mm
        for xj in yi:
            xj += np.random.uniform(-0.5,0.5) * u.Mm
    elif option_pars['l_multi_lanes']:
        # Six lanes of tubes at fixed x positions, jittered randomly.
        xi, yi, Si = (
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.]] * model_pars['nftubes'], unit=u.Mm),
            u.Quantity([
                [0.475/model_pars['nftubes']]] * model_pars['nftubes'],
                unit=u.T),
        )
        x1 = [-2., -1.2, -0.4, 0.4, 1.2, 2.]
        xi[ : 3] += x1[0] * u.Mm
        xi[3 : 6] += x1[1] * u.Mm
        xi[6 : 9] += x1[2] * u.Mm
        xi[9 :12] += x1[3] * u.Mm
        xi[12:15] += x1[4] * u.Mm
        # NOTE(review): the slice below starts at 16 and so skips index 15
        # entirely -- possibly intended to be xi[15:18]; confirm.
        xi[16:18] += x1[5] * u.Mm
        for xj in xi:
            xj += np.random.uniform(-0.5,0.5) * u.Mm
        for xj in yi:
            xj += np.random.uniform(-0.25,0.25) * u.Mm
    else:
        raise ValueError("in get_flux_tubes axial parameters need to be defined")
    return xi, yi, Si
#-----------------------------------------------------------------------------
#
def get_hmi_flux_tubes(
        model_pars, option_pars,
        indx,
        dataset = 'hmi_m_45s_2014_07_06_00_00_45_tai_magnetogram_fits',
        sunpydir = os.path.expanduser('~/sunpy/data/'),
        savedir = os.path.expanduser('~/figs/hmi/'),
        l_newdata = False
        ):
    """Extract and interpolate a patch of photospheric line-of-sight
    magnetic field from an SDO/HMI magnetogram for use as flux-tube
    footpoints.

    Parameters
    ----------
    model_pars, option_pars : dict
        Model/option parameter dicts.  Currently unused here; kept for a
        uniform call signature with ``get_flux_tubes``.
    indx : sequence of 4 ints
        Lower and upper pixel indices each of the x and y coordinates,
        selecting the sub-map to extract.
    dataset : str
        File name of the HMI magnetogram FITS file inside ``sunpydir``.
    sunpydir : str
        Directory holding (or receiving) the VSO download.
    savedir : str
        Output directory for figures; created if missing.
    l_newdata : bool
        If True, download the magnetogram via VSO into ``sunpydir``.

    Returns
    -------
    tuple
        ``(s_int, x_int, y_int, nx2, ny2, dx_int, dy_int, cmin, cmax, FWHM)``
        -- the interpolated Bz patch (Tesla), physical coordinates and grid
        spacings (metres), the interpolant grid size, symmetric plot limits
        and the footpoint FWHM.
    """
    from sunpy.net import vso
    import sunpy.map
    client = vso.VSOClient()
    results = client.query(vso.attrs.Time("2014/07/05 23:59:50",
                                          "2014/07/05 23:59:55"),
                           vso.attrs.Instrument('HMI'),
                           vso.attrs.Physobs('LOS_magnetic_field'))
    if l_newdata:
        # BUGFIX: was ``os.path.exits`` -> AttributeError at runtime.
        if not os.path.exists(sunpydir):
            raise ValueError("in get_hmi_map set 'sunpy' dir for vso data\n"+
                "for large files you may want link to local drive rather than network")
        client.get(results).wait(progress=True)
    # BUGFIX: was ``os.path.exits`` here as well.
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    hmi_map = sunpy.map.Map(sunpydir+dataset)

    # Sub-map of the line-of-sight field, in units of Gauss Bz.
    s = hmi_map.data[indx[0]:indx[1], indx[2]:indx[3]]
    s *= u.G
    nx = s.shape[0]
    ny = s.shape[1]
    nx2, ny2 = 2*nx, 2*ny  # size of interpolant

    # Pixel size in arc seconds.
    # NOTE(review): ``.items()[0][1]`` is not subscriptable on Python 3
    # dict views -- verify against the sunpy ``Map.scale`` API in use.
    dx, dy = hmi_map.scale.items()[0][1], hmi_map.scale.items()[1][1]
    x, y = np.mgrid[
        hmi_map.xrange[0]+indx[0]*dx:hmi_map.xrange[0]+indx[1]*dx:1j*nx2,
        hmi_map.xrange[0]+indx[2]*dy:hmi_map.xrange[0]+indx[3]*dy:1j*ny2
    ]

    # Arrays to interpolate s from/to.
    fx = u.Quantity(np.linspace(x.min().value, x.max().value, nx), unit=x.unit)
    fy = u.Quantity(np.linspace(y.min().value, y.max().value, ny), unit=y.unit)
    xnew = u.Quantity(np.linspace(x.min().value, x.max().value, nx2), unit=x.unit)
    ynew = u.Quantity(np.linspace(y.min().value, y.max().value, ny2), unit=y.unit)
    f = RectBivariateSpline(fx, fy, s.to(u.T))
    # The initial model assumes a relatively small region, so a linear
    # Cartesian map is applied here.  Consideration may be required if
    # larger regions are of interest, where curvature or orientation near
    # the limb of the surface is significant.
    s_int = f(xnew, ynew)  # interpolated field, now in Tesla
    s_int /= 4.            # rescale s as extra pixels will sum over FWHM
    x_int = x * 7.25e5 * u.m   # convert arcsec to metres
    y_int = y * 7.25e5 * u.m
    dx_int = dx * 7.25e5 * u.m
    dy_int = dy * 7.25e5 * u.m
    # BUGFIX: was ``0.5*(dx_SI+dy_SI)`` -- those names are never defined
    # in this function (NameError); the ``_int`` variables are intended.
    FWHM = 0.5*(dx_int + dy_int)
    smax = max(abs(s.min()), abs(s.max()))  # set symmetric plot scale
    cmin = -smax*1e-4
    cmax = smax*1e-4
    # BUGFIX: the return statement was commented out (with stale ``_SI``
    # names), so all computed values were discarded; restored with the
    # names actually computed above.
    return s_int, x_int, y_int, nx2, ny2, dx_int, dy_int, cmin, cmax, FWHM
#============================================================================
# Magnetic Field Construction (See. Fedun et.al 2011)
#============================================================================
def construct_magnetic_field(
                x, y, z,
                x0, y0, S,
                model_pars,
                option_pars,
                physical_constants,
                scales):
    """ Construct self similar magnetic field configuration
    Note if model_pars['B_corona'] = 0 then paper3 results otherwise paper 2

    Parameters
    ----------
    x, y, z : arrays
        Domain coordinates.
    x0, y0 : quantities
        Footpoint location of this flux tube.
    S : quantity
        Axial field strength at the footpoint.
    model_pars, option_pars, physical_constants : dict
        Scale heights / ratios, Z-profile switches and mu0/gravity.
    scales : dict
        Unused in this routine; kept for a uniform call signature.

    Returns
    -------
    pbbal, rho_1, Bx, By, Bz, B2x, B2y
        Pressure-balance term, density correction, the three field
        components, and the horizontal magnetic tension terms.
    """
    #Extract commonly used scales:
    z1 = model_pars['photo_scale']
    z2 = model_pars['chrom_scale']
    z3 = model_pars['corona_scale']
    f0 = model_pars['radial_scale']
    mu0 = physical_constants['mu0']
    g0 = physical_constants['gravity']
    #scale Bf1, Bf2 to sum to 1
    Bf1 = model_pars['phratio']
    Bf2 = model_pars['chratio']
    Bf3 = model_pars['coratio']
    Bbz = model_pars['B_corona']
    #define exponentials and derivatives, basis functions
    # B0z is the on-axis vertical profile; B10dz/B20dz/B30dz are its first,
    # second and third z-derivatives for the chosen profile option.
    if option_pars['l_B0_expz']:
        B1z = Bf1 * np.exp(-z**2/z1**2)
        B2z = Bf2 * np.exp(-z/z2)
        B3z = Bf3 * np.exp(-z/z3)
        B0z = B1z + B2z + B3z
        B10dz= -2*z*B1z/z1**2 - B2z/z2 - B3z/z3
        B20dz= -2* B1z/z1**2 + 4*z**2*B1z/z1**4 + B2z/z2**2 + B3z/z3**2
        B30dz= 12*z*B1z/z1**4 - 8*z**3*B1z/z1**6 - B2z/z2**3 - B3z/z3**3
    elif option_pars['l_B0_rootz']:
        B0z = Bf2 * z2**(0.125) / (z + z2)**(0.125)
        B10dz = -0.125 * B0z / (z + z2)
        B20dz = 9./64. * B0z / (z + z2)**2
        B30dz = -153./512 * B0z / (z + z2)**3
    elif option_pars['l_B0_quadz']:
        B1z = Bf1 * z1**2 / (z**2 + z1**2)
        B2z = Bf2 * z2 /(z + z2)
        B3z = Bf3 * np.exp(-z/z3)# B3z = Bf3 * z3 /(z + z3)
        B0z = B1z + B2z + B3z
        B10dz=- 2 * z *B1z**2/z1**2 - B2z**2/z2 - B3z/z3
        B20dz= 8*z**2*B1z**3/z1**4 - 2* B1z**2/z1**2 +2*B2z**3/z2**2 +2*B3z/z3**2
        B30dz=-48*z**3*B1z**4/z1**6 +24*z*B1z**3/z1**4 -6*B2z**4/z2**3 -6*B3z/z3**3
    else:
        raise ValueError("in mhs_model.flux_tubes.construct_magnetic_field \
                         option_pars all False for axial strength Z dependence")
    # Horizontal distance from the tube axis.
    rr= np.sqrt((x-x0)**2 + (y-y0)**2)
    #self similarity functions
    fxyz= -0.5*rr**2 * B0z**2
    G0 = np.exp(fxyz/f0**2)
    #Define Field
    B0z2 = B0z*B0z
    Bx = -S * (x-x0) * (B10dz * B0z * G0)
    By = -S * (y-y0) * (B10dz * B0z * G0)
    # Bz includes the uniform background coronal field Bbz.
    Bz = S * B0z2 * G0 + Bbz
    f02 = f0*f0
    G02 = G0*G0
    B0z3 = B0z2*B0z
    # B0z4 = B0z3*B0z
    B10dz2 = B10dz**2
    #Define derivatives of Bx
    dxBx = - S * (B10dz * B0z * G0) + 2 * S * (x-x0)**2 * B10dz * B0z3 * G0/f02
    dyBx = 2 * S * (x-x0) * (y-y0) * B10dz * B0z3 * G0/f02
    dzBx = - 2 * S * (x-x0) * (B0z*B20dz + (1. + 2.*fxyz/f02)*B10dz2)*G0
    #Define derivatives By
    dyBy = - S * (B10dz * B0z * G0) \
           + 2 * S * (y-y0)**2 * B10dz * B0z3 * G0/f02
    dxBy = 2 * S * (x-x0) * (y-y0) * B10dz * B0z3 * G0/f02
    dzBy = - 2 * S * (y-y0) * (B0z*B20dz + (1. + 2.*fxyz/f02)*B10dz2)*G0
    #Magnetic Pressure and horizontal thermal pressure balance term
    pbbal= -0.5*Bz**2/mu0 + 0.5/mu0 * S**2 * G02 * (
           f02 * B0z * B20dz + 2 * fxyz * B10dz2) + S*Bbz*G0/mu0 * (
           f02 * B20dz / B0z + (2 * fxyz - f02) * B10dz2 / B0z2)
    #density balancing B
    # import pdb; pdb.set_trace()
    # Free large intermediates before the big rho_1 expression.
    del rr, x, y, z
    rho_1 = S**2*G02/(mu0*g0) * (
            (0.5*f02 + 2*fxyz) * B10dz*B20dz + 0.5*f02 * B0z*B30dz
            - 2. * B0z3*B10dz
            ) + S*Bbz*G0/(mu0*g0) * (f02*B30dz/B0z + (2*f02 - 2*fxyz +
            4*fxyz**2/f02) * B10dz2*B10dz/B0z3 +
            3 * (2*fxyz - f02) * B20dz*B10dz/B0z2
            - 2 * (fxyz/f02 + 1) * B10dz*B0z )
    # Horizontal components of the magnetic tension force (B.grad)B / mu0.
    B2x = (Bx * dxBx + By * dyBx + Bz * dzBx)/mu0
    B2y = (Bx * dxBy + By * dyBy + Bz * dzBy)/mu0
    return pbbal, rho_1, Bx, By, Bz, B2x, B2y
#============================================================================
# Magnetic Field Construction (See. Fedun et.al 2011)
#============================================================================
def construct_pairwise_field(x, y, z,
                             xi, yi,
                             xj, yj,
                             Si, Sj,
                             model_pars,
                             option_pars,
                             physical_constants,
                             scales
                            ):
    """ Construct self similar magnetic field configuration

    Evaluates the analytic field of a *pair* of self-similar flux tubes with
    axes at (xi, yi) and (xj, yj) and amplitudes Si and Sj, on the coordinate
    arrays x, y, z, and returns the pairwise cross terms needed to balance
    pressure, density and the horizontal forces for that pair.

    Parameters
    ----------
    x, y, z : array-like
        Coordinates at which the field is evaluated.
    xi, yi, xj, yj : scalar or array-like
        Horizontal positions of the axes of flux tubes i and j.
    Si, Sj : scalar
        Amplitude factors of the two flux tubes.
    model_pars, option_pars, physical_constants : dict-like
        Model configuration (scale heights, field ratios, mu0, gravity, ...).
    scales : dict-like
        Accepted for interface uniformity but not used in this function.

    Returns
    -------
    tuple
        (pbbal, rho_1, Fx, Fy, B2x, B2y): pairwise pressure-balance term,
        density correction, horizontal force components, and the cross
        advection/tension terms ((B_i.grad)B_j + (B_j.grad)B_i)/mu0.
    """
    #Extract commonly used scales:
    # z1, z2, z3: vertical scales of the photospheric, chromospheric and
    # coronal profile components; f0: horizontal (radial) scale.
    z1 = model_pars['photo_scale']
    z2 = model_pars['chrom_scale']
    z3 = model_pars['corona_scale']
    f0 = model_pars['radial_scale']
    mu0 = physical_constants['mu0']
    g0 = physical_constants['gravity']
    #scale Bf1, Bf2 to sum to 1
    Bf1 = model_pars['phratio']
    Bf2 = model_pars['chratio']
    Bf3 = model_pars['coratio']
    Bbz = model_pars['B_corona']
    #define exponentials and derivatives, basis functions
    # B0z is the on-axis vertical profile; B10dz, B20dz, B30dz are the
    # associated first/second/third-derivative combinations used below.
    if option_pars['l_B0_expz']:
        # Gaussian photospheric component plus two exponential layers.
        B1z = Bf1 * np.exp(-z**2/z1**2)
        B2z = Bf2 * np.exp(-z/z2)
        B3z = Bf3 * np.exp(-z/z3)
        B0z = B1z + B2z + B3z
        B10dz= -2*z*B1z/z1**2 - B2z/z2 - B3z/z3
        B20dz= -2* B1z/z1**2 + 4*z**2*B1z/z1**4 + B2z/z2**2 + B3z/z3**2
        B30dz= 12*z*B1z/z1**4 - 8*z**3*B1z/z1**6 - B2z/z2**3 - B3z/z3**3
    else:
        #if option_pars['l_BO_quadz']:
        # Rational (quadratic) profiles for the lower layers, exponential corona.
        B1z = Bf1 * z1**2 / (z**2 + z1**2)
        B2z = Bf2 * z2 /(z + z2)
        B3z = Bf3 * np.exp(-z/z3)
#        B3z = Bf3 * z3 /(z + z3)
        B0z = B1z + B2z + B3z
        B10dz=- 2 * z *B1z**2/z1**2 - B2z**2/z2 - B3z/z3
        B20dz= 8*z**2*B1z**3/z1**4 - 2* B1z**2/z1**2 +2*B2z**3/z2**2 +2*B3z/z3**2
        B30dz=-48*z**3*B1z**4/z1**6 +24*z*B1z**3/z1**4 -6*B2z**4/z2**3 -6*B3z/z3**3
    # Cache frequently reused powers/products of the profile and derivatives.
    B10dz2 = B10dz**2
    BB10dz = B10dz*B0z
    BB10dz2 = BB10dz**2
    BB20dz = B20dz*B0z
    B0z2 = B0z*B0z
#    B30dz= -B1z/z1**3 - B2z/z2**3
    # Horizontal distance of each evaluation point from each flux-tube axis.
    ri= np.sqrt((x-xi)**2 + (y-yi)**2)
    rj= np.sqrt((x-xj)**2 + (y-yj)**2)
    ri2 = ri**2
    rj2 = rj**2
    #self similarity functions
    # fxyz = -r^2 B0z^2 / 2; G0 = exp(fxyz / f0^2) is the radial envelope.
    fxyzi= -ri2 * B0z2/2.
    fxyzj= -rj2 * B0z2/2.
    f02 = f0*f0
    G0i = np.exp(fxyzi/f02)
    G0j = np.exp(fxyzj/f02)
    G0ij = G0i*G0j
    #Define Field
    # Field components of tubes i and j; Bbz is a uniform background
    # vertical (coronal) field added to each vertical component.
    Bxi = -Si * (x-xi) * (B10dz * B0z * G0i)
    Byi = -Si * (y-yi) * (B10dz * B0z * G0i)
    Bzi = Si * B0z**2 * G0i + Bbz
    Bxj = -Sj * (x-xj) * (B10dz * B0z * G0j)
    Byj = -Sj * (y-yj) * (B10dz * B0z * G0j)
    Bzj = Sj * B0z**2 * G0j + Bbz
    B0z3 = B0z2*B0z
    B0z4 = B0z3*B0z
    BdB2 = B10dz2/B0z2
    B2dB = B20dz/B0z
    #Magnetic Pressure and horizontal thermal pressure balance term
    pbbal= - Bzi*Bzj/mu0 - Si*Sj*G0ij*f02*(B10dz2 + BB20dz)/mu0 \
           + Bbz*Si*G0i * ((2*fxyzi - f02) * BdB2 + f02 * B2dB) /mu0 \
           + Bbz*Sj*G0j * ((2*fxyzj - f02) * BdB2 + f02 * B2dB) /mu0
    #density balancing B
    # Pairwise density correction required for magnetohydrostatic balance.
    rho_1 = \
           2.*Si*Sj*G0ij*BB10dz/(mu0*g0)*(
           + (fxyzi + fxyzj) * (BdB2 + B2dB)
           - ((fxyzi + fxyzj)/f02 + 2.) * B0z2
           + 0.5*f02 * (3.*B2dB + B30dz/B10dz)
           +((x-xi)*(x-xj) + (y-yi)*(y-yj)) * ((
            1. + (fxyzi + fxyzj)/f02) * B10dz2 + BB20dz - B0z4/f02)
           ) + Bbz*Si*G0i/(mu0*g0) * (B30dz/B0z*f02 - 2*(fxyzi/f02 + 1) *
           BB10dz + (4*fxyzi**2/f02 - 2*fxyzi + 2*f02) * B10dz2*B10dz/B0z3
           + (6*fxyzi - 3*f02) * B10dz*B20dz/B0z2
           ) + Bbz*Sj*G0j/(mu0*g0) * (B30dz/B0z*f02 - 2*(fxyzj/f02 + 1) *
           BB10dz + (4*fxyzj**2/f02 - 2*fxyzj + 2*f02) * B10dz2*B10dz/B0z3
           + (6*fxyzj - 3*f02) * B10dz*B20dz/B0z2
           )
    # Horizontal pairwise force components.
    Fx = - 2*Si*Sj/mu0 * G0ij*BB10dz2/f02 * (
                         (x-xi) * fxyzi + (x-xj) * fxyzj )
    Fy = - 2*Si*Sj/mu0 * G0ij*BB10dz2/f02 * (
                         (y-yi) * fxyzi + (y-yj) * fxyzj )
    #Define derivatives of Bx
    # NOTE(review): the factor 2 on the G0-derivative terms, and the
    # dxiBy/dyiBy naming below, do not match a naive differentiation of
    # Bxi/Byi with G0 = exp(-r^2 B0z^2 / (2 f0^2)); presumably intentional
    # per the reference model -- confirm against the published derivation.
    dxiBx = - Si * (BB10dz * G0i) \
            + 2 * Si * (x-xi)**2 * B10dz * B0z3 * G0i/f02
    dyiBx = 2 * Si * (x-xi) * (y-yi) * B10dz * B0z3 * G0i/f02
    dziBx = - Si * (x-xi) * (B0z*B20dz + (1. + 2.*fxyzi/f02)*B10dz2)*G0i
    dxjBx = - Sj * (BB10dz * G0j) \
            + 2 * Sj * (x-xj)**2 * B10dz * B0z3 * G0j/f02
    dyjBx = 2 * Sj * (x-xj) * (y-yj) * B10dz * B0z3 * G0j/f02
    dzjBx = - Sj * (x-xj) * (B0z*B20dz + (1. + 2.*fxyzj/f02)*B10dz2)*G0j
    #Define derivatives By
    dxiBy = - Si * (BB10dz * G0i) \
            + 2 * Si * (y-yi)**2 * B10dz * B0z3 * G0i/f02
    dyiBy = 2 * Si * (x-xi) * (y-yi) * B10dz * B0z3 * G0i/f02
    dziBy = - Si * (y-yi) * (B0z*B20dz + (1. + 2.*fxyzi/f02)*B10dz2)*G0i
    dxjBy = - Sj * (BB10dz * G0j) \
            + 2 * Sj * (y-yj)**2 * B10dz * B0z3 * G0j/f02
    dyjBy = 2 * Sj * (x-xj) * (y-yj) * B10dz * B0z3 * G0j/f02
    dzjBy = - Sj * (y-yj) * (B0z*B20dz + (1. + 2.*fxyzj/f02)*B10dz2)*G0j
    # Cross advection terms (B_i . grad)B_j + (B_j . grad)B_i, x and y parts.
    B2x = (Bxi * dxjBx + Byi * dyjBx + Bzi * dzjBx
         + Bxj * dxiBx + Byj * dyiBx + Bzj * dziBx)/mu0
    B2y = (Bxi * dxjBy + Byi * dyjBy + Bzi * dzjBy
         + Bxj * dxiBy + Byj * dyiBy + Bzj * dziBy)/mu0
    return pbbal, rho_1, Fx, Fy, B2x, B2y
| 40.986726 | 83 | 0.45428 |
5539d275ebd36d43b5d44642306d4d9d488a83a3 | 961 | py | Python | s3_file_uploads/serializers.py | dabapps/django-s3-file-uploads | 17ed6b4e02bd43bc925af987ff5bf971a82da434 | [
"BSD-3-Clause"
] | 5 | 2019-05-27T03:51:30.000Z | 2021-03-19T11:24:09.000Z | s3_file_uploads/serializers.py | dabapps/django-s3-file-uploads | 17ed6b4e02bd43bc925af987ff5bf971a82da434 | [
"BSD-3-Clause"
] | 7 | 2019-12-04T22:38:13.000Z | 2021-06-10T17:50:06.000Z | s3_file_uploads/serializers.py | dabapps/django-s3-file-uploads | 17ed6b4e02bd43bc925af987ff5bf971a82da434 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from s3_file_uploads.constants import ACCESS_CONTROL_TYPES, PRIVATE
from s3_file_uploads.models import UploadedFile
| 27.457143 | 80 | 0.597294 |
553a35ee3c9965503e444537543d6f056c2747c7 | 1,873 | py | Python | vbts_webadmin/views/subscribers.py | pcarivbts/vbts-webadmin | 0616eca6492daa3ebc26b442e8dbebda7ac06d51 | [
"BSD-3-Clause"
] | null | null | null | vbts_webadmin/views/subscribers.py | pcarivbts/vbts-webadmin | 0616eca6492daa3ebc26b442e8dbebda7ac06d51 | [
"BSD-3-Clause"
] | 3 | 2020-06-05T18:34:16.000Z | 2021-06-10T20:31:18.000Z | vbts_webadmin/views/subscribers.py | pcarivbts/vbts-webadmin | 0616eca6492daa3ebc26b442e8dbebda7ac06d51 | [
"BSD-3-Clause"
] | 2 | 2018-07-04T00:54:50.000Z | 2022-01-28T16:52:10.000Z | """
Copyright (c) 2015-present, Philippine-California Advanced Research Institutes-
The Village Base Station Project (PCARI-VBTS). All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from django.contrib import messages as alerts
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.db.models import Q
from django.shortcuts import render
from django.utils.translation import ugettext as _
from vbts_subscribers.models import SipBuddies
from vbts_webadmin.forms import SearchForm
| 33.446429 | 79 | 0.705286 |
553df305accc95bd90095dbb25295bf9604e38ba | 268 | py | Python | Aula 05/[Exercicio 01] .py | IsaacPSilva/LetsCode | 64396ee9fd0ad395598c74c3727a614261e5dd50 | [
"MIT"
] | null | null | null | Aula 05/[Exercicio 01] .py | IsaacPSilva/LetsCode | 64396ee9fd0ad395598c74c3727a614261e5dd50 | [
"MIT"
] | null | null | null | Aula 05/[Exercicio 01] .py | IsaacPSilva/LetsCode | 64396ee9fd0ad395598c74c3727a614261e5dd50 | [
"MIT"
] | null | null | null | '''1. Faa um programa que pede para o usurio digitar uma palavra e
imprima cada letra em uma linha.'''
#Informando frase a ser verificada
frase = input('Digite uma palavra: ')
#Convertendo frase em palavras, e imprimindo depois
for letra in frase:
print(letra) | 29.777778 | 68 | 0.75 |
553e5975ce3bca9dd2037d832b61d89b76e372a6 | 16,307 | py | Python | examples/vq_rnn_fruit_joint/vq_fruit_joint.py | kastnerkyle/tfbldr | 58ad1437d500924acd15d1c6eec4a864f57e9c7c | [
"BSD-3-Clause"
] | 4 | 2018-05-15T22:35:00.000Z | 2019-02-22T01:40:49.000Z | examples/vq_rnn_fruit_joint/vq_fruit_joint.py | kastnerkyle/tfbldr | 58ad1437d500924acd15d1c6eec4a864f57e9c7c | [
"BSD-3-Clause"
] | null | null | null | examples/vq_rnn_fruit_joint/vq_fruit_joint.py | kastnerkyle/tfbldr | 58ad1437d500924acd15d1c6eec4a864f57e9c7c | [
"BSD-3-Clause"
] | 2 | 2018-06-09T15:08:44.000Z | 2018-11-20T10:13:48.000Z | from tfbldr.nodes import Conv2d
from tfbldr.nodes import ConvTranspose2d
from tfbldr.nodes import VqEmbedding
from tfbldr.nodes import BatchNorm2d
from tfbldr.nodes import Linear
from tfbldr.nodes import ReLU
from tfbldr.nodes import Sigmoid
from tfbldr.nodes import Tanh
from tfbldr.nodes import OneHot
from tfbldr.nodes import Softmax
from tfbldr.nodes import LSTMCell
from tfbldr.nodes import CategoricalCrossEntropyIndexCost
from tfbldr.nodes import CategoricalCrossEntropyLinearIndexCost
from tfbldr.nodes import BernoulliCrossEntropyCost
from tfbldr.datasets import ordered_list_iterator
from tfbldr.plot import get_viridis
from tfbldr.plot import autoaspect
from tfbldr.datasets import fetch_fruitspeech
from tfbldr import get_params_dict
from tfbldr import run_loop
from tfbldr import scan
import tensorflow as tf
import numpy as np
from collections import namedtuple, defaultdict
import itertools
viridis_cm = get_viridis()
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Load the fruit-speech corpus and find the global amplitude range of the
# mean-centred signals; this range drives the normalisation below.
fruit = fetch_fruitspeech()
minmin = np.inf
maxmax = -np.inf
for s in fruit["data"]:
    si = s - s.mean()
    minmin = min(minmin, si.min())
    maxmax = max(maxmax, si.max())
# Split into train/validation: the 15th utterance encountered for each
# target class goes to validation; everything else goes to training.
train_data = []
valid_data = []
type_counts = defaultdict(lambda: 0)
final_audio = []
for n, s in enumerate(fruit["data"]):
    type_counts[fruit["target"][n]] += 1
    s = s - s.mean()
    # Rescale the mean-centred signal to [-1, 1] with the corpus-wide range.
    n_s = (s - minmin) / float(maxmax - minmin)
    n_s = 2 * n_s - 1
    #n_s = mu_law_transform(n_s, 256)
    if type_counts[fruit["target"][n]] == 15:
        valid_data.append(n_s)
    else:
        train_data.append(n_s)
# Slice each utterance into windows of `cut` samples advancing by `step`
# windows at a time (presumably -- confirm against _cuts defined elsewhere
# in this file).
cut = 256
step = 1
train_audio, train_audio_idx = _cuts(train_data, cut, step)
valid_audio, valid_audio_idx = _cuts(valid_data, cut, step)
random_state = np.random.RandomState(1999)
l1_dim = (64, 1, 4, [1, 1, 2, 1])
l2_dim = (128, 1, 4, [1, 1, 2, 1])
l3_dim = (256, 1, 4, [1, 1, 2, 1])
l3_dim = (257, 1, 4, [1, 1, 2, 1])
l4_dim = (256, 1, 4, [1, 1, 2, 1])
l5_dim = (257, 1, 1, [1, 1, 1, 1])
embedding_dim = 512
vqvae_batch_size = 50
rnn_batch_size = 50
n_hid = 512
n_clusters = 64
# goes from 256 -> 16
hardcoded_z_len = 16
# reserve 0 for "start code"
n_inputs = embedding_dim + 1
switch_step = 10000
both = True
# reserve 0 for start code
rnn_init = "truncated_normal"
forward_init = "truncated_normal"
l_dims = [l1_dim, l2_dim, l3_dim, l4_dim, l5_dim]
stride_div = np.prod([ld[-1] for ld in l_dims])
ebpad = [0, 0, 4 // 2 - 1, 0]
dbpad = [0, 0, 4 // 2 - 1, 0]
train_itr_random_state = np.random.RandomState(1122)
valid_itr_random_state = np.random.RandomState(12)
train_itr = ordered_list_iterator([train_audio], train_audio_idx, vqvae_batch_size, random_state=train_itr_random_state)
valid_itr = ordered_list_iterator([valid_audio], valid_audio_idx, vqvae_batch_size, random_state=valid_itr_random_state)
"""
for i in range(10000):
tt = train_itr.next_batch()
# tt[0][3][:, :16] == tt[0][2][:, 16:32]
"""
g, vs = create_graph()
rnn_train = False
step = 0
with tf.Session(graph=g) as sess:
run_loop(sess,
loop, train_itr,
loop, valid_itr,
n_steps=75000,
n_train_steps_per=5000,
n_valid_steps_per=500)
| 38.189696 | 124 | 0.592506 |
553eb4733f79df133de3656ed4a77eb050d859d2 | 311 | py | Python | scripts/poorscrum/poorscrum_tools.py | r09491/poorscrum | cdbbc0db03fde842f546093f46e70d03a105bbbd | [
"MIT"
] | null | null | null | scripts/poorscrum/poorscrum_tools.py | r09491/poorscrum | cdbbc0db03fde842f546093f46e70d03a105bbbd | [
"MIT"
] | 7 | 2021-03-18T22:37:46.000Z | 2022-03-11T23:41:39.000Z | scripts/poorscrum/poorscrum_tools.py | r09491/poorscrum | cdbbc0db03fde842f546093f46e70d03a105bbbd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
| 17.277778 | 46 | 0.508039 |
554005d26d7a3413df01a385a87bf09337208562 | 6,162 | py | Python | cata/teachers/ensembles/both_rotation_ensemble.py | seblee97/student_teacher_catastrophic | 9baaaf2850025ba9cf33d61c42386bc4c3b2dad2 | [
"MIT"
] | 2 | 2021-09-13T01:44:09.000Z | 2021-12-11T11:56:49.000Z | cata/teachers/ensembles/both_rotation_ensemble.py | seblee97/student_teacher_catastrophic | 9baaaf2850025ba9cf33d61c42386bc4c3b2dad2 | [
"MIT"
] | 8 | 2020-11-13T18:37:30.000Z | 2022-02-15T15:11:51.000Z | cata/teachers/ensembles/both_rotation_ensemble.py | seblee97/student_teacher_catastrophic | 9baaaf2850025ba9cf33d61c42386bc4c3b2dad2 | [
"MIT"
] | null | null | null | from typing import List
from typing import Union
import numpy as np
import torch
from cata.teachers.ensembles import base_teacher_ensemble
from cata.utils import custom_functions
| 35.413793 | 89 | 0.636806 |
5542014f27e11156c75907e597b9852418147144 | 7,176 | py | Python | scripts/admin/admin.py | starmarek/organize-me | 710e7acd86e887b7e4379fde18e1f375846ea59e | [
"MIT"
] | null | null | null | scripts/admin/admin.py | starmarek/organize-me | 710e7acd86e887b7e4379fde18e1f375846ea59e | [
"MIT"
] | null | null | null | scripts/admin/admin.py | starmarek/organize-me | 710e7acd86e887b7e4379fde18e1f375846ea59e | [
"MIT"
] | null | null | null | import json
import logging
import os
import shlex
import subprocess
from pathlib import Path
from types import SimpleNamespace
import coloredlogs
import fire
from .adminFiles import (
DockerComposeFile,
DotenvFile,
GitlabCIFile,
JsonFile,
PackageJsonFile,
Pipfile,
RuntimeTxtFile,
YarnRCFile,
)
log = logging.getLogger("admin")
coloredlogs.install(level="DEBUG")
yarn_dir = ".yarn/releases/"
for file in os.listdir(".yarn/releases"):
if os.getenv("CORE_YARN_VER") in file:
yarn_executable = file
virtualenv_path = subprocess.run(["pipenv", "--venv"], capture_output=True, text=True, check=True).stdout.strip()
dotenv_file = DotenvFile(path=".env")
compose_file = DockerComposeFile(path="docker-compose.yml")
dotenv_template_file = DotenvFile(path=".template.env")
gitlab_ci_file = GitlabCIFile(path=".gitlab-ci.yml")
yarnrc_file = YarnRCFile(path=".yarnrc.yml")
runtime_txt_file = RuntimeTxtFile(path="runtime.txt")
pipfile_file = Pipfile(path="Pipfile")
package_json_file = PackageJsonFile(path="package.json")
verifiable_files = [compose_file, gitlab_ci_file, pipfile_file, runtime_txt_file, package_json_file, yarnrc_file]
if __name__ == "__main__":
log.info("Starting admin script")
_verify_versions()
fire.Fire(CLI)
| 32.324324 | 148 | 0.67879 |
5542f0b7bef41dfe29c0868984e349d2a0c056ea | 300 | py | Python | F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py | sylvain2002/CBM101 | 4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5 | [
"MIT"
] | 7 | 2019-07-03T07:41:55.000Z | 2022-02-06T20:25:37.000Z | F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py | sylvain2002/CBM101 | 4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5 | [
"MIT"
] | 9 | 2019-03-14T15:15:09.000Z | 2019-08-01T14:18:21.000Z | F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py | sylvain2002/CBM101 | 4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5 | [
"MIT"
] | 11 | 2019-03-12T10:43:11.000Z | 2021-10-05T12:15:00.000Z | a = 'ARRYR'
b = 'ARSYS'
levenshtein(a,b)
# ANSWER a)
# It quantifies the number of single-letter changes to morph one into the other
#
# ANSWER b)
# We could encode the 'price' of changing between particular amino acids
# thereby acknowledging that some substitutions are more or less costly/likely | 27.272727 | 79 | 0.756667 |
5543d0392b1a991c4c0bc9b77494d93272ec2802 | 743 | py | Python | tests/components/pages/ts.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 15 | 2019-12-19T11:57:30.000Z | 2021-11-15T23:34:41.000Z | tests/components/pages/ts.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 196 | 2019-09-21T15:10:14.000Z | 2022-03-31T11:07:48.000Z | tests/components/pages/ts.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 7 | 2019-10-30T19:38:15.000Z | 2021-12-01T04:54:16.000Z | from dazzler.system import Page
from dazzler.components import core
from tests.components import ts_components as tsc
page = Page(
__name__,
core.Container([
tsc.TypedComponent(
'override',
children=core.Container('foobar'),
num=2,
text='foobar',
boo=True,
arr=[1, 2, 'mixed'],
arr_str=['foo', 'bar'],
arr_num=[7, 8, 9],
arr_obj_lit=[{'name': 'foo'}],
obj={'anything': 'possible'},
enumeration='foo',
union=7,
style={'border': '1px solid rgb(0,0,255)'},
class_name='other'
),
tsc.TypedClassComponent('class based', children='clazz')
])
)
| 27.518519 | 64 | 0.51144 |
55454283c60ef0107317118c446ed4395d8f58a5 | 4,464 | py | Python | src/gistsgetter/app.py | pmfrank/gistsgetter | a19f59604ebf1cb13c641d25c4461b4347bba58a | [
"MIT"
] | null | null | null | src/gistsgetter/app.py | pmfrank/gistsgetter | a19f59604ebf1cb13c641d25c4461b4347bba58a | [
"MIT"
] | null | null | null | src/gistsgetter/app.py | pmfrank/gistsgetter | a19f59604ebf1cb13c641d25c4461b4347bba58a | [
"MIT"
] | null | null | null | """
An application dedicated to creating, editing, and deleting Gists in GitHub
"""
from __future__ import absolute_import
import toga
import pyperclip
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from .common.Search import search
from functools import partial
| 38.817391 | 132 | 0.622536 |
5549b2fc2c6d6a256c772a1fa6b1cb0ba16583fe | 7,401 | py | Python | src/qcar/src/qcar/q_essential.py | bchampp/scylla | 6ec27877cc03c200a874cd0eb25a36c866471250 | [
"MIT"
] | null | null | null | src/qcar/src/qcar/q_essential.py | bchampp/scylla | 6ec27877cc03c200a874cd0eb25a36c866471250 | [
"MIT"
] | null | null | null | src/qcar/src/qcar/q_essential.py | bchampp/scylla | 6ec27877cc03c200a874cd0eb25a36c866471250 | [
"MIT"
] | null | null | null | from quanser.hardware import HIL, HILError, PWMMode
from quanser.multimedia import Video3D, VideoCapture, Video3DStreamType, MediaError, ImageFormat, ImageDataType
from quanser.devices import RPLIDAR, RangingMeasurements, RangingMeasurementMode, DeviceError, RangingDistance
from .q_misc import Utilities
import numpy as np
import pygame
import time
saturate = Utilities.saturate
# region: Cameras
# endregion
# region: LIDAR
# endregion | 37.190955 | 211 | 0.740576 |
554a7b61e03b3173856a7a579bde9d2c36a7f575 | 1,689 | py | Python | ex071.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | ex071.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | ex071.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | '''Crie um programa que simule o funcionamento de um caixa eletrnico. No incio, pergunte ao usurio qual ser o valor a ser sacado (nmero inteiro) e o programa vai informar quantas cdulas de cada valor sero entregues. OBS:
considere que o caixa possui cdulas de R$50, R$20, R$10 e R$1.'''
'''print('--' * 15)
print('{:^30}'.format('Banco CEV'))
print('--' * 15)
valor = int(input('Qual o valor que voc quer sacar R$ '))
c50 = valor % 50
c20 = c50 % 20
c10 = c20 % 10
c1 = c10 % 1
b50 = valor - c50
b20 = valor - b50 - c20
b10 = valor - b50 - b20 - c10
b1 = valor - b50 - b20 - b10 - c1
print(f'Total de {b50/50:.0f} celulas de R$ 50,00')
print(f'Total de {b20/20:.0f} celulas de R$ 20,00')
print(f'Total de {b10/10:.0f} celulas de R$ 10,00')
print(f'Total de {b1/1:.0f} celulas de R$ 1,00')
print('--' * 15)
print('Volte sempre ao Banco CEV! Tenha um bom dia')'''
'''valor = int(input("informe o valor a ser sacado : "))
nota50 = valor // 50
valor %= 50
nota20 = valor // 20
valor %= 20
nota10 = valor // 10
valor %= 10
nota1 = valor // 1
print(f"notas de 50 = {nota50}")
print(f"notas de 20 = {nota20}")
print(f"notas de 10 = {nota10}")
print(f"notas de 1 = {nota1}")'''
# Greedy cash-machine note counter: repeatedly subtract the current note
# denomination (50, 20, 10, then 1) from the requested amount and report
# how many notes of each denomination are dispensed.
print('--' * 15)
print('{:^30}'.format('Banco CEV'))
print('--' * 15)
# Amount to withdraw, in whole currency units.
valor = int(input('Qual o valor que voc quer sacar R$ '))
total = valor  # amount still to be dispensed
cel = 50  # current note denomination
contCel = 0  # notes of the current denomination dispensed so far
while True:
    if total >= cel:
        # Dispense one more note of the current denomination.
        total -= cel
        contCel += 1
    else:
        # Current denomination exhausted: report the count and step down.
        print(f'O total de {contCel} cluldas de R$ {cel}.')
        if cel == 50:
            cel = 20
        elif cel == 20:
            cel = 10
        elif cel == 10:
            cel = 1
        contCel = 0
        # Stop once nothing remains (always reached: 1 divides any total).
        if total == 0:
            break
| 27.241935 | 227 | 0.587922 |
554c5ff1d984eee7cf69842945a06a7b43f122ff | 919 | py | Python | common.py | hoostus/prime-harvesting | 6606b94ea7859fbf217dbea4ace856e3fa4d154e | [
"BlueOak-1.0.0",
"Apache-2.0"
] | 23 | 2016-09-07T06:13:37.000Z | 2022-02-17T23:49:03.000Z | common.py | hoostus/prime-harvesting | 6606b94ea7859fbf217dbea4ace856e3fa4d154e | [
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null | common.py | hoostus/prime-harvesting | 6606b94ea7859fbf217dbea4ace856e3fa4d154e | [
"BlueOak-1.0.0",
"Apache-2.0"
] | 12 | 2016-06-30T17:27:39.000Z | 2021-12-12T07:54:27.000Z | import itertools
import math
import simulate
import harvesting
import plot
from decimal import setcontext, ExtendedContext
# Don't raise exception when we divide by zero
#setcontext(ExtendedContext)
#getcontext().prec = 5
| 31.689655 | 93 | 0.677911 |
554e5d74e0feb6600546ab4240369b860c3f874d | 492 | py | Python | g/appengine/py/standard/simple-blog/app/helpers/hasher.py | chhschou/sandpit | d4a6760905b45b90455f10a5b50af3c5f743e445 | [
"MIT"
] | null | null | null | g/appengine/py/standard/simple-blog/app/helpers/hasher.py | chhschou/sandpit | d4a6760905b45b90455f10a5b50af3c5f743e445 | [
"MIT"
] | null | null | null | g/appengine/py/standard/simple-blog/app/helpers/hasher.py | chhschou/sandpit | d4a6760905b45b90455f10a5b50af3c5f743e445 | [
"MIT"
] | null | null | null | import random
import string
import hashlib
# Implement the function valid_pw() that returns True if a user's password
# matches its hash. You will need to modify make_pw_hash.
| 23.428571 | 74 | 0.707317 |
554ef62e12daf1b4dd0a910c08086098d9a39602 | 769 | py | Python | tests/hdx/scraper/test_utils.py | mcarans/hdx-python-scraper | ce17c672591979d4601bd125a38b86ea81a9f3c4 | [
"MIT"
] | null | null | null | tests/hdx/scraper/test_utils.py | mcarans/hdx-python-scraper | ce17c672591979d4601bd125a38b86ea81a9f3c4 | [
"MIT"
] | null | null | null | tests/hdx/scraper/test_utils.py | mcarans/hdx-python-scraper | ce17c672591979d4601bd125a38b86ea81a9f3c4 | [
"MIT"
] | null | null | null | from datetime import datetime
from hdx.data.dataset import Dataset
from hdx.scraper.utilities import (
get_isodate_from_dataset_date,
string_params_to_dict,
)
| 29.576923 | 86 | 0.629389 |
554fb560fa2735d2073c8f53fb708577f43575e0 | 3,796 | py | Python | store/models.py | Dokeey/Buy-Sell | 9d70eb8649d79962657cc4be896e437908de537b | [
"MIT"
] | 7 | 2019-03-25T14:43:41.000Z | 2021-09-16T01:44:41.000Z | store/models.py | Dokeey/Buy-Sell | 9d70eb8649d79962657cc4be896e437908de537b | [
"MIT"
] | 80 | 2019-03-25T09:25:00.000Z | 2020-02-09T01:01:09.000Z | store/models.py | Dokeey/Buy-Sell | 9d70eb8649d79962657cc4be896e437908de537b | [
"MIT"
] | 4 | 2019-03-25T13:58:07.000Z | 2021-11-26T09:12:32.000Z | from random import randrange
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from hitcount.models import HitCountMixin, HitCount
from imagekit.models import ProcessedImageField
from pilkit.processors import ResizeToFill
from django_cleanup import cleanup
from store.fields import DefaultStaticProcessedImageField
# @cleanup.ignore
from django.contrib.auth import get_user_model
User = get_user_model()
try:
    # Best-effort lookup of the sentinel "deleteuser" account at import
    # time; fall back to None when the user does not exist or the database
    # is not reachable yet (e.g. during migrations).
    user_pk = User.objects.get(username='deleteuser').id
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # not swallowed; the best-effort fallback behaviour is preserved.
    user_pk = None
from trade.models import Item
| 36.5 | 133 | 0.692308 |
554fefef5722dcfd6c785e2d4dadd682981a85f8 | 1,361 | py | Python | auth-api/app.py | dlavery/auth | 9f37b4be2eeda2446b7d3abd44c7b45918486e0b | [
"MIT"
] | null | null | null | auth-api/app.py | dlavery/auth | 9f37b4be2eeda2446b7d3abd44c7b45918486e0b | [
"MIT"
] | null | null | null | auth-api/app.py | dlavery/auth | 9f37b4be2eeda2446b7d3abd44c7b45918486e0b | [
"MIT"
] | null | null | null | import configparser
import logging
from flask import Flask
from flask_pymongo import PyMongo
from Crypto.PublicKey import RSA
# Value mapping: config-file strings -> logging levels.
# Bug fix: 'WARN' previously mapped to logging.DEBUG.
LOG_LEVELS = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'WARN': logging.WARNING, 'ERROR': logging.ERROR}
# Create application
app = Flask(__name__)
# Read external config
config = configparser.ConfigParser()
config.read('auth-api.cfg')
app.config['MONGO_DBNAME'] = config['DATABASE']['dbName']
app.config['MONGO_URI'] = config['DATABASE']['dbURI']
logfile = config['LOGGING']['logFile']
loglevel = LOG_LEVELS[config['LOGGING']['logLevel']]
app.config['SERVER_NAME'] = config['APPLICATION']['serverName']
# Bug fix: use getboolean -- the raw config string (e.g. 'False') is
# always truthy, which previously forced debug mode on.
app.config['DEBUG'] = config.getboolean('APPLICATION', 'debug')
# Set up logging
fh = logging.FileHandler(logfile, mode='a', encoding='utf8', delay=False)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(filename)s %(lineno)d %(message)s')
fh.setFormatter(fmt)
app.logger.addHandler(fh)
app.logger.setLevel(loglevel)
# Set up database
mongo = PyMongo(app)
# Get crypto: load the service RSA key pair, closing the key files promptly.
pubkeyfile = config['PKI']['pubkeyFile']
with open(pubkeyfile) as f:
    authpublickey = RSA.import_key(f.read()).exportKey()
keyfile = config['PKI']['keyFile']
passphrase = config['PKI']['passPhrase']
with open(keyfile) as f:
    authprivatekey = RSA.import_key(f.read(), passphrase=passphrase).exportKey()
# Get session secret
app.secret_key = config['SESSIONS']['secretKey']
| 32.404762 | 106 | 0.740632 |
5555b6c3e07de5a90e04d4e0ebe99f3c40e0594c | 1,587 | py | Python | experts/siamdw.py | songheony/AAA-journal | 4306fac0afe567269b8d2f1cbef2a1c398fdde82 | [
"MIT"
] | 9 | 2020-07-07T09:03:07.000Z | 2021-04-22T03:38:49.000Z | experts/siamdw.py | songheony/AAA-journal | 4306fac0afe567269b8d2f1cbef2a1c398fdde82 | [
"MIT"
] | null | null | null | experts/siamdw.py | songheony/AAA-journal | 4306fac0afe567269b8d2f1cbef2a1c398fdde82 | [
"MIT"
] | 1 | 2021-07-31T19:26:52.000Z | 2021-07-31T19:26:52.000Z | import sys
import numpy as np
import cv2
from easydict import EasyDict as edict
from base_tracker import BaseTracker
import path_config
sys.path.append("external/SiamDW/lib")
from tracker.siamrpn import SiamRPN
import models.models as models
from utils.utils import load_pretrain
| 34.5 | 85 | 0.628859 |
555695e92a72c35957e937841df7b620e7484601 | 3,346 | py | Python | serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | import math
import torch
| 35.978495 | 162 | 0.661686 |
55572056018bf803954acf22ae96913928e3246d | 1,479 | py | Python | src/modules/base/url_helper.py | yakii9/artificial-programmer | a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1 | [
"MIT"
] | 1 | 2018-10-21T22:46:27.000Z | 2018-10-21T22:46:27.000Z | src/modules/base/url_helper.py | yakii9/artificial-programmer | a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1 | [
"MIT"
] | 1 | 2018-10-29T04:34:13.000Z | 2018-11-01T14:32:23.000Z | src/modules/base/url_helper.py | yakii9/artificial-programmer | a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1 | [
"MIT"
] | 1 | 2018-10-21T22:46:48.000Z | 2018-10-21T22:46:48.000Z | import urllib.request
from html.parser import HTMLParser
from urllib import parse
from modules.base.handle_timeout import timeout
| 25.067797 | 98 | 0.577417 |
5557b931f8213b68a545c1e272d7bfa56dc0f55f | 7,460 | py | Python | trainer/trainer.py | iprapas/dl-continuous-deployment | bcee578a8ae3aa74e4ede00d125cb456f6a3010e | [
"MIT"
] | null | null | null | trainer/trainer.py | iprapas/dl-continuous-deployment | bcee578a8ae3aa74e4ede00d125cb456f6a3010e | [
"MIT"
] | null | null | null | trainer/trainer.py | iprapas/dl-continuous-deployment | bcee578a8ae3aa74e4ede00d125cb456f6a3010e | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker, confusion_matrix_image
import copy
import sys
import time
from model.metric import Accuracy, TopkAccuracy
def get_top_k(x, ratio):
    """Keep the top ``1 - ratio`` fraction of entries of ``x`` by magnitude.

    The tensor is flattened and at least one element is always retained.
    Returns ``(values, indices)``: flat indices of the kept entries and the
    corresponding (signed) values.
    """
    flat = x.view(-1)
    keep = max(1, int(flat.nelement() * (1 - ratio)))
    magnitudes = flat.abs()
    if keep == 1:
        # Single survivor: max with keepdim yields a one-element index tensor.
        _, idx = torch.max(magnitudes, dim=0, keepdim=True)
    else:
        # sorted=False: the order of the kept entries is unspecified.
        _, idx = torch.topk(magnitudes, keep, largest=True, sorted=False)
    return flat[idx], idx
| 38.061224 | 112 | 0.604826 |
555ab459155bc7618fd3e853eed5270201c2705f | 341 | py | Python | eoa.py | LDNN97/evolutionary-optimization-algorithm | 5819ab759ecc1fee94a03e407c97f2ab7bd0f862 | [
"MIT"
] | 21 | 2019-03-12T14:48:36.000Z | 2022-03-08T12:55:30.000Z | eoa.py | LDNN97/Evolutionary-Optimization-Algorithms | 5819ab759ecc1fee94a03e407c97f2ab7bd0f862 | [
"MIT"
] | null | null | null | eoa.py | LDNN97/Evolutionary-Optimization-Algorithms | 5819ab759ecc1fee94a03e407c97f2ab7bd0f862 | [
"MIT"
] | 5 | 2021-02-17T08:33:39.000Z | 2022-01-23T11:44:16.000Z | from prob.problems import *
from opti.de import DE
from opti.cmaes import CMAES
from opti.cmaes_origin import CMAESO
from opti.cmaes_maes import CMAESM
from opti.cmaes_large import CMAESL
# beta
from opti.cmaes_bipop import CMAESB
if __name__ == "__main__":
    # Benchmark problem: Sphere(50, -50, 50) -- presumably 50 dimensions
    # over the range [-50, 50]; confirm against prob.problems.Sphere.
    TaskProb = Sphere(50, -50, 50)
    # Optimise with Differential Evolution; 1000 is presumably the
    # iteration/evaluation budget -- confirm against opti.de.DE's signature.
    Task = DE(TaskProb, 1000)
    Task.run()
| 21.3125 | 36 | 0.747801 |
555da31cec0240cea59e597af6f6196956ec03f6 | 574 | py | Python | tests/test_common.py | shikanon/BaiduMapAPI | 36c41bd99e523fa231e7d654f0ba504349b2a7ad | [
"MIT"
] | 7 | 2019-03-07T04:38:44.000Z | 2021-04-23T02:43:10.000Z | tests/test_common.py | shikanon/BaiduMapAPI | 36c41bd99e523fa231e7d654f0ba504349b2a7ad | [
"MIT"
] | 2 | 2020-03-24T16:47:11.000Z | 2020-12-03T08:52:31.000Z | tests/test_common.py | shikanon/BaiduMapAPI | 36c41bd99e523fa231e7d654f0ba504349b2a7ad | [
"MIT"
] | 1 | 2019-10-22T07:21:58.000Z | 2019-10-22T07:21:58.000Z | from BaiduMapAPI.common import convertCoord, expandUp
import pytest | 41 | 90 | 0.606272 |
555e8fe1a5ae17b4fbc51d4ad0090a37d1dc68ba | 3,520 | py | Python | pycba/utils.py | mayermelhem/pycba | 8f6a0da12629bac2ad1c6c8e113357f96931ef17 | [
"Apache-2.0"
] | 10 | 2022-02-07T01:16:02.000Z | 2022-03-12T07:56:43.000Z | pycba/utils.py | mayermelhem/pycba | 8f6a0da12629bac2ad1c6c8e113357f96931ef17 | [
"Apache-2.0"
] | 5 | 2022-02-08T07:42:53.000Z | 2022-03-31T21:33:42.000Z | pycba/utils.py | mayermelhem/pycba | 8f6a0da12629bac2ad1c6c8e113357f96931ef17 | [
"Apache-2.0"
] | 1 | 2022-02-12T04:33:38.000Z | 2022-02-12T04:33:38.000Z | """
PyCBA - Utility functions for interacting with PyCBA
"""
import re
import numpy as np
from typing import Tuple
def parse_beam_string(
    beam_string: str,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Convert a beam descriptor string into CBA input vectors.

    The descriptor alternates span lengths (floats) with single-character
    terminal codes (case-insensitive):

    - P - pinned (same restraints as a roller)
    - R - roller
    - E - encastre (fully fixed); only valid at a beam extremity
    - F - free (cantilever end); only valid at a beam extremity
    - H - hinge; only valid internally

    For example ``"P40R20R"`` is a two-span, 60 m beam on
    pinned-roller-roller supports, and ``"E20H30R10F"`` is a three-span
    beam with an encastre end, an internal hinge, a roller and a free end.

    Parameters
    ----------
    beam_string :
        The descriptor to parse.

    Raises
    ------
    ValueError
        If the descriptor violates the structural rules above.

    Returns
    -------
    (L, EI, R, eType) : tuple
        Span lengths, member flexural rigidities (prismatic, arbitrary
        value), flattened support-condition codes for each member end, and
        member-type codes (2 marks the member left of a hinge).
    """
    s = beam_string.lower()
    terminals = re.findall(r"[efhpr]", s)
    ends = [m.end() for m in re.finditer(r"[efhpr]", s)]

    # Structural sanity checks on the terminal sequence.
    if len(terminals) < 2:
        raise ValueError("At least two terminals must be defined")
    if terminals[0] == "h" or terminals[-1] == "h":
        raise ValueError("Cannot have a hinge at an extremity")
    if len(terminals) > 2 and any(t in ("f", "e") for t in terminals[1:-1]):
        raise ValueError("Do not define internal free or encastre terminals")

    # Span lengths are the numeric runs between consecutive terminals.
    L = []
    for start, stop in zip(ends, ends[1:]):
        L.append(float(s[start : stop - 1]))
    if len(terminals) - 1 != len(L):
        raise ValueError("Inconsistent terminal count and span count")

    # Arbitrary prismatic flexural rigidity, kNm2.
    EI = 30 * 1e10 * np.ones(len(L)) * 1e-6

    # Per-terminal support-condition codes (translation, rotation).
    support_codes = {
        "p": [-1, 0],
        "r": [-1, 0],
        "e": [-1, -1],
        "f": [0, 0],
        "h": [0, 0],
    }
    R = []
    eType = [1] * len(L)
    for i, t in enumerate(terminals):
        R.extend(support_codes[t])
        if t == "h":
            # An internal hinge changes the type of the member to its left.
            eType[i - 1] = 2
    return (L, EI, R, eType)
| 34.174757 | 86 | 0.605682 |
555f6946d9a27cac92dae44e27d4220ecfaf6269 | 10,363 | py | Python | models/dcase2020_fuss_baseline/evaluate_lib.py | marciopuga/sound-separation | 0b23ae22123b041b9538295f32a92151cb77bff9 | [
"Apache-2.0"
] | 412 | 2020-03-03T05:55:53.000Z | 2022-03-29T20:49:11.000Z | models/dcase2020_fuss_baseline/evaluate_lib.py | marciopuga/sound-separation | 0b23ae22123b041b9538295f32a92151cb77bff9 | [
"Apache-2.0"
] | 12 | 2020-04-09T17:47:01.000Z | 2022-03-22T06:07:04.000Z | models/dcase2020_fuss_baseline/evaluate_lib.py | marciopuga/sound-separation | 0b23ae22123b041b9538295f32a92151cb77bff9 | [
"Apache-2.0"
] | 89 | 2020-03-06T08:26:44.000Z | 2022-03-31T11:36:23.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate separated audio from a DCASE 2020 task 4 separation model."""
import os
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import inference
from train import data_io
from train import metrics
from train import permutation_invariant
def _weights_for_nonzero_refs(source_waveforms):
  """Boolean mask of shape (source,): True where a reference has nonzero RMS."""
  rms_per_source = tf.sqrt(
      tf.reduce_mean(tf.square(source_waveforms), axis=-1))
  return tf.greater(rms_per_source, 1e-8)
def _weights_for_active_seps(power_sources, power_separated):
  """Boolean (source,) mask: separated power above 1% of the quietest source."""
  quietest_power = tf.reduce_min(power_sources, axis=-1, keepdims=True)
  return tf.greater(power_separated, 0.01 * quietest_power)
def compute_metrics(source_waveforms, separated_waveforms, mixture_waveform):
  """Permutation-invariant SI-SNR, powers, and under/equal/over-separation.

  Args:
    source_waveforms: reference source signals, shape (source, samples).
    separated_waveforms: separated estimates, shape (source, samples).
    mixture_waveform: the input mixture, shape (samples,).

  Returns:
    Dict of tensors: per-source SI-SNR of the separated estimates and of the
    raw mixture, their difference ('sisnr_improvement'), per-source powers,
    activity masks and counts, and 0/1 flags for whether fewer, the same
    number, or more separated signals are active than reference signals.
  """
  # Align separated sources to reference sources.
  # The wrapped loss is NEGATIVE gain-invariant SNR, so minimizing it selects
  # the permutation that maximizes SI-SNR.
  perm_inv_loss = permutation_invariant.wrap(
      lambda tar, est: -metrics.signal_to_noise_ratio_gain_invariant(est, tar))
  _, separated_waveforms = perm_inv_loss(source_waveforms[tf.newaxis],
                                         separated_waveforms[tf.newaxis])
  separated_waveforms = separated_waveforms[0]  # Remove batch axis.
  # Compute separated and source powers.
  power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
  power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
  # Compute weights for active (separated, source) pairs where source is nonzero
  # and separated power is above threshold of quietest source power - 20 dB.
  # (0.01x power == -20 dB, matching the factor in _weights_for_active_seps.)
  weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
  weights_active_seps = _weights_for_active_seps(
      tf.boolean_mask(power_sources, weights_active_refs), power_separated)
  weights_active_pairs = tf.logical_and(weights_active_refs,
                                        weights_active_seps)
  # Compute SI-SNR.
  sisnr_separated = metrics.signal_to_noise_ratio_gain_invariant(
      separated_waveforms, source_waveforms)
  num_active_refs = tf.reduce_sum(tf.cast(weights_active_refs, tf.int32))
  num_active_seps = tf.reduce_sum(tf.cast(weights_active_seps, tf.int32))
  num_active_pairs = tf.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
  # Baseline: SI-SNR of the unprocessed mixture tiled against each reference.
  sisnr_mixture = metrics.signal_to_noise_ratio_gain_invariant(
      tf.tile(mixture_waveform[tf.newaxis], (source_waveforms.shape[0], 1)),
      source_waveforms)
  # Compute under/equal/over separation.
  under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
                             tf.float32)
  equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
                             tf.float32)
  over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
                            tf.float32)
  return {'sisnr_separated': sisnr_separated,
          'sisnr_mixture': sisnr_mixture,
          'sisnr_improvement': sisnr_separated - sisnr_mixture,
          'power_separated': power_separated,
          'power_sources': power_sources,
          'under_separation': under_separation,
          'equal_separation': equal_separation,
          'over_separation': over_separation,
          'weights_active_refs': weights_active_refs,
          'weights_active_seps': weights_active_seps,
          'weights_active_pairs': weights_active_pairs,
          'num_active_refs': num_active_refs,
          'num_active_seps': num_active_seps,
          'num_active_pairs': num_active_pairs}
def _report_score_stats(metric_per_source_count, label='', counts=None):
"""Report mean and std dev for specified counts."""
values_all = []
if counts is None:
counts = metric_per_source_count.keys()
for count in counts:
values = metric_per_source_count[count]
values_all.extend(list(values))
return '%s for count(s) %s = %.1f +/- %.1f dB' % (
label, counts, np.mean(values_all), np.std(values_all))
def evaluate(checkpoint_path, metagraph_path, data_list_path, output_path):
  """Evaluate a model on FUSS data.

  Runs the separation model over every example listed in data_list_path,
  computes per-example metrics with compute_metrics, pools SI-SNR / SI-SNRi
  per reference-source count, and writes a scores csv plus a text summary
  (every 20 examples and once at the end).

  Args:
    checkpoint_path: Path to the model checkpoint.
    metagraph_path: Path to the model metagraph.
    data_list_path: List file of wav paths; the first field of each line is
      skipped (skip_fields=1).
    output_path: Either a path ending in '.csv' used as-is, or a directory
      into which 'scores.csv' is written.
  """
  model = inference.SeparationModel(checkpoint_path, metagraph_path)
  file_list = data_io.read_lines_from_file(data_list_path, skip_fields=1)
  with model.graph.as_default():
    dataset = data_io.wavs_to_dataset(file_list, batch_size=1,
                                      num_samples=160000,
                                      repeat=False)
    # Strip batch and mic dimensions.
    dataset['receiver_audio'] = dataset['receiver_audio'][0, 0]
    dataset['source_images'] = dataset['source_images'][0, :, 0]
  # Separate with a trained model.
  i = 1  # 1-based example counter; drives the every-20-examples reporting.
  max_count = 4  # Maximum number of sources per mixture.
  dict_per_source_count = lambda: {c: [] for c in range(1, max_count + 1)}
  sisnr_per_source_count = dict_per_source_count()
  sisnri_per_source_count = dict_per_source_count()
  under_seps = []
  equal_seps = []
  over_seps = []
  df = None  # Created lazily once the metric names are known.
  while True:
    try:
      waveforms = model.sess.run(dataset)
    except tf.errors.OutOfRangeError:
      # Dataset exhausted.
      break
    separated_waveforms = model.separate(waveforms['receiver_audio'])
    source_waveforms = waveforms['source_images']
    if np.allclose(source_waveforms, 0):
      # NOTE(review): the adjacent literals concatenate without a space and
      # 'Skiping' is misspelled in this runtime message.
      print('WARNING: all-zeros source_waveforms tensor encountered.'
            'Skiping this example...')
      continue
    metrics_dict = compute_metrics(source_waveforms, separated_waveforms,
                                   waveforms['receiver_audio'])
    metrics_dict = {k: v.numpy() for k, v in metrics_dict.items()}
    sisnr_sep = metrics_dict['sisnr_separated']
    sisnr_mix = metrics_dict['sisnr_mixture']
    sisnr_imp = metrics_dict['sisnr_improvement']
    weights_active_pairs = metrics_dict['weights_active_pairs']
    # Create and initialize the dataframe if it doesn't exist.
    if df is None:
      # Need to create the dataframe.
      columns = []
      for metric_name, metric_value in metrics_dict.items():
        if metric_value.shape:
          # Per-source metric.
          for i_src in range(1, max_count + 1):
            columns.append(metric_name + '_source%d' % i_src)
        else:
          # Scalar metric.
          columns.append(metric_name)
      columns.sort()
      df = pd.DataFrame(columns=columns)
      if output_path.endswith('.csv'):
        csv_path = output_path
      else:
        csv_path = os.path.join(output_path, 'scores.csv')
    # Update dataframe with new metrics.
    row_dict = {}
    for metric_name, metric_value in metrics_dict.items():
      if metric_value.shape:
        # Per-source metric.
        for i_src in range(1, max_count + 1):
          row_dict[metric_name + '_source%d' % i_src] = metric_value[i_src - 1]
      else:
        # Scalar metric.
        row_dict[metric_name] = metric_value
    new_row = pd.Series(row_dict)
    # NOTE(review): DataFrame.append was removed in pandas 2.x; pd.concat
    # would be needed to run this on modern pandas.
    df = df.append(new_row, ignore_index=True)
    # Store metrics per source count and report results so far.
    under_seps.append(metrics_dict['under_separation'])
    equal_seps.append(metrics_dict['equal_separation'])
    over_seps.append(metrics_dict['over_separation'])
    # Only pool SI-SNR values for active (reference, separated) pairs.
    sisnr_per_source_count[metrics_dict['num_active_refs']].extend(
        sisnr_sep[weights_active_pairs].tolist())
    sisnri_per_source_count[metrics_dict['num_active_refs']].extend(
        sisnr_imp[weights_active_pairs].tolist())
    print('Example %d: SI-SNR sep = %.1f dB, SI-SNR mix = %.1f dB, '
          'SI-SNR imp = %.1f dB, ref count = %d, sep count = %d' % (
              i, np.mean(sisnr_sep), np.mean(sisnr_mix),
              np.mean(sisnr_sep - sisnr_mix), metrics_dict['num_active_refs'],
              metrics_dict['num_active_seps']))
    if not i % 20:
      # Report mean statistics and save csv every so often.
      lines = [
          'Metrics after %d examples:' % i,
          _report_score_stats(sisnr_per_source_count, 'SI-SNR',
                              counts=[1]),
          _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                              counts=[2]),
          _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                              counts=[3]),
          _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                              counts=[4]),
          _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                              counts=[2, 3, 4]),
          'Under separation: %.2f' % np.mean(under_seps),
          'Equal separation: %.2f' % np.mean(equal_seps),
          'Over separation: %.2f' % np.mean(over_seps),
      ]
      print('')
      for line in lines:
        print(line)
      with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
        f.writelines([line + '\n' for line in lines])
      print('\nWriting csv to %s.\n' % csv_path)
      df.to_csv(csv_path)
    i += 1
  # Report final mean statistics.
  lines = [
      'Final statistics:',
      _report_score_stats(sisnr_per_source_count, 'SI-SNR',
                          counts=[1]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                          counts=[2]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                          counts=[3]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                          counts=[4]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi',
                          counts=[2, 3, 4]),
      'Under separation: %.2f' % np.mean(under_seps),
      'Equal separation: %.2f' % np.mean(equal_seps),
      'Over separation: %.2f' % np.mean(over_seps),
  ]
  print('')
  for line in lines:
    print(line)
  with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
    f.writelines([line + '\n' for line in lines])
  # Write final csv.
  print('\nWriting csv to %s.' % csv_path)
  df.to_csv(csv_path)
| 41.618474 | 80 | 0.666795 |
5560d79a769a8dcd00036d30ac155bdbbb8657ae | 422 | py | Python | homeassistant/components/system_bridge/const.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/system_bridge/const.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/system_bridge/const.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Constants for the System Bridge integration."""
import asyncio
from aiohttp.client_exceptions import (
ClientConnectionError,
ClientConnectorError,
ClientResponseError,
)
from systembridge.exceptions import BridgeException
# Domain identifier for this integration.
DOMAIN = "system_bridge"
# Errors that indicate the bridge could not be reached or answered badly.
# NOTE(review): presumably caught as a group by callers (config flow /
# coordinator) to detect connection failures -- verify against the callers.
BRIDGE_CONNECTION_ERRORS = (
    asyncio.TimeoutError,
    BridgeException,
    ClientConnectionError,
    ClientConnectorError,
    ClientResponseError,
)
55652d01d18ec68adf27b069baae8bf7ed3db2f4 | 1,705 | py | Python | python/domain/compliance/model/measure.py | ICTU/document-as-code | e65fddb94513e7c2f54f248b4ce69e9e10ce42f5 | [
"Apache-2.0"
] | 2 | 2021-01-09T17:00:51.000Z | 2021-02-19T09:35:26.000Z | python/domain/compliance/model/measure.py | ICTU/document-as-code | e65fddb94513e7c2f54f248b4ce69e9e10ce42f5 | [
"Apache-2.0"
] | null | null | null | python/domain/compliance/model/measure.py | ICTU/document-as-code | e65fddb94513e7c2f54f248b4ce69e9e10ce42f5 | [
"Apache-2.0"
] | 1 | 2020-02-24T15:50:05.000Z | 2020-02-24T15:50:05.000Z | """
BIO measure - defines and describes a measure for BIO compliance
"""
from domain.base import Base
| 26.640625 | 112 | 0.652199 |
556657f3480d4123e6f0535b01c6ed2f5345122d | 615 | py | Python | week_06/readibility.py | fentybit/cs50 | a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3 | [
"CNRI-Python"
] | null | null | null | week_06/readibility.py | fentybit/cs50 | a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3 | [
"CNRI-Python"
] | null | null | null | week_06/readibility.py | fentybit/cs50 | a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3 | [
"CNRI-Python"
] | null | null | null | from cs50 import get_string
# Coleman-Liau readability index over the supplied text.
text = get_string("Text: ")

# Letters: alphabetic characters only; sentences end at '.', '!' or '?';
# words are space-separated, so a non-empty text has spaces + 1 words.
letters = sum(1 for c in text if c.isalpha())
sentences = sum(1 for c in text if c in ".!?")
words = text.count(" ") + 1

# L and S are letters / sentences per 100 words.
L = 100 * (letters / words)
S = 100 * (sentences / words)
grade = round(0.0588 * L - 0.296 * S - 15.8)

if grade >= 16:
    print("Grade 16+")
elif grade < 1:
    print("Before Grade 1")
else:
    print(f"Grade {grade}")
5567063c93ec8ddf93486996ed882ce5ca8b8b9d | 206 | py | Python | fauxblog/admin.py | nickobrad/faux | cecb03e97a176149606dc88373d1844fc1f6b23c | [
"MIT"
] | null | null | null | fauxblog/admin.py | nickobrad/faux | cecb03e97a176149606dc88373d1844fc1f6b23c | [
"MIT"
] | null | null | null | fauxblog/admin.py | nickobrad/faux | cecb03e97a176149606dc88373d1844fc1f6b23c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Category, ImagePost, Location
# Register your models here.
# Each model uses the default ModelAdmin; define custom ModelAdmin classes
# here later if list display / search fields are needed.
admin.site.register(ImagePost)
admin.site.register(Category)
admin.site.register(Location)
| 20.6 | 49 | 0.81068 |
556731a35682ef9f34de75b049e18d73969d3bfa | 1,574 | py | Python | lib/Vector.py | aldahick/dotter.py | c3e783801f36403476087b5638a93e5fd5959bbe | [
"MIT"
] | null | null | null | lib/Vector.py | aldahick/dotter.py | c3e783801f36403476087b5638a93e5fd5959bbe | [
"MIT"
] | null | null | null | lib/Vector.py | aldahick/dotter.py | c3e783801f36403476087b5638a93e5fd5959bbe | [
"MIT"
] | null | null | null | import math
from random import randint
# pylint: disable=I0011,invalid-name
| 26.677966 | 71 | 0.559085 |
55686a8be609e908e7580542f40aa36255c8c155 | 12,532 | py | Python | functions.py | flyingmat/pyfactorizer | 6e607408bc21d04b09ecabfc6a579ad4058965f5 | [
"MIT"
] | null | null | null | functions.py | flyingmat/pyfactorizer | 6e607408bc21d04b09ecabfc6a579ad4058965f5 | [
"MIT"
] | null | null | null | functions.py | flyingmat/pyfactorizer | 6e607408bc21d04b09ecabfc6a579ad4058965f5 | [
"MIT"
] | null | null | null | from math import floor
def remove_spaces(inlst):
    """Return a list of the items of ``inlst`` that are not a single space.

    Works on any iterable (a list of tokens, or a string, since iterating a
    string yields characters). Only items equal to ``' '`` are dropped;
    other whitespace (tabs, newlines, multi-space strings) is kept.
    """
    # A def (rather than a lambda bound to a name) complies with PEP 8 E731
    # and gives the callable a proper __name__ and docstring.
    return [i for i in inlst if i != ' ']
| 39.040498 | 151 | 0.514682 |
556a5954e27e88a1963c24a16323e7c269ae5148 | 2,556 | py | Python | pystratis/api/balances/tests/test_balances.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/balances/tests/test_balances.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/balances/tests/test_balances.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | import pytest
from pytest_mock import MockerFixture
from pystratis.api.balances import Balances
from pystratis.core.types import Address
from pystratis.core.networks import CirrusMain
| 38.727273 | 96 | 0.736307 |
556d8216ffbaa6f7a0d0816c6b1ba9baa984c1a1 | 381 | py | Python | Problems/14.py | Maurya232Abhishek/Python-repository-for-basics | 3dcec5c529a0847df07c9dcc1424675754ce6376 | [
"MIT"
] | 2 | 2021-07-14T11:01:58.000Z | 2021-07-14T11:02:01.000Z | Problems/14.py | Maurya232Abhishek/Python-repository-for-basics | 3dcec5c529a0847df07c9dcc1424675754ce6376 | [
"MIT"
] | null | null | null | Problems/14.py | Maurya232Abhishek/Python-repository-for-basics | 3dcec5c529a0847df07c9dcc1424675754ce6376 | [
"MIT"
] | null | null | null | """ x = 2
while True:
y = n / (x * x)
if (x == y):
print(x)
if x == int(x):
return True
else:
return False
x = (y + x + x) / 3
print(x)"""
print(isPerCube()) | 19.05 | 28 | 0.351706 |
556e3ec9c1d73a0070074ad45f8de00d47c96b09 | 179 | py | Python | year1/python/week2/q9_squareroots.py | OthmanEmpire/university | 3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278 | [
"MIT"
] | 1 | 2016-05-21T17:23:50.000Z | 2016-05-21T17:23:50.000Z | year1/python/week2/q9_squareroots.py | OthmanEmpire/university_code | 3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278 | [
"MIT"
] | null | null | null | year1/python/week2/q9_squareroots.py | OthmanEmpire/university_code | 3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278 | [
"MIT"
] | null | null | null | ## This program prints out the first 10 square roots that are even ##
for x in range(1,10):
y = (2*x)**2 # If n^2 is even hence n must be even as well
print(y)
| 29.833333 | 72 | 0.603352 |
556f083296f917021fc8c5ac171cde72ce1bed3a | 1,690 | py | Python | backend/health/health_check.py | threefoldtech/zeroCI | 851def4cbaebba681641ecb24c731de56277d6ed | [
"Apache-2.0"
] | null | null | null | backend/health/health_check.py | threefoldtech/zeroCI | 851def4cbaebba681641ecb24c731de56277d6ed | [
"Apache-2.0"
] | 52 | 2019-11-14T09:39:04.000Z | 2021-03-16T10:15:55.000Z | backend/health/health_check.py | AhmedHanafy725/0-CI | ce73044eea2c15bcbb161a1d6f23e75e4f8d53a0 | [
"Apache-2.0"
] | 1 | 2019-10-30T09:51:25.000Z | 2019-10-30T09:51:25.000Z | import sys
sys.path.append("/sandbox/code/github/threefoldtech/zeroCI/backend")
from redis import Redis
from health_recover import Recover
from utils.utils import Utils
# Module-level recovery helper.
# NOTE(review): `recover` is not referenced in this guard -- confirm it is
# used elsewhere in this module (e.g. by the Health implementation above).
recover = Recover()
if __name__ == "__main__":
    # Run the full ZeroCI health-check suite when invoked as a script.
    health = Health()
    health.test_zeroci_server()
    health.test_redis()
    health.test_workers()
    health.test_schedule()
| 25.606061 | 76 | 0.562722 |
5570f5a350941f5510b456b02cd8353c974ae345 | 13,284 | py | Python | vesper/command/recording_importer.py | RichardLitt/Vesper | 5360844f42a06942e7684121c650b08cf8616285 | [
"MIT"
] | null | null | null | vesper/command/recording_importer.py | RichardLitt/Vesper | 5360844f42a06942e7684121c650b08cf8616285 | [
"MIT"
] | null | null | null | vesper/command/recording_importer.py | RichardLitt/Vesper | 5360844f42a06942e7684121c650b08cf8616285 | [
"MIT"
] | null | null | null | """Module containing class `RecordingImporter`."""
from pathlib import Path
import itertools
import logging
import os
from django.db import transaction
from vesper.command.command import CommandExecutionError
from vesper.django.app.models import (
DeviceConnection, Job, Recording, RecordingChannel, RecordingFile)
from vesper.singletons import recording_manager
import vesper.command.command_utils as command_utils
import vesper.command.recording_utils as recording_utils
import vesper.util.audio_file_utils as audio_file_utils
import vesper.util.signal_utils as signal_utils
import vesper.util.time_utils as time_utils
def _get_recorder_mic_outputs(recorder, time):
    """
    Gets a mapping from recorder input channel numbers to connected
    microphone outputs for the specified recorder and time.

    A connection counts if it is to a microphone-model device and `time`
    falls within its half-open interval [start_time, end_time).

    :Parameters:
        recorder : the recorder device whose connections are wanted.
        time : instant at which the connections must be active.

    :Returns:
        dict mapping each connected input's channel number to the
        microphone output connected to it at `time`.
    """
    connections = DeviceConnection.objects.filter(
        input__device=recorder,
        output__device__model__type='Microphone',
        start_time__lte=time,
        end_time__gt=time)
    # Dict comprehension replaces dict(generator); the commented-out debug
    # print of the query has been removed.
    return {c.input.channel_num: c.output for c in connections}
| 32.479218 | 77 | 0.564664 |
557658851f4a3ae8f5f44ddef879cff02f03ad5f | 1,096 | py | Python | l10n_ar_ux/models/res_company.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 1 | 2021-01-25T15:57:58.000Z | 2021-01-25T15:57:58.000Z | l10n_ar_ux/models/res_company.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | null | null | null | l10n_ar_ux/models/res_company.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 2 | 2020-10-17T16:36:02.000Z | 2021-01-24T10:20:05.000Z | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models
| 39.142857 | 81 | 0.620438 |
5576c4dbc04cfe8f5be4007143719bb7a25f5574 | 2,033 | py | Python | Quotebot/utils.py | musawakiliML/Whatsapp-Bots | 29fe6c645010ddedac1424b22c842b3e61511644 | [
"MIT"
] | null | null | null | Quotebot/utils.py | musawakiliML/Whatsapp-Bots | 29fe6c645010ddedac1424b22c842b3e61511644 | [
"MIT"
] | null | null | null | Quotebot/utils.py | musawakiliML/Whatsapp-Bots | 29fe6c645010ddedac1424b22c842b3e61511644 | [
"MIT"
] | null | null | null | import requests
def random_quote(type=''):
    '''Get a quote from zenquotes.io.

    type: "today" for the quote of the day, "quote" for a random quote.
    Returns "'<Author>' Said:<quote>" on success, or an "Invalid Request"
    message for unknown types / non-200 responses.
    (The parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    '''
    # Table of endpoints removes the duplicated request/parse code that the
    # two branches previously repeated verbatim.
    endpoints = {
        "today": "https://zenquotes.io/api/today/ff5e73b15a05ca51951b758bd7943ce803d71772",
        "quote": "https://zenquotes.io/api/random/ff5e73b15a05ca51951b758bd7943ce803d71772",
    }
    if type not in endpoints:
        return "Invalid Request!"
    response_quote = requests.get(endpoints[type])
    if response_quote.status_code != 200:
        return f"Invalid Request {response_quote.status_code}"
    quote_data = response_quote.json()
    quote = quote_data[0]['q']
    quote_author = quote_data[0]['a']
    return f"'{quote_author.title()}' Said:{quote}"
def jokes():
    '''Fetch a random joke; return an "Invalid Request" message on failure.'''
    response = requests.get("https://some-random-api.ml/joke")
    if response.status_code != 200:
        return f"Invalid Request {response.status_code}"
    return response.json()['joke']
| 31.765625 | 113 | 0.624693 |
557a41cb5f2fe81007b03e1796d482334c493ead | 3,401 | py | Python | src/day16.py | dcbriccetti/advent-of-code-2021-python | 65958fb256234cf882714d3c3306cdbf60bcc0ae | [
"Unlicense"
] | 4 | 2021-12-10T22:47:56.000Z | 2021-12-26T21:35:58.000Z | src/day16.py | dcbriccetti/advent-of-code-2021-python | 65958fb256234cf882714d3c3306cdbf60bcc0ae | [
"Unlicense"
] | null | null | null | src/day16.py | dcbriccetti/advent-of-code-2021-python | 65958fb256234cf882714d3c3306cdbf60bcc0ae | [
"Unlicense"
] | null | null | null | from math import prod
from pathlib import Path
if __name__ == '__main__':
decoder = Decoder(Path('../data/16.txt').read_text().strip())
print(f'Result: {decoder.parse()}, versions sum: {decoder.versions_sum}')
| 36.180851 | 100 | 0.586004 |
557ac6c635a14924685b462c2a901a11408e15a1 | 6,328 | py | Python | Santander-spyder.py | Herikc2/Santander-Customer-Satisfaction | c868538ab06c252b2f9e51bac384b0f6e48efd70 | [
"MIT"
] | null | null | null | Santander-spyder.py | Herikc2/Santander-Customer-Satisfaction | c868538ab06c252b2f9e51bac384b0f6e48efd70 | [
"MIT"
] | null | null | null | Santander-spyder.py | Herikc2/Santander-Customer-Satisfaction | c868538ab06c252b2f9e51bac384b0f6e48efd70 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 17:13:15 2021
Database: https://www.kaggle.com/c/santander-customer-satisfaction
@author: Herikc Brecher
"""
# Import from libraries
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore")
# Loading the training dataset in CSV format
# (paths are relative to the working directory; Kaggle Santander files).
training_file = 'data/train.csv'
test_file = 'data/test.csv'
data_training = pd.read_csv(training_file)
test_data = pd.read_csv (test_file)
print(data_training.shape)
print(test_data.shape)
# Viewing the first 20 lines
# NOTE(review): the bare expressions below only display output in an
# interactive session (Spyder/IPython cells); as a script they are no-ops.
data_training.head (20)
# Data type of each attribute
data_training.dtypes
# Statistical Summary
data_training.describe()
# Distribution of classes
data_training.groupby("TARGET").size()
# Dividing by class
data_class_0 = data_training[data_training['TARGET'] == 0]
data_class_1 = data_training[data_training['TARGET'] == 1]
counter_class_0 = data_class_0.shape[0]
# NOTE(review): leftover half-translated name; this count is never used.
contador_classe_1 = data_class_1.shape[0]
# NOTE(review): sampling counter_class_0 rows from data_class_0 draws ALL
# class-0 rows (just shuffled), so this does not actually under-sample.
data_class_0_sample = data_class_0.sample(counter_class_0)
training_data = pd.concat([data_class_0_sample, data_class_1], axis = 0)
# Pearson correlation
# NOTE(review): result discarded; only visible in an interactive session.
data_training.corr(method = 'pearson')
# Finding the correlation between the target variable and the predictor variables
corr = training_data[training_data.columns [1:]].corr()['TARGET'][:].abs()
# Keep only predictors whose |correlation| with TARGET exceeds this cutoff.
minimal_correlation = 0.02
corr2 = corr[corr > minimal_correlation]
corr2.shape
corr2
# NOTE(review): corr_keys still contains 'TARGET' itself (self-correlation
# is 1.0); the X/Y split later assumes TARGET is the last filtered column.
corr_keys = corr2.index.tolist()
data_filter = data_training[corr_keys]
data_filter.head(20)
data_filter.dtypes
# Filtering only the columns that have a correlation above the minimum variable
array_treino = data_training[corr_keys].values
# Separating the array into input and output components for training data
# (assumes TARGET is the last of the filtered columns -- TODO confirm).
X = array_treino[:, 0:array_treino.shape[1] - 1]
Y = array_treino[:, array_treino.shape[1] - 1]
# Creating the training and test dataset
test_size = 0.30
X_training, X_testing, Y_training, Y_testing = train_test_split(X, Y, test_size = test_size)
# Generating normalized data
# NOTE(review): sklearn's Normalizer scales each sample to unit norm
# independently, so fit() learns nothing; fitting separate scalers on train
# and test is harmless here but would leak/err with a stateful scaler such
# as StandardScaler.
scaler = Normalizer (). fit (X_training)
normalizedX_treino = scaler.transform(X_training)
scaler = Normalizer().fit(X_testing)
normalizedX_teste = scaler.transform(X_testing)
Y_training = Y_training.astype('int')
Y_testing = Y_testing.astype('int')
'''
Execution of a series of classification algorithms is based on those that have the best result.
For this test, the training base is used without any treatment or data selection.
'''
# Setting the number of folds for cross validation
num_folds = 10
# Preparing the list of models
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('NB', GaussianNB()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('SVM', SVC()))
results = []
names = []
# Score every candidate with k-fold cross-validated accuracy on the
# training split; collect per-fold scores for the boxplot below.
for name, model in models:
    kfold = KFold (n_splits = num_folds)
    cv_results = cross_val_score (model, X_training, Y_training, cv = kfold, scoring = 'accuracy')
    results.append (cv_results)
    names.append (name)
    # NOTE(review): "% s" / "% f" carry a stray space (the space flag) from
    # a mangled "%s: %f (%f)" format string; the output still renders.
    msg = "% s:% f (% f)"% (name, cv_results.mean (), cv_results.std ())
    print (msg)
# Boxplot to compare the algorithms
fig = plt.figure ()
fig.suptitle ('Comparison of Classification Algorithms')
ax = fig.add_subplot (111)
plt.boxplot (results)
ax.set_xticklabels (names)
plt.show ()
# Function to evaluate the performance of the model and save it in a pickle format for future reuse.
# Fit each finalist and report it via model_report (defined above).
# BUGFIX: every fit/score below paired training-sized X matrices with
# Y_testing (the 30% split), which raises sklearn's "inconsistent numbers
# of samples" ValueError; they now use the matching Y_training labels.
# NOTE(review): these scores are therefore TRAINING accuracy; evaluating on
# (normalizedX_teste / X_testing, Y_testing) would be the sounder metric.
# Linear Regression
model = LogisticRegression()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_treino, Y_training)
model_report("LR")
# Linear Discriminant Analysis
model = LinearDiscriminantAnalysis()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("LDA")
# KNN
model = KNeighborsClassifier()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_treino, Y_training)
model_report("KNN")
# CART
model = DecisionTreeClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("CART")
# XGBOOST
model = XGBClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("XGBOOST")
# Loading the model
# NOTE(review): this path looks garbled -- probably meant
# 'models/model_classifier_final_XGBOOST.sav'; confirm against the layout
# that model_report writes to before relying on it.
file = 'models model_classifier_final_XGBOOST.sav'
model_classifier = pickle.load(open(file, 'rb'))
# score() returns a plain float held-out accuracy on the test split.
model_prod = model_classifier.score(X_testing, Y_testing)
print("Uploaded Model")
# Print Result
# BUGFIX: the original called .mean() on the float returned by score(),
# which raises AttributeError; report the accuracy directly.
print("Accuracy: %.3f" % (model_prod * 100))
557b0f82fa2e590f23c344cfc48bb3aef2ee423d | 4,502 | py | Python | Memorization Tool/task/tool.py | soukalli/jetbrain-accademy | fc486d439b4b54a58956e1186eb69c56b85f85f1 | [
"MIT"
] | null | null | null | Memorization Tool/task/tool.py | soukalli/jetbrain-accademy | fc486d439b4b54a58956e1186eb69c56b85f85f1 | [
"MIT"
] | null | null | null | Memorization Tool/task/tool.py | soukalli/jetbrain-accademy | fc486d439b4b54a58956e1186eb69c56b85f85f1 | [
"MIT"
] | null | null | null | # write your code here
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
# SQLite engine for the flashcard DB; check_same_thread=False disables
# SQLite's same-thread guard for the connection.
engine = create_engine('sqlite:///flashcard.db?check_same_thread=False')
Base = declarative_base()
Session = sessionmaker(bind=engine)
# Single shared session for the whole program run.
session = Session()
# Maps a flashcard box to the next one on promotion ('A'->'B', 'B'->'C').
# NOTE(review): consumers are not visible here -- confirm usage in the
# review logic / main_loop defined earlier in this module.
successor = {'A': 'B', 'B': 'C'}
# Create tables for every model registered on Base, then start the CLI.
Base.metadata.create_all(engine)
main_loop()
| 28.675159 | 72 | 0.589294 |
557b20fb22a3ac884a03a5ffa7db1db58d06ea7c | 9,862 | py | Python | src/compass/utils/geo_metadata.py | vbrancat/COMPASS | 285412ac2fc474e789e255dae16eba4485017c07 | [
"Apache-2.0"
] | 11 | 2021-11-24T07:24:11.000Z | 2022-03-23T16:40:13.000Z | src/compass/utils/geo_metadata.py | vbrancat/COMPASS | 285412ac2fc474e789e255dae16eba4485017c07 | [
"Apache-2.0"
] | 6 | 2021-12-15T16:45:58.000Z | 2022-03-24T23:36:16.000Z | src/compass/utils/geo_metadata.py | LiangJYu/COMPASS | 459f5d6cf05c2b7c9013f0d862bfef22af280fa6 | [
"Apache-2.0"
] | 4 | 2021-12-07T19:45:26.000Z | 2022-02-28T23:05:37.000Z | from dataclasses import dataclass
from datetime import datetime
import json
from types import SimpleNamespace
import isce3
from isce3.core import LUT2d, Poly1d, Orbit
from isce3.product import GeoGridParameters
import numpy as np
from ruamel.yaml import YAML
from shapely.geometry import Point, Polygon
from compass.utils.geo_runconfig import GeoRunConfig
from compass.utils.raster_polygon import get_boundary_polygon
from compass.utils.wrap_namespace import wrap_namespace, unwrap_to_dict
| 37.930769 | 95 | 0.58548 |
557c04366bccd072c61ed9301e5aeee3a5f38113 | 142 | py | Python | app.py | WIZ7ZY/flask-app | b59b0b84543c4f0faf40c57b4753a3c324edc2d8 | [
"MIT"
] | null | null | null | app.py | WIZ7ZY/flask-app | b59b0b84543c4f0faf40c57b4753a3c324edc2d8 | [
"MIT"
] | null | null | null | app.py | WIZ7ZY/flask-app | b59b0b84543c4f0faf40c57b4753a3c324edc2d8 | [
"MIT"
] | null | null | null | from web import create_app
import ntplib
if __name__ == '__main__':
app = create_app(debug=False)
app.run(host='0.0.0.0', port=5000)
| 20.285714 | 38 | 0.690141 |
557fbf2a8059c9beebbcd0bd1552ded759c8e7f0 | 2,227 | py | Python | tests/test_db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | tests/test_db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | tests/test_db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from methinks.db import Entry
import pytest
from server.app import create_app
from server.app import db as _db
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def test_insert(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
def test_delete(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
session.delete(e)
session.commit()
def test_find_by_hash(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
first = Entry.query.filter(Entry.hexid == e.hash).first()
assert first == e
| 26.831325 | 93 | 0.64661 |
55813ead580a9fd9024544a5265e546eab6feb28 | 3,339 | py | Python | mysite/mysite/settings.py | prnake/search_engine_demo | 57122052f63bbd054e0ca84d3c6832e6ecb00ec8 | [
"MIT"
] | 3 | 2020-08-08T04:44:29.000Z | 2020-09-10T07:38:11.000Z | mysite/mysite/settings.py | prnake/search_engine_demo | 57122052f63bbd054e0ca84d3c6832e6ecb00ec8 | [
"MIT"
] | null | null | null | mysite/mysite/settings.py | prnake/search_engine_demo | 57122052f63bbd054e0ca84d3c6832e6ecb00ec8 | [
"MIT"
] | null | null | null | import os
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# False if not in os.environ
DEBUG = env('DEBUG')
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
ADMIN_EMAIL = str(env('ADMIN_EMAIL')).split(' ')
ALLOWED_HOSTS = ['*']
SESSION_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"search.apps.SearchConfig",
"scrapy.apps.ScrapyConfig",
'captcha',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Captcha
CAPTCHA_IMAGE_SIZE = (80, 28)
CAPTCHA_TIMEOUT = 1
| 24.91791 | 91 | 0.692123 |
5581ae54a36323a4a46f3383645e34f4c26755e1 | 2,891 | py | Python | bin/simple_log_server.py | kr0nt4b/ctrl_my_home | fd86b479d78f94aaa5d6cc92f0f49399aaef0733 | [
"Apache-2.0"
] | null | null | null | bin/simple_log_server.py | kr0nt4b/ctrl_my_home | fd86b479d78f94aaa5d6cc92f0f49399aaef0733 | [
"Apache-2.0"
] | null | null | null | bin/simple_log_server.py | kr0nt4b/ctrl_my_home | fd86b479d78f94aaa5d6cc92f0f49399aaef0733 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
"""
Simple socket server using threads
"""
import socket
import sys
from thread import *
import os
import logging
HOST = '' # Symbolic name meaning all available interfaces
PORT = 9998 # Arbitrary non-privileged port
LOG_FORMAT = '%(asctime)-15s %(message)s'
SMART_LOG = '/var/log/smart/smarthome.log'
# Entry point: run the socket-based log server until interrupted.
if __name__ == "__main__":
    log_server = LogServer()
    try:
        log_server.start()
    except KeyboardInterrupt as e:
        # BUG FIX: `e.message` is deprecated in Python 2.6+ and gone in
        # Python 3; str(e) is portable (and typically empty for Ctrl-C).
        print(str(e))
5581eb881f3ca5ddfe7fd5be0a7447ea5b604281 | 1,348 | py | Python | utils/calc_drh.py | leogoesger/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 11 | 2018-04-14T00:34:34.000Z | 2021-05-04T17:23:50.000Z | utils/calc_drh.py | Yesicaleon/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 15 | 2019-04-02T03:35:22.000Z | 2022-02-12T13:17:11.000Z | utils/calc_drh.py | Yesicaleon/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 9 | 2018-12-01T19:46:11.000Z | 2022-03-31T17:18:15.000Z | import numpy as np
from utils.helpers import *
percentiles = [10, 25, 50, 75, 90]
percentile_keys = ["ten", "twenty_five", "fifty", "seventy_five", "ninty"]
def calc_drh(flow_matrix):
    """Dimensionless Hydrograph Plotter.

    Normalizes ``flow_matrix`` by the per-column average annual flow
    (computed by ``calculate_average_each_column``) and collects, for each
    row, the 10/25/50/75/90th percentiles plus min/max of the normalized
    values, rounded to 2 decimals.

    :param flow_matrix: 2-D numpy array of flows; presumably rows are days
        and columns are water years -- TODO confirm against callers.
    :return: dict mapping ``percentile_keys`` plus ``"min"``/``"max"`` to
        lists with one entry per row of ``flow_matrix``.
    """
    average_annual_flow = calculate_average_each_column(flow_matrix)
    # Vectorized normalization replaces the original per-element Python
    # loop: broadcasting divides every row by the per-column averages.
    normalized_matrix = flow_matrix / average_annual_flow
    drh = {key: [] for key in percentile_keys}
    drh["min"] = []
    drh["max"] = []
    for row in normalized_matrix:
        for key, percentile in zip(percentile_keys, percentiles):
            drh[key].append(round(np.nanpercentile(row, percentile), 2))
        drh["min"].append(round(np.nanmin(row), 2))
        drh["max"].append(round(np.nanmax(row), 2))
    return drh
| 39.647059 | 116 | 0.647626 |
55837f3526a4635ce717d7aeac4df126359ab0fc | 78 | py | Python | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py | SauravMaheshkar/cookiecutter-kaggle-cv-starter | fb7b8b84daa039034d53398f64e5adfaeead6445 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py | SauravMaheshkar/cookiecutter-kaggle-cv-starter | fb7b8b84daa039034d53398f64e5adfaeead6445 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py | SauravMaheshkar/cookiecutter-kaggle-cv-starter | fb7b8b84daa039034d53398f64e5adfaeead6445 | [
"MIT"
] | null | null | null | import torch.nn as nn
__all__ = ["Model"]
| 9.75 | 23 | 0.653846 |
5583a4b67ff425c68e23ee2615524b5aa7a257d1 | 591 | py | Python | meiduo1/apps/meiduo_admin/views/user_group.py | woobrain/nginx-uwsgi-web | 5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae | [
"MIT"
] | null | null | null | meiduo1/apps/meiduo_admin/views/user_group.py | woobrain/nginx-uwsgi-web | 5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae | [
"MIT"
] | 2 | 2021-05-28T19:45:17.000Z | 2021-11-02T15:49:34.000Z | meiduo1/apps/meiduo_admin/views/user_group.py | woobrain/nginx-uwsgi-web | 5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae | [
"MIT"
] | null | null | null | from django.contrib.auth.models import Group, Permission
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .statistical import UserPagination
from apps.meiduo_admin.serializer.user_group import UserGroupSerializer, GroupPerSerializer
| 32.833333 | 91 | 0.788494 |
558514d3c5a79e30120fc03aa990f786ff898ee6 | 355 | py | Python | server/soman/announcements/urls.py | bilgorajskim/soman | 0d65d632c39a72f51b43fae71f4b00efc7b286c1 | [
"MIT"
] | null | null | null | server/soman/announcements/urls.py | bilgorajskim/soman | 0d65d632c39a72f51b43fae71f4b00efc7b286c1 | [
"MIT"
] | null | null | null | server/soman/announcements/urls.py | bilgorajskim/soman | 0d65d632c39a72f51b43fae71f4b00efc7b286c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets
from . import views
# DRF router exposing AnnouncementViewSet under /api/announcements/.
router = routers.DefaultRouter()
router.register(r'announcements', views.AnnouncementViewSet)
urlpatterns = [
    # All REST endpoints (including the browsable API root) live under /api/.
    url(r'^api/', include(router.urls))
]
| 27.307692 | 60 | 0.769014 |
55860760bf8930847b1a7c03d8b03442f460fce6 | 414 | py | Python | backend/app/db/__init__.py | MaxKochanov/stock-news | 42776196604e91cd673c94c9f7dea71343791bd1 | [
"MIT"
] | null | null | null | backend/app/db/__init__.py | MaxKochanov/stock-news | 42776196604e91cd673c94c9f7dea71343791bd1 | [
"MIT"
] | null | null | null | backend/app/db/__init__.py | MaxKochanov/stock-news | 42776196604e91cd673c94c9f7dea71343791bd1 | [
"MIT"
] | null | null | null | from app.db.wrappers import ClickHouse
DBS = {}
| 19.714286 | 73 | 0.654589 |
5588a3d3733f037d283e357aa48613bd11e602e8 | 1,108 | py | Python | ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-10-08T17:45:44.000Z | 2018-05-29T12:16:43.000Z | ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 5 | 2017-02-12T15:50:53.000Z | 2017-09-18T12:25:01.000Z | ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-07-03T07:59:12.000Z | 2017-09-18T11:22:23.000Z | from ravendb.documents.conventions.document_conventions import DocumentConventions
from ravendb.exceptions.exceptions import DatabaseDoesNotExistException
from ravendb.http.request_executor import RequestExecutor
from ravendb.http.server_node import ServerNode
from ravendb.http.topology import UpdateTopologyParameters
from ravendb.tests.test_base import TestBase
| 44.32 | 99 | 0.773466 |
558930319f7b3b786028343bb2be22080c9650c4 | 14,091 | py | Python | src/icaltool/icaltool.py | randomchars42/icaltool | acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac | [
"Unlicense"
] | null | null | null | src/icaltool/icaltool.py | randomchars42/icaltool | acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac | [
"Unlicense"
] | null | null | null | src/icaltool/icaltool.py | randomchars42/icaltool | acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import csv
import logging
import logging.config
import re
import argparse
import json
import sys
from .log import log
from . import datatypes
logger = logging.getLogger(__name__)
default_column_mapping = {
'DTSTART': 0,
'DTEND': 1,
'DTSTAMP': 2,
'UID': 3,
'CREATED': 4,
'DESCRIPTION': 5,
'LAST-MODIFIED': 6,
'LOCATION': 7,
'SEQUENCE': 8,
'SUMMARY': 9,
'CATEGORIES': 10,
'CLASS': 11,
'ATTACH': 12,
'TRANSP': 13,
'RRULE': 14,
'EXDATE': 15,
'STATUS': 16
}
custom_column_names = {
'DTSTART': 'DTSTART',
'DTEND': 'DTEND',
'DTSTAMP': 'DTSTAMP',
'UID': 'UID',
'CREATED': 'CREATED',
'DESCRIPTION': 'DESCRIPTION',
'LAST-MODIFIED': 'LAST-MODIFIED',
'LOCATION': 'LOCATION',
'SEQUENCE': 'SEQUENCE',
'SUMMARY': 'SUMMARY',
'CATEGORIES': 'CATEGORIES',
'CLASS': 'CLASS',
'ATTACH': 'ATTACH',
'TRANSP': 'TRANSP',
'RRULE': 'RRULE',
'EXDATE': 'EXDATE',
'STATUS': 'STATUS'
}
standard_components = [
'VCALENDAR',
'STANDARD',
'DAYLIGHT',
'VEVENT',
'VTODO',
'VJOURNAL',
'VALARM',
'VFREEBUSY'
]
# taken from :
# https://stackoverflow.com/questions/9027028/argparse-argument-order
def main():
# parse arguments
parser = argparse.ArgumentParser(
description='Tool to work with calendar data. It can read .ics ' +
'(preferred) and .csv files. You can filter the compontents ' +
'(events, todos, alarms, journals, freebusy-indicators) by their ' +
'type or the value of their properties, e.g. start date ' +
'(DTSTART) or organiser (ORGANIZER). The result can be written ' +
'back to a file, again either .ics (preferred) or .csv.',
epilog='')
parser.add_argument(
'file',
help='the file to load, either .csv or .ics (preferred)',
type=str)
parser.add_argument(
'-o',
'--output',
help='the file to write to, either .csv or .ics (preferred)',
type=str,
action=CustomAction)
parser.add_argument(
'-f',
'--filter',
help='rules to filter which component types (events, todos, alarms, ' +
'journals, freebusy-indicators) to keep / sort out',
type=str,
action=CustomAction)
parser.add_argument(
'-s',
'--setup',
help='json-string containing options, e.g. ' +
'{"VEVENT": {"defined_properties": ' +
'{"ATTENDEE": [-1, "Property"]}}} ' +
'to ignore the ATTENDEE property when parsing',
type=str)
parser.add_argument(
'-c',
'--component',
help='component type stored in the .csv-file (one of: events ' +
'[VEVENT], todos [VTODO], alarms [VALARM], journals [VJOURNAL], ' +
'freebusy-indicators [VFREEBUSY]); if no component is specified ' +
'events [VEVENT] are assumed to be the input / desired output',
type=str,
default='VEVENT')
parser.add_argument(
'-v',
'--verbosity',
action='count',
help='increase verbosity',
default=0)
args = parser.parse_args()
# setup logging
logging_config = log.config
if args.verbosity >= 3:
logging_config['handlers']['console']['level'] = 'DEBUG'
elif args.verbosity == 2:
logging_config['handlers']['console']['level'] = 'INFO'
elif args.verbosity == 1:
logging_config['handlers']['console']['level'] = 'WARNING'
else:
logging_config['handlers']['console']['level'] = 'ERROR'
logging.config.dictConfig(logging_config)
# setup ICalTool
tool = ICalTool()
if not args.setup is None:
tool.setup(json.loads(args.setup))
# load file
tool.load(args.file, component=args.component)
# do whatever
if not 'ordered_args' in args:
logger.error('nothing to do with the loaded data - exiting')
return
# process actions in order of flags
for arg, value in args.ordered_args:
if arg == 'output':
if value == args.file:
logger.error('please don\'t attempt to overwrite your input ' +
'file - while it is technically possible it seems unwise ' +
"\n cancelling")
continue
tool.write(value, component=args.component)
elif arg == 'filter':
tool.filter(value)
if __name__ == '__main__':
main()
| 33.630072 | 83 | 0.544461 |
55895bd32cc5eee1e655399e93c373ec1fa66d6b | 1,462 | py | Python | install_R_packages.py | mohaEs/PyVisualField | 64c7303c77500c923300536dd717f2e6c0262323 | [
"MIT"
] | null | null | null | install_R_packages.py | mohaEs/PyVisualField | 64c7303c77500c923300536dd717f2e6c0262323 | [
"MIT"
] | null | null | null | install_R_packages.py | mohaEs/PyVisualField | 64c7303c77500c923300536dd717f2e6c0262323 | [
"MIT"
] | 1 | 2022-01-04T19:33:06.000Z | 2022-01-04T19:33:06.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 18:00:28 2021
@author: Mohammad Eslami
"""
try:
    import rpy2
    print('===> rpy2 version: ', rpy2.__version__)
    # importr loads an R package and exposes it as a Python module.
    from rpy2.robjects.packages import importr
    # import rpy2's package module
    import rpy2.robjects.packages as rpackages
    # R vector of strings
    from rpy2.robjects.vectors import StrVector
except Exception:
    # BUG FIX: the original printed a message and carried on, which then
    # crashed with a NameError on `importr`. Nothing below can run without
    # rpy2, so stop with a clear message instead.
    raise SystemExit('===> Something is wrong: rpy2 is not available!')
# import R's "base" package
lib_base = importr('base')
# import R's "utils" package
lib_utils = importr('utils')
# select a mirror for R packages
lib_utils.chooseCRANmirror(ind=1) # select the first mirror in the list
# R package names
packnames = ('visualFields', 'vfprogression')
# Install only the packages that are not already present.
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if names_to_install:
    lib_utils.install_packages(StrVector(names_to_install))
try:
    lib_vf = importr('visualFields')
    print('===> visualFields R package is installed/loaded successfully!')
    lib_vfprogression = importr('vfprogression')
    print('===> vfprogression R package is installed/loaded successfully!')
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt pass.
    print('===> Something is wrong: R packages are not available!')
# try:
#     import PyVisualFields
#     print('===> PyVisualFields package loaded successfully!')
# except:
#     print('===> Something is wrong: PyVisualFields is not available!')
5589cd912e691b17322bc09642b9a8ec0453acc9 | 8,949 | py | Python | usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 19:47
from __future__ import unicode_literals
from django.db import migrations
| 46.853403 | 84 | 0.703766 |
558acc49675640913785e7f0a2b6dca8cde8835f | 2,654 | py | Python | tests/unit/utils/test_io_utils.py | briannemsick/barrage | f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455 | [
"MIT"
] | 16 | 2019-06-21T22:45:59.000Z | 2020-08-20T22:26:22.000Z | tests/unit/utils/test_io_utils.py | briannemsick/barrage | f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455 | [
"MIT"
] | 15 | 2019-06-21T23:09:59.000Z | 2020-05-07T03:02:33.000Z | tests/unit/utils/test_io_utils.py | briannemsick/barrage | f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455 | [
"MIT"
] | 6 | 2019-06-22T15:27:39.000Z | 2020-07-06T02:18:55.000Z | import json
import os
import pickle
import numpy as np
import pandas as pd
import pytest
from barrage.utils import io_utils
| 31.223529 | 78 | 0.715901 |
558cbd4a7ce3e41aaed8e2b86ecb2cf3f058fd07 | 20,998 | py | Python | script.py | kenneth2001/Virus | e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1 | [
"MIT"
] | null | null | null | script.py | kenneth2001/Virus | e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1 | [
"MIT"
] | null | null | null | script.py | kenneth2001/Virus | e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1 | [
"MIT"
] | null | null | null | import asyncio
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import discord
import numpy as np
from urllib.error import HTTPError
import yt_dlp as youtube_dl
from discord.ext import commands
import os
from pytz import timezone
from yt_dlp.utils import DownloadError, ExtractorError
from util.log import pretty_output, pretty_print
from util.preprocessing import load_config, load_gif, load_user
import secrets
try:
print('LOADING config.txt')
TOKEN, TIMEZONE, MODE = load_config('config/config.txt')
print('LOADED config.txt\n')
except:
print('ERROR LOADING config.txt\n')
tz = timezone(TIMEZONE)
token = TOKEN #os.environ['token']
# 0: local, 1: repl.it
# For setting up bot on replit.com
if MODE == 1:
from util.keep_alive import keep_alive
os.environ['MPLCONFIGDIR'] = '/tmp/' #"/home/runner/Virus-demo/tmp"
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
elif MODE == 0:
import matplotlib.pyplot as plt
import sympy
else:
print('UNDEFINED MODE')
exit()
try:
print('LOADING gif.json')
gif = load_gif('config/gif.json')
print('LOADED gif.json\n')
except:
print('ERROR LOADING gif.json\n')
try:
print('LOADING user.json')
user = load_user('config/user.json')
print('LOADED user.json\n')
except:
print('ERROR LOADING user.json\n')
ytdl_format_options = {
'format': 'bestaudio/best',
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn',
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"
}
# channel_var stores all variable for differnet channels
# key: serverid
# value: 1. activated[bool] - indicate whether the music playing function is activated
# 2. bully[dict] - list of user being bullied
# 3. ctx[object]
# 4. log[list] - log of user entering / leaving voice channels
# 5. playing[bool] - indicate whether the bot is playing music
# 6. queue[list] - list of music to be played
channel_var = {}
# return gif link
# Wong Tai Sin Fortune Sticks ()
client = commands.Bot(command_prefix='#', help_command=None)
async def initialize(server_id: int, ctx: object = None):
    """Ensure ``channel_var`` holds per-server state for ``server_id``.

    Existing entries only get their ``ctx`` backfilled (when one is supplied
    and none was stored); unknown servers get a fresh default state dict.

    Args:
        server_id (int)
        ctx (object, optional): Defaults to None.
    """
    global channel_var
    if server_id in channel_var:
        entry = channel_var[server_id]
        if entry['ctx'] is None and ctx is not None:
            entry['ctx'] = ctx
        return
    channel_var[server_id] = {
        'ctx': ctx,
        'queue': [],
        'activated': False,
        'playing': True,
        'log': [],
        'bully': {},
    }
def generate_question():
    """Build a random arithmetic quiz string from six num-op-num segments.

    Segments are concatenated directly, so the trailing number of one
    segment runs into the leading number of the next (matches the
    original formatting).
    """
    segments = []
    for _ in range(6):
        left = np.random.randint(1, 21)
        operator = np.random.choice(['*', '+', '-'])
        right = np.random.randint(1, 21)
        segments.append('{}{}{}'.format(left, operator, right))
    return ''.join(segments)
# experimental
# experimental
# experimental
# MODE 1 (repl.it): start the keep-alive web server so the host stays awake.
if MODE == 1:
    keep_alive() # For setting up bot on replit.com
# Record launch time in the configured timezone, then start the Discord bot
# (blocking call).
start_time = datetime.now(tz)
client.run(token)
| 37.563506 | 194 | 0.595247 |
558d879413f6f88e3c45e2ca06534a675e1043f9 | 480 | py | Python | solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py | lk-hang/leetcode | 4c8735463bdcb9f48666e03a39eb03ee9f625cec | [
"MIT"
] | null | null | null | solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py | lk-hang/leetcode | 4c8735463bdcb9f48666e03a39eb03ee9f625cec | [
"MIT"
] | null | null | null | solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py | lk-hang/leetcode | 4c8735463bdcb9f48666e03a39eb03ee9f625cec | [
"MIT"
] | null | null | null | """
Given an integer number n, return the difference between the product of its digits and the sum of its digits.
"""
| 25.263158 | 109 | 0.522917 |
558e58ba058923b58851710da67bc2d4ad87a57f | 1,031 | py | Python | VideoIndexerDemo/VideoIndexer/application.py | microsoft/ai4accessibility | 4c13d006f285e31f01d1bc71a55c20e9234713a5 | [
"MIT"
] | 2 | 2021-07-11T06:03:43.000Z | 2021-10-09T23:37:21.000Z | VideoIndexerDemo/VideoIndexer/application.py | microsoft/ai4accessibility | 4c13d006f285e31f01d1bc71a55c20e9234713a5 | [
"MIT"
] | 6 | 2021-09-08T03:07:13.000Z | 2022-03-12T00:57:07.000Z | VideoIndexerDemo/VideoIndexer/application.py | microsoft/ai4accessibility | 4c13d006f285e31f01d1bc71a55c20e9234713a5 | [
"MIT"
] | 3 | 2021-02-14T18:51:31.000Z | 2021-02-14T18:51:41.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dotenv import load_dotenv
load_dotenv()
import os
import json
import requests
from concurrent.futures import ThreadPoolExecutor
from flask import Flask, flash, request, redirect, url_for, session
from video_captioning.main import upload_video, video_callback, train_custom_speech
executor = ThreadPoolExecutor(max_workers=20)
# Flask application instance; routes are registered elsewhere in this module.
app = Flask("layout_detection")
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger to the whole network -- confirm this never runs in
    # production.
    app.run(port=5000, debug=True, host='0.0.0.0')
559154d893c3d43225a58bc587edd3aa01dea828 | 5,154 | py | Python | code/tests/unit/api/test_enrich.py | CiscoSecurity/tr-05-serverless-cybercrime-tracker | 28fcfaa220025c9e8523633a4a9a04f319656756 | [
"MIT"
] | 3 | 2020-04-28T08:53:14.000Z | 2020-12-17T14:25:32.000Z | code/tests/unit/api/test_enrich.py | CiscoSecurity/tr-05-serverless-cybercrime-tracker | 28fcfaa220025c9e8523633a4a9a04f319656756 | [
"MIT"
] | 2 | 2020-03-06T15:00:22.000Z | 2020-06-26T11:21:52.000Z | code/tests/unit/api/test_enrich.py | CiscoSecurity/tr-05-serverless-cybercrime-tracker | 28fcfaa220025c9e8523633a4a9a04f319656756 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from requests.exceptions import SSLError
from pytest import fixture
from unittest import mock
from tests.unit.mock_for_tests import (
CYBERCRIME_RESPONSE_MOCK,
EXPECTED_DELIBERATE_RESPONSE,
EXPECTED_OBSERVE_RESPONSE,
EXPECTED_RESPONSE_500_ERROR,
EXPECTED_RESPONSE_404_ERROR,
CYBERCRIME_ERROR_RESPONSE_MOCK,
EXPECTED_RESPONSE_SSL_ERROR
)
def cybercrime_api_response(*, ok, payload=None, status_error=None):
    """Fabricate a mocked Cybercrime Tracker HTTP response object."""
    response = mock.MagicMock()
    response.ok = ok
    if ok and not payload:
        # Successful call without an explicit body: serve the canned fixture.
        payload = CYBERCRIME_RESPONSE_MOCK
    else:
        response.status_code = status_error

    def _json():
        return payload

    response.json = _json
    return response
def test_enrich_call_success(route, client, valid_json,
                             cybercrime_api_request):
    """Happy path: a successful upstream call yields the expected payload."""
    cybercrime_api_request.return_value = cybercrime_api_response(ok=True)
    response = client.post(route, json=valid_json)
    assert response.status_code == HTTPStatus.OK
    data = response.get_json()
    if route == '/observe/observables':
        # Unstable fields (timestamps, generated ids) are stripped before
        # comparing against the static fixture.
        verdict_docs = data['data']['verdicts']['docs']
        assert verdict_docs[0].pop('valid_time')
        judgement_docs = data['data']['judgements']['docs']
        assert judgement_docs[0].pop('id')
        assert judgement_docs[0].pop('valid_time')
        assert data == EXPECTED_OBSERVE_RESPONSE
    if route == '/deliberate/observables':
        verdict_docs = data['data']['verdicts']['docs']
        assert verdict_docs[0].pop('valid_time')
        assert data == EXPECTED_DELIBERATE_RESPONSE
| 29.451429 | 76 | 0.695188 |
55915bb2fe7f5c79e7cd44acfd89dd079dc66443 | 2,658 | py | Python | Python/Euler 01 - 10.py | jiegillet/project-euler | 3b530e11af00e9d9eccb7aa41ed8018ee6d7b472 | [
"MIT"
] | null | null | null | Python/Euler 01 - 10.py | jiegillet/project-euler | 3b530e11af00e9d9eccb7aa41ed8018ee6d7b472 | [
"MIT"
] | null | null | null | Python/Euler 01 - 10.py | jiegillet/project-euler | 3b530e11af00e9d9eccb7aa41ed8018ee6d7b472 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Jrmie on 2013-10-26.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
"""
# Problem 1
lim=1000
s=0
for i in range(lim):
if i%3==0 or i%5==0:
s+=i
print s
print sum([x for x in range(1000) if x % 3== 0 or x % 5== 0])
"""
"""
# Problem 2
lim=4000000
f1,f2,s=1,1,0
while f2<lim:
f1,f2=f2,f1+f2
if f2%2==0: s+=f2
print s
"""
""""
# Problem 3
num=600851475143
while num>1:
div=2
while num%div!=0 and div!=num:
div+=1
num/=div
print div
"""
"""
# Problem 4
max=0
for i in range(999,99,-1):
for j in range(999,i-99,-1):
if str(i*j)==str(i*j)[::-1] and i>max:
max=i*j
print max
"""
"""
# Problem 5
print 2**4*3**2*5*7*11*13*17*19
"""
"""
# Problem 6
print sum(range(1,101))**2- sum([e**2 for e in range(1,101)])
"""
"""
# Problem 7
primes=[2,3]
n=3
# while len(primes)<10001:
# n+=2
# if not 0 in [n%p for p in primes]:
# primes.append(n)
# print primes[-1] # 45 seconds
while len(primes)<100001:
n+=2
p=True
for p in primes:
if p*p>n: break
if n%p==0: p=False; break
if p: primes.append(n)
print primes[-1] # .3 seconds for 10001 # 6 second for 100001
"""
"""
# Problem 8
num=str(7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450)
print max( int(num[i])*int(num[i+1])*int(num[i+2])*int(num[i+3])*int(num[i+4]) for i in range(len(num)-4))
"""
"""
# Problem 9
sol=0
for i in range(1000,2,-1):
for j in range(i-1,2,-1):
if i**2==j**2+(1000-i-j)**2:
sol=i*j*(1000-i-j)
break
if sol>0: break
print sol
"""
# Problem 10: sum of all primes below two million.
# BUG FIX: the original reused `p` both as the primality flag and as the
# trial-division loop variable, so the post-loop `if p:` only worked by
# accident (a truthy prime leaked out of the loop). A dedicated flag makes
# the logic explicit. The Python 2 `print` statement is also replaced with
# the parenthesized form, which is valid on both Python 2 and 3.
primes = [2, 3]
n = 3
# Grow the prime list by trial division until we have passed the bound;
# the final (overshooting) prime is subtracted from the sum below.
while primes[-1] < 2E6:
    n += 2
    is_prime = True
    for p in primes:
        if p * p > n:
            break
        if n % p == 0:
            is_prime = False
            break
    if is_prime:
        primes.append(n)
print(sum(primes) - primes[-1])
5593fe3d21ad82b5382d08854df0a8f99eec0ed9 | 1,900 | py | Python | src/ensae_teaching_cs/tests/american_cities.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 73 | 2015-05-12T13:12:11.000Z | 2021-12-21T11:44:29.000Z | src/ensae_teaching_cs/tests/american_cities.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 90 | 2015-06-23T11:11:35.000Z | 2021-03-31T22:09:15.000Z | src/ensae_teaching_cs/tests/american_cities.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 65 | 2015-01-13T08:23:55.000Z | 2022-02-11T22:42:07.000Z | """
@file
@brief Function to test others functionalities
"""
import os
import pandas
from pyquickhelper.loghelper import fLOG
from ..faq.faq_matplotlib import graph_cities
from ..special import tsp_kruskal_algorithm, distance_haversine
def american_cities(df_or_filename, nb_cities=-1, img=None, fLOG=fLOG):
    """
    Computes the :epkg:`TSP` for american cities.

    @param      df_or_filename      dataframe or path to a CSV file
    @param      nb_cities           number of cities to keep (<= 0 keeps all)
    @param      img                 image file to produce (None skips plotting)
    @param      fLOG                logging function
    @return                         dataframe (ordered trip)
    """
    if isinstance(df_or_filename, str):
        df = pandas.read_csv(df_or_filename)
    else:
        df = df_or_filename
    # Flip longitudes to the western hemisphere and clip to (roughly) the
    # contiguous United States bounding box.
    df["Longitude"] = -df["Longitude"]
    df = df[df.Latitude < 52]
    df = df[df.Longitude > -130].copy()
    fLOG(df.columns)
    df = df.dropna()
    if nb_cities > 0:
        df = df[:nb_cities].copy()
    fLOG(df.shape)
    # assumes columns 2-4 of the frame are latitude, longitude, city name
    # -- TODO confirm against the input schema.
    points = [(row[1], row[2], row[3])
              for row in df.itertuples(index=False)]
    fLOG("number of cities:", len(points))
    # BUG FIX: the original passed the undefined name `haversine`
    # (NameError at runtime); the imported helper is `distance_haversine`.
    trip = tsp_kruskal_algorithm(
        points, distance=distance_haversine, fLOG=fLOG, max_iter=10)
    # trip
    dftrip = pandas.DataFrame(
        trip, columns=["Latitude", "Longitude", "City"])
    # graph: label only every 10th city so the plot stays readable
    for i in range(0, dftrip.shape[0]):
        if i % 10 != 0:
            dftrip.loc[i, "City"] = ""
    if img is not None:
        import matplotlib.pyplot as plt
        fig, ax = graph_cities(dftrip, markersize=3, linked=True, fLOG=fLOG,
                               fontcolor="red", fontsize='16', loop=True, figsize=(32, 32))
        assert ax is not None
        fig.savefig(img)
        assert os.path.exists(img)
        plt.close('all')
    fLOG("end")
    return dftrip
| 29.6875 | 91 | 0.596316 |
55948a0d8acfcbe1f96f58b36c1bb83505bd04f6 | 175 | py | Python | first_task.py | yashika0607/Task1_python | 4a867227f48f0c8ed9ad418fb412550eef3a7571 | [
"Apache-2.0"
] | null | null | null | first_task.py | yashika0607/Task1_python | 4a867227f48f0c8ed9ad418fb412550eef3a7571 | [
"Apache-2.0"
] | null | null | null | first_task.py | yashika0607/Task1_python | 4a867227f48f0c8ed9ad418fb412550eef3a7571 | [
"Apache-2.0"
] | null | null | null | #task 1
import math

# Task 1: area of a circle from a user-supplied radius.
r = float(input("Enter the radius of the circle?\n"))
# BUG FIX: the hard-coded constant 3.143 is wrong (pi rounded to three
# decimals is 3.142); math.pi is exact to float precision.
area = math.pi * r * r
print("Area of the circle is ", area)
# Task 2: echo the entered file name with a .py extension appended.
x = input("Enter the file name\n")
print(x + ".py")
| 17.5 | 51 | 0.674286 |
5594b24c92581e7c3ba26f490dea8b770f2cf8fd | 2,049 | py | Python | tools/ntp_spoofer.py | dschoonwinkel/pypacker | 58c833f40207db746b0b2995ca3835a533e0258e | [
"BSD-3-Clause"
] | null | null | null | tools/ntp_spoofer.py | dschoonwinkel/pypacker | 58c833f40207db746b0b2995ca3835a533e0258e | [
"BSD-3-Clause"
] | null | null | null | tools/ntp_spoofer.py | dschoonwinkel/pypacker | 58c833f40207db746b0b2995ca3835a533e0258e | [
"BSD-3-Clause"
] | null | null | null | """Simple NTP spoofing tool."""
from pypacker.layer12.ethernet import Ethernet
from pypacker.layer3 import ip
from pypacker.layer4.udp import UDP
from pypacker.layer567 import ntp
from pypacker import psocket

# Interface to listen on.
IFACE = "wlan0"
# Source address which commits a NTP request; we answer it with a spoofed reply.
IP_SRC = "192.168.178.27"

#
# normal NTP request (kept for reference, not executed)
#
"""
psock_req = psocket.SocketHndl(iface_name=IFACE, mode=psocket.SocketHndl.MODE_LAYER_3)
ntp_req = ip.IP(src_s=IP_SRC, dst_s="188.138.9.208", p=ip.IP_PROTO_UDP) +\
	UDP(sport=1234, dport=123) +\
	ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.CLIENT)
print("sending NTP request and waiting for answer..")
answer = psock_req.sr(ntp_req)[0][ntp.NTP]
"""
# print("answer is: %s" % answer)
#unpack_I = struct.Struct(">I").unpack
# print("seconds since 1.1.1900: %d" % unpack_I(answer.transmit_time[0:4])[0])
# psock_req.close()

#
# spoof NTP response
#
print("waiting for NTP request")
psock = psocket.SocketHndl(iface_name=IFACE, timeout=600)


def is_ntp_from_target(p):
    # Only packets that carry an NTP layer and originate from IP_SRC qualify.
    return p[ntp.NTP] is not None and p[ip.IP].src_s == IP_SRC


# Fix: the predicate used to be bound to the module-level name `filter`,
# shadowing the builtin `filter` (and assigning a lambda, PEP 8 E731).
answer = psock.recvp(filter_match_recv=is_ntp_from_target)[0]
answer_ntp = answer[ntp.NTP]
print("got NTP packet: %s" % answer_ntp)

# Craft the spoofed server reply: swap the captured MAC/IP/port directions and
# echo the client's transmit timestamp back as the originate timestamp.
ntp_answer_send = Ethernet(dst=answer[Ethernet].src, src=answer[Ethernet].dst) +\
	ip.IP(src=answer[ip.IP].dst, dst_s=IP_SRC, p=ip.IP_PROTO_UDP) +\
	UDP(sport=answer[UDP].dport, dport=answer[UDP].sport) +\
	ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.SERVER, stratum=2, interval=4,
		update_time=answer_ntp.transmit_time,
		originate_time=answer_ntp.transmit_time,
		receive_time=b"\x00" * 4 + answer_ntp.transmit_time[4:],
		transmit_time=b"\x00" * 4 + answer_ntp.transmit_time[4:])

# alternative packet creation
"""
ntp_answer_send = answer.create_reverse()
layer_ntp = ntp_answer_send[ntp.NTP]
layer_ntp.mode = ntp.SERVER
layer_ntp.originate_time = answer_ntp.transmit_time
layer_ntp.receive_time = layer_ntp.transmit_time = b"\x00"*4 + answer_ntp.transmit_time[4:]
"""
psock.send(ntp_answer_send.bin())
psock.close()
| 32.52381 | 91 | 0.736945 |
5594c3feafec578628223eff5ebd91b66138d3a5 | 7,524 | py | Python | motsfinder/exprs/test_basics.py | daniel-dpk/distorted-motsfinder-public | 8c2eec174c755c55b26b568243e58c2956a35257 | [
"MIT"
] | 4 | 2019-08-26T09:50:26.000Z | 2022-03-02T16:11:17.000Z | motsfinder/exprs/test_basics.py | daniel-dpk/distorted-motsfinder-public | 8c2eec174c755c55b26b568243e58c2956a35257 | [
"MIT"
] | 5 | 2021-03-31T19:55:34.000Z | 2021-04-01T08:29:53.000Z | motsfinder/exprs/test_basics.py | daniel-dpk/distorted-motsfinder-public | 8c2eec174c755c55b26b568243e58c2956a35257 | [
"MIT"
] | 1 | 2019-09-18T14:15:33.000Z | 2019-09-18T14:15:33.000Z | #!/usr/bin/env python3
from __future__ import print_function
from builtins import range, map
import unittest
import sys
import pickle
import numpy as np
from mpmath import mp
from testutils import DpkTestCase
from .numexpr import NumericExpression
from .numexpr import isclose
from .basics import OffsetExpression, DivisionExpression, SimpleSinExpression
from .basics import SimpleCoshExpression
def run_tests():
    """Run every unittest defined in this module; return the failure count."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromModule(sys.modules[__name__])
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return len(result.failures)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 34.356164 | 91 | 0.591042 |
559516145d3a91e65f7eba170cf38f3e8329840b | 468 | py | Python | python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | 7 | 2020-05-10T09:57:23.000Z | 2021-03-27T11:55:07.000Z | python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | null | null | null | python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | 3 | 2021-03-27T03:42:57.000Z | 2021-08-09T12:03:41.000Z | from progression import Progression
# Script entry point: print the first 20 values of a Fibonacci progression.
if __name__ == "__main__":
    # NOTE(review): FibonacciProgression is not visibly imported or defined
    # here — presumably it subclasses the imported Progression; verify.
    fibonacci_progresssion = FibonacciProgression(first= 1, second= 2)
    fibonacci_progresssion.print_progression(20)
| 29.25 | 85 | 0.713675 |
55966e42aa982766be05f8a6dbd86f8df5f992eb | 18,587 | py | Python | openamundsen/modules/snow/multilayermodel.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 3 | 2021-05-28T06:46:36.000Z | 2021-06-14T13:39:25.000Z | openamundsen/modules/snow/multilayermodel.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 22 | 2021-04-28T12:31:58.000Z | 2022-03-09T18:29:12.000Z | openamundsen/modules/snow/multilayermodel.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 1 | 2021-06-01T12:48:54.000Z | 2021-06-01T12:48:54.000Z | import numpy as np
from numba import njit, prange
from openamundsen import constants, constants as c, heatconduction
from openamundsen.snowmodel import SnowModel
from . import snow
| 30.470492 | 161 | 0.559907 |
5596e16fb509c3accc1b616f5872b39869a62e82 | 2,746 | py | Python | scripts/custom_task_manager.py | operaun/dotfiles | 6e91206427199a9f6a9ac7397a886ac2f26eade0 | [
"MIT"
] | 1 | 2016-10-06T12:31:04.000Z | 2016-10-06T12:31:04.000Z | scripts/custom_task_manager.py | operaun/dotfiles | 6e91206427199a9f6a9ac7397a886ac2f26eade0 | [
"MIT"
] | null | null | null | scripts/custom_task_manager.py | operaun/dotfiles | 6e91206427199a9f6a9ac7397a886ac2f26eade0 | [
"MIT"
] | null | null | null | # scripts/custom_task_manager.py
import os
import subprocess
from abc import ABCMeta, abstractmethod
from pretty_printer import *
| 31.204545 | 99 | 0.659505 |
5598bbdfc235215336c94064608a0db8ff763655 | 3,961 | py | Python | bpmn/urls.py | VSSantana/SFDjango-BPMN | e5a3fb8da9282fd88f72a85a4b34d89d38391e36 | [
"MIT"
] | 1 | 2021-09-21T00:02:10.000Z | 2021-09-21T00:02:10.000Z | bpmn/urls.py | VSSantana/SFDjango-BPMN | e5a3fb8da9282fd88f72a85a4b34d89d38391e36 | [
"MIT"
] | 5 | 2021-09-22T13:54:06.000Z | 2021-09-22T14:05:56.000Z | bpmn/urls.py | marcelobbfonseca/SFDjango-BPMN | 50565763414f52d9e84004494cf550c6fe2358fa | [
"MIT"
] | 1 | 2021-09-18T01:22:25.000Z | 2021-09-18T01:22:25.000Z | from django.urls import path
from django.contrib.auth.views import LoginView
from .views.activity_view import *
from .views.activity_type_view import *
from .views.event_view import *
from .views.flow_view import *
from .views.lane_view import *
from .views.pool_view import *
from .views.process_type_view import *
from .views.process_view import *
from .views.sequence_view import *
# URL routes for the BPMN app.  Each model below gets the same four CRUD
# routes: a list view, a create form, an update form keyed by primary key,
# and a delete-confirmation page keyed by primary key.
urlpatterns = [
    # Authentication.
    path('', LoginView.as_view(template_name='accounts/login.html'), name="login"),
    # Activity types (CRUD).
    path('activity_type_list/', ActivityTypeView.as_view(), name='activity_type_list'),
    path('activity_type_create_form/', ActivityTypeCreate.as_view(), name='activity_type_create_form'),
    path('activity_type_update_form/<int:pk>', ActivityTypeUpdate.as_view(), name='activity_type_update_form'),
    path('activity_type_delete_confirmation/<int:pk>', ActivityTypeDelete.as_view(), name='activity_type_delete_confirmation'),
    # Process types (CRUD).
    path('process_type_list/', ProcessTypeView.as_view(), name='process_type_list'),
    path('process_type_create_form/', ProcessTypeCreate.as_view(), name='process_type_create_form'),
    path('process_type_update_form/<int:pk>', ProcessTypeUpdate.as_view(), name='process_type_update_form'),
    path('process_type_delete_confirmation/<int:pk>', ProcessTypeDelete.as_view(), name='process_type_delete_confirmation'),
    # Pools (CRUD).
    path('pool_list/', PoolView.as_view(), name='pool_list'),
    path('pool_create_form/', PoolCreate.as_view(), name='pool_create_form'),
    path('pool_update_form/<int:pk>', PoolUpdate.as_view(), name='pool_update_form'),
    path('pool_delete_confirmation/<int:pk>', PoolDelete.as_view(), name='pool_delete_confirmation'),
    # Lanes (CRUD).
    path('lane_list/', LaneView.as_view(), name='lane_list'),
    path('lane_create_form/', LaneCreate.as_view(), name='lane_create_form'),
    path('lane_update_form/<int:pk>', LaneUpdate.as_view(), name='lane_update_form'),
    path('lane_delete_confirmation/<int:pk>', LaneDelete.as_view(), name='lane_delete_confirmation'),
    # Events (CRUD).
    path('event_list/', EventView.as_view(), name='event_list'),
    path('event_create_form/', EventCreate.as_view(), name='event_create_form'),
    path('event_update_form/<int:pk>', EventUpdate.as_view(), name='event_update_form'),
    path('event_delete_confirmation/<int:pk>', EventDelete.as_view(), name='event_delete_confirmation'),
    # Activities (CRUD).
    path('activity_list/', ActivityView.as_view(), name='activity_list'),
    path('activity_create_form/', ActivityCreate.as_view(), name='activity_create_form'),
    path('activity_update_form/<int:pk>', ActivityUpdate.as_view(), name='activity_update_form'),
    path('activity_delete_confirmation/<int:pk>', ActivityDelete.as_view(), name='activity_delete_confirmation'),
    # Sequences (CRUD).
    path('sequence_list/', SequenceView.as_view(), name='sequence_list'),
    path('sequence_create_form/', SequenceCreate.as_view(), name='sequence_create_form'),
    path('sequence_update_form/<int:pk>', SequenceUpdate.as_view(), name='sequence_update_form'),
    path('sequence_delete_confirmation/<int:pk>', SequenceDelete.as_view(), name='sequence_delete_confirmation'),
    # Flows (CRUD).
    path('flow_list/', FlowView.as_view(), name='flow_list'),
    path('flow_create_form/', FlowCreate.as_view(), name='flow_create_form'),
    path('flow_update_form/<int:pk>', FlowUpdate.as_view(), name='flow_update_form'),
    path('flow_delete_confirmation/<int:pk>', FlowDelete.as_view(), name='flow_delete_confirmation'),
    # Processes (CRUD).
    path('process_list/', ProcessView.as_view(), name='process_list'),
    path('process_create_form/', ProcessCreate.as_view(), name='process_create_form'),
    path('process_update_form/<int:pk>', ProcessUpdate.as_view(), name='process_update_form'),
    path('process_delete_confirmation/<int:pk>', ProcessDelete.as_view(), name='process_delete_confirmation'),
    # Modeling and ontology helpers.
    path('process-modeling/', ProcessModelingView.as_view(), name="process_modeling"),
    path('ontology-suggestion', OntologySuggestionView.as_view(), name="ontology_suggestion")
]
| 73.351852 | 127 | 0.757637 |
5598fc6baf6adbca126912ba31690ef9d92c7c11 | 2,106 | py | Python | utils/boilerplate/test_gorilla.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null | utils/boilerplate/test_gorilla.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null | utils/boilerplate/test_gorilla.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null |
import unittest
from pysapets.gorilla import Gorilla
from pysapets.animal import Animal
import pysapets.constants as constants
from unittest.mock import patch
from io import StringIO
from copy import deepcopy
| 31.432836 | 96 | 0.74359 |
559ad11e61e76b073ffa707dfeef7cd524cd64ce | 4,923 | py | Python | delpapa/avalanches/data_analysis.py | delpapa/CritSORN | cdad55d55f39e04f568ca1bc0c6036bec8db08fb | [
"MIT"
] | null | null | null | delpapa/avalanches/data_analysis.py | delpapa/CritSORN | cdad55d55f39e04f568ca1bc0c6036bec8db08fb | [
"MIT"
] | null | null | null | delpapa/avalanches/data_analysis.py | delpapa/CritSORN | cdad55d55f39e04f568ca1bc0c6036bec8db08fb | [
"MIT"
] | null | null | null | ########################################################################
# This script contains all the data analysis functions #
########################################################################
from __future__ import division
from pylab import *
import scipy, scipy.stats
import tables
import os
from tempfile import TemporaryFile
### distribution of the total activity
# Return the average activity (as a array), and std
### calculate the size average as a function of the duration
# receives the non-sorted arrays with measures of size and duration
# returns two non-sorted arrays containing the duration and average
# avalanche size.
| 33.489796 | 78 | 0.558399 |
559adf86675fc57065409a6e9ac6154669c807e5 | 3,404 | py | Python | edwin/__init__.py | AlanSwenson/edwin | 94f62a4db6cc5123224607f92a1f552be072c708 | [
"MIT"
] | null | null | null | edwin/__init__.py | AlanSwenson/edwin | 94f62a4db6cc5123224607f92a1f552be072c708 | [
"MIT"
] | 8 | 2019-03-13T13:39:00.000Z | 2019-04-02T14:58:21.000Z | edwin/__init__.py | AlanSwenson/edwin | 94f62a4db6cc5123224607f92a1f552be072c708 | [
"MIT"
] | null | null | null | import eventlet
eventlet.monkey_patch()
import time
from datetime import datetime, timedelta, timezone
import pytz
from email.utils import parsedate_tz
import json
from flask import Flask, request, render_template
from threading import Thread
from tweepy import OAuthHandler, API, Stream, Cursor
from flask_socketio import (
SocketIO,
emit,
join_room,
leave_room,
close_room,
rooms,
disconnect,
)
from darksky import forecast
socketio = SocketIO()
thread = None
thread2 = None
from edwin.tweets import StdOutListener
| 30.392857 | 84 | 0.574031 |
559ae7307b62942efd1983a817dbb736879880c0 | 2,255 | py | Python | troop/admin.py | packmas13/registration | bfb42c5479d59494b59e7c656cb04826e110e8d2 | [
"MIT"
] | 1 | 2020-08-12T09:51:42.000Z | 2020-08-12T09:51:42.000Z | troop/admin.py | packmas13/registration | bfb42c5479d59494b59e7c656cb04826e110e8d2 | [
"MIT"
] | 46 | 2020-01-24T16:51:41.000Z | 2022-03-29T16:03:12.000Z | troop/admin.py | packmas13/registration | bfb42c5479d59494b59e7c656cb04826e110e8d2 | [
"MIT"
] | 1 | 2020-01-28T21:25:06.000Z | 2020-01-28T21:25:06.000Z | from django import forms
from django.contrib import admin
from .models import Attendance, Diet, Participant, Troop
from payment.admin import DiscountInline, PaymentInline
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Diet, DietAdmin)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(Troop, TroopAdmin)
| 21.47619 | 75 | 0.632373 |
559af5721a6a15c927e5d10a7e185b857bbef70d | 142 | py | Python | {{cookiecutter.project_name}}/service/worker/beat.py | ProjectTemplates/python-backend-service | 5266916e54faaf236bc972a2cd7bb1217e8a8625 | [
"MIT"
] | 7 | 2020-07-28T18:45:20.000Z | 2021-12-11T23:33:49.000Z | {{cookiecutter.project_name}}/service/worker/beat.py | ProjectTemplates/python-fastapi-backend | 5266916e54faaf236bc972a2cd7bb1217e8a8625 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/service/worker/beat.py | ProjectTemplates/python-fastapi-backend | 5266916e54faaf236bc972a2cd7bb1217e8a8625 | [
"MIT"
] | 1 | 2020-05-10T20:26:02.000Z | 2020-05-10T20:26:02.000Z | from conf import celery_settings
from .app import app
| 15.777778 | 43 | 0.788732 |
559b8b906411edd79ce8b01d4b0d9cdea4c7292c | 829 | py | Python | demo_snippets/11_Datenvisualisierung/main.py | fabod/pro2 | 69b1015fa789ef05bf9b514d94b231f76bdf5e29 | [
"MIT"
] | 2 | 2020-03-03T14:57:40.000Z | 2020-03-20T10:59:47.000Z | demo_snippets/11_Datenvisualisierung/main.py | fabod/pro2 | 69b1015fa789ef05bf9b514d94b231f76bdf5e29 | [
"MIT"
] | null | null | null | demo_snippets/11_Datenvisualisierung/main.py | fabod/pro2 | 69b1015fa789ef05bf9b514d94b231f76bdf5e29 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import render_template
import plotly.express as px
from plotly.offline import plot
app = Flask("Datenvisualisierung")
if __name__ == '__main__':
app.run(debug=True, port=5000)
| 18.422222 | 53 | 0.587455 |
559bff5f8a9189b7032f820f194b11e430ff84ea | 24,336 | py | Python | sdk/python/pulumi_digitalocean/database_connection_pool.py | mikealgj/pulumi-digitalocean | 77c109ab364eb69b7668b007c29413f5d2c95209 | [
"ECL-2.0",
"Apache-2.0"
] | 53 | 2019-04-25T14:43:12.000Z | 2022-03-14T15:51:44.000Z | sdk/python/pulumi_digitalocean/database_connection_pool.py | mikealgj/pulumi-digitalocean | 77c109ab364eb69b7668b007c29413f5d2c95209 | [
"ECL-2.0",
"Apache-2.0"
] | 158 | 2019-04-15T21:47:18.000Z | 2022-03-29T21:21:57.000Z | sdk/python/pulumi_digitalocean/database_connection_pool.py | mikealgj/pulumi-digitalocean | 77c109ab364eb69b7668b007c29413f5d2c95209 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2019-04-15T20:16:11.000Z | 2021-05-28T19:08:32.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DatabaseConnectionPoolArgs', 'DatabaseConnectionPool']
class DatabaseConnectionPool(pulumi.CustomResource):
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """
        Dispatching constructor: accepts either a ``DatabaseConnectionPoolArgs``
        object or the equivalent plain keyword arguments, then forwards both
        forms to :meth:`_internal_init`.
        """
        # get_resource_args_opts separates a typed args object (if one was
        # passed) from the resource options.
        resource_args, opts = _utilities.get_resource_args_opts(DatabaseConnectionPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: unpack its attributes as keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Plain-kwargs form: pass everything through unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 db_name: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[int]] = None,
                 user: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Validate the inputs and register the connection-pool resource.

        ``cluster_id``, ``db_name``, ``mode``, ``size`` and ``user`` are
        required when creating a new resource; they may be omitted only when
        an existing resource is being looked up via ``opts.urn``/``opts.id``.
        """
        # Normalize the resource options and stamp the provider version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id means we are creating (not adopting) the resource, so the
        # property bag must be built here from the keyword arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # Bypass DatabaseConnectionPoolArgs.__init__ and fill the
            # attribute dict directly (generated-code pattern).
            __props__ = DatabaseConnectionPoolArgs.__new__(DatabaseConnectionPoolArgs)
            # Required inputs: reject None unless a URN lookup was requested.
            if cluster_id is None and not opts.urn:
                raise TypeError("Missing required property 'cluster_id'")
            __props__.__dict__["cluster_id"] = cluster_id
            if db_name is None and not opts.urn:
                raise TypeError("Missing required property 'db_name'")
            __props__.__dict__["db_name"] = db_name
            if mode is None and not opts.urn:
                raise TypeError("Missing required property 'mode'")
            __props__.__dict__["mode"] = mode
            __props__.__dict__["name"] = name
            if size is None and not opts.urn:
                raise TypeError("Missing required property 'size'")
            __props__.__dict__["size"] = size
            if user is None and not opts.urn:
                raise TypeError("Missing required property 'user'")
            __props__.__dict__["user"] = user
            # Seed the remaining properties to None — presumably these are
            # provider-populated outputs (host, credentials, URIs); verify
            # against the provider schema.
            __props__.__dict__["host"] = None
            __props__.__dict__["password"] = None
            __props__.__dict__["port"] = None
            __props__.__dict__["private_host"] = None
            __props__.__dict__["private_uri"] = None
            __props__.__dict__["uri"] = None
        # Hand off to the Pulumi engine under the provider's resource token.
        super(DatabaseConnectionPool, __self__).__init__(
            'digitalocean:index/databaseConnectionPool:DatabaseConnectionPool',
            resource_name,
            __props__,
            opts)
| 39.764706 | 166 | 0.623973 |
559c155e6e0b7efb591c20bbc5e5237149bd61eb | 2,940 | py | Python | data_analysis/get_model_statistics.py | fluTN/influenza | 40cbede52bc4e95d52369eebe4a50ad4b71369d1 | [
"MIT"
] | 1 | 2020-10-29T09:56:31.000Z | 2020-10-29T09:56:31.000Z | data_analysis/get_model_statistics.py | fluTN/influenza | 40cbede52bc4e95d52369eebe4a50ad4b71369d1 | [
"MIT"
] | null | null | null | data_analysis/get_model_statistics.py | fluTN/influenza | 40cbede52bc4e95d52369eebe4a50ad4b71369d1 | [
"MIT"
] | 1 | 2022-01-22T11:34:29.000Z | 2022-01-22T11:34:29.000Z | # -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
import numpy as np
from scipy import stats
from docopt import docopt
import os
import glob
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Get only the weeks we care for
start_year = "2007-42" if not args["--start-year"] else args["--start-year"]
end_year = "2019-15" if not args["--end-year"] else args["--end-year"]
start_season = data["week"] >= start_year
end_season = data["week"] <= str(int(end_year.split("-")[0]) + 1) + "-" + end_year.split("-")[1]
total = start_season & end_season
data = data[total]
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
total_pearson = 0
for i in np.arange(0, len(data["prediction"]), 26):
total_pearson += stats.pearsonr(data["prediction"][i:i+26], data["incidence"][i:i+26])[0]
print("Pearson Correlation (value/p): ", total_pearson/(len(data["prediction"])/26))
print("")
print("Mean Squared Error: ", mean_squared_error(data["prediction"], data["incidence"]))
print("")
if not args["--no-graph"]:
ax = sns.distplot(residuals, label="Residual")
plt.figure()
ax = sns.distplot(data["incidence"], label="Incidence")
ax = sns.distplot(data["prediction"], label="Prediction")
plt.legend()
plt.show()
| 33.409091 | 172 | 0.644558 |
559f3ab5a294666e58af2d7a21dc2e34d7f16b41 | 21,887 | py | Python | sisu/summarizer.py | balouf/sisu | 07541e6a02e545372452b33f7df056331397001f | [
"BSD-3-Clause"
] | null | null | null | sisu/summarizer.py | balouf/sisu | 07541e6a02e545372452b33f7df056331397001f | [
"BSD-3-Clause"
] | null | null | null | sisu/summarizer.py | balouf/sisu | 07541e6a02e545372452b33f7df056331397001f | [
"BSD-3-Clause"
] | null | null | null | from scipy.sparse import vstack
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from sisu.preprocessing.tokenizer import is_relevant_sentence, make_sentences, sanitize_text
from gismo.gismo import Gismo, covering_order
from gismo.common import auto_k
from gismo.parameters import Parameters
from gismo.corpus import Corpus
from gismo.embedding import Embedding
from sisu.embedding_idf import IdfEmbedding
def cosine_order(projection, sentences, query):
    """
    Rank the relevant sentences by decreasing cosine similarity to ``query``.

    Parameters
    ----------
    projection: callable
        Maps a text to a tuple whose first entry is its embedding
        (typically a Gismo :meth:`~gismo.embedding.Embedding.query_projection`).
    sentences: :class:`list` of :class:`dict`
        Sentence records as produced by :func:`~sisu.summarizer.extract_sentences`.
    query: :class:`str`
        Target query.

    Returns
    -------
    :class:`list` of :class:`int`
        Indices of the relevant sentences, most similar to the query first.
    """
    kept = [record['index'] for record in sentences if record['relevant']]
    query_vec = projection(query)[0]
    sentence_vecs = vstack([projection(sentences[idx]['sanitized'])[0] for idx in kept])
    similarities = cosine_similarity(sentence_vecs, query_vec)[:, 0]
    ranking = np.argsort(-similarities)
    return [kept[position] for position in ranking]
def extract_sentences(source, indices, getter=None, tester=None):
    """
    Gather the ``source`` entries selected by ``indices`` and split them into
    a flat list of sentence records.

    Each record is a :class:`dict` with the keys:

    - ``index``: position of the sentence in the returned list;
    - ``sentence``: the sentence text;
    - ``relevant``: whether the sentence is eligible for the summary
      (a relevant sentence that duplicates an earlier one is demoted);
    - ``sanitized``: for relevant sentences, a simplified version to feed to
      the embedding (empty string otherwise).

    Parameters
    ----------
    source: :class:`list`
        List of objects.
    indices: iterable of :class:`int`
        Indexes of the source items to select.
    getter: callable, optional
        Converts a source entry into text (defaults to :class:`str`).
    tester: callable, optional
        Decides whether a sentence is summary material (defaults to
        :func:`~sisu.preprocessing.tokenizer.is_relevant_sentence`).

    Returns
    -------
    :class:`list` of :class:`dict`
    """
    to_text = str if getter is None else getter
    is_eligible = is_relevant_sentence if tester is None else tester
    raw_sentences = [sent for j in indices
                     for sent in make_sentences(to_text(source[j]))]
    seen = set()
    records = []
    for position, text in enumerate(raw_sentences):
        relevant = is_eligible(text)
        if relevant and text in seen:
            # Keep duplicate sentences in the list, but exclude them from
            # being summary candidates.
            relevant = False
        else:
            seen.add(text)
        records.append({'index': position,
                        'sentence': text,
                        'relevant': relevant,
                        'sanitized': sanitize_text(text) if relevant else ""})
    return records
# Default tuning knobs for the summarizer; each key is described in detail by
# the module-level docstring that follows this assignment.
default_summarizer_parameters = {
    'order': 'rank',
    'text_getter': None,  # None: fall back to the Corpus to_text
    'sentence_tester': is_relevant_sentence,
    'itf': True,
    'post_processing': lambda summa, i: summa.sentences_[i]['sentence'],  # default: return the raw sentence
    'sentence_gismo_parameters': {'post': False, 'resolution': .99},  # 'post' MUST stay False
    'num_documents': None,  # None values: auto-selected at run time
    'num_query': None,
    'num_sentences': None,
    'max_chars': None}
"""
List of parameters for the summarizer with their default values.
Parameters
-----------
order: :class:`str`
Sorting function.
text_getter: callable
Extraction of text from corpus item. If not specify, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
sentence_tester: callable
Function that estimates if a sentence is eligible to be part of the summary
itf: :class:`bool`
Use of ITF normalization in the sentence-level Gismo
post_processing: callable
post_processing transformation. Signature is (:class:`~sisu.summarizer.Summarizer`, :class:`int`) -> :class:`str`
sentence_gismo_parameters: :class:`dict`
Tuning of sentence-level gismo. `post` MUST be set to False.
num_documents: :class:`int` or None
Number of documents to pre-select
num_query: :class:`int` or None
Number of features to use in generic query
num_sentences: :class:`int` or None
Number of sentences to return
max_chars: :class:`int` or None
Maximal number of characters to return
"""
| 46.077895 | 161 | 0.646 |
559fa91e2cb3fcb7a60d3f0698d9ba9ef4cfe606 | 4,482 | py | Python | automr/bridge.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 5 | 2021-06-03T07:49:02.000Z | 2022-02-21T11:35:20.000Z | automr/bridge.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 2 | 2022-01-20T08:33:59.000Z | 2022-03-26T12:21:15.000Z | automr/bridge.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 1 | 2022-02-21T11:35:34.000Z | 2022-02-21T11:35:34.000Z | import numpy as np
import os
from automr import dump_mat
from functools import partial, reduce
print = partial(print, flush=True)
einsum = partial(np.einsum, optimize=True)
| 35.015625 | 86 | 0.51071 |
55a1b6b516c4d12eb63cdf47d747201063521f8c | 487 | py | Python | Example/playstore.py | goodop/api-imjustgood.com | 6406b531c4393fa8a4ace3c206d23895da915caf | [
"MIT"
] | 4 | 2021-01-01T10:20:13.000Z | 2021-11-08T09:32:54.000Z | Example/playstore.py | goodop/api-imjustgood.com | 6406b531c4393fa8a4ace3c206d23895da915caf | [
"MIT"
] | null | null | null | Example/playstore.py | goodop/api-imjustgood.com | 6406b531c4393fa8a4ace3c206d23895da915caf | [
"MIT"
] | 25 | 2021-01-09T18:22:32.000Z | 2021-05-29T07:42:06.000Z | from justgood import imjustgood
media = imjustgood("YOUR_APIKEY_HERE")
query = "gojek" # example query
data = media.playstore(query)
# Get attributes: format every search hit into a numbered text block.
# enumerate() replaces the hand-maintained counter, and joining a list of
# fragments avoids quadratic repeated string concatenation.
chunks = ["Playstore :"]
for number, a in enumerate(data["result"], start=1):
    chunks.append("\n\n{}. {}".format(number, a["title"]))
    chunks.append("\nDeveloper : {}".format(a["developer"]))
    chunks.append("\nThumbnail : {}".format(a["thumbnail"]))
    chunks.append("\nURL : {}".format(a["pageUrl"]))
result = "".join(chunks)
print(result)
# Get JSON results
print(data)
| 24.35 | 55 | 0.63655 |
55a448450ef16dcbbfd95d6484daa13257f8e1ca | 1,089 | py | Python | disjoint_set.py | Mt-Kunlun/Object-Saliency-Map-Atari | 759f7d9d2658626043f6b0e0dcaf8acd3c0e4655 | [
"MIT"
] | null | null | null | disjoint_set.py | Mt-Kunlun/Object-Saliency-Map-Atari | 759f7d9d2658626043f6b0e0dcaf8acd3c0e4655 | [
"MIT"
] | null | null | null | disjoint_set.py | Mt-Kunlun/Object-Saliency-Map-Atari | 759f7d9d2658626043f6b0e0dcaf8acd3c0e4655 | [
"MIT"
] | null | null | null | import numpy as np
# disjoint-set forests using union-by-rank and path compression (sort of).
| 27.923077 | 75 | 0.459137 |
55a528f7f755e76f01a1fec6c18655befd899209 | 131 | py | Python | Logon.py | fenglihanxiao/multi_test | 46ee84aaa36f1d9594ccf7a14caa167dfcd719d5 | [
"MIT"
] | null | null | null | Logon.py | fenglihanxiao/multi_test | 46ee84aaa36f1d9594ccf7a14caa167dfcd719d5 | [
"MIT"
] | null | null | null | Logon.py | fenglihanxiao/multi_test | 46ee84aaa36f1d9594ccf7a14caa167dfcd719d5 | [
"MIT"
] | null | null | null | num1 = 1
num2 = 20
num3 = 168
# dev first commit
num1 = 1
# resolve conflict
num2 = 88888888
# Test next commit
num3 = 99
| 8.1875 | 18 | 0.641221 |
55a5624a3d2ac28eb83b211136e77b9c0d5431d3 | 1,441 | py | Python | latteys/latteys/doctype/auto_mail.py | hrgadesha/lattyeys | 428b752ac99620ac7ad706fd305f07210bdcb315 | [
"MIT"
] | 1 | 2021-09-10T03:51:22.000Z | 2021-09-10T03:51:22.000Z | latteys/latteys/doctype/auto_mail.py | hrgadesha/lattyeys | 428b752ac99620ac7ad706fd305f07210bdcb315 | [
"MIT"
] | null | null | null | latteys/latteys/doctype/auto_mail.py | hrgadesha/lattyeys | 428b752ac99620ac7ad706fd305f07210bdcb315 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe
from datetime import datetime
from frappe.model.document import Document
| 65.5 | 277 | 0.687717 |
55a57c64b93ff64ee4143c416e8510e88ce162fa | 8,022 | py | Python | foulacces.py | Danukeru/FOULACCES | 54304c7a91326f9517c45f6981c4ab8de4eb3964 | [
"BSD-3-Clause"
] | 1 | 2019-10-21T23:43:21.000Z | 2019-10-21T23:43:21.000Z | foulacces.py | Danukeru/FOULACCES | 54304c7a91326f9517c45f6981c4ab8de4eb3964 | [
"BSD-3-Clause"
] | null | null | null | foulacces.py | Danukeru/FOULACCES | 54304c7a91326f9517c45f6981c4ab8de4eb3964 | [
"BSD-3-Clause"
] | 1 | 2019-10-21T23:43:29.000Z | 2019-10-21T23:43:29.000Z | #!/usr/bin/env python
import os
import sys
import hashlib
import httplib
import base64
import socket
from xml.dom.minidom import *
RAC_CODE = { 'x' : 'Unknown error',
'0x0' : 'Success',
'0x4' : 'Number of arguments does not match',
'0xc' : 'Syntax error in xml2cli command',
'0x408' : 'Session Timeout',
'0x43' : 'No such subfunction',
'0x62' : 'Command not supported on this platform for this firmware',
'0xb0002' : 'Invalid handle',
'0x140000' : 'Too many sessions',
'0x140002' : 'Logout',
'0x140004' : 'Invalid password',
'0x140005' : 'Invalid username',
'0x150008' : 'Too many requests',
'0x15000a' : 'No such event',
'0x15000c' : 'No such function',
'0x15000d' : 'Unimplemented',
'0x170003' : 'Missing content in POST ?',
'0x170007' : 'Dont know yet',
'0x1a0004' : 'Invalid sensorname',
'0x10150006' : 'Unknown sensor error',
'0x10150009' : 'Too many sensors in sensorlist',
'0x20308' : 'Console not available',
'0x30003' : 'Console not active',
'0x3000a' : 'Console is in text mode',
'0x3000b' : 'Console is in VGA graphic mode',
'0x30011' : [ 'Console is in Linux mode (no ctrl+alt+del)',
'Console is in Windows or Netware mode' ],
'0xe0003' : 'Unknown serveraction',
'0xf0001' : 'Offset exceeds number of entries in eventlog',
'0xf0003' : 'Request exceeds number of entries in eventlog',
'0xf0004' : 'Invalid number of events requested'
}
SEVERITY = { 'x' : 'Unknown severity. ',
'' : '-',
'0x1' : 'Unknown',
'0x2' : 'OK',
'0x3' : 'Information',
'0x4' : 'Recoverable',
'0x5' : 'Non-Critical',
'0x6' : 'Critical',
'0x7' : 'Non-Recoverable',
}
BOGUS_IDS_1650 = [ '0x1010018', '0x1020010', '0x1020018',
'0x1020062', '0x1030010', '0x1030018',
'0x1030062', '0x1040010', '0x1040018',
'0x1050018', '0x1060010', '0x1060018',
'0x1060062', '0x1070018', '0x1070062',
'0x1080010', '0x1080062', '0x1090010',
'0x10a0010', '0x10f0062', '0x1100010',
'0x1110010', '0x1120010', '0x1120062',
'0x1130010', '0x1140010', '0x1150010',
'0x13b0010', '0x13c0010', '0x13f0010',
'0x14b0010', '0x14d0010', '0x20e0062',
'0x2110062', '0x2160061', '0x2160062',
'0x2170061', '0x2170062', '0x2180061',
'0x2180062', '0x2190061', '0x2190062',
'0x21a0061', '0x21a0062', '0x21b0061',
'0x21b0062', '0x21e0010', '0x21e0061',
'0x21e0062', '0x21f0061', '0x21f0062',
'0x2210010', '0x2220010', '0x2230010',
'0x2240010', '0x2250010', '0x2260010',
'0x2270010', '0x2280010', '0x2290010',
'0x22a0010', '0x22b0010', '0x22c0010',
'0x22d0010', '0x22e0010', '0x22f0010',
'0x2300010', '0x2310010', '0x2320010',
'0x2330010', '0x2340010', '0x2350010',
'0x2360010', '0x2370010', '0x2380010',
'0x2390010', '0x23a0010', '0x23e0010',
'0x2410010', '0x2420010', '0x2430010',
'0x2440010', '0x2450010', '0x2460010',
'0x2470010', '0x2480010', '0x2530010',
]
BOGUS_IDS_2650 = [ '0x1350010', '0x1360010', '0x2160061',
'0x2170061', '0x2180061', '0x2190061',
'0x21a0061', '0x21b0061', '0x21c0061',
'0x21d0061', '0x21e0060', '0x21e0061',
'0x21f0060', '0x21f0061', '0x2d00010',
]
BOGUS_IDS_1750 = [ '0x1060062', '0x1070062', '0x1080062',
'0x10f0062', '0x1120062', '0x1030062',
'0x1020062', '0x20e0062', '0x2110062',
'0x2160062', '0x2170062', '0x2180062',
'0x2190062', '0x21a0062', '0x21b0062',
'0x21f0062', '0x21e0062', '0x2160061',
'0x2170061', '0x2180061', '0x2190061',
'0x21a0061', '0x21b0061', '0x21f0061',
'0x21e0061', '0x1010010', '0x1020010',
'0x1030010', '0x1040010', '0x1080010',
'0x1090010', '0x10a0010', '0x1100010',
'0x1110010', '0x1120010', '0x1130010',
'0x1140010', '0x1150010', '0x21e0010',
'0x2210010', '0x2220010', '0x2230010',
'0x2240010', '0x2250010', '0x2260010',
'0x2290010', '0x22a0010', '0x22b0010',
'0x22c0010', '0x22d0010', '0x22e0010',
'0x22f0010', '0x2300010', '0x2310010',
'0x2320010', '0x2330010', '0x2340010',
'0x2350010', '0x2360010', '0x2370010',
'0x2380010', '0x2390010', '0x23a0010',
'0x13b0010', '0x13c0010', '0x13f0010',
'0x2440010', '0x2450010', '0x2460010',
'0x2470010', '0x2480010', '0x14a0010',
'0x14d0010', '0x14e0010', '0x1500010',
'0x1510010', '0x2000010', '0x2570010',
'0x10f0060', '0x1120060', '0x1020060',
'0x1010018', '0x1020018', '0x1030018',
'0x1040018', '0x1050018', '0x1060018',
'0x1070018',
]
PROPNAMES = [ 'NAME',
'SEVERITY',
'LOW_CRITICAL',
'LOW_NON_CRITICAL',
'VAL',
'UNITS',
'UPPER_NON_CRITICAL',
'UPPER_CRITICAL',
'SENSOR_TYPE',
]
DRIVE_SLOT_CODES = { '0' : 'Good',
'1' : 'No Error',
'2' : 'Faulty Drive',
'4' : 'Drive Rebuilding',
'8' : 'Drive In Failed Array',
'16' : 'Drive In Critical Array',
'32' : 'Parity Check Error',
'64' : 'Predicted Error',
'128' : 'No Drive',
}
POWER_UNIT_CODES = { '0' : 'AC Power Unit',
'1' : 'DC Power Unit',
}
BUTTON_CODES = { '0' : 'Power Button Disabled',
'1' : 'Power Button Enabled'
}
FAN_CONTROL_CODES = { '0' : 'Normal Operation',
'1' : 'Unknown',
}
INTRUSION_CODES = { '0' : 'No Intrusion',
'1' : 'Cover Intrusion Detected',
'2' : 'Bezel Intrusion Detected',
}
POWER_SUPPLY_CODES = { '1' : 'Good',
'2' : 'Failure Detected',
'4' : 'Failure Predicted',
'8' : 'Power Lost',
'16' : 'Not Present',
}
PROCESSOR_CODES = { '1' : 'Good',
'2' : 'Failure Detected',
'4' : 'Failure Predicted',
'8' : 'Power Lost',
'16' : 'Not Present',
}
# Dispatch table mapping a lower-case sensor-type name to the status-code
# lookup table (defined above) used to decode that sensor's raw value.
CODES = { 'button'       : BUTTON_CODES,
          'drive slot'   : DRIVE_SLOT_CODES,
          'fan control'  : FAN_CONTROL_CODES,
          # Bug fix: the table is defined as INTRUSION_CODES above; the
          # previous reference to INSTRUSION_CODES raised NameError at import.
          'intrusion'    : INTRUSION_CODES,
          'power supply' : POWER_SUPPLY_CODES,
          'power unit'   : POWER_UNIT_CODES,
          'processor'    : PROCESSOR_CODES,
        }
| 40.11 | 87 | 0.446771 |
55a63e41c61dfc7f2803753c38bd275ef075fcb4 | 10,272 | py | Python | codes/3_derive_elementary_effects.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | 6 | 2019-09-27T15:38:37.000Z | 2021-02-03T13:58:01.000Z | codes/3_derive_elementary_effects.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | null | null | null | codes/3_derive_elementary_effects.py | aviolinist/EEE | 032e2029815229875048cc92dd7da24ff3f71e93 | [
"MIT"
] | 5 | 2019-09-27T15:38:52.000Z | 2022-03-22T17:24:37.000Z | #!/usr/bin/env python
from __future__ import print_function
# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca
#
# License
# This file is part of the EEE code library for "Computationally inexpensive identification
# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)".
#
# The EEE code library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The MVA code library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with The EEE code library.
# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.
#
# If you use this method in a publication please cite:
#
# M Cuntz & J Mai et al. (2015).
# Computationally inexpensive identification of noninformative model parameters by sequential screening.
# Water Resources Research, 51, 6417-6441.
# https://doi.org/10.1002/2015WR016907.
#
#
#
# python 3_derive_elementary_effects.py \
# -i example_ishigami-homma/model_output.pkl \
# -d example_ishigami-homma/parameters.dat \
# -m example_ishigami-homma/parameter_sets_1_para3_M.dat \
# -v example_ishigami-homma/parameter_sets_1_para3_v.dat \
# -o example_ishigami-homma/eee_results.dat
"""
Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i)
using specified model parameters (option -d). The model parameters were sampled beforehand as Morris
trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The
Elementary Effects are stored in a file (option -o).
History
-------
Written, JM, Mar 2019
"""
# -------------------------------------------------------------------------
# Command line arguments
#
modeloutputs = 'example_ishigami-homma/model_output.pkl'
modeloutputkey = 'All'
maskfile = 'example_ishigami-homma/parameters.dat'
morris_M = 'example_ishigami-homma/parameter_sets_1_para3_M.dat'
morris_v = 'example_ishigami-homma/parameter_sets_1_para3_v.dat'
outfile = 'example_ishigami-homma/eee_results.dat'
skip = None # number of lines to skip in Morris files
import optparse
parser = optparse.OptionParser(usage='%prog [options]',
description="Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i) using specified model parameters (option -d). The model parameters were sampled beforehand as Morris trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The Elementary Effects are stored in a file (option -o).")
parser.add_option('-i', '--modeloutputs', action='store',
default=modeloutputs, dest='modeloutputs', metavar='modeloutputs',
help="Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').")
parser.add_option('-k', '--modeloutputkey', action='store',
default=modeloutputkey, dest='modeloutputkey', metavar='modeloutputkey',
help="Key of model output dictionary stored in pickle output file. If 'All', all model outputs are taken into account and multi-objective EEE is applied. (default: 'All').")
parser.add_option('-d', '--maskfile', action='store', dest='maskfile', type='string',
default=maskfile, metavar='File',
help='Name of file where all model parameters are specified including their distribution, distribution parameters, default value and if included in analysis or not. (default: maskfile=parameters.dat).')
parser.add_option('-m', '--morris_M', action='store', dest='morris_M', type='string',
default=morris_M, metavar='morris_M',
help="Morris trajectory information: The UNSCALED parameter sets. (default: 'parameter_sets_1_para3_M.dat').")
parser.add_option('-v', '--morris_v', action='store', dest='morris_v', type='string',
default=morris_v, metavar='morris_v',
help="Morris trajectory information: The indicator which parameter changed between subsequent sets in a trajectory. (default: 'parameter_sets_1_para3_v.dat').")
parser.add_option('-s', '--skip', action='store',
default=skip, dest='skip', metavar='skip',
help="Number of lines to skip in Morris output files (default: None).")
parser.add_option('-o', '--outfile', action='store', dest='outfile', type='string',
default=outfile, metavar='File',
help='File containing Elementary Effect estimates of all model parameters listed in parameter information file. (default: eee_results.dat).')
(opts, args) = parser.parse_args()
modeloutputs = opts.modeloutputs
modeloutputkey = opts.modeloutputkey
maskfile = opts.maskfile
morris_M = opts.morris_M
morris_v = opts.morris_v
outfile = opts.outfile
skip = opts.skip
del parser, opts, args
# -----------------------
# add subolder scripts/lib to search path
# -----------------------
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path+'/lib')
import numpy as np
import pickle
from fsread import fsread # in lib/
from autostring import astr # in lib/
# -------------------------
# read parameter info file
# -------------------------
# parameter info file has following header:
# # para dist lower upper default informative(0)_or_noninformative(1)
# # mean stddev
nc, snc = fsread(maskfile, comment="#", cskip=1, snc=[0, 1], nc=[2, 3, 4, 5])
snc = np.array(snc)
para_name   = snc[:, 0]   # parameter names (string column 0)
para_dist   = snc[:, 1]   # distribution names (string column 1)
lower_bound = nc[:, 0]
upper_bound = nc[:, 1]
initial     = nc[:, 2]    # default parameter values
# if informative(0)    -> maskpara=False
# if noninformative(1) -> maskpara=True
mask_para = np.where((nc[:, 3].flatten()) == 1., True, False)
dims_all  = np.shape(mask_para)[0]
idx_para  = np.arange(dims_all)[mask_para]  # indexes of parameters which will be changed [0,npara-1]
dims      = np.sum(mask_para)               # number of analysed (non-masked) parameters
# pick only non-masked bounds
lower_bound_mask = lower_bound[np.where(mask_para)]
upper_bound_mask = upper_bound[np.where(mask_para)]
para_dist_mask   = para_dist[np.where(mask_para)]
para_name_mask   = para_name[np.where(mask_para)]
# -------------------------
# read model outputs
# -------------------------
# NOTE: pickle.load can execute arbitrary code while unpickling; only load
# model-output files from trusted sources.
# Fix: the file was previously opened without ever being closed (leaked
# handle); a context manager releases it deterministically.
with open(modeloutputs, "rb") as fh:
    model_output = pickle.load(fh)
if modeloutputkey == 'All':
    keys = list(model_output.keys())
else:
    keys = [modeloutputkey]
# Convert each selected output to a numpy array for vectorised differencing.
model_output = [np.array(model_output[ikey]) for ikey in keys]
nkeys = len(model_output)
# -------------------------
# read Morris M
# -------------------------
# UNSCALED Morris trajectory parameter sets.  When --skip is not given, the
# first line is expected to carry "<text>:<nlines>" naming how many header
# lines to skip.
with open(morris_M, "r") as ff:
    parasets = ff.readlines()
if skip is None:
    # NOTE: np.int(...) was replaced by the built-in int(...) throughout —
    # the np.int alias was deprecated in NumPy 1.20 and removed in 1.24.
    skip = int(parasets[0].strip().split(':')[1])
else:
    skip = int(skip)
parasets = parasets[skip:]
for iparaset, paraset in enumerate(parasets):
    parasets[iparaset] = list(map(float, paraset.strip().split()))
parasets = np.array(parasets)

# -------------------------
# read Morris v
# -------------------------
# Index of the parameter that changed between consecutive sets of a
# trajectory (-1 apparently marks sets where no parameter changed, i.e.
# trajectory boundaries — see the != -1 check in the accumulation loop).
with open(morris_v, "r") as ff:
    parachanged = ff.readlines()
if skip is None:
    skip = int(parachanged[0].strip().split(':')[1])
else:
    skip = int(skip)
parachanged = parachanged[skip:]
for iparachanged, parachan in enumerate(parachanged):
    parachanged[iparachanged] = int(parachan.strip())
parachanged = np.array(parachanged)

# -------------------------
# calculate Elementary Effects
# -------------------------
ee         = np.zeros([dims_all, nkeys], dtype=float)  # summed |dY|/|dX| per parameter/output
ee_counter = np.zeros([dims_all, nkeys], dtype=int)    # number of contributions per cell
ntraj      = int(np.shape(parasets)[0] / (dims + 1))   # trajectories = sets / (dims+1)
nsets      = np.shape(parasets)[0]
# Accumulate |Δoutput| / |Δparameter| for every pair of consecutive
# parameter sets within a trajectory.  parachanged[iset] == -1 apparently
# marks sets where no parameter changed (trajectory boundary) and is skipped.
for ikey in range(nkeys):
    for iset in range(nsets):
        ipara_changed = parachanged[iset]
        if ipara_changed != -1:
            ee_counter[ipara_changed,ikey] += 1
            if ( len(np.shape(model_output[ikey])) == 1):
                # scalar model output
                ee[ipara_changed,ikey] += np.abs(model_output[ikey][iset]-model_output[ikey][iset+1]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed])
            elif ( len(np.shape(model_output[ikey])) == 2):
                # 1D model output
                ee[ipara_changed,ikey] += np.mean(np.abs(model_output[ikey][iset,:]-model_output[ikey][iset+1,:]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed]))
            else:
                raise ValueError('Only scalar and 1D model outputs are supported!')
# Average the accumulated absolute differences to obtain the mean
# Elementary Effect per parameter and model output; parameters that never
# changed keep ee == 0 (counter stays 0).
for ikey in range(nkeys):
    for ipara in range(dims_all):
        if ee_counter[ipara,ikey] > 0:
            ee[ipara,ikey] /= ee_counter[ipara,ikey]
# -------------------------
# write final file
# -------------------------
# format:
# # model output #1: 'out1'
# # model output #2: 'out2'
# # ii para_name elemeffect(ii),ii=1:3,jj=1:1 counter(ii),ii=1:3,jj=1:1
# 1 'x_1' 0.53458196335158181 5
# 2 'x_2' 7.0822368906630215 5
# 3 'x_3' 3.5460086652980554 5
f = open(outfile, 'w')
for ikey in range(nkeys):
    f.write('# model output #'+str(ikey+1)+': '+keys[ikey]+'\n')
f.write('# ii para_name elemeffect(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' counter(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' \n')
for ipara in range(dims_all):
    # astr formats numbers as fixed-precision strings (lib/autostring.py).
    f.write(str(ipara)+' '+para_name[ipara]+' '+' '.join(astr(ee[ipara,:],prec=8))+' '+' '.join(astr(ee_counter[ipara,:]))+'\n')
f.close()
print("wrote: '"+outfile+"'")
| 43.897436 | 405 | 0.633178 |
55a64a7a3b06450aa004faf6e58c77885b9ba532 | 1,377 | py | Python | leetcode/medium/113-Path_sum_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | 1 | 2020-02-25T10:32:27.000Z | 2020-02-25T10:32:27.000Z | leetcode/medium/113-Path_sum_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | leetcode/medium/113-Path_sum_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | """
Leetcode #113
"""
from typing import List
# Demo driver: builds the sample binary tree from Leetcode #113 and prints
# the root-to-leaf paths whose node values sum to 22.
# NOTE(review): TreeNode and Solution are not defined in this excerpt —
# presumably defined earlier in the full file; confirm before running.
if __name__ == "__main__":
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.left.left = TreeNode(11)
    root.left.left.left = TreeNode(7)
    root.left.left.right = TreeNode(2)
    root.right = TreeNode(8)
    root.right.left = TreeNode(13)
    root.right.right = TreeNode(4)
    root.right.right.left = TreeNode(5)
    root.right.right.right = TreeNode(1)
    """
    Expected Tree
          5
         / \
        4   8
       /   / \
      11  13  4
     /  \    / \
    7    2  5   1
    """
    print(Solution().pathSum(root, 22))
| 19.394366 | 67 | 0.511256 |
55a6a32920fa2fc82181f6e01d6935314fa6f974 | 137 | py | Python | transiter_ny_mta/transiter_ny_mta/__init__.py | Pizza-Ratz/transiter-ny | 40091d3ff0c1b9e046b0d3ca708acb81df5019c6 | [
"MIT"
] | 1 | 2021-01-25T16:02:14.000Z | 2021-01-25T16:02:14.000Z | transiter_ny_mta/transiter_ny_mta/__init__.py | Pizza-Ratz/transiter-ny | 40091d3ff0c1b9e046b0d3ca708acb81df5019c6 | [
"MIT"
] | null | null | null | transiter_ny_mta/transiter_ny_mta/__init__.py | Pizza-Ratz/transiter-ny | 40091d3ff0c1b9e046b0d3ca708acb81df5019c6 | [
"MIT"
] | 1 | 2021-07-02T14:34:04.000Z | 2021-07-02T14:34:04.000Z | from .alertsparser import AlertsParser
from .subwaytripsparser import SubwayTripsParser
from .stationscsvparser import StationsCsvParser
| 34.25 | 48 | 0.890511 |
55a76346989d9cefd61701c39bcea10af1d5f5b9 | 4,254 | py | Python | main.py | MrValdez/ggj-2018 | d8806a47f561f54afd915d7b5e03181fbd2dbcfa | [
"MIT"
] | null | null | null | main.py | MrValdez/ggj-2018 | d8806a47f561f54afd915d7b5e03181fbd2dbcfa | [
"MIT"
] | null | null | null | main.py | MrValdez/ggj-2018 | d8806a47f561f54afd915d7b5e03181fbd2dbcfa | [
"MIT"
] | 1 | 2018-02-25T15:04:43.000Z | 2018-02-25T15:04:43.000Z | import os
import pygame
from input import Input
from stages.stage import Stage
from stages.stage_example import StageExample
from stages.stage1 import Stage1
from stages.stage2 import Stage2
from stages.stage3 import Stage3
from stages.stage4 import Stage4
from stages.stage5 import Stage5
from stages.stage6 import Stage6
from stages.stage7 import Stage7
from stages.stage8 import Stage8
from stages.stage9 import Stage9
from stages.stage10 import Stage10
from stages.stage11 import Stage11
from stages.stage12 import Stage12
from stages.stage13 import Stage13
from stages.stage14 import Stage14
from stages.stage15 import Stage15
from stages.stage16 import Stage16
from stages.stage17 import Stage17
from stages.stage18 import Stage18
from stages.stage19 import Stage19
from stages.stage20 import Stage20
from stages.stage21 import Stage21
from stages.stage22 import Stage22
from stages.stage23 import Stage23
from stages.stage24 import Stage24
from stages.stage25 import Stage25
from stages.stage26 import Stage26
from stages.stage27 import Stage27
from stages.stage28 import Stage28
from stages.stage29 import Stage29
from stages.stage30 import Stage30
from stages.stage31 import Stage31
from stages.stage32 import Stage32
from stages.stage_start import Stage_start
from stages.stage_end import Stage_end
from stages.stage_transition import Stage_transition
#os.environ['SDL_VIDEO_WINDOW_POS'] = "1, 0"
os.environ['SDL_VIDEO_WINDOW_POS'] = "100, 10"
resolution = [800, 600]
pygame.init()
pygame.mouse.set_visible(False)
pygame.display.set_caption("32 bits of delivery")
screen = pygame.display.set_mode(resolution)
clock = pygame.time.Clock()
GameIsRunning = True
input = Input()
stages = [
# StageExample(resolution),
# Stage1(resolution),
Stage_start(resolution),
Stage2(resolution), # have you tried turning it on and off again?
Stage29(resolution), # Button mash to transmit
Stage27(resolution), # Stop Spamming
Stage26(resolution), # Share love by petting
Stage8(resolution), # Two auth factor
Stage7(resolution), # USB connection
Stage16(resolution), # Poop
Stage18(resolution), # Upgrade PC
Stage9(resolution), # Dancing
Stage22(resolution), # Psychic transmission
Stage21(resolution), # Fix TV
Stage20(resolution), # Tune TV signal
Stage17(resolution), # Buy coffee
Stage25(resolution), # Share regrets
Stage23(resolution), # Send SMS
Stage13(resolution), # Love transmission!
Stage3(resolution), # chrome game
Stage15(resolution), # Clap to transmit noise
Stage19(resolution), # Sell trash
Stage14(resolution), # Find the strongest transmission
Stage28(resolution), # Game and Watch
Stage24(resolution), # Send Like
Stage6(resolution), # energize with coffee
Stage5(resolution), # crowd surfing game
Stage32(resolution), # transmit knowledge
Stage30(resolution), # transmit toothpaste
Stage31(resolution), # transmit toothpaste to teeth
Stage12(resolution), # Charge!
Stage11(resolution), # Space Defender
Stage4(resolution), # punching game
Stage10(resolution), # Ninja Turtle Van
Stage_end(resolution),
]
# add transtitions
updated_stages = []
for stage in stages:
updated_stages.append(stage)
updated_stages.append(Stage_transition(resolution))
stages = updated_stages
currentStage = 0
#currentStage = -2
# Main game loop: flip the previous frame, cap at 60 FPS, clear, process
# input events, then update and draw the current mini-game stage.
while GameIsRunning:
    pygame.display.flip()
    tick = clock.tick(60)
    screen.fill([0, 0, 0])

    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                GameIsRunning = False
        if event.type == pygame.QUIT:
            GameIsRunning = False

    # Bug fix: pygame.quit() used to be called inside the event loop, after
    # which the rest of the frame (input.update / stage update / draw) still
    # ran against the torn-down display and raised pygame.error.  Quit and
    # leave the loop before touching any pygame surface again.
    if not GameIsRunning:
        pygame.quit()
        break

    input.update()
    complete = stages[currentStage].update(input, tick)
    if complete:
        # Advance to the next stage (wrapping) and re-run its __init__ so a
        # replayed stage starts from a fresh state.
        currentStage = (currentStage + 1) % len(stages)
        stages[currentStage].__init__(resolution)
    stages[currentStage].draw(screen)