hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9a902b8d915c544016edb40caeb6192c52c369d | 3,914 | py | Python | hightime/util.py | tkrebes/hightime | 5312266c808556f11bde1725bc968564180df0f1 | [
"MIT"
] | null | null | null | hightime/util.py | tkrebes/hightime | 5312266c808556f11bde1725bc968564180df0f1 | [
"MIT"
] | null | null | null | hightime/util.py | tkrebes/hightime | 5312266c808556f11bde1725bc968564180df0f1 | [
"MIT"
] | null | null | null | import sys
from .sitimeunit import SITimeUnit
isPython3Compat = (sys.version_info.major == 3)
isPython36Compat = (isPython3Compat and (sys.version_info.minor >= 6))
def normalize_frac_seconds(a, b):
"""Returns 3-tuple containing (normalized frac_seconds for a, normalized
frac_seconds for b, most precise (smallest) frac_seconds_exponent between
both), where "normalized" is the frac_seconds multiplied to be equalivent
under the more precise frac_seconds_exponent.
Ex. a.frac_seconds = 10
a.frac_seconds_exponent = -1
b.frac_seconds = 12
b.frac_seconds_exponent = -2
returns: (100, 12, -2)
"""
# Lots of code to handle singular "second" as used in datetime and
# DateTime, and plural "seconds" as used in timedelta and
# TimeDelta...
if hasattr(a, "frac_second") and hasattr(a, "frac_second_exponent"):
a_frac_seconds = a.frac_second
a_frac_seconds_exponent = a.frac_second_exponent
elif hasattr(a, "frac_seconds") and hasattr(a, "frac_seconds_exponent"):
a_frac_seconds = a.frac_seconds
a_frac_seconds_exponent = a.frac_seconds_exponent
elif hasattr(a, "microsecond"):
a_frac_seconds = a.microsecond
a_frac_seconds_exponent = SITimeUnit.MICROSECONDS
elif hasattr(a, "microseconds"):
a_frac_seconds = a.microseconds
a_frac_seconds_exponent = SITimeUnit.MICROSECONDS
else:
raise TypeError("invalid type for a: %s" % type(a))
if hasattr(b, "frac_second") and hasattr(b, "frac_second_exponent"):
b_frac_seconds = b.frac_second
b_frac_seconds_exponent = b.frac_second_exponent
elif hasattr(b, "frac_seconds") and hasattr(b, "frac_seconds_exponent"):
b_frac_seconds = b.frac_seconds
b_frac_seconds_exponent = b.frac_seconds_exponent
elif hasattr(b, "microsecond"):
b_frac_seconds = b.microsecond
b_frac_seconds_exponent = SITimeUnit.MICROSECONDS
elif hasattr(b, "microseconds"):
b_frac_seconds = b.microseconds
b_frac_seconds_exponent = SITimeUnit.MICROSECONDS
else:
raise TypeError("invalid type for b: %s" % type(b))
if a_frac_seconds_exponent == b_frac_seconds_exponent:
return (a_frac_seconds, b_frac_seconds,
a_frac_seconds_exponent)
multiplier = 10 ** (abs(a_frac_seconds_exponent -
b_frac_seconds_exponent))
# a is more precise, multiply b
if a_frac_seconds_exponent < b_frac_seconds_exponent:
return (a_frac_seconds, b_frac_seconds * multiplier,
a_frac_seconds_exponent)
# b is more precise, multiply a
else:
return (a_frac_seconds * multiplier, b_frac_seconds,
b_frac_seconds_exponent)
def get_subsecond_component(frac_seconds, frac_seconds_exponent,
subsec_component_exponent, upper_exponent_limit):
"""Return the number of subseconds from frac_seconds *
(10**frac_seconds_exponent) corresponding to subsec_component_exponent that
does not exceed upper_exponent_limit.
For example:
If frac_seconds*(10**frac_seconds_exponent) is 0.1234567,
upper_exponent_limit is SITimeUnit.SECONDS, and subsec_component_exponent is
SITimeUnit.MICROSECONDS, 123456 would be returned.
If frac_seconds*(10**frac_seconds_exponent) is 0.123456789,
upper_exponent_limit is SITimeUnit.MICROSECONDS, and
subsec_component_exponent is SITimeUnit.NANOSECONDS, 789 would be returned.
Same example as above, but with upper_exponent_limit = SITimeUnit.SECONDS,
123456789 would be returned.
"""
total_subsecs = int(frac_seconds * (10 ** (frac_seconds_exponent -
subsec_component_exponent)))
return total_subsecs % (10 ** abs(subsec_component_exponent -
upper_exponent_limit))
| 42.543478 | 80 | 0.699796 |
c9a91a5cf9ffb0b7d6c657ce1005cb03ff51c2eb | 1,784 | py | Python | src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py | bellmast/supply-chain-simulation-environment | af797c1d057e216184727fdd934ebd372d90f4d5 | [
"Apache-2.0"
] | 26 | 2021-06-23T00:58:25.000Z | 2022-03-29T19:41:18.000Z | src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py | bellmast/supply-chain-simulation-environment | af797c1d057e216184727fdd934ebd372d90f4d5 | [
"Apache-2.0"
] | null | null | null | src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py | bellmast/supply-chain-simulation-environment | af797c1d057e216184727fdd934ebd372d90f4d5 | [
"Apache-2.0"
] | 13 | 2021-06-23T09:16:38.000Z | 2022-03-22T20:01:19.000Z | """
An agent representing the (retail) customer behavior following a Poisson distribution for demand.
"""
import networkx as nx
from scse.api.module import Agent
import numpy as np
import logging
logger = logging.getLogger(__name__)
| 35.68 | 98 | 0.623318 |
c9ab0ef6affb1be12f6d367b89eb7c08b1fd954b | 2,340 | py | Python | time_to_get_rewards.py | GJuceviciute/MineRL-2020 | 095ca6598b6a58120dcc5dcee05c995fc58d540a | [
"MIT"
] | 4 | 2021-03-23T21:12:57.000Z | 2021-07-03T16:22:01.000Z | time_to_get_rewards.py | GJuceviciute/MineRL-2020 | 095ca6598b6a58120dcc5dcee05c995fc58d540a | [
"MIT"
] | null | null | null | time_to_get_rewards.py | GJuceviciute/MineRL-2020 | 095ca6598b6a58120dcc5dcee05c995fc58d540a | [
"MIT"
] | null | null | null | import numpy as np
import os
from utils import MINERL_DATA_ROOT, CUMULATIVE_REWARDS
import sys
import pandas
def time_to_rewards(data_set, trajectory):
"""
Takes a data_set and a trajectory, and returns times (in ticks) to achieve each cumulative reward (from the last
cumulative reward, not from start).
:param data_set: data set name (for example: 'MineRLObtainDiamond-v0')
:param trajectory: trajectory path
:return: a list of times to achieve cumulative rewards
"""
doc = os.path.join(MINERL_DATA_ROOT, data_set, trajectory, 'rendered.npz')
f = np.load(doc)
rewards = list(f['reward'])
times = []
c = 0
sum_rew = 0
for i in range(len(rewards)):
while rewards[i] + sum_rew >= CUMULATIVE_REWARDS[c]:
times.append(i)
c += 1
sum_rew += rewards[i]
time_periods = [times[i] - times[i - 1] for i in range(1, len(times))]
return time_periods
if __name__ == "__main__":
main()
| 34.925373 | 117 | 0.624359 |
c9ab86e75a48317e7194cbc265fb079c04b726b2 | 2,224 | py | Python | ooobuild/lo/document/x_view_data_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/document/x_view_data_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/document/x_view_data_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..container.x_index_access import XIndexAccess as XIndexAccess_f0910d6d
__all__ = ['XViewDataSupplier']
| 41.185185 | 341 | 0.747752 |
c9ab9f36bed5aec87ea9576f35b5c24866acecd2 | 634 | py | Python | pyflarum/client/extensions/flarum/FoF_PreventNecrobumping.py | CWKevo/pyflarum | bdf162a6c94e3051843ec7299a4302054927498a | [
"MIT"
] | 9 | 2021-06-23T21:26:29.000Z | 2021-11-16T13:25:34.000Z | pyflarum/client/extensions/flarum/FoF_PreventNecrobumping.py | CWKevo/pyflarum | bdf162a6c94e3051843ec7299a4302054927498a | [
"MIT"
] | 3 | 2021-09-11T00:08:14.000Z | 2022-02-07T15:34:27.000Z | pyflarum/client/extensions/flarum/FoF_PreventNecrobumping.py | CWKevo/pyFlarum | 2c4e17a16b00367f140c3436f7a9148072ddd2d3 | [
"MIT"
] | 1 | 2021-08-18T12:45:14.000Z | 2021-08-18T12:45:14.000Z | import typing as t
from ....extensions import ExtensionMixin
from ...flarum.core.discussions import DiscussionFromBulk
| 23.481481 | 77 | 0.712934 |
c9abcbc9f24259365718e0b6fb124db1e9b1a358 | 30,988 | py | Python | gsflow_prep/gsflow_model_prep.py | dgketchum/MT_RSense | 0048c1ccb1ff6e48bd630edd477f95ae29fea06d | [
"Apache-2.0"
] | null | null | null | gsflow_prep/gsflow_model_prep.py | dgketchum/MT_RSense | 0048c1ccb1ff6e48bd630edd477f95ae29fea06d | [
"Apache-2.0"
] | null | null | null | gsflow_prep/gsflow_model_prep.py | dgketchum/MT_RSense | 0048c1ccb1ff6e48bd630edd477f95ae29fea06d | [
"Apache-2.0"
] | null | null | null | import os
import json
from copy import copy
from subprocess import call, Popen, PIPE, STDOUT
import time
import numpy as np
import pandas as pd
from pyproj import Transformer
import rasterio
import fiona
from affine import Affine
from shapely.geometry import shape
from scipy.ndimage.morphology import binary_erosion
from pandas.plotting import register_matplotlib_converters
import matplotlib
import matplotlib.pyplot as plt
import flopy
from flopy.utils import GridIntersect
import richdem as rd
from gsflow.builder import GenerateFishnet, FlowAccumulation, PrmsBuilder, ControlFileBuilder
from gsflow.builder.builder_defaults import ControlFileDefaults
from gsflow.builder import builder_utils as bu
from gsflow.prms.prms_parameter import ParameterRecord
from gsflow.prms import PrmsData, PrmsParameters
from gsflow.control import ControlFile
from gsflow.output import StatVar
from model_config import PRMSConfig
from gsflow_prep import PRMS_NOT_REQ
from datafile import write_basin_datafile
register_matplotlib_converters()
pd.options.mode.chained_assignment = None
# RichDEM flow-direction coordinate system:
# 234
# 105
# 876
d8_map = {5: 1, 6: 2, 7: 4, 8: 8, 1: 16, 2: 32, 3: 64, 4: 128}
def features(shp):
with fiona.open(shp, 'r') as src:
return [f for f in src]
def plot_stats(stats):
fig, ax = plt.subplots(figsize=(16, 6))
ax.plot(stats.Date, stats.basin_cfs_1, color='r', linewidth=2.2, label="simulated")
ax.plot(stats.Date, stats.runoff_1, color='b', linewidth=1.5, label="measured")
ax.legend(bbox_to_anchor=(0.25, 0.65))
ax.set_xlabel("Date")
ax.set_ylabel("Streamflow, in cfs")
# ax.set_ylim([0, 2000])
# plt.savefig('/home/dgketchum/Downloads/hydrograph.png')
plt.show()
plt.close()
if __name__ == '__main__':
matplotlib.use('TkAgg')
conf = './model_files/uyws_parameters.ini'
stdout_ = '/media/research/IrrigationGIS/Montana/upper_yellowstone/gsflow_prep/uyws_carter_1000/out.txt'
prms_build = StandardPrmsBuild(conf)
prms_build.build_model_files()
prms = MontanaPrmsModel(prms_build.control_file,
prms_build.parameter_file,
prms_build.data_file)
prms.run_model(stdout_)
stats = prms.get_statvar()
plot_stats(stats)
pass
# ========================= EOF ====================================================================
| 41.932341 | 108 | 0.538499 |
c9ad6518f65c2c95cc6e2e31c3f0906ae816c864 | 1,147 | py | Python | Interview Questions/readmail.py | Shivams9/pythoncodecamp | e6cd27f4704a407ee360414a8c9236b254117a59 | [
"MIT"
] | 6 | 2021-08-04T08:15:22.000Z | 2022-02-02T11:15:56.000Z | Interview Questions/readmail.py | Shivams9/pythoncodecamp | e6cd27f4704a407ee360414a8c9236b254117a59 | [
"MIT"
] | 14 | 2021-08-02T06:28:00.000Z | 2022-03-25T10:44:15.000Z | Interview Questions/readmail.py | Shivams9/pythoncodecamp | e6cd27f4704a407ee360414a8c9236b254117a59 | [
"MIT"
] | 6 | 2021-07-16T04:56:41.000Z | 2022-02-16T04:40:06.000Z | # Importing libraries
import imaplib, email
user = 'vsjtestmail@gmail.com'
password = 'TestMa1lPass'
imap_url = 'imap.gmail.com'
# Function to get email content part i.e its body part
# Function to search for a key value pair
# Function to get the list of emails under this label
# this is done to make SSL connection with GMAIL
con = imaplib.IMAP4_SSL(imap_url)
# logging the user in
con.login(user, password)
# calling function to check for email under this label
con.select('Inbox')
# fetching emails from this user "tu**h*****1@gmail.com"
msgs = get_emails(search('FROM', 'champaksworld@gmail.com', con))
# Uncomment this to see what actually comes as data
print(msgs)
print(type(msgs))
print(len(msgs))
| 24.934783 | 65 | 0.727114 |
c9af346978608c3c30e9cd43ee6263e02cda79fe | 5,695 | py | Python | openstack_dashboard/dashboards/admin/rbac_policies/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/dashboards/admin/rbac_policies/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 26 | 2015-02-23T16:37:31.000Z | 2020-07-02T08:37:41.000Z | openstack_dashboard/dashboards/admin/rbac_policies/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
| 39.275862 | 78 | 0.657419 |
c9af8bee70751d27aa98a3e3c87e41286832285c | 522 | py | Python | config.py | navidsalehi/blockchain | 0add1e6e4898097360cafd006e391d1b8735da08 | [
"MIT"
] | 2 | 2021-11-30T05:16:39.000Z | 2021-12-01T10:13:29.000Z | config.py | navidsalehi/blockchain | 0add1e6e4898097360cafd006e391d1b8735da08 | [
"MIT"
] | null | null | null | config.py | navidsalehi/blockchain | 0add1e6e4898097360cafd006e391d1b8735da08 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
| 26.1 | 52 | 0.731801 |
c9b0a85450199612c6bc6f56c812cbb9f71f501d | 3,585 | py | Python | legacy/text_classification/utils.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 4 | 2020-01-04T13:15:02.000Z | 2021-07-21T07:50:02.000Z | legacy/text_classification/utils.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 2 | 2019-06-26T03:21:49.000Z | 2019-09-19T09:43:42.000Z | legacy/text_classification/utils.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 3 | 2019-10-31T07:18:49.000Z | 2020-01-13T03:18:39.000Z | import logging
import os
import argparse
from collections import defaultdict
logger = logging.getLogger("paddle")
logger.setLevel(logging.INFO)
| 33.194444 | 80 | 0.577406 |
c9b413225370fcaafee9296e6fca98be93952f44 | 2,188 | py | Python | cards/views.py | KrTG/CardLabeling | 8d267cf5d2dcc936005850a8f791115b3f716c92 | [
"Apache-2.0"
] | null | null | null | cards/views.py | KrTG/CardLabeling | 8d267cf5d2dcc936005850a8f791115b3f716c92 | [
"Apache-2.0"
] | null | null | null | cards/views.py | KrTG/CardLabeling | 8d267cf5d2dcc936005850a8f791115b3f716c92 | [
"Apache-2.0"
] | null | null | null | from .models import Card
from .helpers import fetch_unidentified, populate_db
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
import json
| 30.388889 | 64 | 0.55713 |
c9b43f16dd23711b256eacbc743cd82a999578fd | 2,439 | py | Python | cptm/experiment_calculate_perplexity.py | egpbos/cptm | c5f310858c341040b4afd166cf628aeee6845159 | [
"Apache-2.0"
] | 13 | 2016-03-14T14:58:04.000Z | 2020-11-03T22:48:59.000Z | cptm/experiment_calculate_perplexity.py | egpbos/cptm | c5f310858c341040b4afd166cf628aeee6845159 | [
"Apache-2.0"
] | 5 | 2015-10-30T12:34:16.000Z | 2017-10-27T04:55:07.000Z | cptm/experiment_calculate_perplexity.py | egpbos/cptm | c5f310858c341040b4afd166cf628aeee6845159 | [
"Apache-2.0"
] | 3 | 2016-03-03T10:49:05.000Z | 2018-02-03T14:36:59.000Z | """Calculate opinion perplexity for different numbers of topics
Calclulate opinion perplexity for the test set as described in [Fang et al.
2012] section 5.1.1.
This script should be run after experiment_number_of_topics.py.
Usage: python cptm/experiment_calculate_perplexity.py /path/to/experiment.json.
"""
import pandas as pd
import logging
from multiprocessing import Pool
import argparse
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nPerplexity = [0] + range(9, config.get('nIter')+1, 10)
# calculate perplexity
pool = Pool(processes=config.get('nProcesses'))
results = [pool.apply_async(calculate_perplexity, args=(config, corpus,
nPerplexity, n))
# reverse list, so longest calculation is started first
for n in nTopics[::-1]]
pool.close()
pool.join()
# aggrate and save results
data = [p.get() for p in results]
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for result in data:
for n, s, tw_perp, ow_perp in result:
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
outDir = config.get('outDir')
logger.info('writing perplexity results to {}'.format(outDir.format('')))
topic_perp.to_csv(outDir.format('perplexity_topic.csv'))
opinion_perp.to_csv(outDir.format('perplexity_opinion.csv'))
| 33.410959 | 79 | 0.727347 |
c9b4d11f803a768b9c496032b0ea1a63387421c9 | 133 | py | Python | app/services/v1/healthcheck.py | rvmoura96/flask-template | d1383be7e17bff580e3ddf61ae580271c30201c4 | [
"MIT"
] | 2 | 2019-09-25T19:19:11.000Z | 2019-10-08T01:05:35.000Z | app/services/v1/healthcheck.py | rvmoura96/flask-template | d1383be7e17bff580e3ddf61ae580271c30201c4 | [
"MIT"
] | 10 | 2019-09-13T23:41:42.000Z | 2020-05-10T21:12:32.000Z | app/services/v1/healthcheck.py | rvmoura96/flask-template | d1383be7e17bff580e3ddf61ae580271c30201c4 | [
"MIT"
] | 9 | 2019-09-30T15:26:23.000Z | 2020-09-28T23:36:25.000Z | from flask_restful import Resource
import app
from app.services.healthcheck import HealthApi
| 19 | 46 | 0.827068 |
c9b5574ee7cafcbc4a7c1273ed0bb1bc35434615 | 819 | py | Python | Eulers method.py | pramotharun/Numerical-Methods-with-Python | bd5676bcc4ac5defd13608728df2387b5fdcdfcb | [
"MIT"
] | null | null | null | Eulers method.py | pramotharun/Numerical-Methods-with-Python | bd5676bcc4ac5defd13608728df2387b5fdcdfcb | [
"MIT"
] | null | null | null | Eulers method.py | pramotharun/Numerical-Methods-with-Python | bd5676bcc4ac5defd13608728df2387b5fdcdfcb | [
"MIT"
] | null | null | null | #Eulers method
import numpy as np
#Note: change the derivative function based on question!!!!!! Example: y-x
y0 = 0.5 #float(input"what is the y(0)?")
h = 0.1 #float(input"h?")
x_final = 0.3 #float(input"x_final")
#initiating input variables
x = 0
y = y0
# remember to change yn+1 and xn+1 values if you already know them!!!
ynew = 0
xnew = 0
i = 0
#####################################################
iterations = x_final/h
while x <= x_final:
derivative_of_y = dy(ynew,xnew,y,x,h)
xnew = x + h
ynew = y + (xnew - x)*(derivative_of_y)
print("iteration: ____ ")
print(i)
print("\n")
print("x = ")
print(xnew)
print("\n")
print("y = ")
print(ynew)
x = xnew
y = ynew
i+=1
| 19.5 | 76 | 0.543346 |
c9b764a791904b90c564bbc7b72661cf5b307b36 | 18,896 | py | Python | modules/network_dictionary_builder.py | shartzog/CovidCNN | 68bafe185c53f98b896ee01fdcf99f828f251036 | [
"MIT"
] | null | null | null | modules/network_dictionary_builder.py | shartzog/CovidCNN | 68bafe185c53f98b896ee01fdcf99f828f251036 | [
"MIT"
] | null | null | null | modules/network_dictionary_builder.py | shartzog/CovidCNN | 68bafe185c53f98b896ee01fdcf99f828f251036 | [
"MIT"
] | null | null | null | """
Contains Net and NetDictionary class for creating a random collection of CNN structures
or loading a previously created collection.
"""
from __future__ import division, print_function
from random import random
import os.path
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import numpy as np
from numpy.random import randint as r_i
from tqdm import tqdm
DEBUG = False #prints tensor size after each network layer during network creation
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
torch.cuda.set_device(0)
else:
print('**** CUDA not available - continuing with CPU ****')
#global classes
| 48.953368 | 116 | 0.556467 |
c9b7d5c05e7bdbe4c159664bc93dea9d1f8df223 | 1,917 | py | Python | src/vmshepherd/app.py | DreamLab/VmShepherd | f602bb814080d2d3f62c6cb5fa6b9dd685833c24 | [
"Apache-2.0"
] | 10 | 2018-06-10T17:54:57.000Z | 2022-02-07T19:37:07.000Z | src/vmshepherd/app.py | DreamLab/VmShepherd | f602bb814080d2d3f62c6cb5fa6b9dd685833c24 | [
"Apache-2.0"
] | 10 | 2018-06-10T18:46:07.000Z | 2021-05-13T13:01:22.000Z | src/vmshepherd/app.py | DreamLab/VmShepherd | f602bb814080d2d3f62c6cb5fa6b9dd685833c24 | [
"Apache-2.0"
] | 3 | 2019-07-18T14:10:10.000Z | 2022-02-07T19:37:08.000Z | import asyncio
import logging
import os
from vmshepherd.drivers import Drivers
from vmshepherd.http import WebServer
from vmshepherd.utils import gen_id, prefix_logging
from vmshepherd.worker import Worker
| 32.491525 | 96 | 0.639019 |
c9b84e52e79d954ea22decc10bdcb695a3cc56e1 | 1,762 | py | Python | opsdroid/connector/slack/events.py | himanshu1root/opsdroid | 26699c5e7cc014a0d3ab74baf66fbadce939ab73 | [
"Apache-2.0"
] | 1 | 2020-04-29T20:44:44.000Z | 2020-04-29T20:44:44.000Z | opsdroid/connector/slack/events.py | himanshu1root/opsdroid | 26699c5e7cc014a0d3ab74baf66fbadce939ab73 | [
"Apache-2.0"
] | 10 | 2019-06-22T11:18:55.000Z | 2019-09-03T13:26:47.000Z | opsdroid/connector/slack/events.py | himanshu1root/opsdroid | 26699c5e7cc014a0d3ab74baf66fbadce939ab73 | [
"Apache-2.0"
] | 1 | 2019-06-11T22:30:49.000Z | 2019-06-11T22:30:49.000Z | """Classes to describe different kinds of Slack specific event."""
import json
from opsdroid.events import Message
| 39.155556 | 79 | 0.648127 |
c9b8a09501b36968a133bb1816fb52f2dd36b599 | 42 | py | Python | examples/modules/object_tracker/__init__.py | jagin/dvg-utils | a7d19ead75398b09a9f1e146464cf4227f06a476 | [
"MIT"
] | 7 | 2020-09-02T08:39:22.000Z | 2021-10-13T18:13:04.000Z | examples/modules/object_tracker/__init__.py | jagin/dvg-utils | a7d19ead75398b09a9f1e146464cf4227f06a476 | [
"MIT"
] | null | null | null | examples/modules/object_tracker/__init__.py | jagin/dvg-utils | a7d19ead75398b09a9f1e146464cf4227f06a476 | [
"MIT"
] | null | null | null | from .object_tracker import ObjectTracker
| 21 | 41 | 0.880952 |
c9b95e3837c6ec2e9141c7cfae3e53054b21d5b5 | 3,449 | py | Python | src/train.py | SYHPARK/MalConv-keras | 2b68ba82e2201290130bed6d58f5725b17a87867 | [
"MIT"
] | null | null | null | src/train.py | SYHPARK/MalConv-keras | 2b68ba82e2201290130bed6d58f5725b17a87867 | [
"MIT"
] | null | null | null | src/train.py | SYHPARK/MalConv-keras | 2b68ba82e2201290130bed6d58f5725b17a87867 | [
"MIT"
] | null | null | null | from os.path import join
import argparse
import pickle
import warnings
import pandas as pd
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
import utils
from malconv import Malconv
from preprocess import preprocess
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Malconv-keras classifier training')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--limit', type=float, default=0., help="limit gpy memory percentage")
parser.add_argument('--max_len', type=int, default=200000, help="model input legnth")
parser.add_argument('--win_size', type=int, default=500)
parser.add_argument('--val_size', type=float, default=0.1, help="validation percentage")
parser.add_argument('--save_path', type=str, default='../saved/', help='Directory to save model and log')
parser.add_argument('--model_path', type=str, default='../saved/malconv.h5', help="model to resume")
parser.add_argument('--save_best', action='store_true', help="Save model with best validation accuracy")
parser.add_argument('--resume', action='store_true')
parser.add_argument('csv', type=str)
if __name__ == '__main__':
args = parser.parse_args()
# limit gpu memory
if args.limit > 0:
utils.limit_gpu_memory(args.limit)
print("[*] Flag0")
# prepare model
if args.resume:
model = load_model(args.model_path)
else:
model = Malconv(args.max_len, args.win_size)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print("[*] Flag1")
# prepare data
# preprocess is handled in utils.data_generator
df = pd.read_csv(args.csv, header=None)
data, label = df[0].values, df[1].values
x_train, x_test, y_train, y_test = utils.train_test_split(data, label, args.val_size)
print('Train on %d data, test on %d data' % (len(x_train), len(x_test)))
print("[*] Flag2")
history = train(model, args.max_len, args.batch_size, args.verbose, args.epochs, args.save_path, args.save_best)
print("[*] Flag3")
with open(join(args.save_path, 'history.pkl'), 'wb') as f:
pickle.dump(history.history, f)
| 40.104651 | 116 | 0.684836 |
c9bc28a5d5d38a212f5e0f03eba96a2a3f217595 | 1,870 | py | Python | unittesting.py | slobbishbody/routegetter | b6c279c1734530fd2aec08da9317575b66150092 | [
"MIT"
] | null | null | null | unittesting.py | slobbishbody/routegetter | b6c279c1734530fd2aec08da9317575b66150092 | [
"MIT"
] | null | null | null | unittesting.py | slobbishbody/routegetter | b6c279c1734530fd2aec08da9317575b66150092 | [
"MIT"
] | null | null | null | '''We will test all routegetter methods in this test suite'''
from os.path import join, abspath, sep
import unittest
import logging
import routesparser
from faker import Faker
LOG_FILE = join(sep.join(sep.split(abspath(__file__))[:-1]), 'log', 'testing', 'testing.log')
if __name__ == '__main__':
logging.basicConfig(filename=LOG_FILE, filemode='w')
unittest.main()
| 33.392857 | 93 | 0.673797 |
c9bca9a508473ab1f3f0748890578a6eb5bddb04 | 710 | py | Python | setup.py | thetongs/hello_world | 5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846 | [
"MIT"
] | null | null | null | setup.py | thetongs/hello_world | 5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846 | [
"MIT"
] | null | null | null | setup.py | thetongs/hello_world | 5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846 | [
"MIT"
] | null | null | null | from setuptools import setup
VERSION = '0.0.4'
DESCRIPTION = 'Hello world checking'
# Setting up
setup(
name="hello_world",
version=VERSION,
author="Kishan Tongrao",
author_email="kishan.tongs@gmail.com",
description=DESCRIPTION,
long_description_content_type="text/markdown",
packages=['hello_world'],
include_package_data=True,
install_requires=[],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
) | 28.4 | 52 | 0.625352 |
c9bd340296dec5cc98f4fa44de42146d4f90d4d2 | 123 | py | Python | python/basic_utils.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 17 | 2020-04-28T09:18:28.000Z | 2021-12-28T08:38:00.000Z | python/basic_utils.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T04:10:51.000Z | 2022-03-31T05:28:25.000Z | python/basic_utils.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T05:06:17.000Z | 2021-12-14T16:25:06.000Z | import hashlib
| 20.5 | 88 | 0.617886 |
c9bfafc48a06f70a20cac8ad26dc2486eeccba0a | 4,354 | py | Python | MSspeechAPI_class.py | houhry/AutosubBehindWall | 3903ee457d90c31db9a39957ad06468d556023ee | [
"MIT"
] | 14 | 2019-03-14T03:12:25.000Z | 2020-12-23T14:28:05.000Z | MSspeechAPI_class.py | houhry/AutosubBehindWall | 3903ee457d90c31db9a39957ad06468d556023ee | [
"MIT"
] | null | null | null | MSspeechAPI_class.py | houhry/AutosubBehindWall | 3903ee457d90c31db9a39957ad06468d556023ee | [
"MIT"
] | 6 | 2019-03-12T03:46:14.000Z | 2021-12-11T13:55:47.000Z | # -*- coding:utf-8 -*-
import certifi
import pycurl
import requests
import os
import json
import uuid
from StringIO import StringIO
#---------------------
#
#CSDN
#https://blog.csdn.net/joyjun_1/article/details/52563713
# | 36.588235 | 124 | 0.589113 |
c9c3b89ba882bb0aa6a88e4ec97e3255252d24db | 1,565 | py | Python | projects/gettingStarted/CannyStill.py | lucasbrsa/OpenCV3.2 | b6db40bd43dce7847dce1a808fd29bb1b140dea3 | [
"MIT"
] | null | null | null | projects/gettingStarted/CannyStill.py | lucasbrsa/OpenCV3.2 | b6db40bd43dce7847dce1a808fd29bb1b140dea3 | [
"MIT"
] | null | null | null | projects/gettingStarted/CannyStill.py | lucasbrsa/OpenCV3.2 | b6db40bd43dce7847dce1a808fd29bb1b140dea3 | [
"MIT"
] | null | null | null | # CannyStill.py
import cv2
import numpy as np
import os
###################################################################################################
###################################################################################################
if __name__ == "__main__":
main()
| 42.297297 | 124 | 0.502236 |
c9c4069353131d5a64d6da6f767d0fbe4eba61e3 | 212 | py | Python | examples/timeseries_from_dataframe.py | yarikoptic/seaborn | ed4baa32267cc4a44abb40dc243ae75a1d180e85 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2016-01-25T16:54:25.000Z | 2020-05-01T15:15:30.000Z | examples/timeseries_from_dataframe.py | PureW/seaborn | f400d86002c6d4b2d67eb9740adad908e84f8328 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-06-23T16:40:53.000Z | 2021-06-23T16:40:53.000Z | examples/timeseries_from_dataframe.py | PureW/seaborn | f400d86002c6d4b2d67eb9740adad908e84f8328 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2019-04-02T19:52:25.000Z | 2021-07-06T21:17:27.000Z | """
Timeseries from DataFrame
=========================
"""
import seaborn as sns
sns.set(style="darkgrid")
gammas = sns.load_dataset("gammas")
sns.tsplot(gammas, "timepoint", "subject", "ROI", "BOLD signal")
| 17.666667 | 64 | 0.617925 |
c9c71fc7edb4fb9a65e3ae3dd552c204669f2537 | 533 | py | Python | bc/ed/definition.py | ajah/represent-canada-data | f79092442767faa0b4babe50a377408e8576c8c4 | [
"OML"
] | null | null | null | bc/ed/definition.py | ajah/represent-canada-data | f79092442767faa0b4babe50a377408e8576c8c4 | [
"OML"
] | null | null | null | bc/ed/definition.py | ajah/represent-canada-data | f79092442767faa0b4babe50a377408e8576c8c4 | [
"OML"
] | null | null | null | from datetime import date
import boundaries
boundaries.register('British Columbia electoral districts',
domain='British Columbia',
last_updated=date(2011, 12, 12),
name_func=boundaries.attr('edname'),
id_func=boundaries.attr('edabbr'),
authority='Elections BC',
source_url='http://www.elections.bc.ca/index.php/voting/electoral-maps-profiles/geographic-information-system-spatial-data-files-2011/',
data_url='http://www.elections.bc.ca/docs/map/redis11/GIS/ED_Province.exe',
encoding='iso-8859-1',
) | 38.071429 | 140 | 0.744841 |
c9c93db3dbc0d8e8af8b81d596af15d7ca55058b | 2,228 | py | Python | src/cms/medias/hooks.py | UniversitaDellaCalabria/uniCMS | b0af4e1a767867f0a9b3c135a5c84587e713cb71 | [
"Apache-2.0"
] | 6 | 2021-01-26T17:22:53.000Z | 2022-02-15T10:09:03.000Z | src/cms/medias/hooks.py | UniversitaDellaCalabria/uniCMS | b0af4e1a767867f0a9b3c135a5c84587e713cb71 | [
"Apache-2.0"
] | 5 | 2020-12-24T14:29:23.000Z | 2021-08-10T10:32:18.000Z | src/cms/medias/hooks.py | UniversitaDellaCalabria/uniCMS | b0af4e1a767867f0a9b3c135a5c84587e713cb71 | [
"Apache-2.0"
] | 2 | 2020-12-24T14:13:39.000Z | 2020-12-30T16:48:52.000Z | import logging
import magic
import os
from cms.medias.utils import get_file_type_size
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from . import settings as app_settings
from . utils import to_webp
logger = logging.getLogger(__name__)
FILETYPE_IMAGE = getattr(settings, 'FILETYPE_IMAGE',
app_settings.FILETYPE_IMAGE)
| 32.289855 | 91 | 0.609964 |
c9ca952daadbee6b22e4cb8d53f17d4f335031b8 | 222 | py | Python | accounts/urls.py | afg984/happpycoding | d881de4d70abea3062928454d55dcc816d37b3a5 | [
"MIT"
] | 6 | 2015-11-28T13:34:38.000Z | 2016-10-11T11:59:50.000Z | accounts/urls.py | afg984/happpycoding | d881de4d70abea3062928454d55dcc816d37b3a5 | [
"MIT"
] | null | null | null | accounts/urls.py | afg984/happpycoding | d881de4d70abea3062928454d55dcc816d37b3a5 | [
"MIT"
] | null | null | null | from django_fsu import url
from . import views
urlpatterns = [
url('login/', views.login, name='login'),
url('logout/', views.logout, name='logout'),
url('profile/<int:pk>', views.profile, name='profile'),
]
| 22.2 | 59 | 0.648649 |
c9cb7becdd0922382057eaf8cf2a26ecd9e3c101 | 747 | py | Python | setup.py | Coldwave96/PocLibrary | 4bdc069c257f441379e9fd428ac8df7d4f5e9ca9 | [
"Apache-2.0"
] | 11 | 2020-08-24T03:31:23.000Z | 2022-01-15T12:19:31.000Z | setup.py | Coldwave96/PocLibrary | 4bdc069c257f441379e9fd428ac8df7d4f5e9ca9 | [
"Apache-2.0"
] | null | null | null | setup.py | Coldwave96/PocLibrary | 4bdc069c257f441379e9fd428ac8df7d4f5e9ca9 | [
"Apache-2.0"
] | 3 | 2020-08-24T03:31:28.000Z | 2021-09-19T14:54:46.000Z | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['PocLibrary.py']
APP_NAME = "PocLibrary"
DATA_FILES = []
OPTIONS = {
'iconfile': 'logo.icns',
'plist': {
'CFBundleName': APP_NAME,
'CFBundleDisplayName': APP_NAME,
'CFBundleGetInfoString': "Personal Poc Library",
'CFBundleVersion': "1.0",
'CFBundleShortVersionString': "1.0",
'NSHumanReadableCopyright': u"Copyright 2020, Coldsnap, All Rights Reserved"
},
'packages': ['wx','pyperclip'],
'resources': 'Library'
}
setup(
name=APP_NAME,
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| 21.970588 | 86 | 0.631861 |
c9cba2d718dc17bd9bd34864d1e448f3f16a9751 | 8,840 | py | Python | tests/thumbor.py | hurbcom/libthumbor | 8362f08071ed1ce345be59713825844808873a80 | [
"MIT"
] | null | null | null | tests/thumbor.py | hurbcom/libthumbor | 8362f08071ed1ce345be59713825844808873a80 | [
"MIT"
] | null | null | null | tests/thumbor.py | hurbcom/libthumbor | 8362f08071ed1ce345be59713825844808873a80 | [
"MIT"
] | null | null | null | # encoding: utf-8
import base64
import hashlib
import hmac
import re
import six
from six.moves.urllib.parse import quote
from Crypto.Cipher import AES
| 30.801394 | 119 | 0.508145 |
c9ce4ccacd1980f9dcbf0a2c852bfa9e74a3ad5a | 4,420 | py | Python | config.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | null | null | null | config.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | 4 | 2021-06-08T23:01:47.000Z | 2022-03-12T00:53:53.000Z | config.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | null | null | null | """ Config class for training the InvNet """
import argparse
from dp_layer.graph_layer.edge_functions import edge_f_dict as d
def get_parser(name):
"""
:param name: String for Config Name
:return: parser
"""
parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
return parser
| 53.253012 | 122 | 0.682579 |
c9d237ae48e81118b5aaea91722859235e40aa06 | 1,599 | py | Python | flaskrst/modules/tags.py | jarus/flask-rst | 05b6a817f5986d6f6a4552d16a133deb8859ce3e | [
"BSD-3-Clause"
] | 7 | 2015-01-22T14:32:55.000Z | 2021-07-14T02:54:42.000Z | flaskrst/modules/tags.py | jarus/flask-rst | 05b6a817f5986d6f6a4552d16a133deb8859ce3e | [
"BSD-3-Clause"
] | null | null | null | flaskrst/modules/tags.py | jarus/flask-rst | 05b6a817f5986d6f6a4552d16a133deb8859ce3e | [
"BSD-3-Clause"
] | 2 | 2016-03-14T01:06:13.000Z | 2016-04-15T13:26:54.000Z | # -*- coding: utf-8 -*-
"""
flask-rst.modules.tags
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by Christoph Heer.
:license: BSD, see LICENSE for more details.
"""
from math import log
from flask import Blueprint, render_template
from jinja2 import Markup
from flaskrst.modules.blog import posts
tags = Blueprint('tags', __name__)
def setup(app, cfg):
app.jinja_env.globals['tags'] = template_tags
app.register_blueprint(tags) | 26.213115 | 72 | 0.605378 |
c9d4af05e2b5e939ee1aea3341e906f371d84c8b | 12,560 | py | Python | tests/test_apsw.py | hideaki-t/sqlite-fts-python | 2afdc4ad6d3d7856d801bd3f9106160825f49d00 | [
"MIT"
] | 35 | 2015-05-06T13:37:02.000Z | 2022-01-06T02:52:49.000Z | tests/test_apsw.py | polyrand/sqlite-fts-python | 2afdc4ad6d3d7856d801bd3f9106160825f49d00 | [
"MIT"
] | 18 | 2015-11-21T19:00:57.000Z | 2021-12-31T12:41:08.000Z | tests/test_apsw.py | polyrand/sqlite-fts-python | 2afdc4ad6d3d7856d801bd3f9106160825f49d00 | [
"MIT"
] | 11 | 2015-01-12T12:20:29.000Z | 2021-04-07T21:43:48.000Z | # coding: utf-8
from __future__ import print_function, unicode_literals
import re
import pytest
import sqlitefts as fts
from sqlitefts import fts5, fts5_aux
apsw = pytest.importorskip("apsw")
def test_createtable():
c = apsw.Connection(":memory:")
name = "simple"
sql = "CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name)
fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
c.cursor().execute(sql)
r = (
c.cursor()
.execute(
"SELECT type, name, tbl_name, sql FROM sqlite_master WHERE type='table' AND name='fts'"
)
.fetchone()
)
assert r == ("table", "fts", "fts", sql)
c.close()
def test_insert():
c = apsw.Connection(":memory:")
name = "simple"
content = ""
fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
c.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
r = c.cursor().execute("INSERT INTO fts VALUES(?)", (content,))
assert c.changes() == 1
r = c.cursor().execute("SELECT content FROM fts").fetchone()
assert r[0] == content
c.close()
def test_match():
c = apsw.Connection(":memory:")
name = "simple"
contents = [("abc def",), ("abc xyz",), (" ",), (" ",)]
fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
c.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name))
r = c.cursor().executemany("INSERT INTO fts VALUES(?)", contents)
r = c.cursor().execute("SELECT * FROM fts").fetchall()
assert len(r) == 4
r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall()
assert len(r) == 2
r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'def'").fetchall()
assert len(r) == 1 and r[0][0] == contents[0][0]
r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'xyz'").fetchall()
assert len(r) == 1 and r[0][0] == contents[1][0]
r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'zzz'").fetchall()
assert len(r) == 0
r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH ''").fetchall()
assert len(r) == 2
r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH ''").fetchall()
assert len(r) == 1 and r[0][0] == contents[2][0]
r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH ''").fetchall()
assert len(r) == 1 and r[0][0] == contents[3][0]
r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH ''").fetchall()
assert len(r) == 0
c.close()
def test_full_text_index_queries():
name = "simple"
docs = [
(
"README",
"sqlitefts-python provides binding for tokenizer of SQLite Full-Text search(FTS3/4). It allows you to write tokenizers in Python.",
),
(
"LICENSE",
"""Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:""",
),
("", " "),
]
with apsw.Connection(":memory:") as c:
fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
c.cursor().execute(
"CREATE VIRTUAL TABLE docs USING FTS4(title, body, tokenize={})".format(
name
)
)
c.cursor().executemany("INSERT INTO docs(title, body) VALUES(?, ?)", docs)
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'Python'")
.fetchall()
)
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'bind'").fetchall()
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'binding'")
.fetchall()
)
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to'").fetchall()
assert len(r) == 2
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH ''").fetchall()
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH ''").fetchall()
assert len(r) == 0
assert (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'binding'")
.fetchall()[0]
== c.cursor()
.execute("SELECT * FROM docs WHERE body MATCH 'binding'")
.fetchall()[0]
)
assert (
c.cursor()
.execute("SELECT * FROM docs WHERE body MATCH 'binding'")
.fetchall()[0]
== c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'")
.fetchall()[0]
)
assert (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH ''")
.fetchall()[0]
== c.cursor()
.execute("SELECT * FROM docs WHERE body MATCH ''")
.fetchall()[0]
)
assert (
c.cursor()
.execute("SELECT * FROM docs WHERE body MATCH ''")
.fetchall()[0]
== c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'body:'")
.fetchall()[0]
)
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'title:bind'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'title:README'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'title:'")
.fetchall()
)
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH 'bind'").fetchall()
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE title MATCH 'README'")
.fetchall()
)
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH ''").fetchall()
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to in'").fetchall()
assert len(r) == 2
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Py*'").fetchall()
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Z*'").fetchall()
assert len(r) == 0
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH '*'").fetchall()
assert len(r) == 1
r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH '*'").fetchall()
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer SQLite'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\"tokenizer SQLite\"'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH ' '")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\" \"'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\"tok* SQL*\"'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\"tok* of SQL*\"'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\"* *\"'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH '\"* *\"'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer NEAR SQLite'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/2 SQLite'")
.fetchall()
)
assert len(r) == 0
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/3 SQLite'")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH ' NEAR '")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH ' NEAR/2 '")
.fetchall()
)
assert len(r) == 1
r = (
c.cursor()
.execute("SELECT * FROM docs WHERE docs MATCH ' NEAR/3 '")
.fetchall()
)
assert len(r) == 1
def test_tokenizer_output():
name = "simple"
with apsw.Connection(":memory:") as c:
fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer()))
c.cursor().execute(
"CREATE VIRTUAL TABLE tok1 USING fts3tokenize({})".format(name)
)
expect = [
("This", 0, 4, 0),
("is", 5, 7, 1),
("a", 8, 9, 2),
("test", 10, 14, 3),
("sentence", 15, 23, 4),
]
for a, e in zip(
c.cursor().execute(
"SELECT token, start, end, position "
"FROM tok1 WHERE input='This is a test sentence.'"
),
expect,
):
assert e == a
s = " "
expect = [(None, 0, -1, 0)]
for i, t in enumerate(s.split()):
expect.append(
(t, expect[-1][2] + 1, expect[-1][2] + 1 + len(t.encode("utf-8")), i)
)
expect = expect[1:]
for a, e in zip(
c.cursor().execute(
"SELECT token, start, end, position " "FROM tok1 WHERE input=?", [s]
),
expect,
):
assert e == a
| 34.31694 | 143 | 0.525398 |
c9d7bec33f61ca45367ed74051d9e674ed9eb713 | 211 | py | Python | unit_03/random/passwd1.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | null | null | null | unit_03/random/passwd1.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | null | null | null | unit_03/random/passwd1.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | 2 | 2019-11-14T15:04:22.000Z | 2021-10-31T07:34:46.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
"""
import random
print ''.join([random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) for x in range(12)]) | 26.375 | 120 | 0.739336 |
c9d8ca7a24eddf8714bfa4046edd0feee39e2a38 | 1,283 | py | Python | lib/core/network.py | lck1201/seq2seq-3Dpose | 3f45cc0f001ac5d25705834541d55938bf1907b6 | [
"MIT"
] | 13 | 2019-03-29T13:39:36.000Z | 2021-09-07T11:15:45.000Z | lib/core/network.py | lck1201/seq2seq-3Dpose | 3f45cc0f001ac5d25705834541d55938bf1907b6 | [
"MIT"
] | 1 | 2019-12-14T21:12:17.000Z | 2019-12-14T21:12:17.000Z | lib/core/network.py | lck1201/seq2seq-3Dpose | 3f45cc0f001ac5d25705834541d55938bf1907b6 | [
"MIT"
] | null | null | null | import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet.gluon import nn, rnn
from config import config
nJoints = config.NETWORK.nJoints | 36.657143 | 87 | 0.639127 |
c9d97586e443bc62d5fe8b8784de68a2c4bfe273 | 536 | py | Python | Session1_2018/Practice/karatsuba_multiplication.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | 1 | 2018-09-21T10:51:15.000Z | 2018-09-21T10:51:15.000Z | Session1_2018/Practice/karatsuba_multiplication.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | Session1_2018/Practice/karatsuba_multiplication.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | from math import ceil, floor
if __name__ == "__main__":
print(k_multiply(2104, 2421))
print(k_multiply(21, 24))
print(k_multiply(1, 4))
| 26.8 | 59 | 0.507463 |
c9da6ebaaad2c77b2b6e79ec9dbb080561fa3b98 | 1,138 | py | Python | day10/day10.py | ecly/a | 73642e7edae484984430492ca9b62bd52b315a50 | [
"MIT"
] | null | null | null | day10/day10.py | ecly/a | 73642e7edae484984430492ca9b62bd52b315a50 | [
"MIT"
] | null | null | null | day10/day10.py | ecly/a | 73642e7edae484984430492ca9b62bd52b315a50 | [
"MIT"
] | null | null | null | import sys
if __name__ == '__main__':
main()
| 23.708333 | 78 | 0.516696 |
c9dbe4020052218bd87d8a5c72620da1aa4c792c | 1,850 | py | Python | fuzzystring.py | ZackDev/fuzzystring | 70d5e55f8cf90bcebdb491ba26baa3e05d479189 | [
"MIT"
] | null | null | null | fuzzystring.py | ZackDev/fuzzystring | 70d5e55f8cf90bcebdb491ba26baa3e05d479189 | [
"MIT"
] | null | null | null | fuzzystring.py | ZackDev/fuzzystring | 70d5e55f8cf90bcebdb491ba26baa3e05d479189 | [
"MIT"
] | null | null | null | import re
import random
import string
import os
supported_types = ['a', 'n', 's']
count_types = []
if __name__ == '__main__':
s = fuzzyfy('ans', 10)
print(s)
| 22.02381 | 78 | 0.571351 |
c9dcd26ab8ee7882eebaee13880a0044570deca1 | 787 | py | Python | tests/conftest.py | m-schmoock/lightning | 5ffc516133d07aa653c680cf96d5316a614dbc1f | [
"MIT"
] | 1 | 2021-01-20T05:46:35.000Z | 2021-01-20T05:46:35.000Z | tests/conftest.py | m-schmoock/lightning | 5ffc516133d07aa653c680cf96d5316a614dbc1f | [
"MIT"
] | 5 | 2020-12-16T13:44:59.000Z | 2021-06-06T06:11:12.000Z | tests/conftest.py | m-schmoock/lightning | 5ffc516133d07aa653c680cf96d5316a614dbc1f | [
"MIT"
] | 7 | 2019-10-07T23:53:49.000Z | 2021-11-23T18:26:30.000Z | import pytest
# This function is based upon the example of how to
# "[make] test result information available in fixtures" at:
# https://pytest.org/latest/example/simple.html#making-test-result-information-available-in-fixtures
# and:
# https://github.com/pytest-dev/pytest/issues/288
| 32.791667 | 101 | 0.707751 |
c9dcfb8d245f4e4c379ad41c4bae671d93734033 | 1,236 | py | Python | perfkitbenchmarker/context.py | robfrut135/PerfKitBenchmarker | ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c | [
"Apache-2.0"
] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | perfkitbenchmarker/context.py | robfrut135/PerfKitBenchmarker | ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c | [
"Apache-2.0"
] | 1 | 2018-03-15T21:01:27.000Z | 2018-03-15T21:01:27.000Z | perfkitbenchmarker/context.py | robfrut135/PerfKitBenchmarker | ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c | [
"Apache-2.0"
] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for working with the current thread context."""
import threading
_thread_local = _ThreadData()
def SetThreadBenchmarkSpec(benchmark_spec):
"""Sets the current thread's BenchmarkSpec object."""
_thread_local.benchmark_spec = benchmark_spec
def GetThreadBenchmarkSpec():
"""Gets the current thread's BenchmarkSpec object.
If SetThreadBenchmarkSpec() has not been called in either the current thread
or in an ancestor, then this method will return None by default.
"""
return _thread_local.benchmark_spec
| 30.9 | 78 | 0.771036 |
c9df6ba0ed0d28f7270862edcecc5a88bc403d3d | 615 | py | Python | arcade/rainbow.py | itsMadesh/python-personal-programs | 05355aa098afd87b345c9a2ca21b48552bf5a23b | [
"MIT"
] | null | null | null | arcade/rainbow.py | itsMadesh/python-personal-programs | 05355aa098afd87b345c9a2ca21b48552bf5a23b | [
"MIT"
] | null | null | null | arcade/rainbow.py | itsMadesh/python-personal-programs | 05355aa098afd87b345c9a2ca21b48552bf5a23b | [
"MIT"
] | null | null | null | import arcade
arcade.open_window(500,750,"Rainbow")
arcade.set_background_color(arcade.color.SKY_BLUE)
arcade.start_render()
arcade.draw_parabola_filled(25,80,500,300,arcade.color.RED,0)
arcade.draw_parabola_filled(50,80,470,280,arcade.color.ORANGE,0)
arcade.draw_parabola_filled(75,80,440,260,arcade.color.YELLOW ,0)
arcade.draw_parabola_filled(100,80,410,240,arcade.color.GREEN,0)
arcade.draw_parabola_filled(125,80,380,220,arcade.color.BLUE,0)
arcade.draw_parabola_filled(150,80,350,200,arcade.color.INDIGO,0)
arcade.draw_parabola_filled(175,80,320,180,arcade.color.VIOLET,0)
arcade.finish_render()
arcade.run()
| 43.928571 | 65 | 0.827642 |
c9e0e47d0408e03065a4fc6bb39fcef4c8c2b570 | 403 | py | Python | portal/migrations/0017_remove_professor_nome_abreviado.py | leodiasp/abcmobile | 470966239230becc1d52cbd7c2794d9572915dfd | [
"MIT"
] | null | null | null | portal/migrations/0017_remove_professor_nome_abreviado.py | leodiasp/abcmobile | 470966239230becc1d52cbd7c2794d9572915dfd | [
"MIT"
] | null | null | null | portal/migrations/0017_remove_professor_nome_abreviado.py | leodiasp/abcmobile | 470966239230becc1d52cbd7c2794d9572915dfd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-02-13 18:49
from __future__ import unicode_literals
from django.db import migrations
| 20.15 | 48 | 0.622829 |
c9e2dcad41a62fabe6d852cdc47cfde976426a83 | 1,510 | py | Python | modules/drop.py | a-wing/mavelous | eef41c096cc282bb3acd33a747146a88d2bd1eee | [
"MIT"
] | 80 | 2015-01-02T23:23:19.000Z | 2021-11-02T16:03:07.000Z | modules/drop.py | Y-H-T/mavelous | eef41c096cc282bb3acd33a747146a88d2bd1eee | [
"MIT"
] | 1 | 2016-04-13T15:44:23.000Z | 2016-04-13T15:44:23.000Z | modules/drop.py | Y-H-T/mavelous | eef41c096cc282bb3acd33a747146a88d2bd1eee | [
"MIT"
] | 63 | 2015-01-03T19:35:39.000Z | 2022-02-08T17:15:44.000Z | #!/usr/bin/env python
''' simple bottle drop module'''
import time
mpstate = None
hold_pwm = 983
release_pwm = 1776
drop_channel = 5
drop_time = 2.0
def name():
'''return module name'''
return "drop"
def description():
'''return module description'''
return "bottle drop control"
def cmd_drop(args):
'''drop a bottle'''
mpstate.drop_state.start_drop = time.time()
mpstate.drop_state.waiting = True
mpstate.status.override[drop_channel-1] = release_pwm
mpstate.override_period.force()
print("started drop")
def check_drop(m):
'''check if drop is complete'''
if mpstate.drop_state.waiting and time.time() > mpstate.drop_state.start_drop+drop_time:
mpstate.status.override[drop_channel-1] = 0
mpstate.drop_state.waiting = False
mpstate.override_period.force()
print("drop complete")
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.drop_state = drop_state()
mpstate.command_map['drop'] = (cmd_drop, "drop bottle")
print("drop initialised")
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
if m.get_type() == 'RC_CHANNELS_RAW':
check_drop(m)
if m.get_type() == 'PARAM_VALUE':
if str(m.param_id) == 'RC5_FUNCTION' and m.param_value != 1.0:
print("DROP WARNING: RC5_FUNCTION=%u" % m.param_value)
| 26.491228 | 92 | 0.660265 |
c9e3019d7f86a0fcc9bd9c9aa1f3b2b74e02646a | 9,232 | py | Python | tests/data/pandas_valid_data.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 14 | 2016-08-26T07:06:57.000Z | 2020-09-22T07:41:21.000Z | tests/data/pandas_valid_data.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 94 | 2016-08-02T14:07:59.000Z | 2021-10-06T11:50:52.000Z | tests/data/pandas_valid_data.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 8 | 2017-02-07T12:05:57.000Z | 2021-10-14T09:45:30.000Z | import pandas as pd
import numpy as np
from numpy.random import randn
from craft_ai.pandas import MISSING_VALUE, OPTIONAL_VALUE
from random import random, randint
NB_OPERATIONS = 300
NB_MANY_OPERATIONS = 1000
SIMPLE_AGENT_BOOSTING_CONFIGURATION = {
"model_type": "boosting",
"context": {
"a": {"type": "enum"},
"b": {"type": "continuous"},
"c": {"type": "continuous"},
"d": {"type": "continuous"},
"e": {"type": "timezone"},
},
"output": ["a"],
"min_samples_per_leaf": 1,
"operations_as_events": True,
"tree_max_operations": 50000,
"num_iterations": 20,
"learning_rate": 0.5,
}
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = {
"model_type": "boosting",
"context": {
"a": {"type": "enum"},
"b": {"type": "continuous"},
"c": {"type": "day_of_week", "is_generated": True},
"d": {"type": "timezone"},
},
"output": ["a"],
"min_samples_per_leaf": 1,
"operations_as_events": True,
"tree_max_operations": 50000,
"num_iterations": 20,
"learning_rate": 0.5,
}
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = {
"model_type": "boosting",
"context": {
"a": {"type": "enum"},
"b": {"type": "continuous"},
"c": {"type": "continuous"},
"d": {"type": "continuous"},
"e": {"type": "timezone"},
"f": {"type": "day_of_week", "is_generated": True},
"g": {"type": "month_of_year"},
},
"output": ["a"],
"min_samples_per_leaf": 1,
"operations_as_events": True,
"tree_max_operations": 50000,
"num_iterations": 20,
"learning_rate": 0.5,
}
SIMPLE_AGENT_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "continuous"},
"c": {"type": "continuous"},
"d": {"type": "continuous"},
"e": {"type": "continuous"},
},
"output": ["a"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
}
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = pd.DataFrame(
[[str(randint(1, 3)), random()] for i in range(NB_OPERATIONS)],
columns=["a", "b"],
index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
"Europe/Paris"
),
)
SIMPLE_AGENT_BOOSTING_DATA = pd.DataFrame(
[
[str(randint(1, 3)), random(), random(), random(), "+01:00"]
for i in range(NB_OPERATIONS)
],
columns=["a", "b", "c", "d", "e"],
index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
"Europe/Paris"
),
)
SIMPLE_AGENT_BOOSTING_MANY_DATA = pd.DataFrame(
[
[str(randint(1, 3)), random(), random(), random(), "+01:00"]
for i in range(NB_MANY_OPERATIONS)
],
columns=["a", "b", "c", "d", "e"],
index=pd.date_range("20200101", periods=NB_MANY_OPERATIONS, freq="T").tz_localize(
"Europe/Paris"
),
)
SIMPLE_AGENT_DATA = pd.DataFrame(
randn(NB_OPERATIONS, 5),
columns=["a", "b", "c", "d", "e"],
index=pd.date_range("20200101", periods=NB_OPERATIONS, freq="T").tz_localize(
"Europe/Paris"
),
)
SIMPLE_AGENT_MANY_DATA = pd.DataFrame(
randn(NB_MANY_OPERATIONS, 5),
columns=["a", "b", "c", "d", "e"],
index=pd.date_range("20200101", periods=NB_MANY_OPERATIONS, freq="T").tz_localize(
"Europe/Paris"
),
)
SIMPLE_AGENT_DATA_DICT = [
{
"timestamp": 1558741230,
"context": {"a": 10, "b": 10, "c": 10, "d": 10, "e": 10},
},
{"timestamp": 1558741331, "context": {"a": 10, "b": 11, "c": 12, "e": 13}},
{"timestamp": 1558741432, "context": {"a": 13, "b": 44, "c": 33, "d": 22}},
{"timestamp": 1558741533, "context": {"a": 11, "d": 55, "e": 55}},
{"timestamp": 1558741634, "context": {"a": 33, "c": 66, "d": 22, "e": 44}},
{"timestamp": 1558741735, "context": {"a": 1, "b": 33, "c": 33, "d": 44}},
]
COMPLEX_AGENT_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["b"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
"operations_as_events": True,
"learning_period": 3600 * 24 * 365,
"tree_max_operations": 50000,
}
COMPLEX_AGENT_CONFIGURATION_2 = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["a"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
"operations_as_events": True,
"learning_period": 3600 * 24 * 365,
"tree_max_operations": 50000,
}
COMPLEX_AGENT_DATA = pd.DataFrame(
[
[1, "Pierre", "+02:00"],
[2, "Paul"],
[3],
[4],
[5, "Jacques"],
[6],
[7],
[8, np.nan, "+01:00"],
[9],
[10],
],
columns=["a", "b", "tz"],
index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
COMPLEX_AGENT_DATA_2 = pd.DataFrame(
[
[1, "Pierre", "+02:00", [8, 9]],
[2, "Paul"],
[3],
[4],
[5, "Jacques"],
[6],
[7],
[8, np.nan, "+01:00", [1, 2, 3]],
[9],
[10],
],
columns=["a", "b", "tz", "arrays"],
index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
DATETIME_AGENT_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"myTimeOfDay": {"type": "time_of_day"},
"myCoolTimezone": {"type": "timezone"},
},
"output": ["b"],
"time_quantum": 3600,
"min_samples_per_leaf": 1,
}
DATETIME_AGENT_DATA = pd.DataFrame(
[
[1, "Pierre", "+02:00"],
[2, "Paul"],
[3, np.nan, "+04:00"],
[4],
[5, "Jacques", "UTC"],
[6],
[7, np.nan, "+08:00"],
[8],
[9],
[10, np.nan, "+10:00"],
],
columns=["a", "b", "myCoolTimezone"],
index=pd.date_range("20200101 00:00:00", periods=10, freq="H").tz_localize("UTC"),
)
MISSING_AGENT_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["a"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
}
MISSING_AGENT_DATA = pd.DataFrame(
[
[1, MISSING_VALUE, "+02:00"],
[2, "Paul"],
[3, OPTIONAL_VALUE],
[4],
[5, "Jacques"],
[6],
[np.nan, OPTIONAL_VALUE],
[8, None, "+01:00"],
[9],
[10],
],
columns=["a", "b", "tz"],
index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
MISSING_AGENT_DATA_DECISION = pd.DataFrame(
[[1, MISSING_VALUE, "+02:00"], [3, OPTIONAL_VALUE]],
columns=["a", "b", "tz"],
index=pd.date_range("20200101", periods=2, freq="D").tz_localize("Europe/Paris"),
)
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"1_b": {"type": "enum"},
"None": {"type": "enum"},
"_c": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["a"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
}
INVALID_PYTHON_IDENTIFIER_DATA = pd.DataFrame(
[
[1, "Pierre", "Mignon", "Toto", "+02:00"],
[2, "Paul"],
[3],
[4, "Tata", "Tutu"],
[5, "Jacques"],
[6],
[7],
[8, np.nan, np.nan, np.nan, "+01:00"],
[9],
[10],
],
columns=["a", "1_b", "None", "_c", "tz"],
index=pd.date_range("20200101", periods=10, freq="D").tz_localize("Europe/Paris"),
)
INVALID_PYTHON_IDENTIFIER_DECISION = pd.DataFrame(
[
[1, "Pierre", "Mignon", "Toto", "+02:00"],
[2, "Paul", "Mignon", "Toto", "+02:00"],
[3, "Tata", "Tutu", "Toto", "+02:00"],
],
columns=["a", "1_b", "None", "_c", "tz"],
index=pd.date_range("20200101", periods=3, freq="D").tz_localize("Europe/Paris"),
)
EMPTY_TREE = {
"_version": "2.0.0",
"configuration": {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["b"],
"time_quantum": 100,
"min_samples_per_leaf": 1,
},
"trees": {
"b": {"output_values": [], "prediction": {"confidence": 0, "nb_samples": 0}}
},
}
VALID_GENERATOR_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "continuous"},
"c": {"type": "continuous"},
"d": {"type": "continuous"},
"e": {"type": "continuous"},
},
"output": ["a"],
"time_quantum": 100,
"operations_as_events": True,
"learning_period": 6000000,
"tree_max_operations": 50000,
"filter": ["test_filter"],
}
VALID_COMPLEX_GENERATOR_CONFIGURATION = {
"context": {
"a": {"type": "continuous"},
"b": {"type": "enum"},
"tz": {"type": "timezone"},
},
"output": ["b"],
"time_quantum": 100,
"operations_as_events": True,
"learning_period": 6000000,
"tree_max_operations": 50000,
"filter": ["test_filter"],
}
VALID_TIMESTAMP = 1577833200
VALID_LAST_TIMESTAMP = 1577847600
| 26.682081 | 86 | 0.517006 |
c9e4abe60b90e60426a219ee6fec07063b3f40f3 | 305 | py | Python | src/api/pages.py | nhardy/py-js-web-scaffold | adf3e3ada0b21cdb9620676de795579107442dd7 | [
"MIT"
] | null | null | null | src/api/pages.py | nhardy/py-js-web-scaffold | adf3e3ada0b21cdb9620676de795579107442dd7 | [
"MIT"
] | null | null | null | src/api/pages.py | nhardy/py-js-web-scaffold | adf3e3ada0b21cdb9620676de795579107442dd7 | [
"MIT"
] | null | null | null | import tornado.web
from content import PAGES
| 21.785714 | 58 | 0.721311 |
c9e532019c14012309cd048823903e390b14f730 | 3,767 | py | Python | retropie/influx-retropie.py | Epaphus/personal-influxdb | 6357bc8a1b362280b0ce79674ddd8e804573f2a9 | [
"Apache-2.0"
] | 217 | 2020-01-07T20:25:46.000Z | 2022-03-29T06:09:58.000Z | retropie/influx-retropie.py | Epaphus/personal-influxdb | 6357bc8a1b362280b0ce79674ddd8e804573f2a9 | [
"Apache-2.0"
] | 16 | 2020-02-10T12:40:23.000Z | 2022-02-26T13:01:55.000Z | retropie/influx-retropie.py | Epaphus/personal-influxdb | 6357bc8a1b362280b0ce79674ddd8e804573f2a9 | [
"Apache-2.0"
] | 34 | 2020-01-15T15:42:20.000Z | 2022-02-22T17:29:15.000Z | #!/usr/bin/python3
# Copyright (C) 2020 Sam Steele
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import xml.etree.ElementTree as ET
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
INFLUXDB_HOST = 'localhost'
INFLUXDB_PORT = 8086
INFLUXDB_USERNAME = 'root'
INFLUXDB_PASSWORD = 'root'
GAMING_DATABASE = 'gaming'
# Read one play session from the tmpfs hand-off file: one value per line in
# the order start epoch, platform, emulator, rom path, end epoch (presumably
# written by a RetroPie runcommand hook — confirm against the hook scripts).
f = open('/run/shm/influx-retropie', 'r')
start = datetime.utcfromtimestamp(int(f.readline().strip()))
platform = f.readline().strip()
emulator = f.readline().strip()
# 'rom' keeps the raw file name; 'name' may later be replaced by a pretty
# title looked up in the EmulationStation gamelist below.
rom = name = os.path.basename(f.readline().strip())
end = datetime.utcfromtimestamp(int(f.readline().strip()))
duration = (end - start).seconds
f.close()
if not rom:
    # No rom path supplied: the launched entry was the emulator/app itself.
    rom = name = emulator
    platform = "Linux"
#Ignore games played less than 60 seconds
if duration < 60:
    print("Ignoring '" + emulator + ": " + name +"' played less than 60 seconds")
    sys.exit()
#Ignore non-games and Macintosh platform which doesn't provide game names
if platform == "macintosh" or rom.startswith("+") or rom == "Desktop.sh" or rom == "Kodi.sh" or rom == "Steam Link.sh":
    print("Ignoring non-game: '" + emulator + ": " + name +"'")
    sys.exit()
# If an EmulationStation gamelist exists for this platform, replace the rom
# file name with the human-readable game title it records.
gamelist = os.path.expanduser('~/.emulationstation/gamelists/' + platform + '/gamelist.xml')
if os.path.exists(gamelist):
    root = ET.parse(gamelist).getroot()
    for game in root.findall('game'):
        path = os.path.basename(game.find('path').text)
        if path == name:
            name = game.find('name').text
            break
# Translate RetroPie platform directory names into display names; anything
# not listed falls back to simple capitalisation.
_PLATFORM_NAMES = {
    "nes": "NES",
    "snes": "SNES",
    "gba": "Game Boy Advance",
    "gbc": "Game Boy Color",
    "megadrive": "Sega Genesis",
    "genesis": "Sega Genesis",
    "sega32x": "Sega 32X",
    "segacd": "Sega CD",
    "pc": "MS-DOS",
    "scummvm": "ScummVM",
    "mame-libretro": "Arcade",
    "mastersystem": "Sega MasterSystem",
}
platform = _PLATFORM_NAMES.get(platform, platform.capitalize())
# Optional metadata for known desktop applications; empty for plain roms.
url = ""
image = ""
if name == "openttd":
    name = "OpenTTD"
    url = "https://www.openttd.org"
    image = "https://www.openttd.org/static/img/layout/openttd-128.gif"
# Build the InfluxDB point once; the original duplicated the whole literal
# just to optionally include image/url, so add those fields conditionally.
fields = {
    "value": duration
}
if url and image:
    fields["image"] = image
    fields["url"] = url
points = [{
    "measurement": "time",
    "time": start,
    "tags": {
        "application_id": rom,
        "platform": platform,
        "title": name,
    },
    "fields": fields
}]
# Connect to InfluxDB and make sure the target database exists; exit quietly
# (no traceback) on connection failure.
try:
    client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)
    client.create_database(GAMING_DATABASE)
except InfluxDBClientError as err:
    print("InfluxDB connection failed: %s" % (err))
    sys.exit()
# Write the session point(s) built above into the gaming database.
try:
    client.switch_database(GAMING_DATABASE)
    client.write_points(points)
except InfluxDBClientError as err:
    print("Unable to write points to InfluxDB: %s" % (err))
    sys.exit()
print("Successfully wrote %s data points to InfluxDB" % (len(points)))
| 27.297101 | 123 | 0.689673 |
c9e56f5f70dd474993a40687a674f32c37bed1cb | 7,470 | py | Python | molecule.py | Ved-P/molecule | 9727a9e7f8c0412feee27bbe034a1540cff7534e | [
"MIT"
] | null | null | null | molecule.py | Ved-P/molecule | 9727a9e7f8c0412feee27bbe034a1540cff7534e | [
"MIT"
] | 1 | 2022-01-03T20:07:31.000Z | 2022-01-04T18:45:21.000Z | molecule.py | Ved-P/molecule | 9727a9e7f8c0412feee27bbe034a1540cff7534e | [
"MIT"
] | null | null | null | # Molecule
#
# This program takes in a molecular formula and creates a Lewis diagram and a 3D
# model of the molecule as the output.
#
# Author: Ved Pradhan
# Since: December 31, 2021
import json
import matplotlib.pyplot as plt
import sys
import math
# Opens the JSON file for use.
with open("elements.json", "r", encoding="utf8") as file:
data = json.load(file)
# Gets the formula and charge from the user.
formula = input("\n\n\nWelcome to Molecule! Please enter a molecular formula "
+ "(case sensitive): ")
temp = input("What is the charge of the molecule? Enter an integer (0 for no "
+ "charge): ")
try:
charge = int(temp)
except ValueError:
print("Error: '" + temp + "' is not a valid charge.\n\n\n")
sys.exit()
# A list to store each individual atom in the molecule.
atoms = []
# A dictionary to store each type of element and its frequency.
element_frequency = {}
# A list to store the bonds between Atom objects.
bonds = []
# Class to represent each individual atom in the molecule.
# Retrieves the element corresponding to the given symbol.
def get_element(symbol):
for element in data["elements"]:
if element["symbol"] == symbol:
return element
print("Error: Element '" + symbol + "' not found.\n\n\n")
return False
# Parses through the inputted formula, splitting it into elements and frequencies.
# Prints a "not supported" message and quits the program.
# Checks if the molecule is supported.
# Bonds two atoms together; updates in the object and the data structure.
# Distributes the valence electrons as loose ones or through bonds.
# Draws the lewis diagram using matplotlib.
parse(formula)
check()
distribute()
print(element_frequency)
for a in atoms:
print(a)
draw_lewis()
print("\n\n\n")
| 30.995851 | 107 | 0.58822 |
c9e7b5a8abbdd10976c1ff71d253777d5ecde531 | 9,185 | py | Python | app/transaction/attendence.py | rrsk/hiwayPay | c84b7581475164751f64540a521b803bdf08a9fb | [
"MIT"
] | 31 | 2020-07-01T06:40:16.000Z | 2022-03-30T18:49:02.000Z | app/transaction/attendence.py | rrsk/hiwayPay | c84b7581475164751f64540a521b803bdf08a9fb | [
"MIT"
] | 2 | 2020-11-02T06:21:23.000Z | 2021-06-02T00:31:06.000Z | app/transaction/attendence.py | rrsk/hiwayPay | c84b7581475164751f64540a521b803bdf08a9fb | [
"MIT"
] | 13 | 2020-07-02T07:06:05.000Z | 2022-03-15T11:34:41.000Z | from flask import Blueprint
from flask import render_template, redirect, url_for, request, session, jsonify
from flask_login import login_user, logout_user, current_user
from app.transaction import bp
from app.transaction.model_att import Attendence, AttendenceSchema , CompanySchema
from app.employee.model import Employee
from app.master.model import Company
from app import db, ma
from datetime import datetime
import json
# @bp.route('/attendence/employee/data/<emp_id>', methods=['POST'])
# def emp_attendence_data(emp_id):
# if request.method == "POST":
# data = Attendence.query.filter(
# Attendence.employee.any(Employee.id == int(emp_id))).all()
# # data_schema = AttendenceSchema(many=True)
# today = datetime.now()
# today.year()
# return jsonify(json_data)
# else:
# return jsonify({'message': 'Invalid HTTP request method.'})
| 38.919492 | 157 | 0.507349 |
c9e83e673a43a955f85b17deeccd1c24bc0579dc | 3,385 | py | Python | examples/monitor.py | seba-1511/randopt | 74cefcc734c6a38418151025b0a4d8b6cb41eb14 | [
"Apache-2.0"
] | 115 | 2016-11-21T06:44:19.000Z | 2022-01-21T22:21:27.000Z | examples/monitor.py | seba-1511/randopt | 74cefcc734c6a38418151025b0a4d8b6cb41eb14 | [
"Apache-2.0"
] | 26 | 2016-11-21T07:31:37.000Z | 2019-01-16T14:13:23.000Z | examples/monitor.py | seba-1511/randopt | 74cefcc734c6a38418151025b0a4d8b6cb41eb14 | [
"Apache-2.0"
] | 9 | 2018-04-02T19:54:20.000Z | 2020-02-11T09:12:41.000Z | #!/usr/bin/env python3
"""
Usage:
python monitor.py randopt_results/simple_example/
"""
import sys
import os
import time
import curses
import randopt as ro
USE_MPL = True
USE_CURSES = True
try:
from terminaltables import AsciiTable, SingleTable
except:
raise('run pip install terminaltables')
try:
import matplotlib.pyplot as plt
except:
print('matplotlib not found, live plotting disable.')
USE_MPL = False
if __name__ == '__main__':
exp_path = sys.argv[1]
if exp_path[-1] == '/':
exp_path = exp_path[:-1]
exp_dir, exp_name = os.path.split(exp_path)
exp = ro.Experiment(exp_name, directory=exp_dir)
# init interactive display
if USE_CURSES:
screen = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(False)
screen.keypad(True)
start_time = time.time()
timings = []
minimums = []
maximums = []
counts = []
try:
while True:
minimums.append(exp.minimum().result)
maximums.append(exp.maximum().result)
counts.append(exp.count())
timings.append(time.time() - start_time)
if USE_MPL:
plot_statistics(counts, timings, minimums, maximums, exp_name)
table = table_statistics(
counts, timings, minimums, maximums, exp_name)
if USE_CURSES:
screen.addstr(0, 0, 'Experiment ' + exp_name + ' Statistics')
for i, line in enumerate(table.split('\n')):
line = line.replace('-', u'\u2500')
line = line.replace('|', u'\u2502')
line = line.replace('+', u'\u253c')
screen.addstr(2 + i, 0, line)
screen.refresh()
else:
print(table)
if USE_MPL:
plt.pause(5)
else:
time.sleep(5)
finally:
if USE_CURSES:
curses.echo()
curses.nocbreak()
screen.keypad(True)
curses.endwin()
| 27.08 | 80 | 0.576662 |
c9e841e014e87b7075f87ca19eeab4f20f7fce6c | 355 | py | Python | RoomsOnRent/Blog/admin.py | DX9807/RoomsOnRent.com | 4147efdce8e13930672c3c7cb12a1f25a70708ed | [
"MIT"
] | null | null | null | RoomsOnRent/Blog/admin.py | DX9807/RoomsOnRent.com | 4147efdce8e13930672c3c7cb12a1f25a70708ed | [
"MIT"
] | null | null | null | RoomsOnRent/Blog/admin.py | DX9807/RoomsOnRent.com | 4147efdce8e13930672c3c7cb12a1f25a70708ed | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post, Comment
admin.site.register(Post,PostAdmin)
admin.site.register(Comment,CommentAdmin)
| 22.1875 | 77 | 0.760563 |
c9e940d8a93717c521e40ddaeecaaa28cbc83b2f | 403 | py | Python | rllib/examples/gpu_test.py | anaskn/ray | 81db5f8060cb093085470ffdc71d8fdecc7bf381 | [
"Apache-2.0"
] | null | null | null | rllib/examples/gpu_test.py | anaskn/ray | 81db5f8060cb093085470ffdc71d8fdecc7bf381 | [
"Apache-2.0"
] | null | null | null | rllib/examples/gpu_test.py | anaskn/ray | 81db5f8060cb093085470ffdc71d8fdecc7bf381 | [
"Apache-2.0"
] | 1 | 2021-05-20T22:00:15.000Z | 2021-05-20T22:00:15.000Z | import os
import ray
from ray import tune
if __name__ == "__main__":
    # Start (or attach to) a Ray runtime on the local machine.
    ray.init()
    # Report the GPU ids Ray has assigned to this process.
    print("ray.get_gpu_ids(): {}".format(ray.get_gpu_ids()))
    #print("CUDA_VISIBLE_DEVICES: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
| 25.1875 | 80 | 0.707196 |
c9eb836d5b59ca6961666fd615625d09250cf88f | 42,230 | py | Python | bin/toldiff.py | comscope/comsuite | d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c | [
"BSD-3-Clause"
] | 18 | 2019-06-15T18:08:21.000Z | 2022-01-30T05:01:29.000Z | bin/toldiff.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | null | null | null | bin/toldiff.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | 11 | 2019-06-05T02:57:55.000Z | 2021-12-29T02:54:25.000Z | #!/usr/bin/env python
#
# Copyright (C) 2006 Huub van Dam, Science and Technology Facilities Council,
# Daresbury Laboratory.
# All rights reserved.
#
# Developed by: Huub van Dam
# Science and Technology Facilities Council
# Daresbury Laboratory
# Computational Science and Engineering Department
# Computational Chemistry Group
# http://www.cse.clrc.ac.uk/ccg
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal with the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimers.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# Neither the names of the Science and Technology Facilities Council,
# Daresbury Laboratory, the Computational Science and Engineering Department,
# the Computational Chemistry Group, nor the names of its contributors may be
# used to endorse or promote products derived from this Software without
# specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS WITH THE SOFTWARE.
import os
import sys
import string
import toldiff_files
import toldiff_lcs
import toldiff_diff
import toldiff_update
import toldiff_transfer
import toldiff_show
import toldiff_tokens
def max(a, b):
    """Return the larger of *a* and *b* (returns *a* on a tie).

    Note: this module-level definition shadows the builtin ``max``.
    """
    return a if a >= b else b
def license_toldiff(fp, errfp):
    """Print the license information to the file object *fp*.

    On success the program terminates with exit status 1 (in line with the
    other informational options). If writing fails, an error is reported on
    *errfp* and the program exits with status 5.
    """
    try:
        fp.write("""
Copyright (C) 2006 Huub van Dam, Science and Technology Facilities Council,
Daresbury Laboratory.
All rights reserved.
Developed by: Huub van Dam
              Science and Technology Facilities Council
              Daresbury Laboratory
              Computational Science and Engineering Department
              Computational Chemistry Group
              http://www.cse.clrc.ac.uk/ccg
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal with the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimers.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the documentation
and/or other materials provided with the distribution.
Neither the names of the Science and Technology Facilities Council,
Daresbury Laboratory, the Computational Science and Engineering Department,
the Computational Chemistry Group, nor the names of its contributors may be
used to endorse or promote products derived from this Software without
specific prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS WITH THE SOFTWARE.
\n""")
        sys.exit(1)
    except IOError as e:
        # 'except ... as' (Python 2.6+/3.x) replaces the Python-2-only
        # 'except IOError, e' form, and e.strerror replaces tuple-unpacking
        # the exception object, which Python 3 does not allow.
        errmsg = e.strerror
        try:
            errfp.write("toldiff: error writing license information\n")
            errfp.write("toldiff: error message: ")
            errfp.write(errmsg)
            errfp.write("\n")
        except IOError:
            # Nothing left to report the failure to; just exit.
            pass
        sys.exit(5)
def usage_toldiff(fp,errfp):
"""Print out the usage information to the specified file object."""
try:
fp.write("""
Usage:
toldiff [[--diff] <reference file> <data file>]
[--update <reference file> <data file>]
[--transfer <reference file> <new reference file>]
[--show <reference file>]
[--tolerance <tolerance file name>]
[--new-tolerance <new tolerance file name>]
[--diff-exe <diff executable>]
[--output full|summary|none]
[--summary <identical>:<equivalent>:<different>]
[--exit <identical>:<equivalent>:<different>]
[--[no]exact] [--[no]tolerant] [--[no]best]
[--itol-scale <integer tolerance scale factor>]
[--ftol-scale <floating point tolerance scale factor>]
[--ctol-scale <complex tolerance scale factor>]
[--separators <separator character list>]
[--guides <number of guides>]
[--[no]backtrack]
[--help] [--license] [--version]
Toldiff is a script that compares two files allowing for tolerable
differences. Tolerable differences often arise in meta data like who ran
the test and on which date, timing data, and which machines and how many
processors were used. In scientific/technical codes additional variations
may result from numerical accuracy limitations.
Toldiff is designed to assist in software testing by suppressing tolerable
or trivial differences and highlighting only the significant ones. This
facilitates checking whether an output a program has just produced matches
the reference result obtained in the past.
The toldiff script knows of the following files:
A. The reference file:
- THE correct file
B. The data file:
- A file the correctness of which is to be tested against the reference
file. Once its correctness has been established it may be used to update
the tolerances.
C. The tolerance file:
- This file records where all allowed differences can occur, if any.
D. The new reference file:
- The file that is to replace the reference file after a change has
taken place that outdates the reference file
E. The new tolerance file:
- This file records where allowed differences can occur relative to the
new reference file instead of the current reference file.
The script offers three processes:
1. The diff process:
- This process reports all differences between the reference file and
the data file that are not explicitly tolerated.
2. The update process:
- This process updates the tolerances file adding all differences between
the reference file and the data file that were not tolerated before.
3. The transfer process:
- If the current reference file needs to be replaced by a new one this
process will carry as many as possible known tolerances relative to the
current reference file over to the new reference file.
There are various command line options to control toldiff. In cases where
environment variables can be used as an alternative to command line options
the precedence is handled as:
- environment variables take precedence over default settings
- command line options take precedence over environment variables.
There are three categories of options this script will recognise:
1. Process options:
1.1 --diff <reference file name> <data file name>
This triggers the script to perform the default diff process of
comparing the data file against the reference file.
1.2 --update <reference file name> <data file name>
This requests the update process to be performed updating the
tolerance file to allow for any differences between the reference and
data files.
During this process the new tolerances computed can be scaled by a
factor that is equal to or larger than one. This may be useful when
the expected fluctuations are larger than the current differences.
Separate scale factors may be set for each of the three different
numerical data types supported, i.e. integer, floating point, and
complex. The scale factors are always floating point numbers but
after scaling the tolerance the result is rounded where appropriate.
1.2.1 --itol-scale <integer tolerance scale factor>
Sets the scale factor for integer tolerances.
1.2.2 --ftol-scale <floating point tolerance scale factor>
Sets the scale factor for floating point tolerances.
1.2.3 --ctol-scale <complex tolerance scale factor>
Sets the scale factor for complex tolerances.
1.3 --tolerance <tolerance file name>
This option allows explicit specification of the tolerance file name.
If omitted the script will construct a name for the tolerance file
from the name of the reference file.
1.4 --transfer <reference file name> <new reference file name>
This option invokes the transfer process to migrate as many tolerances
as possible from the current reference file over to the new one.
1.5 --new-tolerance <new tolerance file name>
This option allows for the explicit specification of the name of the
new tolerance file. If this is omitted the script will construct a name
for the new tolerance file from the new reference file name.
1.6 --diff-exe <diff executable>
This option enbles replacing some of the Python diff implementation
by invoking a binary diff program. This greatly improves the
performance without changing the functionality. As an alternative
mechanism the environment variable TOLDIFF_EXE may be set to specify
the diff program. In case both the command line option and the
environment variable are provided the command line option has
precedence.
1.7 --output full|summary|none
This option controls the amount of output toldiff produces. The default
setting "full" results in printing a full diff output. The setting
"summary" suppresses the diff output and replaces it with a short
string for files being identical, equivalent or different. The values
of these strings can be specified with the --summary option. Finally,
setting "none" suppresses all output. Other than the --output option
setting the TOLDIFF_OUTPUT environment variable does the same.
1.8 --summary <identical>:<equivalent>:<different>
This option allows the specification of short results for toldiff. The
first string is reported if the reference file and data file are
identical. The second string is reported if the reference and data files
are not identical but all differences are tolerated. The last string
is reported if there are differences that are not tolerated. The
default strings are "identical", "equivalent", and "different". Finally,
these settings can be specified by setting the TOLDIFF_SUMMARY
environment variable. In both ways the values are colomn separated.
1.9 --exit <identical>:<equivalent>:<different>
This option specifies the exit codes for toldiff. The first value is
reported if the reference file and data file are identical. The second
value is reported if the reference and data files are not identical but
all differences are tolerated. The last value is reported if there are
differences that are not tolerated. The default values are 0, 0, and 1.
Finally, these settings can be specified by setting the TOLDIFF_EXIT
environment variable. In both ways the values are colomn separated.
1.10 --separators <separator character list>
Toldiff splits the data in the reference file and the data file into
tokens. It always uses white space to separate tokens. However it may
be necessary to break the tokens up further. It uses any characters
in the separator character list for that purpose. As the tolerances
depend on the separator character list this list can only be specified
when the tolerance file is created. In all other instances specifying
this list will be ignored.
Of course there is the potential to discover that the current set of
separator characters stored in the tolerance file is not optimal.
In that case the transfer process can be used to create a new tolerance
file based on a new set of separators. The specified separator list
will be used to create the new tolerance file.
The separator character list is specified as a white space separated
list of characters, e.g.
--separators "% = ,"
Alternatively the separator character list may be specified using
the environment variable TOLDIFF_SEPARATORS.
1.11 --guides <number of guides>
Tokens are typically short character sequences. As a result if a token
has changed there is a significant chance it will accidentally match
another token. This results in rather unexpected tolerances. Guides
are dummy tokens that direct the diff process to match tokens correctly
even if the tokens do not match exactly. The number of guides used
determines strict this enforcement is, 0 means no enforcement, 2 means
maximum enforcement. Alternatively the environment variable
TOLDIFF_GUIDES may be used.
1.12 --[no]backtrack
Another way to deal with the issue discussed under --guides is to let
the tolerant diff procedure re-analyse some of the differences found
initially. Initially a traditional diff procedure is used that finds
exact matches. As this cannot take tolerances into account suboptimal
matches may result. Rather than rigidly adhering to the matches the
initial diff has found the --backtrack option extends the differences
to the nearest number of whole lines. These whole line sections are
then re-analysed using the tolerant diff procedure, thus allowing
matches to be found that the initial diff by design cannot find.
The environment variable TOLDIFF_BACKTRACK may be used instead of the
command line flag.
Both the --guides and --backtrack options are designed to deal with the
situation where adjacent tokens have overlapping ranges of valid values.
However, even in these situations unintended matches are unlikely unless
the values have very few relevant digits. I.e. is the tolerance is such
that only 1 digit may change then the chance of accidently matching a
neighbouring number is 1 in 10, if 3 digits may change then the chance is
1 in 1000. As a result one may want to check whether the extra expense of
using the --guides and --backtrack options is justified given the
associated risk.
2. Information options:
2.1 --help
Print this information on how to use this scripts.
2.2 --show <reference file name>
Prints the reference file marking all the known tolerances on it.
This allows checking how the program has resolved differences through
the tolerances chosen.
The tolerances are marked on each line in the following order:
1. The number of lines that may be inserted after this line.
2. Whether this line may be deleted in which case it will be marked by
a 'X', otherwise white space indicates that the line has to be
present.
3. The contents of the line are shown with those characters that may
change replaced by '#'.
2.3 --version
Print the version number of the toldiff script you are using.
2.4 --license
Print the license conditions under which this script is distributed.
3. Debug options:
These options are normally set automatically based on the requirements of
the selected process. The default settings aim to complete the selected
process with the highest efficiency. However, for debugging purposes it
is possible to override these settings. You are free to try them to your
own peril.
3.1 --[no]exact
Enable or disable the file differencing procedure that is based on
exact line matches.
3.2 --[no]tolerant
Enable or disable the file differencing procedure that uses a line
comparison which allows for tolerable differences between lines.
3.3 --[no]best
Enable or disable the file differencing procedure that matches lines
based on maximum similarity.
Copyright 2006, Huub van Dam, Science and Technology Facilities Council,
Daresbury Laboratory\n""")
sys.exit(1)
except IOError, e:
(errno,errmsg) = e
try:
errfp.write("toldiff: error writing usage information\n")
errfp.write("toldiff: error message: ")
errfp.write(errmsg)
errfp.write("\n")
except IOError, e:
pass
sys.exit(5)
def load_file(filename, err_fp, separators, nguides):
    """Open and load a file, returning (tokenized text, line count, token count).

    The file is tokenized with toldiff_files.load_plain_text, splitting on
    white space plus the characters in *separators* and inserting *nguides*
    guide tokens. On an I/O error the problem is reported on *err_fp* and
    the program exits with status 10.
    """
    text = toldiff_tokens.tokenized_file()
    lines = 0
    tokens = 0
    try:
        # 'with' guarantees the file is closed even if tokenization raises;
        # the original leaked the handle in that case.
        with open(filename, "r") as file_fp:
            (text, lines, tokens) = toldiff_files.load_plain_text(
                file_fp, text, lines, tokens, separators, nguides)
    except IOError as e:
        # Python 2.6+/3.x compatible exception handling (the original used
        # the Python-2-only 'except IOError, e' and unpacked the exception).
        errmsg = e.strerror
        try:
            err_fp.write("toldiff: I/O error on file: ")
            err_fp.write(filename)
            err_fp.write("\n")
            err_fp.write("toldiff: I/O error message: ")
            err_fp.write(errmsg)
            err_fp.write("\n")
        except IOError:
            # Nothing left to report the failure to.
            pass
        sys.exit(10)
    return (text, lines, tokens)
def store_tolerance(tol_fnm, chg_txt, add_txt, del_txt, err_fp, separators, nguides):
    """Open and write the tolerance file *tol_fnm*.

    The change/add/delete tolerance tables plus the separator list and guide
    count are serialized by toldiff_files.save_tolerances. On an I/O error
    the problem is reported on *err_fp* and the program exits with status 30.
    """
    try:
        # 'with' guarantees the file is closed even if serialization raises.
        with open(tol_fnm, "w") as tol_fp:
            toldiff_files.save_tolerances(
                tol_fp, chg_txt, add_txt, del_txt, err_fp, separators, nguides)
    except IOError as e:
        # Python 2.6+/3.x compatible exception handling (the original used
        # the Python-2-only 'except IOError, e' and unpacked the exception).
        errmsg = e.strerror
        try:
            err_fp.write("toldiff: I/O error encountered attempting to write: ")
            err_fp.write(tol_fnm)
            err_fp.write("\n")
            err_fp.write("toldiff: I/O error message: ")
            err_fp.write(errmsg)
            err_fp.write("\n")
        except IOError:
            # Nothing left to report the failure to.
            pass
        sys.exit(30)
def run_diff(diff_exe,ref_fnm,dat_fnm,ref,dat,fp):
    """This routine starts off an external diff program.
    As the tokenized versions of the reference and data files do not exist
    these have to be written first. Next the diff program is started.
    Both the stdout and stderr file descriptors are returned as due file
    buffer space the diff program cannot complete if stdout is not read.
    So only after reading stdout to drive diff to completion can stderr be
    checked to see if diff ran successfully.
    If an error is reported on stderr this should be passed on to the user
    and the program should terminate.
    After diff has run the tokenized files should be deleted.
    - diff_exe - the path of the diff executable
    - ref_fnm - the filename for the temporary tokenized reference file
    - dat_fnm - the filename for the temporary tokenized data file
    - ref - the tokenized reference
    - dat - the tokenized data
    - fp - a file descriptor for error reporting
    """
    # NOTE(review): the command line is built by plain concatenation and run
    # through a shell by os.popen3, so file names containing shell meta
    # characters would be misinterpreted. os.popen3 is Python-2 only.
    cmd = diff_exe+" "+ref_fnm+" "+dat_fnm
    # Write the tokenized reference to its temporary file; exit 25 on failure.
    try:
        ref_fp = open(ref_fnm,"w")
        toldiff_files.save_tokenized(ref_fp,ref,fp)
        ref_fp.close()
    except IOError, e:
        (errno,errmsg) = e
        try:
            fp.write("toldiff: I/O error on tokenized reference file\n")
            fp.write("toldiff: I/O error message: ")
            fp.write(errmsg)
            fp.write("\n")
        except IOError, e:
            # Nothing left to report the failure to.
            pass
        sys.exit(25)
    # Write the tokenized data to its temporary file; exit 25 on failure.
    try:
        dat_fp = open(dat_fnm,"w")
        toldiff_files.save_tokenized(dat_fp,dat,fp)
        dat_fp.close()
    except IOError, e:
        (errno,errmsg) = e
        try:
            fp.write("toldiff: I/O error on tokenized data file\n")
            fp.write("toldiff: I/O error message: ")
            fp.write(errmsg)
            fp.write("\n")
        except IOError, e:
            pass
        sys.exit(25)
    # Launch diff, capturing stdin/stdout/stderr pipes; exit 25 on failure.
    try:
        (in_fp,out_fp,err_fp) = os.popen3(cmd)
    except IOError, e:
        (errno,errmsg) = e
        try:
            fp.write("toldiff: I/O error on external diff standard error file\n")
            fp.write("toldiff: I/O error message: ")
            fp.write(errmsg)
            fp.write("\n")
        except IOError, e:
            pass
        sys.exit(25)
    # diff reads the files named on its command line, not stdin; close it.
    in_fp.close()
    # Caller must drain out_fp before checking err_fp (see docstring).
    return (out_fp,err_fp)
def find_overall_lcs(lexact,ltol,lbest,tol,ref_fnm,dat_fnm,diff_exe,feps,ieps,err_fp,separators,nguides,snake_trim,update):
    """Find the overall LCS including the tolerances. The general procedure is
    simply to establish the exact LCS, then try to resolve as much of the
    mismatches by considering the tolerances, then try to match the remaining
    differences to minimize the mismatches.
    This routine will read in the reference file and the data file as well.
    The reason for this is that this is more efficient in case an external
    diff program is used for the first phase.
    The routine returns the overall LCS, the reference file text, the data
    file text and beginning and ending token numbers of both files.
    This routine allows each phase to be disabled explicitly through a
    flag passed in as an argument:
    - lexact: if false skip the exact matching
    - ltol  : if false skip the tolerant matching
    - lbest : if false skip the minimal difference matching.
    The number of guides is specified in nguides. This is used in reading
    in the reference and data files.
    """
    lcs = [ ]
    #
    # Phase 1: exact matching, either built-in or via an external diff.
    #
    if lexact:
        if diff_exe == "":
            # Built-in LCS. Token numbering for both files starts at 1.
            Nb = 1
            Ntb = 1
            Mb = 1
            Mtb = 1
            (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
            (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
            lcs = toldiff_lcs.find_lcs1(ref,Ntb,Nte,dat,Mtb,Mte)
        else:
            # External diff on tokenized copies written to temporary files.
            error = false
            Nb = 1
            Ntb = 1
            Mb = 1
            Mtb = 1
            (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
            (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
            #
            # Construct temporary file names
            #
            pid = os.getpid()
            # The extra "a" and "b" ensure unique file names even if the reference
            # and data file names are the same.
            tmp_ref_fnm = ref_fnm+"a"+str(pid)
            tmp_dat_fnm = dat_fnm+"b"+str(pid)
            #
            # Construct temporary files, invoke diff and parse diff output
            #
            (diff_out_fp,diff_err_fp) = run_diff(diff_exe,tmp_ref_fnm,tmp_dat_fnm,ref,dat,err_fp)
            lcs = toldiff_diff.diff_to_lcs(Ntb,Nte,Mtb,Mte,diff_out_fp,err_fp)
            diff_out_fp.close()
            #
            # Delete temporary files
            #
            os.remove(tmp_ref_fnm)
            os.remove(tmp_dat_fnm)
            #
            # Check whether the diff program detected any errors
            # (stderr may only be read after stdout was drained above).
            #
            try:
                line = diff_err_fp.readline()
                while line:
                    error = true
                    err_fp.write("toldiff:"+line)
                    line = diff_err_fp.readline()
                diff_err_fp.close()
            except IOError, e:
                (errno,errmsg) = e
                try:
                    err_fp.write("toldiff: I/O error on external diff standard error file\n")
                    err_fp.write("toldiff: I/O error message: ")
                    err_fp.write(errmsg)
                    err_fp.write("\n")
                except IOError, e:
                    pass
                sys.exit(25)
            if error:
                sys.exit(20)
    else:
        # Exact phase suppressed: still load both files so that the later
        # phases (and the caller) have the texts and token ranges.
        Nb = 1
        Ntb = 1
        Mb = 1
        Mtb = 1
        (ref,Ne,Nte) = load_file(ref_fnm,err_fp,separators,nguides)
        (dat,Me,Mte) = load_file(dat_fnm,err_fp,separators,nguides)
    #Snake trimming may only be used here!
    if (snake_trim and ((ltol and len(tol) > 0 ) or (update and lbest))):
        lcs = toldiff_lcs.trim_snakes(lcs,ref,Ntb,Nte,dat,Mtb,Mte)
    #
    # Phase 2: tolerant matching of the gaps between the exact snakes.
    #
    if (len(tol) <= 0) or (not ltol):
        #
        # No tolerances were specified or this phase is explicitly suppressed
        #
        pass
    #
    else:
        #
        # Consider all the differences and try to resolve as many as possible.
        #
        if (len(lcs) <= 0):
            #
            # Then the new LCS is simply the result of the tolerant diff
            #
            lcs = toldiff_lcs.find_lcs2(tol,ref,Ntb,Nte,dat,Mtb,Mte,feps,ieps)
        #
        else:
            #
            # First consider whether there is anything to compare before the first
            # snake
            #
            lcs1 = lcs
            (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
            if (xbot1 > Mtb) and (ybot1 > Ntb):
                lcs = toldiff_lcs.find_lcs2(tol,ref,Ntb,ybot1-1,dat,Mtb,xbot1-1,feps,ieps)
            else:
                lcs = [ ]
            # Walk the remaining snakes, tolerantly matching each gap.
            # (xtop0, ytop0) track the end of the previously kept snake.
            xtop0 = xtop1
            ytop0 = ytop1
            lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            while (len(lcs1) > 0 ):
                (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
                if (xbot1 > xtop0+1) and (ybot1 > ytop0+1):
                    lcs2 = toldiff_lcs.find_lcs2(tol,ref,ytop0+1,ybot1-1,dat,xtop0+1,xbot1-1,feps,ieps)
                    lcs = lcs + lcs2
                xtop0 = xtop1
                ytop0 = ytop1
                lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            if (Nte >= ytop0+1) and (Mte >= xtop0+1):
                #
                # There is some more stuff at the end left to do
                #
                lcs2 = toldiff_lcs.find_lcs2(tol,ref,ytop0+1,Nte,dat,xtop0+1,Mte,feps,ieps)
                lcs = lcs + lcs2
    #
    # Phase 3: best (minimal-difference) matching of what is still unmatched.
    #
    if (not lbest):
        #
        # This phase is explicitly suppressed
        #
        pass
    #
    else:
        #
        # Consider all the differences and try to match different lines as best as
        # possible minimizing the number of differences.
        #
        #Snake trimming does not work here as the lcs3 may pair tokens up in a way
        #that is different from what lcs2 would do. The result of this inconsistency
        #is that some differences will never be tolerated! Clearly this breaks
        #toldiff.
        #lcs = toldiff_lcs.trim_snakes(lcs,ref,Ntb,Nte,dat,Mtb,Mte)
        if (len(lcs) <= 0):
            #
            # Then the new LCS is simply the result of the best match diff,
            # which will probably hurt as this will get very expensive.
            #
            lcs = toldiff_lcs.find_lcs3(tol,ref,Ntb,Nte,dat,Mtb,Mte,feps,ieps)
        #
        else:
            #
            # First consider whether there is anything to compare before the first
            # snake
            #
            lcs1 = lcs
            (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
            if (xbot1 > Mtb) and (ybot1 > Ntb):
                lcs = toldiff_lcs.find_lcs3(tol,ref,Ntb,ybot1-1,dat,Mtb,xbot1-1,feps,ieps)
            else:
                lcs = [ ]
            # Same gap-walking scheme as phase 2, but with the expensive
            # best-match LCS (find_lcs3) on each gap.
            xtop0 = xtop1
            ytop0 = ytop1
            lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            while (len(lcs1) > 0 ):
                (xbot1,ybot1,xtop1,ytop1,type1) = lcs1.pop(0)
                if (xbot1 > xtop0+1) and (ybot1 > ytop0+1):
                    lcs2 = toldiff_lcs.find_lcs3(tol,ref,ytop0+1,ybot1-1,dat,xtop0+1,xbot1-1,feps,ieps)
                    lcs = lcs + lcs2
                xtop0 = xtop1
                ytop0 = ytop1
                lcs.append((xbot1,ybot1,xtop1,ytop1,type1))
            if (Nte >= ytop0+1) and (Mte >= xtop0+1):
                #
                # There is some more stuff at the end left to do
                #
                lcs2 = toldiff_lcs.find_lcs3(tol,ref,ytop0+1,Nte,dat,xtop0+1,Mte,feps,ieps)
                lcs = lcs + lcs2
    return (lcs,ref,Ntb,Nte,dat,Mtb,Mte)
# Pre-bool Python 2 idiom: define our own truth constants.
true = (0 == 0)
false = not true
#
# Set up the default comparison options
#
diff = 1
update = 2
transfer = 3
show = 4
process = diff
# Which matching phases are enabled by default (see find_overall_lcs).
lexact = true
ltol = true
lbest = false
#
# Set up default comparison results
#
identical = 1
equivalent = 2
different = 3
#
# Set up default comparison exit codes
#
exit_identical = 0
exit_equivalent = 0
exit_different = 1
#
# Set up default comparison summary texts
#
text_identical = "identical"
text_equivalent = "equivalent"
text_different = "different"
#
# Set up output options and default output option
#
output_full = 3
output_summary = 2
output_none = 1
output = output_full
#
# Set up the default list of separator characters for the reference and
# data file tokenisation. In addition to these characters whitespace will
# be used as token separator as well. Note that a long list of separators
# deteriorates the performance significantly.
#
# Separators is the list of additional separator characters used for the
# reference file and the data file.
# Separators_new is the list of additional separator characters used for the
# new reference file in case of a transfer operation.
#
separators = []
separators_new = []
#
# Set the default snake trimming behaviour
#
snake_trim = false
#
# Set the default number of guides
#
nguides = 0
#
# Set up default precisions for floating point and integer numbers
#
feps = 1.0e-12
ieps = 0.1
#
# Set up default scale factors for new tolerances
#
tol_scale = 1.0
itol_scale = tol_scale
ftol_scale = tol_scale
ctol_scale = tol_scale
# Working state: the LCS, executable and file names filled in by the
# environment / command-line handling below.
lcs = [ ]
diff_exe = ""
tol_fnm = ""
tol_new_fnm = ""
ref_fnm = ""
dat_fnm = ""
narg = len(sys.argv)
iarg = 1
# Environment variables may override the built-in defaults; any command-line
# option processed later overrides the environment in turn.
# (has_key is the Python 2 dictionary membership test.)
if os.environ.has_key("TOLDIFF_EXE"):
    diff_exe = os.environ["TOLDIFF_EXE"]
if os.environ.has_key("TOLDIFF_OUTPUT"):
    output = os.environ["TOLDIFF_OUTPUT"]
    if output == "FULL" or output == "full":
        output = output_full
    elif output == "SUMMARY" or output == "summary":
        output = output_summary
    elif output == "NONE" or output == "none":
        output = output_none
# TOLDIFF_EXIT holds "identical:equivalent:different" exit codes.
if os.environ.has_key("TOLDIFF_EXIT"):
    exit_codes = os.environ["TOLDIFF_EXIT"]
    exit_codes = string.split(exit_codes,":")
    if len(exit_codes) == 3:
        exit_identical = int(exit_codes[0])
        exit_equivalent = int(exit_codes[1])
        exit_different = int(exit_codes[2])
# TOLDIFF_SUMMARY holds "identical:equivalent:different" summary texts.
if os.environ.has_key("TOLDIFF_SUMMARY"):
    text_summaries = os.environ["TOLDIFF_SUMMARY"]
    text_summaries = string.split(text_summaries,":")
    if len(text_summaries) == 3:
        text_identical = text_summaries[0]
        text_equivalent = text_summaries[1]
        text_different = text_summaries[2]
# Tolerance scale factors are clamped to at least tol_scale (1.0).
if os.environ.has_key("TOLDIFF_ITOLSCALE"):
    itol_scale = max(tol_scale,float(os.environ["TOLDIFF_ITOLSCALE"]))
if os.environ.has_key("TOLDIFF_FTOLSCALE"):
    ftol_scale = max(tol_scale,float(os.environ["TOLDIFF_FTOLSCALE"]))
if os.environ.has_key("TOLDIFF_CTOLSCALE"):
    ctol_scale = max(tol_scale,float(os.environ["TOLDIFF_CTOLSCALE"]))
# Whitespace-separated list of extra single-character token separators.
if os.environ.has_key("TOLDIFF_SEPARATORS"):
    separators = string.split(os.environ["TOLDIFF_SEPARATORS"])
    separators_new = string.split(os.environ["TOLDIFF_SEPARATORS"])
if os.environ.has_key("TOLDIFF_GUIDES"):
    nguides = max(0,int(os.environ["TOLDIFF_GUIDES"]))
if os.environ.has_key("TOLDIFF_BACKTRACK"):
    tmptxt = os.environ["TOLDIFF_BACKTRACK"]
    tmptxt = tmptxt.lower()
    if tmptxt == "yes" or tmptxt == "y":
        snake_trim = true
    elif tmptxt == "no" or tmptxt == "n":
        snake_trim = false
    else:
        try:
            sys.stderr.write("toldiff: invalid value for TOLDIFF_BACKTRACK should be \"yes\" or \"no\"\n")
        except IOError, e:
            pass
        sys.exit(5)
if narg == 1:
usage_toldiff(sys.stdout,sys.stderr)
while iarg < narg:
if sys.argv[iarg] == "--exact":
lexact = true
elif sys.argv[iarg] == "--noexact":
lexact = false
elif sys.argv[iarg] == "--tolerant":
ltol = true
elif sys.argv[iarg] == "--notolerant":
ltol = false
elif sys.argv[iarg] == "--best":
lbest = true
elif sys.argv[iarg] == "--nobest":
lbest = false
elif sys.argv[iarg] == "--tolerance":
iarg = iarg + 1
if iarg < narg:
tol_fnm = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing tolerance file name\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--new-tolerance":
iarg = iarg + 1
if iarg < narg:
tol_new_fnm = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing new tolerance file name\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--diff-exe":
iarg = iarg + 1
if iarg < narg:
diff_exe = sys.argv[iarg]
else:
try:
sys.stderr.write("toldiff: missing diff executable specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--diff":
process = diff
elif sys.argv[iarg] == "--update":
process = update
elif sys.argv[iarg] == "--transfer":
process = transfer
elif sys.argv[iarg] == "--show":
process = show
elif sys.argv[iarg] == "--version":
toldiff_files.version_toldiff(sys.stdout,sys.stderr)
sys.exit(0)
elif sys.argv[iarg] == "--help":
usage_toldiff(sys.stdout,sys.stderr)
elif sys.argv[iarg] == "--license":
license_toldiff(sys.stdout,sys.stderr)
elif sys.argv[iarg] == "--exit":
iarg = iarg + 1
if iarg < narg:
exit_codes = sys.argv[iarg]
exit_codes = string.split(exit_codes,":")
if len(exit_codes) == 3:
exit_identical = int(exit_codes[0])
exit_equivalent = int(exit_codes[1])
exit_different = int(exit_codes[2])
else:
try:
sys.stderr.write("toldiff: missing exit codes specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--summary":
iarg = iarg + 1
if iarg < narg:
text_summaries = sys.argv[iarg]
text_summaries = string.split(text_summaries,":")
if len(text_summaries) == 3:
text_identical = text_summaries[0]
text_equivalent = text_summaries[1]
text_different = text_summaries[2]
else:
try:
sys.stderr.write("toldiff: missing summaries specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--output":
iarg = iarg + 1
if iarg < narg:
output = sys.argv[iarg]
if output == "FULL" or output == "full":
output = output_full
elif output == "SUMMARY" or output == "summary":
output = output_summary
elif output == "NONE" or output == "none":
output = output_none
else:
sys.stderr.write("toldiff: unknown output specification: %s\n" % output)
sys.exit(5)
else:
try:
sys.stderr.write("toldiff: missing output specification\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--itol-scale":
iarg = iarg + 1
if iarg < narg:
itol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing integer tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--ftol-scale":
iarg = iarg + 1
if iarg < narg:
ftol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing floating point tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--ctol-scale":
iarg = iarg + 1
if iarg < narg:
ctol_scale = max(tol_scale,float(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing complex tolerance scale factor\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--separators":
iarg = iarg + 1
if iarg < narg:
separators = string.split(sys.argv[iarg])
separators_new = string.split(sys.argv[iarg])
i = 0
n = len(separators)
while (i < n):
if len(separators[i]) != 1:
sys.stderr.write("toldiff: separator character list is not a list of single characters\n")
sys.stderr.write("toldiff: --separators \""+sys.argv[iarg]+"\"\n")
sys.exit(5)
i = i + 1
elif sys.argv[iarg] == "--guides":
iarg = iarg + 1
if iarg < narg:
nguides = max(0,int(sys.argv[iarg]))
else:
try:
sys.stderr.write("toldiff: missing number of guides\n")
except IOError, e:
pass
sys.exit(5)
elif sys.argv[iarg] == "--backtrack":
snake_trim = true
elif sys.argv[iarg] == "--nobacktrack":
snake_trim = false
else:
argstr = sys.argv[iarg]
if (process < show) and (iarg == narg-2):
ref_fnm = sys.argv[iarg]
iarg = iarg + 1
dat_fnm = sys.argv[iarg]
elif (process == show) and (iarg == narg-1):
ref_fnm = sys.argv[iarg]
elif argstr[0:1] == "-":
try:
sys.stderr.write("toldiff: unknow option encountered: ")
sys.stderr.write(argstr)
sys.stderr.write("\n")
except IOError, e:
pass
sys.exit(8)
else:
sys.stderr.write("toldiff: missing reference or data files?\n")
sys.exit(9)
iarg = iarg + 1
# Validate that the mandatory file names were supplied.
if ref_fnm == "":
    sys.stderr.write("toldiff: error: no reference filename given\n")
    sys.exit(5)
if (process < show) and (dat_fnm == ""):
    # Every operation except --show also needs a data file.
    sys.stderr.write("toldiff: error: no data filename given\n")
    sys.exit(6)
# Derive the tolerance file name(s) when not given explicitly.
tol_fnm = construct_tolerance_filename(ref_fnm,dat_fnm,tol_fnm)
if process == transfer:
    tol_new_fnm = construct_tolerance_filename(dat_fnm,ref_fnm,tol_new_fnm)
# Tolerance data: change/add/delete tables keyed per token position.
ref_txt = { }
dat_txt = { }
chg_txt = { }
add_txt = { }
del_txt = { }
ref_lines = 0
dat_lines = 0
try:
    tol_fp = open(tol_fnm,"r")
    (chg_txt,add_txt,del_txt,separators) = toldiff_files.load_tolerances(tol_fp,separators,nguides)
    tol_fp.close()
except IOError, e:
    #
    # If an exception was thrown it is assumed that there is no valid
    # tolerance file present. Hence proceed as if there is no tolerance
    # information.
    #
    pass
# Dispatch on the requested operation.
if process == diff:
    # Compare the data file against the reference within tolerances.
    (lcs,ref_txt,Ntb,Nte,dat_txt,Mtb,Mte) = find_overall_lcs(lexact,ltol,lbest,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,false)
    lcs = toldiff_lcs.filter_lcs(lcs,Ntb,Nte,Mtb,Mte,add_txt,del_txt)
    analysis = toldiff_diff.lcs_analysis(Ntb,Nte,Mtb,Mte,lcs,identical,equivalent,different)
    if output == output_full:
        # Full diff-style output on stdout.
        (line_lcs,Nlb,Nle,Mlb,Mle) = toldiff_lcs.lcs_tokens2lines(lcs,ref_txt,Ntb,Nte,dat_txt,Mtb,Mte,nguides)
        toldiff_diff.lcs_to_diff(ref_txt,Nlb,Nle,dat_txt,Mlb,Mle,line_lcs,sys.stdout,sys.stderr,nguides)
    elif output == output_summary:
        # One-word summary only.
        if analysis == identical:
            sys.stdout.write("%s" % text_identical)
        elif analysis == equivalent:
            sys.stdout.write("%s" % text_equivalent)
        elif analysis == different:
            sys.stdout.write("%s" % text_different)
        else:
            sys.stderr.write("illegal value of analysis")
    elif output == output_none:
        pass
    else:
        sys.stderr.write("illegal value of output")
    # Exit code reflects the comparison result.
    if analysis == identical:
        sys.exit(exit_identical)
    elif analysis == equivalent:
        sys.exit(exit_equivalent)
    elif analysis == different:
        sys.exit(exit_different)
    else:
        sys.stderr.write("illegal value of analysis")
elif process == update:
    # Learn new tolerances from the remaining differences and store them.
    (lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines) = find_overall_lcs(true,true,true,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,true)
    chg_txt = toldiff_update.lcs_to_change(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,chg_txt,feps,ieps,itol_scale,ftol_scale,ctol_scale)
    add_txt = toldiff_update.lcs_to_add(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,add_txt)
    del_txt = toldiff_update.lcs_to_delete(lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines,del_txt)
    store_tolerance(tol_fnm,chg_txt,add_txt,del_txt,sys.stderr,separators,nguides)
elif process == transfer:
    # Map existing tolerances onto a new reference file.
    (lcs,ref_txt,Nb,ref_lines,dat_txt,Mb,dat_lines) = find_overall_lcs(true,true,false,chg_txt,ref_fnm,dat_fnm,diff_exe,feps,ieps,sys.stderr,separators,nguides,snake_trim,false)
    (chg_new,add_new,del_new) = toldiff_transfer.transfer_tol(lcs,Nb,ref_lines,Mb,dat_lines,chg_txt,add_txt,del_txt)
    store_tolerance(tol_new_fnm,chg_new,add_new,del_new,sys.stderr,separators_new,nguides)
elif process == show:
    # Display the reference annotated with its current tolerances.
    Nb = 1
    Ntb = 1
    (ref_txt,Ne,Nte) = load_file(ref_fnm,sys.stderr,separators,nguides)
    toldiff_show.show_tolerance(sys.stdout,ref_txt,Nb,Ne,chg_txt,add_txt,del_txt,sys.stderr,nguides)
else:
    # Should be unreachable: process is always one of the four constants.
    try:
        sys.stderr.write("toldiff: internal error: invalid process")
    except IOError, e:
        pass
    sys.exit(999)
| 36.499568 | 175 | 0.673336 |
c9ec67e739da8431aa5c39d649a7e5eb15794f15 | 6,973 | py | Python | LOG.py | viniciusdc/Protein_structure_SPGm | 861672071f2a47b54e4624fc1f69cf3fff0ff356 | [
"MIT"
] | null | null | null | LOG.py | viniciusdc/Protein_structure_SPGm | 861672071f2a47b54e4624fc1f69cf3fff0ff356 | [
"MIT"
] | null | null | null | LOG.py | viniciusdc/Protein_structure_SPGm | 861672071f2a47b54e4624fc1f69cf3fff0ff356 | [
"MIT"
] | null | null | null | from Methods.utils import rmsd, mde
from datetime import datetime
import logging
import json
import sys
| 43.855346 | 113 | 0.443711 |
c9ee06d94f8d8d17974a31803833016ac95dc1d7 | 1,968 | py | Python | test/test_a69DisjointProperties.py | IDLabResearch/lovstats | dd33183574eed692ee89059ff3c6494160dfb8a9 | [
"MIT"
] | 1 | 2018-12-11T13:57:38.000Z | 2018-12-11T13:57:38.000Z | test/test_a69DisjointProperties.py | IDLabResearch/lovstats | dd33183574eed692ee89059ff3c6494160dfb8a9 | [
"MIT"
] | null | null | null | test/test_a69DisjointProperties.py | IDLabResearch/lovstats | dd33183574eed692ee89059ff3c6494160dfb8a9 | [
"MIT"
] | null | null | null | import unittest
import sys
import helpers
sys.path.append('../LODStats')
sys.path.append('../src/restriction-types-stats')
from A69DisjointProperties import A69DisjointProperties
import lodstats
from lodstats import RDFStats
testfile_path = helpers.resources_path | 41.87234 | 113 | 0.701728 |
c9f038d1fb5d0607ea396a1c5e9bb4c50b48b589 | 449 | py | Python | src/services/sms.py | HutRubberDuck/super-mini-divar | 191c2f9a412ef879b52f4a71e0fe74743138ab13 | [
"Apache-2.0"
] | null | null | null | src/services/sms.py | HutRubberDuck/super-mini-divar | 191c2f9a412ef879b52f4a71e0fe74743138ab13 | [
"Apache-2.0"
] | null | null | null | src/services/sms.py | HutRubberDuck/super-mini-divar | 191c2f9a412ef879b52f4a71e0fe74743138ab13 | [
"Apache-2.0"
] | null | null | null | from kavenegar import KavenegarAPI, APIException, HTTPException
from src.core.settings import OTP_API_KEY
| 23.631579 | 63 | 0.605791 |
c9f1e7cdebfd2710c6c2b7bf206e8cee0c794ff2 | 43 | py | Python | test.py | Taraxa-project/taraxa-py | 95aa0d8054bf4eba2c3200f3298421575b7bb5a0 | [
"MIT"
] | null | null | null | test.py | Taraxa-project/taraxa-py | 95aa0d8054bf4eba2c3200f3298421575b7bb5a0 | [
"MIT"
] | 1 | 2022-03-02T15:51:17.000Z | 2022-03-02T15:51:17.000Z | test.py | Taraxa-project/taraxa-py | 95aa0d8054bf4eba2c3200f3298421575b7bb5a0 | [
"MIT"
] | null | null | null | from pytaraxa.test import *
blockNumber()
| 10.75 | 27 | 0.767442 |
c9f2d64566db5376ed467678309c5e2282462dda | 923 | py | Python | src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py | granum-space/cansat-2017-2018 | 4d9db6f2d55c726e11abbb60fd436ec3eafc2373 | [
"MIT"
] | null | null | null | src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py | granum-space/cansat-2017-2018 | 4d9db6f2d55c726e11abbb60fd436ec3eafc2373 | [
"MIT"
] | 9 | 2017-10-31T19:20:05.000Z | 2018-06-17T19:08:52.000Z | src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py | granum-space/cansat-2018 | 4d9db6f2d55c726e11abbb60fd436ec3eafc2373 | [
"MIT"
] | 1 | 2018-06-12T11:30:10.000Z | 2018-06-12T11:30:10.000Z | import random
import logging
import time
from datetime import timedelta
from pymavlink import mavutil
_log = logging.getLogger(__name__)
| 23.666667 | 60 | 0.612134 |
c9f4af671dfa98273bbb6368b1d6afc8208adaae | 12,548 | py | Python | tests/test_locator.py | somnathrakshit/geograpy3 | 8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4 | [
"Apache-2.0"
] | 53 | 2020-09-09T06:58:29.000Z | 2022-03-08T19:16:12.000Z | tests/test_locator.py | somnathrakshit/geograpy3 | 8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4 | [
"Apache-2.0"
] | 51 | 2020-09-09T09:31:27.000Z | 2022-01-17T07:12:27.000Z | tests/test_locator.py | somnathrakshit/geograpy3 | 8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4 | [
"Apache-2.0"
] | 9 | 2020-09-09T09:13:03.000Z | 2021-12-14T11:04:34.000Z | '''
Created on 2020-09-19
@author: wf
'''
import os.path
import tempfile
import unittest
from pathlib import Path
from lodstorage.storageconfig import StorageConfig
import geograpy
import getpass
from geograpy.locator import Locator, City,CountryManager, Location, LocationContext
from collections import Counter
from lodstorage.uml import UML
import re
from tests.basetest import Geograpy3Test
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 34.190736 | 172 | 0.582483 |
c9fafb5b1dfbe210783fd95968a164f6159dfcac | 685 | py | Python | Python/threadingProcess.py | GuruprasadaShridharHegde/Coder-Mansion | 14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d | [
"MIT"
] | 1 | 2022-01-19T04:22:21.000Z | 2022-01-19T04:22:21.000Z | Python/threadingProcess.py | GuruprasadaShridharHegde/Coder-Mansion | 14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d | [
"MIT"
] | null | null | null | Python/threadingProcess.py | GuruprasadaShridharHegde/Coder-Mansion | 14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d | [
"MIT"
] | null | null | null | # example of automatically starting a thread
from time import sleep
from threading import Thread
# custom thread class that automatically starts threads when they are constructed
# task function
def task():
print('Task starting')
# block for a moment
sleep(1)
# report
print('Task all done')
# create and start the new thread
thread = AutoStartThread(target=task)
# wait for the new thread to finish
thread.join() | 27.4 | 82 | 0.668613 |
c9fbf38f83d878c53f0d81d49f3d590917067274 | 4,332 | py | Python | bin/cora_edit_singletoken.py | comphist/cora | 71555df9a520ccab063a8c5eb907feaa1dd88b38 | [
"MIT"
] | 10 | 2017-07-08T12:05:32.000Z | 2019-09-22T17:39:12.000Z | bin/cora_edit_singletoken.py | comphist/cora | 71555df9a520ccab063a8c5eb907feaa1dd88b38 | [
"MIT"
] | 31 | 2017-02-24T19:29:51.000Z | 2020-11-09T15:58:44.000Z | bin/cora_edit_singletoken.py | comphist/cora | 71555df9a520ccab063a8c5eb907feaa1dd88b38 | [
"MIT"
] | 7 | 2017-02-27T12:25:55.000Z | 2022-01-13T08:55:01.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Marcel Bollmann <bollmann@linguistics.rub.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import json
import argparse
if __name__ == '__main__':
description = "Reads a file containing a single token and returns it unchanged in JSON format. Intended to be called from within CorA."
epilog = ""
parser = argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument('infile',
metavar='INPUT',
nargs='?',
default=sys.stdin,
type=argparse.FileType('r'),
help='Input file')
# exists for legacy reasons:
parser.add_argument('-s', '--split',
action='store_true',
default=False,
help=('Parse pipe (|) and hash (#) as tokenization symbols; '
'equivalent to --split-mod="|" --split-dipl="#"'))
parser.add_argument('--split-mod',
default='',
type=str,
help='Symbol to split into two moderns (default: None)')
parser.add_argument('--split-dipl',
default='',
type=str,
help='Symbol to split into two dipls (default: None)')
# parser.add_argument('-e', '--encoding',
# default='utf-8',
# help='Encoding of the input file (default: utf-8)')
arguments = parser.parse_args()
# launching application ...
MainApplication(arguments).run()
| 40.111111 | 140 | 0.593029 |
c9ff48db97e05614b8ced49da35379affb1221e8 | 1,851 | py | Python | datasets/utils.py | lulindev/UNet-pytorch | cf91e251891a2926f46b628985ebdda66bc637a2 | [
"MIT"
] | 3 | 2021-04-07T08:05:44.000Z | 2021-06-25T16:55:56.000Z | datasets/utils.py | lulindev/UNet-pytorch | cf91e251891a2926f46b628985ebdda66bc637a2 | [
"MIT"
] | null | null | null | datasets/utils.py | lulindev/UNet-pytorch | cf91e251891a2926f46b628985ebdda66bc637a2 | [
"MIT"
] | 2 | 2021-08-19T10:23:32.000Z | 2021-12-15T03:26:11.000Z | from typing import Union
import matplotlib.pyplot as plt
import torch
import torchvision
# Validate dataset loading code
| 35.596154 | 81 | 0.611021 |
c9ffd31b49092a967f11f75892dae5ddf2b9ea57 | 1,373 | py | Python | src/lm_based/translate_start_end.py | vered1986/time_expressions | 32d182d7f741eec007141f5ca89c0d419e23a9a7 | [
"Apache-2.0"
] | 1 | 2022-02-25T15:00:42.000Z | 2022-02-25T15:00:42.000Z | src/lm_based/translate_start_end.py | vered1986/time_expressions | 32d182d7f741eec007141f5ca89c0d419e23a9a7 | [
"Apache-2.0"
] | null | null | null | src/lm_based/translate_start_end.py | vered1986/time_expressions | 32d182d7f741eec007141f5ca89c0d419e23a9a7 | [
"Apache-2.0"
] | null | null | null | import os
import json
import logging
import argparse
from src.common.translate import translate_time_expression_templates, get_client
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 32.690476 | 118 | 0.680991 |
c9ffdbe67a40939dca316bf68000c8d9a8156ccf | 1,477 | py | Python | overlord/views.py | kimani-njoroge/Uber_Clone | 610a242c75e2873897f8dc9458371c32e52d11ef | [
"MIT"
] | null | null | null | overlord/views.py | kimani-njoroge/Uber_Clone | 610a242c75e2873897f8dc9458371c32e52d11ef | [
"MIT"
] | 4 | 2020-06-05T18:47:50.000Z | 2021-09-08T00:00:03.000Z | overlord/views.py | kimani-njoroge/Uber_Clone | 610a242c75e2873897f8dc9458371c32e52d11ef | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib.auth import get_user_model
from .forms import DriverSignupForm, RiderSignupForm
from driver.models import Driver
User = get_user_model()
# Create your views here.
| 30.142857 | 92 | 0.65606 |
a0011285cd812341126bdf7a6b702e5a57d05603 | 23,485 | py | Python | old/Lissajous/Lissajous.py | Tony031218/manim-projects | b243dec0f0a007649a92938e90d60eccb4c7dd15 | [
"Apache-2.0"
] | 45 | 2019-10-08T23:58:20.000Z | 2020-05-20T03:49:15.000Z | old/Lissajous/Lissajous.py | Tony031218/manim-projects | b243dec0f0a007649a92938e90d60eccb4c7dd15 | [
"Apache-2.0"
] | null | null | null | old/Lissajous/Lissajous.py | Tony031218/manim-projects | b243dec0f0a007649a92938e90d60eccb4c7dd15 | [
"Apache-2.0"
] | 12 | 2019-08-15T08:07:22.000Z | 2020-05-09T12:34:14.000Z | from manimlib.imports import *
from manim_projects.tony_useful.imports import *
def smooth2(t, inflection=6):
    """Sigmoid-based easing of t over [0, 1], clipped to [0, 1].

    inflection controls the steepness of the S-curve; the sigmoid is
    shifted and rescaled so the curve is 0 at t=0 and 1 at t=1.
    """
    # Value of the raw sigmoid at t=0; used to rescale onto [0, 1].
    tail = sigmoid(-inflection / 2)
    eased = (sigmoid(inflection * (t - 0.5)) - tail) / (1 - 2 * tail)
    return np.clip(eased, 0, 1)
| 39.60371 | 157 | 0.584245 |
a001a953fb7ca73d48a5c0947ed5285912738fe8 | 3,612 | py | Python | socatlord/operations.py | Cervi-Robotics/socatlord | e4d8964cb696c789807d2276698d596dfb68dc2b | [
"MIT"
] | 2 | 2021-05-30T01:05:38.000Z | 2021-12-21T21:20:00.000Z | socatlord/operations.py | Cervi-Robotics/socatlord | e4d8964cb696c789807d2276698d596dfb68dc2b | [
"MIT"
] | null | null | null | socatlord/operations.py | Cervi-Robotics/socatlord | e4d8964cb696c789807d2276698d596dfb68dc2b | [
"MIT"
] | 2 | 2021-05-30T01:05:44.000Z | 2021-12-21T21:19:46.000Z | import os
import subprocess
import sys
import time
import pkg_resources
from satella.coding import silence_excs
from satella.coding.sequences import smart_enumerate
from satella.files import write_to_file, read_in_file
from socatlord.parse_config import parse_etc_socatlord
| 36.857143 | 99 | 0.622647 |
a002a3319b840c90608c40a67a87ec1a46bcac4f | 2,303 | py | Python | src/authutils/oauth2/client/blueprint.py | dvenckusuchgo/authutils | 4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d | [
"Apache-2.0"
] | null | null | null | src/authutils/oauth2/client/blueprint.py | dvenckusuchgo/authutils | 4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d | [
"Apache-2.0"
] | 31 | 2018-02-12T22:32:49.000Z | 2022-01-06T21:39:44.000Z | src/authutils/oauth2/client/blueprint.py | dvenckusuchgo/authutils | 4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d | [
"Apache-2.0"
] | 2 | 2021-01-05T22:54:28.000Z | 2021-11-29T20:57:20.000Z | """
Provide a basic set of endpoints for an application to implement OAuth client
functionality.
These endpoints assume that the ``current_app`` has already been configured
with an OAuth client instance from the ``authlib`` package as follows:
.. code-block:: python
from authutils.oauth2.client import OAuthClient
from service.api import app
app.oauth_client = OAuthClient(
'client-id',
client_secret='...',
api_base_url='https://api.auth.net/',
access_token_url='https://auth.net/oauth/token',
authorize_url='https://auth.net/oauth/authorize',
client_kwargs={
'scope': 'openid data user',
'redirect_uri': 'https://service.net/authorize',
},
)
(NOTE the scopes are space-separated.)
"""
from urllib.parse import urljoin
from cdiserrors import APIError
import flask
from flask import current_app
import authutils.oauth2.client.authorize
blueprint = flask.Blueprint("oauth", __name__)
| 28.7875 | 84 | 0.685627 |
a0032619f7b2be9a51cd2a3915144c4401d3f01e | 655 | py | Python | tests/unit/utils/test_utils.py | jadami10/flower | 05e848d37a5abbdd4b34156d57a23166fc5efc3d | [
"BSD-3-Clause"
] | 7 | 2019-10-07T11:16:06.000Z | 2021-09-24T11:57:56.000Z | tests/unit/utils/test_utils.py | KonstantinKlepikov/flower | 89e71c8c00dcb51bc584e908fc6b2ba97706e89a | [
"BSD-3-Clause"
] | 3 | 2016-07-25T04:16:40.000Z | 2018-08-08T05:05:10.000Z | tests/unit/utils/test_utils.py | Gabriel-Desharnais/flowest | a8c6bdaa24317124c3ba27eed07d62f8c4cc8531 | [
"BSD-3-Clause"
] | 8 | 2019-08-27T16:05:32.000Z | 2021-12-15T17:29:03.000Z | import unittest
from flower.utils import bugreport
from celery import Celery
| 29.772727 | 60 | 0.674809 |
a005c8c77f2a7cfe589eb886411a380fd3864a2b | 4,124 | py | Python | swhlab/analysis/glance.py | swharden/SWHLab | a86c3c65323cec809a4bd4f81919644927094bf5 | [
"MIT"
] | 15 | 2017-03-09T03:08:32.000Z | 2021-11-16T11:31:55.000Z | swhlab/analysis/glance.py | swharden/SWHLab | a86c3c65323cec809a4bd4f81919644927094bf5 | [
"MIT"
] | 2 | 2016-12-06T16:27:54.000Z | 2017-11-04T23:48:49.000Z | swhlab/analysis/glance.py | swharden/SWHLab | a86c3c65323cec809a4bd4f81919644927094bf5 | [
"MIT"
] | 9 | 2016-10-19T13:32:10.000Z | 2020-04-01T21:53:40.000Z | """Methods to generate a SINGLE image to represent any ABF.
There are several categories which are grossly analyzed.
gain function:
* current clamp recording where command traces differ by sweep.
* must also have something that looks like an action potential
* will be analyzed with AP detection information
voltage clamp I/V:
* voltage clamp recording where command traces differ by sweep.
* image will simply be an overlay
drug experiment:
* voltage clamp or current clamp where every command is the same
* tags will be reported over a chronological graph
"""
import sys
import os
import glob
import matplotlib.pyplot as plt
sys.path.insert(0,'../../')
import swhlab
def processFolder(abfFolder):
"""call processAbf() for every ABF in a folder."""
if not type(abfFolder) is str or not len(abfFolder)>3:
return
files=sorted(glob.glob(abfFolder+"/*.abf"))
for i,fname in enumerate(files):
print("\n\n\n### PROCESSING {} of {}:".format(i,len(files)),os.path.basename(fname))
processAbf(fname,show=False)
plt.show()
return
def processAbf(abfFname,saveAs=False,dpi=100,show=True):
"""
automatically generate a single representative image for an ABF.
If saveAs is given (full path of a jpg of png file), the image will be saved.
Otherwise, the image will pop up in a matplotlib window.
"""
if not type(abfFname) is str or not len(abfFname)>3:
return
abf=swhlab.ABF(abfFname)
plot=swhlab.plotting.ABFplot(abf)
plot.figure_height=6
plot.figure_width=10
plot.subplot=False
plot.figure(True)
if abf.get_protocol_sequence(0)==abf.get_protocol_sequence(1) or abf.sweeps<2:
# same protocol every time
if abf.lengthMinutes<2:
# short (probably a memtest or tau)
ax1=plt.subplot(211)
plot.figure_sweeps()
plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
plt.gca().get_xaxis().set_visible(False)
plt.subplot(212,sharex=ax1)
plot.figure_protocol()
plt.title("")
else:
# long (probably a drug experiment)
plot.figure_chronological()
else:
# protocol changes every sweep
plots=[211,212] # assume we want 2 images
if abf.units=='mV': # maybe it's something with APs?
ap=swhlab.AP(abf) # go ahead and do AP detection
ap.detect() # try to detect APs
if len(ap.APs): # if we found some
plots=[221,223,222,224] # get ready for 4 images
ax1=plt.subplot(plots[0])
plot.figure_sweeps()
plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
plt.gca().get_xaxis().set_visible(False)
plt.subplot(plots[1],sharex=ax1)
plot.figure_protocols()
plt.title("protocol")
if len(plots)>2:
# assume we want to look at the first AP
ax2=plt.subplot(plots[2])
plot.rainbow=False
plot.kwargs["color"]='b'
plot.figure_chronological()
plt.gca().get_xaxis().set_visible(False)
plt.title("first AP magnitude")
# velocity plot
plt.subplot(plots[3],sharex=ax2)
plot.abf.derivative=True
plot.rainbow=False
plot.traceColor='r'
plot.figure_chronological()
plt.axis([ap.APs[0]["T"]-.05,ap.APs[0]["T"]+.05,None,None])
plt.title("first AP velocity")
if saveAs:
print("saving",os.path.abspath(saveAs))
plt.savefig(os.path.abspath(saveAs),dpi=dpi)
return
if show:
plot.show()
def selectFile():
"""launch an ABF file selector to determine what to glance at."""
plt.close("all") # get rid of old stuff
print("GLANCING AT A FILE:")
processAbf(swhlab.common.gui_getFile())
def selectFolder():
"""launch a folder selection dialog to glance at every ABF in a folder."""
plt.close("all") # get rid of old stuff
processFolder(swhlab.common.gui_getFolder())
if __name__=="__main__":
print("DONE") | 35.247863 | 92 | 0.626576 |
a005db92c36fe0ec0c9db64cfb4a8341416d95de | 24,671 | py | Python | catalog/bindings/csw/dictionary_entry_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/dictionary_entry_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/dictionary_entry_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
from bindings.csw.abstract_general_operation_parameter_ref_type import (
OperationParameterGroup,
)
from bindings.csw.actuate_type import ActuateType
from bindings.csw.base_unit import BaseUnit
from bindings.csw.cartesian_cs import CartesianCs
from bindings.csw.concatenated_operation import ConcatenatedOperation
from bindings.csw.conventional_unit import ConventionalUnit
from bindings.csw.coordinate_operation import CoordinateOperation
from bindings.csw.coordinate_reference_system import CoordinateReferenceSystem
from bindings.csw.coordinate_system import CoordinateSystem
from bindings.csw.coordinate_system_axis import CoordinateSystemAxis
from bindings.csw.crs import Crs
from bindings.csw.cylindrical_cs import CylindricalCs
from bindings.csw.datum import Datum
from bindings.csw.definition import Definition
from bindings.csw.definition_proxy import DefinitionProxy
from bindings.csw.definition_type import DefinitionType
from bindings.csw.derived_unit import DerivedUnit
from bindings.csw.ellipsoid import Ellipsoid
from bindings.csw.ellipsoidal_cs import EllipsoidalCs
from bindings.csw.engineering_crs import EngineeringCrs
from bindings.csw.engineering_datum import EngineeringDatum
from bindings.csw.general_conversion_ref_type import (
CompoundCrs,
Conversion,
DerivedCrs,
ProjectedCrs,
GeneralConversion,
GeneralDerivedCrs,
)
from bindings.csw.general_operation_parameter import GeneralOperationParameter
from bindings.csw.general_transformation import GeneralTransformation
from bindings.csw.geocentric_crs import GeocentricCrs
from bindings.csw.geodetic_datum import GeodeticDatum
from bindings.csw.geographic_crs import GeographicCrs
from bindings.csw.image_crs import ImageCrs
from bindings.csw.image_datum import ImageDatum
from bindings.csw.indirect_entry import IndirectEntry
from bindings.csw.linear_cs import LinearCs
from bindings.csw.oblique_cartesian_cs import ObliqueCartesianCs
from bindings.csw.operation_2 import Operation2
from bindings.csw.operation_method import OperationMethod
from bindings.csw.operation_parameter import OperationParameter
from bindings.csw.pass_through_operation import PassThroughOperation
from bindings.csw.polar_cs import PolarCs
from bindings.csw.prime_meridian import PrimeMeridian
from bindings.csw.reference_system import ReferenceSystem
from bindings.csw.show_type import ShowType
from bindings.csw.single_operation import SingleOperation
from bindings.csw.spherical_cs import SphericalCs
from bindings.csw.temporal_crs import TemporalCrs
from bindings.csw.temporal_cs import TemporalCs
from bindings.csw.temporal_datum import TemporalDatum
from bindings.csw.time_calendar import TimeCalendar
from bindings.csw.time_calendar_era import TimeCalendarEra
from bindings.csw.time_clock import TimeClock
from bindings.csw.time_coordinate_system import TimeCoordinateSystem
from bindings.csw.time_ordinal_reference_system import TimeOrdinalReferenceSystem
from bindings.csw.time_reference_system import TimeReferenceSystem
from bindings.csw.transformation import Transformation
from bindings.csw.type_type import TypeType
from bindings.csw.unit_definition import UnitDefinition
from bindings.csw.user_defined_cs import UserDefinedCs
from bindings.csw.vertical_crs import VerticalCrs
from bindings.csw.vertical_cs import VerticalCs
from bindings.csw.vertical_datum import VerticalDatum
__NAMESPACE__ = "http://www.opengis.net/gml"
| 31.588988 | 81 | 0.592963 |
a00686acf3a82fe67d9e295e22aaec66f4b36661 | 2,468 | py | Python | txt2epub_pdf/console.py | drthomas246/txt2epub-pdf | 09d12a61e0d6f66512af7fdf9abfd4b384a5c648 | [
"MIT"
] | null | null | null | txt2epub_pdf/console.py | drthomas246/txt2epub-pdf | 09d12a61e0d6f66512af7fdf9abfd4b384a5c648 | [
"MIT"
] | null | null | null | txt2epub_pdf/console.py | drthomas246/txt2epub-pdf | 09d12a61e0d6f66512af7fdf9abfd4b384a5c648 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .package import txt2epub as txt2epub
from .package import txt2pdf as txt2pdf
import argparse
__version__ = "0.1.0"
| 37.393939 | 120 | 0.657212 |
a006b38b61a96ab48414b8fa22ea5745e9fed4bd | 22 | py | Python | Scripts.py | MattOstgard/HLSL_ST3 | fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1 | [
"MIT"
] | 10 | 2017-11-30T19:43:48.000Z | 2022-02-02T11:10:43.000Z | Scripts.py | MattOstgard/HLSL_ST3 | fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1 | [
"MIT"
] | 27 | 2018-11-06T16:10:57.000Z | 2022-02-25T22:55:33.000Z | Scripts.py | MattOstgard/HLSL_ST3 | fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1 | [
"MIT"
] | 2 | 2018-03-24T04:09:45.000Z | 2018-11-06T14:54:10.000Z | from .Scripts import * | 22 | 22 | 0.772727 |
a00725d52685ae75cf07ae5d77c3ada997c869be | 3,150 | py | Python | tests/test_fileio_operators.py | ptrthomas/blender_mmd_tools | 8b5053b9f2e7391cb9ac1e5114824cbbfd9d80cc | [
"MIT"
] | 2 | 2021-01-22T05:11:50.000Z | 2021-02-19T11:58:00.000Z | tests/test_fileio_operators.py | jiastku98/blender_mmd_tools | ac26c55a985d62ae9439a961d27e796444d09069 | [
"MIT"
] | 1 | 2022-01-29T05:46:50.000Z | 2022-01-29T05:46:50.000Z | tests/test_fileio_operators.py | yhong3/blender_mmd_tools | 53e16a46459328bccc444c84e50f22436e9cbc11 | [
"MIT"
] | 1 | 2021-11-07T19:41:34.000Z | 2021-11-07T19:41:34.000Z | import os
import shutil
import unittest
import bpy
from mmd_tools.core import pmx
from mmd_tools.core.model import Model
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_DIR = os.path.join(os.path.dirname(TESTS_DIR), 'samples')
if __name__ == '__main__':
import sys
sys.argv = [__file__] + (sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [])
unittest.main()
| 39.873418 | 108 | 0.626349 |
a008c9a8ae43052aa04c604f338705e7dbe4bc71 | 1,939 | py | Python | gocdapi/stage.py | andrewphilipsmith/gocdapi | 82eb37c6b00a918b6bcf4184a66cad7344cfaa2e | [
"MIT"
] | 8 | 2015-01-23T12:50:30.000Z | 2020-01-21T11:00:19.000Z | gocdapi/stage.py | andrewphilipsmith/gocdapi | 82eb37c6b00a918b6bcf4184a66cad7344cfaa2e | [
"MIT"
] | 7 | 2015-01-27T23:17:05.000Z | 2016-06-08T15:27:07.000Z | gocdapi/stage.py | andrewphilipsmith/gocdapi | 82eb37c6b00a918b6bcf4184a66cad7344cfaa2e | [
"MIT"
] | 2 | 2015-11-23T18:33:24.000Z | 2020-07-15T09:01:34.000Z | """
Module for gocdapi Stage class
"""
from gocdapi.gobase import GoBase
| 29.378788 | 112 | 0.625064 |
a008eb9d3812a49e20b4001c7d7b0873ff6642c9 | 106 | py | Python | tests/exog/random/random_exog_32_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/exog/random/random_exog_32_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/exog/random/random_exog_32_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 32,20); | 26.5 | 60 | 0.858491 |
a00c8ca9d15e99fb1ab604b2860c37b77ff6ba5e | 602 | py | Python | cursoemvideopython/desafio_035.py | edmilsonlibanio/Ola-Mundo-Python | 33fb08da5878f2784983c623df04d2bbdfb30f25 | [
"MIT"
] | null | null | null | cursoemvideopython/desafio_035.py | edmilsonlibanio/Ola-Mundo-Python | 33fb08da5878f2784983c623df04d2bbdfb30f25 | [
"MIT"
] | null | null | null | cursoemvideopython/desafio_035.py | edmilsonlibanio/Ola-Mundo-Python | 33fb08da5878f2784983c623df04d2bbdfb30f25 | [
"MIT"
] | null | null | null | # Desenvolva um programa que leia o comprimento de trs retas e diga ao usurio se elas podem ou no formar um tringulo (pesquisar o princpio matemtico que explica a formao de um triangulo).
r1 = float(input('Informe o comprimento da primeira reta: '))
r2 = float(input('Informe o comprimento da segunda reta: '))
r3 = float(input('Informe o comprimento da terceira reta: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print(f'As medidas {r1}, {r2} e {r3} so capazes de formar um tringulo!')
else:
print(f'As medidas {r1}, {r2} e {r3} no so capazes de formar um tringulo!')
| 54.727273 | 195 | 0.709302 |
a00cf121c8cf260456f4a0552e06a0dd6ae84b59 | 1,070 | py | Python | cv_lib/detection/models/__init__.py | zhfeing/deep-learning-lib-PyTorch | 1a4e1c1939a42c30fe32dd8d6aff210e8604e77b | [
"MIT"
] | 4 | 2021-03-29T07:34:21.000Z | 2021-04-25T08:25:30.000Z | cv_lib/detection/models/__init__.py | zhfeing/deep-learning-lib | f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9 | [
"MIT"
] | null | null | null | cv_lib/detection/models/__init__.py | zhfeing/deep-learning-lib | f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9 | [
"MIT"
] | 1 | 2021-03-30T07:13:31.000Z | 2021-03-30T07:13:31.000Z | from functools import partial
from typing import Dict
import copy
from torch.nn import Module
from torchvision.models.resnet import *
from .ssd_resnet import SSD300_ResNet
from .ssd_vgg import SSD300_VGG16
from .backbones import *
__REGISTERED_MODELS__ = {
"SSD300_ResNet": SSD300_ResNet,
"SSD300_VGG16": SSD300_VGG16
}
__REGISTERED_BACKBONES__ = {
"ResNetBackbone": ResNetBackbone,
"VGGBackbone": VGGBackbone
}
| 24.883721 | 63 | 0.715888 |
a00dbbabf32006769aba7ac26d4086798f8f5b92 | 75 | py | Python | Py26/01/main.py | xhexe/Py8R | 44238c5403e7f76988760a040bf5c292824c22e7 | [
"WTFPL"
] | null | null | null | Py26/01/main.py | xhexe/Py8R | 44238c5403e7f76988760a040bf5c292824c22e7 | [
"WTFPL"
] | null | null | null | Py26/01/main.py | xhexe/Py8R | 44238c5403e7f76988760a040bf5c292824c22e7 | [
"WTFPL"
] | null | null | null | inp = input("Enter string: ")
input_string = ord(inp)
print(input_string)
| 15 | 29 | 0.72 |
a00ec424e1b91d1ccc45e241094dd421a5923bf0 | 430 | py | Python | codewars/tour.py | Imbafar/Codewars_solutions | 1b1bb2ba59bcea0d609e97df00b0fd14a61771ca | [
"BSD-3-Clause"
] | null | null | null | codewars/tour.py | Imbafar/Codewars_solutions | 1b1bb2ba59bcea0d609e97df00b0fd14a61771ca | [
"BSD-3-Clause"
] | null | null | null | codewars/tour.py | Imbafar/Codewars_solutions | 1b1bb2ba59bcea0d609e97df00b0fd14a61771ca | [
"BSD-3-Clause"
] | null | null | null | # https://www.codewars.com/kata/5536a85b6ed4ee5a78000035
import math
| 25.294118 | 57 | 0.562791 |
a0100c7225ae95c3cbbf519ce214f82cef36e0ce | 733 | py | Python | csr/kernels/mkl/multiply.py | mdekstrand/csr | 665ceefff882d7e42db41034246b6ddb1f93e372 | [
"MIT"
] | 11 | 2021-02-07T16:37:31.000Z | 2022-03-19T15:19:16.000Z | csr/kernels/mkl/multiply.py | mdekstrand/csr | 665ceefff882d7e42db41034246b6ddb1f93e372 | [
"MIT"
] | 25 | 2021-02-11T22:42:01.000Z | 2022-01-27T21:04:31.000Z | csr/kernels/mkl/multiply.py | lenskit/csr | 03fde2d8c3cb7eb330028f34765ff2a06f849631 | [
"MIT"
] | 2 | 2021-02-07T02:05:04.000Z | 2021-06-01T15:23:09.000Z | import numpy as np
from numba import njit
from ._api import * # noqa: F403
from .handle import mkl_h
__all__ = [
'mult_ab',
'mult_abt'
]
| 17.452381 | 47 | 0.587995 |
a0108081c8dab4089a37cbfc386521591e071aeb | 4,088 | py | Python | acos_client/v30/glm/license.py | hthompson-a10/acos-client | d480a4f239ae824c9dc9ea49a94b84a5bd9d33f8 | [
"Apache-2.0"
] | 33 | 2015-02-11T16:42:04.000Z | 2021-08-24T16:06:23.000Z | acos_client/v30/glm/license.py | hthompson-a10/acos-client | d480a4f239ae824c9dc9ea49a94b84a5bd9d33f8 | [
"Apache-2.0"
] | 154 | 2015-01-12T18:46:28.000Z | 2022-01-22T13:59:48.000Z | acos_client/v30/glm/license.py | hthompson-a10/acos-client | d480a4f239ae824c9dc9ea49a94b84a5bd9d33f8 | [
"Apache-2.0"
] | 68 | 2015-01-12T22:29:57.000Z | 2021-07-13T07:21:05.000Z | # Copyright (C) 2021, A10 Networks Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from acos_client import errors as acos_errors
from acos_client.v30 import base
| 36.176991 | 84 | 0.604452 |
a0114944e8b3edca3a0286ac5a5fb5a714ad3f65 | 310 | py | Python | engine/gamestate.py | Yooooomi/py-drives | 6a9dd1a1684b1b65ab553d91eebc77fe099301e7 | [
"MIT"
] | null | null | null | engine/gamestate.py | Yooooomi/py-drives | 6a9dd1a1684b1b65ab553d91eebc77fe099301e7 | [
"MIT"
] | null | null | null | engine/gamestate.py | Yooooomi/py-drives | 6a9dd1a1684b1b65ab553d91eebc77fe099301e7 | [
"MIT"
] | null | null | null | from engine.gameobject import Gameobject
objects = []
| 16.315789 | 40 | 0.680645 |
a013e70e32f34350be8bc00a3ce5fb9e45e8fb9c | 4,912 | py | Python | Day3.py | Swicano/AdventCode | 3b6f425c773f05911bcc8d8d2f3cf5eb64bfdeff | [
"MIT"
] | null | null | null | Day3.py | Swicano/AdventCode | 3b6f425c773f05911bcc8d8d2f3cf5eb64bfdeff | [
"MIT"
] | null | null | null | Day3.py | Swicano/AdventCode | 3b6f425c773f05911bcc8d8d2f3cf5eb64bfdeff | [
"MIT"
] | null | null | null | input1str = 'R998,U367,R735,U926,R23,U457,R262,D473,L353,U242,L930,U895,R321,U683,L333,U623,R105,D527,R437,D473,L100,D251,L958,U384,R655,U543,L704,D759,R529,D176,R835,U797,R453,D650,L801,U437,L468,D841,R928,D747,L803,U677,R942,D851,R265,D684,L206,U763,L566,U774,L517,U337,L86,D585,R212,U656,L799,D953,L24,U388,L465,U656,L467,U649,R658,U519,L966,D290,L979,D819,R208,D907,R941,D458,L882,U408,R539,D939,R557,D771,L448,U460,L586,U148,R678,U360,R715,U312,L12,D746,L958,U216,R275,D278,L368,U663,L60,D543,L605,D991,L369,D599,R464,D387,L835,D876,L810,U377,L521,U113,L803,U680,L732,D449,R891,D558,L25,U249,L264,U643,L544,U504,R876,U403,R950,U19,L224,D287,R28,U914,R906,U970,R335,U295,R841,D810,R891,D596,R451,D79,R924,U823,L724,U968,R342,D349,R656,U373,R864,U374,L401,D102,L730,D886,R268,D188,R621,U258,L788,U408,L199,D422,R101,U368,L636,U543,R7,U722,L533,U242,L340,D195,R158,D291,L84,U936,L570,D937,L321,U947,L707,U32,L56,U650,L427,U490,L472,U258,R694,U87,L887,U575,R826,D398,R602,U794,R855,U225,R435,U591,L58,U281,L834,D400,R89,D201,L328,U278,L494,D70,L770,D182,L251,D44,R753,U431,R573,D71,R809,U983,L159,U26,R540,U516,R5,D23,L603,U65,L260,D187,R973,U877,R110,U49,L502,D68,R32,U153,R495,D315,R720,D439,R264,D603,R717,U586,R732,D111,R997,U578,L243,U256,R147,D425,L141,U758,R451,U779,R964,D219,L151,D789,L496,D484,R627,D431,R433,D761,R355,U975,L983,U364,L200,U578,L488,U668,L48,D774,R438,D456,L819,D927,R831,D598,L437,U979,R686,U930,L454,D553,L77,D955,L98,U201,L724,U211,R501,U492,L495,U732,L511'
input2str = 'L998,U949,R912,D186,R359,D694,L878,U542,L446,D118,L927,U175,R434,U473,R147,D54,R896,U890,R300,D537,R254,D322,R758,D690,R231,U269,R288,U968,R638,U192,L732,D355,R879,U451,R336,D872,L141,D842,L126,U584,L973,D940,R890,D75,L104,U340,L821,D590,R577,U859,L948,D199,L872,D751,L368,U506,L308,U827,R181,U94,R670,U901,R739,D48,L985,D801,R722,D597,R654,D606,R183,U646,R939,U677,R32,U936,L541,D934,R316,U354,L415,D930,R572,U571,R147,D609,L534,D406,R872,D527,L816,D960,R652,D429,L402,D858,R374,D930,L81,U106,R977,U251,R917,U966,R353,U732,L613,U280,L713,D937,R481,U52,R746,U203,L500,D557,L209,U249,R89,D58,L149,U872,R331,D460,R343,D423,R392,D160,L876,U981,L399,D642,R525,U515,L537,U113,R886,D516,L301,D680,L236,U399,R460,D869,L942,D280,R669,U476,R683,D97,R199,D444,R137,D489,L704,D120,R753,D100,L737,U375,L495,D325,R48,D269,R575,U895,L184,D10,L502,D610,R618,D744,R585,U861,R695,D775,L942,U64,L819,U161,L332,U513,L461,D366,R273,D493,L197,D97,L6,U63,L564,U59,L699,U30,L68,U861,R35,U564,R540,U371,L115,D595,L412,D781,L185,D41,R207,D264,R999,D799,R421,D117,R377,D571,R268,D947,R77,D2,R712,D600,L516,U389,L868,D762,L996,U205,L178,D339,L844,D629,R67,D732,R109,D858,R630,U470,L121,D542,L751,U353,L61,U770,R952,U703,R264,D537,L569,U55,L795,U389,R836,U166,R585,U275,L734,U966,L130,D357,L260,U719,L647,D606,R547,U575,R791,U686,L597,D486,L774,U386,L163,U912,L234,D238,L948,U279,R789,U300,R117,D28,L833,U835,L340,U693,R343,D573,R882,D241,L731,U812,R600,D663,R902,U402,R831,D802,L577,U920,L947,D538,L192' #221
test0input1str = 'R8,U5,L5,D3' #6 #30
test0input2str = 'U7,R6,D4,L4'
test1input1str = 'R75,D30,R83,U83,L12,D49,R71,U7,L72' #159 #610
test1input2str = 'U62,R66,U55,R34,D71,R55,D58,R83'
test2input1str = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51' #135 #410
test2input2str = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'
# step 0 convert string to list
input1 = input1str.split(',')
input2 = input2str.split(',')
#input1 = test2input1str.split(',')
#input2 = test2input2str.split(',')
# step 1 make a function to generate a list of coordinates of all points a set of instructions passes through
# step2 find the intersection between the two paths and calculate the manhatten distance
path1 = wire_locs(input1)
path2 = wire_locs(input2)
intersects = set(path1) & set(path2)
distances = [ abs(i[0])+abs(i[1]) for i in intersects]
distances.sort()
min_manhatten = distances[0]
print(min_manhatten)
# End Part 1
# Part 2: we have a new distance metric, the total path length
distances2 = [path2.index(i)+path1.index(i)+2 for i in intersects] #+2 because of the index 0
distances2.sort()
min_parttwo = distances2[0]
print(min_parttwo)
| 72.235294 | 1,495 | 0.725366 |
a015a79f3a34467630656ad6b59d1a4c00a2d976 | 13,757 | py | Python | arcfire/arcfire/models.py | allanberry/arcfire | c41bad3ae7792406e169f9f7acd02f7e52467cbe | [
"MIT"
] | null | null | null | arcfire/arcfire/models.py | allanberry/arcfire | c41bad3ae7792406e169f9f7acd02f7e52467cbe | [
"MIT"
] | 38 | 2015-10-21T19:10:36.000Z | 2015-12-18T11:57:12.000Z | arcfire/arcfire/models.py | allanberry/arcfire | c41bad3ae7792406e169f9f7acd02f7e52467cbe | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from django.core.urlresolvers import reverse
from django.conf import settings
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# Level 0: base abstract and infrastructure classes #
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# class CompoundMixin(models.Model):
# '''
# Abstract base class for groups of elements.
# '''
# class Meta:
# abstract = True
# members = []
# # I'm not entirely sure what I want to do with this yet, since fields need
# # to be defined in each subclass instead of overridden. This makes things
# # more complex than I like, but probably OK. In the meantime, I'll
# # leave this here and give it methods soon, hopefully generic to work
# # for all subclasses.
# # # # # # # # # #
# Utility tables #
# # # # # # # # # #
# # # # # # # # # # # # #
# Level 1: Basic Items #
# # # # # # # # # # # # #
# # # # # # # # # # # # # #
# Level 2: Complex Items #
# # # # # # # # # # # # # #
# class Collection(CompoundMixin, Thing):
# '''
# A group of things.
# '''
# class Corpus(CompoundMixin, Person):
# '''
# A group of people. Used to be called "Group", but it turns out that's a built-in Django class.
# '''
# class Memory(Thing):
# '''
# Something a living thing takes with them.
# '''
# life = models.ForeignKey(Life, related_name="memories")
#
# def get_absolute_url(self):
# return reverse('memory', args=(self.slug, ))
# class Plant(Life):
# '''
# A plant (flora).
# '''
# pass
#
# def get_absolute_url(self):
# return reverse('memory', args=(self.slug, ))
# class Animal(Life):
# '''
# An animal (fauna).
# '''
# pass
#
# def get_absolute_url(self):
# return reverse('animal', args=(self.slug, ))
# class Group(Collectable, Person):
# '''
# An organization, class, tribe or family of human beings.
# '''
# # cls = Person
# members = models.ManyToManyField(Person, related_name="groups")
#
# def get_absolute_url(self):
# return reverse('group', args=(self.slug, ))
| 31.993023 | 460 | 0.622301 |
a016b1851ba12bcb3c409fda497f638b8a707e19 | 953 | py | Python | sdk-py/update_user_attributes.py | kg0r0/cognito-examples | 54b7a68a9113b231ead99fa4f531d46243e04566 | [
"MIT"
] | null | null | null | sdk-py/update_user_attributes.py | kg0r0/cognito-examples | 54b7a68a9113b231ead99fa4f531d46243e04566 | [
"MIT"
] | null | null | null | sdk-py/update_user_attributes.py | kg0r0/cognito-examples | 54b7a68a9113b231ead99fa4f531d46243e04566 | [
"MIT"
] | null | null | null | import os
import boto3
from getpass import getpass
from dotenv import load_dotenv
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
load_dotenv(dotenv_path)
client = boto3.client("cognito-idp", region_name=os.getenv("REGION_NAME"))
username = input("[*] Enter Your Email Address: ")
password = getpass("[*] Enter Your Password: ")
response = client.initiate_auth(
ClientId=os.getenv("CLIENT_ID"),
AuthFlow="USER_PASSWORD_AUTH",
AuthParameters={"USERNAME": username, "PASSWORD": password},
)
access_token = response["AuthenticationResult"]["AccessToken"]
print("[*] Successful issuance of Access Token")
attribute_name = input("[*] Enter Attribute Name: ")
attribute_value = input("[*] Enter Attribute Value: ")
response = client.update_user_attributes(
UserAttributes=[
{
'Name': attribute_name,
'Value': attribute_value
},
],
AccessToken=access_token,
)
print(response)
| 27.228571 | 74 | 0.704092 |
a0173627d1c723757b35d4f6e9573e1f4a571e05 | 259 | py | Python | echome/network/serializers.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 2 | 2022-01-31T19:32:51.000Z | 2022-01-31T22:42:13.000Z | echome/network/serializers.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 7 | 2021-04-04T01:15:53.000Z | 2022-02-07T03:34:48.000Z | echome/network/serializers.py | jasoncolburne/echome | a5ab87666ae859d1ca8e4902d5c441c0ce36547a | [
"MIT"
] | 1 | 2022-02-01T11:34:50.000Z | 2022-02-01T11:34:50.000Z | from rest_framework import serializers
from .models import VirtualNetwork | 32.375 | 53 | 0.72973 |
a01762ca3e759a9a379ad71578ccb40a1edcad3d | 738 | py | Python | contests_atcoder/abc153/abc153_f.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | contests_atcoder/abc153/abc153_f.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | 1 | 2021-01-02T06:36:51.000Z | 2021-01-02T06:36:51.000Z | contests_atcoder/abc153/abc153_f.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | from bisect import bisect_left, bisect_right
from collections import deque, Counter
from itertools import combinations, permutations
from math import gcd, sin, cos, tan, degrees, radians
import sys
input = lambda: sys.stdin.readline().rstrip()
MOD = 10 ** 9 + 7
INF = float("inf")
n, d, a = map(int, input().split())
monsters = [tuple(map(int, input().split())) for _ in range(n)]
monsters.sort()
now = 0
ans = 0
bomb = deque()
for m in monsters:
x = m[0]
attack_count = -(-m[1] // a)
while len(bomb) and bomb[0][0] < x:
b = bomb.popleft()
now -= b[1]
if attack_count > now:
ans += attack_count - now
bomb.append((x + 2 * d, attack_count - now))
now = attack_count
print(ans) | 23.0625 | 63 | 0.624661 |
a017a1ab05231fbc634e10328c46e53e752448d8 | 16,532 | py | Python | sistema_experto.py | Erubeyy/SistemaExperto- | 6194f798fad684eb83635fe85bf3f1a7d70ed2a2 | [
"MIT"
] | null | null | null | sistema_experto.py | Erubeyy/SistemaExperto- | 6194f798fad684eb83635fe85bf3f1a7d70ed2a2 | [
"MIT"
] | null | null | null | sistema_experto.py | Erubeyy/SistemaExperto- | 6194f798fad684eb83635fe85bf3f1a7d70ed2a2 | [
"MIT"
] | null | null | null | from tkinter import*
from tkinter import font
from experta import *
raiz = Tk()
raiz.title("Sistema experto- Tipos de covid")
raiz.config(bg="#f4f7fa")
#raiz.resizable(0,0)
mi0Frame = Frame(raiz)#, width="1200", height="700")
mi0Frame.grid(row=1, column=0)
mi0Frame.config(bg="#f4f7fa")
mi3Frame = Frame(raiz)#, width="1200", height="700")
mi3Frame.grid(row=1, column=1)
mi3Frame.config(bg="#f4f7fa")
miFrame = Frame(raiz)#, width="1200", height="700")
miFrame.grid(row=2, column=0)
miFrame.config(bg="#f4f7fa")
mi2Frame = Frame(raiz, highlightbackground="black", highlightthickness=0.5)
mi2Frame.grid(row=2, column=1)
mi2Frame.config(bg="#f4f7fa")
mi4Frame = Frame(raiz, highlightbackground="black", highlightthickness=0.5)
mi4Frame.grid(row=0, column=0)
mi4Frame.config(bg="#f4f7fa")
reinicio = 0
#-----------------------------------------------INPUTS DE LOS SNTOMAS------------------------------------------------------------
sin0 = Label(miFrame, text="Dolor de cabeza:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin0.grid(row=0, column=0,padx=10, pady=10,sticky="e")
in_sin0 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin0.grid(row=0, column=1,padx=10, pady=10)
sin1 = Label(miFrame, text="Perdida del olfato:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin1.grid(row=1, column=0,padx=10, pady=10,sticky="e")
in_sin1 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin1.grid(row=1, column=1,padx=10, pady=10)
sin2 = Label(miFrame, text="Dolor muscular:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin2.grid(row=2, column=0,padx=10, pady=10,sticky="e")
in_sin2 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin2.grid(row=2, column=1,padx=10, pady=10)
sin3 = Label(miFrame, text="Tos:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin3.grid(row=3, column=0,padx=10, pady=10,sticky="e")
in_sin3 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin3.grid(row=3, column=1,padx=10, pady=10)
sin4 = Label(miFrame, text="Dolor de garganta:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin4.grid(row=4, column=0,padx=10, pady=10,sticky="e")
in_sin4 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin4.grid(row=4, column=1,padx=10, pady=10)
sin5 = Label(miFrame, text="Dolor en el pecho:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin5.grid(row=5, column=0,padx=10, pady=10,sticky="e")
in_sin5 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin5.grid(row=5, column=1,padx=10, pady=10)
sin6 = Label(miFrame, text="Fiebre:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin6.grid(row=6, column=0,padx=10, pady=10,sticky="e")
in_sin6 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin6.grid(row=6, column=1,padx=10, pady=10)
sin7 = Label(miFrame, text="Ronquera:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin7.grid(row=7, column=0,padx=10, pady=10,sticky="e")
in_sin7 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin7.grid(row=7, column=1,padx=10, pady=10)
sin8 = Label(miFrame, text="Prdida del apetito:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin8.grid(row=8, column=0,padx=10, pady=10,sticky="e")
in_sin8 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin8.grid(row=8, column=1,padx=10, pady=10)
sin9 = Label(miFrame, text="Diarrea:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin9.grid(row=9, column=0,padx=10, pady=10,sticky="e")
in_sin9 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin9.grid(row=9, column=1,padx=10, pady=10)
sin10 = Label(miFrame, text="Fatiga:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin10.grid(row=10, column=0,padx=10, pady=10,sticky="e")
in_sin10 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin10.grid(row=10, column=1,padx=10, pady=10)
sin11 = Label(miFrame, text="Confusin:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin11.grid(row=11, column=0,padx=10, pady=10,sticky="e")
in_sin11 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin11.grid(row=11, column=1,padx=10, pady=10)
sin12 = Label(miFrame, text="Dificultad para respirar:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin12.grid(row=12, column=0,padx=10, pady=10,sticky="e")
in_sin12 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
# Row 12: last symptom input field (earlier inputs are created above this chunk).
in_sin12.grid(row=12, column=1,padx=10, pady=10)
# ------ Result widgets (diagnosis output pane) ------
# NOTE(review): several UI strings have lost accented characters
# ("Descripcin", "SNTOMAS", "DIAGNSTICO", ...) -- looks like an encoding
# problem in the source file; confirm and restore accents (runtime strings
# are left untouched in this documentation-only pass).
tipo_final_lbl = Label(mi2Frame, text="Tipo de covid diagnosticado:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
tipo_final_lbl.grid(row=2, column=0,padx=10, pady=10,sticky="n")
# Entry that shows the diagnosed covid type (presumably filled by
# iniciar_sistema -- confirm against its definition earlier in the script).
tipo_final = Entry(mi2Frame, width=35, justify='center', font=('FELIX TITLING', 10, font.BOLD))
tipo_final.grid(row=3, column=0, padx=1, pady=1)
# Spacer label between result fields.
blank = Label(mi2Frame, bg="#F0F8FF")
blank.grid(row=4, column=0,padx=10, pady=10,sticky="n")
descripcion_tipo_lbl = Label(mi2Frame, text="Descripcin del tipo de covid diagnosticado:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
descripcion_tipo_lbl.grid(row=5, column=0,padx=10, pady=10,sticky="n")
# Multi-line description of the diagnosed type.
descripcion_tipo = Text(mi2Frame, width=60, height=10)
descripcion_tipo.grid(row=6, column=0, padx=10, pady=10)
sugerencias_lbl = Label(mi2Frame, text="Sugerencias para tratar la enfermedad:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sugerencias_lbl.grid(row=7, column=0,padx=10, pady=10,sticky="n")
# Multi-line treatment suggestions.
sugerencias = Text(mi2Frame, width=60, height=10)
sugerencias.grid(row=8, column=0, padx=10, pady=10)
# ------ Section headers ------
head1 = Label(mi0Frame, text="\nSNTOMAS", bg="#F0F8FF", font=('Elephant', 15))
head1.grid(row=0, column=0, sticky="n")
head1_0 = Label(mi3Frame, text="DIAGNSTICO", bg="#F0F8FF", font=('Elephant', 15))
head1_0.grid(row=0, column=0, sticky="n")
# NOTE(review): `head1` is re-bound here, discarding the reference to the
# "SNTOMAS" label created just above -- harmless (Tk keeps the widget
# alive), but a distinct name would be clearer.
head1 = Label(mi0Frame, bg="#F0F8FF")
head1.grid(row=1, column=0, sticky="n")
head2 = Label(mi0Frame, text=" -Introduce un 'si' o un 'no' dependiendo de los sntomas que presentes",
              bg="#F0F8FF", font=('Century Ghotic', 11))
head2.grid(row=2, column=0, sticky="n" )
head3 = Label(mi4Frame, text="Sistema experto - Tipos de COVID", bg="#F0F8FF", font=('Elephant', 15))
head3.grid(row=0)
# ------ End of symptom input widgets ------
# Containers for the expert-system knowledge base; presumably populated by
# code elsewhere in this script -- confirm.
lista_tipos = []
sintomas_tipo = []
map_sintomas = {}
d_desc_map = {}
d_tratamiento_map = {}
#def identificar_tipo(dolor_cabeza, perdida_olfato, dolor_muscular, tos, dolor_garganta, dolor_pecho, fiebre, ronquera, perdida_apetito , diarrea, fatiga, confusin, dificultad_respiratoria):
# ------ Buttons ------
# Runs the inference and fills the result widgets above.
generarTabla = Button(
    miFrame,
    text="RESULTADO",
    command=iniciar_sistema,
    bg="#7fd1ff",
    font=("Eurostile", 10, font.BOLD),
    padx=20,
    pady=5
)
generarTabla.grid(row=13, column=1, padx=10, pady=15)
# NOTE(review): the names `reiniciar` and `salir` below shadow the callback
# functions of the same name once each Button is created. The `command=`
# reference is evaluated first, so behaviour is unaffected, but the
# callbacks become unreachable by name afterwards.
reiniciar = Button(
    mi2Frame, text="REINICIAR",
    command=reiniciar,
    bg="#7fd1ff",
    font=("Eurostile", 10, font.BOLD),
    padx=20,
    pady=5
)
reiniciar.grid(row=9, column=0, padx=10, pady=15)
salir = Button(
    mi2Frame, text="SALIR",
    command=salir,
    bg="#ea9999",
    font=("Eurostile", 9),
    border='2p',
    padx=20,
    pady=3
)
salir.grid(row=10, column=0, padx=10, pady=15)
raiz.mainloop() | 43.851459 | 335 | 0.687576 |
a017aa5f81d90682eeec3d31e4bdb2e999666f4b | 6,105 | py | Python | socket_temperature_connect.py | MeowMeowZi/PPLTestTool | 576f28fb20680b1ed33520d92c552ccafc93d716 | [
"MIT"
] | null | null | null | socket_temperature_connect.py | MeowMeowZi/PPLTestTool | 576f28fb20680b1ed33520d92c552ccafc93d716 | [
"MIT"
] | null | null | null | socket_temperature_connect.py | MeowMeowZi/PPLTestTool | 576f28fb20680b1ed33520d92c552ccafc93d716 | [
"MIT"
] | null | null | null | import socket
import time
import shelve
# Command strings for the temperature-chamber controller protocol.
# Preset slot number -> [preset-select command, temperature-register prefix].
# NOTE(review): 'MBxxxx'/'MIxxxx' look like PLC coil/register addresses;
# confirm against the controller manual. Note preset 3 selects MB0076 while
# the others are MB0023/24/26 -- verify this irregularity is intentional.
preset_command = {
    1: ['MB0023,1', 'MI0695,'],
    2: ['MB0024,1', 'MI0696,'],
    3: ['MB0076,1', 'MI0697,'],
    4: ['MB0026,1', 'MI0698,'],
}
force_command = 'MB0336,1'  # NOTE(review): semantics not visible here -- confirm
start_command = 'MB0020,0'  # start (per the variable name: coil MB0020 cleared)
stop_command = 'MB0020,1'   # stop (per the variable name: coil MB0020 set)
if __name__ == '__main__':
temperature = Temperature()
| 30.678392 | 110 | 0.552826 |
a0187e302825ea7cb1c14461fb74435494c1cd4b | 12,938 | py | Python | wwwdccn/chair_mail/models.py | marvinxu99/dccnsys | 8f53728d06b859cace42cc84bc190bc89950d252 | [
"MIT"
] | 16 | 2020-03-15T15:33:30.000Z | 2021-11-26T21:57:27.000Z | wwwdccn/chair_mail/models.py | marvinxu99/dccnsys | 8f53728d06b859cace42cc84bc190bc89950d252 | [
"MIT"
] | 11 | 2019-04-27T19:15:43.000Z | 2022-03-11T23:43:08.000Z | wwwdccn/chair_mail/models.py | marvinxu99/dccnsys | 8f53728d06b859cace42cc84bc190bc89950d252 | [
"MIT"
] | 10 | 2020-03-14T09:25:39.000Z | 2022-02-21T16:46:33.000Z | from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.db.models import ForeignKey, OneToOneField, TextField, CharField, \
SET_NULL, CASCADE, BooleanField, UniqueConstraint
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template import Template, Context
from django.utils import timezone
from markdown import markdown
from html2text import html2text
from chair_mail.context import get_conference_context, get_user_context, \
get_submission_context, get_frame_context
from conferences.models import Conference
from submissions.models import Submission
from users.models import User
# Discriminators for the two kinds of bulk mail-outs supported by the chair
# mailing subsystem: messages addressed to users vs. to submissions.
MSG_TYPE_USER = 'user'
MSG_TYPE_SUBMISSION = 'submission'
# Choices tuple for CharField(choices=...) fields that store a message type.
MESSAGE_TYPE_CHOICES = (
    (MSG_TYPE_USER, 'Message to users'),
    (MSG_TYPE_SUBMISSION, 'Message to submissions'),
)
class SubmissionMessage(GroupMessage):
    """Group mail-out addressed to submissions.

    Every author of every recipient submission receives an individually
    rendered email (conference + submission + user context).
    """
    # Submissions this mail-out targets.
    recipients = models.ManyToManyField(
        Submission, related_name='group_emails')
    # Explicit parent link of the multi-table inheritance to GroupMessage.
    group_message = models.OneToOneField(
        GroupMessage, on_delete=models.CASCADE, parent_link=True)
    def send(self, sender):
        """Render and send one email per (submission, author) pair.

        Args:
            sender: chair user triggering the send; recorded on the message
                and on each individual email.

        Returns:
            self, with ``sent``/``sent_at`` updated after the mail-out.
        """
        # 1) Update status and save sender chair user:
        self.sent = False
        self.sent_by = sender
        self.save()
        # 2) For each user, we render this template with the given context,
        # and then build the whole message by inserting this body into
        # the frame. Plain-text version is also formed from HTML.
        frame = self.conference.email_settings.frame
        conference_context = get_conference_context(self.conference)
        for submission in self.recipients.all():
            submission_context = get_submission_context(submission)
            for author in submission.authors.all():
                user = author.user
                # autoescape=False: bodies are Markdown/plain text, not HTML.
                context = Context({
                    **conference_context,
                    **submission_context,
                    **get_user_context(user, self.conference)
                }, autoescape=False)
                # EmailMessage.create is defined on the model elsewhere in
                # this module (not visible here) -- presumably renders the
                # subject/body templates into a stored EmailMessage; confirm.
                email = EmailMessage.create(
                    group_message=self.group_message,
                    user_to=user,
                    context=context,
                    frame=frame
                )
                email.send(sender)
        # 3) Update self status, write sending timestamp
        self.sent_at = timezone.now()
        self.sent = True
        self.save()
        return self
def get_group_message_model(msg_type):
    """Return the concrete GroupMessage subclass for *msg_type*.

    Raises KeyError for an unknown message type, exactly like the plain
    dict lookup it replaces.
    """
    if msg_type == MSG_TYPE_USER:
        return UserMessage
    if msg_type == MSG_TYPE_SUBMISSION:
        return SubmissionMessage
    raise KeyError(msg_type)
def get_message_leaf_model(msg):
    """Given a `GroupMessage` instance, walk the multi-table inheritance and
    return the most-derived ("leaf") instance.

    The possible leaf models are `UserMessage` and `SubmissionMessage`.

    Raises:
        TypeError: if `msg` is neither a leaf nor the parent of one.
    """
    # Django exposes the child row of a multi-table parent as a lowercased
    # attribute named after the child model:
    if hasattr(msg, 'usermessage'):
        return msg.usermessage
    elif hasattr(msg, 'submissionmessage'):
        return msg.submissionmessage
    # Also check, maybe a message is already a leaf:
    if isinstance(msg, (UserMessage, SubmissionMessage)):
        return msg
    # BUG FIX: the f-string was missing braces and raised the literal text
    # "Not a group message: type(msg)" instead of the actual type.
    raise TypeError(f'Not a group message: {type(msg)}')
class EmailMessage(models.Model):
    """A single rendered email addressed to one user.

    Both the HTML body and its plain-text variant are stored; emails are
    normally created as part of a GroupMessage mail-out.
    """
    subject = models.TextField(max_length=1024)
    # Plain-text body (formed from the HTML version, per the mail-out code).
    text_plain = models.TextField()
    text_html = models.TextField()
    # Recipient; deleting the user deletes their emails (CASCADE).
    user_to = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='emails'
    )
    # NOTE(review): auto_now_add stamps row-creation time, so `sent_at` is
    # really "created at" unless rows are created at send time -- confirm.
    sent_at = models.DateTimeField(auto_now_add=True)
    sent = models.BooleanField(default=False)
    # Chair user who triggered the send; row survives (as NULL) if that
    # user account is deleted.
    sent_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL, null=True,
        related_name='sent_emails'
    )
    # Originating group mail-out, if any.
    group_message = models.ForeignKey(
        GroupMessage,
        on_delete=models.SET_NULL,
        null=True,
        related_name='messages',
    )
class SystemNotification(models.Model):
    """This model represents a system notification fired on a specific event.
    The model itself doesn't define the circumstances in which the message
    must be sent, which are subject to views.
    Notification is defined with a mandatory name, optional description,
    subject and template. If template is not assigned or subject is not
    specified, messages won't be sent.
    Notification can also be turned off with `is_active` flag field.
    """
    # Event identifiers: one per submission-status transition that can
    # trigger a notification.
    ASSIGN_STATUS_SUBMIT = 'assign_status_submit'
    ASSIGN_STATUS_REVIEW = 'assign_status_review'
    ASSIGN_STATUS_ACCEPT = 'assign_status_accept'
    ASSIGN_STATUS_REJECT = 'assign_status_reject'
    ASSIGN_STATUS_INPRINT = 'assign_status_inprint'
    # NOTE(review): value is 'assign_status_publish' (no 'ed' suffix, unlike
    # the constant name); changing it now would require a data migration.
    ASSIGN_STATUS_PUBLISHED = 'assign_status_publish'
    # Human-readable labels for the choices widget/admin.
    NAME_CHOICES = (
        (ASSIGN_STATUS_REVIEW, 'Assign status REVIEW to the paper'),
        (ASSIGN_STATUS_SUBMIT, 'Assign status SUBMIT to the paper'),
        (ASSIGN_STATUS_ACCEPT, 'Assign status ACCEPT to the paper'),
        (ASSIGN_STATUS_REJECT, 'Assign status REJECT to the paper'),
        (ASSIGN_STATUS_INPRINT, 'Assign status IN-PRINT to the paper'),
        (ASSIGN_STATUS_PUBLISHED, 'Assign status PUBLISHED to the paper'),
    )
    # Which event this notification reacts to.
    name = CharField(max_length=64, choices=NAME_CHOICES)
    # Email subject template; blank subject disables sending (see docstring).
    subject = CharField(max_length=1024, blank=True)
    # Master on/off switch for this notification.
    is_active = BooleanField(default=False)
    # Whether it addresses users or submissions (MESSAGE_TYPE_CHOICES).
    type = CharField(max_length=64, choices=MESSAGE_TYPE_CHOICES, blank=False)
    # Message body template; blank body disables sending (see docstring).
    body = TextField(blank=True)
    # Owning conference; notifications are deleted with it.
    conference = ForeignKey(Conference, related_name='notifications',
                            on_delete=CASCADE)
# Default content for each system notification, keyed by event name.
# Bodies are Markdown with Django-template placeholders, rendered per
# recipient before sending.
DEFAULT_NOTIFICATIONS_DATA = {
    SystemNotification.ASSIGN_STATUS_REVIEW: {
        'subject': 'Submission #{{ paper_id }} is under review',
        'type': MSG_TYPE_SUBMISSION,
        # NOTE(review): the sentence promises a deadline but applies the
        # time-of-day filter to rev_end_date -- confirm whether a date
        # filter was intended instead.
        'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** is assigned for the review.
Reviews are expected to be ready at **{{ rev_end_date|time:"H:i:s" }}**.'''
    },
    SystemNotification.ASSIGN_STATUS_SUBMIT: {
        'subject': 'Submission #{{ paper_id }} is in draft editing state',
        'type': MSG_TYPE_SUBMISSION,
        'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** is in draft editing
state.
At this point you can modify review manuscript, title and other data if you
need.'''
    },
    SystemNotification.ASSIGN_STATUS_ACCEPT: {
        'subject': 'Submission #{{ paper_id }} was accepted',
        'type': MSG_TYPE_SUBMISSION,
        'body': '''Dear {{ username }},
congratulations, your submission #{{ paper_id }} **"{{ paper_title }}"** was
accepted for the conference.'''
    },
    SystemNotification.ASSIGN_STATUS_REJECT: {
        'subject': 'Submission #{{ paper_id }} was rejected',
        'type': MSG_TYPE_SUBMISSION,
        'body': '''Dear {{ username }},
unfortunately your submission #{{ paper_id }} **"{{ paper_title }}"**
was rejected according to the double-blinded review.
'''
    },
    SystemNotification.ASSIGN_STATUS_INPRINT: {
        # BUG FIX: subject previously said "was rejected" (copy-paste from
        # REJECT), contradicting the body below.
        'subject': 'Submission #{{ paper_id }} is in print',
        'type': MSG_TYPE_SUBMISSION,
        'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** camera-ready was
sent to the publisher. We will let you know when the paper will be published.
'''
    },
    SystemNotification.ASSIGN_STATUS_PUBLISHED: {
        # BUG FIX: subject previously said "was rejected" (copy-paste from
        # REJECT), contradicting the body below.
        'subject': 'Submission #{{ paper_id }} was published',
        'type': MSG_TYPE_SUBMISSION,
        'body': '''Dear {{ username }},
we are glad to inform you that your submission #{{ paper_id }}
**"{{ paper_title }}"** was published.
'''
    },
}
| 35.157609 | 83 | 0.659762 |
a01a6cd80a71c68a6da168b3758e9d7078688990 | 100 | py | Python | Pruebas.py | MacoChave/Server-Iniciales | 035d98793a1c20738b7af885d455fd62197988bd | [
"Apache-2.0"
] | null | null | null | Pruebas.py | MacoChave/Server-Iniciales | 035d98793a1c20738b7af885d455fd62197988bd | [
"Apache-2.0"
] | null | null | null | Pruebas.py | MacoChave/Server-Iniciales | 035d98793a1c20738b7af885d455fd62197988bd | [
"Apache-2.0"
] | null | null | null | from datetime import date
from datetime import datetime
dateToday = date.today()
print(dateToday) | 14.285714 | 29 | 0.8 |
a01dc69fc961ecf3abcdcc4efc76fa8f20eeb48a | 1,753 | py | Python | translator/model.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | translator/model.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | translator/model.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Marco Nicola
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, MarianMTModel, \
MarianTokenizer
from .config import ConfigLanguageModel
| 38.108696 | 79 | 0.718768 |
a01f36af66f5f536cdcfeccc977f7396f86e0837 | 15,082 | py | Python | home/.local/share/tkthemes/clearlooks/create_imgs.py | ssokolow/profile | 09f2a842077909d883a08b546659516deec7d719 | [
"MIT"
] | 9 | 2015-04-14T22:27:40.000Z | 2022-02-23T05:33:00.000Z | home/.local/share/tkthemes/clearlooks/create_imgs.py | ssokolow/profile | 09f2a842077909d883a08b546659516deec7d719 | [
"MIT"
] | 10 | 2018-06-18T07:57:56.000Z | 2021-10-04T06:47:19.000Z | home/.local/share/tkthemes/clearlooks/create_imgs.py | ssokolow/profile | 09f2a842077909d883a08b546659516deec7d719 | [
"MIT"
] | 9 | 2015-04-14T22:27:42.000Z | 2017-11-21T11:34:23.000Z | #!/usr/bin/env python
# -*- mode: python; coding: koi8-r; -*-
import os
import gtk, gobject
# Output location/format for the generated widget screenshots.
imdir = 'images'
imtype = 'png'
background = '#efebe7'
#fill_color = 0xff000000 # red
# Presumably a 32-bit AARRGGBB pixel value used by the (elided) drawing
# code -- confirm against fill/scan logic elsewhere in this script.
fill_color = int('ff000000', 16)
if not os.path.exists(imdir):
    os.mkdir(imdir)
# Globals used by the drawing/save callbacks defined elsewhere in this script.
gc = None
done = False
# Build a PyGTK (Python 2 era) window holding one of each widget type.
win = gtk.Window()
win.connect("destroy", gtk.main_quit)
table = gtk.Table()
win.add(table)
row, col = 0, 0
drawing_area = gtk.DrawingArea()
#drawing_area.set_size_request(100, 100)
# NOTE(review): pack() and save_callback are defined elsewhere in this
# script (not visible here); presumably pack() attaches the widget to
# `table` at (row, col) -- confirm.
pack(drawing_area, row, col)
row += 1
vscroll = gtk.VScrollbar()
pack(vscroll, 0, 1)
hscroll = gtk.HScrollbar()
pack(hscroll, row, col)
row += 1
notebook = gtk.Notebook()
label = gtk.Label("Label")
notebook.append_page(label)
label = gtk.Label("Label")
notebook.append_page(label)
pack(notebook, row, col)
row += 1
button = gtk.Button("Button")
pack(button, row, col)
row += 1
checkbutton = gtk.CheckButton("CheckButton")
pack(checkbutton, row, col)
row += 1
progress = gtk.ProgressBar()
pack(progress, row, col)
row += 1
scale = gtk.HScale()
pack(scale, row, col)
row += 1
entry = gtk.Entry()
pack(entry, row, col)
row += 1
togglebutton = gtk.ToggleButton()
pack(togglebutton, row, col)
togglebutton.set_active(True)
row += 1
# Trigger the screenshot logic once the drawing area is first exposed.
drawing_area.connect("expose-event", save_callback)
#gobject.timeout_add(2000, save_callback)
win.show_all()
#drawing_area.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('red'))
gtk.main()
| 34.045147 | 79 | 0.579631 |
a01fef4f0de34d121af8b1cbdb955d6acb92f36a | 4,687 | py | Python | coil_COMdistance.py | Johanu/MDAnalysis_scripts | f44d2b32bb916daa3bd61e9f3ad636db503293bf | [
"MIT"
] | null | null | null | coil_COMdistance.py | Johanu/MDAnalysis_scripts | f44d2b32bb916daa3bd61e9f3ad636db503293bf | [
"MIT"
] | null | null | null | coil_COMdistance.py | Johanu/MDAnalysis_scripts | f44d2b32bb916daa3bd61e9f3ad636db503293bf | [
"MIT"
] | null | null | null | from __future__ import division
import matplotlib.pyplot as plt
import MDAnalysis as md
import numpy as np
# NOTE(review): Python 2 script (print statements, ur'' literals).
# calculate_dists() is defined elsewhere in this script; presumably returns
# per-frame center-of-mass distances from the trajectory -- confirm.
coil_distance, ASP_distance, ASP_distance1, ASP_distance2 = calculate_dists('structure.pdb', 'equ.dcd')
# Frame index -> time axis in ns (true division via __future__ import;
# presumably 10 frames per ns given the 'Time / ns' label -- confirm).
x_vals = [x / 10 for x in range(0, len(coil_distance))]
# --- Plot 1: loop COM distance with two reference lines ---
plt.plot(x_vals, coil_distance, linewidth=0.5)
#leg = plt.legend(ncol=3, loc=9, fancybox=True)
#leg.get_frame().set_alpha(0.5)
plt.xlabel('Time / ns')
plt.ylabel(ur'Loop COM distance / $\AA$')
# Reference distances; presumably crystal-structure values (cf. the PR/PR20
# labels in the second plot) -- confirm.
plt.axhline(y=9.84, linewidth=1, color = 'red')
plt.axhline(y=11.11, linewidth=1, color = 'green')
plt.savefig('coil_COMdistance.png', dpi=300)
plt.close()
# --- Plot 2: ASP distances with PR/PR20 references and run averages ---
plt.plot(x_vals, ASP_distance, linewidth=0.5)
plt.plot(x_vals, ASP_distance1, linewidth=0.5)
plt.plot(x_vals, ASP_distance2, linewidth=0.5)
# Averages over frames 500+ (i.e. after 50 ns of equilibration).
print 'Loop1 average: ', np.average(ASP_distance1[500:]), np.std(ASP_distance1[500:])
print 'Loop2 average: ', np.average(ASP_distance2[500:]), np.std(ASP_distance2[500:])
plt.xlabel('Time / ns')
plt.ylabel(ur'Loop COM distance / $\AA$')
plt.axhline(y=21.29, linewidth=1, color = '#C45AEC', label='PR20')
plt.axhline(y=15.18, linewidth=1, color = '#C45AEC')
plt.axhline(y=20.36, linewidth=1, color = '#EAC117', label='PR')
plt.axhline(y=15.11, linewidth=1, color = '#EAC117')
# NOTE(review): these averages use the full trajectory, unlike the printed
# values above which skip the first 500 frames -- confirm intent.
plt.axhline(y=np.average(ASP_distance1), linewidth=1, color = 'green', label='Loop1 average')
plt.axhline(y=np.average(ASP_distance2), linewidth=1, color = 'red', label='Loop2 average')
leg = plt.legend(fancybox=True, loc=2, framealpha=0.5)
#leg.get_frame().set_alpha(0.5)
plt.savefig('ASP_COMdistance.png', dpi=300)
plt.close()
| 39.058333 | 103 | 0.667164 |
a021e7c81cd72a8cb8466d95bea774bd4667239f | 1,692 | py | Python | src/api/content_flag.py | Viewly/alpha-2 | 6b6d827197489164d8c4bde4f4d591dcec5a2163 | [
"MIT"
] | null | null | null | src/api/content_flag.py | Viewly/alpha-2 | 6b6d827197489164d8c4bde4f4d591dcec5a2163 | [
"MIT"
] | 1 | 2021-05-07T06:26:16.000Z | 2021-05-07T06:26:16.000Z | src/api/content_flag.py | Viewly/alpha-2 | 6b6d827197489164d8c4bde4f4d591dcec5a2163 | [
"MIT"
] | null | null | null | import datetime as dt
import json
from flask_restful import (
Resource,
reqparse,
)
from flask_security import current_user
from marshmallow_sqlalchemy import ModelSchema
from .utils import auth_required
from .. import db
from ..core.utils import log_exception
from ..models import ContentFlag
# Module-level, stateless schema instance shared across requests.
flag_schema = FlagSchema()
# Request-body parser for the flag endpoints: video_id is mandatory,
# flag_type is optional.
parser = reqparse.RequestParser()
parser.add_argument('video_id', type=str, required=True)
parser.add_argument('flag_type', type=str)
| 26.030769 | 77 | 0.60461 |
a0220e4b4dae9e864bc6a43965e05ecf1eb56be9 | 13,231 | py | Python | cgmodsel/utils.py | franknu/cgmodsel | b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a | [
"MIT"
] | 1 | 2020-09-01T08:39:14.000Z | 2020-09-01T08:39:14.000Z | cgmodsel/utils.py | franknu/cgmodsel | b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a | [
"MIT"
] | null | null | null | cgmodsel/utils.py | franknu/cgmodsel | b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a | [
"MIT"
] | 1 | 2020-09-04T13:35:41.000Z | 2020-09-04T13:35:41.000Z | # -*- coding: utf-8 -*-
"""
Copyright: Frank Nussbaum (frank.nussbaum@uni-jena.de)
This file contains various functions used in the module including
- sparse norms and shrinkage operators
- a stable logsumexp implementation
- array printing-method that allows pasting the output into Python code
"""
import numpy as np
#################################################################################
# norms and shrinkage operators
#################################################################################
try:
# the following requires setup
# import os
# os.system('python cyshrink/setup.py build_ext --inplace')
# TODO(franknu): configure n_threads/interface
from cyshrink.shrink.shrink import grp as grp_soft_shrink
from cyshrink.shrink.shrink import grp_weight as grp_soft_shrink_weight
print('successfully imported shrink.shrink')
except Exception as e:
print(e)
# from cyshrink.shrink.shrink import grp_weight as grp_soft_shrink_weight2
# naive and slow implementations
print('''
Failed to import Cython shrink functions, setup is required...
using slower native Python functions instead''')
def grp_soft_shrink(mat, tau, glims, off=False):
"""just a wrapper for grp_soft_shrink_weight with weiths=None"""
return grp_soft_shrink_weight(mat, tau, glims, off=False, weights=None)
    def grp_soft_shrink_weight(mat, tau,
                               glims,
                               off=False,
                               weights=None):
        """
        calculate (group-)soft-shrinkage (pure-Python fallback).

        Args:
            mat (np.array): matrix.
            tau (float): non-negative shrinkage parameter.
            off (bool): if True, do not shrink diagonal entries.
            glims: group delimiters (cumulative sizes of groups).
            weights (optional): weights for weighted l_{1,2} norm/shrinkage.

        Returns:
            tuple: shrunken matrix, (group) l_{1,2}-norm of shrunken matrix.

        Note:
            this code could be made much faster
            (by parallelizing loops, efficient storage access).
        """
        shrinkednorm = 0
        n_groups = len(glims) - 1
        # When every group has size 1, group shrinkage degenerates to plain
        # elementwise soft shrinkage.
        if glims[-1] == n_groups:  # each group has size 1
            tmp = np.abs(mat)
            if not weights is None:  # weighted l1-norm
                tmp -= tau * weights
            else:
                tmp -= tau
            # Clip tiny/negative magnitudes to exactly zero.
            tmp[tmp < 1e-25] = 0
            shrinked = np.multiply(np.sign(mat), tmp)
            l1norm = np.sum(np.abs(shrinked.flatten()))
            if off:
                # Exclude the diagonal from the norm and restore the
                # original (unshrunk) diagonal entries.
                l1norm -= np.sum(np.abs(np.diag(shrinked)))
                shrinked -= np.diag(np.diag(shrinked))
                shrinked += np.diag(np.diag(mat))
            return shrinked, l1norm
        # group soft shrink
        if weights is None:
            weights = np.ones(mat.shape)  # TODO(franknu): improve style
        tmp = np.empty(mat.shape)
        for i in range(n_groups):
            for j in range(n_groups):
                # TODO(franknu): use symmetry
                group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
                if (i == j) and off:
                    # Keep diagonal blocks untouched when `off` is set.
                    tmp[glims[i]:glims[i + 1], glims[i]:glims[i + 1]] = group
                    continue
                gnorm = np.linalg.norm(group, 'fro')
                w_ij = tau * weights[i,j]
                if gnorm <= w_ij:
                    # Whole block shrinks to zero.
                    tmp[glims[i]:glims[i + 1],
                        glims[j]:glims[j + 1]] = np.zeros(group.shape)
                else:
                    # Scale the block towards zero and accumulate its
                    # (weighted) contribution to the shrunken group norm.
                    tmp[glims[i]:glims[i+1], glims[j]:glims[j+1]] = \
                        group * (1 - w_ij / gnorm)
                    shrinkednorm += weights[i,j] * (1 - w_ij / gnorm) * gnorm
        return tmp, shrinkednorm
def l21norm(mat, glims=None, off=False, weights=None):
    """
    calculate (possibly weighted) l_{1,2}-norm.

    Args:
        mat (np.array): matrix.
        off (bool): if True, exclude diagonal entries/groups from the norm.
        glims: group delimiters (cumulative sizes of groups); if None,
            the plain elementwise l1-norm is computed instead.
        weights (optional): weights for weighted l_{1,2} norm.

    Returns:
        float: (group) l_{1,2}-norm.
    """
    if glims is None:
        # calculate regular l1-norm
        tmp = np.abs(mat)  # tmp is a copy; could be done inplace via out=
        if weights is not None:  # weighted l1-norm
            # BUG FIX: this previously read `np.multiply(...).flatten`,
            # binding the method without calling it, which made np.sum
            # below fail on the weighted path.
            tmp = np.multiply(tmp, weights)
        tmp = np.sum(tmp)
        if off:
            # NOTE(review): the diagonal is subtracted unweighted even in
            # the weighted case -- confirm this is intended.
            tmp -= np.sum(np.diag(np.abs(mat)))
        return tmp
    n_groups = len(glims) - 1
    l21sum = 0
    # Sum Frobenius norms of the strictly-lower-triangular groups ...
    if weights is None:
        for i in range(n_groups):
            for j in range(i):
                group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
                l21sum += np.linalg.norm(group, 'fro')
    else:
        for i in range(n_groups):
            for j in range(i):
                group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
                l21sum += weights[i, j] * np.linalg.norm(group, 'fro')
    l21sum *= 2  # use symmetry
    if not off:
        # ... and add the (unweighted) diagonal groups unless `off` is set.
        for i in range(n_groups):
            group = mat[glims[i]:glims[i + 1], glims[i]:glims[i + 1]]
            l21sum += np.linalg.norm(group, 'fro')
    return l21sum
###############################################################################
# stable implementation of logsumexp etc.
###############################################################################
#from scipy.special import logsumexp
def _exp_shiftedmax(array, axis=None):
"""calculate exponentials of array shifted by its max, avoiding overflow
by subtracting maximum before"""
a_max = np.amax(array, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
# print((a-a_max).shape)
exp_shiftedamax = np.exp(array - a_max)
# last line: a_max is repeated columnwise (if axis = 1)
return exp_shiftedamax, a_max
def logsumexp(array, axis=None, keepdims=True):
    """Numerically stable log(sum(exp(array))) along `axis`.

    The maximum along `axis` is subtracted before exponentiation (via
    _exp_shiftedmax) and added back after the log, avoiding overflow.
    Adapted from logsumexp in scipy.special (v1.1.0).

    Args:
        array (np.array): input values.
        axis (int): axis along which to reduce (None for all elements).
        keepdims (bool): whether the reduced axis is kept with size 1.

    Returns:
        np.array: the logsumexp values.
    """
    exp_vals, shift = _exp_shiftedmax(array, axis=axis)
    # Silence log-of-zero warnings (all -inf slices yield -inf, as desired).
    with np.errstate(divide='ignore'):
        total = np.sum(exp_vals, axis=axis, keepdims=keepdims)
        result = np.log(total)
    if not keepdims:
        shift = np.squeeze(shift, axis=axis)
    return result + shift
def _logsumexp_and_conditionalprobs(array):
    """Return row-wise logsumexp and conditional (softmax) probabilities.

    Args:
        array (np.array): 2-D array with the same shape as the discrete
            data in dummy-representation (one row per data point).

    Returns:
        tuple: (1-D array of logsumexp values, one per row;
                2-D array of per-row softmax probabilities).
    """
    exp_shifted, a_max = _exp_shiftedmax(array, axis=1)
    # Row sums of shifted exponentials; >= 1 since the max term is exp(0).
    summed = np.sum(exp_shifted, axis=1, keepdims=True)
    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        out_logsumexp = np.log(summed)
    out_logsumexp += a_max
    # node conditional probabilities (softmax), required for the gradient
    size = array.shape[1]
    out_conditionalprobs = np.divide(exp_shifted,
                                     np.dot(summed, np.ones((1, size))))
    # (A stale block of commented-out stability-debugging code was removed.)
    out_logsumexp = np.squeeze(out_logsumexp)
    return out_logsumexp, out_conditionalprobs
def _logsumexp_condprobs_red(array):
"""normalization and conditional probabilities for reduced levels,
a ... two-dimensional array"""
a_max = np.amax(array, axis=1, keepdims=True)
a_max = np.maximum(a_max, 0)
# last line: account for missing column with probs exp(0) for 0th level
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
exp_shifted = np.exp(array - a_max) # a_max is repeated columnwise (axis=1)
# calc column vector s of (shifted) normalization sums
# note that entries always > 1, since one summand in each col is exp(0)
summed = np.sum(exp_shifted, axis=1, keepdims=True)
summed += np.exp(-a_max) # add values from missing 0th column
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
out_logsumexp = np.log(summed)
out_logsumexp += a_max
out_logsumexp = np.squeeze(out_logsumexp)
# node conditional probabilities, required for gradient
size = array.shape[1]
out_conditionalprobs = np.divide(exp_shifted,
np.dot(summed, np.ones((1, size))))
# note: log of this is not stable if probabilities close to zero
# - use logsumexp instead for calculating plh value
return out_logsumexp, out_conditionalprobs
###############################################################################
# some conversion functions for representations of discrete data
###############################################################################
def dummy_to_index_single(dummy_x, sizes):
    """Convert a single dummy-coded (one-hot per variable) vector to
    index representation.

    Args:
        dummy_x: flat sequence of 0/1 indicators, one block per variable.
        sizes: number of levels per variable (block lengths).

    Returns:
        np.array: integer vector with the selected level of each variable.
        If a block contains no 1, that entry stays uninitialized
        (np.empty semantics, as before).
    """
    offset = 0
    # BUG FIX: dtype was np.int, an alias removed in NumPy 1.24; the
    # builtin int is the documented replacement.
    ind = np.empty(len(sizes), dtype=int)
    for i, size_r in enumerate(sizes):
        for j in range(size_r):
            if dummy_x[offset + j] == 1:
                ind[i] = j
                break
        offset += size_r
    return ind
def dummy_to_index(dummy_data, sizes):
    """Convert a dummy-coded data matrix to index representation.

    Args:
        dummy_data (np.array): 2-D array (n_data, sum(sizes)) of 0/1
            indicators, one block of columns per variable.
        sizes: number of levels per variable.

    Returns:
        np.array: (n_data, len(sizes)) integer matrix of selected levels.
    """
    n_data, ltot = dummy_data.shape
    assert ltot == sum(sizes)
    n_cat = len(sizes)
    # np.int was removed in NumPy 1.24; builtin int is the replacement.
    index_data = np.empty((n_data, n_cat), dtype=int)
    for k in range(n_data):
        offset = 0
        for i, size_r in enumerate(sizes):
            for j in range(size_r):
                # BUG FIX: the row index k was missing; indexing a 2-D
                # array with a single index yielded a whole row and made
                # the comparison raise for more than one column.
                if dummy_data[k, offset + j] == 1:
                    index_data[k, i] = j
                    break
            offset += size_r
    return index_data
#def dummypadded_to_unpadded(dummy_data, n_cat):
# """remove convert dummy to index representation"""
# unpadded = np.empty(n_cat)
# for i,x in enumerate(dummy_data):
# if i % 2 == 1:
# unpadded[i // 2] = x
# return unpadded
def index_to_dummy(idx, glims, ltot):
    """Expand an index vector into its dummy (indicator) representation.

    Args:
        idx: sequence of level indices, one per discrete variable.
        glims: group delimiters (cumulative sizes of groups).
        ltot: total length of the dummy vector (sum of group sizes).

    Returns:
        np.array: vector of length `ltot` with a 1 at each selected level.
    """
    dummy = np.zeros(ltot)
    hot_positions = [glims[var] + level for var, level in enumerate(idx)]
    dummy[hot_positions] = 1
    return dummy
def dummy2dummyred(dummy_data, glims):
    """Drop each variable's first (0th-level) indicator column, producing
    the reduced dummy representation."""
    zeroth_level_cols = glims[:-1]
    return np.delete(dummy_data, zeroth_level_cols, axis=1)
###############################################################################
# testing utilities
###############################################################################
def strlistfrom(array, rnd=2):
    """Render a numpy array as a compact, paste-able 'np.array(...)' string.

    Args:
        array (np.array): array to render.
        rnd (int): print precision passed to np.array2string.

    Returns:
        str: e.g. 'np.array([1,2,3])', with all whitespace removed.
    """
    rendered = np.array2string(array, precision=rnd, separator=',')
    for blank in ('\n', ' '):
        rendered = rendered.replace(blank, '')
    return 'np.array(' + rendered + ')'
def tomatlabmatrix(mat):
    """Print a 2-D numpy matrix as a MATLAB matrix literal.

    Example: [[1, 2], [3, 4]] prints as "[[1 2 ];[3 4 ]]".
    """
    nrows, ncols = mat.shape
    row_strings = []
    for i in range(nrows):
        cells = "".join(str(mat[i, j]) + " " for j in range(ncols))
        row_strings.append("[" + cells + "]")
    print("[" + ";".join(row_strings) + "]")
if __name__ == '__main__':
    # Smoke test: three binary variables in dummy encoding.
    SIZES = [2, 2, 2]
    GLIMS = [0, 2, 4, 6]
    LTOT = 6
    IND = [0, 0, 1]
    # Round-trip index -> dummy -> index; IND2 should equal IND.
    # NOTE(review): IND2 is computed but never checked or printed.
    DUMMY = index_to_dummy(IND, GLIMS, LTOT)
    IND2 = dummy_to_index_single(DUMMY, SIZES)
    MAT = np.arange(6).reshape((3, 2))
    RES = _logsumexp_condprobs_red(MAT)
    print(RES)
    # res should be
    # (array([ 1.55144471, 3.34901222, 5.31817543]), array([[ 0.21194156, 0.57611688],
    # [ 0.25949646, 0.70538451],
    # [ 0.26762315, 0.72747516]]))
| 33.752551 | 90 | 0.542438 |
4e41eecc288939d5378c49ce5811a41875918b72 | 1,091 | py | Python | authorization/migrations/0002_auto_20200207_2011.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null | authorization/migrations/0002_auto_20200207_2011.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null | authorization/migrations/0002_auto_20200207_2011.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2020-02-07 18:11
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
| 32.088235 | 122 | 0.604033 |
4e428e1c353f9ae16acebfc45bfab7a9a4bd2704 | 2,081 | py | Python | ssd/modeling/head/ssd_head.py | tkhe/ssd-family | a797ec36fda59549aff54419c105813c33d8cdd3 | [
"MIT"
] | 1 | 2019-07-12T02:21:24.000Z | 2019-07-12T02:21:24.000Z | ssd/modeling/head/ssd_head.py | tkhe/ssd-family | a797ec36fda59549aff54419c105813c33d8cdd3 | [
"MIT"
] | null | null | null | ssd/modeling/head/ssd_head.py | tkhe/ssd-family | a797ec36fda59549aff54419c105813c33d8cdd3 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
from ssd.modeling.anchor import make_anchor_generator
from ssd.utils import bbox
from .inference import make_post_processor
from .loss import make_loss_evaluator
from .predictor import make_ssd_predictor
| 35.87931 | 96 | 0.675156 |
4e42bcb647690572f850059e2f35498edac0af13 | 415 | py | Python | find_max_occurence_simple.py | swatmantis/my-pyscripts | e16af5879b101c30e34e82727292849d1d33f440 | [
"Apache-2.0"
] | null | null | null | find_max_occurence_simple.py | swatmantis/my-pyscripts | e16af5879b101c30e34e82727292849d1d33f440 | [
"Apache-2.0"
] | null | null | null | find_max_occurence_simple.py | swatmantis/my-pyscripts | e16af5879b101c30e34e82727292849d1d33f440 | [
"Apache-2.0"
] | null | null | null | """Find max element"""
#!/usr/bin/env python3
"""Find max element"""
import random
from collections import Counter
# Draw ten random integers from [1, 14].
# NOTE(review): the name `List` shadows typing.List conventions; consider
# renaming (left untouched in this documentation-only pass).
List = [random.randrange(1, 15) for num in range(10)]
# most_frequent() is defined elsewhere in this script; presumably returns a
# Counter.most_common()-style list of (value, count) pairs -- confirm.
frequent_number, frequency = most_frequent(List)[0]
print(f"List {List}: \nMost frequent number {frequent_number} \nFrequency: {frequency}")
| 27.666667 | 88 | 0.742169 |
4e4b385ebb874ffc51cb3af951c49e948dbf2c97 | 1,659 | py | Python | plugin.video.SportsDevil/lib/dialogs/dialogProgress.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 105 | 2015-11-28T00:03:11.000Z | 2021-05-05T20:47:42.000Z | plugin.video.SportsDevil/lib/dialogs/dialogProgress.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 918 | 2015-11-28T14:12:40.000Z | 2022-03-23T20:24:49.000Z | plugin.video.SportsDevil/lib/dialogs/dialogProgress.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 111 | 2015-12-01T14:06:10.000Z | 2020-08-01T10:44:39.000Z | # -*- coding: utf-8 -*-
import xbmcgui | 28.603448 | 90 | 0.603978 |
4e4b7d98eca7eba2d20b079df0bbd0eb0b4e7a32 | 3,828 | py | Python | bitbake/lib/bb/manifest.py | KDAB/OpenEmbedded-Archos | a525c5629a57ccb8656c22fe5528ce264003f9d8 | [
"MIT"
] | 3 | 2015-05-25T10:56:21.000Z | 2021-11-27T17:25:26.000Z | bitbake/lib/bb/manifest.py | KDAB/OpenEmbedded-Archos | a525c5629a57ccb8656c22fe5528ce264003f9d8 | [
"MIT"
] | 1 | 2021-11-27T17:24:21.000Z | 2021-11-27T17:24:21.000Z | bitbake/lib/bb/manifest.py | KDAB/OpenEmbedded-Archos | a525c5629a57ccb8656c22fe5528ce264003f9d8 | [
"MIT"
] | 2 | 2016-08-13T08:40:48.000Z | 2021-03-26T03:01:03.000Z | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, sys
import bb, bb.data
| 26.4 | 112 | 0.544148 |