hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b0afc3b991d4aa30a7baf6f443e94f56c8d47d5 | 2,657 | py | Python | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
] | null | null | null | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
] | 2 | 2021-02-08T20:36:58.000Z | 2022-03-29T21:58:35.000Z | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
] | null | null | null | # project/main/views.py
#################
#### imports ####
#################
import os
import json
import requests
import pickle
from flask import Blueprint, Flask, jsonify, request, g, url_for, abort, redirect, flash, render_template, current_app
from flask_login import current_user
from flask_login import login_required
from .forms import UploadForm
from werkzeug.utils import secure_filename
from project.indexer_lib import get_metadata_from_file
################
#### config ####
################
main_blueprint = Blueprint('main', __name__,)
################
#### routes ####
################
| 34.960526 | 122 | 0.617237 |
9b0ea10947bac276566d22b561a64d291c54aa39 | 3,195 | py | Python | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
] | null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
] | null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
] | null | null | null | import string
from django import forms
from django.conf import settings
from django.shortcuts import get_object_or_404
from accounts.models import User, Profile
from .models import Blogs
from .utils import get_limit_for_level, write_to_limit
| 34.728261 | 179 | 0.747418 |
9b0f367d08c895d53158d4654de98cbeabd4b541 | 1,032 | py | Python | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
] | null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
] | null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
] | null | null | null | a = [1, 2, 3, 5, 6]
# print(recursive_search(a, 3))
a = [3,4,5,6,10,12,20]
print(binary_search(a, 5))
print(recursive_fibonacci(0)) | 21.957447 | 117 | 0.593992 |
9b0fef936f066c73b4c06e85baae1161aaa35969 | 1,134 | py | Python | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
] | null | null | null | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
] | null | null | null | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
] | null | null | null | from heap.max_binary_heap import MaxBinaryHeap | 22.235294 | 46 | 0.611993 |
9b10e4943ad1ee0b4dae85b2c1d4d6a1aefffc28 | 409 | py | Python | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- | 31.461538 | 49 | 0.635697 |
9b119221fff46228bdcf97a9b0a6cdd84ac53dfa | 6,623 | py | Python | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
] | 45 | 2016-03-19T14:39:40.000Z | 2021-12-15T06:34:57.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
] | 73 | 2016-03-19T16:15:45.000Z | 2022-02-22T16:37:16.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
] | 41 | 2016-04-08T14:04:00.000Z | 2021-09-09T20:49:41.000Z | # -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
import numpy.random as nr
from .mea import staggered_positions
from .h5 import open_h5
from .model import _create_clustering
#------------------------------------------------------------------------------
# Mock functions
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True,
with_kwd=True,
add_original=True,
):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
kwx_filename = op.join(dir_path, '_test.kwx')
kwd_filename = op.join(dir_path, '_test.raw.kwd')
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high_factor', 0.95 * .5)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('n_features_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
if recording_size > 0:
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
else:
recording_offset = 1
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1', 'channel_order',
np.arange(1, n_channels - 1)[::-1])
graph = np.array([[1, 2], [2, 3]])
f.write_attr('/channel_groups/1', 'adjacency_graph', graph)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters)]
if add_original:
clusterings += [('original', n_clusters * 2)]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
groups = {0: 0, 1: 1, 2: 2}
_create_clustering(f, clustering, 1, spike_clusters, groups)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
f.write_attr('/recordings/0/raw', 'hdf5_path', kwd_filename)
f.write_attr('/recordings/1/raw', 'hdf5_path', kwd_filename)
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
| 36.191257 | 79 | 0.562434 |
9b11b55cfbda19b56fe51d5da114dd0268d96bc2 | 1,824 | py | Python | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
] | 8 | 2019-07-03T15:33:52.000Z | 2021-10-21T00:56:43.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
] | 3 | 2020-09-02T19:04:36.000Z | 2022-03-12T19:46:50.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
] | 7 | 2019-07-03T15:50:24.000Z | 2020-11-26T12:16:10.000Z | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code to compute the audio intensity for preprocessing.
Code that stores incoming arbitrary audio data, and then yields fixed window
sizes for processing (like computing the intensity.)
After initializing the object, add a block of data to the object, and then
pull fixed sized blocks of data with a given half_window_width, and separated
by window_step samples. Data is always X x num_features, where X can change from
add_data call to call, but num_features must not change. Do not reuse the object
because it has internal state from previous calls.
"""
import numpy as np
from telluride_decoding import result_store
| 36.48 | 80 | 0.733553 |
9b122420662104df8bedddda57c416404fd43cea | 3,355 | py | Python | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
] | 2 | 2019-01-26T02:44:14.000Z | 2019-08-06T00:40:56.000Z | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
] | 1 | 2019-05-23T22:35:27.000Z | 2019-05-25T20:23:50.000Z | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
] | null | null | null | import logging
from urllib.parse import urlsplit
import asyncio as aio
from functools import partial
from .api.service import Service
from .api.xsd import device as deviceParser
from ..utils import requests_get
log = logging.getLogger(__name__)
def test():
device = Device("http://10.42.1.102:49152/setup.xml")
print(device.get_service('basicevent').SetBinaryState(BinaryState=1))
if __name__ == "__main__":
test()
| 28.432203 | 76 | 0.614903 |
9b1232a2760be1096b010b97407d362bad15d50f | 2,012 | py | Python | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | null | null | null | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | 3 | 2021-03-17T16:05:01.000Z | 2021-05-01T18:47:43.000Z | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | null | null | null | """File providing localtime support."""
import time
import network
import ntptime
from machine import RTC, reset
from config import Config
system_config = Config('system_config.json')
| 39.45098 | 92 | 0.578032 |
9b126b83c2c4f4a5775d0727f5ece4feb0b27a5c | 448 | py | Python | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
] | 59 | 2019-12-05T13:23:14.000Z | 2021-12-07T13:54:25.000Z | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
] | 266 | 2020-09-22T16:22:56.000Z | 2021-10-17T18:13:11.000Z | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
] | 213 | 2020-05-20T18:17:21.000Z | 2022-03-06T11:03:42.000Z | from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registration_view, name='api_register'),
path('login/', views.LoginView.as_view(), name='api_login'),
path('complete_profile/', views.complete_profile_view, name='api_complete_profile'),
path('logout/', views.LogoutView.as_view(), name='api_logout'),
path('check_login_status/', views.check_login_status, name='api_check_login_status'),
] | 44.8 | 89 | 0.736607 |
9b148edd9574c90b50e4da5fcd67e478a02f6b95 | 8,347 | py | Python | IPython/kernel/multikernelmanager.py | techtonik/ipython | aff23ecf89ba87ee49168d3cecc213bdbc3b06f9 | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-13T23:06:43.000Z | 2022-03-13T23:06:43.000Z | IPython/kernel/multikernelmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/kernel/multikernelmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
] | 1 | 2020-05-03T10:25:12.000Z | 2020-05-03T10:25:12.000Z | """A kernel manager for multiple kernels
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import uuid
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import (
Instance, Dict, Unicode, Any, DottedObjectName,
)
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
| 32.103846 | 84 | 0.576614 |
9b15d3d976307caf107a8e4d5a8af162262589b1 | 256 | py | Python | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
] | null | null | null | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
] | 1 | 2016-09-29T05:34:12.000Z | 2016-09-30T16:26:07.000Z | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 3
#
# 2 2 4 6 10 ...
# 20
f1 = 2
f2 = 2
for i in range(1, 20):
print '%d\n%d' % (f1, f2)
f1 = f1 + f2
f2 = f1 + f2
| 17.066667 | 43 | 0.582031 |
9b17a28e7a678defe48fd07eac1522b08da41fac | 13,312 | py | Python | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | 2 | 2018-10-05T13:32:46.000Z | 2022-01-01T22:51:20.000Z | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | 4 | 2021-06-08T19:33:40.000Z | 2022-03-11T23:18:06.000Z | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | null | null | null | from random import randrange
from time import time
from __future__ import generators,division
from Subs import *
patch = {
'side l' : 45,
'side r' : 46,
'main 1' : 1,
'main 2' : 2,
'main 3' : 3,
'main 4' : 4,
'main 5' : 5,
'main 6' : 6,
'main 7' : 7,
'main 8' : 8,
'main 9' : 9,
'main 10' : 10,
'center sc' : 20,
'sr sky' : 43,
'blacklight' : 15,
'house':68,
('b0 1 r' ,'b01'):54, # left bank over house
('b0 2 p' ,'b02'):53,
('b0 3 o' ,'b03'):52,
('b0 4 b' ,'b04'):51,
('b0 5 r' ,'b05'):50,
('b0 6 lb','b06'):49,
('b1 1' ,'b11'):55, # mid bank
('b1 2' ,'b12'):56,
('b1 3' ,'b13'):57,
('b1 4' ,'b14'):58,
('b1 5' ,'b15'):59,
('b1 6' ,'b16'):60,
('b2 1 lb','b21'):61, # right bank
('b2 2 r' ,'b22'):62,
('b2 3 b' ,'b23'):63,
('b2 4 o' ,'b24'):64,
('b2 5 p' ,'b25'):65,
('b2 6 r' ,'b26'):66,
}
from util import maxes,scaledict
FL=100
subs = {
'over pit sm' : levs(range(1, 13),(100,0,0,91,77,79,86,55,92,77,59,0)),
'over pit lg' : fulls(range(1, 13)),
('house', 'black') : { 68:100 },
('cyc', 'lightBlue'):{42:FL,43:FL},
('scp hot ctr', 'yellow'):{18:FL},
('scp more', '#AAAA00'):{18:FL,14:FL},
('scp all', '#AAAA00'):fulls((13,16,18,19,39)),
('col oran', '#EEEE99'):fulls((21,25,29,33)),
('col red', 'red'):fulls((24,28,32,36)),
('col red big', 'red'):fulls((24,28,32,36,
'b0 1 r','b0 5 r','b2 2 r','b2 6 r')),
('col blue', 'blue'):fulls((23,27,31,35,'b0 4 b','b2 3 b')),
('col gree', 'green'):fulls((22,26,30,34)),
'sidepost':fulls((45,46)),
'edges':fulls((55,60,49,54,61,66)),
'bank1ctr':fulls(('b12','b13','b14','b15')),
('blacklight', 'purple'):blacklight,
'over pit ctr' : fulls((6,)),
('strobe', 'grey'):strobe,
# 'midstage' : dict([(r, 100) for r in range(11, 21)]),
# 'backstage' : dict([(r, 100) for r in range(21, 31)]),
# 'frontchase' : mr_effect,
'chase' : chase,
'chase2' : chase,
# 'random' : randomdimmer,
}
subs["*10"] = { "14" : 46.000000,
"18" : 46.000000,
"22" : 88.000000,
"23" : 95.000000,
"24" : 19.000000,
"26" : 88.000000,
"27" : 95.000000, "28" : 19.000000,
"30" : 88.000000, "31" : 95.000000,
"32" : 19.000000, "34" : 88.000000,
"35" : 95.000000, "36" : 19.000000,
"b0 5 r" : 7.000000, "b0 4 b" : 95.000000,
"b0 1 r" : 7.000000, "b2 2 r" : 7.000000,
"b2 3 b" : 95.000000, "b2 6 r" : 7.000000, }
subs["*13"] = { "main 1" : 51.0, "main 2" : 51.0, "main 3" : 51.0,
"main 4" : 51.0, "main 5" : 51.0, "main 6" : 51.0,
"main 7" : 51.0, "main 8" : 51.0, "main 9" : 51.0,
"main 10" : 51.0, "11" : 51.0, "12" : 51.0,
"blacklight" : 0.0, "21" : 56.0, "22" : 50.0,
"24" : 51.0, "25" : 56.0, "26" : 50.0, "28" : 51.0,
"29" : 56.0, "30" : 50.0, "32" : 51.0, "33" : 56.0,
"34" : 50.0, "36" : 51.0, "b0 5 r" : 51.0,
"b0 1 r" : 51.0, "b2 2 r" : 51.0, "b2 6 r" : 51.0, }
subs["*16"] = { "main 1" : 54, "main 4" : 49, "main 5" : 41, "main 6" : 43,
"main 7" : 46, "main 8" : 29, "main 9" : 50, "main 10" : 41,
"11" : 32, "13" : 77, "16" : 77, "18" : 77, "19" : 77, "39" : 77,
"42" : 30, "sr sky" : 30,}
subs["*3"] = { "main 1" : 47, "main 2" : 47, "main 3" : 47, "main 4" : 47,
"main 5" : 47, "main 6" : 47, "main 7" : 47, "main 8" : 47, "main 9" : 47,
"main 10" : 47, "11" : 47, "12" : 47, "blacklight" : 0, "21" : 67,
"22" : 69, "23" : 69, "24" : 78, "25" : 67, "26" : 69, "27" : 69,
"28" : 78, "29" : 67, "30" : 69, "31" : 69, "32" : 78, "33" : 67,
"34" : 69, "35" : 69, "36" : 78, "b0 4 b" : 69, "b1 2" : 61,
"b1 3" : 61, "b1 4" : 61, "b1 5" : 61, "b2 3 b" : 69,}
subs["*12"] = { "main 1" : 25, "main 4" : 23, "main 5" : 19, "main 6" : 20,
"main 7" : 22, "main 8" : 14, "main 9" : 23, "main 10" : 19,
"11" : 15, "13" : 36, "16" : 36, "18" : 36, "19" : 36, "22" : 65,
"23" : 100, "24" : 23, "26" : 65, "27" : 100, "28" : 23, "30" : 65,
"31" : 100, "32" : 23, "34" : 65, "35" : 100, "36" : 23, "39" : 36,
"b0 4 b" : 100, "b1 2" : 62, "b1 3" : 62, "b1 4" : 62, "b1 5" : 62,
"b2 3 b" : 100,}
subs["*curtain"] = { "main 4" : 44, "main 5" : 37, "main 6" : 86,
"main 7" : 42, "main 8" : 32, "main 9" : 45, "42" : 41, "sr sky" : 41,
"b0 6 lb" : 27, "b0 1 r" : 27, "b1 1" : 27, "b1 2" : 100, "b1 3" : 100,
"b1 4" : 100, "b1 5" : 100, "b1 6" : 27, "b2 1 lb" : 27, "b2 6 r" : 27,
}
subs["ba outrs"] = fulls("b01 b02 b03 b04 b05 b06 b21 b22 b23 b24 b25 b26".split())
subs["ba some"] = {'b02':40,'b03':FL,'b04':FL,'b05':40,
'b22':40,'b23':FL,'b24':FL,'b25':40,}
subs['*curtain'].update(subs['ba some'])
subs["*2"] = { "main 1" : 77, "main 4" : 70, "main 5" : 59, "main 6" : 61,
"main 7" : 66, "main 8" : 42, "main 9" : 71, "main 10" : 59,
"11" : 45, "24" : 77, "28" : 77, "32" : 77, "36" : 77, "b0 5 r" : 77,
"b0 1 r" : 77, "b2 2 r" : 77, "b2 6 r" : 77,}
subs["*6"] = { "main 1" : 37, "main 4" : 33, "main 5" : 28, "main 6" : 29,
"main 7" : 32, "main 8" : 20, "main 9" : 34, "main 10" : 28,
"11" : 22, "13" : 37, "blacklight" : 0, "16" : 37, "18" : 37,
"19" : 37, "21" : 82, "22" : 82, "23" : 82, "24" : 82, "25" : 82,
"26" : 82, "27" : 82, "28" : 82, "29" : 82, "30" : 82, "31" : 82,
"32" : 82, "33" : 82, "34" : 82, "35" : 82, "36" : 82, "39" : 37,
"b0 5 r" : 82, "b0 4 b" : 82, "b0 1 r" : 82, "b2 2 r" : 82, "b2 3 b" : 82,
"b2 6 r" : 82,}
subs["*8"] = { "13" : 60, "16" : 60, "18" : 60, "19" : 60, "22" : 14,
"23" : 100, "26" : 14, "27" : 100, "30" : 14, "31" : 100, "34" : 14,
"35" : 100, "39" : 60, "b0 6 lb" : 14, "b0 4 b" : 100, "b0 1 r" : 14,
"b1 1" : 14, "b1 2" : 70, "b1 3" : 70, "b1 4" : 70, "b1 5" : 70,
"b1 6" : 14, "b2 1 lb" : 14, "b2 3 b" : 100, "b2 6 r" : 14,}
subs["*5"] = { "main 1" : 81, "main 4" : 74, "main 5" : 62, "main 6" : 64,
"main 7" : 70, "main 8" : 44, "main 9" : 75, "main 10" : 62,
"11" : 48, "21" : 29, "24" : 29, "25" : 29, "28" : 29, "29" : 29,
"32" : 29, "33" : 29, "36" : 29, "42" : 37, "sr sky" : 37, "b0 5 r" : 29,
"b0 4 b" : 72, "b0 3 o" : 72, "b0 2 p" : 29, "b2 2 r" : 29, "b2 3 b" : 72,
"b2 4 o" : 72, "b2 5 p" : 29,}
| 38.810496 | 100 | 0.457031 |
9b1b79fb32008ae0e7fb1fae04c9752108435ac6 | 3,672 | py | Python | python/src/scipp/__init__.py | g5t/scipp | d819c930a5e438fd65e42e2e4e737743b8d39d37 | [
"BSD-3-Clause"
] | null | null | null | python/src/scipp/__init__.py | g5t/scipp | d819c930a5e438fd65e42e2e4e737743b8d39d37 | [
"BSD-3-Clause"
] | null | null | null | python/src/scipp/__init__.py | g5t/scipp | d819c930a5e438fd65e42e2e4e737743b8d39d37 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author Simon Heybrock
# flake8: noqa
from . import runtime_config
user_configuration_filename = runtime_config.config_filename
config = runtime_config.load()
del runtime_config
from ._scipp import _debug_
if _debug_:
import warnings
warnings.formatwarning = custom_formatwarning
warnings.warn(
'You are running a "Debug" build of scipp. For optimal performance use a "Release" build.'
)
from ._scipp import __version__
# Import classes
from ._scipp.core import Variable, DataArray, Dataset, GroupByDataArray, \
GroupByDataset, Unit
# Import errors
from ._scipp.core import BinEdgeError, BinnedDataError, CoordError, \
DataArrayError, DatasetError, DimensionError, \
DTypeError, NotFoundError, SizeError, SliceError, \
UnitError, VariableError, VariancesError
# Import submodules
from ._scipp.core import units, dtype, buckets, geometry
# Import functions
from ._scipp.core import choose, divide, floor_divide, logical_and, \
logical_or, logical_xor, minus, mod, plus, times
# Import python functions
from .show import show, make_svg
from .table import table
from .plotting import plot
from .extend_units import *
from .html import to_html, make_html
from .object_list import _repr_html_
from ._utils import collapse, slices
from ._utils.typing import is_variable, is_dataset, is_data_array, \
is_dataset_or_array
from .compat.dict import to_dict, from_dict
from .sizes import _make_sizes
# Wrappers for free functions from _scipp.core
from ._bins import *
from ._counts import *
from ._comparison import *
from ._cumulative import *
from ._dataset import *
from ._groupby import *
from ._math import *
from ._operations import *
from ._unary import *
from ._reduction import *
from ._shape import *
from ._trigonometry import *
from ._variable import *
setattr(Variable, '_repr_html_', make_html)
setattr(DataArray, '_repr_html_', make_html)
setattr(Dataset, '_repr_html_', make_html)
from .io.hdf5 import to_hdf5 as _to_hdf5
setattr(Variable, 'to_hdf5', _to_hdf5)
setattr(DataArray, 'to_hdf5', _to_hdf5)
setattr(Dataset, 'to_hdf5', _to_hdf5)
setattr(Variable, 'sizes', property(_make_sizes))
setattr(DataArray, 'sizes', property(_make_sizes))
setattr(Dataset, 'sizes', property(_make_sizes))
from ._bins import _bins, _set_bins, _events
setattr(Variable, 'bins', property(_bins, _set_bins))
setattr(DataArray, 'bins', property(_bins, _set_bins))
setattr(Dataset, 'bins', property(_bins, _set_bins))
setattr(Variable, 'events', property(_events))
setattr(DataArray, 'events', property(_events))
from ._structured import _fields
setattr(
Variable, 'fields',
property(
_fields,
doc=
"""Provides access to fields of structured types such as vectors or matrices."""
))
from ._bins import _groupby_bins
setattr(GroupByDataArray, 'bins', property(_groupby_bins))
setattr(GroupByDataset, 'bins', property(_groupby_bins))
setattr(Variable, 'plot', plot)
setattr(DataArray, 'plot', plot)
setattr(Dataset, 'plot', plot)
# Prevent unwanted conversion to numpy arrays by operations. Properly defining
# __array_ufunc__ should be possible by converting non-scipp arguments to
# variables. The most difficult part is probably mapping the ufunc to scipp
# functions.
for _obj in [Variable, DataArray, Dataset]:
setattr(_obj, '__array_ufunc__', None)
| 31.930435 | 98 | 0.735566 |
9b1bd86935affb209f3416a74dae1cedee23495f | 1,733 | py | Python | SimpleBeep.py | RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation | 1c7a5cbfb5373aa5eccde00638bbdff062c57a2d | [
"MIT"
] | 2 | 2021-07-15T14:11:29.000Z | 2022-03-25T23:20:54.000Z | SimpleBeep.py | RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation | 1c7a5cbfb5373aa5eccde00638bbdff062c57a2d | [
"MIT"
] | null | null | null | SimpleBeep.py | RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation | 1c7a5cbfb5373aa5eccde00638bbdff062c57a2d | [
"MIT"
] | 1 | 2021-07-15T14:11:48.000Z | 2021-07-15T14:11:48.000Z | # Import the required 'libraries' for pin definitions and PWM
from machine import Pin, PWM
# Also import a subset for sleep and millisecond sleep. If you just import
# the utime you will have to prefix each call with "utime."
from utime import sleep, sleep_ms
# Define what the buzzer object is - a PWM output on pin 15
buzzer = PWM(Pin(15))
# A list of frequencies
tones = (200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1100, 1200, 1400, 1500)
# Define the function to play a single tone then stop
# Define a similar functionm with no delay between tones
# Now sound the tones, one after the other
for tone in range(len(tones)):
buzz(tones[tone])
# Small gap in SECONDS after the ascending tones
sleep(1)
# Don't do this, it puts the device to Seep Sleep but it reboots on wakeup just
# like the ESP8266
#machine.deepsleep(1)
# Now sound the tones IN REVERSE ORDER ie descending
for tone in range(len(tones) -1, -1, -1):
buzz(tones[tone])
# Another delay
sleep(1)
# Now sound ALL the frequencies from X to Y
for tone in range(500, 2500):
buzz2(tone)
sleep_ms(5)
buzzer.duty_u16(0);
# And repeat in reverse order
for tone in range(2500, 500, -1):
buzz2(tone)
sleep_ms(4)
buzzer.duty_u16(0); | 28.883333 | 119 | 0.671091 |
9b1c20b6056395f07046b2fb8132dfe7ff823554 | 1,789 | py | Python | vendor/packages/sqlalchemy/test/orm/test_bind.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2016-05-09T09:17:35.000Z | 2016-08-03T16:30:16.000Z | test/orm/test_bind.py | clones/sqlalchemy | c9f08aa78a48ba53dd221d3c5de54e5956ecf806 | [
"MIT"
] | null | null | null | test/orm/test_bind.py | clones/sqlalchemy | c9f08aa78a48ba53dd221d3c5de54e5956ecf806 | [
"MIT"
] | null | null | null | from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import MetaData, Integer
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, create_session
import sqlalchemy as sa
from sqlalchemy.test import testing
from test.orm import _base
| 29.816667 | 75 | 0.618222 |
9b1c4ea2bc7164000ac7237aaef4748989fffac3 | 2,607 | py | Python | pdftables/pdf_document.py | tessact/pdftables | 89b0c0f7215fa50651b37e5b1505229c329cc0ab | [
"BSD-2-Clause"
] | 73 | 2015-01-07T01:42:45.000Z | 2021-01-20T01:19:04.000Z | pdftables/pdf_document.py | MartinThoma/pdftables | bd34a86cba8b70d1af2267cf8a30f387f7e5a43e | [
"BSD-2-Clause"
] | 1 | 2020-08-02T18:31:16.000Z | 2020-08-02T18:31:16.000Z | pdftables/pdf_document.py | MartinThoma/pdftables | bd34a86cba8b70d1af2267cf8a30f387f7e5a43e | [
"BSD-2-Clause"
] | 40 | 2015-03-10T05:24:37.000Z | 2019-08-30T06:11:02.000Z | """
Backend abstraction for PDFDocuments
"""
import abc
import os
DEFAULT_BACKEND = "poppler"
BACKEND = os.environ.get("PDFTABLES_BACKEND", DEFAULT_BACKEND).lower()
# TODO(pwaller): Use abstract base class?
# What does it buy us? Can we enforce that only methods specified in an ABC
# are used by client code?
| 27.15625 | 80 | 0.623322 |
9b1ea81c58845b4a3bb52fdf9a88f5aa5548c833 | 3,316 | py | Python | Chapter08/ppo/ppo_kb.py | rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide | 45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce | [
"MIT"
] | 40 | 2019-05-19T01:29:12.000Z | 2022-03-27T04:37:31.000Z | Chapter08/ppo/ppo_kb.py | rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide | 45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce | [
"MIT"
] | null | null | null | Chapter08/ppo/ppo_kb.py | rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide | 45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce | [
"MIT"
] | 19 | 2019-05-02T19:55:57.000Z | 2022-02-26T01:51:45.000Z | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
from class_ppo import *
from gym_torcs import TorcsEnv
#----------------------------------------------------------------------------------------
# Training hyper-parameters for PPO on the TORCS driving environment.
EP_MAX = 2000  # maximum number of episodes
EP_LEN = 1000  # maximum steps per episode
GAMMA = 0.95  # reward discount factor
A_LR = 1e-4  # actor learning rate
C_LR = 1e-4  # critic learning rate
BATCH = 128  # run a PPO update every BATCH steps (or at episode end)
A_UPDATE_STEPS = 10  # actor optimization steps per update
C_UPDATE_STEPS = 10  # critic optimization steps per update
S_DIM, A_DIM = 29, 3  # state / action dimensions (action = steer, accel, brake)
METHOD = dict(name='clip', epsilon=0.1)  # clipped-surrogate PPO objective

# train_test = 0 for train; =1 for test
train_test = 0

# irestart = 0 for fresh restart; =1 for restart from ckpt file
irestart = 0

iter_num = 0

if (irestart == 0):
    iter_num = 0

#----------------------------------------------------------------------------------------

sess = tf.Session()

ppo = PPO(sess, S_DIM, A_DIM, A_LR, C_LR, A_UPDATE_STEPS, C_UPDATE_STEPS, METHOD)

saver = tf.train.Saver()

env = TorcsEnv(vision=False, throttle=True, gear_change=False)

#----------------------------------------------------------------------------------------

# Fresh training initializes variables; testing or restarting loads the checkpoint.
if (train_test == 0 and irestart == 0):
    sess.run(tf.global_variables_initializer())
else:
    saver.restore(sess, "ckpt/model")

for ep in range(iter_num, EP_MAX):

    print("-"*50)
    print("episode: ", ep)

    if np.mod(ep, 100) == 0:
        ob = env.reset(relaunch=True) #relaunch TORCS every N episode because of the memory leak error
    else:
        ob = env.reset()

    # Flatten the TORCS observation into the 29-dim state vector.
    s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))

    buffer_s, buffer_a, buffer_r = [], [], []
    ep_r = 0

    for t in range(EP_LEN): # in one episode

        a = ppo.choose_action(s)
        # Clip actions to valid ranges: steering in [-1, 1], accel/brake in [0, 1].
        a[0] = np.clip(a[0],-1.0,1.0)
        a[1] = np.clip(a[1],0.0,1.0)
        a[2] = np.clip(a[2],0.0,1.0)

        #print("a: ", a)
        ob, r, done, _ = env.step(a)
        s_ = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))

        if (train_test == 0):
            buffer_s.append(s)
            buffer_a.append(a)
            buffer_r.append(r)

        s = s_
        ep_r += r

        if (train_test == 0):
            # update ppo
            if (t+1) % BATCH == 0 or t == EP_LEN-1 or done == True:
                #if t == EP_LEN-1 or done == True:
                # Bootstrap from the critic's value of the last state, then
                # compute discounted returns backwards over the buffer.
                v_s_ = ppo.get_v(s_)
                discounted_r = []
                for r in buffer_r[::-1]:
                    v_s_ = r + GAMMA * v_s_
                    discounted_r.append(v_s_)
                discounted_r.reverse()

                bs = np.array(np.vstack(buffer_s))
                ba = np.array(np.vstack(buffer_a))
                br = np.array(discounted_r)[:, np.newaxis]

                buffer_s, buffer_a, buffer_r = [], [], []
                print("ppo update")
                ppo.update(bs, ba, br)

                #print("screen out: ")
                #ppo.screen_out(bs, ba, br)
                #print("-"*50)

        if (done == True):
            break

    print('Ep: %i' % ep,"|Ep_r: %i" % ep_r,("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',)

    # Append (episode, steps survived, episode reward) for later plotting.
    if (train_test == 0):
        with open("performance.txt", "a") as myfile:
            myfile.write(str(ep) + " " + str(t) + " " + str(round(ep_r,4)) + "\n")

    # Checkpoint every 25 episodes during training.
    if (train_test == 0 and ep%25 == 0):
        saver.save(sess, "ckpt/model")
| 25.507692 | 123 | 0.495778 |
9b2030f197c3c1a90df176f2d19174c439599012 | 681 | py | Python | test/gui/documentationwidget.py | pySUMO/pysumo | 889969f94bd45e2b67e25ff46452378351ca5186 | [
"BSD-2-Clause"
] | 7 | 2015-08-21T17:17:35.000Z | 2021-03-02T21:40:00.000Z | test/gui/documentationwidget.py | pySUMO/pysumo | 889969f94bd45e2b67e25ff46452378351ca5186 | [
"BSD-2-Clause"
] | 2 | 2015-04-14T12:40:37.000Z | 2015-04-14T12:44:03.000Z | test/gui/documentationwidget.py | pySUMO/pysumo | 889969f94bd45e2b67e25ff46452378351ca5186 | [
"BSD-2-Clause"
] | null | null | null | """ Test case for the DocumentationWidget """
from tempfile import mkdtemp
from pySUMOQt import MainWindow
import pysumo
import shutil
"""
Steps:
1. Open pySUMO
2. Open Merge.kif
3. Open DocumentationWidget
3a. Switch to the Ontology tab in the DocumentationWidget
4. Type subrelation into the search field
4a. Press Enter
5. Open TextEditor
5a. Select Merge.kif in TextEditor
6. Press one of the links listed under "Merge"
7. Switch to the WordNet tab in the DocumentationWidget
8. Search for 'Object'
9. Search for 'Table'
"""
if __name__ == "__main__":
tmpdir = mkdtemp()
pysumo.CONFIG_PATH = tmpdir
MainWindow.main()
shutil.rmtree(tmpdir, ignore_errors=True)
| 24.321429 | 57 | 0.756241 |
9b22737cee51dac49b519ede06b216b061a09833 | 1,628 | py | Python | py/garage/tests/threads/test_executors.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/garage/tests/threads/test_executors.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/garage/tests/threads/test_executors.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | import unittest
import threading
from garage.threads import executors
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 29.071429 | 67 | 0.595823 |
9b227c99cc76d04bed95afc7abf3ffae257b32fd | 2,619 | py | Python | exporter/BattleRoyal.py | dl-stuff/dl-datamine | aae37710d2525aaa2b83f809e908be67f074c2d2 | [
"MIT"
] | 3 | 2020-04-29T12:35:33.000Z | 2022-03-22T20:08:22.000Z | exporter/BattleRoyal.py | dl-stuff/dl-datamine | aae37710d2525aaa2b83f809e908be67f074c2d2 | [
"MIT"
] | 1 | 2020-10-23T00:08:35.000Z | 2020-10-29T04:10:35.000Z | exporter/BattleRoyal.py | dl-stuff/dl-datamine | aae37710d2525aaa2b83f809e908be67f074c2d2 | [
"MIT"
] | 4 | 2020-04-05T15:09:08.000Z | 2020-10-21T15:08:34.000Z | import os
import json
from tqdm import tqdm
from loader.Database import DBViewIndex, DBView, check_target_path
from exporter.Shared import snakey
from exporter.Adventurers import CharaData
from exporter.Dragons import DragonData
if __name__ == "__main__":
    # Export all battle-royal unit data to the default output folder.
    index = DBViewIndex()
    view = BattleRoyalUnit(index)
    view.export_all_to_folder()
| 37.956522 | 124 | 0.649866 |
9b26d22dac1fa85ff57a7518cc0afd23693491bf | 111 | py | Python | adonai/user/api/queries.py | Egnod/adonai | b365d81c826fd7b626c9145154ee0136ea73fac1 | [
"MIT"
] | 6 | 2020-01-20T20:02:09.000Z | 2020-02-24T08:40:23.000Z | adonai/user/api/queries.py | Egnod/adonai | b365d81c826fd7b626c9145154ee0136ea73fac1 | [
"MIT"
] | null | null | null | adonai/user/api/queries.py | Egnod/adonai | b365d81c826fd7b626c9145154ee0136ea73fac1 | [
"MIT"
] | null | null | null | from .user.queries import UserQuery # isort:skip
from .user_group.queries import UserGroupQuery # isort:skip
| 37 | 60 | 0.801802 |
f1955c751f92a084391167fe5becfed42fd578e2 | 772 | py | Python | test/test_slope_heuristic.py | StatisKit/Core | 79d8ec07c203eb7973a6cf482852ddb2e8e1e93e | [
"Apache-2.0"
] | null | null | null | test/test_slope_heuristic.py | StatisKit/Core | 79d8ec07c203eb7973a6cf482852ddb2e8e1e93e | [
"Apache-2.0"
] | 7 | 2018-03-20T14:23:16.000Z | 2019-04-09T11:57:57.000Z | test/test_slope_heuristic.py | StatisKit/Core | 79d8ec07c203eb7973a6cf482852ddb2e8e1e93e | [
"Apache-2.0"
] | 7 | 2017-04-28T07:41:01.000Z | 2021-03-15T18:17:20.000Z | import matplotlib
matplotlib.use('Agg')
from statiskit import core
from statiskit.data import core as data
import unittest
from nose.plugins.attrib import attr | 25.733333 | 140 | 0.676166 |
f1966b5ea95fad48b2c50f6ae0e84a62362e0d49 | 688 | py | Python | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
] | 18 | 2019-03-07T06:25:58.000Z | 2022-03-07T04:38:36.000Z | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
] | null | null | null | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
] | 3 | 2020-06-21T23:22:19.000Z | 2022-03-07T05:11:14.000Z | from scipy.io import loadmat
import pickle
mldinfo =loadmat('mldinfo.mat')["mldinfo"]
out={}
print(mldinfo)
for i in mldinfo:
line={}
line["floatNumber"] = i[0]
line["cycleNumber"] = i[26]
line["tempMLTFIT"] = i[27]
line["tempMLTFITIndex"] = i[28]
line["densityMLTFIT"] = i[30]
line["salinityMLTFIT"] = i[31]
line["steepest"] = i[29]
line["tempAlgo"] = i[4]
line["salinityAlgo"] = i[8]
line["densityAlgo"] = i[9]
line["tempThreshold"] = i[13]
line["densityThreshold"] = i[17]
line["tempGradient"] = i[21]
line["densityGradient"] = i[22]
out[i[0],i[26]]=line
with open("matOutput.pickle","wb") as f:
pickle.dump(out,f)
| 25.481481 | 42 | 0.604651 |
f19839bccee38959af0b437965974c79d3cf702f | 1,578 | py | Python | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
] | 500 | 2018-09-27T17:28:11.000Z | 2022-03-30T02:05:57.000Z | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
] | 888 | 2018-09-20T05:04:46.000Z | 2022-03-28T04:11:22.000Z | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
] | 79 | 2019-02-13T19:49:21.000Z | 2022-02-27T16:39:04.000Z | #!/usr/bin/env python
"""
This is a special app instance that allows us to perform database operations
without going through the app's migration_needed check. Running this script
is functionally equivalent to what `flask db` normally does. The reason we
can't continue to use that is that command is that it invokes the app instance from
FLASK_APP env variable (natlas-server.py) which performs the migration check and exits
during initialization.
"""
import argparse
from app import create_app
from config import Config
from migrations import migrator
parser_desc = """Perform database operations for Natlas.\
It is best practice to take a backup of your database before you upgrade or downgrade, just in case something goes wrong.\
"""
if __name__ == "__main__":
main()
| 33.574468 | 135 | 0.716096 |
f19909329b0b6001c89ab80ab88194f8528fba3b | 4,368 | py | Python | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z | # -*- coding: utf-8 -*-
"""Forms to process action related fields.
ActionUpdateForm: Basic form to process the name/description of an action
ActionForm: Inherits from Basic to process name, description and type
ActionDescriptionForm: Inherits from basic but process only description (for
surveys)
FilterForm: Form to process filter elements
ConditionForm: Form to process condition elements
"""
from builtins import str
import json
from typing import Dict
from django import forms
from django.utils.translation import ugettext_lazy as _
from ontask import models
from ontask.core import RestrictedFileField
import ontask.settings
| 29.315436 | 78 | 0.63576 |
f199c3663d40296d492582d4c84325e0a23a8f49 | 27,740 | py | Python | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | null | null | null | #------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 01/05/2006
#
#------------------------------------------------------------------------------
""" Defines tree node classes and editors for various types of values.
"""
#-------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------
from __future__ import absolute_import
import inspect
from operator import itemgetter
from types import FunctionType, MethodType
from traits.api import Any, Bool, HasPrivateTraits, HasTraits, Instance, List, Str
from .tree_node import ObjectTreeNode, TreeNode, TreeNodeObject
from .editors.tree_editor import TreeEditor
import six
#-------------------------------------------------------------------------
# 'SingleValueTreeNodeObject' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'MultiValueTreeNodeObject' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'StringNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'NoneNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'BoolNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'IntNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'FloatNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'ComplexNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'OtherNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'TupleNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'ListNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'SetNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'ArrayNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'DictNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'FunctionNode' class:
#-------------------------------------------------------------------------
#---------------------------------------------------------------------------
# 'MethodNode' class:
#---------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'ObjectNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'ClassNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'TraitsNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# 'RootNode' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Define the mapping of object types to nodes:
#-------------------------------------------------------------------------
_basic_types = None
def basic_types():
    """Return the mapping of Python value types to tree node classes.

    The list is built lazily on first call and cached in the module-level
    ``_basic_types``; subsequent calls return the cached list.
    """
    global _basic_types

    if _basic_types is None:
        # Create the mapping of object types to nodes:
        _basic_types = [
            (type(None), NoneNode),
            (str, StringNode),
            (six.text_type, StringNode),
            (bool, BoolNode),
            (int, IntNode),
            (float, FloatNode),
            (complex, ComplexNode),
            (tuple, TupleNode),
            (list, ListNode),
            (set, SetNode),
            (dict, DictNode),
            (FunctionType, FunctionNode),
            (MethodType, MethodNode),
            (HasTraits, TraitsNode)
        ]

        try:
            # numpy is optional; map ndarray to ArrayNode only if importable.
            from numpy import array
            _basic_types.append((type(array([1])), ArrayNode))
        except ImportError:
            pass

    return _basic_types
#-------------------------------------------------------------------------
# '_ValueTree' class:
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Defines the value tree editor(s):
#-------------------------------------------------------------------------
# Nodes in a value tree:
value_tree_nodes = [
ObjectTreeNode(
node_for=[NoneNode, StringNode, BoolNode, IntNode, FloatNode,
ComplexNode, OtherNode, TupleNode, ListNode, ArrayNode,
DictNode, SetNode, FunctionNode, MethodNode, ObjectNode,
TraitsNode, RootNode, ClassNode])
]
# Editor for a value tree:
value_tree_editor = TreeEditor(
auto_open=3,
hide_root=True,
editable=False,
nodes=value_tree_nodes
)
# Editor for a value tree with a root:
value_tree_editor_with_root = TreeEditor(
auto_open=3,
editable=False,
nodes=[
ObjectTreeNode(
node_for=[NoneNode, StringNode, BoolNode, IntNode, FloatNode,
ComplexNode, OtherNode, TupleNode, ListNode, ArrayNode,
DictNode, SetNode, FunctionNode, MethodNode,
ObjectNode, TraitsNode, RootNode, ClassNode]
),
TreeNode(node_for=[_ValueTree],
auto_open=True,
children='values',
move=[SingleValueTreeNodeObject],
copy=False,
label='=Values',
icon_group='traits_node',
icon_open='traits_node')
]
)
#-------------------------------------------------------------------------
# Defines a 'ValueTree' trait:
#-------------------------------------------------------------------------
# Trait for a value tree:
ValueTree = Instance(_ValueTree, (), editor=value_tree_editor_with_root)
| 34.805521 | 82 | 0.369755 |
f199cbd96d64f014fd31d99a8774f29dfb8baff8 | 3,400 | py | Python | apps/events/tests/admin_tests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | apps/events/tests/admin_tests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | apps/events/tests/admin_tests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | from django.contrib.auth.models import Group
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from django_dynamic_fixture import G
from apps.authentication.models import OnlineUser
from ..constants import EventType
from ..models import Event
from .utils import (
add_event_permissions,
add_to_group,
create_committee_group,
generate_event,
)
# Django admin change-list URL for the Event model.
EVENTS_ADMIN_LIST_URL = reverse_lazy("admin:events_event_changelist")
# Landing page of the events dashboard.
EVENTS_DASHBOARD_INDEX_URL = reverse_lazy("dashboard_events_index")
| 33.009709 | 83 | 0.746471 |
f19a9b4226505a42ffa94930bb3319c14ebc1a93 | 359 | py | Python | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
] | null | null | null | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
] | null | null | null | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
] | null | null | null | from pyuvs.files import DataFilenameCollection
| 29.916667 | 57 | 0.70195 |
f19aa91679864846081cef43f5707f10afbe079f | 9,380 | py | Python | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
] | null | null | null | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
] | 2 | 2019-09-06T12:19:18.000Z | 2019-09-06T15:21:36.000Z | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
] | null | null | null | # coding: utf-8
import urllib2
import message_box
from ast import literal_eval
| 50.430108 | 133 | 0.615032 |
f19bffe1d8db01545aa2bac87ec675c56149bef9 | 195 | py | Python | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | 1 | 2021-06-30T18:08:42.000Z | 2021-06-30T18:08:42.000Z | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | null | null | null | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | null | null | null | import sys
import os
import subprocess
import pyautogui
import time
# Launch a Windows command prompt, wait, then type "python" into it.
# NOTE(review): subprocess.run blocks until the launched process exits, so
# the keystrokes below may only be sent after cmd closes -- Popen may be
# the intended call; confirm on the target machine.
subprocess.run("C:/Windows/system32/cmd.exe")
time.sleep(3)  # give the window time to open and gain focus
pyautogui.typewrite("python")  # requires the cmd window to have focus
| 8.478261 | 46 | 0.651282 |
f19c254391cc08472493c02b34a771daed15156b | 75 | py | Python | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
] | null | null | null | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
] | null | null | null | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
] | null | null | null | #
import pip_test
if __name__ == '__main__':
    pip_test.hello()  # smoke test: call the installed package's hello()
f19cbc9fa4b054f10523c99c5ea25ef1f89616fb | 26 | py | Python | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
] | null | null | null | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
] | null | null | null | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
] | null | null | null | from .main import MainPort | 26 | 26 | 0.846154 |
f19e04b462dda85e0bd45e84d17a144a85a0f4c3 | 1,830 | py | Python | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
] | null | null | null | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
] | 19 | 2019-01-23T16:59:55.000Z | 2021-07-30T15:12:27.000Z | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
] | 9 | 2018-10-31T10:40:56.000Z | 2020-12-09T07:44:45.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018, 2019 Esteban J. G. Gabancho.
#
# Invenio-S3 is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
from __future__ import absolute_import, print_function
from invenio_s3 import InvenioS3
def test_version():
    """Test version import."""
    # Import inside the test so a broken package import fails this test
    # instead of failing module collection.
    from invenio_s3 import __version__
    assert __version__
def test_init(appctx):
    """Test extension initialization."""
    # The app fixture should have registered the extension under this key.
    assert 'invenio-s3' in appctx.extensions
    appctx.config['S3_ENDPOINT_URL'] = 'https://example.com:1234'
    appctx.config['S3_REGION_NAME'] = 'eu-west-1'
    # init_s3fs_info exposes the s3fs connection settings built from config.
    s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
    assert s3_connection_info['client_kwargs'][
        'endpoint_url'] == 'https://example.com:1234'
    assert s3_connection_info['client_kwargs'][
        'region_name'] == 'eu-west-1'
def test_access_key(appctx):
    """Test correct access key works together with flawed one.

    Sets the access-key config value, clears any cached connection info,
    and checks the freshly computed info picks up the new key.
    """
    # NOTE(review): the triple-C spelling presumably mirrors the extension's
    # own config variable name -- confirm before "fixing" it.
    appctx.config['S3_ACCCESS_KEY_ID'] = 'secret'
    # Drop the cached property value in case it was computed already;
    # dict.pop with a default replaces the try/del/except KeyError dance.
    appctx.extensions['invenio-s3'].__dict__.pop('init_s3fs_info', None)
    s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
    assert s3_connection_info['key'] == 'secret'
def test_secret_key(appctx):
    """Test correct secret key works together with flawed one.

    Sets the secret-key config value, clears any cached connection info,
    and checks the freshly computed info picks up the new secret.
    """
    # NOTE(review): 'S3_SECRECT_ACCESS_KEY' presumably mirrors the
    # extension's own config variable name -- confirm before "fixing" it.
    appctx.config['S3_SECRECT_ACCESS_KEY'] = 'secret'
    # Drop the cached property value in case it was computed already;
    # dict.pop with a default replaces the try/del/except KeyError dance.
    appctx.extensions['invenio-s3'].__dict__.pop('init_s3fs_info', None)
    s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
    assert s3_connection_info['secret'] == 'secret'
| 33.272727 | 72 | 0.701639 |
f1a11c9a3c3f708c9cfe435d2e5adfed43004799 | 600 | py | Python | textattack/constraints/pre_transformation/max_word_index_modification.py | cclauss/TextAttack | 98b8d6102aa47bf3c41afedace0215d48f8ed046 | [
"MIT"
] | 1 | 2021-06-24T19:35:18.000Z | 2021-06-24T19:35:18.000Z | textattack/constraints/pre_transformation/max_word_index_modification.py | 53X/TextAttack | e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be | [
"MIT"
] | null | null | null | textattack/constraints/pre_transformation/max_word_index_modification.py | 53X/TextAttack | e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be | [
"MIT"
] | 1 | 2021-11-12T05:26:21.000Z | 2021-11-12T05:26:21.000Z | from textattack.constraints.pre_transformation import PreTransformationConstraint
from textattack.shared.utils import default_class_repr
| 37.5 | 95 | 0.765 |
f1a149c6c08f22569c5bb980bf68d3996a092d95 | 2,012 | bzl | Python | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
] | null | null | null | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
] | 1 | 2021-10-01T05:24:29.000Z | 2021-10-01T05:24:29.000Z | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
] | null | null | null | # TODO(jonathan): try to simplify this.
def merge_kwargs(d1, d2, limit = 5):
    """Combine kwargs in a useful way.

    merge_kwargs combines dictionaries by inserting keys from d2 into d1. If
    the same key exists in both dictionaries:

      * if the value is a scalar, d2[key] overrides d1[key].
      * if the value is a list, the contents of d2[key] not already in d1[key]
        are appended to d1[key].
      * if the value is a dict, the sub-dictionaries are merged similarly
        (scalars are overridden, lists are appended).

    By default, this function limits recursion to 5 levels. The "limit"
    argument can be specified if deeper recursion is needed.

    Starlark forbids unbounded loops and recursion, so nested dicts are
    merged with an explicit worklist: each pass of the outer loop handles
    one nesting level.
    """
    merged = {}
    # Worklist of (destination_dict, source_dict, key) triples.  d1's keys
    # are queued before d2's so that d2's values override / append later.
    to_expand = [(merged, d1, k) for k in d1] + [(merged, d2, k) for k in d2]
    for _ in range(limit):
        expand_next = []  # worklist for the next nesting level
        for m, d, k in to_expand:
            if k not in m:
                # First occurrence of this key: shallow-copy into the result
                # so later merging never mutates the caller's containers.
                if type(d[k]) == "list":
                    m[k] = list(d[k])
                    continue
                if type(d[k]) == "dict":
                    m[k] = dict(d[k])
                    continue
                # type must be scalar:
                m[k] = d[k]
                continue
            # Key already present: merge according to the existing type.
            if type(m[k]) == "dict":
                # Defer sub-dict merging to the next pass of the outer loop.
                expand_next.extend([(m[k], d[k], k2) for k2 in d[k]])
                continue
            if type(m[k]) == "list":
                # uniquify as we combine lists:
                for item in d[k]:
                    if item not in m[k]:
                        m[k].append(item)
                continue
            # type must be scalar:
            m[k] = d[k]
        to_expand = expand_next
        if not to_expand:
            break

    # If <limit> layers of recursion were not enough, explicitly fail.
    if to_expand:
        fail("merge_kwargs: exceeded maximum recursion limit.")
    return merged
def add_tag(k, t):
    """Return a copy of kwargs `k` whose "tags" list is guaranteed to contain `t`."""
    tag_addition = {"tags": [t]}
    return merge_kwargs(k, tag_addition)
| 32.983607 | 82 | 0.524851 |
f1a1a49462e4695e563f4953333c397736ce81f0 | 24,083 | py | Python | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
] | null | null | null | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
] | null | null | null | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
] | null | null | null | # import libraries
import os, os.path
import numpy as np
import pandas as pd
# import geopandas as gpd
import sys
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import scipy
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
from datetime import date
import datetime
import time
from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
################################################################
#####
##### Function definitions
#####
################################################################
########################################################################
def addToDF_SOS_EOS_White(pd_TS, VegIdx = "EVI", onset_thresh=0.15, offset_thresh=0.15):
    """Annotate a time series with White-style SOS/EOS columns.

    The vegetation index is rescaled to
    ratio = (V - V_min) / (V_max - V_min); start of season (SOS) is where
    the ratio rises above ``onset_thresh`` and end of season (EOS) is where
    it drops below ``offset_thresh``.  Returns a copy of ``pd_TS`` with
    three added columns: '<VegIdx>_ratio', 'SOS' and 'EOS'.
    """
    frame = pd_TS.copy()

    lo, hi = frame[VegIdx].min(), frame[VegIdx].max()
    # epsilon keeps the division defined when the series is constant
    span = hi - lo + sys.float_info.epsilon

    ratio_col = VegIdx + "_ratio"
    frame[ratio_col] = (frame[VegIdx] - lo) / span

    # Signals that change sign at onset / offset crossings.
    onset_sig = frame[ratio_col] - onset_thresh
    offset_sig = offset_thresh - frame[ratio_col]
    BOS, EOS = find_signChange_locs_DifferentOnOffset(onset_sig, offset_sig)

    # Keep the index value at the detected crossings, zero elsewhere.
    frame['SOS'] = BOS * frame[VegIdx]
    frame['EOS'] = EOS * frame[VegIdx]
    return frame
########################################################################
def correct_big_jumps_1DaySeries(dataTMS_jumpie, give_col, maxjump_perDay = 0.015):
    """Interpolate over implausibly large day-to-day jumps in an index series.

    A rise between two consecutive observations is treated as an outlier
    when it exceeds ``maxjump_perDay`` per elapsed day.  The default 0.015
    is a slightly stricter version of the SG-based paper's rule of a 0.4
    NDVI jump in 20 days (0.02/day).  Outliers are replaced by linear
    interpolation between their two neighbors.

    Parameters
    ----------
    dataTMS_jumpie : pandas.DataFrame
        Time series with columns 'image_year', 'doy', 'system_start_time'
        (epoch milliseconds) and the index column ``give_col``.
    give_col : str
        Name of the vegetation-index column to clean (e.g. 'EVI', 'NDVI').
    maxjump_perDay : float, optional
        Maximum plausible index increase per day.

    Returns
    -------
    pandas.DataFrame
        Copy of the input, sorted by (image_year, doy), with
        'system_start_time' converted to epoch seconds and outlier values
        in ``give_col`` replaced by interpolated values.
    """
    dataTMS = dataTMS_jumpie.copy()
    dataTMS = initial_clean(df = dataTMS, column_to_be_cleaned = give_col)

    dataTMS.sort_values(by=['image_year', 'doy'], inplace=True)
    dataTMS.reset_index(drop=True, inplace=True)

    # Epoch milliseconds -> seconds.
    dataTMS['system_start_time'] = dataTMS['system_start_time'] / 1000
    thyme_vec = dataTMS['system_start_time'].values.copy()
    Veg_indks = dataTMS[give_col].values.copy()

    time_diff = thyme_vec[1:] - thyme_vec[0:len(thyme_vec)-1]
    time_diff_in_days = time_diff / 86400  # 86400 seconds per day
    time_diff_in_days = time_diff_in_days.astype(int)

    Veg_indks_diff = Veg_indks[1:] - Veg_indks[0:len(thyme_vec)-1]

    # Candidate outliers: any raw increase above the one-day cap.
    jump_indexes = np.where(Veg_indks_diff > maxjump_perDay)
    jump_indexes = jump_indexes[0]
    jump_indexes = jump_indexes.tolist()

    # A jump at the very first observation has no left neighbor to
    # interpolate from, so skip it.
    if len(jump_indexes) > 0:
        if jump_indexes[0] == 0:
            jump_indexes.pop(0)

    if len(jump_indexes) > 0:
        for jp_idx in jump_indexes:
            # Confirm the rise is too steep for the elapsed number of days.
            if Veg_indks_diff[jp_idx] >= (time_diff_in_days[jp_idx] * maxjump_perDay):
                #
                # form a line using the adjacent points of the big jump:
                #
                x1, y1 = thyme_vec[jp_idx-1], Veg_indks[jp_idx-1]
                x2, y2 = thyme_vec[jp_idx+1], Veg_indks[jp_idx+1]

                # np.float was deprecated in NumPy 1.20 and removed in 1.24;
                # the builtin float() is the exact replacement.
                m = float(y2 - y1) / float(x2 - x1)  # slope
                b = y2 - (m*x2)  # intercept

                # replace the big jump with linear interpolation
                Veg_indks[jp_idx] = m * thyme_vec[jp_idx] + b

    dataTMS[give_col] = Veg_indks
    return(dataTMS)
########################################################################
########################################################################
########################################################################
########################################################################
def add_human_start_time_by_YearDoY(a_Reg_DF):
    """Add a ``human_system_start_time`` datetime column from year and day-of-year.

    Written for regularized data frames that lack the Epoch
    ``system_start_time`` column, so the Epoch-based add_human_start_time()
    function cannot be used on them.

    Arguments
    ---------
    a_Reg_DF : pd.DataFrame
        Data frame with integer-convertible ``image_year`` and ``doy`` columns.

    Returns
    -------
    pd.DataFrame
        A copy of the input with the extra ``human_system_start_time`` column;
        the input frame is left unmodified.
    """
    DF_C = a_Reg_DF.copy()
    DF_C.doy = DF_C.doy.astype(int)
    # year * 1000 + doy yields e.g. 2020185, which the '%Y%j' format parses
    # as year 2020, day-of-year 185 -- one vectorized call instead of the
    # old per-row date.fromordinal() loop.
    DF_C['human_system_start_time'] = pd.to_datetime(
        DF_C['image_year'].astype(int) * 1000 + DF_C['doy'], format='%Y%j')
    return DF_C
########################################################################
########################################################################
########################################################################
#
# Kirti look here
#
# detect passing the threshold
def extract_XValues_of_2Yrs_TS(regularized_TS, SF_yr):
    # old name extract_XValues_of_RegularizedTS_2Yrs().
    """Return the day-of-year axis for a two-year time-series window.

    Jul 1. Written because only data from the previous year through the
    end of the season-of-interest year (SF_yr) is needed; e.g. for a 2017
    field we use late-2016 through 2017 and nothing from 2018.

    Arguments
    ---------
    regularized_TS : pd.DataFrame
        Time series with ``image_year`` and ``doy`` columns.
    SF_yr : int
        The season/field year of interest.

    Returns
    -------
    np.ndarray
        DoY values for year SF_yr-1 as-is, followed by the DoY values of
        SF_yr shifted by the length of the previous year, so the whole
        window lies on one continuous axis.
    """
    import calendar  # stdlib leap-year test instead of the hand-rolled check_leap_year()
    X_values_prev_year = regularized_TS[regularized_TS.image_year == (SF_yr - 1)]['doy'].copy().values
    X_values_full_year = regularized_TS[regularized_TS.image_year == (SF_yr)]['doy'].copy().values
    # Shift SF_yr's days past the end of the previous year (366 if leap).
    offset = 366 if calendar.isleap(SF_yr - 1) else 365
    X_values_full_year = X_values_full_year + offset
    return np.concatenate([X_values_prev_year, X_values_full_year])
########################################################################
########################################################################
########################################################################
#
# These will not give what we want. It is a 10-days window
# The days are actual days. i.e. between each 2 entry of our
# time series there is already some gap.
#
########################################################################
########################################################################
########################################################################
def save_matlab_matrix(filename, matDict):
    """
    Write a MATLAB-formatted matrix file given a dictionary of
    variables.

    Errors are reported to stdout rather than raised, so a failed save
    does not abort a long processing run (best-effort semantics kept
    from the original).
    """
    try:
        sio.savemat(filename, matDict)
    except Exception as e:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate, and the actual failure reason is shown.
        print("ERROR: could not write matrix file " + filename + ": " + str(e))
| 37.222566 | 120 | 0.614334 |
f1a1c6bbb5f8fd9057ce629a8986541e09412fdc | 251 | py | Python | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | 1 | 2021-01-20T18:27:22.000Z | 2021-01-20T18:27:22.000Z | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | null | null | null | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | null | null | null |
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# ...
#
#
| 7.84375 | 68 | 0.677291 |
f1a479eb0ca5a8f8bbec21a491ef98b110500e1b | 1,584 | py | Python | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
| 40.615385 | 77 | 0.709596 |
f1a543e42a5ea04e653279a8af75516ed7470802 | 144 | py | Python | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
] | 623 | 2018-02-16T20:43:01.000Z | 2022-03-31T05:00:17.000Z | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
] | 339 | 2018-02-26T21:27:04.000Z | 2022-03-31T03:16:50.000Z | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
] | 152 | 2018-02-24T01:20:22.000Z | 2022-03-31T07:41:35.000Z | # SPDX-License-Identifier: Apache-2.0
# To register converter for libsvm operators, import associated modules here.
from . import SVMConverter
| 28.8 | 77 | 0.798611 |
f1aa3fd77846f2c70da5ebcb50efbe7da8be193b | 333 | py | Python | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
] | null | null | null | # for backwards compatibility with aspen-renderer modules
from .simplates.renderers import Factory, Renderer
Factory, Renderer # make pyflakes happy
import warnings
warnings.warn('aspen.renderers is deprecated and will be removed in a future version. '
'Please use aspen.simplates.renderers instead.', FutureWarning)
| 37 | 87 | 0.780781 |
f1ad55c7e2b9846cac3302cc84dc78c54a2ce31b | 3,562 | py | Python | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
] | null | null | null | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
] | null | null | null | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
] | null | null | null | import pygame
import sys
import collections # for ordered dict
import pickle # for saving and loading highscores
from constants import (SCREEN_WIDTH, SCREEN_HEIGHT, RED, GREEN, GREY, BLACK, WHITE)
# class that shows, saves and loads highscores | 37.893617 | 168 | 0.588433 |
f1afaf0a95380f8c421a56c623e2af9bfd01fd81 | 27,795 | py | Python | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
] | null | null | null | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
] | null | null | null | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
] | 1 | 2022-03-26T11:34:20.000Z | 2022-03-26T11:34:20.000Z | #!/usr/bin/env python2
import glob as glob
import os as os
import re
import shutil as shutil
import signal as signal
import subprocess as sp
import sys as sys
from lib import build
from lib import scripts
from lib import setup
from lib import analysis
# Accumulators populated while parsing the input file below.
ion_def = []        # becomes [cation, anion, num_cations] after parsing
poses_list = []     # integer pose indices read from 'poses_list'
poses_def = []      # pose folder names ('poseN' for dock, receptor name for crystal)
release_eq = []     # restraint weights for the equilibration release schedule
translate_apr = []  # pulling distances for the umbrella-sampling windows
attach_rest = []    # restraint weights for attach/release windows
lambdas = []        # lambda values for the decoupling windows
weights = []        # quadrature weights passed to the analysis stage
components = []     # free-energy component letters to compute
aa1_poses = []      # poses for which ligand anchor L1 could not be found
aa2_poses = []      # poses for which ligand anchors L2/L3 could not be found
# Read arguments that define input file and stage
if len(sys.argv) < 5:
    scripts.help_message()
    sys.exit(0)
for i in [1, 3]:
    # Expected CLI shape: -i <input_file> -s <stage>, flags in either order.
    if '-i' == sys.argv[i].lower():
        input_file = sys.argv[i + 1]
    elif '-s' == sys.argv[i].lower():
        stage = sys.argv[i + 1]
    else:
        scripts.help_message()
        sys.exit(1)
# Open input file
with open(input_file) as f_in:
    # Remove spaces and tabs
    lines = (line.strip(' \t\n\r') for line in f_in)
    lines = list(line for line in lines if line) # Non-blank lines in a list
    for i in range(0, len(lines)):
        # split line using the equal sign, and remove text after #
        # (comment lines stay as plain strings; others become [key, value] lists)
        if not lines[i][0] == '#':
            lines[i] = lines[i].split('#')[0].split('=')
# Read parameters from input file
# Each non-comment line is a [key, value] pair; keys are matched
# case-insensitively and values are type-checked by scripts.check_input().
for i in range(0, len(lines)):
    if not lines[i][0] == '#':
        lines[i][0] = lines[i][0].strip().lower()
        lines[i][1] = lines[i][1].strip()
        if lines[i][0] == 'pull_ligand':
            if lines[i][1].lower() == 'yes':
                pull_ligand = 'yes'
            elif lines[i][1].lower() == 'no':
                pull_ligand = 'no'
            else:
                print('Wrong input! Please use yes or no to indicate whether to pull out the ligand or not.')
                sys.exit(1)
        elif lines[i][0] == 'temperature':
            temperature = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        # Per-stage/per-component MD step counts; the *_steps1/_steps2 pairs
        # are later handed to setup.sim_files()/setup.dec_files().
        elif lines[i][0] == 'eq_steps1':
            eq_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'eq_steps2':
            eq_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'prep_steps1':
            prep_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'prep_steps2':
            prep_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'a_steps1':
            a_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'a_steps2':
            a_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l_steps1':
            l_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l_steps2':
            l_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 't_steps1':
            t_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 't_steps2':
            t_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'u_steps1':
            u_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'u_steps2':
            u_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'c_steps1':
            c_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'c_steps2':
            c_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'r_steps1':
            r_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'r_steps2':
            r_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'e_steps1':
            e_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'e_steps2':
            e_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'v_steps1':
            v_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'v_steps2':
            v_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'w_steps1':
            w_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'w_steps2':
            w_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'f_steps1':
            f_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'f_steps2':
            f_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'pull_spacing':
            pull_spacing = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'poses_list':
            # Comma-separated list of integers (other list options split on whitespace).
            newline = lines[i][1].strip('\'\"-,.:;#()][').split(',')
            for j in range(0, len(newline)):
                poses_list.append(scripts.check_input('int', newline[j], input_file, lines[i][0]))
        elif lines[i][0] == 'calc_type':
            calc_type = lines[i][1].lower()
        elif lines[i][0] == 'celpp_receptor':
            celp_st = lines[i][1]
        # P1/P2/P3: protein anchor atom selections; ligand_name: residue name.
        elif lines[i][0] == 'p1':
            H1 = lines[i][1]
        elif lines[i][0] == 'p2':
            H2 = lines[i][1]
        elif lines[i][0] == 'p3':
            H3 = lines[i][1]
        elif lines[i][0] == 'ligand_name':
            mol = lines[i][1]
        elif lines[i][0] == 'fe_type':
            if lines[i][1].lower() == 'rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'dd':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'pmf':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'all':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'pmf-rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'dd-rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'custom':
                fe_type = lines[i][1].lower()
            else:
                print('Free energy type not recognized, please choose all, rest (restraints), dd (double decoupling) or pmf (umbrella sampling), pmf-rest, dd-rest, or custom')
                sys.exit(1)
        elif lines[i][0] == 'dd_type':
            if lines[i][1].lower() == 'mbar':
                dd_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'ti':
                dd_type = lines[i][1].lower()
            else:
                print('Double decoupling type not recognized, please choose ti or mbar')
                sys.exit(1)
        elif lines[i][0] == 'blocks':
            blocks = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'hmr':
            if lines[i][1].lower() == 'yes':
                hmr = 'yes'
            elif lines[i][1].lower() == 'no':
                hmr = 'no'
            else:
                print('Wrong input! Please use yes or no to indicate whether hydrogen mass repartitioning '
                      'will be used.')
                sys.exit(1)
        elif lines[i][0] == 'water_model':
            if lines[i][1].lower() == 'tip3p':
                water_model = lines[i][1].upper()
            elif lines[i][1].lower() == 'tip4pew':
                water_model = lines[i][1].upper()
            elif lines[i][1].lower() == 'spce':
                water_model = lines[i][1].upper()
            else:
                print('Water model not supported. Please choose TIP3P, TIP4PEW or SPCE')
                sys.exit(1)
        elif lines[i][0] == 'num_waters':
            num_waters = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'neutralize_only':
            if lines[i][1].lower() == 'yes':
                neut = 'yes'
            elif lines[i][1].lower() == 'no':
                neut = 'no'
            else:
                print('Wrong input! Please choose neutralization only or add extra ions')
                sys.exit(1)
        elif lines[i][0] == 'cation':
            cation = lines[i][1]
        elif lines[i][0] == 'anion':
            anion = lines[i][1]
        elif lines[i][0] == 'num_cations':
            num_cations = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'num_cat_ligbox':
            num_cat_ligbox = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'buffer_x':
            buffer_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'buffer_y':
            buffer_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_buffer':
            lig_buffer = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        # Restraint force constants for receptor (rec_*) and ligand (lig_*),
        # later packed into the 'rest' list in this fixed order.
        elif lines[i][0] == 'rec_distance_force':
            rec_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_angle_force':
            rec_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_dihcf_force':
            rec_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_discf_force':
            rec_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_distance_force':
            lig_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_angle_force':
            lig_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_dihcf_force':
            lig_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_discf_force':
            lig_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        # l1_* parameters define the search region for the ligand's first anchor atom.
        elif lines[i][0] == 'l1_x':
            l1_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_y':
            l1_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_z':
            l1_z = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_zm':
            l1_zm = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_range':
            l1_range = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'min_adis':
            min_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'max_adis':
            max_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_bb':
            if lines[i][1].lower() == 'yes':
                rec_bb = 'yes'
            elif lines[i][1].lower() == 'no':
                rec_bb = 'no'
            else:
                # NOTE(review): missing space between the two string parts --
                # this prints "...restraintswill be used." (runtime string,
                # left untouched here).
                print('Wrong input! Please use yes or no to indicate whether protein backbone restraints'
                      'will be used.')
                sys.exit(1)
        elif lines[i][0] == 'bb_start':
            bb_start = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'bb_end':
            bb_end = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'bb_equil':
            # Anything other than 'yes' silently falls back to 'no'.
            if lines[i][1].lower() == 'yes':
                bb_equil = lines[i][1].lower()
            else:
                bb_equil = 'no'
        # Whitespace-separated numeric lists for window schedules.
        elif lines[i][0] == 'release_eq':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                release_eq.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'translate_apr':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                translate_apr.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'attach_rest':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                attach_rest.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'lambdas':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                lambdas.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'weights':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                weights.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'components':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                components.append(strip_line[j])
        # Raw MD control parameters, passed through to the simulation input files.
        elif lines[i][0] == 'ntpr':
            ntpr = lines[i][1]
        elif lines[i][0] == 'ntwr':
            ntwr = lines[i][1]
        elif lines[i][0] == 'ntwe':
            ntwe = lines[i][1]
        elif lines[i][0] == 'ntwx':
            ntwx = lines[i][1]
        elif lines[i][0] == 'cut':
            cut = lines[i][1]
        elif lines[i][0] == 'gamma_ln':
            gamma_ln = lines[i][1]
        elif lines[i][0] == 'barostat':
            barostat = lines[i][1]
        elif lines[i][0] == 'receptor_ff':
            receptor_ff = lines[i][1]
        elif lines[i][0] == 'ligand_ff':
            if lines[i][1].lower() == 'gaff':
                ligand_ff = 'gaff'
            elif lines[i][1].lower() == 'gaff2':
                ligand_ff = 'gaff2'
            else:
                print('Wrong input! Available options for ligand force-field are gaff and gaff2')
                sys.exit(1)
        elif lines[i][0] == 'dt':
            dt = lines[i][1]
# Number of simulations, 1 equilibrium and 1 production
apr_sim = 2
# Define free energy components
# (letters handled in the fe stage below: u = umbrella/pmf translation,
#  c/r = ligand/receptor conformational release, v/w = vdW decoupling
#  site/bulk, e/f = charge decoupling site/bulk, a/l/t = bound-system
#  attachments; 'custom' keeps the list read from the input file)
if fe_type == 'rest':
    components = ['c', 'a', 'l', 't', 'r']
elif fe_type == 'dd':
    components = ['e', 'v', 'f', 'w']
elif fe_type == 'pmf':
    components = ['u']
elif fe_type == 'all':
    components = ['c', 'a', 'l', 't', 'r', 'u', 'v', 'w', 'e', 'f']
elif fe_type == 'pmf-rest':
    components = ['c', 'a', 'l', 't', 'r', 'u']
elif fe_type == 'dd-rest':
    components = ['c', 'a', 'l', 't', 'r', 'e', 'v', 'w', 'f']
# Pull ligand out or not
if pull_ligand == 'no':
    # No pulling: a single zero-distance window and no second prep phase.
    translate_apr = [ 0.00 ]
    pull_spacing = 1.0
    prep_steps2 = 0
# Do not apply protein backbone restraints
if rec_bb == 'no':
    bb_start = 1
    bb_end = 0
    bb_equil = 'no'
# Create poses definitions
if calc_type == 'dock':
    for i in range(0, len(poses_list)):
        poses_def.append('pose'+str(poses_list[i]))
elif calc_type == 'crystal':
    poses_def = [celp_st]
# Total distance
apr_distance = translate_apr[-1]
rng = 0
# Create restraint definitions
# Fixed order expected by setup.restraints(): receptor dist/angle/dihedral/
# conformational-dist forces, then the same four for the ligand.
rest = [rec_distance_force, rec_angle_force, rec_dihcf_force, rec_discf_force, lig_distance_force, lig_angle_force, lig_dihcf_force, lig_discf_force]
# Create ion definitions
ion_def = [cation, anion, num_cations]
ion_lig = [cation, anion, num_cat_ligbox]
# Define number of steps for all stages
# (only the restraint components; u/e/v/w/f use their *_steps variables directly)
dic_steps1 = {}
dic_steps2 = {}
dic_steps1['a'] = a_steps1
dic_steps2['a'] = a_steps2
dic_steps1['l'] = l_steps1
dic_steps2['l'] = l_steps2
dic_steps1['t'] = t_steps1
dic_steps2['t'] = t_steps2
dic_steps1['c'] = c_steps1
dic_steps2['c'] = c_steps2
dic_steps1['r'] = r_steps1
dic_steps2['r'] = r_steps2
if stage == 'equil':
    # Equilibration stage: build and solvate every pose, then write one
    # restraint file per release weight plus the simulation inputs.
    comp = 'q'
    win = 0
    trans_dist = 0
    # Create equilibrium systems for all poses listed in the input file
    for i in range(0, len(poses_def)):
        rng = len(release_eq) - 1
        pose = poses_def[i]
        if not os.path.exists('./all-poses/'+pose+'.pdb'):
            continue
        print('Setting up '+str(poses_def[i]))
        # Get number of simulations
        num_sim = len(release_eq)
        # Create aligned initial complex
        # build_equil returns 'anch1'/'anch2' when ligand anchors cannot be
        # found; such poses are collected and reported after the loop.
        anch = build.build_equil(pose, celp_st, mol, H1, H2, H3, calc_type, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis, ligand_ff)
        if anch == 'anch1':
            aa1_poses.append(pose)
            os.chdir('../')
            continue
        if anch == 'anch2':
            aa2_poses.append(pose)
            os.chdir('../')
            continue
        # Solvate system with ions
        print('Creating box...')
        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
        # Apply restraints and prepare simulation files
        print('Equil release weights:')
        # NOTE(review): this inner loop reuses 'i' and shadows the pose-loop
        # variable; harmless in Python because the outer for re-binds it, but
        # easy to trip over when editing.
        for i in range(0, len(release_eq)):
            weight = release_eq[i]
            print('%s' %str(weight))
            setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
            shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%02d.rest' %int(i))
        # Reset disang.rest to the first window's restraints.
        shutil.copy('./'+pose+'/disang%02d.rest' %int(0), './'+pose+'/disang.rest')
        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, eq_steps1, eq_steps2, rng)
        os.chdir('../')
    # Python 2 print statements below (see the python2 shebang).
    if len(aa1_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand first anchor L1 for', aa1_poses
        print 'The ligand is most likely not in the defined binding site in these systems.'
    if len(aa2_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses
        print 'Try reducing the min_adis parameter in the input file.'
elif stage == 'prep':
    # Preparation stage: rebuild each equilibrated pose and generate the
    # ligand-pulling windows (one restraint file per pull_spacing step).
    win = 0
    weight = 100.0
    comp = 's'
    # Prepare systems after equilibration for poses listed in the input file
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        if not os.path.exists('./equil/'+pose):
            continue
        print('Setting up '+str(poses_def[i]))
        # Get number of simulations
        num_sim = int(apr_distance/pull_spacing)+1
        rng = num_sim - 1
        # Create aligned initial complex
        # fwin: last equilibration window, used as the starting structure.
        fwin = len(release_eq) - 1
        anch = build.build_prep(pose, mol, fwin, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis)
        if anch == 'anch1':
            aa1_poses.append(pose)
            os.chdir('../')
            continue
        if anch == 'anch2':
            aa2_poses.append(pose)
            os.chdir('../')
            continue
        # Solvate system with ions
        print('Creating box...')
        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
        # Apply restraints and prepare simulation files
        print('Pulling distance interval: %s' %pull_spacing)
        print('Total pulling distance: %s' %apr_distance)
        print('Creating pulling steps...')
        # NOTE(review): inner loop shadows the pose-loop variable 'i'
        # (harmless, the outer for re-binds it each iteration).
        for i in range(0, num_sim):
            trans_dist = float(i*pull_spacing)
            setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
            shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%03d.rest' %int(i))
        # Reset disang.rest to the zero-distance window.
        shutil.copy('./'+pose+'/disang%03d.rest' %int(0), './'+pose+'/disang.rest')
        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, prep_steps1, prep_steps2, rng)
        os.chdir('../')
    # Python 2 print statements below (see the python2 shebang).
    if len(aa1_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand first anchor L1 for', aa1_poses
        print 'The ligand most likely left the binding site during equilibration.'
    if len(aa2_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses
        print 'Try reducing the min_adis parameter in the input file.'
elif stage == 'fe':
    # Free-energy stage: for every prepared pose, build the window folders
    # for each requested component under fe/<pose>/{rest,pmf,dd}.
    # Create systems for all poses after preparation
    num_sim = apr_sim
    # Create and move to apr directory
    if not os.path.exists('fe'):
        os.makedirs('fe')
    os.chdir('fe')
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        if not os.path.exists('../prep/'+pose):
            continue
        print('Setting up '+str(poses_def[i]))
        # Create and move to pose directory
        if not os.path.exists(pose):
            os.makedirs(pose)
        os.chdir(pose)
        # Generate folder and restraints for all components and windows
        for j in range(0, len(components)):
            comp = components[j]
            # Translation (umbrella)
            if (comp == 'u'):
                # One window per pulling distance in translate_apr.
                if not os.path.exists('pmf'):
                    os.makedirs('pmf')
                os.chdir('pmf')
                weight = 100.0
                for k in range(0, len(translate_apr)):
                    trans_dist = translate_apr[k]
                    win = k
                    print('window: %s%02d distance: %s' %(comp, int(win), str(trans_dist)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, u_steps1, u_steps2, rng)
                os.chdir('../')
            # Ligand conformational release in a small box
            elif (comp == 'c'):
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                trans_dist = 0
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    # Window 0 additionally builds the ligand-only box.
                    if int(win) == 0:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand only...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
                    else:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
                os.chdir('../')
            # Receptor conformational release in a separate box
            elif (comp == 'r'):
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                # Ligand fully pulled out for the apo-protein windows.
                trans_dist = translate_apr[-1]
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    # Window 0 additionally builds the apo-protein box.
                    if int(win) == 0:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for apo protein...')
                        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
                    else:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
                os.chdir('../')
            # Van der Waals decoupling
            # site
            elif (comp == 'v'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('site'):
                    os.makedirs('site')
                os.chdir('site')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, v_steps1, v_steps2, weight, lambdas)
                os.chdir('../../')
            # bulk
            elif (comp == 'w'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('bulk'):
                    os.makedirs('bulk')
                os.chdir('bulk')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    # Window 0 additionally builds the ligand-only box.
                    if int(win) == 0:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand only...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
                    else:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
                os.chdir('../../')
            # Charge decoupling
            # site
            elif (comp == 'e'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('site'):
                    os.makedirs('site')
                os.chdir('site')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                    build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, e_steps1, e_steps2, weight, lambdas)
                os.chdir('../../')
            # bulk
            elif (comp == 'f'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('bulk'):
                    os.makedirs('bulk')
                os.chdir('bulk')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    # Window 0 additionally builds the ligand-decharging box.
                    if int(win) == 0:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand decharging in bulk...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
                    else:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
                os.chdir('../../')
            # Attachments in the bound system
            else:
                # Components 'a', 'l', 't': per-weight windows in the bound complex.
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                trans_dist = 0
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                    steps1 = dic_steps1[comp]
                    steps2 = dic_steps2[comp]
                    setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, steps1, steps2, rng)
                os.chdir('../')
        os.chdir('../')
elif stage == 'analysis':
    # Free energies MBAR/TI and analytical calculations
    # fe_values() is expected to chdir into fe/<pose>; the chdir('../../')
    # below returns here for the next pose.
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        analysis.fe_values(blocks, components, temperature, pose, attach_rest, translate_apr, lambdas, weights, dd_type, rest)
        os.chdir('../../')
| 43.565831 | 188 | 0.612916 |
f1b05065492f951ddbe7f464e95a73ced555ef67 | 693 | py | Python | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | [
"Apache-2.0"
] | null | null | null | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | [
"Apache-2.0"
] | 2 | 2021-03-05T06:48:11.000Z | 2021-03-26T08:14:17.000Z | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | [
"Apache-2.0"
] | 2 | 2021-09-19T15:45:19.000Z | 2021-10-05T05:07:41.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-30 06:22
from __future__ import unicode_literals
from django.db import migrations, models
| 33 | 281 | 0.620491 |
f1b0c5d59ac79b7bc53e1a8befc59467c9a655ae | 3,188 | py | Python | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | [
"MIT"
] | null | null | null | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | [
"MIT"
] | 6 | 2021-04-04T06:19:30.000Z | 2021-09-18T16:48:41.000Z | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional, Tuple
import typer
from onlinejudge import utils
from pydantic.networks import HttpUrl
from pydantic.types import DirectoryPath
from judge.schema import JudgeConfig
from judge.tools.download import DownloadArgs, LoginForm, SaveArgs
from judge.tools.download import download as download_tool
from judge.tools.download import save as save_tool
def main(
    workdir: Path = typer.Argument(".", help="a directory path for working directory"),
    url: Optional[str] = typer.Option(None, help="a download URL"),
    directory: Path = typer.Option(None, help="a directory path for test cases"),
    no_store: bool = typer.Option(False, help="testcases is shown but not saved"),
    format: str = typer.Option("sample-%i.%e", help="custom filename format"),
    login: bool = typer.Option(False, help="login into target service"),
    cookie: Path = typer.Option(utils.default_cookie_path, help="directory for cookie"),
) -> None:
    """
    Here is shortcut for download with `online-judge-tools`.
    At first, call `judge conf` for configuration.
    Pass `problem` at `contest` you want to test.
    Ex) the following leads to download test cases for Problem `C` at `ABC 051`:
    ```download```
    """
    # Step 1: load the per-project configuration from the working directory.
    typer.echo("Load configuration...")
    if not workdir.exists():
        typer.secho(f"Not exists: {str(workdir.resolve())}", fg=typer.colors.BRIGHT_RED)
        raise typer.Abort()
    try:
        _config = JudgeConfig.from_toml(workdir)
    except KeyError as e:
        typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
        raise typer.Abort()
    # `__config` is a plain-dict copy of the loaded config; explicit CLI
    # options (--url / --directory) override the stored values.
    __config = _config.dict()
    if url or directory:
        # check arguments
        if url:
            __config["URL"] = url
        if directory:
            __config["testdir"] = directory.resolve()
    try:
        config = DownloadJudgeConfig(**__config)
    except KeyError as e:
        # NOTE(review): DownloadJudgeConfig is not visible in this chunk; if it
        # is a pydantic model, invalid data raises ValidationError rather than
        # KeyError — confirm this handler catches the intended failure mode.
        typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
        raise typer.Abort()
    # Step 2: download the test cases, optionally logging in interactively.
    typer.echo(f"Download {config.URL}")
    try:
        login_form: Optional[LoginForm] = None
        if login:
            login_form = CLILoginForm()
        testcases = download_tool(
            DownloadArgs(
                url=config.URL,
                login_form=login_form,
                cookie=cookie,
            )
        )
    except Exception as e:
        typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
        raise typer.Abort()
    # Step 3: persist the downloaded cases unless --no-store was requested.
    if not no_store:
        try:
            save_tool(
                testcases,
                SaveArgs(
                    format=format,
                    directory=Path(config.testdir),
                ),
            )
        except Exception as e:
            typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
            raise typer.Abort()
# Allow standalone execution (`python download.py ...`) via the Typer runner.
if __name__ == "__main__":
    typer.run(main)
| 30.653846 | 88 | 0.631117 |
f1b1cfe08adc3b1c3d213a90411b75dbb6594980 | 682 | py | Python | labs/Bonus_Labs/custom/filter_plugins/ntc.py | ryanaa08/NPA | 45173efa60713858bb8b1d884fe12c50fe69920c | [
"BSD-Source-Code"
] | 1 | 2021-11-06T20:39:22.000Z | 2021-11-06T20:39:22.000Z | labs/Bonus_Labs/custom/filter_plugins/ntc.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | null | null | null | labs/Bonus_Labs/custom/filter_plugins/ntc.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | null | null | null | import re
import difflib
from ansible import errors
| 22.733333 | 74 | 0.527859 |
f1b716086bee59aea60d9505833a19bb60e79bc5 | 161 | py | Python | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | [
"MIT"
] | null | null | null | from django.urls import path
from .api.view import get_all_countries_view
# URL namespace so views can be reversed as 'core:<name>'.
app_name = "core"
urlpatterns = [
    # Presumably returns the full country list — see get_all_countries_view.
    path('all-countries', get_all_countries_view)
]
| 20.125 | 49 | 0.770186 |
f1b7cdef9de310ce5a7fb0146da43f000e1ce55f | 18,861 | py | Python | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | [
"MIT"
] | null | null | null | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | [
"MIT"
] | null | null | null | gitflow/context.py | abacusresearch/gitflow | 81ea7f5d468f9b128cd593f62972f13352bd3a63 | [
"MIT"
] | null | null | null | import atexit
import os
import re
import shutil
from enum import Enum
from typing import List, Optional
import collections
from gitflow import cli, const, repotools, _, utils
from gitflow.common import Result
from gitflow.const import VersioningScheme
from gitflow.properties import PropertyIO
from gitflow.repotools import RepoContext
from gitflow.version import VersionMatcher, VersionConfig
| 38.64959 | 117 | 0.564233 |
f1b884785bf603bff438ce57a6af789de6bc8891 | 2,307 | py | Python | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | [
"Apache-2.0"
] | null | null | null | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | [
"Apache-2.0"
] | null | null | null | test/test_modify_contact.py | peruana80/python_training | 0070bdc07b22d80594c029984c9967e56ba51951 | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
from random import randrange
#def test_modify_first_contact_first_name(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(first_name="test"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(first_name="zmodyfikuj imie"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
#def test_modify_first_contact_email(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(first_name="test"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(last_name="Zmodyfikuj nazwisko"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts) | 56.268293 | 186 | 0.702211 |
f1b8db0ca9074a5d55378aaf5be9d198fcaa6a0b | 734 | py | Python | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | [
"MIT"
] | null | null | null | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | [
"MIT"
] | null | null | null | base.py | oknalv/linky | 78fba19946e2212b10f3d1a5b27c7d9329556290 | [
"MIT"
] | null | null | null | import webapp2
from webapp2_extras import sessions | 29.36 | 69 | 0.647139 |
f1b9ea9a68748f5299174c8b988d634a02fb6fda | 6,999 | py | Python | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | [
"Apache-2.0"
] | 2 | 2015-10-31T20:36:22.000Z | 2021-10-05T12:08:10.000Z | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | [
"Apache-2.0"
] | null | null | null | tests/test_helpers.py | albertoalcolea/dbhelpers | c65f77a750cf46874ae7b5b0e6d4930e9df729af | [
"Apache-2.0"
] | null | null | null | import unittest
try:
from unittest.mock import Mock, call
except ImportError:
from mock import Mock, call
from dbhelpers import cm_cursor, fetchiter, fetchone_nt, fetchmany_nt, fetchall_nt, fetchiter_nt
| 38.456044 | 96 | 0.570796 |
f1baa95b451bcaf546bfb42baf9ea8122be52ea7 | 2,641 | py | Python | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | [
"BSD-2-Clause"
] | null | null | null | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | [
"BSD-2-Clause"
] | null | null | null | scripts/joystick_node.py | kscottz/owi_arm | a08f1ed8a5bccfe8cca5a1fd1829beca15a1060f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# THIS SHEBANG IS REALLY REALLY IMPORTANT
import rospy
import roscpp
import numpy as np
from sensor_msgs.msg import Joy
from std_msgs.msg import Int16MultiArray
if __name__ == '__main__':
    try:
        # boiler plate to spin up a node.
        rospy.init_node('joystick_node')
        # NOTE(review): JoystickNode is not defined in this chunk — confirm it
        # is defined/imported elsewhere in the full file.
        node = JoystickNode()
    except rospy.ROSInterruptException:
        rospy.logwarn('ERROR!!!')
| 31.440476 | 74 | 0.540704 |
f1bc287fa4269a85fe5cdf284f15691d29943f53 | 650 | py | Python | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | [
"MIT"
] | 1 | 2016-12-29T13:58:28.000Z | 2016-12-29T13:58:28.000Z | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | [
"MIT"
] | null | null | null | nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py | YefriTavarez/NomiApp | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Soldeva, SRL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document | 26 | 51 | 0.716923 |
f1be0f593e7493f91a2f96246f4cf8a9df42b366 | 1,367 | py | Python | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 4 | 2020-07-06T12:13:14.000Z | 2021-07-29T12:45:27.000Z | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 62 | 2020-07-04T04:50:27.000Z | 2021-08-19T21:06:10.000Z | electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 3 | 2021-01-21T09:22:45.000Z | 2021-06-12T10:16:03.000Z | import logging
import typing
from electrumsv_sdk.utils import get_directory_name
# Component name taken from this file's parent directory; reused as the
# logger channel name below.
COMPONENT_NAME = get_directory_name(__file__)
logger = logging.getLogger(COMPONENT_NAME)
# Import Plugin for annotations only — skipped at runtime (commonly done to
# avoid a circular import; confirm against the full package).
if typing.TYPE_CHECKING:
    from .electrumsv_server import Plugin
| 34.175 | 96 | 0.686906 |
f1be97cb28ba644933394a127fc92f299492f132 | 4,955 | py | Python | cluster/core/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | [
"BSD-3-Clause"
] | 3 | 2020-04-25T08:47:55.000Z | 2020-11-04T11:18:21.000Z | cluster/core/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | [
"BSD-3-Clause"
] | 11 | 2020-06-15T16:28:15.000Z | 2021-11-29T17:11:07.000Z | source/include/python/http_parser.py | JarryShaw/broapt | 5a6253af862cb618718d8fad69343a23ef2ac9e4 | [
"BSD-3-Clause"
] | 3 | 2019-07-24T02:41:37.000Z | 2021-12-06T09:38:58.000Z | # -*- coding: utf-8 -*-
# pylint: disable=all
import base64
import binascii
import contextlib
import math
import os
import textwrap
import time
import urllib.parse
from const import LOGS_PATH
from logparser import parse
from utils import is_nan, print_file
# from utils import IPAddressJSONEncoder, is_nan, print_file
# today
# NOTE: DATE is fixed at import time, so a long-running process keeps writing
# to the log file named for the day it started on.
DATE = time.strftime('%Y-%m-%d')
# log path
LOGS = os.path.join(LOGS_PATH, 'http')
os.makedirs(LOGS, exist_ok=True)
# http log
HTTP_LOG = os.path.join(LOGS_PATH, 'http', f'{DATE}.log')
# macros
# Separators and placeholder tokens for the emitted log lines.
SEPARATOR = '\t'
SET_SEPARATOR = ','
EMPTY_FIELD = '(empty)'
UNSET_FIELD = 'NoDef'
# FIELDS and TYPES are parallel tuples: TYPES[i] is the declared type of
# FIELDS[i] (apparently Bro/Zeek-style log types — confirm downstream use).
FIELDS = ('scrip', 'ad', 'ts', 'url', 'ref', 'ua', 'dstip', 'cookie', 'src_port', 'json', 'method', 'body')
TYPES = ('addr', 'string', 'time', 'string', 'string', 'string', 'addr', 'string', 'port', 'vector[string]', 'string', 'string')
| 30.030303 | 128 | 0.594147 |
f1c021de79d124febfa8a831e976cd4dc12aeed9 | 1,647 | py | Python | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | [
"MIT"
] | 21 | 2020-08-19T02:52:16.000Z | 2022-02-25T12:35:04.000Z | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | [
"MIT"
] | 3 | 2020-10-16T07:11:25.000Z | 2021-06-30T10:26:04.000Z | src/compute_trust_values.py | johndpope/FacialRetargeting | 5fb0c1da6af6c3d59aef264f567bfa7a244d0764 | [
"MIT"
] | 7 | 2020-08-24T08:30:53.000Z | 2022-03-28T15:55:24.000Z | import numpy as np
from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities
def compute_trust_values(dsk, do_plot=False):
    """
    Compute the per-blendshape trust values of formula 6.

    k := number of blendshapes, n := num_features (num_markers * 3).

    :param dsk: delta_sk matrix of shape (k, n)
    :param do_plot: if True, plot the between-blendshape correlation matrix
    :return: trust-value vector of shape (k,)
    """
    if np.ndim(dsk) != 2:
        raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(np.ndim(dsk)))

    # Between-blendshape correlations, clipped to be non-negative.
    corr = compute_corr_coef(dsk, dsk)
    corr = np.maximum(corr, np.zeros(np.shape(corr)))
    if do_plot:
        plot_similarities(corr, "Between blendshapes correlation", vmin=0, vmax=1)

    # Strictly-lower-triangle row sums of the correlation matrix
    # (row 0 has no entries below the diagonal, so it stays 0).
    num_k = np.shape(corr)[0]
    low_trig = np.zeros(num_k)
    for row in range(1, num_k):
        acc = 0
        for col in range(row):
            acc += corr[row, col]
        low_trig[row] = acc

    # Formula 6: the blendshape whose row sum is largest gets trust 0; the
    # less a blendshape correlates with its predecessors, the closer to 1.
    return 1 - low_trig / np.max(low_trig)
if __name__ == '__main__':
    # Smoke test: run with `python -m src.compute_trust_values`.
    np.random.seed(0)
    from utils.re_order_delta import re_order_delta

    delta_sk = np.random.rand(6, 3)  # random (k, n) blendshape deltas
    sorted_sk = re_order_delta(delta_sk)
    tk = compute_trust_values(sorted_sk, do_plot=False)
    print("tk")
    print(tk)
| 26.564516 | 123 | 0.651488 |
f1c16c5d4d00c03eee3d9db1e1fe2c9c3aca5189 | 2,042 | py | Python | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | [
"Apache-2.0"
] | 8 | 2021-02-19T23:25:28.000Z | 2021-09-24T20:11:13.000Z | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | [
"Apache-2.0"
] | null | null | null | test/core/test_constant.py | haikusw/jaqalpaq | d507e894cb897756a1e51c99582b736254995b4e | [
"Apache-2.0"
] | null | null | null | import unittest
from jaqalpaq.core.parameter import ParamType
from jaqalpaq.core.constant import Constant
from . import randomize
from . import common
# Run this module's unittest suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 37.127273 | 82 | 0.669931 |
f1c26fda7f69a42db47f3f5783c055c679831e9b | 8,035 | py | Python | src/richard/videos/migrations/0001_initial.py | pyvideo/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | [
"Apache-2.0"
] | 51 | 2015-01-24T07:53:56.000Z | 2020-08-30T12:19:39.000Z | src/richard/videos/migrations/0001_initial.py | westurner/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | [
"Apache-2.0"
] | 34 | 2015-02-23T11:15:00.000Z | 2016-01-04T11:25:42.000Z | src/richard/videos/migrations/0001_initial.py | westurner/richard | 894f5380e07d7e66453fe730891a21aca32d8edb | [
"Apache-2.0"
] | 16 | 2015-03-20T17:36:09.000Z | 2022-01-07T01:04:17.000Z | # -*- coding: utf-8 -*-
# richard -- video index system
# Copyright (C) 2012, 2013, 2014, 2015 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from django.db import models, migrations
| 49.598765 | 191 | 0.571873 |
f1c41c955777189a3b733180afda82b9ed458a7c | 1,399 | py | Python | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/descwl_shear_sims | 1c696518104b7f301dd6c69571239431c6232110 | [
"BSD-3-Clause"
] | null | null | null | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/descwl_shear_sims | 1c696518104b7f301dd6c69571239431c6232110 | [
"BSD-3-Clause"
] | 11 | 2019-12-10T23:30:27.000Z | 2019-12-24T13:59:32.000Z | descwl_shear_sims/tests/test_artifacts.py | LSSTDESC/wl-shear-testing-sims | 6e4a0baa6f664b5bc52b08b55614eaa58c8b0748 | [
"BSD-3-Clause"
] | null | null | null | """
copy-paste from my (beckermr) personal code here
https://github.com/beckermr/metadetect-coadding-sims
"""
import numpy as np
import galsim
from descwl_shear_sims.masking import get_bmask_and_set_image
from descwl_shear_sims.artifacts import (
generate_bad_columns,
generate_cosmic_rays,
)
| 25.436364 | 68 | 0.719085 |
f1c44279c1c78e6d3ae1d50d41837fb4c6fd7df0 | 2,270 | py | Python | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | [
"MIT"
] | null | null | null | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | [
"MIT"
] | null | null | null | stylobate_mgmt/commands/init.py | digitaltembo/stylobate-mgmt | 26483aab27d2496dcbd71d7de2f5780bc43a959e | [
"MIT"
] | null | null | null | from getpass import getpass
import os
from .utils import Command
from .db import DB
| 33.382353 | 119 | 0.644934 |
f1c47788397390c41f153d775e370f60b472f99d | 628 | py | Python | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | [
"MIT"
] | null | null | null | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | [
"MIT"
] | null | null | null | leetcode_submissions/7.reverse-integer.18198620.ac.py | aenon/online_judge | bff3991519cd4f2d80dea9b17680dbc5d4c44b9b | [
"MIT"
] | 1 | 2015-01-10T16:02:43.000Z | 2015-01-10T16:02:43.000Z | #!/usr/bin/env python
# Reverse Integer https://oj.leetcode.com/problems/reverse-integer/
# Reverse digits of an integer.
# Example1: x = 123, return 321
# Example2: x = -123, return -321
#Math
# Xilin SUN
# Dec 7 2014
| 19.030303 | 68 | 0.593949 |
f1c529b5976d0a2cdf007169fc8e0ee8525206e1 | 1,400 | py | Python | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | [
"ZPL-2.1"
] | null | null | null | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | [
"ZPL-2.1"
] | 1 | 2021-01-08T15:34:08.000Z | 2021-01-08T15:34:08.000Z | src/z3c/configurator/tests.py | zopefoundation/z3c.configurator | 390416d2fa61ddf97c28e6af32eae3660bb725e2 | [
"ZPL-2.1"
] | 1 | 2015-04-03T05:49:32.000Z | 2015-04-03T05:49:32.000Z | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
#############################################################################
"""Configurator Test Setup"""
import re
import doctest
from zope.component import testing
from zope.testing.renormalizing import RENormalizing
| 31.818182 | 78 | 0.620714 |
f1c67bf4245b574bcd2ed4dfcba7d08e3e6e8419 | 174 | py | Python | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | [
"MIT"
] | null | null | null | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | [
"MIT"
] | null | null | null | example.py | LucasHazardous/SkinReaper | c910cebe2aed3dd8e442515e4415f3e253e5a4ac | [
"MIT"
] | null | null | null | from skin_reaper import SkinReaper
# Standalone entry point.
if __name__ == "__main__":
    r = SkinReaper()
    # NOTE(review): method semantics inferred from names only — presumably
    # harvestLinks(5) collects five listings and collectRandom grabs one at
    # random; confirm against skin_reaper's API.
    data = r.harvestLinks(5)
    r.setSkinPreview()
    r.collectRandom(data)
    r.kill()
f1c6b2f9d9acd98dcef1131f691572e33395120a | 528 | py | Python | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | [
"MIT"
] | 2 | 2020-02-18T18:10:50.000Z | 2020-08-04T21:00:29.000Z | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | [
"MIT"
] | 4 | 2020-02-19T10:46:02.000Z | 2021-01-09T18:52:45.000Z | time_to_speech.py | besi/stereopi | c03a1ae990af67dde4e2cd832a20b49d697de230 | [
"MIT"
] | null | null | null | # Credits go to <http://codereview.stackexchange.com/q/37522>
import random
import time
def current_time():
    '''Returns a tuple containing (hour, minute) for current local time.'''
    # time.localtime() with no argument defaults to the current time.
    now = time.localtime()
    return (now.tm_hour, now.tm_min)
# Announce the current time once when the script runs.
(hour, minute) = current_time()
# NOTE(review): `ishtime` is not defined in this chunk — presumably it renders
# (hour, minute) as a spoken-style phrase; confirm it exists in the full file.
print(ishtime(hour, minute))
| 22 | 75 | 0.657197 |
f1c6e01e5913573733f519b9c5d164e6fed7195b | 575 | py | Python | setup.py | ckuzma/solar-viability-tester | c34d03d1914374279ca269ab402eb5074f7555a6 | [
"MIT"
] | null | null | null | setup.py | ckuzma/solar-viability-tester | c34d03d1914374279ca269ab402eb5074f7555a6 | [
"MIT"
] | 2 | 2017-04-03T13:59:00.000Z | 2017-04-06T04:57:50.000Z | setup.py | ckuzma/solar-viability-tester | c34d03d1914374279ca269ab402eb5074f7555a6 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, so the README read below works
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# NOTE(review): `find_packages` is imported above but never used and no
# `packages=` argument is declared — confirm whether any packages should be
# listed. Also confirm the `license` value against the project's LICENSE file.
setup(
    name='solar-viability-tester',
    version='1.0.0',
    description='Solar viability tester utilizing the AT&T IoT Starter Kit and PubNub.',
    long_description=long_description,
    url='https://github.com/ckuzma/solar-viability-tester',
    license='Apache-2.0'
)
| 30.263158 | 89 | 0.707826 |
f1c78560c5fc55f8dc09c8791ab3fa9dcc1ccd67 | 31,028 | py | Python | framework/framework.py | wbqhb/SEPC | 1a5e03b70984b759b615424dc06f530d5de00f51 | [
"MIT"
] | null | null | null | framework/framework.py | wbqhb/SEPC | 1a5e03b70984b759b615424dc06f530d5de00f51 | [
"MIT"
] | null | null | null | framework/framework.py | wbqhb/SEPC | 1a5e03b70984b759b615424dc06f530d5de00f51 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/5/4 3:05
# @Author : godwaitup
# @FileName: framework.py
# original framework for joint extraction.
import torch.optim as optim
from torch import nn
import os
import data_loader
import torch.nn.functional as F
import numpy as np
import json
from functools import partial
from data_loader import cmed_collate_fn
import torch
| 48.786164 | 223 | 0.561718 |
f1c88d2448c823f942e8276b943c094ce146f49b | 799 | py | Python | tests/settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | tests/settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | tests/settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | """
Settings for application when being run in the test suite.
"""
import os
import sys
# Add the directory containing this file to the search path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Import function to generate a self-signed cert dynamically
from x509cert import gen_self_signed_cert
# Debug/testing switches (Flask-style settings — confirm the consumer).
DEBUG = True
TESTING = True
# Throwaway signing key for the test suite only; never use in production.
SECRET_KEY = 'bonjour, monde'
# Configure the testing database. The database URI is specified by the
# COMPONENTSDB_DATABASE_URI environment variable.
SQLALCHEMY_DATABASE_URI = os.environ.get(
    'COMPONENTSDB_DATABASE_URI', 'sqlite://'
)
# Echo issued SQL statements while the tests run.
SQLALCHEMY_ECHO = True
# Fresh self-signed certificate/key pair for each test session.
_cert, _key = gen_self_signed_cert()
# Key-id -> certificate map used when verifying Google OAuth2 tokens.
GOOGLE_OAUTH2_CERTS = {'selfsigned': _cert}
GOOGLE_OAUTH2_ALLOWED_CLIENT_IDS = ['my-client']
# Matching private key, exposed so tests can mint their own tokens.
TESTING_GOOGLE_OAUTH2_CERT_PRIV_KEYS = {'selfsigned': _key}
| 26.633333 | 70 | 0.787234 |
f1c8a2ea1e6774516b221761cec538d39be7d6c1 | 254 | py | Python | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null |
# Sample IPv4 addresses, deliberately unsorted (lexicographic order differs
# from numeric order, e.g. '100.0.0.1' < '2.2.2.2' as strings).
ips = [
    '10.0.0.5',
    '10.5.3.1',
    '192.168.11.10',
    '2.2.2.2',
    '100.0.0.1',
    '20.3.2.4'
]
# NOTE(review): sort_ips is not defined in this chunk — confirm it is defined
# above in the full file (presumably sorting with a numeric key function).
print(sort_ips(ips))
| 12.095238 | 52 | 0.566929 |
f1c983b126df00c8a011720ca60d9fd2cfbf09df | 5,330 | py | Python | tbss_wm_atlas_stats.py | shanqing-cai/MRI_analysis | 39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9 | [
"BSD-4-Clause"
] | 1 | 2016-02-08T18:31:36.000Z | 2016-02-08T18:31:36.000Z | tbss_wm_atlas_stats.py | shanqing-cai/MRI_analysis | 39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9 | [
"BSD-4-Clause"
] | null | null | null | tbss_wm_atlas_stats.py | shanqing-cai/MRI_analysis | 39b3d48e2158623ffd9a8a6ea47d16a4a7b83cd9 | [
"BSD-4-Clause"
] | null | null | null | #!/usr/bin/python
import os
import sys
import glob
import argparse
import tempfile
import numpy as np
from scipy.io import *
from scipy import stats
from subprocess import Popen, PIPE
from scai_utils import *
from get_qdec_info import get_qdec_info
from read_xml_labels import read_xml_labels
atlas_label_fn = \
"/usr/share/fsl/5.0/data/atlases/JHU/JHU-ICBM-labels-1mm.nii.gz"
atlas_label_xml = \
"/usr/share/fsl/5.0/data/atlases/JHU-labels.xml"
P_THRESH_UNC = 0.05
if __name__ == "__main__":
    # NOTE(review): Python 2 script (`raise Exception, "..."` syntax). It
    # shells out to FSL/FreeSurfer tools (fslsplit, fslstats, mri_binarize),
    # so it must run on a host with those tools installed.
    ap = argparse.ArgumentParser(description="Get stats (e.g., average FA) from in atlas-defined WM regions in TBSS-aligned diffusion-tensor images")
    ap.add_argument("tbssDir", help="Base TBSS directory (e.g., /users/cais/STUT/analysis/dt_tbss_dtiprep2)")
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)
    # === Parse input arguments === #
    args = ap.parse_args()
    tbssDir = args.tbssDir
    # === Input sanity check === #
    check_dir(tbssDir)
    statsDir = os.path.join(tbssDir, "stats")
    check_dir(statsDir)
    origDir = os.path.join(tbssDir, "origdata")
    check_dir(origDir)
    check_file(atlas_label_fn)
    # === Read JHU-ICBM labels === #
    check_file(atlas_label_xml)
    labs = read_xml_labels(atlas_label_xml)
    # === Locate the all_FA image === #
    allFA = os.path.join(statsDir, "all_FA.nii.gz")
    check_file(allFA)
    # === Find out the subject IDs and their groups === #
    # Each S??.nii.gz volume is one subject; idxPWS/idxPFS record the volume
    # indices of the two diagnosis groups within the 4D series ("PWS"/"PFS"
    # labels come from get_qdec_info — confirm their meaning in that module).
    origDir = os.path.join(tbssDir, "origdata")
    check_dir(origDir)
    ds = glob.glob(os.path.join(origDir, "S??.nii.gz"))
    ds.sort()
    sIDs = []
    idxPWS = []
    idxPFS = []
    for (i0, d) in enumerate(ds):
        [tpath, tfn] = os.path.split(d)
        sID = tfn.replace(".nii.gz", "")
        sIDs.append(sID)
        if get_qdec_info(sID, "diagnosis") == "PWS":
            idxPWS.append(i0)
        elif get_qdec_info(sID, "diagnosis") == "PFS":
            idxPFS.append(i0)
        else:
            raise Exception, "Unrecognized diagnosis for subject %s: %s" % \
                             (sID, get_qdec_info(sID, "diagnosis"))
    # === Split the all_FA image, for later use by fslstats === #
    # fslstats -k operates on single volumes, so split the 4D series first.
    splitBase = tempfile.mktemp()
    split_cmd = "fslsplit %s %s -t" % (allFA, splitBase)
    saydo(split_cmd)
    splitFNs = glob.glob(splitBase + "*.nii.gz")
    splitFNs.sort()
    if len(splitFNs) != len(sIDs):
        raise Exception, "Number of volumes in 4D series %s (%d) does not match the number of subjects in origdata (%d)" % \
                         (allFA, len(splitFNs), len(sIDs))
    # === Iterate through the WM labels and get the stats info === #
    labRes = {"labels": [], "meanFA": [], "tt_t": [], "tt_p": []}
    for (i0, lab) in enumerate(labs['name']):
        ind = labs['ind'][i0]
        if ind == 0:
            # Index 0 is the background label; skip it.
            continue
        print("\nProcessing label #%d: %s\n" % (i0, lab))
        labRes["labels"].append(lab)
        labRes["meanFA"].append({"PWS": [], "PFS": []})
        tmpfn = tempfile.mktemp() + ".nii.gz"
        # == Binarize, get label mask == #
        bin_cmd = "mri_binarize --i %s --match %d --o %s" % \
                  (atlas_label_fn, ind, tmpfn)
        saydo(bin_cmd)
        check_file(tmpfn)
        # == Use fslstats to get the masked mean == #
        t_vals = [-1] * len(sIDs)
        for (i1, splitFN) in enumerate(splitFNs):
            (sout, serr) = Popen(["fslstats", splitFN, "-k", tmpfn, "-m"], \
                                 stdout=PIPE, stderr=PIPE).communicate()
            if len(serr) > 0:
                raise Exception, \
                    "ERROR occurred during fslstats on %s" % splitFN
            t_vals[i1] = float(sout.split(' ')[0])
        t_vals = np.array(t_vals)
        labRes["meanFA"][-1]["PWS"] = t_vals[idxPWS]
        labRes["meanFA"][-1]["PFS"] = t_vals[idxPFS]
        # Two-sample t-test comparing the group mean-FA values for this label.
        (t, p) = stats.ttest_ind(labRes["meanFA"][-1]["PWS"], \
                                 labRes["meanFA"][-1]["PFS"])
        labRes["tt_t"].append(t)
        labRes["tt_p"].append(p)
        os.system("rm -f %s" % tmpfn)
    os.system("rm -f %s*" % splitBase)
    # === Save results to mat file === #
    resMatFN = "/users/cais/STUT/scripts/tbss_wm_atlas_stats.mat"
    os.system("rm -f %s" % resMatFN)
    savemat(resMatFN, labRes)
    check_file(resMatFN)
    print("\nINFO: Results saved to .mat file: %s" % resMatFN)
    # === Print results === #
    # Report labels whose uncorrected p-value clears P_THRESH_UNC, with group
    # means and standard errors.
    print("=== Significant results at P_THRESH_UNC = %f ===" % P_THRESH_UNC)
    for (i0, labName) in enumerate(labRes["labels"]):
        if labRes["tt_p"][i0] < P_THRESH_UNC:
            mean_PFS = np.mean(labRes["meanFA"][i0]["PFS"])
            mean_PWS = np.mean(labRes["meanFA"][i0]["PWS"])
            ste_PFS = np.std(labRes["meanFA"][i0]["PFS"]) / \
                      np.sqrt(len(idxPFS))
            ste_PWS = np.std(labRes["meanFA"][i0]["PWS"]) / \
                      np.sqrt(len(idxPWS))
            print("WM label [%s]:" % labName)
            print("\tPFS: mean = %f; SE = %f" % (mean_PFS, ste_PFS))
            print("\tPWS: mean = %f; SE = %f" % (mean_PWS, ste_PWS))
            print("\tt = %f; p = %f" % \
                  (labRes["tt_t"][i0], labRes["tt_p"][i0]))
f1cbb897fe4f7aa594e93ad56844d2bed4a73d65 | 1,995 | py | Python | Alt_DE/psacard/psa_card/code/loadall_auction_items.py | royadityak94/Interview | 40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f | [
"MIT"
] | null | null | null | Alt_DE/psacard/psa_card/code/loadall_auction_items.py | royadityak94/Interview | 40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f | [
"MIT"
] | null | null | null | Alt_DE/psacard/psa_card/code/loadall_auction_items.py | royadityak94/Interview | 40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f | [
"MIT"
] | null | null | null | # Module to scrap all auction listings on the auction prices page
from selenium import webdriver
from bs4 import BeautifulSoup
import csv
import os
# Utility to write as .csv file format
# Selenium Driver Handler
# Main handler controlling all auction listing parsing
# Entry-point of the progran
# Capability for stand-alone execution
# NOTE(review): main() is not defined in this chunk — confirm it is defined
# above in the full file.
if __name__ == '__main__':
    main()
| 37.641509 | 128 | 0.700752 |
f1cbcf01c46f003c5909284f4d2d85198beda10f | 96 | py | Python | venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/numpy/typing/tests/data/pass/numerictypes.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/7d/da/46/b543433b18dcfd975ecc18a25baa2105812baf0edc0bdbfae3890e1df2 | 96 | 96 | 0.895833 |
f1ccaa26614fd533c6b9140b49b0a5e2c602d313 | 3,343 | py | Python | onirim/card/_location.py | cwahbong/onirim-py | d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3 | [
"MIT"
] | null | null | null | onirim/card/_location.py | cwahbong/onirim-py | d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3 | [
"MIT"
] | null | null | null | onirim/card/_location.py | cwahbong/onirim-py | d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3 | [
"MIT"
] | null | null | null | """Location cards."""
import logging
from onirim.card._base import ColorCard
from onirim import exception
from onirim import util
LOGGER = logging.getLogger(__name__)
def _can_obtain_door(content):
"""
Check if the explored cards can obtain a door.
"""
last_card = content.explored[-1]
same_count = 0
for card in reversed(content.explored):
if last_card.color == card.color:
same_count += 1
else:
break
return same_count % 3 == 0
def sun(color):
"""
Make a sun location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A sun location card.
"""
return _Location(color, LocationKind.sun)
def moon(color):
"""
Make a moon location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A moon location card.
"""
return _Location(color, LocationKind.moon)
def key(color):
"""
Make a key location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A key location card.
"""
return _KeyLocation(color)
| 23.055172 | 80 | 0.606342 |
f1ccfab0d2faebbdb592b40f848ee1bf3127a09c | 4,247 | py | Python | gitlabform/gitlabform/test/test_branches.py | rbartuzel/gitlabform | 4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18 | [
"MIT"
] | null | null | null | gitlabform/gitlabform/test/test_branches.py | rbartuzel/gitlabform | 4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18 | [
"MIT"
] | null | null | null | gitlabform/gitlabform/test/test_branches.py | rbartuzel/gitlabform | 4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18 | [
"MIT"
] | null | null | null | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, create_readme_in_project, \
GROUP_NAME
PROJECT_NAME = 'branches_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
protect_branch_but_allow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_but_allow_all:
protected: true
developers_can_push: true
developers_can_merge: true
"""
protect_branch_and_disallow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_disallow_all:
protected: true
developers_can_push: false
developers_can_merge: false
"""
mixed_config = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: true
developers_can_push: false
developers_can_merge: true
protect_branch_and_allow_pushes:
protected: true
developers_can_push: true
developers_can_merge: false
"""
unprotect_branches = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: false
protect_branch_and_allow_pushes:
protected: false
"""
| 31.227941 | 117 | 0.721215 |
f1cdf2cb5f5f7dc477b7b2cf95774b2b25e88788 | 2,543 | py | Python | bespin/layers.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 5 | 2017-04-05T00:46:41.000Z | 2017-11-09T01:21:44.000Z | bespin/layers.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 69 | 2016-10-11T04:40:09.000Z | 2022-01-12T23:57:27.000Z | bespin/layers.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 7 | 2016-10-11T04:32:21.000Z | 2017-12-18T05:59:17.000Z | from bespin.errors import StackDepCycle
| 31.012195 | 98 | 0.563508 |
f1ce356bd1c13f7cdfe09167b87b3a43fdb85c66 | 6,851 | py | Python | src/pulsebox/events.py | rhosak/pulsebox | f2ce859ac5cd968bcd85a1e0eedf320414602a40 | [
"MIT"
] | 3 | 2019-02-23T23:15:48.000Z | 2020-03-23T12:33:15.000Z | src/pulsebox/events.py | rhosak/pulsebox | f2ce859ac5cd968bcd85a1e0eedf320414602a40 | [
"MIT"
] | null | null | null | src/pulsebox/events.py | rhosak/pulsebox | f2ce859ac5cd968bcd85a1e0eedf320414602a40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""events.py
Pulse sequence events for the Arduino Due pulsebox.
Radim Hok <hosak(at)optics.upol.cz>
2021 Quantum Optics Lab Olomouc
"""
from functools import reduce
from pulsebox.codeblocks import state_change, loop, channel_states_to_odsr
from pulsebox.config import calibration, pulsebox_pincount
def read_time(time_string):
"""Calculate time from a string containing a number and a time unit.
The unit is denoted by the last character of `time_string`. Time is
calculated by multiplying the 'number part' of `time_string` by a factor
corresponding to the unit.
The following units are accepted:
* n: nanoseconds (factor = 1e-9)
* u: microseconds (1e-6)
* m: milliseconds (1e-3)
* s: seconds (1)
* TODO: c: MCU clock cycles (12e-9)
* TODO: i: delay loop iterations (see `calibration` in config.ini)
Args:
* time_string (str): The (number + unit) string, for example "1m"
Returns:
* float time: Time (in seconds).
"""
factors = {
"n": 1e-9,
"u": 1e-6,
"m": 1e-3,
"s": 1
}
# Check that the time string is properly formatted, e. g. time part
# is followed by the unit part. The string should contain at least two
# character, otherwise splitting it into two parts will raise an IndexError.
try:
number, unit = time_string[:-1], time_string[-1]
except (IndexError, TypeError):
raise ValueError("Invalid time string given.")
# If the 'time part' cannot be converted to float, this raises a ValueError.
number = float(number)
if number < 0:
raise ValueError("Negative time values are not allowed.")
# Check that a valid time unit was specified. If no unit was specified,
# then what we call 'unit' will in fact be the last digit of the time value
# and as we do not use numeric unit symbols, we still get an error.
try:
factor = factors[unit]
except KeyError:
raise ValueError("Invalid time unit given.")
time = number * factor
return time
def time2iters(time):
"""Get the number of loop iterations required to achieve a given time delay.
Args:
* time (float): The time to convert to the number of delay loop iters.
Returns:
* int iters: The number of iterations through the ASM delay loop
required to produce a delay of a given length.
Notes:
The possible delay times are discrete, with a step given by the
structure of the ASM loop. This step is given by the `calibration`
variable in the config.
For example, if our delays for 1, 2, and 3 delay loop iterations are
50 ns, 100 ns, and 150 ns, respectively, and we want to convert
120 ns to delay loop iterations, we would see that 2.4 iterations are
required. As this is impossible, we round this to the nearest integer
amount of iterations. In this case, that's 2 iterations and instead of
120 ns delay we produced a 100 ns delay.
"""
if time < 0:
raise ValueError("Negative time is not allowed.")
iters = int(round(time / calibration))
return iters
def parse_events(event_string, channel=None):
"""Convert a long string of events into an array of event instances.
"""
event_substrings = event_string.split(" ")
events = []
for substring in event_substrings:
try:
event_type, event_params = substring[0], substring[1:]
except (IndexError, ValueError):
print(f"CH {channel} - Invalid event string: " \
f"{event_string.__repr__()}")
return events
if event_type.lower() == "p": # PulseEvent
# Pulse event contains two timestrings - start and duration.
# Separate them.
timestamp, duration = None, None
for n, ch in enumerate(event_params):
if ch.isalpha():
timestamp = read_time(event_params[:n+1])
duration = read_time(event_params[n+1:])
break
pe = PulseEvent(channel, timestamp, duration)
new_events = pe.flips
for event in new_events:
events.append(event)
return events
| 34.084577 | 80 | 0.618888 |
f1d17c8b8c557bcd6739e64fad4920995078f733 | 160 | py | Python | 7KYU/words_to_sentence.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 4 | 2021-07-17T22:48:03.000Z | 2022-03-25T14:10:58.000Z | 7KYU/words_to_sentence.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | null | null | null | 7KYU/words_to_sentence.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 3 | 2021-06-14T14:18:16.000Z | 2022-03-16T06:02:02.000Z | def words_to_sentence(words: list) -> str:
""" This function create a string from a list of strings, separated by space. """
return ' '.join(words)
| 40 | 85 | 0.6625 |
f1d2400def017bc7e08b7a2881ecb907828aa29c | 1,839 | py | Python | saber/postprocessing/blob_detect/blob_detect.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 12 | 2018-05-14T17:43:18.000Z | 2021-11-16T04:03:33.000Z | saber/postprocessing/blob_detect/blob_detect.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 34 | 2019-05-06T19:13:36.000Z | 2021-05-06T19:12:35.000Z | saber/postprocessing/blob_detect/blob_detect.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 3 | 2019-10-08T17:42:17.000Z | 2021-07-28T05:52:02.000Z | # Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from skimage.measure import label, regionprops
import argparse
if __name__ == "__main__":
main()
| 34.055556 | 86 | 0.707993 |
f1d2cd28a494d8ac54d14b248cb64af3757ff63c | 3,291 | py | Python | tests/test_analyzer.py | kozajaku/spectra-analyzer | 00de0d89fc4f210dca05249a2e823c6c49f3e917 | [
"MIT"
] | null | null | null | tests/test_analyzer.py | kozajaku/spectra-analyzer | 00de0d89fc4f210dca05249a2e823c6c49f3e917 | [
"MIT"
] | null | null | null | tests/test_analyzer.py | kozajaku/spectra-analyzer | 00de0d89fc4f210dca05249a2e823c6c49f3e917 | [
"MIT"
] | null | null | null | import pytest
import os
from tests import test_analyzer
from spectra_analyzer import analyzer
def file_ref(name):
"""Helper function for getting paths to testing spectra."""
file = os.path.join(os.path.dirname(test_analyzer.__file__),
"test_analyzer", name)
return file
def normalized(spectrum):
"""Test if passed spectrum is truly normalized."""
for i in range(spectrum.shape[0]):
if spectrum[i] < 0.0 or spectrum[i] > 1.0:
return False
return True
def test_trans_parameters(spectrum_inst):
"""Test modification of transformation parameters inside spectrum instance."""
# test initial parameters
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 5
scales = len(spectrum_inst.scales)
assert scales == 48 # set for the specific spectrum
mod = spectrum_inst.modify_parameters
mod(48, 0)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(48, 10)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(48, 1)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(47, 1)
assert spectrum_inst.freq0 == 47
assert spectrum_inst.wSize == 0
mod(46, 2)
assert spectrum_inst.freq0 == 46
assert spectrum_inst.wSize == 1
mod(0, 48)
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 47
mod(0, 47)
assert spectrum_inst.freq0 == 0
assert spectrum_inst.wSize == 47
mod(1, 47)
assert spectrum_inst.freq0 == 1
assert spectrum_inst.wSize == 46
def test_spectrum_plotting(spectrum_inst):
"""Test that spectrum plotting returns some output."""
plot = spectrum_inst.plot_spectrum()
assert type(plot) == str
assert len(plot) > 0
def test_cwt_plotting(spectrum_inst):
"""Test that cwt plotting returns some output."""
plot = spectrum_inst.plot_cwt()
assert type(plot) == str
assert len(plot) > 0
def test_transformation_plotting(spectrum_inst):
"""Test that transformation plotting returns some output."""
plot = spectrum_inst.plot_reduced_spectrum()
assert type(plot) == str
assert len(plot) > 0
plot = spectrum_inst.plot_reduced_spectrum(only_transformation=True)
assert type(plot) == str
assert len(plot) > 0
def test_rec_invalidation(spectrum_inst):
"""Test that _rec variable is properly invalidated after parameter modification."""
assert spectrum_inst._rec is None
spectrum_inst.plot_reduced_spectrum()
assert spectrum_inst._rec is not None
spectrum_inst.modify_parameters(5, 4)
assert spectrum_inst._rec is None
| 31.644231 | 96 | 0.696141 |
f1d3dc26cb6e1253349d57f3b6bf5b06931d5da6 | 774 | py | Python | forms_app/views.py | sudee404/forms_project | ba60e41d13d72c80f412a7928e32000db200ea17 | [
"Apache-2.0"
] | null | null | null | forms_app/views.py | sudee404/forms_project | ba60e41d13d72c80f412a7928e32000db200ea17 | [
"Apache-2.0"
] | null | null | null | forms_app/views.py | sudee404/forms_project | ba60e41d13d72c80f412a7928e32000db200ea17 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from .models import User
from . import forms
# Create your views here. | 26.689655 | 67 | 0.639535 |
f1d3eb9d9dab05a31381c38ed24576dd96752996 | 920 | py | Python | python_submitty_utils/tests/test_string_utils.py | zeez2030/Submitty | 7118944ff4adc6f15d76984eb10a1e862926d724 | [
"BSD-3-Clause"
] | 411 | 2016-06-14T20:52:25.000Z | 2022-03-31T21:20:25.000Z | python_submitty_utils/tests/test_string_utils.py | KaelanWillauer/Submitty | cf9b6ceda15ec0a661e2ca81ea7864790094c64a | [
"BSD-3-Clause"
] | 5,730 | 2016-05-23T21:04:32.000Z | 2022-03-31T10:08:06.000Z | python_submitty_utils/tests/test_string_utils.py | KaelanWillauer/Submitty | cf9b6ceda15ec0a661e2ca81ea7864790094c64a | [
"BSD-3-Clause"
] | 423 | 2016-09-22T21:11:30.000Z | 2022-03-29T18:55:28.000Z | import unittest
from submitty_utils import string_utils
if __name__ == '__main__':
unittest.main()
| 36.8 | 117 | 0.742391 |
f1d87b5f62ca7da3adff2398d764af03ea29ed10 | 486 | py | Python | simple/file.py | asafonov/simple-backup | 4e90162cb10219537da42c57d49f8f2409ba7148 | [
"MIT"
] | null | null | null | simple/file.py | asafonov/simple-backup | 4e90162cb10219537da42c57d49f8f2409ba7148 | [
"MIT"
] | null | null | null | simple/file.py | asafonov/simple-backup | 4e90162cb10219537da42c57d49f8f2409ba7148 | [
"MIT"
] | null | null | null | import os
| 23.142857 | 68 | 0.549383 |
f1d9fe63dcda29a6aafbbbb348278fbcaa1eb8c3 | 3,449 | py | Python | metrics.py | mksarker/data_preprocessing | dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027 | [
"MIT"
] | null | null | null | metrics.py | mksarker/data_preprocessing | dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027 | [
"MIT"
] | null | null | null | metrics.py | mksarker/data_preprocessing | dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027 | [
"MIT"
] | null | null | null | import os
import argparse
import logging
import numpy as np
import SimpleITK as sitk
logging.basicConfig(level=logging.INFO)
from tqdm import tqdm
import cv2
import sys
from PIL import Image
from sklearn import metrics
def Dice(y_true, y_pred):
"""Returns Dice Similarity Coefficient for ground truth and predicted masks."""
#print(y_true.dtype)
#print(y_pred.dtype)
y_true = np.squeeze(y_true)/255
y_pred = np.squeeze(y_pred)/255
y_true.astype('bool')
y_pred.astype('bool')
intersection = np.logical_and(y_true, y_pred).sum()
return ((2. * intersection.sum()) + 1.) / (y_true.sum() + y_pred.sum() + 1.)
if __name__ == '__main__':
main()
| 35.556701 | 114 | 0.643665 |
f1db626c6c51f4c9710e0e6d1e887229859a9043 | 2,757 | py | Python | src/app/parser/parser.py | IliaValov/SofiaAirPurity | 71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf | [
"MIT"
] | null | null | null | src/app/parser/parser.py | IliaValov/SofiaAirPurity | 71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf | [
"MIT"
] | 1 | 2021-12-02T23:20:51.000Z | 2021-12-02T23:20:51.000Z | src/app/parser/parser.py | IliaValov/SofiaAirPurity | 71d0b005a9f8f5bfabfae99d1f4f8e1d11825adf | [
"MIT"
] | 1 | 2022-01-10T15:18:27.000Z | 2022-01-10T15:18:27.000Z | from app.models.enums.station import Station
| 50.127273 | 132 | 0.602466 |
f1dc37b00019bdcd4fd7800d93e149be0dfe2bdf | 11,747 | py | Python | synapse/tools/storm.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 216 | 2017-01-17T18:52:50.000Z | 2022-03-31T18:44:49.000Z | synapse/tools/storm.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 2,189 | 2017-01-17T22:31:48.000Z | 2022-03-31T20:41:45.000Z | synapse/tools/storm.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 44 | 2017-01-17T16:50:57.000Z | 2022-03-16T18:35:52.000Z | import os
import sys
import copy
import asyncio
import logging
import argparse
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.cli as s_cli
import synapse.lib.cmd as s_cmd
import synapse.lib.node as s_node
import synapse.lib.time as s_time
import synapse.lib.output as s_output
import synapse.lib.parser as s_parser
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
ERROR_COLOR = '#ff0066'
WARNING_COLOR = '#f4e842'
NODEEDIT_COLOR = "lightblue"
welcome = '''
Welcome to the Storm interpreter!
Local interpreter (non-storm) commands may be executed with a ! prefix:
Use !quit to exit.
Use !help to see local interpreter commands.
'''
def getArgParser():
pars = argparse.ArgumentParser(prog='synapse.tools.storm')
pars.add_argument('cortex', help='A telepath URL for the Cortex.')
pars.add_argument('onecmd', nargs='?', help='A single storm command to run and exit.')
return pars
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
| 29.589421 | 125 | 0.556823 |
f1dcbdb70b490e3b7a9741698dbd0c921ce6d7ff | 374 | py | Python | Feature Selection/variance-thresholding-binary-features.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | null | null | null | Feature Selection/variance-thresholding-binary-features.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | null | null | null | Feature Selection/variance-thresholding-binary-features.py | WyckliffeAluga/data-chronicles | 5219fe9cdbafb9fd7be88727483952c4c13f2790 | [
"MIT"
] | 1 | 2021-02-09T12:22:55.000Z | 2021-02-09T12:22:55.000Z | from sklearn.feature_selection import VarianceThreshold
# Create feature matrix with:
# Feature 0: 80% class 0
# Feature 1: 80% class 1
# Feature 2: 60% class 0, 40% class 1
X = [[0, 1, 0],
[0, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]]
# Run threshold by variance
thresholder = VarianceThreshold(threshold=(.75 * (1 - .75)))
thresholder.fit_transform(X)
| 23.375 | 60 | 0.628342 |
f1dd06b091ae6fa97dc90f3e28bc1d5770af8082 | 1,677 | py | Python | scripts/03_BuildLITypeModels/14_TrainLemmaModel.py | danielplatt/LemmInflect | 7db0633098409800fbe7056bdab7d6f5f144cebb | [
"MIT"
] | 157 | 2019-05-11T21:17:20.000Z | 2022-03-21T12:05:12.000Z | scripts/03_BuildLITypeModels/14_TrainLemmaModel.py | danielplatt/LemmInflect | 7db0633098409800fbe7056bdab7d6f5f144cebb | [
"MIT"
] | 10 | 2019-05-14T19:49:04.000Z | 2021-06-03T13:15:16.000Z | scripts/03_BuildLITypeModels/14_TrainLemmaModel.py | danielplatt/LemmInflect | 7db0633098409800fbe7056bdab7d6f5f144cebb | [
"MIT"
] | 20 | 2019-08-21T12:40:51.000Z | 2021-10-02T15:06:07.000Z | #!/usr/bin/python3
import sys
sys.path.insert(0, '../..') # make '..' first in the lib search path
import gzip
import numpy
from lemminflect.kmodels.ModelLemma import ModelLemma
from lemminflect.kmodels.ModelLemmaInData import ModelLemmaInData
from lemminflect.kmodels.ModelLemmaClasses import ModelLemmaClasses
from lemminflect import config
if __name__ == '__main__':
# Load the lemmatization data
print('Loading ', config.lemma_tcorp_fn)
indata = ModelLemmaInData(config.lemma_tcorp_fn)
print('Loaded {:,} entries'.format(len(indata.entries)))
# Load the lemmatization rules
print('Loading ', config.model_lemma_cl_fn)
rules = ModelLemmaClasses(config.model_lemma_cl_fn)
# Convert data into training format
X = []
Y = []
input_len = ModelLemmaInData.WVEC_LEN
input_letters = ModelLemmaInData.getLetterClasses()
output_rules = rules.rules
for entry in indata.entries:
rule = ModelLemmaClasses.computeSuffixRule(entry.infl, entry.lemma)
idx = rules.getRuleIndex(rule)
vec = ModelLemmaInData.wordToVec(entry.infl, entry.category)
X.append( vec )
Y.append( idx )
X = numpy.asarray(X, dtype='float32')
Y = numpy.asarray(Y, dtype='int32')
print('X.shape= ', X.shape)
print('Y.shape= ', Y.shape)
print()
# Create the model
batch_size = 32
nepochs = 50
model = ModelLemma()
model.create(input_len, input_letters, output_rules)
model.model.summary()
model.train(X, Y, batch_size, nepochs)
print()
print('Saving model to ', config.model_lemma_fn)
model.save(config.model_lemma_fn)
print('done')
| 31.641509 | 75 | 0.690519 |
f1dd06cdb53d42d5c3f71ef66179e31f525e4e55 | 9,006 | py | Python | python/snips_nlu_parsers/builtin_entities.py | f-laurens/snips-nlu-parsers | 82d24c0b4258acd1191af5d558b7592a18f2dada | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14 | 2019-04-17T15:10:39.000Z | 2022-02-14T09:38:47.000Z | python/snips_nlu_parsers/builtin_entities.py | f-laurens/snips-nlu-parsers | 82d24c0b4258acd1191af5d558b7592a18f2dada | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2019-04-07T19:36:24.000Z | 2020-05-28T12:46:37.000Z | python/snips_nlu_parsers/builtin_entities.py | f-laurens/snips-nlu-parsers | 82d24c0b4258acd1191af5d558b7592a18f2dada | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 43 | 2019-04-20T07:31:57.000Z | 2022-01-12T16:24:13.000Z | # coding=utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
from _ctypes import byref, pointer
from builtins import range, str
from ctypes import c_char_p, string_at
from snips_nlu_parsers.utils import (CStringArray, check_ffi_error, lib,
string_array_pointer, string_pointer)
_ALL_LANGUAGES = None
_SUPPORTED_ENTITIES = dict()
_SUPPORTED_GAZETTEER_ENTITIES = dict()
_SUPPORTED_GRAMMAR_ENTITIES = dict()
_ENTITIES_EXAMPLES = dict()
_ALL_BUILTIN_ENTITIES = None
_ALL_GAZETTEER_ENTITIES = None
_ALL_GRAMMAR_ENTITIES = None
_BUILTIN_ENTITIES_SHORTNAMES = dict()
_COMPLETE_ENTITY_ONTOLOGY = None
_LANGUAGE_ENTITY_ONTOLOGY = dict()
def get_all_languages():
"""Lists all the supported languages"""
global _ALL_LANGUAGES
if _ALL_LANGUAGES is None:
lib.snips_nlu_ontology_supported_languages.restype = CStringArray
array = lib.snips_nlu_ontology_supported_languages()
_ALL_LANGUAGES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_LANGUAGES
def get_all_builtin_entities():
"""Lists the builtin entities that are supported in at least one
language"""
global _ALL_BUILTIN_ENTITIES
if _ALL_BUILTIN_ENTITIES is None:
lib.snips_nlu_ontology_all_builtin_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_builtin_entities()
_ALL_BUILTIN_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_BUILTIN_ENTITIES
def get_all_gazetteer_entities():
"""Lists the gazetteer entities that are supported in at least one
language"""
global _ALL_GAZETTEER_ENTITIES
if _ALL_GAZETTEER_ENTITIES is None:
lib.snips_nlu_ontology_all_gazetteer_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_gazetteer_entities()
_ALL_GAZETTEER_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GAZETTEER_ENTITIES
def get_all_grammar_entities():
"""Lists the grammar entities that are supported in at least one
language"""
global _ALL_GRAMMAR_ENTITIES
if _ALL_GRAMMAR_ENTITIES is None:
lib.snips_nlu_ontology_all_grammar_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_grammar_entities()
_ALL_GRAMMAR_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GRAMMAR_ENTITIES
def get_builtin_entity_shortname(entity):
"""Get the short name of the entity
Examples:
>>> get_builtin_entity_shortname(u"snips/amountOfMoney")
'AmountOfMoney'
"""
global _BUILTIN_ENTITIES_SHORTNAMES
if entity not in _BUILTIN_ENTITIES_SHORTNAMES:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_ontology_entity_shortname(
entity.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity shortname")
result = string_at(ptr)
_BUILTIN_ENTITIES_SHORTNAMES[entity] = result.decode("utf8")
return _BUILTIN_ENTITIES_SHORTNAMES[entity]
def get_supported_entities(language):
"""Lists the builtin entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_builtin_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported entities")
array = ptr.contents
_SUPPORTED_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_ENTITIES[language]
def get_supported_gazetteer_entities(language):
"""Lists the gazetteer entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GAZETTEER_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GAZETTEER_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = \
lib.snips_nlu_parsers_supported_builtin_gazetteer_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported gazetteer entities")
array = ptr.contents
_SUPPORTED_GAZETTEER_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GAZETTEER_ENTITIES[language]
def get_supported_grammar_entities(language):
"""Lists the grammar entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GRAMMAR_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GRAMMAR_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_grammar_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported grammar entities")
array = ptr.contents
_SUPPORTED_GRAMMAR_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GRAMMAR_ENTITIES[language]
def get_builtin_entity_examples(builtin_entity_kind, language):
"""Provides some examples of the builtin entity in the specified language
"""
global _ENTITIES_EXAMPLES
if not isinstance(builtin_entity_kind, str):
raise TypeError("Expected `builtin_entity_kind` to be of type 'str' "
"but found: %s" % type(builtin_entity_kind))
if not isinstance(language, str):
raise TypeError("Expected `language` to be of type 'str' but found: %s"
% type(language))
if builtin_entity_kind not in _ENTITIES_EXAMPLES:
_ENTITIES_EXAMPLES[builtin_entity_kind] = dict()
if language not in _ENTITIES_EXAMPLES[builtin_entity_kind]:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_builtin_entity_examples(
builtin_entity_kind.encode("utf8"),
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity examples")
array = ptr.contents
_ENTITIES_EXAMPLES[builtin_entity_kind][language] = list(
array.data[i].decode("utf8") for i in range(array.size))
return _ENTITIES_EXAMPLES[builtin_entity_kind][language]
def get_complete_entity_ontology():
"""Lists the complete entity ontology for all languages in JSON format
"""
global _COMPLETE_ENTITY_ONTOLOGY
if _COMPLETE_ENTITY_ONTOLOGY is None:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_complete_entity_ontology_json(byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"complete entity ontology")
json_str = string_at(ptr).decode("utf8")
_COMPLETE_ENTITY_ONTOLOGY = json.loads(json_str, encoding="utf8")
return _COMPLETE_ENTITY_ONTOLOGY
def get_language_entity_ontology(language):
"""Lists the complete entity ontology for the specified language in JSON format
"""
global _LANGUAGE_ENTITY_ONTOLOGY
if language not in _LANGUAGE_ENTITY_ONTOLOGY:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_language_entity_ontology_json(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"language entity ontology")
json_str = string_at(ptr).decode("utf8")
_LANGUAGE_ENTITY_ONTOLOGY[language] = json.loads(json_str, encoding="utf8")
return _LANGUAGE_ENTITY_ONTOLOGY[language]
| 40.751131 | 87 | 0.67777 |
f1decafed3dd9912b1ab456a5f7d5b245e48033e | 521 | py | Python | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 9 | 2021-04-20T15:28:36.000Z | 2022-03-08T19:53:48.000Z | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | null | null | null | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 6 | 2021-06-24T03:25:21.000Z | 2022-02-20T21:44:52.000Z | import os;os.environ['TMPDIR'] = os.path.join(os.environ['HOME'], 'tmp')
import pwn
remote_binary = "/problems/got_5_c5119617c90aa544a639812dbc41e24e/vuln"
segfault()
| 27.421053 | 72 | 0.629559 |
f1e15b839857a50eb242db9bce20dc2231b79a03 | 9,518 | py | Python | miscellaneous/utils.py | tingyuansen/Weak_Lensing | f8f0833345687648c467b4dea7074d9596c81c14 | [
"MIT"
] | null | null | null | miscellaneous/utils.py | tingyuansen/Weak_Lensing | f8f0833345687648c467b4dea7074d9596c81c14 | [
"MIT"
] | null | null | null | miscellaneous/utils.py | tingyuansen/Weak_Lensing | f8f0833345687648c467b4dea7074d9596c81c14 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# *Author: Dezso Ribli*
"""
Util functions for training CNN on weak lesnsing maps.
Mostly data loaders and data generators with some
additional functionality.
"""
import numpy as np
# https://github.com/IntelPython/mkl_fft/issues/11
#np.fft.restore_all()
import cv2
import math
import os
def step_decay(epoch, base_lr, epochs_drop, drop=0.1):
"""Helper for step learning rate decay."""
lrate = base_lr
for epoch_drop in epochs_drop:
lrate *= math.pow(drop,math.floor(epoch/epoch_drop))
return lrate
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
dense_grid=False, random_split=False,
from_files=False):
"""Load data for different training scenarios."""
if not grfized and (not dense_grid) and (not random_split):
# the default data to loas
X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
from_files=from_files)
elif grfized:
# equivalent gaussian random filed maps
assert not from_files
X_train, X_test, y_train, y_test = load_grf_sparse_grid()
elif dense_grid:
assert not from_files
# data with additional points around a cosmology
X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
elif random_split:
# random train and test split
X_train, X_test, y_train, y_test = load_randomsplit_grid(
imsize=mapsize, from_files=from_files)
# aleays predict newidf, why not, it takes not time
# anyway we will not use it with the experiemnts
fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
X_new_idf = np.load(fn)
y_new_idf = np.ones((len(y_test),2))
y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
if exclude_fid: # exclude fiducial cosmo params if asked for
idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
X_train, y_train = X_train[~idx], y_train[~idx]
return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf
"""Loaders for various experiments."""
def predict_on_generator(model, datagen, augment):
    """Predict on every sample of a data generator.

    When ``augment`` is true, the prediction for each sample is the mean of
    the model outputs over all 8 flip/transpose augmentation combinations.

    Returns a ``(y_true, y_pred)`` pair of vertically stacked arrays.
    """
    datagen.reset_indices_and_reshuffle(force=True)
    targets, predictions = [], []
    for _ in range(datagen.n_data):
        x_batch, y_batch = datagen.next()
        targets.append(y_batch)
        if augment:
            # accumulate predictions over the 2x2x2 augmentation grid
            acc = np.zeros(y_batch.shape)
            for lr in (0, 1):
                for ud in (0, 1):
                    for tr in (0, 1):
                        acc += model.predict_on_batch(aug_ims(x_batch, lr, ud, tr))
            predictions.append(acc / 8.)
        else:
            predictions.append(model.predict_on_batch(x_batch))
    return np.vstack(targets), np.vstack(predictions)
def aug_ims(ims, fliplr=0, flipud=0, T=0):
    """Return a copy of the image batch with optional augmentations applied.

    Each image may be flipped left-right, flipped up-down, and have its
    first channel transposed; the input array is never modified.
    """
    out = np.array(ims, copy=True)
    for idx in range(len(out)):
        if fliplr:  # mirror along the horizontal axis
            out[idx] = out[idx][:, ::-1]
        if flipud:  # mirror along the vertical axis
            out[idx] = out[idx][::-1, :]
        if T:  # transpose the first channel only
            out[idx, :, :, 0] = out[idx, :, :, 0].T
    return out
def add_shape_noise(x, A, ng, rng=None, sige=0.4):
    """Return ``x`` plus Gaussian shape noise.

    The per-pixel scatter is ``sige / sqrt(2 * A * ng)``; ``A`` and ``ng``
    set the effective number of galaxies per pixel and ``sige`` the
    intrinsic ellipticity scatter.  A random generator may be supplied for
    reproducibility; otherwise the global numpy RNG is used.
    """
    sigpix = sige / (2 * A * ng) ** 0.5  # final pixel noise scatter
    generator = rng or np.random
    return x + generator.normal(loc=0, scale=sigpix, size=x.shape)
def smooth(x, smoothing_scale_arcmin, map_size_arcmin):
    """Smooth a square map with a Gaussian kernel.

    The smoothing scale is given in arcminutes and converted to pixels
    using the map's angular size.
    """
    n_pix = x.shape[0]
    # convert the smoothing scale from arcminutes to pixels
    sigma_pix = (smoothing_scale_arcmin * n_pix) / map_size_arcmin
    # kernel support: 6 sigma plus one pixel, rounded to an odd width so the
    # kernel has a well-defined center even for large smoothing scales
    ksize = 6 * int(sigma_pix + 1) + 1
    return cv2.GaussianBlur(x, ksize=(ksize, ksize), sigmaX=sigma_pix, sigmaY=sigma_pix)
| 37.179688 | 87 | 0.604434 |
f1e232b6730dde2945dc690b0f6fddabcc0f6b8b | 4,683 | py | Python | bert/utils/common.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null | bert/utils/common.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null | bert/utils/common.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null |
import hashlib
import io
import json
import os
import re
import struct
| 23.771574 | 74 | 0.558189 |
f1e2aa05c0131a4119034421ecbeb1cb9810d8c8 | 2,080 | py | Python | boaphys/elements.py | janpipek/boaphys | f32d972e22ebede2f24bf69506125b7c59a4c8c0 | [
"MIT"
] | null | null | null | boaphys/elements.py | janpipek/boaphys | f32d972e22ebede2f24bf69506125b7c59a4c8c0 | [
"MIT"
] | null | null | null | boaphys/elements.py | janpipek/boaphys | f32d972e22ebede2f24bf69506125b7c59a4c8c0 | [
"MIT"
] | null | null | null |
# Module-level singleton; ``_Table`` is presumably defined earlier in the
# original file (not visible in this chunk) — TODO confirm.
table = _Table()
| 27.012987 | 93 | 0.516827 |
f1e338fa1474985107d12ea6bcd66b88abed94fc | 2,924 | py | Python | projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py | vmware/versatile-data-kit | c4e10324a4f3203c58079cb18203880f68053f15 | [
"Apache-2.0"
] | 100 | 2021-10-04T09:32:04.000Z | 2022-03-30T11:23:53.000Z | projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py | vmware/versatile-data-kit | c4e10324a4f3203c58079cb18203880f68053f15 | [
"Apache-2.0"
] | 208 | 2021-10-04T16:56:40.000Z | 2022-03-31T10:41:44.000Z | projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py | vmware/versatile-data-kit | c4e10324a4f3203c58079cb18203880f68053f15 | [
"Apache-2.0"
] | 14 | 2021-10-11T14:15:13.000Z | 2022-03-11T13:39:17.000Z | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import unittest
from unittest import mock
from vdk.plugin.control_api_auth.authentication import Authentication
from vdk_provider.hooks.vdk import VDKHook
log = logging.getLogger(__name__)
# Monkey-patch the authentication logic to allow for more granular testing
# of the VDKHook
| 38.986667 | 129 | 0.703146 |
f1e3adf84f989f48fb009dcc9e422f44d758219c | 720 | py | Python | skater/util/logger.py | RPUTHUMA/Skater | 317460b88065b41eebe6790e9efdbb0595cbe450 | [
"UPL-1.0"
] | 718 | 2017-05-19T22:49:40.000Z | 2019-03-27T06:40:54.000Z | skater/util/logger.py | quant1729/Skater | b46a4abe3465ddc7b19ffc762ad45d1414b060a6 | [
"UPL-1.0"
] | 114 | 2017-05-24T16:55:59.000Z | 2019-03-27T12:48:18.000Z | skater/util/logger.py | quant1729/Skater | b46a4abe3465ddc7b19ffc762ad45d1414b060a6 | [
"UPL-1.0"
] | 121 | 2017-05-22T17:20:19.000Z | 2019-03-21T15:06:19.000Z | """Funcs for logging"""
import logging
_CRITICAL = logging.CRITICAL
_ERROR = logging.ERROR
_WARNING = logging.WARNING
_INFO = logging.INFO
_DEBUG = logging.DEBUG
_NOTSET = logging.NOTSET
| 24 | 63 | 0.740278 |
f1e59d1d38ade7999a6cd5e7982c060b5e15cc11 | 575 | py | Python | algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py | altermarkive/training | 6a13f5b2f466156ad5db0e25da0e601d2404b4c3 | [
"MIT"
] | null | null | null | algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py | altermarkive/training | 6a13f5b2f466156ad5db0e25da0e601d2404b4c3 | [
"MIT"
] | 1 | 2022-02-16T11:28:56.000Z | 2022-02-16T11:28:56.000Z | algorithms/code/leetcode/lc217_contains_duplicate/lc217_contains_duplicate.py | altermarkive/training | 6a13f5b2f466156ad5db0e25da0e601d2404b4c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# https://leetcode.com/problems/contains-duplicate/
import unittest
from typing import List
| 23 | 71 | 0.626087 |
f1e6fe5da799ee54688ff5ee8d7c10fc529546e8 | 1,818 | py | Python | examples/hsmm-geo.py | bikash/pyhsmm | 94fab0ea66072a639b20163c40db04c18069496c | [
"MIT"
] | 1 | 2015-11-08T05:20:39.000Z | 2015-11-08T05:20:39.000Z | examples/hsmm-geo.py | bikash/pyhsmm | 94fab0ea66072a639b20163c40db04c18069496c | [
"MIT"
] | null | null | null | examples/hsmm-geo.py | bikash/pyhsmm | 94fab0ea66072a639b20163c40db04c18069496c | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import copy, os
import pyhsmm
from pyhsmm.util.text import progprint_xrange
###################
# generate data #
###################
T = 1000
obs_dim = 2
N = 4
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
dur_hypparams = {'alpha_0':10*1,
'beta_0':10*100}
true_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams)
for state in range(N)]
true_dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams)
for state in range(N)]
truemodel = pyhsmm.models.GeoHSMM(
alpha=6.,
init_state_concentration=6.,
obs_distns=true_obs_distns,
dur_distns=true_dur_distns)
data, labels = truemodel.generate(T)
plt.figure()
truemodel.plot()
temp = np.concatenate(((0,),truemodel.states_list[0].durations.cumsum()))
changepoints = zip(temp[:-1],temp[1:])
changepoints[-1] = (changepoints[-1][0],T) # because last duration might be censored
#########################
# posterior inference #
#########################
Nmax = 25
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.GeoHSMMPossibleChangepoints(
alpha=6.,
init_state_concentration=6.,
obs_distns=obs_distns,
dur_distns=dur_distns)
posteriormodel.add_data(data,changepoints=changepoints)
for idx in progprint_xrange(50):
posteriormodel.resample_model()
plt.figure()
posteriormodel.plot()
plt.show()
| 25.25 | 95 | 0.669417 |
f1e7704fa789f92ccdaa67ed757a654c38ed5fda | 2,644 | py | Python | drf_nested/mixins/update_nested_mixin.py | promoteinternational/drf-nested | 0042b9e4c100df4ae43a10684c30348160b39187 | [
"MIT"
] | 1 | 2020-01-05T07:23:48.000Z | 2020-01-05T07:23:48.000Z | drf_nested/mixins/update_nested_mixin.py | promoteinternational/drf-nested | 0042b9e4c100df4ae43a10684c30348160b39187 | [
"MIT"
] | null | null | null | drf_nested/mixins/update_nested_mixin.py | promoteinternational/drf-nested | 0042b9e4c100df4ae43a10684c30348160b39187 | [
"MIT"
] | 2 | 2019-08-12T07:36:57.000Z | 2019-11-30T01:40:30.000Z | from django.db import transaction
from rest_framework.exceptions import ValidationError
from .base_nested_mixin import BaseNestedMixin
| 44.066667 | 112 | 0.655825 |
f1e8ea63244e88c3991257407c19f60101c1fe1a | 27 | py | Python | sbpack/version.py | jdidion/sbpack | 84bd7867a0630a826280a702db715377aa879f6a | [
"Apache-2.0"
] | 11 | 2020-08-12T09:33:46.000Z | 2022-02-18T15:27:26.000Z | sbpack/version.py | jdidion/sbpack | 84bd7867a0630a826280a702db715377aa879f6a | [
"Apache-2.0"
] | 35 | 2020-06-12T16:52:36.000Z | 2022-03-25T04:29:02.000Z | sbpack/version.py | jdidion/sbpack | 84bd7867a0630a826280a702db715377aa879f6a | [
"Apache-2.0"
] | 2 | 2021-09-27T16:17:26.000Z | 2022-01-12T22:18:12.000Z | __version__ = "2021.10.07"
| 13.5 | 26 | 0.703704 |
f1e91dba84a62775f1e1edc376c14039a6a6b66f | 179 | py | Python | forayer/datasets/__init__.py | dobraczka/forayer | df6783f85fb063f58e8b96acef924f9fd2532227 | [
"MIT"
] | 5 | 2021-09-06T13:50:44.000Z | 2022-02-14T09:39:09.000Z | forayer/datasets/__init__.py | dobraczka/forayer | df6783f85fb063f58e8b96acef924f9fd2532227 | [
"MIT"
] | 5 | 2021-09-07T06:53:41.000Z | 2022-01-17T09:51:53.000Z | forayer/datasets/__init__.py | dobraczka/forayer | df6783f85fb063f58e8b96acef924f9fd2532227 | [
"MIT"
] | null | null | null | """Make datasets available."""
from forayer.datasets.oaei_kg import OAEIKGDataset
from forayer.datasets.open_ea import OpenEADataset
__all__ = ["OpenEADataset", "OAEIKGDataset"]
| 29.833333 | 50 | 0.804469 |
f1ed8dbfedb221a10fa60ea9b89b4d29afac3606 | 227 | py | Python | challenge/admin.py | dpmpolo/anniv | 27081ca5bc514050c10ecc5e5c0994a4d5a7066f | [
"MIT"
] | null | null | null | challenge/admin.py | dpmpolo/anniv | 27081ca5bc514050c10ecc5e5c0994a4d5a7066f | [
"MIT"
] | null | null | null | challenge/admin.py | dpmpolo/anniv | 27081ca5bc514050c10ecc5e5c0994a4d5a7066f | [
"MIT"
] | null | null | null | from django.contrib import admin
from challenge.models import Goal, GoalInstance, SignificantOther
# Register your models here.
admin.site.register(Goal)
admin.site.register(GoalInstance)
admin.site.register(SignificantOther)
| 28.375 | 65 | 0.837004 |
f1ee53bc0c6e33469f0d38aac5f3576590fc8660 | 14,142 | py | Python | allocate.py | tomdavsmi/ncl-spa | baa714071d18cc388ccc73702d78a53f7096db6e | [
"MIT"
] | null | null | null | allocate.py | tomdavsmi/ncl-spa | baa714071d18cc388ccc73702d78a53f7096db6e | [
"MIT"
] | null | null | null | allocate.py | tomdavsmi/ncl-spa | baa714071d18cc388ccc73702d78a53f7096db6e | [
"MIT"
] | null | null | null | import library
import random
import re
| 38.53406 | 172 | 0.680031 |