Dataset schema (one row per source file; ⌀ marks a nullable column). The three trailing numeric columns are derived from content; a sketch of how follows this list.

hexsha: stringlengths 40..40
size: int64 5..2.06M
ext: stringclasses 11 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3..251
max_stars_repo_name: stringlengths 4..130
max_stars_repo_head_hexsha: stringlengths 40..78
max_stars_repo_licenses: listlengths 1..10
max_stars_count: int64 1..191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀
max_issues_repo_path: stringlengths 3..251
max_issues_repo_name: stringlengths 4..130
max_issues_repo_head_hexsha: stringlengths 40..78
max_issues_repo_licenses: listlengths 1..10
max_issues_count: int64 1..116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀
max_forks_repo_path: stringlengths 3..251
max_forks_repo_name: stringlengths 4..130
max_forks_repo_head_hexsha: stringlengths 40..78
max_forks_repo_licenses: listlengths 1..10
max_forks_count: int64 1..105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀
content: stringlengths 1..1.05M
avg_line_length: float64 1..1.02M
max_line_length: int64 3..1.04M
alphanum_fraction: float64 0..1
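A minimal sketch, under assumed definitions (line length in characters, alphanumeric fraction over all characters), of how the three derived columns could be computed from a row's content field; the dataset's actual extraction code is not shown in this dump:

def derived_stats(content):
    # content is non-empty per the schema (stringlengths 1..1.05M)
    lines = content.splitlines() or ['']
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction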
fdf18580611d8972ffb45869d74bebdda505d879
| 2,310
|
py
|
Python
|
Ass2/lotka-volterra.py
|
Scoudem/modsim
|
a65da4a29a82ac495367278ec694a28432b30c0d
|
[
"Apache-2.0"
] | null | null | null |
Ass2/lotka-volterra.py
|
Scoudem/modsim
|
a65da4a29a82ac495367278ec694a28432b30c0d
|
[
"Apache-2.0"
] | null | null | null |
Ass2/lotka-volterra.py
|
Scoudem/modsim
|
a65da4a29a82ac495367278ec694a28432b30c0d
|
[
"Apache-2.0"
] | null | null | null |
'''
File: lotka-volterra.py
Authors:
- Sjoerd Wenker, 10617558
- Tristan van Vaalen, 10551832
Contains a Lotka-Volterra Model which is simulated using the RK4 method.
'''
from integration import RungeKutta4
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
'''
Simulate a Lotka-Volterra model with RK4 for the given functions, a list of
start-value pairs, and a list of timesteps. A window is shown for each of
the timesteps.
'''
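# NOTE: the body of lotke_volterra is elided in this dump. A minimal sketch of
# what it could look like, with the RK4 stepping written inline (the original
# uses the RungeKutta4 class from integration.py, whose API is not shown here):
def lotke_volterra(functions, startvalues, timesteps, generations):
    f, g = functions  # dx/dt and dy/dt
    for dt in timesteps:
        plt.figure()
        for x0, y0 in startvalues:
            t, x, y = 0.0, float(x0), float(y0)
            xs, ys = [x], [y]
            for _ in range(generations):
                # classic RK4 step applied to the coupled system
                k1x, k1y = f(t, x, y), g(t, x, y)
                k2x = f(t + dt / 2, x + dt / 2 * k1x, y + dt / 2 * k1y)
                k2y = g(t + dt / 2, x + dt / 2 * k1x, y + dt / 2 * k1y)
                k3x = f(t + dt / 2, x + dt / 2 * k2x, y + dt / 2 * k2y)
                k3y = g(t + dt / 2, x + dt / 2 * k2x, y + dt / 2 * k2y)
                k4x = f(t + dt, x + dt * k3x, y + dt * k3y)
                k4y = g(t + dt, x + dt * k3x, y + dt * k3y)
                x += dt / 6 * (k1x + 2 * k2x + 2 * k3x + k4x)
                y += dt / 6 * (k1y + 2 * k2y + 2 * k3y + k4y)
                t += dt
                xs.append(x)
                ys.append(y)
            plt.plot(xs, ys, label='start (%d, %d)' % (x0, y0))
        plt.legend()
        plt.show()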
''' Main execution '''
if __name__ == '__main__':
    startvalues = [[11, 49], [1, 10], [15, 26]]
    dt = [1, 0.1, 0.05, 0.01, 0.005, 0.001]
    generations = 10000
    functions = [
        # Python 2 tuple-parameter lambdas rewritten in Python 3 syntax
        lambda t, x, y: -0.5 * x + 0.01 * x * y,
        lambda t, x, y: y - 0.1 * x * y
    ]
    lotke_volterra(functions, startvalues, dt, generations)
'''
Calculations for stable point:
x' = -a * x + c * d * x * y
y' = b * y - d * x * y
With a = 0.5, b = 1, c = 0.1 and d = 0.1:
x' = -0.5 * x + 0.01 * x * y
y' = y - 0.1 * x * y
Stable:
x' = 0, y' = 0, x != 0, y != 0
-0.5 * x + 0.01 * x * y = 0    (divide by x, since x != 0)
-0.5 + 0.01 * y = 0
0.01 * y = 0.5
y = 50
y - 0.1 * x * y = 0            (divide by y, since y != 0)
1 - 0.1 * x = 0
0.1 * x = 1
x = 10
'''
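# Quick numerical check of the fixed point derived above (not part of the
# original file): both derivatives vanish at (x, y) = (10, 50).
_x, _y = 10.0, 50.0
assert abs(-0.5 * _x + 0.01 * _x * _y) < 1e-12
assert abs(_y - 0.1 * _x * _y) < 1e-12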
| 26.860465
| 82
| 0.558009
|
fdf306a6233eb15d853aa05d6d7553accacc2060
| 3,717
|
py
|
Python
|
harmoni_detectors/harmoni_stt/test/test_deepspeech.py
|
interaction-lab/HARMONI
|
9c88019601a983a1739744919a95247a997d3bb1
|
[
"MIT"
] | 7
|
2020-09-02T06:31:21.000Z
|
2022-02-18T21:16:44.000Z
|
harmoni_detectors/harmoni_stt/test/test_deepspeech.py
|
micolspitale93/HARMONI
|
cf6a13fb85e3efb4e421dbfd4555359c0a04acaa
|
[
"MIT"
] | 61
|
2020-05-15T16:46:32.000Z
|
2021-07-28T17:44:49.000Z
|
harmoni_detectors/harmoni_stt/test/test_deepspeech.py
|
micolspitale93/HARMONI
|
cf6a13fb85e3efb4e421dbfd4555359c0a04acaa
|
[
"MIT"
] | 3
|
2020-10-05T23:01:29.000Z
|
2022-03-02T11:53:34.000Z
|
#!/usr/bin/env python3
# Common Imports
import io
import rospy
import sys
import unittest
# Specific Imports
import time
import wave
from harmoni_common_lib.action_client import HarmoniActionClient
from harmoni_common_lib.constants import ActionType, DetectorNameSpace, SensorNameSpace, State
from audio_common_msgs.msg import AudioData
from std_msgs.msg import String
PKG = "test_harmoni_stt"
def main():
# TODO combine validity tests into test suite so that setup doesn't have to run over and over.
import rostest
rospy.loginfo("test_deepspeech started")
rospy.loginfo("TestDeepSpeech: sys.argv: %s" % str(sys.argv))
rostest.rosrun(PKG, "test_deepspeech", TestDeepSpeech_Valid, sys.argv)
if __name__ == "__main__":
main()
| 31.235294
| 107
| 0.65456
|
fdf47c8f7eacc32cfd98b13ee0730f15d82165c5
| 2,196
|
py
|
Python
|
smoked/management/commands/smoked.py
|
martinsvoboda/django-smoked
|
42b64fff23a37e3df42f8fc54535ea496dd27d84
|
[
"MIT"
] | 6
|
2015-01-14T12:02:58.000Z
|
2021-08-17T23:18:56.000Z
|
smoked/management/commands/smoked.py
|
martinsvoboda/django-smoked
|
42b64fff23a37e3df42f8fc54535ea496dd27d84
|
[
"MIT"
] | 7
|
2015-01-24T11:36:07.000Z
|
2015-01-26T04:55:31.000Z
|
smoked/management/commands/smoked.py
|
martinsvoboda/django-smoked
|
42b64fff23a37e3df42f8fc54535ea496dd27d84
|
[
"MIT"
] | 1
|
2015-01-25T20:48:06.000Z
|
2015-01-25T20:48:06.000Z
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from optparse import make_option
import time
from django import VERSION
from django.core.management.base import NoArgsCommand
from smoked import default_registry
from smoked.runner import run_tests
stats_msg = """
Results
=======
Total: {total}
Success: {success}
Failure: {failure}
--------
Time: {time:.1f}s
"""
| 27.45
| 72
| 0.539617
|
fdf91475384cb8118e074e63142b83edc4f4d2bd
| 1,735
|
py
|
Python
|
Data Science With Python/07-cleaning-data-in-python/4-cleaning-data-for-analysis/10-testing-your-data-with-asserts.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | 5
|
2021-02-03T14:36:58.000Z
|
2022-01-01T10:29:26.000Z
|
Data Science With Python/07-cleaning-data-in-python/4-cleaning-data-for-analysis/10-testing-your-data-with-asserts.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | null | null | null |
Data Science With Python/07-cleaning-data-in-python/4-cleaning-data-for-analysis/10-testing-your-data-with-asserts.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | 7
|
2018-11-06T17:43:31.000Z
|
2020-11-07T21:08:16.000Z
|
'''
Testing your data with asserts
Here, you'll practice writing assert statements using the Ebola dataset from previous chapters to programmatically check for missing values and to confirm that all values are positive. The dataset has been pre-loaded into a DataFrame called ebola.
In the video, you saw Dan use the .all() method together with the .notnull() DataFrame method to check for missing values in a column. The .all() method returns True if all values are True. When used on a DataFrame, it returns a Series of Booleans - one for each column in the DataFrame. So if you are using it on a DataFrame, like in this exercise, you need to chain another .all() method so that you return only one True or False value. When using these within an assert statement, nothing will be returned if the assert statement is true: This is how you can confirm that the data you are checking are valid.
Note: You can use pd.notnull(df) as an alternative to df.notnull().
INSTRUCTIONS
100XP
-Write an assert statement to confirm that there are no missing values in ebola.
-Use the pd.notnull() function on ebola (or the .notnull() method of ebola) and chain two .all() methods (that is, .all().all()). The first .all() method will return a True or False for each column, while the second .all() method will return a single True or False.
-Write an assert statement to confirm that all values in ebola are greater than or equal to 0.
-Chain two .all() methods to the Boolean condition (ebola >= 0).
'''
import pandas as pd
ebola = pd.read_csv('../_datasets/ebola.csv')
# Assert that there are no missing values
assert ebola.notnull().all().all()
# Assert that all values are >= 0
assert (ebola >= 0).all().all()
| 61.964286
| 611
| 0.756196
|
fdfa15c5c9e42a9b497c846a1dd12bc7ab7f4c76
| 623
|
py
|
Python
|
code/waldo/conf/guisettings.py
|
amarallab/waldo
|
e38d23d9474a0bcb7a94e685545edb0115b12af4
|
[
"MIT"
] | null | null | null |
code/waldo/conf/guisettings.py
|
amarallab/waldo
|
e38d23d9474a0bcb7a94e685545edb0115b12af4
|
[
"MIT"
] | null | null | null |
code/waldo/conf/guisettings.py
|
amarallab/waldo
|
e38d23d9474a0bcb7a94e685545edb0115b12af4
|
[
"MIT"
] | null | null | null |
COLLIDER_SUITE_OFFSHOOT_RANGE = (0, 100)
COLLIDER_SUITE_SPLIT_ABS_RANGE = (0, 10)
COLLIDER_SUITE_SPLIT_REL_RANGE = (-1, 1, 2)
COLLIDER_SUITE_ASSIMILATE_SIZE_RANGE = (0, 10)
TAPE_FRAME_SEARCH_LIMIT_RANGE = (1, 100000)
TAPE_PIXEL_SEARCH_LIMIT_RANGE = (1, 1000000)
DEFAULT_CALIBRATION_ENCLOSURE_SIZE_RANGE = (0, 1000)
COLLISION_PIXEL_OVERLAP_MARGIN_RANGE = (1, 2000)
SCORE_CONTRAST_RADIO_RANGE = (1.0, 5.0)
SCORE_CONTRAST_DIFF_RANGE = (-0.2, 0.2)
SCORE_GOOD_FRACTION_RANGE = (0.0, 1.1)
SCORE_ACCURACY_RANGE = (0.0, 1.1)
SCORE_COVERAGE_RANGE = (0.0, 1.1)
ROI_BORDER_OFFSET_RANGE = (0, 200)
ROI_CORNER_OFFSET_RANGE = (0, 200)
| 34.611111
| 52
| 0.781701
|
fdfb52a6d5dc1287a0b5c4d900e03718e519b19a
| 6,084
|
py
|
Python
|
aiospotipy/me.py
|
sizumita/aiospotipy
|
3c542ca90559abde2e35268b4eedfdbbef1dab34
|
[
"MIT"
] | 3
|
2019-03-09T14:53:46.000Z
|
2020-06-03T12:50:33.000Z
|
aiospotipy/me.py
|
sizumita/aiospotipy
|
3c542ca90559abde2e35268b4eedfdbbef1dab34
|
[
"MIT"
] | null | null | null |
aiospotipy/me.py
|
sizumita/aiospotipy
|
3c542ca90559abde2e35268b4eedfdbbef1dab34
|
[
"MIT"
] | 1
|
2019-03-09T08:26:46.000Z
|
2019-03-09T08:26:46.000Z
|
from ._http import (HTTPClient,
get_id,
Route,
GET,
PUT,
DELETE,
)
import asyncio
| 34.568182
| 84
| 0.561473
|
fdfb78f1a782871b71fcd4058e86788874102e55
| 582
|
py
|
Python
|
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
import json
| 22.384615
| 55
| 0.689003
|
fdfc80e749f6ee439afc826e7feee5425163a88f
| 1,237
|
py
|
Python
|
android_store_service/utils/config_utils.py
|
gpiress/android-store-service
|
da81c7e79a345d790f5e744fc8fdfae0e6941765
|
[
"Apache-2.0"
] | 5
|
2020-12-10T14:05:04.000Z
|
2020-12-18T09:04:35.000Z
|
android_store_service/utils/config_utils.py
|
gpiress/android-store-service
|
da81c7e79a345d790f5e744fc8fdfae0e6941765
|
[
"Apache-2.0"
] | 4
|
2020-12-15T12:34:51.000Z
|
2021-06-28T14:04:34.000Z
|
android_store_service/utils/config_utils.py
|
gpiress/android-store-service
|
da81c7e79a345d790f5e744fc8fdfae0e6941765
|
[
"Apache-2.0"
] | 5
|
2020-12-15T12:10:22.000Z
|
2022-03-18T20:06:38.000Z
|
# Copyright 2019 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import current_app
| 29.452381
| 81
| 0.712207
|
fdfd3606a932554deb9481786f567a3095afa229
| 395
|
py
|
Python
|
blog/migrations/0015_auto_20190810_1404.py
|
vishnu-chalil/sharecontent
|
bda2cb6db0ffc38f582829abfced163e8a6eafdb
|
[
"Apache-2.0"
] | null | null | null |
blog/migrations/0015_auto_20190810_1404.py
|
vishnu-chalil/sharecontent
|
bda2cb6db0ffc38f582829abfced163e8a6eafdb
|
[
"Apache-2.0"
] | 7
|
2020-02-12T01:20:22.000Z
|
2021-06-10T18:39:59.000Z
|
blog/migrations/0015_auto_20190810_1404.py
|
vishnu-chalil/sharecontent
|
bda2cb6db0ffc38f582829abfced163e8a6eafdb
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-10 14:04
from django.db import migrations, models
| 20.789474
| 69
| 0.594937
|
a90000a60889fa2e13612a2352497c1c01e09cb6
| 71,385
|
py
|
Python
|
DeSu2SE.py
|
XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor
|
872717f66f1d9045d48f8d4c2621a925ee4e2817
|
[
"MIT"
] | null | null | null |
DeSu2SE.py
|
XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor
|
872717f66f1d9045d48f8d4c2621a925ee4e2817
|
[
"MIT"
] | null | null | null |
DeSu2SE.py
|
XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor
|
872717f66f1d9045d48f8d4c2621a925ee4e2817
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import os
import sys
print_n = sys.stdout.write
STAT_TXT = ("ST", "MA", "VI", "AG")
# Characters
CHAR_OFFSET = "0x24"
CHAR_ID = ("0x75", 2)
CHAR_LVL = ("0x79", 1)
CHAR_EXP = ("0x7C", 2)
CHAR_HP = ("0x82", 2)
CHAR_MP = ("0x84", 2)
CHAR_ST = ("0x7E", 1)
CHAR_MA = ("0x7F", 1)
CHAR_VI = ("0x80", 1)
CHAR_AG = ("0x81", 1)
CHAR_CMD1 = ("0x86", 1)
CHAR_CMD2 = ("0x87", 1)
CHAR_CMD3 = ("0x88", 1)
CHAR_PAS1 = ("0x89", 1)
CHAR_PAS2 = ("0x8A", 1)
CHAR_PAS3 = ("0x8B", 1)
CHAR_RAC = ("0x8C", 1)
CHAR_MOV = ("0x9F", 1)
# Miscellaneous
MISC_MACCA = ("0x6C4", 4)
# Demons
DE_NUM_MAX = 27
DE_OFFSET = "0x20"
DE_ID = ("0x2B6", 2)
DE_LVL = ("0x2B9", 1)
DE_EXP = ("0x2BC", 2)
DE_HP = ("0x2C2", 2)
DE_MP = ("0x2C4", 2)
DE_ST = ("0x2BE", 1)
DE_MA = ("0x2BF", 1)
DE_VI = ("0x2C0", 1)
DE_AG = ("0x2C1", 1)
DE_CMD1 = ("0x2C6", 1)
DE_CMD2 = ("0x2C7", 1)
DE_CMD3 = ("0x2C8", 1)
DE_PAS1 = ("0x2C9", 1)
DE_PAS2 = ("0x2CA", 1)
DE_PAS3 = ("0x2CB", 1)
DE_RAC = ("0x2CC", 1)
# Skill Information
CMD_IDS = tuple(str(i) for i in range(173))  # '0' .. '172'
PAS_IDS = tuple(str(i) for i in range(108))  # '0' .. '107'
AUTO_IDS = tuple(str(i) for i in range(40))  # '0' .. '39'
RAC_IDS = tuple(str(i) for i in range(110))  # '0' .. '109'
DEMON_IDS = tuple(str(i) for i in range(393)) + ('65535',)  # '0' .. '392' plus the "Empty" id
# DONE
CMD_SKILLS = {
"0": "None",
"1": "Attack",
"2": "Agi",
"3": "Agidyne",
"4": "Maragi",
"5": "Maragidyne",
"6": "Bufu",
"7": "Bufudyne",
"8": "Mabufu",
"9": "Mabufudyne",
"10": "Zio",
"11": "Ziodyne",
"12": "Mazio",
"13": "Maziodyne",
"14": "Zan",
"15": "Zandyne",
"16": "Mazan",
"17": "Mazandyne",
"18": "Megido",
"19": "Megidolaon",
"20": "Fire Dance",
"21": "Ice Dance",
"22": "Elec Dance",
"23": "Force Dance",
"24": "Holy Dance",
"25": "Drain",
"26": "Judgement",
"27": "Petra Eyes",
"28": "Mute Eyes",
"29": "Paral Eyes",
"30": "Death Call",
"31": "Power Hit",
"32": "Berserk",
"33": "Mighty Hit",
"34": "Anger Hit",
"35": "Brutal Hit",
"36": "Hassohappa",
"37": "Deathbound",
"38": "Weak Kill",
"39": "Desperation",
"40": "Makajamon",
"41": "Gigajama",
"42": "Diajama",
"43": "Makarakarn",
"44": "Tetrakarn",
"45": "Might Call",
"46": "Shield All",
"47": "Taunt",
"48": "Dia",
"49": "Diarahan",
"50": "Media",
"51": "Mediarahan",
"52": "Amrita",
"53": "Prayer",
"54": "Recarm",
"55": "Samerecarm",
"56": "Gunfire",
"57": "Guard",
"58": "Devil's Fuge",
"59": "Vampiric Mist",
"60": "Lost Flame",
"61": "Spawn",
"62": "Fire of Sodom",
"63": "Purging Light",
"64": "Babylon",
"65": "Megidoladyne",
"66": "Piercing Hit",
"67": "Multi-Hit",
"68": "Holy Strike",
"69": "Power Charge",
"70": "Sexy Gaze",
"71": "Marin Karin",
"72": "Extra Cancel",
"73": "Assassinate",
"74": "Fatal Strike",
"75": "Diarama",
"76": "Nigayomogi",
"77": "Recarmloss",
"78": "Mow Down",
"79": "Snipe",
"80": "Life Drain",
"81": "Multi-strike",
"82": "Inferno",
"83": "Escape",
"84": "Remain",
"85": "Double Strike",
"86": "Binary Fire",
"87": "Heat Charge",
"88": "N/A",
"89": "Marked Wing",
"90": "Eject Shot",
"91": "Circumpolarity",
"92": "N/A",
"93": "N/A",
"94": "Hacking",
"95": "Dark Tunder",
"96": "Diastrophism",
"97": "Regenerate",
"98": "Ultimate Hit",
"99": "Twin Ultimate",
"100": "Swallow",
"101": "N/A",
"102": "Binary Fire",
"103": "Circumpolarity",
"104": "Alkaid",
"105": "Areadbhar",
"106": "Dark Thunder",
"107": "Regenerate",
"108": "Supernova",
"109": "Power Up",
"110": "Ominous Star",
"111": "Heaven Wrath",
"112": "Cepheid",
"113": "Unheard Prayer",
"114": "Steal Macca",
"115": "Barrage Strike",
"116": "Heaven Wrath",
"117": "Necromancy",
"118": "Gomorrah Fire",
"119": "Vitality Drain",
"120": "Die for Me!",
"121": "Ruinous Wind",
"122": "Star Pressure",
"123": "Ruinous Wind",
"124": "Diastrophism",
"125": "Final Hit",
"126": "Dream Eater",
"127": "Demon Dance",
"128": "Roche Lobe",
"129": "Darkness Blade",
"130": "Defense Knife",
"131": "Carney",
"132": "Then, die!",
"133": "Don't Hurt Me",
"134": "Wanna Beating?",
"135": "Shadow Scythe",
"136": "No Killing...",
"137": "Shadow Shield",
"138": "Nemean Roar",
"139": "Wider-Radius",
"140": "Spica Spear",
"141": "Memory-Sharing",
"142": "Frozen Pillar",
"143": "Vicarious Spell",
"144": "Vicarious Doll",
"145": "Quaser",
"146": "Life Plower",
"147": "Asterion",
"148": "Partial Blast",
"149": "Vrano=Metria",
"150": "Megidoladyne",
"151": "Darkness Blade(Phys)",
"152": "Darkness Blade(Fire)",
"153": "Darkness Blade(Ice)",
"154": "Darkness Blade(Elec)",
"155": "Darkness Blade(Force)",
"156": "Then, die!(Phys)",
"157": "Then, die!(Phys)",
"158": "Then, die!(Phys)",
"159": "Then, die!(Almighty)",
"160": "Lion's Armor",
"161": "Ley Line True",
"162": "Life Plower True",
"163": "Beheadal",
"164": "Primal Fire",
"165": "Gravity Anomaly",
"166": "Orogin Selection",
"167": "Earthly Stars",
"168": "Master of Life",
"169": "Heavenly Rule",
"170": "Fringer's Brand",
"171": "Flaming Fanfare",
"172": "Ley Line"
}
# DONE
PAS_SKILLS = {
"0": "None",
"1": "+Mute",
"2": "+Poison",
"3": "+Paralyze",
"4": "+Stone",
"5": "Life Bonus",
"6": "Mana Bonus",
"7": "Life Surge",
"8": "Mana Surge",
"9": "Hero Aid",
"10": "Ares Aid",
"11": "Drain Hit",
"12": "Attack All",
"13": "Counter",
"14": "Retaliate",
"15": "Avenge",
"16": "Phys Boost",
"17": "Phys Amp",
"18": "Fire Boost",
"19": "Fire Amp",
"20": "Ice Boost",
"21": "Ice Amp",
"22": "Elec Boost",
"23": "Elec Amp",
"24": "Force Boost",
"25": "Force Amp",
"26": "Anti-Phys",
"27": "Anti-Fire",
"28": "Anti-Ice",
"29": "Anti-Elec",
"30": "Anti-Force",
"31": "Anti-Curse",
"32": "Anti-Most",
"33": "Anti-All",
"34": "Null Phys",
"35": "Null Fire",
"36": "Null Ice",
"37": "Null Elec",
"38": "Null Force",
"39": "Null Curse",
"40": "Phys Drain",
"41": "Fire Drain",
"42": "Ice Drain",
"43": "Elec Drain",
"44": "Force Drain",
"45": "Phys Repel",
"46": "Fire Repel",
"47": "Ice Repel",
"48": "Elec Repel",
"49": "Force Repel",
"50": "Watchful",
"51": "Endure",
"52": "Life Aid",
"53": "Life Lift",
"54": "Mana Aid",
"55": "Victory Cry",
"56": "Pierce",
"57": "Race-O",
"58": "Race-D",
"59": "Dual Shadow",
"60": "Extra One",
"61": "Leader Soul",
"62": "Knight Soul",
"63": "Paladin Soul",
"64": "Hero Soul",
"65": "Beast Eye",
"66": "Dragon Eye",
"67": "Crit Up",
"68": "Dodge",
"69": "MoneyBags",
"70": "Quick Move",
"71": "Vigilant",
"72": "Grimoire",
"73": "Double Strike",
"74": "Perserve Extra",
"75": "Anti-Element",
"76": "+Forget",
"77": "Extra Bonus",
"78": "Swift Step",
"79": "Life Stream",
"80": "Mana Stream",
"81": "Ultimate Hit",
"82": "Anti-Almighty",
"83": "Phys Up",
"84": "Pacify Human",
"85": "Dragon Power",
"86": "True Dragon",
"87": "Final Dragon",
"88": "Heavenly Gift",
"89": "Chaos Stir",
"90": "Undead",
"91": "Hidden Strength",
"92": "Holy Blessing",
"93": "Exchange",
"94": "Extra Zero",
"95": "Spirit Gain",
"96": "Hit Rate Gain",
"97": "Quick Wit",
"98": "Parkour",
"99": "Hitori Nabe",
"100": "Ikebukuro King",
"101": "Immortal Barman",
"102": "Defenseless",
"103": "Coiste Bodhar",
"104": "Dark Courier",
"105": "Massive Shadow",
"106": "Hound Eyes",
"107": "Fighting Doll",
}
# DONE
RAC_SKILLS = {
"0": "None",
"1": "Affection",
"2": "Awakening",
"3": "Chaos Wave",
"4": "Constrict",
"5": "Evil Wave",
"6": "Blood Wine",
"7": "Flight",
"8": "Sacrifice",
"9": "Switch",
"10": "Animal Leg",
"11": "Devil Speed",
"12": "Phantasm",
"13": "Glamour",
"14": "Tyranny",
"15": "Double Up",
"16": "Aggravate",
"17": "Bind",
"18": "Devotion",
"19": "Long Range",
"20": "Immortal",
"21": "Evil Flame",
"22": "Hot Flower",
"23": "Dark Hand",
"24": "Violent God",
"25": "King's Gate",
"26": "King's Gate",
"27": "Fiend",
"28": "Four Devas",
"29": "Dark Finger",
"30": "Asura Karma",
"31": "Ghost Wounds",
"32": "Hero's Mark",
"33": "Uncanny Form",
"34": "Asura Destiny",
"35": "Goddess Grace",
"36": "Enlightenment",
"37": "Chaos Breath",
"38": "Dragon Bind",
"39": "Evil Flow",
"40": "Angel Stigma",
"41": "Winged Flight",
"42": "Fallen's Mark",
"43": "Warp Step",
"44": "Free Leap",
"45": "Devil Flash",
"46": "True Phantasm",
"47": "Fairy Dust",
"48": "Blood Treaty",
"49": "Matchless",
"50": "Agitate",
"51": "Evil Bind",
"52": "Mother's Love",
"53": "Possesion",
"54": "Hero's Proof",
"55": "Unearthy Form",
"56": "Dubhe Proof",
"57": "Merak Proof",
"58": "Phecda Proof",
"59": "Megrez Proof",
"60": "Alioth Proof",
"61": "Mizar Proof",
"62": "Alkaid Proof",
"63": "Polaris Proof",
"64": "Alcor Proof",
"65": "Alcor Warrant",
"66": "Merak Envoy",
"67": "Phecda Clone",
"68": "Megrez Bud",
"69": "Alioth Shot",
"70": "Alkaid Bud",
"71": "Alkaid Spawn",
"72": "Alkaid Spawn",
"73": "Alkaid Spawn",
"74": "Alkaid Spawn",
"75": "Polaris Proof",
"76": "Polaris Proof",
"77": "Heaven Throne",
"78": "Dragon Shard",
"79": "Lugh Blessing",
"80": "Heaven Shield",
"81": "Bounty Shield",
"82": "Heaven Spear",
"83": "Bounty Spear",
"84": "Temptation",
"85": "Mizar Proof",
"86": "Mizar Proof",
"87": "Star's Gate",
"88": "Shinjuku Intel",
"89": "Fighting Doll",
"90": "Headless Rider",
"91": "Leonid Five",
"92": "Spica Sign",
"93": "Spica Sign",
"94": "Shiki-Ouji",
"95": "Arcturus Sign",
"96": "Miyako",
"97": "Cor Caroli Sign",
"98": "Cor Caroli Half",
"99": "Agent of Order",
"100": "Universal Law",
"101": "Factor of Heat",
"102": "Factor of Power",
"103": "Factor of Space",
"104": "Factor of Time",
"105": "???",
"106": "Program: Joy",
"107": "Program: Ultra",
"108": "Fangs of Order",
"109": "Gate of Order"
}
# DONE
AUTO_SKILLS = {
"0": "None",
"1": "Blitzkrieg",
"2": "Hustle",
"3": "Fortify",
"4": "Barrier",
"5": "Wall",
"6": "Full Might",
"7": "Ban Phys",
"8": "Ban Fire",
"9": "Ban Ice",
"10": "Ban Elec",
"11": "Ban Force",
"12": "Ban Curse",
"13": "Rage Soul",
"14": "Grace",
"15": "Marksman",
"16": "Tailwind",
"17": "Magic Yin",
"18": "Battle Aura",
"19": "Revive",
"20": "Magic Yang",
"21": "Healing",
"22": "Alter Pain",
"23": "Weaken",
"24": "Debilitate",
"25": "Health Save",
"26": "Strengthen",
"27": "Grimoire +",
"28": "Desperation",
"29": "Rejuvenate",
"30": "Null Auto",
"31": "Pierce +",
"32": "Endure +",
"33": "Neurotoxin",
"34": "Temptation",
"35": "Shield All EX",
"36": "Dual Shadow EX",
"37": "Kinetic Vision",
"38": "Magnet Barrier",
"39": "Distortion",
}
# Character ID's
ALL_CHARS = {
"0": "MC",
"400": "Fumi",
"300": "Yamato",
"900": "Keita",
"800": "Makoto",
"700": "Jungo",
"a00": "Airi",
"b00": "Joe",
"600": "Otome",
"500": "Daichi",
"c00": "Hinako",
"200": "Io",
"100": "Ronaldo",
"d00": "Alcor"
}
# Demon Information
ALL_DEMONS = {
"0": "Human MC",
"1": "Human Ronaldo",
"2": "Human Io",
"3": "Human Yamato",
"4": "Human Fumi",
"5": "Human Daichi",
"6": "Human Otome",
"7": "Human Jungo",
"8": "Human Makoto",
"9": "Human Keita",
"10": "Human Airi",
"11": "Human Joe",
"12": "Human Hinako",
"13": "Human Alcor",
"14": "Omega Tonatiuh",
"15": "Omega Chernobog",
"16": "Omega Wu Kong",
"17": "Omega Susano-o",
"18": "Omega Kartikeya",
"19": "Omega Shiva",
"20": "Megami Hathor",
"21": "Megami Sarasvati",
"22": "Megami Kikuri-hime",
"23": "Megami Brigid",
"24": "Megami Scathach",
"25": "Megami Laksmi",
"26": "Megami Norn",
"27": "Megami Isis",
"28": "Megami Amaterasu",
"29": "Deity Mahakala",
"30": "Deity Thor",
"31": "Deity Arahabaki",
"32": "Deity Odin",
"33": "Deity Yama",
"34": "Deity Lugh",
"35": "Deity Baal",
"36": "Deity Asura",
"37": "Vile Orcus",
"38": "Vile Pazuzu",
"39": "Vile Abaddon",
"40": "Vile Tao Tie",
"41": "Vile Arioch",
"42": "Vile Tezcatlipoca",
"43": "Vile Nyarlathotep",
"44": "Snake Makara",
"45": "Snake Nozuchi",
"46": "Snake Pendragon",
"47": "Snake Gui Xian",
"48": "Snake Quetzacoatl",
"49": "Snake Seiyuu",
"50": "Snake Orochi",
"51": "Snake Ananta",
"52": "Snake Hoyau Kamui",
"53": "Dragon Toubyou",
"54": "Dragon Bai Suzhen",
"55": "Dragon Basilisk",
"56": "Dragon Ym",
"57": "Dragon Python",
"58": "Dragon Culebre",
"59": "Dragon Vritra",
"60": "Dragon Vasuki",
"61": "Divine Holy Ghost",
"62": "Divine Angel",
"63": "Divine Power",
"64": "Divine Lailah",
"65": "Divine Aniel",
"66": "Divine Kazfiel",
"67": "Divine Remiel",
"68": "Divine Metatron",
"69": "Avian Itsumade",
"70": "Avian Moh Shuvuu",
"71": "Avian Hamsa",
"72": "Avian Suparna",
"73": "Avian Vidofnir",
"74": "Avian Badb Catha",
"75": "Avian Anzu",
"76": "Avian Feng Huang",
"77": "Avian Garuda",
"78": "Fallen Gagyson",
"79": "Fallen Abraxas",
"80": "Fallen Flauros",
"81": "Fallen Nisroc",
"82": "Fallen Orobas",
"83": "Fallen Decarabia",
"84": "Fallen Nebiros",
"85": "Fallen Agares",
"86": "Fallen Murmur",
"87": "Avatar Heqet",
"88": "Avatar Kamapua'a",
"89": "Avatar Shiisaa",
"90": "Avatar Bai Ze",
"91": "Avatar Baihu",
"92": "Avatar Airavata",
"93": "Avatar Ukano Mitama",
"94": "Avatar Barong",
"95": "Avatar Anubis",
"96": "Beast Kabuso",
"97": "Beast Hairy Jack",
"98": "Beast Nekomata",
"99": "Beast Cait Sith",
"100": "Beast Nue",
"101": "Beast Orthrus",
"102": "Beast Myrmecolion",
"103": "Beast Cerberus",
"104": "Beast Fenrir",
"105": "Wilder Hare of Inaba",
"106": "Wilder Waira",
"107": "Wilder Garm",
"108": "Wilder Afanc",
"109": "Wilder Mothman",
"110": "Wilder Taown",
"111": "Wilder Behemoth",
"112": "Wilder Ammut",
"113": "Genma Tam Lin",
"114": "Genma Jambavan",
"115": "Genma Tlaloc",
"116": "Genma Ictinike",
"117": "Genma Hanuman",
"118": "Genma Cu Chulainn",
"119": "Genma Kresnik",
"120": "Genma Ganesha",
"121": "Genma Heimdal",
"122": "Fairy Pixie",
"123": "Fairy Knocker",
"124": "Fairy Kijimunaa",
"125": "Fairy Jack Frost",
"126": "Fairy Pyro Jack",
"127": "Fairy Silky",
"128": "Fairy Lorelei",
"129": "Fairy Vivian",
"130": "Fairy Titania",
"131": "Fairy Oberon",
"132": "Tyrant King Frost",
"133": "Tyrant Moloch",
"134": "Tyrant Hecate",
"135": "Tyrant Tzizimitl",
"136": "Tyrant Astaroth",
"137": "Tyrant Mot",
"138": "Tyrant Loki",
"139": "Tyrant Lucifer",
"140": "Kishin Ubelluris",
"141": "Kishin Nalagiri",
"142": "Hitokotonusi",
"143": "Kishin Take-Mikazuchi",
"144": "Kishin Zouchouten",
"145": "Kishin Jikokuten",
"146": "Kishin Koumokuten",
"147": "Kishin Bishamonten",
"148": "Kishin Zaou Gongen",
"149": "Touki Kobold",
"150": "Touki Bilwis",
"151": "Touki Gozuki",
"152": "Touki Mezuki",
"153": "Touki Ikusa",
"154": "Touki Lham Dearg",
"155": "Touki Berserker",
"156": "Touki Yaksa",
"157": "Touki Nata Taishi",
"158": "Touki Oumitsunu",
"159": "Jaki Obariyon",
"160": "Jaki Ogre",
"161": "Jaki Mokoi",
"162": "Jaki Ogun",
"163": "Jaki Wendigo",
"164": "Jaki Legion",
"165": "Jaki Rakshasa",
"166": "Jaki Girimehkala",
"167": "Jaki Grendel",
"168": "Jaki Black Frost",
"169": "Femme Kikimora",
"170": "Femme Lilim",
"171": "Femme Yuki Jyorou",
"172": "Femme Leanan Sidhe",
"173": "Femme Peri",
"174": "Femme Hariti",
"175": "Femme Rangda",
"176": "Femme Kali",
"177": "Femme Lilith",
"178": "Ghost Poltergeist",
"179": "Ghost Agathion",
"180": "Ghost Tenon Cut",
"181": "Ghost Kumbhanda",
"182": "Ghost Loa",
"183": "Ghost Pisaca",
"184": "Ghost Kudlak",
"185": "Ghost Purple Mirror",
"186": "Fiend Biliken",
"187": "Fiend Ghost Q ",
"188": "Fiend Sage of Time",
"189": "Fiend Alice",
"190": "Fiend Trumpeter",
"191": "Hero Neko Shogun",
"192": "Hero Hagen",
"193": "Hero Jeanne d'Arc",
"194": "Hero Yoshitsune",
"195": "Hero Guan Yu",
"196": "Element Flaemis",
"197": "Element Aquans",
"198": "Element Aeros",
"199": "Element Erthys",
"200": "Mitama Ara Mitama",
"201": "Mitama Nigi Mitama",
"202": "Mitama Kusi Mitama",
"203": "Mitama Saki Mitama",
"204": "Fallen Satan",
"205": "Fallen Beelzebub",
"206": "Fallen Belial",
"207": "Divine Sariel",
"208": "Divine Anael",
"209": "Human Atsuro",
"210": "Human Yuzu",
"211": "Dragon Asp",
"212": "Avatar Apis",
"213": "Avatar Pabilsag",
"214": "Wilder Sleipnir",
"215": "Wilder Xiezhai",
"216": "Genma Kangiten",
"217": "Vile Baphomet",
"218": "Famme Anat",
"219": "Megami Pallas Athena",
"220": "Deity Mithra",
"221": "Deity Osiris",
"222": "Snake Gucumatz",
"223": "Avian Da Peng",
"224": "Kishin Ometeotl",
"225": "Genma Jarilo",
"226": "Human Miyako",
"227": "Fallen Botis",
"228": "Human JP's Member",
"229": "Human Salaryman(1)",
"230": "Human Salaryman(2)",
"231": "Human Salaryman(3)",
"232": "Fallen Samael",
"233": "Human Office Lady(1)",
"234": "Human Office Lady(2)",
"235": "Human Office Lady(3)",
"236": "Human Punk(1)",
"237": "Human Punk(2)",
"238": "Human Punk(3)",
"239": "Human Yakuza(1)",
"240": "Human Yakuza(2)",
"241": "Device Module",
"242": "Human Policeman",
"243": "Human JP's Member(F)",
"244": "Human JP's Member(M)",
"245": "Human Young Man(?)",
"246": "Human Old Woman(?)",
"247": "Human Worker",
"248": "Human Student",
"249": "Human Young man",
"250": "Human Buffer(1)",
"251": "Human Buffer(2)",
"252": "Human JP'S Agent(?)",
"253": "Human JP'S Agent(?)",
"254": "Human JP'S Agent(?)",
"255": "Human JP'S Agent(?)",
"256": "Human ?",
"257": "Fallen Bifrons",
"258": "Fallen Barbatos",
"259": "Femme Dzelarhons",
"260": "Genma Kama",
"261": "Megami Parvati",
"262": "Femme Ixtab",
"263": "Tyrant Balor",
"264": "Tyrant Negral",
"265": "Deity Inti",
"266": "Deity Alilat",
"267": "Omega Beji-Weng",
"268": "Deity Lord Nan Dou",
"269": "Hero Masakado",
"270": "Megami Ishtar",
"271": "Megami Black Maria",
"272": "Snake Yurlungr",
"273": "Dragon Fafnir",
"274": "Divine Sraosha",
"275": "Avian Rukh",
"276": "Avian Kau",
"277": "Beast Cbracan",
"278": "Beast Catoblepas",
"279": "Genma Roitschaggata",
"280": "Fairy Spriggan",
"281": "Fairy Troll",
"282": "Tyrant Lucifuge",
"283": "Kishin Okuninushi",
"284": "Touki Dokkaebi",
"285": "Touki Ongyo-Ki",
"286": "Jaki Macabre",
"287": "Femme Jahi",
"288": "Divine Sandalphon",
"289": "Snake Kohruy",
"290": "Exotic Izaya",
"291": "Exotic Celty",
"292": "Exotic Shizuo",
"293": "Touki Momunofu",
"294": "Tyrant Lucifer Frost",
"295": "(Crashes GUI)",
"296": "Hero Frost Five",
"297": "Hero Milk-Kin Frost",
"298": "Hero Strawberry Fost",
"299": "Fairy Lemon Frost",
"300": "Fairy Melon Frost",
"301": "Fairy B. Hawaii Frost",
"302": "Touki Titan",
"303": "Omega Dyonisus",
"304": "Omega Aramisaki",
"305": "Jaki Shiki-Ouji",
"306": "Feeme Xi Wangmu",
"307": "Divine Dominion",
"308": "Fiend Mother Harlot",
"309": "Fiend Dantalian",
"310": "Vile Seth",
"311": "Jaki Shinigami",
"312": "Bel Belberith",
"313": "Bel Jezebel",
"314": "Bel Beldr",
"315": "Maggot Maggot",
"316": "Star Dubhe",
"317": "Star Merak",
"318": "Star Phecda",
"319": "Star Megrez",
"320": "Star Alioth Core",
"321": "Star Mizar",
"322": "Star Benetnasch",
"323": "Star Alcor",
"324": "Star Polaris",
"325": "Star Merak Missile",
"326": "Star Phecda(WK MAG)",
"327": "Star Phecda(WK PHYS)",
"328": "Star Megrez(Empty)",
"329": "Star Alioth (Poison)",
"330": "Energy LayLine Dragon",
"331": "Star Dubhe",
"332": "Star Dunhe(weak)",
"333": "Star Mizar",
"334": "Star Mizar",
"335": "Star Tentacle",
"336": "Star Tentacle",
"337": "Star Tentacle",
"338": "Star Tentacle",
"339": "Star Tentacle",
"340": "Star Tentacle",
"341": "Star Benetnasch(dubhe)",
"342": "Star Benetnasch(merak)",
"343": "Star Benetnasch(phecda)",
"344": "Star Benetnasch(Alioth)",
"345": "Star Benetnasch",
"346": "Star Alcor",
"347": "Star Polaris A",
"348": "Star Polaris Ab",
"349": "Star Polaris B",
"350": "Human Tall Woman",
"351": "Device Tico",
"352": "Device Tico",
"353": "Human Daichi",
"354": "Human Io",
"355": "Human Io",
"356": "Human MC",
"357": "Human SDF Captain",
"358": "Human SDF Member",
"359": "Human Fireman",
"360": "Deity Io",
"361": "Star Guardian",
"362": "Star Guardian",
"363": "Star Guardian",
"364": "Star Guardian",
"365": "Star Guardian",
"366": "Star Guardian",
"367": "Star Guardian",
"368": "Human Salaryman(1)",
"369": "Human Punk(1)",
"370": "Human Student(1)",
"371": "Human Student(2)",
"372": "Human Young Man(1)",
"373": "Human Young Man(2)",
"374": "Human Salaryman(2)",
"375": "Human Salaryman(3)",
"376": "Human Punk(2)",
"377": "Human Punk(3)",
"378": "Human Kitten",
"379": "Human @",
"380": "Human Ronaldo*",
"381": "Human Io*",
"382": "Human Yamato*",
"383": "Human Fumi*",
"384": "Human Daichi*",
"385": "Human Otome*",
"386": "Human Jungo*",
"387": "Human Makoto*",
"388": "Human Keita*",
"389": "Human Airi*",
"390": "Human Joe*",
"391": "Human Hinako*",
"392": "Human Alcor*",
"65535": "Empty"
}
if __name__ == "__main__":
app = mytestapp(None)
app.mainloop()
| 38.255627
| 118
| 0.525867
|
a900d8dec7fd37ab4adca645a03f1689e7145bd6
| 6,692
|
py
|
Python
|
tutorials/examples/interp_plot.py
|
ReynLieu/tf-pwa
|
f354b5036bc8c37ffba95849de5ec3367934eef8
|
[
"MIT"
] | 4
|
2021-05-10T15:17:24.000Z
|
2021-08-16T07:40:06.000Z
|
tutorials/examples/interp_plot.py
|
ReynLieu/tf-pwa
|
f354b5036bc8c37ffba95849de5ec3367934eef8
|
[
"MIT"
] | 45
|
2020-10-24T08:26:19.000Z
|
2022-03-20T06:14:58.000Z
|
tutorials/examples/interp_plot.py
|
ReynLieu/tf-pwa
|
f354b5036bc8c37ffba95849de5ec3367934eef8
|
[
"MIT"
] | 8
|
2020-10-24T06:41:06.000Z
|
2022-01-03T01:29:49.000Z
|
import json
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import yaml
from mpl_toolkits.mplot3d.axes3d import Axes3D
from scipy.interpolate import interp1d
from tf_pwa.config_loader import ConfigLoader
from tf_pwa.experimental.extra_amp import spline_matrix
# import mplhep
# plt.style.use(mplhep.style.LHCb)
def polar_err(r, phi, r_e, phi_e):
"""polar errors for r and phi"""
# print(r, phi, r_e, phi_e)
dxdr = np.cos(phi)
dxdphi = r * np.sin(phi)
dydr = np.sin(phi)
dydphi = -r * np.cos(phi)
x_e = np.sqrt((dxdr * r_e) ** 2 + (dxdphi * phi_e) ** 2)
y_e = np.sqrt((dydr * r_e) ** 2 + (dydphi * phi_e) ** 2)
# print(x_e, y_e)
return x_e, y_e
def dalitz_weight(s12, m0, m1, m2, m3):
"""phase space weight in dalitz plot"""
m12 = np.sqrt(s12)
m12 = np.where(m12 > (m1 + m2), m12, m1 + m2)
m12 = np.where(m12 < (m0 - m3), m12, m0 - m3)
# if(mz < (m_d+m_pi)) return 0;
# if(mz > (m_b-m_pi)) return 0;
E2st = 0.5 * (m12 * m12 - m1 * m1 + m2 * m2) / m12
E3st = 0.5 * (m0 * m0 - m12 * m12 - m3 * m3) / m12
p2st2 = E2st * E2st - m2 * m2
p3st2 = E3st * E3st - m3 * m3
p2st = np.sqrt(np.where(p2st2 > 0, p2st2, 0))
p3st = np.sqrt(np.where(p3st2 > 0, p3st2, 0))
return p2st * p3st
def trans_r2xy(r, phi, r_e, phi_e):
"""r,phi -> x,y """
x = np.array(r) * np.cos(phi)
y = np.array(r) * np.sin(phi)
err = np.array(
[polar_err(i, j, k, l) for i, j, k, l in zip(r, phi, r_e, phi_e)]
)
return x, y, err[:, 0], err[:, 1]
def plot_x_y(name, x, y, x_i, y_i, xlabel, ylabel, ylim=(None, None)):
"""plot x vs y"""
plt.clf()
plt.plot(x, y)
plt.scatter(x_i, y_i)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ylim(ylim)
plt.savefig(name)
def plot_phi(name, m, phi, m_i, phi_i):
""" plot phi and gradient of phi"""
grad = phi[2:] - phi[:-2]
mask = (phi < 3) & (phi > -3)
grad_max = np.mean(np.abs(grad))
# grad_max = np.max(grad[mask[1:-1]])
(idx,) = signal.argrelextrema(grad, np.less)
plt.clf()
# plt.plot(m, pq/np.max(pq))# np.sqrt(x_new**2+y_new**2)**2)
plt.plot(m[1:-1], grad / grad_max, label="$\\Delta \\phi$ ")
plt.plot(m, phi, label="$\\phi$") # np.sqrt(x_new**2+y_new**2)**2)
m_delta = m[idx + 1]
print("min Delta phi in mass:", m_delta)
plt.scatter(m_delta, [-np.pi] * len(m_delta))
plt.scatter(m_i, phi_i, label="points")
plt.xlabel("mass")
plt.ylabel("$\\phi$")
plt.ylim((-np.pi, np.pi))
plt.legend()
plt.savefig(name)
def plot_x_y_err(name, x, y, x_e, y_e):
"""plot eror bar of x y"""
plt.clf()
plt.errorbar(x, y, xerr=x_e, yerr=y_e)
plt.xlabel("real R(m)")
plt.ylabel("imag R(m)")
plt.savefig(name)
def plot_all(
res="MI(1+)S",
config_file="config.yml",
params="final_params.json",
prefix="figure/",
):
"""plot all figure"""
config = ConfigLoader(config_file)
config.set_params(params)
particle = config.get_decay().get_particle(res)
mi, r, phi_i, r_e, phi_e = load_params(config_file, params, res)
x, y, x_e, y_e = trans_r2xy(r, phi_i, r_e, phi_e)
m = np.linspace(mi[0], mi[-1], 1000)
M_Kpm = 0.49368
M_Dpm = 1.86961
M_Dstar0 = 2.00685
M_Bpm = 5.27926
# x_new = interp1d(xi, x, "cubic")(m)
# y_new = interp1d(xi, y, "cubic")(m)
rm_new = particle.interp(m).numpy()
x_new, y_new = rm_new.real, rm_new.imag
pq = dalitz_weight(m * m, M_Bpm, M_Dstar0, M_Dpm, M_Kpm)
pq_i = dalitz_weight(mi * mi, M_Bpm, M_Dstar0, M_Dpm, M_Kpm)
phi = np.arctan2(y_new, x_new)
r2 = x_new * x_new + y_new * y_new
plot_phi(f"{prefix}phi.png", m, phi, mi, np.arctan2(y, x))
plot_x_y(
f"{prefix}r2.png",
m,
r2,
mi,
r * r,
"mass",
"$|R(m)|^2$",
ylim=(0, None),
)
plot_x_y(f"{prefix}x_y.png", x_new, y_new, x, y, "real R(m)", "imag R(m)")
plot_x_y_err(
f"{prefix}x_y_err.png", x[1:-1], y[1:-1], x_e[1:-1], y_e[1:-1]
)
plot_x_y(
f"{prefix}r2_pq.png",
m,
r2 * pq,
mi,
r * r * pq_i,
"mass",
"$|R(m)|^2 p \cdot q$",
ylim=(0, None),
)
plot3d_m_x_y(f"{prefix}m_r.gif", m, x_new, y_new)
if __name__ == "__main__":
main()
| 27.539095
| 78
| 0.558876
|
a902196e210ce0c9d3fc255989473f3fdb1ab785
| 3,316
|
py
|
Python
|
scripts/val_step_images_pull.py
|
neuroailab/curiosity_deprecated
|
65f7cde13b07cdac52eed39535a94e7544c396b8
|
[
"Apache-2.0"
] | null | null | null |
scripts/val_step_images_pull.py
|
neuroailab/curiosity_deprecated
|
65f7cde13b07cdac52eed39535a94e7544c396b8
|
[
"Apache-2.0"
] | 2
|
2017-11-18T00:53:33.000Z
|
2017-11-18T00:53:40.000Z
|
scripts/val_step_images_pull.py
|
neuroailab/curiosity_deprecated
|
65f7cde13b07cdac52eed39535a94e7544c396b8
|
[
"Apache-2.0"
] | null | null | null |
'''
A script for accessing visualization data (saving images at validation steps during training) and saving them to a local directory.
'''
import pymongo as pm
import pickle
import os
import gridfs
import cPickle
import numpy as np
from PIL import Image
dbname = 'future_pred_test'
collname = 'asymmetric'
port = 27017
exp_id = '3_3'
save_loc = '/home/nhaber/really_temp'
save_fn = os.path.join(save_loc, exp_id + '.p')
target_name = 'valid0'
one_channel_softmax = True
conn = pm.MongoClient(port=port)
coll = conn[dbname][collname + '.files']
print('experiments')
print(coll.distinct('exp_id'))
cur = coll.find({'exp_id' : exp_id})
q = {'exp_id' : exp_id, 'validation_results' : {'$exists' : True}}
val_steps = coll.find(q)
val_count = val_steps.count()
print('num val steps so far')
print(val_count)
saved_data = {}
def convert_to_viz(np_arr):
'''I did a silly thing and saved discretized-loss predictions as if they were image predictions.
This recovers and converts to an ok visualization.'''
my_shape = np_arr.shape
num_classes = np_arr.shape[-1]
    # A later fix saves the raw prediction instead of scaling it to 255
if np_arr.dtype == 'float32':
exp_arr = np.exp(np_arr)
else:
exp_arr = np.exp(np_arr.astype('float32') / 255.)
sum_arr = np.sum(exp_arr, axis = -1)
    # transpose so that sum_arr broadcasts across the trailing class axis
    softy = (exp_arr.T / sum_arr.T).T
return np.sum((softy * range(num_classes) * 255. / float(num_classes)), axis = -1).astype('uint8')
def convert_to_viz_sharp(np_arr):
'''Similar to the above, but just taking the argmax, hopefully giving a sharper visualization.
'''
num_classes = np_arr.shape[-1]
a_m = np.argmax(np_arr, axis = -1)
return (a_m * 255. / float(num_classes)).astype('uint8')
for val_num in range(val_count):
idx = val_steps[val_num]['_id']
fn = coll.find({'item_for' : idx})[0]['filename']
fs = gridfs.GridFS(coll.database, collname)
fh = fs.get_last_version(fn)
saved_data[val_num] = cPickle.loads(fh.read())['validation_results']
fh.close()
exp_dir = os.path.join(save_loc, exp_id)
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
for val_num, val_data in saved_data.iteritems():
val_dir = os.path.join(exp_dir, 'val_' + str(val_num))
if not os.path.exists(val_dir):
os.mkdir(val_dir)
for tgt_desc, tgt in val_data[target_name].iteritems():
tgt_images = [arr for step_results in tgt for arr in step_results]
for (instance_num, arr) in enumerate(tgt_images):
instance_dir = os.path.join(val_dir, 'instance_' + str(instance_num))
if not os.path.exists(instance_dir):
os.mkdir(instance_dir)
if len(arr.shape) == 4:
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.jpeg')
arr = convert_to_viz_sharp(arr)
im = Image.fromarray(arr)
im.save(fn)
#just save in human-readable form if 1-array
elif len(arr.shape) == 1:
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.txt')
np.savetxt(fn, arr)
else:
assert len(arr.shape) == 3
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.jpeg')
if one_channel_softmax and 'pred' in tgt_desc:
arr = sigmoid_it(arr)
im = Image.fromarray(arr)
im.save(fn)
| 29.607143
| 131
| 0.701448
|
a9039f8421d00114c0ba14dfaca35466584a7fcb
| 1,543
|
py
|
Python
|
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
"""
BASICS
Creates the necessary tables and users.
"""
import os
import psycopg2
con = psycopg2.connect (host = os.environ["POSTGRES_URL"], database = os.environ["POSTGRES_DB"], user = os.environ["POSTGRES_USER"], password = os.environ["POSTGRES_PASSWORD"])
cur = con.cursor()
# Creates main user table
cur.execute("""
CREATE TABLE IF NOT EXISTS user_data (
user_id serial PRIMARY KEY,
username VARCHAR (256) UNIQUE NOT NULL,
password VARCHAR (256) NOT NULL,
salt VARCHAR (256) NOT NULL,
date_creation TIMESTAMP NOT NULL,
last_action TIMESTAMP NOT NULL,
last_login TIMESTAMP NOT NULL,
last_logout TIMESTAMP NOT NULL
)""")
# Creates a read only user (SELECT)
# The username is interpolated directly: psycopg2's %s placeholders can only
# represent values, not identifiers such as user names.
# The value comes from the environment, not from user input.
read_only_postgres_user = os.environ["R_USERNAME"]
cur.execute("CREATE USER "+ read_only_postgres_user + " WITH ENCRYPTED PASSWORD %s", (os.environ["R_PASSWORD"],))
cur.execute("GRANT SELECT ON ALL TABLES IN SCHEMA public TO " + read_only_postgres_user)
# Creates a write user (SELECT, INSERT, UPDATE)
write_postgres_user = os.environ["RW_USERNAME"]
cur.execute("CREATE USER "+ write_postgres_user + " WITH ENCRYPTED PASSWORD %s", (os.environ["RW_PASSWORD"],))
cur.execute("GRANT SELECT, INSERT, DELETE, UPDATE ON ALL TABLES IN SCHEMA public TO " + write_postgres_user)
cur.execute("GRANT SELECT, USAGE ON ALL SEQUENCES IN SCHEMA public TO " + write_postgres_user)
con.commit()
con.close ()
| 32.829787
| 176
| 0.720674
|
a90540d0d0a5a9bc45b650e47d3f81668b272c4b
| 338
|
py
|
Python
|
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
computing_graph = defaultdict(list)  # missing keys default to an empty list
"""
for (key, value) in data:
result[key].append(value)
print(result)#defaultdict(<class 'list'>, {'p': [1, 2, 3], 'h': [1, 2, 3]})
"""
n = 'p'
m = [1,2,23]
computing_graph[n].append(m)
print(computing_graph)
| 26
| 76
| 0.659763
|
a905bc7c157d96b2e4f0eee9148f0267c5d741fe
| 597
|
py
|
Python
|
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from botlib.curl import Curl
from botlib.html2list import HTML2List, MatchGroup
URL = 'https://www.vice.com/en/topic/motherboard'
SOURCE = Curl.get(URL, cache_only=True)
SELECT = '.vice-card__content'
match = MatchGroup({
'url': r'<a href="([^"]*)"',
'title': r'<h3[^>]*><a [^>]*>([\s\S]*?)</a>[\s\S]*?</h3>',
'desc': r'<p[^>]*>([\s\S]*?)</p>',
'wrong-regex': r'<a xref="([\s\S]*?)"',
})
for elem in reversed(HTML2List(SELECT).parse(SOURCE)):
match.set_html(elem)
for k, v in match.to_dict().items():
print(k, '=', v)
print()
break
| 28.428571
| 62
| 0.571189
|
a906359018ecf72d4a4f117b4a1b82b665b383a6
| 3,912
|
py
|
Python
|
examples/j1j2_2d_exact_4.py
|
vigsterkr/FlowKet
|
0d8f301b5f51a1bab83021f10f65cfb5f2751079
|
[
"MIT"
] | 21
|
2019-11-19T13:59:13.000Z
|
2021-12-03T10:26:30.000Z
|
examples/j1j2_2d_exact_4.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 10
|
2019-11-15T12:07:28.000Z
|
2020-11-07T18:12:18.000Z
|
examples/j1j2_2d_exact_4.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 11
|
2019-12-09T22:51:17.000Z
|
2021-11-29T22:05:41.000Z
|
from collections import OrderedDict
import itertools
import sys
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from flowket.callbacks import TensorBoard
from flowket.callbacks.exact import default_wave_function_callbacks_factory, ExactObservableCallback
from flowket.operators.j1j2 import J1J2
from flowket.operators import NetketOperatorWrapper
from flowket.machines import ConvNetAutoregressive2D
from flowket.optimization import ExactVariational, VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import FastAutoregressiveSampler
from flowket.optimizers import convert_to_accumulate_gradient_optimizer
import numpy
import netket
params_grid_config = {
'width': [32],
'depth': [5],
'lr': [5e-3, 1e-3],
'weights_normalization': [False, True]
}
run_index = int(sys.argv[-1].strip())
ks, vs = zip(*params_grid_config.items())
params_options = list(itertools.product(*vs))
chosen_v = params_options[run_index % len(params_options)]
params = dict(zip(ks, chosen_v))
print('Chosen params: %s' % str(params))
hilbert_state_shape = (4, 4)
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=params['depth'], num_of_channels=params['width'],
weights_normalization=params['weights_normalization'])
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 2 ** 12
# For a fair comparison with Monte Carlo, each epoch sees 2 ** 18 samples
steps_per_epoch = 2 ** 6
true_ground_state_energy = -30.022227800323677
operator = J1J2(hilbert_state_shape=hilbert_state_shape, j2=0.5, pbc=False)
exact_variational = ExactVariational(model, operator, batch_size)
optimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)
convert_to_accumulate_gradient_optimizer(
optimizer,
update_params_frequency=exact_variational.num_of_batch_until_full_cycle,
accumulate_sum_or_mean=True)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
total_spin = NetketOperatorWrapper(total_spin_netket_operator(hilbert_state_shape), hilbert_state_shape)
run_name = 'j1j2_4_exact_weights_normalization_%s_depth_%s_width_%s_adam_lr_%s_run_%s' % \
(params['weights_normalization'], params['depth'], params['width'], params['lr'], run_index)
tensorboard = TensorBoard(log_dir='tensorboard_logs/%s' % run_name,
update_freq='epoch',
write_output=False)
callbacks = default_wave_function_callbacks_factory(exact_variational, log_in_batch_or_epoch=False,
true_ground_state_energy=true_ground_state_energy) + [
ExactObservableCallback(exact_variational, total_spin, 'total_spin', log_in_batch_or_epoch=False),
tensorboard]
model.fit_generator(exact_variational.to_generator(), steps_per_epoch=steps_per_epoch, epochs=1000, callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('final_%s.h5' % run_name)
| 40.329897
| 120
| 0.748466
|
a907a743744664923c1dc0146b6eda52d8a91360
| 3,833
|
py
|
Python
|
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | 6
|
2015-02-06T23:41:01.000Z
|
2015-10-21T03:08:51.000Z
|
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | null | null | null |
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | 1
|
2019-10-02T08:41:50.000Z
|
2019-10-02T08:41:50.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A archive_info is a json file describing a single package archive."""
import collections
import hashlib
import json
import os
ArchiveInfoTuple = collections.namedtuple(
'ArchiveInfoTuple',
['name', 'hash', 'url', 'tar_src_dir', 'extract_dir'])
def GetArchiveHash(archive_file):
"""Gets the standardized hash value for a given archive.
This hash value is the expected value used to verify package archives.
Args:
archive_file: Path to archive file to hash.
Returns:
Hash value of archive file, or None if file is invalid.
"""
if os.path.isfile(archive_file):
with open(archive_file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
return None
| 33.920354
| 76
| 0.687712
|
8bef32020f0494687a4f159a327cd70c156e52e5
| 3,546
|
py
|
Python
|
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
"""Ad hoc tests of the LsstLatexDoc class. Other test modules rigorously verify
LsstLatexDoc against sample documents.
"""
from pybtex.database import BibliographyData
import pytest
from lsstprojectmeta.tex.lsstdoc import LsstLatexDoc
def test_no_short_title():
"""title without a short title."""
sample = r"\title{Title}"
lsstdoc = LsstLatexDoc(sample)
assert lsstdoc.title == "Title"
def test_title_variations():
"""Test variations on the title command's formatting."""
# Test with whitespace in title command
input_txt = r"\title [Test Plan] { \product ~Test Plan}"
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.title == r"\product ~Test Plan"
assert lsstdoc.short_title == "Test Plan"
def test_author_variations():
"""Test variations on the author command's formatting."""
input_txt = (r"\author {William O'Mullane, Mario Juric, "
r"Frossie Economou}"
r" % the author(s)")
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == ["William O'Mullane",
"Mario Juric",
"Frossie Economou"]
def test_author_list_amanda():
"""Test author list parsing where one author's name is Amanda.
"""
input_txt = (
r"\author {William O'Mullane, John Swinbank, Leanne Guy, "
r"Amanda Bauer}"
)
expected = [
"William O'Mullane",
"John Swinbank",
"Leanne Guy",
"Amanda Bauer"
]
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == expected
def test_handle_variations():
"""Test variations on the handle command's formatting."""
input_txt = r"\setDocRef {LDM-503} % the reference code "
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.handle == "LDM-503"
def test_abstract_variations():
"""Test variations on the abstract command's formatting."""
input_txt = (r"\setDocAbstract {" + "\n"
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications "
r"for specific items.}")
expected_abstract = (
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications for "
r"specific items."
)
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.abstract == expected_abstract
def test_default_load_bib_db():
"""Test that the common lsst-texmf bibliographies are always loaded.
"""
lsstdoc = LsstLatexDoc('')
assert isinstance(lsstdoc.bib_db, BibliographyData)
| 32.833333
| 79
| 0.645798
|
8bf15c081cf1ec7e2805d8cdda039957d68c5367
| 454
|
py
|
Python
|
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
frase = str(input('Digite o seu nome completo para a análise ser feita:')).strip()
print('-' * 50)
print('Analisando nome...')
print('O seu nome em maiúsculas é {}.'.format(frase.upper()))
print('O seu nome em minúsculas é {}.'.format(frase.lower()))
print('O seu nome tem ao todo {} letras.'.format(len(frase) - frase.count(' ')))
print('O seu primeiro nome tem {} letras.'.format(frase.find(' ')))
print('Nome analisado com sucesso!')
print('-' * 50)
| 41.272727
| 82
| 0.665198
|
8bf171a05404452569f820648c7f427a69c301b2
| 8,012
|
py
|
Python
|
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
import logging
import msgpack
import msgpack_numpy as mpn
from confluent_kafka.cimpl import KafkaException
import numpy as np
import pickle
import pytest
from bluesky_kafka import Publisher, BlueskyConsumer
from bluesky_kafka.tests.conftest import get_all_documents_from_queue
from bluesky.plans import count
from event_model import sanitize_doc
# mpn.patch() is recommended by msgpack-numpy as a way
# to patch msgpack but it caused a utf-8 decode error
mpn.patch()
logging.getLogger("bluesky.kafka").setLevel("DEBUG")
# the Kafka test broker should be configured with
# KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
| 33.383333
| 92
| 0.668622
|
8bf35fc329c7f95687b72ea8d092fd4c3193b925
| 407
|
py
|
Python
|
Chapter01/datastructures_06.py
|
vabyte/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 84
|
2018-08-09T09:30:03.000Z
|
2022-01-04T23:20:38.000Z
|
Chapter01/datastructures_06.py
|
jiro74/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 1
|
2019-11-04T18:57:40.000Z
|
2020-09-07T08:52:25.000Z
|
Chapter01/datastructures_06.py
|
jiro74/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 33
|
2018-09-26T11:05:55.000Z
|
2022-03-15T10:31:10.000Z
|
import time
import heapq
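import itertools
class PriorityQueue:
    # Minimal stand-in for the elided class: a heapq-backed priority queue.
    # The counter breaks ties so unorderable payloads are never compared.
    def __init__(self):
        self._q = []
        self._count = itertools.count()
    def add(self, value, priority=0):
        heapq.heappush(self._q, (priority, next(self._count), value))
    def pop(self):
        return heapq.heappop(self._q)[-1]
def f1():
    # Stand-in payloads so the calls below have something to schedule.
    print('first')
def f2():
    print('second')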
pq = PriorityQueue()
pq.add(f2, priority=1)
pq.add(f1, priority=0)
pq.pop()()
pq.pop()()
| 17.695652
| 63
| 0.619165
|
8bf4cdf0dd3a18f2cee9855d7af028188308986c
| 1,080
|
py
|
Python
|
webshots/popular-websites.py
|
acamero/evo-web
|
5229ff89e2ac2d3f6a3a7f80d3f514fd3ed728c9
|
[
"MIT"
] | null | null | null |
webshots/popular-websites.py
|
acamero/evo-web
|
5229ff89e2ac2d3f6a3a7f80d3f514fd3ed728c9
|
[
"MIT"
] | null | null | null |
webshots/popular-websites.py
|
acamero/evo-web
|
5229ff89e2ac2d3f6a3a7f80d3f514fd3ed728c9
|
[
"MIT"
] | null | null | null |
import requests
import sys
from lxml import html
#csv_file_name = sys.argv[1] # output file
csv_file_name = "../webshot_data/popular-web-sites.csv"
csv_file = open(csv_file_name, "w")
categories = ["Arts", "Business", "Computers", "Games", "Health", "Home", "Kids_and_Teens", "News", "Recreation", "Reference", "Regional", "Science", "Shopping", "Society", "Sports", "World"]
# categories = ["Adult", "Arts", "Business", "Computers", "Games", "Health", "Home", "Kids_and_Teens", "News", "Recreation", "Reference", "Regional", "Science", "Shopping", "Society", "Sports", "World"]
base = "http://www.alexa.com/topsites/category/Top/"
for category in categories:
path = base + category
    print(path)
r = requests.get(path)
tree = html.fromstring(r.content)
trs = tree.xpath('.//a/@href')
for tr in trs:
if tr.startswith( '/siteinfo/' ) :
wp = tr.replace( '/siteinfo/', '' )
if len(wp) > 1:
                print(wp)
csv_file.write( category + ',' + wp + '\n')
# end for
# end for
csv_file.close()
| 34.83871
| 202
| 0.605556
|
8bf5aa849ab9919f36bd06cb32baf1102cd57b0f
| 13,653
|
py
|
Python
|
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from __future__ import absolute_import, division
import numpy as np
from astropy import units as u
from astropy.coordinates.representation import (CartesianRepresentation,
UnitSphericalRepresentation,
SphericalRepresentation)
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
RepresentationMapping)
from astropy.coordinates import FrameAttribute
from sunpy import sun # For Carrington rotation number
from .representation import (SphericalWrap180Representation,
UnitSphericalWrap180Representation)
from .frameattributes import TimeFrameAttributeSunPy
RSUN_METERS = sun.constants.get('radius').si.to(u.m)
DSUN_METERS = sun.constants.get('mean distance').si.to(u.m)
__all__ = ['HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective']
| 41.49848
| 90
| 0.620669
|
8bf7588b6e982ef5c34279f0381a39c74ff2495d
| 4,640
|
py
|
Python
|
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import os
import sys
import numpy as np
import ray
from ray import serve
from ray.serve.api import _get_deployments_from_node
from ray.serve.handle import PipelineHandle
from ray.serve.pipeline.pipeline_input_node import PipelineInputNode
def test_single_node_deploy_success(serve_instance):
m1 = Adder.bind(1)
handle = serve.run(m1)
assert ray.get(handle.remote(41)) == 42
def test_single_node_driver_success(serve_instance):
m1 = Adder.bind(1)
m2 = Adder.bind(2)
with PipelineInputNode() as input_node:
out = m1.forward.bind(input_node)
out = m2.forward.bind(out)
driver = Driver.bind(out)
handle = serve.run(driver)
assert ray.get(handle.remote(39)) == 42
def test_options_and_names(serve_instance):
m1 = Adder.bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder"
m1 = Adder.options(name="Adder2").bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder2"
m1 = Adder.options(num_replicas=2).bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.num_replicas == 2
def test_passing_handle(serve_instance):
child = Adder.bind(1)
parent = TakeHandle.bind(child)
driver = Driver.bind(parent)
handle = serve.run(driver)
assert ray.get(handle.remote(1)) == 2
def test_passing_handle_in_obj(serve_instance):
child1 = Echo.bind("ed")
child2 = Echo.bind("simon")
parent = Parent.bind({"child1": child1, "child2": child2})
handle = serve.run(parent)
assert ray.get(handle.remote("child1")) == "ed"
assert ray.get(handle.remote("child2")) == "simon"
def test_pass_handle_to_multiple(serve_instance):
child = Child.bind()
parent = Parent.bind(child)
grandparent = GrandParent.bind(child, parent)
handle = serve.run(grandparent)
assert ray.get(handle.remote()) == "ok"
def test_non_json_serializable_args(serve_instance):
# Test that we can capture and bind non-json-serializable arguments.
arr1 = np.zeros(100)
arr2 = np.zeros(200)
handle = serve.run(A.bind(arr1))
ret1, ret2 = ray.get(handle.remote())
assert np.array_equal(ret1, arr1) and np.array_equal(ret2, arr2)
# TODO: check that serve.build raises an exception.
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| 25.217391
| 84
| 0.650647
|
8bf7c2002c8b113a9de4b7623d703ed3f154d1fb
| 118
|
py
|
Python
|
code/super_minitaur/script/lpmslib/lputils.py
|
buenos-dan/quadrupedal_robot
|
605054c027e20b83e347f2aa175c03c965e72983
|
[
"MIT"
] | 5
|
2019-03-22T06:39:42.000Z
|
2021-07-27T13:56:45.000Z
|
code/super_minitaur/script/lpmslib/lputils.py
|
buenos-dan/quadrupedal_robot
|
605054c027e20b83e347f2aa175c03c965e72983
|
[
"MIT"
] | null | null | null |
code/super_minitaur/script/lpmslib/lputils.py
|
buenos-dan/quadrupedal_robot
|
605054c027e20b83e347f2aa175c03c965e72983
|
[
"MIT"
] | 2
|
2021-02-16T09:52:04.000Z
|
2021-11-30T12:12:55.000Z
|
#helpers
| 13.111111
| 32
| 0.550847
|
8bf7e9d1ed3871fd0972273d253da43b826c3e35
| 598
|
py
|
Python
|
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | null | null | null |
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | null | null | null |
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | 2
|
2020-07-28T18:12:09.000Z
|
2021-02-22T06:31:19.000Z
|
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
# produce json messages
producer = KafkaProducer(bootstrap_servers='kafka:9092', security_protocol='PLAINTEXT',
value_serializer=lambda m: json.dumps(m).encode('ascii'))
result = producer.send('ktig', {'measure_id': 1, 'interval': 10, 'timestamp': '',
'color': 'red', 'sender_tx_counter': 50,
'sender_rx_counter': 50, 'reflector_tx_counter': 48,
'reflector_rx_counter': 48})
producer.close()
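# A matching consumer sketch (assumes the same broker and topic; kafka-python API):
# from kafka import KafkaConsumer
# consumer = KafkaConsumer('ktig', bootstrap_servers='kafka:9092',
#                          value_deserializer=lambda m: json.loads(m.decode('ascii')))
# for message in consumer:
#     print(message.value)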
| 37.375
| 88
| 0.602007
|
8bf80a6b7a2e719d044ca3071a20a59ca3623e14
| 248
|
py
|
Python
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 126
|
2019-07-19T14:42:41.000Z
|
2022-03-21T22:22:19.000Z
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 38
|
2019-08-28T01:46:31.000Z
|
2022-03-17T05:46:51.000Z
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 55
|
2019-08-02T09:32:33.000Z
|
2021-12-22T11:25:51.000Z
|
try:
import uasyncio.core as asyncio
except:
import asyncio
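def cb(arg1, arg2):
    # Minimal stand-in for the elided callback: echo the scheduled arguments.
    print(arg1, arg2)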
loop = asyncio.get_event_loop()
loop.call_soon(cb, "test", "test2")
loop.run_forever()
print("OK")
| 14.588235
| 35
| 0.637097
|
8bf8770f23fe5d9c46d48d1b60253229783948a7
| 1,491
|
py
|
Python
|
labdevices/_mock/ando.py
|
jkrauth/labdevices
|
4b00579117216b6431079d79c1c978b73a6c0b96
|
[
"MIT"
] | null | null | null |
labdevices/_mock/ando.py
|
jkrauth/labdevices
|
4b00579117216b6431079d79c1c978b73a6c0b96
|
[
"MIT"
] | null | null | null |
labdevices/_mock/ando.py
|
jkrauth/labdevices
|
4b00579117216b6431079d79c1c978b73a6c0b96
|
[
"MIT"
] | 1
|
2021-04-28T15:17:31.000Z
|
2021-04-28T15:17:31.000Z
|
"""
Provides a mock for the plx_gpib_ethernet package used in the
Ando devices.
"""
from unittest.mock import Mock
# The commands that are used in the methods of the
# ANDO devices and typical responses.
QUERY_COMMANDS = {
# Spectrum Analyzer commands
"*IDN?": "ANDO dummy\r\n",
"SWEEP?": "0\r\n",
"SMPL?": " 501\r\n",
"ANA?": " 490.808, 94.958, 19\r\n",
"CTRWL?": "1050.00\r\n",
"SPAN?": "1300.0\r\n",
"CWPLS?": "1\r\n",
"PLMOD?": " 38\r\n",
}
| 33.886364
| 91
| 0.541247
|
8bf9802eb12db8bd7835a073469cfa2b0ae5ce2e
| 2,898
|
py
|
Python
|
hearthstone/simulator/core/card_graveyard.py
|
JDBumgardner/stone_ground_hearth_battles
|
9fe095651fab60e8ddbf563f0b9b7f3e723d5f4f
|
[
"Apache-2.0"
] | 20
|
2020-08-01T03:14:57.000Z
|
2021-12-19T11:47:50.000Z
|
hearthstone/simulator/core/card_graveyard.py
|
JDBumgardner/stone_ground_hearth_battles
|
9fe095651fab60e8ddbf563f0b9b7f3e723d5f4f
|
[
"Apache-2.0"
] | 48
|
2020-08-01T03:06:43.000Z
|
2022-02-27T10:03:47.000Z
|
hearthstone/simulator/core/card_graveyard.py
|
JDBumgardner/stone_ground_hearth_battles
|
9fe095651fab60e8ddbf563f0b9b7f3e723d5f4f
|
[
"Apache-2.0"
] | 3
|
2020-06-28T01:23:37.000Z
|
2021-11-11T23:09:36.000Z
|
import sys
from inspect import getmembers, isclass
from typing import Union
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.events import CardEvent, EVENTS, BuyPhaseContext, CombatPhaseContext
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
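# Collect every MonsterCard subclass defined in this module via introspection.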
REMOVED_CARDS = [
    member[1]
    for member in getmembers(
        sys.modules[__name__],
        lambda member: (isclass(member)
                        and issubclass(member, MonsterCard)
                        and member.__module__ == __name__))
]
| 36.683544
| 149
| 0.640787
|
8bfa439c74e0b340dc223e43b06761bdee5d063d
| 1,026
|
py
|
Python
|
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Scan views."""
from flask import Blueprint, render_template, flash, redirect, url_for, session
from flask_login import current_user
from .forms import ScanForm
from .service import ScanService
from cookiecutter_mbam.utils import flash_errors
blueprint = Blueprint('scan', __name__, url_prefix='/scans', static_folder='../static')
from flask import current_app
| 34.2
| 87
| 0.693957
|
8bfa8f2b88f8aca9aab6973afb6831c3aa0a0478
| 3,460
|
py
|
Python
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | 1
|
2021-10-04T14:42:43.000Z
|
2021-10-04T14:42:43.000Z
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | 3
|
2021-09-07T17:54:58.000Z
|
2021-11-16T21:40:52.000Z
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import dbstore
inMemFile = ":memory:"
measurementTable = "measurements"
alertTable = "alerts"
timestampTestData = "2021-04-29T23:25:44Z"
| 23.69863
| 102
| 0.67052
|
8bfbe25b3704f8131128b16676dbbc1e54dcc6b4
| 446
|
py
|
Python
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 55
|
2016-11-20T17:08:19.000Z
|
2022-03-11T22:19:43.000Z
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 17
|
2017-09-20T07:52:17.000Z
|
2021-12-03T10:03:00.000Z
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 29
|
2016-12-10T15:00:11.000Z
|
2021-12-02T12:54:05.000Z
|
import importlib
_NOTIFICATION_BACKENDS = None
| 29.733333
| 99
| 0.695067
|
8bfc984d3b1bbcef2b5af5e9508ff3a2a9c35186
| 604
|
py
|
Python
|
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
labels = ['dog', 'cat', 'panda']
np.random.seed(1)
# Simulate model already trained
W = np.random.randn(3, 3072)
b = np.random.randn(3)
orig = cv2.imread('beagle.png')
image = cv2.resize(orig, (32, 32)).flatten()
scores = W.dot(image) + b
for (label, score) in zip(labels, scores):
    print('[INFO] {}: {:.2f}'.format(label, score))
cv2.putText(orig,
'Label: {}'.format(labels[np.argmax(scores)]),
(10,30),
cv2.FONT_HERSHEY_SIMPLEX,
0.9,
(0, 255, 0),
2)
cv2.imshow('Image', orig)
cv2.waitKey(0)
| 20.133333
| 58
| 0.574503
|
8bfd515b8c9ab45a349fc3b66ded01bb3b315143
| 2,759
|
py
|
Python
|
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | null | null | null |
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | 9
|
2021-09-09T07:40:21.000Z
|
2022-01-13T07:03:59.000Z
|
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | 1
|
2022-01-26T09:51:29.000Z
|
2022-01-26T09:51:29.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .signal_processing import (
resample_data,
normalize_signal,
calculate_magnitude,
calculate_offset_in_seconds_using_cross_correlation,
calculate_sampling_frequency_from_timestamps,
)
def get_synchronization_offset(
video_sync_df: pd.DataFrame,
sensor_sync_df: pd.DataFrame,
use_gradient: bool,
show_plots: bool = False,
) -> pd.Timedelta:
"""
Get the temporal offset between the two given sensor dataframes.
:param video_sync_df: the synchronization information from the video
:param sensor_sync_df: the synchronization information from the sensor
    :param use_gradient: if true, the second derivative of the video synchronization data will be used. if false,
the raw data will be used.
:param show_plots: can enable debugging plots
:return: a pd.Timedelta object that specifies how much the sensor_sync_df needs to be moved in time to align it with
the video_sync_df
"""
video_sf = calculate_sampling_frequency_from_timestamps(video_sync_df.index)
sensor_sf = calculate_sampling_frequency_from_timestamps(sensor_sync_df.index)
if use_gradient:
video_acceleration = np.gradient(
np.gradient(video_sync_df.to_numpy(), axis=0), axis=0
)
else:
video_acceleration = video_sync_df.to_numpy()
video_acceleration = resample_data(
video_acceleration,
current_sampling_rate=video_sf,
new_sampling_rate=sensor_sf,
)
video_acceleration = normalize_signal(video_acceleration)
video_acceleration = calculate_magnitude(video_acceleration)
sensor_acceleration = normalize_signal(sensor_sync_df.to_numpy())
sensor_acceleration = calculate_magnitude(sensor_acceleration)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(sensor_acceleration, label="IMU")
plt.xlabel("Time (s)")
plt.ylabel("Acceleration Magnitude (normalized)")
plt.legend()
plt.show()
shift = calculate_offset_in_seconds_using_cross_correlation(
ref_signal=video_acceleration,
target_signal=sensor_acceleration,
sampling_frequency=sensor_sf,
)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(
np.arange(len(sensor_acceleration)) + (sensor_sf * shift),
sensor_acceleration,
label="IMU",
)
plt.xlabel("Time (s)")
plt.ylabel("Acceleration (normalized)")
plt.legend()
plt.show()
return pd.Timedelta(seconds=shift)
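# A minimal usage sketch (hypothetical DataFrames; both need the time index the
# sampling-frequency helpers expect):
# offset = get_synchronization_offset(video_df, sensor_df, use_gradient=True)
# sensor_df_aligned = sensor_df.shift(freq=offset)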
| 33.240964
| 120
| 0.696629
|
8bfd607f605b753ac1980b586075777909511585
| 244
|
py
|
Python
|
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
s = 'vpoboooboboobooboboo'
counter = 0
# Slide a 3-character window across the string and count 'bob' matches.
for start in range(len(s) - 2):
    if s[start:start + 3] == 'bob':
        counter += 1
print('Number of times bob occurs is: ', counter)
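# The same count as a one-liner:
# print(sum(1 for i in range(len(s) - 2) if s[i:i + 3] == 'bob'))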
| 10.166667
| 49
| 0.565574
|
8bfd9f299f8a3e49d68acee30f35331e05c04631
| 5,469
|
py
|
Python
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 14
|
2015-09-14T18:04:27.000Z
|
2021-02-19T16:51:57.000Z
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 3
|
2015-12-14T17:07:45.000Z
|
2021-10-02T05:55:11.000Z
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 3
|
2015-04-12T16:57:02.000Z
|
2021-02-20T17:15:51.000Z
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import random
import unittest
import sfml as sf
if __name__ == '__main__':
unittest.main()
| 30.724719
| 78
| 0.513257
|
8bfef33258b56cdbd64d66536a38eaa752a6a523
| 12,840
|
py
|
Python
|
textgen/augment/word_level_augment.py
|
shibing624/textgen
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 31
|
2021-06-29T14:31:35.000Z
|
2022-03-25T00:36:44.000Z
|
textgen/augment/word_level_augment.py
|
shibing624/text-generation
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 1
|
2021-11-09T21:30:16.000Z
|
2022-03-02T10:21:04.000Z
|
textgen/augment/word_level_augment.py
|
shibing624/text-generation
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 5
|
2021-06-21T03:13:39.000Z
|
2022-02-07T06:53:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: Word-level augmentations, including replacing words with uniformly
random words or TF-IDF based word replacement.
"""
import collections
import copy
import math
import numpy as np
from textgen.utils.log import logger
min_token_num = 3
def get_data_idf(tokenized_sentence_list):
"""Compute the IDF score for each word. Then compute the TF-IDF score."""
word_doc_freq = collections.defaultdict(int)
# Compute IDF
for cur_sent in tokenized_sentence_list:
cur_word_dict = {}
for word in cur_sent:
cur_word_dict[word] = 1
for word in cur_word_dict:
word_doc_freq[word] += 1
idf = {}
for word in word_doc_freq:
idf[word] = math.log(len(tokenized_sentence_list) * 1. / word_doc_freq[word])
# Compute TF-IDF
tf_idf = {}
for cur_sent in tokenized_sentence_list:
for word in cur_sent:
if word not in tf_idf:
tf_idf[word] = 0
tf_idf[word] += 1. / len(cur_sent) * idf[word]
return {
"idf": idf,
"tf_idf": tf_idf,
}
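if __name__ == '__main__':
    # Tiny self-check on a made-up two-sentence corpus (not part of the
    # original module): "b" occurs in 1 of 2 sentences, so idf["b"] = log(2)
    # and tf_idf["b"] = (1/2) * log(2), roughly 0.347.
    stats = get_data_idf([["a", "b"], ["a"]])
    print(stats["idf"], stats["tf_idf"])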
| 35.469613
| 95
| 0.576947
|
e300c54c781958b660c0d153f40329e21fe52fd9
| 6,539
|
py
|
Python
|
three_d_resnet_builder/builder.py
|
thauptmann/3D-ResNet-for-Keras
|
ac1b8b3d0032c9af832cc945bc57a63106366e54
|
[
"MIT"
] | 4
|
2021-05-23T09:30:40.000Z
|
2021-12-29T16:14:46.000Z
|
three_d_resnet_builder/builder.py
|
thauptmann/3D-ResNet-for-Keras
|
ac1b8b3d0032c9af832cc945bc57a63106366e54
|
[
"MIT"
] | 3
|
2021-06-24T09:26:58.000Z
|
2022-01-06T11:01:59.000Z
|
three_d_resnet_builder/builder.py
|
thauptmann/3D-ResNet-for-Keras
|
ac1b8b3d0032c9af832cc945bc57a63106366e54
|
[
"MIT"
] | 3
|
2021-06-07T18:11:34.000Z
|
2021-12-22T01:57:03.000Z
|
from . import three_D_resnet
from .kernel import get_kernel_to_name
def build_three_d_resnet(input_shape, output_shape, repetitions, output_activation, regularizer=None,
squeeze_and_excitation=False, use_bottleneck=False, kernel_size=3, kernel_name='3D'):
"""Return a full customizable resnet.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param repetitions: Define the repetitions of the Residual Blocks e.g. (2, 2, 2, 2) for ResNet-18
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Define the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation: Activate or deactivate SE-Paths.
:param use_bottleneck: Activate bottleneck layers. Recommended for networks with many layers.
:param kernel_size: Set the kernel size. Don't need to be changes in almost all cases. It's just exist for
customization purposes.
:param kernel_name:
:return: Return the built network.
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, repetitions, output_activation,
regularizer, squeeze_and_excitation, use_bottleneck, kernel_size,
kernel=conv_kernel)
def build_three_d_resnet_18(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_18.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-18
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (2, 2, 2, 2),
regularizer, squeeze_and_excitation, kernel=conv_kernel)
def build_three_d_resnet_34(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_34.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-34
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 6, 3),
regularizer, squeeze_and_excitation, kernel=conv_kernel)
def build_three_d_resnet_50(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_50.
:param input_shape: The input shape of the network as (frames, height, width, channels)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-50
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 6, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
def build_three_d_resnet_102(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_102.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-102
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 23, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
def build_three_d_resnet_152(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
""" Return a customizable resnet_152
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-152
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 8, 36, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
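# A minimal usage sketch (hypothetical shapes; requires this package and its
# Keras backend installed): a ResNet-18 for 16-frame 112x112 RGB clips with
# 10 softmax classes.
if __name__ == '__main__':
    model = build_three_d_resnet_18(
        input_shape=(16, 112, 112, 3),
        output_shape=10,
        output_activation='softmax',
    )
    print(model)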
| 57.867257
| 115
| 0.692002
|
e301076532db001f5790d94584e7f5e4d2165387
| 1,198
|
py
|
Python
|
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
from matplotlib import use
use('WXAgg')
import pylab as plt
import numpy as np
plt.figure(figsize=(8,5))
ax = plt.subplot(111)
fil = './spectrum_GOME.out'
data = np.loadtxt(fil)
y = data[:,1]
x = data[:,0]
pl_list = []
pl, = ax.plot(x,y,'r')
pl_list.append(pl)
y = 10*data[:,3]
pl, = ax.plot(x,y,'b')
pl_list.append(pl)
#plt.xlim([425,450])
#plt.ylim([0,2000])
plt.ylabel(r"Radiation (photons/(s cm$^2$ nm))", fontsize = 12)
plt.xlabel(r"Wavelength (nm)", fontsize = 12)
from matplotlib.legend import Legend
l0 = Legend(ax, pl_list[0:1], ('Solar irradiance',), loc=(0.1,0.85))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
l0 = Legend(ax, pl_list[1:2], ('Earth shine (multiplied by 10)',), loc=(0.1,0.75))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
#plt.show()
plt.savefig('spectrum_GOME.png')
| 26.622222
| 83
| 0.656093
|
e3018352709a236201cb1c03963553b833bc04b2
| 569
|
py
|
Python
|
pepdb/tasks/migrations/0026_auto_20171031_0153.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 7
|
2015-12-21T03:52:46.000Z
|
2020-07-24T19:17:23.000Z
|
pepdb/tasks/migrations/0026_auto_20171031_0153.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 12
|
2016-03-05T18:11:05.000Z
|
2021-06-17T20:20:03.000Z
|
pepdb/tasks/migrations/0026_auto_20171031_0153.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 4
|
2016-07-17T20:19:38.000Z
|
2021-03-23T12:47:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-30 23:53
from __future__ import unicode_literals
from django.db import migrations
from tasks.models import BeneficiariesMatching
| 22.76
| 52
| 0.70123
|
e302119a1e26db2aa7e3d9148ce46b0ec243f446
| 24,156
|
py
|
Python
|
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | null | null | null |
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | 59
|
2018-03-02T03:08:22.000Z
|
2018-03-11T01:43:02.000Z
|
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | null | null | null |
"""
An AWS Python3+Flask web app.
"""
from flask import Flask, redirect, url_for, request, session, flash, get_flashed_messages, render_template, escape
from flask_oauthlib.client import OAuth
import boto3,botocore
import jinja2
from boto3.dynamodb.conditions import Key, Attr
import urllib.request
import json
import cgi
import time
import random
import sys
from configLoader import ConfigLoader
from googleOAuthManager import GoogleOAuthManager
from data.session import SessionManager
from data import query, schema
from forms import CreateThreadForm, CreateCommentForm
import inspect
from werkzeug.utils import secure_filename
import uuid
import os
###############################################################################
#FLASK CONFIG
###############################################################################
# This is the EB application, calling directly into Flask
application = Flask(__name__)
# Loads config from file or environment variable
config = ConfigLoader("config.local.json")
# Enable encrypted session, required for OAuth to stick
application.secret_key = config.get("sessionSecret")
#used for form validation
application.config["SECRET_KEY"]=config.get("sessionSecret")
# Set up service handles
botoSession = boto3.Session(
aws_access_key_id = config.get("accessKey"),
aws_secret_access_key = config.get("secretKey"),
aws_session_token=None,
region_name = config.get("region"),
botocore_session=None,
profile_name=None)
dynamodb = botoSession.resource('dynamodb')
s3 = botoSession.resource('s3')
authCacheTable = dynamodb.Table('person-attribute-table')
# Example: bucket = s3.Bucket('elasticbeanstalk-us-west-2-3453535353')
# OAuth setup
authManager = GoogleOAuthManager(
flaskApp = application,
clientId = config.get("oauthClientId"),
clientSecret = config.get("oauthClientSecret"))
#This is the Upload requirement section
bucket = s3.Bucket('condensation-forum')
bucket_name = 'condensation-forum'
s3client = boto3.client(
"s3",
aws_access_key_id=config.get("accessKey"),
aws_secret_access_key=config.get("secretKey")
)
#database connection
dataSessionMgr = SessionManager(
config.get("dbUser"),
config.get("dbPassword"),
config.get("dbEndpoint"))
# Load up Jinja2 templates
templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
#pass in library functions to jinja, isn't python terrifying?
#we want to zip collections in view
templateEnv.globals.update(zip=zip)
#we also want to view our flashed messages
templateEnv.globals.update(get_flashed_messages=get_flashed_messages)
#generate urls for buttons in the view
templateEnv.globals.update(url_for=url_for)
bodyTemplate = templateEnv.get_template("body.html")
bodySimpleTemplate = templateEnv.get_template("body-simple.html")
homeTemplate = templateEnv.get_template("home.html")
threadTemplate = templateEnv.get_template("thread.html")
editThreadTemplate = templateEnv.get_template("edit-thread.html")
editCommentTemplate = templateEnv.get_template("edit-comment.html")
fileManagerTemplate = templateEnv.get_template("file-manager.html")
fileListTemplate = templateEnv.get_template("file-list.html")
sharedJavascript = templateEnv.get_template("shared.js")
###############################################################################
#END CONFIG
###############################################################################
# Run Flask app now
if __name__ == "__main__":
# Enable debug output, disable in prod
application.debug = True
application.run()
| 36.711246
| 123
| 0.638475
|
e30514bdd0f30538d4ed999ec163ad0e47c028b6
| 186
|
py
|
Python
|
CA3/news_test.py
|
aadyajha12/Covid19-SmartAlarm
|
911fe819cff6ef792f14b7dd48cbbb2c73f2405d
|
[
"MIT"
] | 1
|
2021-03-11T11:57:19.000Z
|
2021-03-11T11:57:19.000Z
|
CA3/news_test.py
|
aadyajha12/Covid19-SmartAlarm
|
911fe819cff6ef792f14b7dd48cbbb2c73f2405d
|
[
"MIT"
] | null | null | null |
CA3/news_test.py
|
aadyajha12/Covid19-SmartAlarm
|
911fe819cff6ef792f14b7dd48cbbb2c73f2405d
|
[
"MIT"
] | null | null | null |
import json
from newsapi import covid_news
| 26.571429
| 47
| 0.698925
|
e30656fdcf081203a75edc6af8dad04320307e06
| 390
|
py
|
Python
|
2015/02/fc_2015_02_10.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | null | null | null |
2015/02/fc_2015_02_10.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | 1
|
2015-04-27T01:43:45.000Z
|
2015-04-27T01:43:45.000Z
|
2015/02/fc_2015_02_10.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# imports go here
import atexit
#
# Free Coding session for 2015-02-10
# Written by Matt Warren
#
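def clean_up():
    # Minimal stand-in for the elided exit handler registered below.
    print('cleaning up')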
if __name__ == '__main__':
atexit.register(clean_up)
try:
import time
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
| 14.444444
| 36
| 0.623077
|
e3073fdd2f59dca010998232729affa0626a74d8
| 3,133
|
py
|
Python
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2016-10-08T09:01:05.000Z
|
2016-10-08T09:01:05.000Z
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2019-09-24T09:56:52.000Z
|
2019-09-24T09:56:52.000Z
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" apscheduler. """
import subprocess
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
from datetime import date, datetime, timedelta
import os
import shelve
import zmq
from core.config.settings import logger
daemon = ScheduleDaemon()
daemon.start()
| 25.892562
| 73
| 0.531759
|
e307995e7666610653ffb5c496c1cf1dfe8feab6
| 897
|
py
|
Python
|
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | 1
|
2021-04-01T21:21:23.000Z
|
2021-04-01T21:21:23.000Z
|
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
import warnings
from .base import TorchFramework
from .dqn import DQN
from .dqn_per import DQNPer
from .rainbow import RAINBOW
from .ddpg import DDPG
from .hddpg import HDDPG
from .td3 import TD3
from .ddpg_per import DDPGPer
from .a2c import A2C
from .a3c import A3C
from .ppo import PPO
from .sac import SAC
from .maddpg import MADDPG
try:
from .apex import DQNApex, DDPGApex
from .impala import IMPALA
from .ars import ARS
except ImportError as _:
warnings.warn(
"Failed to import algorithms relying on torch.distributed." " Set them to None."
)
DQNApex = None
DDPGApex = None
IMPALA = None
ARS = None
__all__ = [
"TorchFramework",
"DQN",
"DQNPer",
"RAINBOW",
"DDPG",
"HDDPG",
"TD3",
"DDPGPer",
"A2C",
"A3C",
"PPO",
"SAC",
"DQNApex",
"DDPGApex",
"IMPALA",
"ARS",
"MADDPG",
]
| 16.924528
| 88
| 0.637681
|
e3079c30e7e32fd20e5ad106e7daf8c8a6a94f80
| 575
|
py
|
Python
|
apps/paper/migrations/0008_alter_paper_course.py
|
godetaph/uresearch
|
fb23cb0fe07f8b434b9c46f80b5b43030a3d5323
|
[
"MIT"
] | null | null | null |
apps/paper/migrations/0008_alter_paper_course.py
|
godetaph/uresearch
|
fb23cb0fe07f8b434b9c46f80b5b43030a3d5323
|
[
"MIT"
] | null | null | null |
apps/paper/migrations/0008_alter_paper_course.py
|
godetaph/uresearch
|
fb23cb0fe07f8b434b9c46f80b5b43030a3d5323
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-24 02:31
from django.db import migrations, models
import django.db.models.deletion
| 27.380952
| 157
| 0.653913
|
e308a4fb297dc8f9348bbe1730683c0c197aa336
| 2,925
|
py
|
Python
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""The hashers CLI arguments helper."""
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
manager.ArgumentHelperManager.RegisterHelper(HashersArgumentsHelper)
| 36.111111
| 80
| 0.699487
|
e308f94d9774663e111da5671ce07f0ce2dd542e
| 20,297
|
py
|
Python
|
tutorials/create_sakila/migrations/0001_initial.py
|
MeGustas-5427/SQL_Tutorials
|
627372c2d5d8656d72645830c9a1fae1df278fc7
|
[
"Apache-2.0"
] | 13
|
2020-11-05T04:22:51.000Z
|
2022-02-27T08:44:50.000Z
|
tutorials/create_sakila/migrations/0001_initial.py
|
MeGustas-5427/SQL_Tutorials
|
627372c2d5d8656d72645830c9a1fae1df278fc7
|
[
"Apache-2.0"
] | null | null | null |
tutorials/create_sakila/migrations/0001_initial.py
|
MeGustas-5427/SQL_Tutorials
|
627372c2d5d8656d72645830c9a1fae1df278fc7
|
[
"Apache-2.0"
] | 2
|
2020-11-10T10:01:20.000Z
|
2021-04-07T02:33:29.000Z
|
# Generated by Django 3.1.5 on 2021-01-11 08:07
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
import utils.models
| 58.157593
| 3,672
| 0.564862
|
e30dad35391d44bf3295ac9fde3a87c8c67a561f
| 2,098
|
py
|
Python
|
ncrf_to_bed.py
|
makovalab-psu/NoiseCancellingRepeatFinder
|
b24732ae73a4cef431277664ad4193a0638758c1
|
[
"MIT"
] | 16
|
2019-03-30T05:15:53.000Z
|
2022-01-28T15:20:06.000Z
|
ncrf_to_bed.py
|
makovalab-psu/NoiseCancellingRepeatFinder
|
b24732ae73a4cef431277664ad4193a0638758c1
|
[
"MIT"
] | 8
|
2019-04-04T19:46:08.000Z
|
2020-11-18T15:11:53.000Z
|
ncrf_to_bed.py
|
makovalab-psu/NoiseCancellingRepeatFinder
|
b24732ae73a4cef431277664ad4193a0638758c1
|
[
"MIT"
] | 6
|
2019-10-05T05:16:00.000Z
|
2021-01-28T10:07:49.000Z
|
#!/usr/bin/env python
"""
Convert the output of Noise Cancelling Repeat Finder to bed format.
"""
from sys import argv,stdin,stdout,stderr,exit
from os import path as os_path
from ncrf_parse import alignments,parse_noise_rate
if __name__ == "__main__": main()
| 29.549296
| 76
| 0.605815
|
e30fa4b4018e2cb629164838090fb39449877a74
| 2,551
|
py
|
Python
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 18
|
2020-02-03T07:14:40.000Z
|
2021-12-20T18:45:43.000Z
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 11
|
2020-01-28T23:16:25.000Z
|
2022-02-10T01:04:56.000Z
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/REST
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 2
|
2020-08-20T08:15:09.000Z
|
2021-02-23T07:30:40.000Z
|
# Copyright (c) 2018-present, Royal Bank of Canada.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import numpy as np
import torch
import torchvision.transforms.functional as F
from advertorch.utils import torch_allclose
from advertorch.utils import CIFAR10_MEAN
from advertorch.utils import CIFAR10_STD
from advertorch.utils import MNIST_MEAN
from advertorch.utils import MNIST_STD
from advertorch.utils import NormalizeByChannelMeanStd
from advertorch.utils import PerImageStandardize
from advertorch_examples.utils import bchw2bhwc
from advertorch_examples.utils import bhwc2bchw
| 30.73494
| 74
| 0.717758
|
e30ff60533abef30a592ebe83ada7b1e9f61003f
| 5,595
|
py
|
Python
|
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
from __future__ import division
import itertools
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
import random
from random import shuffle
from collections import Counter
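def flatten_list(somelist):
    # Minimal stand-in for an elided helper: flatten one level of nesting,
    # passing through items that are not themselves lists.
    flat = []
    for item in somelist:
        if isinstance(item, list):
            flat.extend(item)
        else:
            flat.append(item)
    return flat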
def term_frequency(somelist):
"""Returns the term frequency of each unique token in the term list"""
somelist = flatten_list(somelist)
term_freqs = dict(Counter(somelist))
return term_freqs
def h_tag_nodes(somelist):
"""
    Tag tokens in a processed list as either autosemantic (fast) or synsemantic (slow).
"""
fast = fast_h(somelist)
fasth = [(word, {'h':'syns'}) for (word, rank) in fast]
slow = slow_h(somelist)
slowh = [(word, {'h':'auto'}) for (word,rank) in slow]
h_tags = fasth + slowh
return h_tags
def extract_fast_h(list_of_cycle_length_freqs, cycles):
"""
    This is specifically designed to extract lists from lists by comparing the length
    of each nested list to the most frequent cycle lengths found by the fast_h method.
"""
fh = [key for (key, (val1, val2)) in fast_h(list_of_cycle_length_freqs)]
fast_cycles = [cycle for cycle in cycles if len(cycle) in fh]
return fast_cycles
def extract_slow_h(list_of_cycle_length_freqs, cycles):
"""
    This is specifically designed to extract lists from lists by comparing the length
    of each nested list to the most frequent cycle lengths found by the slow_h method.
"""
sh = [key for (key, (val1, val2)) in slow_h(list_of_cycle_length_freqs)]
slow_cycles = [cycle for cycle in cycles if len(cycle) in sh]
return slow_cycles
| 32.719298
| 94
| 0.557283
|
e3106531f1b9e6f9266ac05f2587a787cfc4e699
| 1,316
|
py
|
Python
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | 3
|
2018-01-18T12:25:38.000Z
|
2020-03-19T13:19:31.000Z
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | 4
|
2017-04-24T16:36:59.000Z
|
2017-05-11T11:23:44.000Z
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | null | null | null |
from PyQt5.QtCore import pyqtSlot
from channels.channel import Channel
from operators.base import OutputOperator
import numpy as np
| 34.631579
| 105
| 0.575988
|
e31093c826bcdc408129c3db911766a20c8f8973
| 524
|
py
|
Python
|
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
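class Solution:
    # Minimal stand-in for the elided class so the snippet below runs.
    def isPowerOfTwo(self, n):
        # A positive integer is a power of two iff exactly one bit is set.
        return n > 0 and n & (n - 1) == 0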
if "__main__" == __name__:
solution = Solution()
n = 1025
res = solution.isPowerOfTwo(n)
print(res)
| 15.878788
| 34
| 0.412214
|
e310a6a628079388cd4034e0733f019c20a04124
| 308
|
py
|
Python
|
yak/rest_social_auth/utils.py
|
johnchuks/YAK-server
|
910af81a7b23e88585479131886c627e33163de1
|
[
"MIT"
] | 15
|
2015-10-10T07:56:23.000Z
|
2021-07-26T14:39:17.000Z
|
yak/rest_social_auth/utils.py
|
johnchuks/YAK-server
|
910af81a7b23e88585479131886c627e33163de1
|
[
"MIT"
] | 26
|
2015-01-06T00:43:50.000Z
|
2018-10-29T03:12:09.000Z
|
yak/rest_social_auth/utils.py
|
johnchuks/YAK-server
|
910af81a7b23e88585479131886c627e33163de1
|
[
"MIT"
] | 8
|
2015-09-28T14:47:52.000Z
|
2018-02-09T18:53:53.000Z
|
from celery.task import task
from django.conf import settings
from social_core.backends.utils import get_backend
| 30.8
| 86
| 0.834416
|
e312667320932a26f8caa618268190a0a7f675cc
| 7,753
|
py
|
Python
|
filepath/NuclearCMC_raw_data_file_list.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
filepath/NuclearCMC_raw_data_file_list.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
filepath/NuclearCMC_raw_data_file_list.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
#pathList = [
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_092220_tensile7_T700_240mic/raw/20160512_092220_tensile7_T700_240mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_085327_tensile7_T700_200mic/raw/20160512_085327_tensile7_T700_200mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_083018_tensile7_T700_140mic/raw/20160512_083018_tensile7_T700_140mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_080231_tensile7_T700_100mic/raw/20160512_080231_tensile7_T700_100mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_073647_tensile7_T700_060mic/raw/20160512_073647_tensile7_T700_060mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_071026_tensile7_T700_040mic/raw/20160512_071026_tensile7_T700_040mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_064307_tensile7_T700_020mic/raw/20160512_064307_tensile7_T700_020mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_061643_tensile7_T700_baseline1/raw/20160512_061643_tensile7_T700_baseline1.h5",
#....
fileListALL = [
"20131023_095305_TRISO_Shell",
"20131023_100150_TRISO_Shell",
"20131023_100319_TRISO_Shell",
"20131023_111954_TRISO_Shell_2",
"20131023_160337_TRISO_Shell_2",
"20131023_165529_SiC-SiC_fiber1",
"20131023_173205_SiC-SiC_fiber1_LuAG",
"20160427_134903_T3_scan1_5lb",
"20160427_141044_T3_scan2_12lb",
"20160427_143230_T3_scan2_13lb",
"20160427_145232_T3_scan4_broken",
"20160427_155707_T2_scan1_10x_RT_broken",
"20160427_181029_T5_scan1_10x_RT",
"20160427_194922_T5_scan2_10x_700C_lowload",
"20160427_200524_T5_scan3_10x_700C_50um",
"20160427_201945_T5_scan4_10x_700C_100um",
"20160427_203402_T5_scan5_10x_700C_150um",
"20160427_204733_T5_scan6_10x_700C_180um",
"20160427_210036_T5_scan7_10x_700C_205um",
"20160427_211351_T5_scan8_10x_700C_240um",
"20160427_212644_T5_scan9_10x_700C_290um",
"20160427_214157_T5_scan10_10x_700C_340um",
"20160427_215424_T5_scan11_10x_700C_380um_break",
#"20160428_094431_test",
#"20160429_105847_test",
"20160511_114747_tensile6_RT_scan0",
"20160511_120731_tensile6_RT_scan1",
"20160511_124156_tensile6_RT_scan2",
"20160511_133259_tensile6_RT_scan3",
"20160511_141112_tensile6_RT_scan4",
"20160511_144829_tensile6_RT_scan5",
"20160511_152620_tensile6_RT_scan6",
"20160511_155912_tensile6_RT_scan7",
"20160511_164956_tensile6_RT_automation",
"20160511_171448_tensile6_RT_automation",
"20160511_173948_tensile6_RT_automation",
"20160511_180448_tensile6_RT_automation",
"20160511_182950_tensile6_RT_automation",
"20160511_185451_tensile6_RT_automation",
"20160511_191955_tensile6_RT_automation",
"20160511_194454_tensile6_RT_automation",
"20160511_210757_tensile9_T1000_baseline1",
"20160511_212851_tensile9_T1000_baseline2",
"20160511_215551_tensile6_RT_automation",
"20160511_222053_tensile6_RT_automation",
"20160511_224554_tensile6_RT_automation",
"20160511_231059_tensile6_RT_automation",
"20160511_233557_tensile6_RT_automation",
"20160512_000100_tensile6_RT_automation",
"20160512_002605_tensile6_RT_automation",
"20160512_005106_tensile6_RT_automation",
"20160512_011607_tensile6_RT_automation",
"20160512_014105_tensile6_RT_automation",
"20160512_020605_tensile6_RT_automation",
"20160512_023115_tensile6_RT_automation",
"20160512_025622_tensile6_RT_automation",
"20160512_032120_tensile6_RT_automation",
"20160512_034618_tensile6_RT_automation",
"20160512_041123_tensile6_RT_automation",
"20160512_061643_tensile7_T700_baseline1",
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic",
"20160512_073647_tensile7_T700_060mic",
"20160512_080231_tensile7_T700_100mic",
"20160512_083018_tensile7_T700_140mic",
"20160512_085327_tensile7_T700_200mic",
"20160512_092220_tensile7_T700_240mic",
"20160915_111315_filename",
"20160915_115049_TowA_10x_testrun",
"20160915_123154_TowA_10x_testrun2",
"20160915_125446_TowA_10x_testrun2",
"20160915_132337_TowA_10x_testrun3",
"20160915_133622_TowA_5x_testrun4",
"20160915_135147_TowA_5x_testrun5",
"20160915_140821_TowA_5x_testrun6",
"20160915_143002_TowA_5x_testrun7",
"20160915_145626_TowA_5x_testrun8",
"20160915_151537_TowA_10x_baseload",
"20160915_153039_TowA_10x_10um",
"20160915_154304_TowA_10x_20um",
"20160915_155844_TowA_10x_50um",
"20160915_161315_TowA_10x_90um",
"20160915_163009_TowA_10x_120um",
"20160915_164534_TowA_10x_150um",
"20160915_170105_TowA_10x_190um",
"20160915_171946_TowA_10x_240um",
"20160915_182720_TowB_10x_baseload",
"20160915_191935_TowB_10x_baseload",
"20160915_194458_TowB_10x_20um",
"20160915_195935_TowB_10x_automation",
"20160915_201303_TowB_10x_automation",
"20160915_202619_TowB_10x_automation",
"20160915_204037_TowB_10x_automation",
"20160915_205552_TowB_10x_automation",
"20160915_211209_TowB_10x_automation",
"20160915_212622_TowB_10x_automation",
"20160915_213947_TowB_10x_automation",
"20160915_222012_TowC_5x_baseload_RT",
"20160915_230717_TowC_5x_automated",
"20160915_231816_TowC_5x_automated",
"20160915_232910_TowC_5x_automated",
"20160915_234856_TowC_5x_automated",
"20160916_000349_TowC_5x_automated",
"20160916_013821_TowD_5x_baseload_RT",
"20160916_020612_TowD_5x_automation",
"20160916_021651_TowD_5x_automation",
"20160916_022742_TowD_5x_automation",
"20160916_023832_TowD_5x_automation",
"20160916_025102_TowD_5x_automation",
"20160916_030236_TowD_5x_automation"
]
fileListShort = [
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic",
"20160512_073647_tensile7_T700_060mic",
"20160512_080231_tensile7_T700_100mic",
"20160512_083018_tensile7_T700_140mic",
"20160512_085327_tensile7_T700_200mic",
"20160512_092220_tensile7_T700_240mic"
]
fileListTEST = [
"20160512_061643_tensile7_T700_baseline1",
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic"]
fileList20160915 = [
#"20160915_111315_filename",
#"20160915_115049_TowA_10x_testrun",
#"20160915_123154_TowA_10x_testrun2",
#"20160915_125446_TowA_10x_testrun2",
#"20160915_132337_TowA_10x_testrun3",
#"20160915_133622_TowA_5x_testrun4",
#"20160915_135147_TowA_5x_testrun5",
#"20160915_140821_TowA_5x_testrun6",
#"20160915_143002_TowA_5x_testrun7",
#"20160915_145626_TowA_5x_testrun8",
#"20160915_151537_TowA_10x_baseload",
"20160915_153039_TowA_10x_10um",
"20160915_154304_TowA_10x_20um",
"20160915_155844_TowA_10x_50um",
"20160915_161315_TowA_10x_90um",
"20160915_163009_TowA_10x_120um",
"20160915_164534_TowA_10x_150um",
"20160915_170105_TowA_10x_190um",
"20160915_171946_TowA_10x_240um",
"20160915_182720_TowB_10x_baseload",
"20160915_191935_TowB_10x_baseload",
"20160915_194458_TowB_10x_20um",
"20160915_195935_TowB_10x_automation",
"20160915_201303_TowB_10x_automation",
"20160915_202619_TowB_10x_automation",
"20160915_204037_TowB_10x_automation",
"20160915_205552_TowB_10x_automation",
"20160915_211209_TowB_10x_automation",
"20160915_212622_TowB_10x_automation",
"20160915_213947_TowB_10x_automation",
"20160915_222012_TowC_5x_baseload_RT",
"20160915_230717_TowC_5x_automated",
"20160915_231816_TowC_5x_automated",
"20160915_232910_TowC_5x_automated",
"20160915_234856_TowC_5x_automated",
"20160916_000349_TowC_5x_automated",
"20160916_013821_TowD_5x_baseload_RT",
"20160916_020612_TowD_5x_automation",
"20160916_021651_TowD_5x_automation",
"20160916_022742_TowD_5x_automation",
"20160916_023832_TowD_5x_automation",
"20160916_025102_TowD_5x_automation",
"20160916_030236_TowD_5x_automation"
]
fileList = fileList20160915
| 40.591623
| 159
| 0.864569
|
e312d0f86ad81db6700f196a91af6d00bac33137
| 3,870
|
py
|
Python
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 6
|
2020-11-29T08:04:07.000Z
|
2021-05-07T11:05:10.000Z
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 139
|
2020-11-24T23:37:03.000Z
|
2022-03-30T00:18:09.000Z
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 1
|
2021-02-01T15:07:17.000Z
|
2021-02-01T15:07:17.000Z
|
import asyncio
import json
import discord
from discord.ext import commands, tasks
from discal.bot import Bot
from datetime import datetime, timedelta
from discal.logger import get_module_logger
logger = get_module_logger(__name__)
| 36.168224
| 100
| 0.496382
|
e312d4733d2d6ab5dadd53371794d5b4269ec969
| 2,738
|
py
|
Python
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2019-12-17T08:59:57.000Z
|
2022-01-09T19:52:27.000Z
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 3
|
2020-08-13T16:05:46.000Z
|
2021-10-17T07:49:33.000Z
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2017-06-14T23:41:50.000Z
|
2021-03-01T18:54:03.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2015 David I. Urbina, david.urbina@utdallas.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Ethernet/IP Common Packet Format Scapy dissector."""
import struct
from scapy import all as scapy_all
from . import utils
scapy_all.bind_layers(CPF_AddressDataItem, CPF_SequencedAddressItem, type_id=0x8002)
| 36.506667
| 106
| 0.685172
|
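The bind_layers call in the record above references CPF item classes whose definitions were lost in extraction. A hedged sketch of what such scapy packet classes look like (field names and widths are assumptions, not the project's actual layout):
class CPF_AddressDataItem(scapy_all.Packet):
    name = "CPF_AddressDataItem"
    fields_desc = [
        scapy_all.LEShortField("type_id", 0),
        scapy_all.LEShortField("length", 0),
    ]
class CPF_SequencedAddressItem(scapy_all.Packet):
    name = "CPF_SequencedAddressItem"
    fields_desc = [
        scapy_all.LEIntField("connection_id", 0),
        scapy_all.LEIntField("sequence_number", 0),
    ]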
e314ca5cb9348b5a95152247da6288de4e244796
| 1,103
|
py
|
Python
|
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | 2
|
2020-01-27T11:58:54.000Z
|
2020-03-30T10:54:08.000Z
|
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | null | null | null |
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | null | null | null |
n = int(input())
words = [''] * n
for i in range(n):
words[i] = input()
# merge_sort(words)
def insertion_sort(a, n):
    # Assumed implementation (not in source): in-place insertion sort.
    for i in range(1, n):
        key = a[i]
        j = i - 1
        while j >= 0 and a[j] > key:
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = key
insertion_sort(words, n)
for w in words:
print(w)
| 19.350877
| 55
| 0.44243
|
e31548410089b175367898405bf5be3d08d7b387
| 418
|
py
|
Python
|
electionleaflets/apps/content/models.py
|
electionleaflets/electionleaflets
|
4110e96a3035c32d0b6ff3c9f832c5e003728170
|
[
"MIT"
] | null | null | null |
electionleaflets/apps/content/models.py
|
electionleaflets/electionleaflets
|
4110e96a3035c32d0b6ff3c9f832c5e003728170
|
[
"MIT"
] | 23
|
2015-02-19T14:02:23.000Z
|
2015-04-30T11:14:01.000Z
|
electionleaflets/apps/content/models.py
|
electionleaflets/electionleaflets
|
4110e96a3035c32d0b6ff3c9f832c5e003728170
|
[
"MIT"
] | 2
|
2015-02-02T19:39:54.000Z
|
2017-02-08T09:19:53.000Z
|
from django.db import models
| 24.588235
| 79
| 0.662679
|
e316c0dee9255d1c94a21d0fb077092ad8593724
| 162
|
py
|
Python
|
Python/1017.py
|
lucasferreiraa/uri-judge-respostas
|
f5fc659d53c6b512a3624764041675e62d3fa053
|
[
"MIT"
] | null | null | null |
Python/1017.py
|
lucasferreiraa/uri-judge-respostas
|
f5fc659d53c6b512a3624764041675e62d3fa053
|
[
"MIT"
] | null | null | null |
Python/1017.py
|
lucasferreiraa/uri-judge-respostas
|
f5fc659d53c6b512a3624764041675e62d3fa053
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# URI Judge - Problema 1017
tempo = int(input())
velocidade = int(input())
litros = (velocidade / 12.0) * tempo
print("%.3f" % litros)
| 16.2
| 36
| 0.604938
|
e316f4ba8d78958af8ea71861f55f56a0c25786e
| 765
|
py
|
Python
|
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
'''
Date: 2021-08-10 17:17:35
LastEditors: Liuliang
LastEditTime: 2021-08-10 18:27:56
Description:
'''
import random
import sys
sys.path.append("..")
from bacic_module.random_int_list import random_int_list
def qks(a, lo, hi):
    # Assumed implementation (not in source): in-place Lomuto quicksort.
    if lo >= hi:
        return
    pivot = a[hi]
    i = lo
    for j in range(lo, hi):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i], a[hi] = a[hi], a[i]
    qks(a, lo, i - 1)
    qks(a, i + 1, hi)
c = random_int_list(0, 10, 10)
print(c)
p = qks(c, 0, len(c) - 1)
print(c)
| 21.25
| 56
| 0.605229
|
e318e94372f3438841131a8e520812b4b488dc1f
| 2,144
|
py
|
Python
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/CyclopsVFX-Unity
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 17
|
2017-06-27T04:14:42.000Z
|
2022-03-07T03:37:44.000Z
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/Cyclops-VFX
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 2
|
2017-06-14T04:17:51.000Z
|
2018-08-23T20:12:44.000Z
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/CyclopsVFX-Unity
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 2
|
2019-03-18T06:18:33.000Z
|
2019-08-14T21:07:53.000Z
|
import os
import json
create_json_CYC_envs("/home/geoff/Dropbox")
| 39.703704
| 65
| 0.58722
|
e31a51d9bad6493d50583997c938e58165b7c257
| 956
|
py
|
Python
|
tests/some_test.py
|
ShashkovS/drawzero
|
3722b2fccb655779b6b62e97b1584683413d7fc0
|
[
"MIT"
] | 2
|
2020-08-06T09:51:43.000Z
|
2020-08-06T10:03:58.000Z
|
tests/some_test.py
|
ShashkovS/drawzero
|
3722b2fccb655779b6b62e97b1584683413d7fc0
|
[
"MIT"
] | null | null | null |
tests/some_test.py
|
ShashkovS/drawzero
|
3722b2fccb655779b6b62e97b1584683413d7fc0
|
[
"MIT"
] | null | null | null |
import unittest
import drawzero
################################################################################
if __name__ == "__main__":
unittest.main()
| 38.24
| 80
| 0.614017
|
e31af962393b8a7c27bf698791ef898144c732f5
| 4,143
|
py
|
Python
|
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from fedot.api.api_utils.api_data import ApiDataProcessor
from fedot.api.api_utils.api_data_analyser import DataAnalyser
from fedot.api.main import Fedot
from fedot.core.data.data import InputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import TaskTypesEnum, Task
from fedot.preprocessing.preprocessing import DataPreprocessor
from test.unit.api.test_main_api import composer_params
def get_data_analyser_with_specific_params(max_size=18, max_cat_cardinality=5):
""" Create a DataAnalyser object with small max dataset size and small max cardinality for categorical features"""
safety_module = DataAnalyser(safe_mode=True)
preprocessor = ApiDataProcessor(Task(TaskTypesEnum.classification))
safety_module.max_size = max_size
safety_module.max_cat_cardinality = max_cat_cardinality
return safety_module, preprocessor
def get_small_cat_data():
""" Generate tabular data with categorical features."""
features = np.array([["a", "qq", 0.5],
["b", "pp", 1],
["c", np.nan, 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3]], dtype=object)
target = np.array([0, 0, 0, 0, 1, 1, 1, 1])
input_data = InputData(idx=np.arange(features.shape[0]),
features=features, target=target,
data_type=DataTypesEnum.table,
task=Task(TaskTypesEnum.classification))
input_data = DataPreprocessor().obligatory_prepare_for_fit(input_data)
return input_data
def test_safety_label_correct():
"""
Check if cutting and label encoding is used for pseudo large data with categorical features with high cardinality
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params()
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] <= api_safety.max_size
assert data.features.shape[1] == 3
assert data.features[0, 0] != 'a'
def test_no_safety_needed_correct():
"""
Check if oneHot encoding is used for small data with small cardinality of categorical features
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params(max_size=100, max_cat_cardinality=100)
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] == 24
assert data.features.shape[1] == 3
assert data.features[0, 0] == 'a'
def test_api_fit_predict_with_pseudo_large_dataset_with_label_correct():
"""
Test if safe mode in API cut large data and use LabelEncoder for features with high cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_cat_cardinality = 5
model.data_analyser.max_size = 18
data = get_small_cat_data()
pipeline = model.fit(features=data, predefined_model='auto')
pipeline.predict(data)
model.predict(features=data)
# there should be only tree-like models + data operations
assert len(model.params.api_params['available_operations']) == 6
assert 'logit' not in model.params.api_params['available_operations']
def test_api_fit_predict_with_pseudo_large_dataset_with_onehot_correct():
"""
Test if safe mode in API use OneHotEncoder with small data with small cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_size = 1000
data = get_small_cat_data()
model.fit(features=data, predefined_model='auto')
model.predict(features=data)
# there should be all light models + data operations
assert 'logit' in model.params.api_params['available_operations']
| 42.71134
| 118
| 0.69901
|
e31bbe934af2c97028c0e66dc59a02ae268f0c31
| 7,765
|
py
|
Python
|
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | null | null | null |
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | 1
|
2018-08-26T03:01:18.000Z
|
2018-08-26T03:01:18.000Z
|
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | null | null | null |
from multiprocessing import cpu_count, Manager, Process
from time import sleep
| 34.665179
| 79
| 0.582228
|
e31cd77f7061ef13a9e31f26ee8ba9f374dfc272
| 9,781
|
py
|
Python
|
sfa/util/api.py
|
planetlab/sfa
|
d0f743e245e0bb24d7ed1016bcc6e61d1e558a95
|
[
"MIT"
] | 1
|
2015-11-19T13:34:45.000Z
|
2015-11-19T13:34:45.000Z
|
sfa/util/api.py
|
planetlab/sfa
|
d0f743e245e0bb24d7ed1016bcc6e61d1e558a95
|
[
"MIT"
] | null | null | null |
sfa/util/api.py
|
planetlab/sfa
|
d0f743e245e0bb24d7ed1016bcc6e61d1e558a95
|
[
"MIT"
] | null | null | null |
#
# SFA XML-RPC and SOAP interfaces
#
import sys
import os
import traceback
import string
import xmlrpclib
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
from sfa.util.sfalogging import logger
from sfa.trust.auth import Auth
from sfa.util.config import *
from sfa.util.faults import *
from sfa.util.cache import Cache
from sfa.trust.credential import *
from sfa.trust.certificate import *
# See "2.2 Characters" in the XML specification:
#
# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# avoiding
# [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDDF]
invalid_xml_ascii = map(chr, range(0x0, 0x8) + [0xB, 0xC] + range(0xE, 0x1F))
xml_escape_table = string.maketrans("".join(invalid_xml_ascii), "?" * len(invalid_xml_ascii))
def xmlrpclib_escape(s, replace = string.replace):
"""
xmlrpclib does not handle invalid 7-bit control characters. This
function augments xmlrpclib.escape, which by default only replaces
'&', '<', and '>' with entities.
"""
# This is the standard xmlrpclib.escape function
s = replace(s, "&", "&")
s = replace(s, "<", "<")
s = replace(s, ">", ">",)
# Replace invalid 7-bit control characters with '?'
return s.translate(xml_escape_table)
def xmlrpclib_dump(self, value, write):
"""
xmlrpclib cannot marshal instances of subclasses of built-in
types. This function overrides xmlrpclib.Marshaller.__dump so that
any value that is an instance of one of its acceptable types is
marshalled as that type.
xmlrpclib also cannot handle invalid 7-bit control characters. See
above.
"""
# Use our escape function
args = [self, value, write]
if isinstance(value, (str, unicode)):
args.append(xmlrpclib_escape)
try:
# Try for an exact match first
f = self.dispatch[type(value)]
except KeyError:
# Try for an isinstance() match
for Type, f in self.dispatch.iteritems():
if isinstance(value, Type):
f(*args)
return
raise TypeError, "cannot marshal %s objects" % type(value)
else:
f(*args)
# You can't hide from me!
xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump
# SOAP support is optional
try:
import SOAPpy
from SOAPpy.Parser import parseSOAPRPC
from SOAPpy.Types import faultType
from SOAPpy.NS import NS
from SOAPpy.SOAPBuilder import buildSOAP
except ImportError:
SOAPpy = None
| 34.807829
| 112
| 0.618546
|
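As a side note on the record above: the same control-character scrubbing works in Python 3 with str.maketrans in place of the py2 string.maketrans. A minimal hedged sketch (character ranges copied from the source's invalid_xml_ascii):
invalid_ascii = [chr(c) for c in list(range(0x0, 0x8)) + [0xB, 0xC] + list(range(0xE, 0x1F))]
escape_table = str.maketrans({c: "?" for c in invalid_ascii})
def scrub(s: str) -> str:
    # Replace invalid 7-bit control characters with '?'
    return s.translate(escape_table)
print(scrub("ok\x02bad"))  # -> ok?bad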
e31d9fd874884c64a5cfd7e556213a44724536fb
| 9,507
|
py
|
Python
|
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | null | null | null |
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | 2
|
2016-05-16T19:54:26.000Z
|
2016-05-20T12:02:20.000Z
|
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | null | null | null |
__author__ = 'rknight'
import os
import csv
import logging
import datetime
from requests_futures.sessions import FuturesSession
def dlrequest(reports, dlkeys):
'''
Primary function to get data for a range of dates
Returns a dict. Structure should be:
{'outname': {'data': [all the data for this report with one list item per school],
'write': whether to write or append},
'second outname': {'data': [all the data for this report with one list item per key],
'write': whether to write or append},
etc
}
'''
session = FuturesSession(max_workers=10)
allreports = {}
futures = []
# This is run in background once the download is completed
def bg_call(sess, resp, outname):
    # Assumed callback (original body lost in extraction): tag the
    # response with the report it belongs to so results can be collected.
    resp.outname = outname
# Throw the requests at Deanslist
for ireport in reports:
outname = ireport['outname']
url = ireport['reporturl']
allreports[outname] = {'data': [], 'write': ireport.get('rewrite', 'w')}
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': ireport.get('pulldate', ''),
'edt': ireport.get('enddate', ''),
'apikey': dlkey},
background_callback=lambda sess, resp, outname=outname: bg_call(sess, resp, outname)))
# Parse errors in the results
for f in futures:
try:
f.result()
except:
logging.warning('{0}'.format(f.exception))
continue
return allreports
def dlrequest_single(reporturl, sdt, edt, dlkeys, session = FuturesSession(max_workers=5)):
"""
Request and write a single report for all schools for a date range
"""
alldat = []
futures = []
url = reporturl
# Throw the requests at Deanslist
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': sdt,
'edt': edt,
'apikey': dlkey}))
# Parse errors in the results
for f in futures:
try:
response = f.result()
except MemoryError:
logging.warning('Memory Error.')
if response.status_code != 200:
logging.warning('Response code {0} for {1}'.format(response.status_code, response.url))
continue
# Append results
dat = response.json()
alldat.extend(dat['data'])
return alldat
def writefile(outname, dataset, headers=None, rewrite='a'):
"""
Utility to write results to file
"""
if len(dataset) == 0:
logging.warning('No data for {0}'.format(outname))
return
# Make default headers
if not headers:
headers = sorted(list(dataset[0].keys()))
# Flag to write headers if its the first time
exists = os.path.isfile(outname)
# Write output
with open(outname, rewrite, encoding='utf-8') as file:
outfile = csv.DictWriter(file, headers, lineterminator='\n')
if not exists or rewrite == 'w':
outfile.writeheader()
for row in dataset:
outfile.writerow(row)
# Parse & write the incidents module, which has a unique json structure
| 31.376238
| 141
| 0.577154
|
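A hypothetical usage of dlrequest and writefile from the record above, following the dict structure documented in dlrequest's docstring (the URL, dates, and API keys below are placeholders):
reports = [{'outname': 'incidents.csv',
            'reporturl': 'https://example.deanslist.com/api/v1/incidents',
            'pulldate': '2016-05-01', 'enddate': '2016-05-31'}]
dlkeys = ['API_KEY_SCHOOL_A', 'API_KEY_SCHOOL_B']
allreports = dlrequest(reports, dlkeys)
for outname, report in allreports.items():
    writefile(outname, report['data'], rewrite=report['write'])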
e31da554e9612910aa7b87468de6e4101ac08273
| 7,210
|
py
|
Python
|
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api import util
| 27.414449
| 156
| 0.59251
|
e31e1e564d0eb470b1f222fdeb2e2e5813305ea2
| 28,531
|
py
|
Python
|
src/pte_decode/decoding/decoder_factory.py
|
richardkoehler/pte-decode
|
d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57
|
[
"MIT"
] | null | null | null |
src/pte_decode/decoding/decoder_factory.py
|
richardkoehler/pte-decode
|
d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57
|
[
"MIT"
] | null | null | null |
src/pte_decode/decoding/decoder_factory.py
|
richardkoehler/pte-decode
|
d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57
|
[
"MIT"
] | null | null | null |
"""Module for machine learning models."""
from dataclasses import dataclass
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from bayes_opt import BayesianOptimization
from catboost import CatBoostClassifier
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis,
)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score, log_loss
from sklearn.model_selection import GroupKFold, GroupShuffleSplit
# from sklearn.svm import SVC
from xgboost import XGBClassifier
from pte_decode.decoding.decoder_base import Decoder
def get_decoder(
classifier: str = "lda",
scoring: str = "balanced_accuracy",
balancing: Optional[str] = None,
optimize: bool = False,
) -> Decoder:
"""Create and return Decoder of desired type.
Parameters
----------
classifier : str
Allowed values for `classifier`: ["catboost", "dummy", "lda", "lr",
"qda", "xgb"].
scoring : str | None, default="balanced_accuracy"
Score to be calculated. Possible values:
["balanced_accuracy", "log_loss"].
balancing : str | None, default=None
Method for balancing skewed datasets. Possible values:
["oversample", "undersample", "balance_weights"].
Returns
-------
Decoder
Instance of Decoder given `classifier` and `balancing` method.
"""
classifiers = {
"catboost": CATB,
"dummy": Dummy,
"lda": LDA,
"lr": LR,
"qda": QDA,
# "svm_lin": SVC_Lin,
# "svm_poly": SVC_Poly,
# "svm_rbf": SVC_RBF,
"xgb": XGB,
}
scoring_methods = {
"balanced_accuracy": _get_balanced_accuracy,
"log_loss": _get_log_loss,
}
classifier = classifier.lower()
balancing = balancing.lower() if isinstance(balancing, str) else balancing
scoring = scoring.lower()
if classifier not in classifiers:
raise DecoderNotFoundError(classifier, classifiers.keys())
if scoring not in scoring_methods:
raise ScoringMethodNotFoundError(scoring, scoring_methods.keys())
return classifiers[classifier](
balancing=balancing,
optimize=optimize,
scoring=scoring_methods[scoring],
)
def _get_balanced_accuracy(model, data_test, label_test) -> Any:
"""Calculated balanced accuracy score."""
return balanced_accuracy_score(label_test, model.predict(data_test))
def _get_log_loss(model, data_test, label_test) -> Any:
"""Calculate Log Loss score."""
return log_loss(label_test, model.predict_proba(data_test))
| 33.68477
| 78
| 0.537836
|
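A minimal call of the factory in the record above, using only values the module itself defines:
decoder = get_decoder(classifier="lda",
                      scoring="balanced_accuracy",
                      balancing=None,
                      optimize=False)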
e3203c55f3123f00f21c9072e3c16a2c74fb421f
| 7,603
|
py
|
Python
|
pikoToHM.py
|
lucasHSA/piko
|
a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7
|
[
"MIT"
] | null | null | null |
pikoToHM.py
|
lucasHSA/piko
|
a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7
|
[
"MIT"
] | 1
|
2016-07-18T08:24:50.000Z
|
2016-12-17T09:19:07.000Z
|
pikoToHM.py
|
lucasHSA/piko
|
a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2015 Lucas Koegel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from piko import Piko
from hm import HM
from pyowm import OWM
import time
import sys
import logging, logging.handlers
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.handlers.RotatingFileHandler('/home/pi/Desktop/piko/pikoToHM.log', maxBytes=1024*1024*512, backupCount=2)
fh.setLevel(logging.DEBUG)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(format)
fh.setFormatter(format)
logger.addHandler(ch)
logger.addHandler(fh)
PIKO_INTERVAL = 30 # seconds
OWM_INTERVAL = 1800 # seconds
HM_PV_REMAINING_POWER_ID = 12772
HM_PV_STRING_1_POWER_ID = 15241
HM_PV_STRING_2_POWER_ID = 15242
HM_WEATHER_FORECAST_CLOUDS_ID = 20144
HM_WEATHER_CURRENT_TEMPERATURE_ID = 21442
HM_WEATHER_FORECAST_TEMPERATURE_ID = 21443
OWM_API_KEY = 'insert'
OWM_CITY_ID = 2835477
logging.info('Started')
p = Piko(host='http://192.168.178.123')
hm = HM('http://192.168.178.49')
owm = OWM(OWM_API_KEY)
last_weather_update = time.time() - OWM_INTERVAL # - OWM_INTERVAL to update on first run
while(True):
try:
# -------------------------------
# Weather
now = time.time()
if (now - last_weather_update) >= OWM_INTERVAL:
try:
# Queries the OWM web API for three hours weather forecast for the specified city ID.
# A Forecaster object is returned, containing a Forecast instance covering a global streak of five days:
# this instance encapsulates Weather objects, with a time interval of three hours one from each other
logging.debug('Calling: owm.three_hours_forecast_at_id')
forecast = owm.three_hours_forecast_at_id(OWM_CITY_ID).get_forecast()
# get current weather
logging.debug('Calling: owm.weather_at_id')
weather = owm.weather_at_id(OWM_CITY_ID).get_weather()
# set the cloud coverage of the weather to homematic
# .get_clouds(): Returns the cloud coverage percentage as an int
logging.debug('Calling: set_state HM_WEATHER_FORECAST_CLOUDS_ID')
hm.set_state(HM_WEATHER_FORECAST_CLOUDS_ID, weather.get_clouds())
# set the current temperature of the weather to homematic
# .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
hm.set_state(HM_WEATHER_CURRENT_TEMPERATURE_ID, weather.get_temperature(unit="celsius")["temp"])
# set the temperature of the weather in 12 hours to homematic
# .get(): Lookups up into the Weather items list for the item at the specified index
# .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
hm.set_state(HM_WEATHER_FORECAST_TEMPERATURE_ID, forecast.get(3).get_temperature(unit="celsius")["temp"])
# Update last_weather_update time
last_weather_update = time.time()
except: # catch *all* exceptions
err = sys.exc_info()[0]
logging.exception('Error on updating weather: {0}'.format(err))
# -------------------------------
# Piko
# Get values for remaining power calculation
logging.debug('Calling: get_current_power')
current_solar_power = p.get_current_power()
logging.debug('Calling: get_consumption_phase_1')
consumption_phase_1 = p.get_consumption_phase_1()
logging.debug('Calling: get_consumption_phase_2')
consumption_phase_2 = p.get_consumption_phase_2()
logging.debug('Calling: get_consumption_phase_3')
consumption_phase_3 = p.get_consumption_phase_3()
# Get values for string 1 power and string 2 power
logging.debug('Calling: get_string1_current')
string1Current = p.get_string1_current()
logging.debug('Calling: get_string2_current')
string2Current = p.get_string2_current()
logging.debug('Calling: get_string1_voltage')
string1Voltage = p.get_string1_voltage()
logging.debug('Calling: get_string2_voltage')
string2Voltage = p.get_string2_voltage()
if current_solar_power < 0:
# Piko is off
logging.info('Piko is off, going to sleep 10 minutes.')
# Set state of homematic
logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
hm.set_state(HM_PV_REMAINING_POWER_ID, 0)
logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
hm.set_state(HM_PV_STRING_1_POWER_ID, 0)
logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
hm.set_state(HM_PV_STRING_2_POWER_ID, 0)
logging.debug('Calling: time.sleep 600')
time.sleep(600)
continue
# Calculate remaining power
logging.debug('Rounding for remaining_power')
remaining_power = round(current_solar_power - (consumption_phase_1 + consumption_phase_2 + consumption_phase_3))
if remaining_power < 0:
remaining_power = 0
# Calculate string 1 power and string 2 power
string1 = round(string1Current * string1Voltage)
string2 = round(string2Current * string2Voltage)
# Set state of homematic
logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
hm.set_state(HM_PV_REMAINING_POWER_ID, remaining_power)
logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
hm.set_state(HM_PV_STRING_1_POWER_ID, string1)
logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
hm.set_state(HM_PV_STRING_2_POWER_ID, string2)
# Sleep
logging.debug('Calling: time.sleep PIKO_INTERVAL')
time.sleep(PIKO_INTERVAL)
except KeyboardInterrupt:
break
except: # catch *all* exceptions
err = sys.exc_info()[0]
logging.exception('Error: {0}'.format(err))
continue
| 42.47486
| 145
| 0.663422
|
e321f4353a25d31bcaa64e339213294f5626c9c9
| 480
|
py
|
Python
|
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse
NUM = 250
ells = [Ellipse(xy=np.random.rand(2) * 10,
width=np.random.rand(), height=np.random.rand(),
angle=np.random.rand() * 360)
for i in range(NUM)]
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(np.random.rand())
e.set_facecolor(np.random.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
| 20.869565
| 54
| 0.708333
|
e32283e627f56eef0ab47dab2fb3694cb482ef8d
| 231
|
py
|
Python
|
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | null | null | null |
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | 13
|
2020-09-04T23:25:20.000Z
|
2022-02-18T01:52:33.000Z
|
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | null | null | null |
from enum import Enum
| 21
| 43
| 0.562771
|
e323376f728d32ac2cbf19f89a6bf1e46c450382
| 638
|
py
|
Python
|
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | 3
|
2018-03-27T14:34:48.000Z
|
2021-10-04T16:28:19.000Z
|
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | null | null | null |
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | 1
|
2021-08-27T23:51:28.000Z
|
2021-08-27T23:51:28.000Z
|
#import OpenStack connection class from the SDK
from openstack import connection
# Create a connection object by calling the constructor, passing the security information
conn = connection.Connection(auth_url="http://192.168.0.106/identity",
project_name="demo",
username="admin",
password="manoj",
user_domain_id="default",
project_domain_id="default")
def create_project(conn):
    # Assumed helper (not in source): create a project through the
    # openstacksdk identity proxy and echo it back.
    project = conn.identity.create_project(name="demo-project",
                                           description="created via SDK")
    print(project)
create_project(conn)
| 30.380952
| 89
| 0.714734
|
e323be496777a0e952195a0a60b4f2ae474d9dd5
| 849
|
py
|
Python
|
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
from sympy import *
import pandas as pd
print(bisection(10, 50, 0.01, '3*x**2 - 120*x + 100'))
| 30.321429
| 212
| 0.522968
|
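The record above calls bisection() without showing its definition (likely lost in extraction). A hedged sketch consistent with the call signature (lower bound, upper bound, tolerance, expression string), evaluating the expression with sympy:
from sympy import symbols, sympify
def bisection(a, b, tol, expr_str):
    # Assumed implementation (not in source): classic bisection root finder.
    x = symbols('x')
    f = sympify(expr_str)
    fa = f.subs(x, a)
    while (b - a) / 2.0 > tol:
        m = (a + b) / 2.0
        fm = f.subs(x, m)
        if fm == 0:
            return m
        if fa * fm < 0:
            b = m
        else:
            a, fa = m, fm
    return (a + b) / 2.0
With the arguments shown in the record, this converges to the root of 3*x**2 - 120*x + 100 near x = 39.15.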
e323ed5e92eb5da83c0443afabf48a5b468396f3
| 176
|
py
|
Python
|
gd/utils/crypto/__init__.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/utils/crypto/__init__.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/utils/crypto/__init__.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
"""Main module for operating on crypted/encoded strings in Geometry Dash"""
from gd.utils.crypto.coders import Coder
from gd.utils.crypto.xor_cipher import XORCipher as xor
| 44
| 76
| 0.795455
|
e324c2b47225b873ec4b37a7708b700104f77b26
| 3,684
|
py
|
Python
|
subt/ros/base/src/motor_controller.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 12
|
2017-02-16T10:22:59.000Z
|
2022-03-20T05:48:06.000Z
|
subt/ros/base/src/motor_controller.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 618
|
2016-08-30T04:46:12.000Z
|
2022-03-25T16:03:10.000Z
|
subt/ros/base/src/motor_controller.py
|
robotika/osgar
|
6f4f584d5553ab62c08a1c7bb493fefdc9033173
|
[
"MIT"
] | 11
|
2016-08-27T20:02:55.000Z
|
2022-03-07T08:53:53.000Z
|
from pid import PID
import pdb
#for anonymous objects
Object = lambda **kwargs: type("Object", (), kwargs)
| 47.844156
| 140
| 0.659609
|
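The Object lambda in the record above builds a throwaway class whose attributes are the keyword arguments, so it can be used like an anonymous object:
pose = Object(x=1.0, y=2.0, theta=0.5)
print(pose.x, pose.y, pose.theta)  # 1.0 2.0 0.5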
e325abcd58eea788430716963a4dc7047047719c
| 4,931
|
py
|
Python
|
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 2
|
2020-04-16T17:03:56.000Z
|
2021-04-08T17:23:21.000Z
|
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | null | null | null |
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 1
|
2020-05-04T18:03:59.000Z
|
2020-05-04T18:03:59.000Z
|
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import scrolledtext
from tkinter import ttk
import tkcalendar as tkc
from shiftscheduler.data_types import data_types
from shiftscheduler.excel import output as excel_output
from shiftscheduler.gui import constants
from shiftscheduler.gui import util
from shiftscheduler.i18n import gettext
_ = gettext.GetTextFn('gui/barebone')
LOCALE_CODE = gettext.GetLanguageCode()
DATE_PATTERN = _('y/m/d')
# TkInter frame for getting barebone Excel file
| 35.47482
| 110
| 0.636585
|
e3274579faa2032556dd5e38f0e928addfcdc145
| 1,093
|
py
|
Python
|
orders/migrations/0001_initial.py
|
MahmudulHassan5809/Ecommerce-WebSite-With-Django2
|
a9c76e6e925e236ba064be194a03d9d6635edac2
|
[
"MIT"
] | 1
|
2021-09-24T04:32:35.000Z
|
2021-09-24T04:32:35.000Z
|
orders/migrations/0001_initial.py
|
MahmudulHassan5809/Ecommerce-WebSite-With-Django2
|
a9c76e6e925e236ba064be194a03d9d6635edac2
|
[
"MIT"
] | null | null | null |
orders/migrations/0001_initial.py
|
MahmudulHassan5809/Ecommerce-WebSite-With-Django2
|
a9c76e6e925e236ba064be194a03d9d6635edac2
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2019-01-26 19:42
import datetime
from django.db import migrations, models
| 34.15625
| 123
| 0.563586
|
e3278fc449a9b7f42367d6c094639616a86c1514
| 353
|
py
|
Python
|
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | 1
|
2017-01-18T11:19:24.000Z
|
2017-01-18T11:19:24.000Z
|
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | null | null | null |
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | 1
|
2018-02-23T06:23:43.000Z
|
2018-02-23T06:23:43.000Z
|
from setuptools import setup, find_packages
setup(
name='selfstoredict',
version='0.6',
packages=find_packages(),
url='https://github.com/markus61/selfstoredict',
license='MIT',
author='markus',
author_email='ms@dom.de',
description='a python class delivering a dict that stores itself into a JSON file or a redis db',
)
| 29.416667
| 101
| 0.696884
|
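The package description above ("a dict that stores itself into a JSON file") suggests the following pattern; this is only an illustrative sketch of the idea, not the package's actual API:
import json
class SelfStoreDict(dict):
    # Every mutation rewrites the backing JSON file.
    def __init__(self, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._path = path
        self._save()
    def _save(self):
        with open(self._path, 'w') as fh:
            json.dump(self, fh)
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self._save()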
e328edcf699e6d13889b75058d9c53daede11262
| 428
|
py
|
Python
|
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
sum = add(2,5,6,5)
print(sum)
calculate(add=3, mul=5)
my_car = Car(make="Nissan")
print(my_car.model)
| 17.12
| 36
| 0.514019
|
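The script above calls add, calculate, and Car without their definitions (the record appears truncated). Hedged definitions consistent with the calls; note that `sum = add(...)` also shadows the built-in sum after the first call:
def add(*args):
    # Assumed: sums any number of positional arguments.
    return sum(args)
def calculate(**kwargs):
    # Assumed: demonstrates unpacking keyword arguments.
    print(kwargs.get('add'), kwargs.get('mul'))
class Car:
    def __init__(self, **kw):
        # Assumed: unspecified attributes default to None, so
        # print(my_car.model) would show None.
        self.make = kw.get('make')
        self.model = kw.get('model')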
e3294c6b906349f5541063a2b6f7ca5cb0e7e90b
| 21,406
|
py
|
Python
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 2
|
2015-02-18T17:31:58.000Z
|
2019-04-01T13:44:45.000Z
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 1
|
2015-01-26T03:58:19.000Z
|
2015-01-26T03:58:19.000Z
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 1
|
2021-05-04T21:15:53.000Z
|
2021-05-04T21:15:53.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
import logging
import json
from urllib import urlencode
import urlparse
#for CSRF state tokens
import time
import base64
# Get available json parser
try:
# should be the fastest on App Engine py27.
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
# at this point ImportError will be raised
# if none of the above could be imported
# it's a OAuth 1.0 spec even though the lib is called oauth2
import oauth2 as oauth1
# users module is needed for OpenID authentication.
from google.appengine.api import urlfetch, users
from webapp2_extras import security
__all__ = ['SimpleAuthHandler',
'Error',
'UnknownAuthMethodError',
'AuthProviderResponseError',
'InvalidCSRFTokenError',
'InvalidOAuthRequestToken',
'InvalidOpenIDUserError']
OAUTH1 = 'oauth1'
OAUTH2 = 'oauth2'
OPENID = 'openid'
| 34.525806
| 80
| 0.679996
|
e32db38efba021a5263a02a0f603ee6533341d64
| 766
|
py
|
Python
|
test.py
|
litex-hub/pythondata-cpu-ibex
|
9775779f0770fc635a17dfc467cb8d5afdf01d1d
|
[
"Apache-2.0"
] | 2
|
2021-02-18T00:27:38.000Z
|
2021-05-12T21:57:41.000Z
|
test.py
|
litex-hub/pythondata-cpu-ibex
|
9775779f0770fc635a17dfc467cb8d5afdf01d1d
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
litex-hub/pythondata-cpu-ibex
|
9775779f0770fc635a17dfc467cb8d5afdf01d1d
|
[
"Apache-2.0"
] | 1
|
2021-04-28T02:42:51.000Z
|
2021-04-28T02:42:51.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import pythondata_cpu_ibex
print("Found ibex @ version", pythondata_cpu_ibex.version_str, "(with data", pythondata_cpu_ibex.data_version_str, ")")
print()
print("Data is in", pythondata_cpu_ibex.data_location)
assert os.path.exists(pythondata_cpu_ibex.data_location)
print("Data is version", pythondata_cpu_ibex.data_version_str, pythondata_cpu_ibex.data_git_hash)
print("-"*75)
print(pythondata_cpu_ibex.data_git_msg)
print("-"*75)
print()
print("It contains:")
for root, dirs, files in os.walk(pythondata_cpu_ibex.data_location):
dirs.sort()
for f in sorted(files):
path = os.path.relpath(os.path.join(root, f), pythondata_cpu_ibex.data_location)
print(" -", path)
| 31.916667
| 119
| 0.765013
|
e331235f5a65953d372c517da81e56d9c43aa850
| 2,652
|
py
|
Python
|
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | 1
|
2022-01-30T22:06:57.000Z
|
2022-01-30T22:06:57.000Z
|
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
"""LAPKT-BFWS
https://github.com/nirlipo/BFWS-public
"""
import re
import os
import sys
import subprocess
import tempfile
from pddlgym_planners.pddl_planner import PDDLPlanner
from pddlgym_planners.planner import PlanningFailure
import numpy as np
from utils import FilesInCommonTempDirectory
DOCKER_IMAGE = 'khodeir/bfws:latest'
| 42.774194
| 232
| 0.667044
|
e33575c4ac98eb7bd72db9483692a67e2a8b1c0f
| 1,914
|
py
|
Python
|
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | 1
|
2021-04-09T09:46:31.000Z
|
2021-04-09T09:46:31.000Z
|
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | null | null | null |
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | 1
|
2021-04-12T11:27:13.000Z
|
2021-04-12T11:27:13.000Z
|
import csv
import re
import sys
import requests
import json
import Data # data container, replace with your own
orgName = Data.orgName # replace with your own
apiKey = Data.apiKey # provide your own API token
api_token = "SSWS " + apiKey
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': api_token}
def CreateZones():
    # Assumed body (not in source): create one example IP network zone
    # through the Okta Zones API (POST /api/v1/zones).
    url = "https://" + orgName + "/api/v1/zones"
    zone = {"type": "IP", "name": "Example Zone",
            "gateways": [{"type": "CIDR", "value": "203.0.113.0/24"}]}
    response = requests.post(url, headers=headers, data=json.dumps(zone))
    print(response.status_code)
if __name__ == "__main__":
    CreateZones()
| 29.90625
| 71
| 0.405434
|
e33639a848594d63e324d70460cacf9ae086d33c
| 959
|
py
|
Python
|
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
import random
import PySimpleGUI as sg
simulador = SimuladorDeDado()
simulador.Iniciar()
| 29.96875
| 71
| 0.577685
|
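SimuladorDeDado is used above but never defined (the record appears truncated). A hedged PySimpleGUI sketch of a dice-roll window matching the Iniciar() call, reusing the record's own imports:
class SimuladorDeDado:
    # Assumed implementation (not in source).
    def __init__(self):
        layout = [[sg.Button('Rolar dado')], [sg.Text('', key='-RESULTADO-')]]
        self.janela = sg.Window('Simulador de Dado', layout)
    def Iniciar(self):
        while True:
            evento, _ = self.janela.read()
            if evento == sg.WIN_CLOSED:
                break
            if evento == 'Rolar dado':
                self.janela['-RESULTADO-'].update(str(random.randint(1, 6)))
        self.janela.close()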
e3370f6e006d93026ba5320fad4727621e81fc92
| 1,712
|
py
|
Python
|
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
import math
from typing import List, Iterable, Union
Numeric = Union[int, float]
def vdot(p: List[Numeric], q: List[Numeric]) -> float:
"""Vector dot product."""
if len(p) == 0:
raise ValueError("p must not be None or empty")
if len(q) == 0:
raise ValueError("q must not be None or empty")
if len(p) != len(q):
raise ValueError("vectors p and q must have the same dimension")
res: float = 0
for i in range(len(p)):
res += p[i] * q[i]
return res
def full(rows: int, columns: int, fill: Numeric = 0) -> List[List[float]]:
"""Return a new array of given shape and type, filled with fill_value."""
return [[fill] * columns for _ in range(rows)]
def dot(p: List[List[Numeric]], q: List[List[Numeric]]) -> List[List[float]]:
"""Matrix dot product."""
p_shape = len(p), len(p[0])
q_shape = len(q), len(q[0])
if p_shape[1] != q_shape[0]:
raise ValueError("number of columns in p must equal the number of rows in q")
res: List[List[float]] = full(rows=p_shape[0], columns=q_shape[1])
for i in range(p_shape[0]):
for j in range(q_shape[1]):
for k in range(p_shape[1]):
res[i][j] += p[i][k] * q[k][j]
return res
| 31.703704
| 86
| 0.567757
|
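A quick check of the helpers in the record above:
print(vdot([1, 2, 3], [4, 5, 6]))         # 32
print(dot([[1, 2], [3, 4]], [[5], [6]]))  # [[17], [39]]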
e337c8816166ee2eea4a6327ac76523c1a2e9c32
| 1,231
|
py
|
Python
|
plot/eigenvalue_statistics.py
|
dh4gan/tache
|
51ed037769ecc4fdadc591e3b3619416c79e65b7
|
[
"MIT"
] | 5
|
2018-02-27T04:07:15.000Z
|
2020-12-29T20:49:36.000Z
|
plot/eigenvalue_statistics.py
|
dh4gan/tache
|
51ed037769ecc4fdadc591e3b3619416c79e65b7
|
[
"MIT"
] | null | null | null |
plot/eigenvalue_statistics.py
|
dh4gan/tache
|
51ed037769ecc4fdadc591e3b3619416c79e65b7
|
[
"MIT"
] | null | null | null |
# Written 9/10/14 by dh4gan
# Code reads in output eigenvalue file from tache
# Computes statistics
import numpy as np
import matplotlib.pyplot as plt
import io_tache as io
# Read in inputs from command line
filename = io.find_local_input_files('eigenvalues*')  # assumed: helper from io_tache (source referenced an undefined 'ff')
threshold = input("What is the threshold for classification? ")
# Read in eigenvalue file
print "Reading eigenvalue file ", filename
npart,x,y,z,eigenpart,eigenvalues = io.read_eigenvalue_file(filename)
print np.amax(eigenvalues),np.amin(eigenvalues)
# Calculate the trace for each simulation element
trace = np.zeros(npart)
for i in range(npart):
for j in range(3):
trace[i] = trace[i]+ eigenvalues[i,j]
normedeigenvalues = eigenvalues.copy()
for i in range(npart):
if(trace[i]>0.0):
normedeigenvalues[i,:] = normedeigenvalues[i,:]/trace[i]
else:
normedeigenvalues[i,:] = 0.0
# Make a histogram of the eigenvalues
alleigenvalues = eigenvalues.flatten()
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
ax.hist(alleigenvalues, bins=100, normed=True, log=True)
plt.show()
# Make a histogram of the traces
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
ax.hist(trace, bins=100, normed=True, log=True)
plt.show()
| 21.224138
| 69
| 0.723802
|
e337db10027ece0f941b1295bc94ad1a0ed34904
| 4,179
|
py
|
Python
|
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | null | null | null |
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | null | null | null |
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | 3
|
2019-01-07T17:07:16.000Z
|
2021-01-09T13:01:40.000Z
|
from django.views.generic.edit import CreateView, FormMixin
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django import forms
from django.urls import reverse
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from forwarder.models import Application, Hierarchy
def pdf_dl(request, pk):
# Create the HttpResponse object with the appropriate PDF headers.
application = Application.objects.get(pk=pk)
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % (application)
# Create the PDF object, using the response object as its "file."
p = canvas.Canvas(response)
# Draw things on the PDF. Here's where the PDF generation happens.
# See the ReportLab documentation for the full list of functionality.
p.drawString(100, 800, "Name : " + application.applicant.name)
p.drawString(100, 780, "Admission no : " + str(application.applicant.admn_no))
p.drawString(100, 760, "Department : " + application.applicant.branch)
p.drawString(100, 740, "Semester : " + str(application.applicant.semester))
p.drawString(100, 720, "Parent name : " + application.applicant.parent_name)
if application.type == "OTH":
p.drawString(100, 700, "Application type : " + application.other())
else:
p.drawString(100, 700, "Application type : " + application.get_type_display())
p.drawString(100, 680, "Recommended by HOD of " + application.applicant.branch)
# Close the PDF object cleanly, and we're done.
p.showPage()
p.save()
return response
| 36.025862
| 93
| 0.675042
|
e3390f43d3793bc787b6b52cd5f2cc575976a36e
| 4,793
|
py
|
Python
|
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/Dense_Video_Captioning_Feature_Extraction_Model_Choice
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | 1
|
2021-04-21T12:39:07.000Z
|
2021-04-21T12:39:07.000Z
|
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/masters_code
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | null | null | null |
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/masters_code
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | null | null | null |
import h5py
# torch imports
import torch
from torch.utils.data import Dataset
# generic imports
import os
import sys
import numpy as np
import random
import pandas as pd
import cv2
from decord import VideoReader
from decord import cpu, gpu
from matplotlib import pyplot as plt
import gc
# create data loader
class FullVideoDataset(Dataset):
    # Assumed skeleton (the original class body was lost in extraction):
    # read a whole video with decord and return its frames as a tensor.
    def __init__(self, video_paths):
        self.video_paths = video_paths
    def __len__(self):
        return len(self.video_paths)
    def __getitem__(self, idx):
        vr = VideoReader(self.video_paths[idx], ctx=cpu(0))
        frames = vr.get_batch(range(len(vr))).asnumpy()
        return torch.from_numpy(frames)
| 30.335443
| 130
| 0.580638
|
e339d61b7c0a81fbe079a184470ec5bdef08b9e1
| 1,583
|
py
|
Python
|
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
"""
@file: sklearn_method.py
@time: 2020-12-09 17:38:38
"""
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
myfont = fm.FontProperties(fname='SimHei.ttf')  # CJK-capable font for plot labels
train_data = pd.read_csv('chnsenticorp/train.tsv', sep='\t')
tfidf = TfidfVectorizer(norm='l2', ngram_range=(1, 2))
features = tfidf.fit_transform(train_data.text_a)
labels = train_data.label
print(features.shape)
models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
LinearSVC(),
MultinomialNB(),
LogisticRegression(random_state=0, solver='liblinear'),
]
CV = 10
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in tqdm(models):
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='f1', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
results = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'f1'])
sns.boxplot(x='model_name', y='f1', data=results)
sns.stripplot(x='model_name', y='f1', data=results,
size=8, jitter=True, edgecolor="gray", linewidth=2)
plt.show()
print(results.groupby('model_name').f1.mean())
| 31.66
| 78
| 0.753001
|
e33bc5cbc72c8153bc963c853fb7e883e19b21c8
| 2,087
|
py
|
Python
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 1
|
2019-07-31T11:40:06.000Z
|
2019-07-31T11:40:06.000Z
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 10
|
2020-02-12T01:16:25.000Z
|
2021-06-10T18:42:24.000Z
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 1
|
2019-07-31T11:40:18.000Z
|
2019-07-31T11:40:18.000Z
|
import tempfile
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from filer.models import Image
from handypackages.tag.models import Tag
from .models import Gallery
| 29.394366
| 77
| 0.577384
|
e33d45a696398845a0fe18a3dbb14693d8655739
| 1,926
|
py
|
Python
|
src/jk_mediawiki/impl/WikiCronProcessFilter.py
|
jkpubsrc/python-module-jk-mediawiki
|
5d76a060f0ed46c072d44e8084f6fa40d16e6069
|
[
"Apache-1.1"
] | null | null | null |
src/jk_mediawiki/impl/WikiCronProcessFilter.py
|
jkpubsrc/python-module-jk-mediawiki
|
5d76a060f0ed46c072d44e8084f6fa40d16e6069
|
[
"Apache-1.1"
] | null | null | null |
src/jk_mediawiki/impl/WikiCronProcessFilter.py
|
jkpubsrc/python-module-jk-mediawiki
|
5d76a060f0ed46c072d44e8084f6fa40d16e6069
|
[
"Apache-1.1"
] | null | null | null |
import os
import typing
import jk_typing
from .ProcessFilter import ProcessFilter
#
| 27.514286
| 129
| 0.297508
|
e33da7e662f4c2fc76532c7c89e8edb38e2cccee
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/filelock/_error.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/filelock/_error.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/filelock/_error.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/ab/0b/2c/7ae80e56fd2208fbee5ef317ac009972f468b5601f62f8f799f9d9279a
| 96
| 96
| 0.895833
|
e33e7075c79b3b47f743f64502284119cdb5e862
| 2,094
|
py
|
Python
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 2
|
2019-12-01T16:12:24.000Z
|
2021-05-18T22:10:12.000Z
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 10
|
2019-09-19T17:08:46.000Z
|
2021-02-17T21:42:10.000Z
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 3
|
2019-11-27T18:39:12.000Z
|
2021-02-10T15:11:58.000Z
|
"""
Loader and Parser for the xml format.
Version: 0.01-alpha
"""
from xml.dom import minidom
from konbata.Data.Data import DataNode, DataTree
from konbata.Formats.Format import Format
def xml_toTree(file, delimiter, options=None):
"""
Function transforms an xml file into a DataTree.
Parameters
----------
file: file
open input file in at least read mode
Returns
-------
tree: DataTree
"""
# TODO: Second Parser with the import xml.etree.ElementTree as ET class
xml_reader = minidom.parse(file)
xml_reader.normalize()
tree = DataTree(tree_type='xml')
if xml_reader.hasChildNodes():
for node in xml_reader.childNodes:
childNode = help_xml_toTree(node)
tree.root.add(childNode)
return tree
def help_xml_toTree(xml_node):
"""
Helper for xml_toTree; walks through the xml tree recursively
Parameters
----------
xml_node: ElementType1
Returns
-------
node: DataNode
"""
if xml_node.hasChildNodes():
tree_node = DataNode(xml_node.localName)
for node in xml_node.childNodes:
tree_node.add(help_xml_toTree(node))
return tree_node
# TODO Add Attributes
node = None
if xml_node.nodeType == xml_node.TEXT_NODE:
# TODO: guess xml_node.nodeValue == xml_node.data
node = DataNode(xml_node.nodeValue.replace('\n ', ''))
elif xml_node.nodeType == xml_node.ELEMENT_NODE:
# TODO: guess xml_node.tagName == xml_node.localName
node = DataNode(xml_node.localName)
else:
# TODO: Implement the other nodeTypes
print('Warning: NodeType not supported yet')
node = DataNode(xml_node.localName)
return node
def xml_fromTree(tree, file, options=None):
"""
Function transforms a DataTree into a xml file.
Parameters
----------
tree: DataTree
file: file
open output file in at least write mode
options: list, optional
"""
# TODO
pass
xml_format = Format('xml', ['/n'], xml_toTree, xml_fromTree)
| 22.516129
| 75
| 0.637536
|
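A hypothetical call of the parser above; minidom.parse accepts any file-like object, so an in-memory XML document works:
from io import StringIO
sample = StringIO("<root><item>hello</item></root>")
tree = xml_toTree(sample, delimiter=None)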
e33ec5c64b5732e244db6498e5c0817ede88b3d0
| 1,650
|
py
|
Python
|
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
#!/usr/env python
import pandas as pd
import os
import sys
import numpy as np
if len(sys.argv) != 3:
    sys.exit('[usage] python %s <repeat_index table> <indel cutoff>' % sys.argv[0])
ref_table = sys.argv[1]
indel_cut_off = int(sys.argv[2])
for gdf in pd.read_csv(ref_table, sep='\t', chunksize = 10000):
for contig, contig_df in gdf.groupby('contig'):
df = contig_df\
.assign(indel_index = lambda d: d.negative_index + d.positive_index) \
.query('indel_index >= %i ' %indel_cut_off)
# run-state trackers, initialized so the first iteration cannot raise
# NameError if it does not open a run
taken_base, indel_index, mononucleotide = 0, -1, ''
count = 0
for i, base in df.iterrows():
if base['negative_index'] == base['indel_index']:
start = base['start']
mononucleotide = base['fwd_base']
indel_index = base['indel_index']
taken_base = 1
elif taken_base != indel_index and base['fwd_base'] == mononucleotide:
taken_base += 1
elif taken_base == indel_index:
assert base['positive_index'] == indel_index and base['fwd_base'] == mononucleotide,'Wrong parsing'
end = base['start']
line = '{contig}\t{start}\t{end}\tIndel{id}\t{indel_index}\t+\t{mononucleotide}' \
.format(contig = base['contig'],
start = start,
end = end,
id = count,
indel_index = indel_index,
mononucleotide = mononucleotide)
print(line, file= sys.stdout)
count += 1
else:
print(base)
| 36.666667
| 115
| 0.512121
|
e3431c6a3c1b12221c308a1da4d98113e28475f3
| 474
|
py
|
Python
|
xicsrt/optics/_InteractNone.py
|
PrincetonUniversity/xicsrt
|
15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd
|
[
"MIT"
] | 1
|
2021-07-21T17:07:31.000Z
|
2021-07-21T17:07:31.000Z
|
xicsrt/optics/_InteractNone.py
|
PrincetonUniversity/xicsrt
|
15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd
|
[
"MIT"
] | null | null | null |
xicsrt/optics/_InteractNone.py
|
PrincetonUniversity/xicsrt
|
15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
.. Authors:
Novimir Pablant <npablant@pppl.gov>
Define the :class:`InteractNone` class.
"""
import numpy as np
from copy import deepcopy
from xicsrt.tools.xicsrt_doc import dochelper
from xicsrt.optics._InteractObject import InteractObject
| 21.545455
| 66
| 0.723629
|
e34633ea0534cf1b5136a4ecb84b248d7c202e57
| 416
|
py
|
Python
|
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
def ficha(nome='<desconhecido>', gols=0):
    # Assumed signature (not in source): prints the player's record.
    print(f'O jogador {nome} fez {gols} gol(s).')
print('='*30)
nome = str(input('Nome do Jogador: '))
gols = str(input('Número de Gols: '))
ficha(nome, gols)
| 24.470588
| 67
| 0.560096
|
e347285e41902227dea4612bf91fb04df4a24692
| 3,598
|
py
|
Python
|
sub_1602_display.py
|
leonardlinde/timeandtemp
|
93e9ad16b2027fd9c261052c22a5977b86326550
|
[
"Artistic-2.0"
] | null | null | null |
sub_1602_display.py
|
leonardlinde/timeandtemp
|
93e9ad16b2027fd9c261052c22a5977b86326550
|
[
"Artistic-2.0"
] | null | null | null |
sub_1602_display.py
|
leonardlinde/timeandtemp
|
93e9ad16b2027fd9c261052c22a5977b86326550
|
[
"Artistic-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
ZMQ Subscriber for 1602 display
Queue: INF and CMD
"""
import wiringpi2 as wiringpi
import datetime
import time
import json
import Adafruit_DHT
import traceback
import zmq
import sys
import pprint
infoSocket = "tcp://localhost:5550"
cmdSocket = "tcp://localhost:5560"
wiringpi.wiringPiSetup()
# Initialize mcp3008 (same as 3004) ADC - first parm is pin base (must be > 64)
# Second param is SPI bus number
wiringpi.mcp3004Setup(100,0)
# Initialize LCD
# 2 rows of 16 columns, driven by 4 bits
# Control pins are WiringPi 15 & 16
# Data pins are WiringPi 0,1,2,3
display = wiringpi.lcdInit(2, 16, 4, 15, 16, 0, 1, 2, 3, 0, 0, 0, 0)
# LCD Backlight
backlightPin = 26 # GPIO12 is set to ground to turn off backlight
wiringpi.pinMode(backlightPin,1) #output
wiringpi.digitalWrite(backlightPin, 0)
# Init zmq
context = zmq.Context()
# Subscribe to all the info queues
info = context.socket(zmq.SUB)
info.connect(infoSocket)
info.setsockopt(zmq.SUBSCRIBE, 'INF_SENSOR')
info.setsockopt(zmq.SUBSCRIBE, 'INF_CURRENTWX')
info.setsockopt(zmq.SUBSCRIBE, 'INF_FORECASTWX')
# Subscribe to LCD command queue
cmd = context.socket(zmq.SUB)
cmd.connect(cmdSocket)
cmd.setsockopt(zmq.SUBSCRIBE, 'CMD_LCD')
# set up a poller to read both sockets
poller = zmq.Poller()
poller.register(info, zmq.POLLIN)
poller.register(cmd, zmq.POLLIN)
# state variables
commandState = {'backlight':True}
# convert ADC reading to Lux
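# --- Hedged completion: the lux conversion and the main loop are truncated
# in this dump. The sketch below is illustrative only; the divider constants,
# topic handling, and display layout are assumptions. ---
def adc_to_lux(channel=0, pin_base=100):
    # mcp3004Setup(100, 0) above mapped ADC channels 0-7 to pins 100-107
    raw = wiringpi.analogRead(pin_base + channel)  # 10-bit reading, 0-1023
    # crude linear stand-in for a calibrated photocell curve
    return raw * (1000.0 / 1023.0)

def main_sub_1602_display():
    while True:
        socks = dict(poller.poll(1000))
        if cmd in socks:
            topic, payload = cmd.recv().split(' ', 1)
            commandState.update(json.loads(payload))
            # per the wiring note above, ground (0) switches the backlight off
            wiringpi.digitalWrite(backlightPin, 1 if commandState['backlight'] else 0)
        if info in socks:
            topic, payload = info.recv().split(' ', 1)
            wiringpi.lcdPosition(display, 0, 0)
            wiringpi.lcdPuts(display, payload[:16])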
if __name__ == '__main__':
main_sub_1602_display()
| 26.651852
| 79
| 0.658143
|
e347e8efaaade3a7b28a992e4961e185b12004e3
| 2,079
|
py
|
Python
|
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from fastapi import APIRouter, UploadFile, File, Depends, HTTPException
from pydantic import create_model
from starlette.responses import StreamingResponse
from app.business_layers.domain import Work
from app.business_layers.repository import WorkRepository
from app.business_layers.use_cases import (
bulk_upload_works_use_case,
get_work_use_case,
list_works_use_case,
)
from app.db.mongodb import get_client
from app.utils.csv_manipulation import process_csv, stream_csv_from_dicts
work_router = APIRouter()
# Model fields are given either as a (<type>, <default>) tuple or as a bare
# default value; here Ellipsis (...) as the default marks every field required
model_fields = {k: (v, ...) for k, v in Work.__annotations__.items()}
WorkModel = create_model("WorkModel", **model_fields)
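# --- Hedged completion: the route handlers are truncated in this dump. A
# plausible shape given the imports above; the paths and the use-case
# signatures below are assumptions, not the original author's code. ---
@work_router.get("/works/{work_id}", response_model=WorkModel)
async def get_work(work_id: str, db=Depends(get_client)):
    work = await get_work_use_case(WorkRepository(db), work_id)
    if work is None:
        raise HTTPException(status_code=404, detail="Work not found")
    return WorkModel(**work.__dict__)


@work_router.get("/works", response_model=List[WorkModel])
async def list_works(db=Depends(get_client)):
    works = await list_works_use_case(WorkRepository(db))
    return [WorkModel(**w.__dict__) for w in works]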
| 34.65
| 101
| 0.746032
|
e348be446d860ef514d588759be2dbd6de2b4764
| 651
|
py
|
Python
|
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | null | null | null |
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | 2
|
2021-09-07T07:06:00.000Z
|
2021-09-07T07:24:26.000Z
|
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | null | null | null |
from essentials_kit_management.interactors.storages.storage_interface \
import StorageInterface
from essentials_kit_management.interactors.presenters.presenter_interface \
import PresenterInterface
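# --- Hedged completion: the interactor class itself is truncated in this
# dump. Typical clean-architecture shape for this module; the method and
# attribute names below are assumptions. ---
class GetPayThroughDetailsInteractor:

    def __init__(self, storage: StorageInterface, presenter: PresenterInterface):
        self.storage = storage
        self.presenter = presenter

    def get_pay_through_details(self):
        pay_through_details = self.storage.get_pay_through_details()
        return self.presenter.get_response_for_pay_through_details(
            pay_through_details
        )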
| 34.263158
| 76
| 0.761905
|