hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55a8a143755092a98ad8640901e8dbdb8d58845f | 9,439 | py | Python | install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import subprocess
from threading import Thread
from Queue import Queue
import tempfile
import sys
import traceback
from .logger import get_logger
logger = get_logger(__name__)
| 36.727626 | 123 | 0.604831 |
55acdcacf4ba82a80f3cb7a16e721e05d9bb07b7 | 127 | py | Python | knock-knock4/knockpy/__init__.py | abhinashjain/proxyfuzzer | 9c372390afe4cd3d277bcaaeb289e4c8ef398e5e | [
"BSD-3-Clause"
] | 1 | 2017-03-14T21:16:43.000Z | 2017-03-14T21:16:43.000Z | knock-knock4/knockpy/__init__.py | abhinashjain/proxyfuzzer | 9c372390afe4cd3d277bcaaeb289e4c8ef398e5e | [
"BSD-3-Clause"
] | 1 | 2016-12-19T16:35:53.000Z | 2016-12-22T19:40:30.000Z | knock-knock4/knockpy/__init__.py | abhinashjain/proxyfuzzer | 9c372390afe4cd3d277bcaaeb289e4c8ef398e5e | [
"BSD-3-Clause"
] | 2 | 2018-06-15T02:00:49.000Z | 2021-09-08T19:15:35.000Z | import os
_ROOT = os.path.abspath(os.path.dirname(__file__))
| 25.4 | 50 | 0.748031 |
55ae9ba4b65519bc33be7de8562a205f27c9a655 | 745 | py | Python | brilws/cli/briltag_insertdata.py | xiezhen/brilws | e3652dd4506dff9d713184ff623b59bc11fbe2c7 | [
"MIT"
] | 1 | 2017-03-23T16:26:06.000Z | 2017-03-23T16:26:06.000Z | brilws/cli/briltag_insertdata.py | xiezhen/brilws | e3652dd4506dff9d713184ff623b59bc11fbe2c7 | [
"MIT"
] | 1 | 2017-03-24T15:02:20.000Z | 2017-10-02T13:43:26.000Z | brilws/cli/briltag_insertdata.py | xiezhen/brilws | e3652dd4506dff9d713184ff623b59bc11fbe2c7 | [
"MIT"
] | 1 | 2019-12-06T09:23:01.000Z | 2019-12-06T09:23:01.000Z | """
Usage:
briltag insertdata [options]
Options:
-h --help Show this screen.
-c CONNECT Service name [default: onlinew]
-p AUTHPATH Authentication file
--name TAGNAME Name of the data tag
--comments COMMENTS Comments on the tag
"""
from docopt import docopt
from schema import Schema
from brilws.cli import clicommonargs
if __name__ == '__main__':
print (docopt(__doc__,options_first=True))
| 25.689655 | 93 | 0.625503 |
55b3f38a36b36ad5c48a9910aaae79865f7775ae | 17,152 | py | Python | techniques/volumerec.py | lleonart1984/rendezvous | f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da | [
"MIT"
] | null | null | null | techniques/volumerec.py | lleonart1984/rendezvous | f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da | [
"MIT"
] | null | null | null | techniques/volumerec.py | lleonart1984/rendezvous | f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da | [
"MIT"
] | null | null | null | from rendering.manager import *
from rendering.scenes import *
from rendering.training import *
import random
import glm
import os
import numpy as np
import math
__VOLUME_RECONSTRUCTION_SHADERS__ = os.path.dirname(__file__)+"/shaders/VR"
compile_shader_sources(__VOLUME_RECONSTRUCTION_SHADERS__)
| 45.983914 | 136 | 0.65007 |
55b6264d004418dd7f3a7bb277c12e4c208f7910 | 868 | py | Python | basics/merge_sort.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | null | null | null | basics/merge_sort.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | null | null | null | basics/merge_sort.py | zi-NaN/algorithm_exercise | 817916a62774145fe6387b715f76c5badbf99197 | [
"MIT"
] | 1 | 2018-11-21T05:14:07.000Z | 2018-11-21T05:14:07.000Z |
# test
if __name__ == '__main__':
print(_merge_sort([1, 3, 2])) | 24.111111 | 48 | 0.483871 |
55b6ea1d5523af9cb10562cdce01d07f5fcf19a0 | 2,605 | py | Python | main.py | famaxth/Amazon-Parser | efc236459f2c9d723e02c87e5ebd3b1cf5a09e58 | [
"MIT"
] | null | null | null | main.py | famaxth/Amazon-Parser | efc236459f2c9d723e02c87e5ebd3b1cf5a09e58 | [
"MIT"
] | null | null | null | main.py | famaxth/Amazon-Parser | efc236459f2c9d723e02c87e5ebd3b1cf5a09e58 | [
"MIT"
] | null | null | null | # - *- coding: utf- 8 - *-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
options = Options()
options.headless = True
path = 'path/to/chromedriver.exe' # You need to change this
parser()
| 56.630435 | 222 | 0.603839 |
55b7410f25633189b2b806b878e6eeb2f52c7ecc | 679 | py | Python | Data_Science/Python-Estatistica/stats-ex8.py | maledicente/cursos | 00ace48da7e48b04485e4ca97b3ca9ba5f33a283 | [
"MIT"
] | 1 | 2021-05-03T22:59:38.000Z | 2021-05-03T22:59:38.000Z | Data_Science/Python-Estatistica/stats-ex8.py | maledicente/cursos | 00ace48da7e48b04485e4ca97b3ca9ba5f33a283 | [
"MIT"
] | null | null | null | Data_Science/Python-Estatistica/stats-ex8.py | maledicente/cursos | 00ace48da7e48b04485e4ca97b3ca9ba5f33a283 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
t = np.linspace(0, 5, 500)
s0 = 0.5
v0 = 2.0
a = 1.5
s_noise = 0.5 * np.random.normal(size=t.size)
s = cinematica(t,s0,v0,a)
sdata = s + s_noise
coefs, pcov = curve_fit(cinematica, t, sdata)
plt.plot(t, sdata, 'b-', label='Deslocamento')
plt.plot(t, cinematica(t, *coefs), 'r-',label='Funo ajustada')
plt.xlabel('Tempo')
plt.ylabel('Deslocamento')
plt.title('Ajuste de curva')
plt.legend()
plt.show()
print("Espao inicial= %f" %coefs[0])
print("Velocidade inicial= %f" %coefs[1])
print("Acelerao= %f" %coefs[2]) | 20.575758 | 64 | 0.673049 |
55b9023ec88372bc40c1756a9431095fe3d52bb6 | 1,059 | py | Python | xgboost_model.py | aravindpadman/Riiid-Answer-Correctness-Prediction | 127037d372352af969fbfa335bff8bad84afb603 | [
"MIT"
] | null | null | null | xgboost_model.py | aravindpadman/Riiid-Answer-Correctness-Prediction | 127037d372352af969fbfa335bff8bad84afb603 | [
"MIT"
] | null | null | null | xgboost_model.py | aravindpadman/Riiid-Answer-Correctness-Prediction | 127037d372352af969fbfa335bff8bad84afb603 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import optuna
import xgboost
train = pd.read_csv("~/kaggledatasets/riiid-test-answer-prediction/train.csv", nrows=3e6,
dtype={'row_id': 'int64',
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'user_answer': 'int8',
'answered_correctly': 'int8',
'prior_question_elapsed_time': 'float64',
'prior_question_had_explanation': 'boolean'},
)
| 28.621622 | 90 | 0.481586 |
55b93809c23b2f231b7acf1f7f0608d40af2f69c | 1,828 | py | Python | run.py | Gandor26/covid-open | 50dcb773160edc16b107785a6bb32ae6f82fc9a7 | [
"MIT"
] | 12 | 2020-10-29T20:52:26.000Z | 2021-11-10T14:11:59.000Z | run.py | Gandor26/covid-open | 50dcb773160edc16b107785a6bb32ae6f82fc9a7 | [
"MIT"
] | 1 | 2021-02-16T09:48:39.000Z | 2021-03-20T04:21:54.000Z | run.py | Gandor26/covid-open | 50dcb773160edc16b107785a6bb32ae6f82fc9a7 | [
"MIT"
] | 1 | 2020-12-05T15:51:43.000Z | 2020-12-05T15:51:43.000Z | from typing import Optional, Dict
from pathlib import Path
from copy import deepcopy
from tqdm import tqdm
import torch as pt
from torch import Tensor, nn
from torch.optim import Adam
| 29.967213 | 64 | 0.565646 |
55b9f31d49258d834824cb0904941fbaf15740b7 | 898 | py | Python | authors/apps/profiles/models.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/profiles/models.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | 43 | 2018-10-25T10:14:52.000Z | 2022-03-11T23:33:46.000Z | authors/apps/profiles/models.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | 4 | 2018-10-29T07:04:58.000Z | 2020-04-02T14:15:10.000Z | from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
def user_was_created(sender, instance, created, ** kwargs):
""" Listen for when a user is creted and create a profile"""
created and Profile.objects.create(
user=instance, username=instance.username
)
post_save.connect(user_was_created, sender=settings.AUTH_USER_MODEL)
| 26.411765 | 68 | 0.711581 |
55bb1301f3cfe948295e5ac6f60a5f73e88c2c17 | 975 | py | Python | python/StatsUtil.py | cbaldassano/Parcellating-connectivity | a98142a6b0dc10e9cb6f6e603cb5334996d018ec | [
"Unlicense"
] | 2 | 2020-08-17T21:06:28.000Z | 2021-05-10T14:37:16.000Z | python/StatsUtil.py | cbaldassano/Parcellating-connectivity | a98142a6b0dc10e9cb6f6e603cb5334996d018ec | [
"Unlicense"
] | null | null | null | python/StatsUtil.py | cbaldassano/Parcellating-connectivity | a98142a6b0dc10e9cb6f6e603cb5334996d018ec | [
"Unlicense"
] | 3 | 2018-07-06T17:08:47.000Z | 2019-10-09T18:58:31.000Z | import numpy as np
# Compute normalized mutual information between two parcellations z1 and z2
# (Approximately) return whether an array is symmetric | 24.375 | 75 | 0.610256 |
55bb525b00d7081596041b440b9ccf7eb9668e9b | 31,939 | py | Python | tests/test_model.py | olzama/xigt | 60daa7201258ec02330264317e7a2315d929bd86 | [
"MIT"
] | 17 | 2017-01-14T23:29:07.000Z | 2022-02-23T08:50:09.000Z | tests/test_model.py | olzama/xigt | 60daa7201258ec02330264317e7a2315d929bd86 | [
"MIT"
] | 31 | 2015-02-11T17:25:59.000Z | 2015-12-07T21:04:39.000Z | tests/test_model.py | olzama/xigt | 60daa7201258ec02330264317e7a2315d929bd86 | [
"MIT"
] | 4 | 2018-02-04T17:21:53.000Z | 2021-11-29T16:33:45.000Z | import pytest
from xigt import XigtCorpus, Igt, Tier, Item, Metadata, Meta, MetaChild
from xigt.errors import XigtError, XigtStructureError
| 31.312745 | 82 | 0.558032 |
55bbc7c595e31e90737d59f74df6dbd5b4ab1f77 | 121 | py | Python | api_v2/views.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null | api_v2/views.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null | api_v2/views.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null |
from django.http import JsonResponse | 30.25 | 63 | 0.702479 |
55bbcfb0657fa9d696e2cb0dec828c20a4c0e1c7 | 156 | py | Python | rpi/LiDAR.py | shadowsburney/LiDAR | f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a | [
"MIT"
] | null | null | null | rpi/LiDAR.py | shadowsburney/LiDAR | f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a | [
"MIT"
] | null | null | null | rpi/LiDAR.py | shadowsburney/LiDAR | f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a | [
"MIT"
] | null | null | null | from sensor import Sensor
from stepper import Stepper
sensor = Sensor()
stepper = Stepper(100)
#stepper.start()
while True:
print(sensor.measure())
| 13 | 27 | 0.730769 |
55bc6334d6372aec8c3f097cf63d231873013d04 | 1,351 | py | Python | peering/migrations/0051_auto_20190818_1816.py | schiederme/peering-manager | 2d29427fd4f2b91a5208f31e1a7ad69eaf82924c | [
"Apache-2.0"
] | 173 | 2020-08-08T15:38:08.000Z | 2022-03-21T11:35:25.000Z | peering/migrations/0051_auto_20190818_1816.py | schiederme/peering-manager | 2d29427fd4f2b91a5208f31e1a7ad69eaf82924c | [
"Apache-2.0"
] | 247 | 2017-12-26T12:55:34.000Z | 2020-08-08T11:57:35.000Z | peering/migrations/0051_auto_20190818_1816.py | schiederme/peering-manager | 2d29427fd4f2b91a5208f31e1a7ad69eaf82924c | [
"Apache-2.0"
] | 63 | 2017-10-13T06:46:05.000Z | 2020-08-08T00:41:57.000Z | # Generated by Django 2.2.4 on 2019-08-18 16:16
from django.db import migrations
| 32.95122 | 86 | 0.6151 |
55beea09bbe265b3360f6e0c1ea21bb757b756fd | 7,784 | py | Python | pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HP-ICF-IPV6-RA-GUARD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-IPV6-RA-GUARD-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:34:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, Gauge32, Counter64, IpAddress, TimeTicks, Integer32, iso, Bits, ObjectIdentity, Unsigned32, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "Gauge32", "Counter64", "IpAddress", "TimeTicks", "Integer32", "iso", "Bits", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "MibIdentifier")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
hpicfIpv6RAGuard = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87))
hpicfIpv6RAGuard.setRevisions(('2011-03-16 05:24',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setLastUpdated('201103160524Z')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setOrganization('Hewlett-Packard Company HP Networking')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setDescription('This MIB module contains HP proprietary objects for managing RA Guard.')
hpicfIpv6RAGuardObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1))
hpicfIpv6RAGuardConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1))
hpicfRAGuardPortTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1), )
if mibBuilder.loadTexts: hpicfRAGuardPortTable.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortTable.setDescription('Per-interface configuration for RA Guard. Ra Guard is used to block IPv6 router advertisements and ICMPv6 router redirects. The log option is to enable debug logging for troubleshooting. It uses a lot of CPU and should be used only for short periods of time. To display debug logging, use debug security ra-guard command.')
hpicfRAGuardPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setDescription('RA Guard configuration information for a single port.')
hpicfRAGuardPortBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setDescription('This object indicates whether this port is blocked for Router Advertisements and Redirects.')
hpicfRAGuardPortBlockedRAs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setDescription('This number of Router Advertisements blocked for the port.')
hpicfRAGuardPortBlockedRedirs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setDescription('This number of Router Redirects blocked for the port.')
hpicfRAGuardPortLog = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfRAGuardPortLog.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortLog.setDescription('Whether to log RAs and Redirects for the port. The log option is to enable debug logging for troubleshooting. It uses a lot of CPU and should be used only for short periods of time.')
hpicfRAGuardLastErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noError", 1), ("insufficientHardwareResources", 2), ("genericError", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setDescription('Error code of the last error that occurred. A non-zero value indicates that the last operation performed by this instance did not succeed.')
hpicfIpv6RAGuardConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2))
hpicfIpv6RAGuardCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1))
hpicfIpv6RAGuardGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2))
hpicfIpv6RAGuardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlocked"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRAs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRedirs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortLog"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardLastErrorCode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpv6RAGuardGroup = hpicfIpv6RAGuardGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIpv6RAGuardGroup.setDescription('A collection of objects providing configuration for Ipv6 RA Guard.')
hpicfIpv6RAGuardCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfIpv6RAGuardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpv6RAGuardCompliance = hpicfIpv6RAGuardCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfIpv6RAGuardCompliance.setDescription('The compliance statement for devices support of HP-ICF-IPV6-RA-GUARD-MIB.')
mibBuilder.exportSymbols("HP-ICF-IPV6-RA-GUARD-MIB", hpicfIpv6RAGuardConfig=hpicfIpv6RAGuardConfig, hpicfRAGuardPortLog=hpicfRAGuardPortLog, hpicfIpv6RAGuardCompliances=hpicfIpv6RAGuardCompliances, hpicfIpv6RAGuardGroup=hpicfIpv6RAGuardGroup, hpicfIpv6RAGuardCompliance=hpicfIpv6RAGuardCompliance, hpicfRAGuardPortEntry=hpicfRAGuardPortEntry, hpicfIpv6RAGuardObjects=hpicfIpv6RAGuardObjects, PYSNMP_MODULE_ID=hpicfIpv6RAGuard, hpicfRAGuardPortBlocked=hpicfRAGuardPortBlocked, hpicfRAGuardPortTable=hpicfRAGuardPortTable, hpicfRAGuardPortBlockedRAs=hpicfRAGuardPortBlockedRAs, hpicfRAGuardPortBlockedRedirs=hpicfRAGuardPortBlockedRedirs, hpicfRAGuardLastErrorCode=hpicfRAGuardLastErrorCode, hpicfIpv6RAGuardConformance=hpicfIpv6RAGuardConformance, hpicfIpv6RAGuardGroups=hpicfIpv6RAGuardGroups, hpicfIpv6RAGuard=hpicfIpv6RAGuard)
| 127.606557 | 828 | 0.776208 |
55bfeb24ff5584cd80bb449c46db4ec74f53fd3c | 102 | py | Python | API/utils/tokenizer.py | accordproject/labs-cicero-classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | 2 | 2021-07-07T01:06:18.000Z | 2021-11-12T18:54:21.000Z | API/utils/tokenizer.py | accordproject/labs_cicero_classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | 3 | 2021-06-25T12:40:23.000Z | 2022-02-14T13:42:30.000Z | API/utils/tokenizer.py | accordproject/labs_cicero_classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | null | null | null | from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained("roberta-base") | 51 | 60 | 0.872549 |
55c01bcc5785d0af3f6437a91b853450fda2bb63 | 2,531 | py | Python | gdesk/panels/imgview/quantiles.py | thocoo/gamma-desk | 9cb63a65fe23e30e155b3beca862f369b7fa1b7e | [
"Apache-2.0"
] | null | null | null | gdesk/panels/imgview/quantiles.py | thocoo/gamma-desk | 9cb63a65fe23e30e155b3beca862f369b7fa1b7e | [
"Apache-2.0"
] | 8 | 2021-04-09T11:31:43.000Z | 2021-06-09T09:07:18.000Z | gdesk/panels/imgview/quantiles.py | thocoo/gamma-desk | 9cb63a65fe23e30e155b3beca862f369b7fa1b7e | [
"Apache-2.0"
] | null | null | null | import numpy as np
from .fasthist import hist2d
stdquant = np.ndarray(13)
stdquant[0] = (0.0000316712418331200) #-4 sdev
stdquant[1] = (0.0013498980316301000) #-3 sdev
stdquant[2] = (0.0227501319481792000) #-2 sdev
stdquant[3] = (0.05)
stdquant[4] = (0.1586552539314570000) #-1 sdev or lsdev
stdquant[5] = (0.25) #first quartile
stdquant[6] = (0.50) #median
stdquant[7] = (0.75) #third quartile
stdquant[8] = (0.8413447460685430000) #+1 sdev or usdev
stdquant[9] = (0.95)
stdquant[10] = (0.9772498680518210000) #+2 sdev
stdquant[11] = (0.9986501019683700000) #+3 sdev
stdquant[12] = (0.9999683287581670000) #+4 sdev
| 34.671233 | 83 | 0.590281 |
55c0577110244c4fafd7e8c73ddb2adb8d710299 | 10,584 | py | Python | isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 36.371134 | 305 | 0.647865 |
55c0c3ecc4384f35e0ec61e90038c58f6fa656b9 | 89 | py | Python | languages/116/examples/test_problem.py | c3333/sphereengine-languages | ef76cbffe67407d88519ba1e4bfaa20e3a55ccff | [
"Apache-2.0"
] | 5 | 2019-05-05T15:47:24.000Z | 2021-07-22T14:29:13.000Z | languages/116/examples/test_problem.py | c3333/sphereengine-languages | ef76cbffe67407d88519ba1e4bfaa20e3a55ccff | [
"Apache-2.0"
] | 1 | 2022-03-29T14:20:04.000Z | 2022-03-29T14:20:04.000Z | languages/116/examples/test_problem.py | c3333/sphereengine-languages | ef76cbffe67407d88519ba1e4bfaa20e3a55ccff | [
"Apache-2.0"
] | 4 | 2020-02-25T14:30:43.000Z | 2021-05-12T10:05:05.000Z | from sys import stdin
for line in stdin:
n = int(line)
if n == 42:
break
print(n)
| 9.888889 | 21 | 0.629213 |
e9480334f3e96fb87240d084ea753201b541d895 | 367 | py | Python | Python/Effective Python/item19.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | Python/Effective Python/item19.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | Python/Effective Python/item19.py | Vayne-Lover/Effective | 05f0a08bec8eb112fdb4e7a489d0e33bc81522ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__=="__main__":
print(remainder(20,7))
print(remainder(20,divisor=7))
print(remainder(number=20,divisor=7))
print(remainder(divisor=7,number=20))
print(flow_rate(0.5,3))
print(flow_rate(6,3,100))
| 21.588235 | 39 | 0.708447 |
e94dc72d516776aab0f1e035f052d60121476db1 | 1,981 | py | Python | create_h5ad.py | xmuyulab/DAISM-XMBD | 916e18a1f111789a1c0bd3c1209d5a73813f3d3a | [
"MIT"
] | 2 | 2021-11-05T00:43:16.000Z | 2021-12-14T08:39:29.000Z | create_h5ad.py | biosyy/DAISM-XMBD | a76f976db8c33ef33f78533a5a2be50a85148e79 | [
"MIT"
] | 2 | 2021-01-14T19:40:46.000Z | 2021-01-14T19:41:14.000Z | create_h5ad.py | biosyy/DAISM-XMBD | a76f976db8c33ef33f78533a5a2be50a85148e79 | [
"MIT"
] | 1 | 2021-08-30T15:11:45.000Z | 2021-08-30T15:11:45.000Z | ##############################
## cread purified h5ad file ##
##############################
# input: annotation table and the whole expression profile
# output: purified h5ad file
import os
import pandas as pd
import anndata
import argparse
import gc
import numpy as np
parser = argparse.ArgumentParser(description='cread purified h5ad file for DAISM-XMBD')
parser.add_argument("-anno", type=str, help="annotation table (contains 'sample.name' and 'cell.type' two columns)", default=None)
parser.add_argument("-exp", type=str, help="the whole expression profile (sample.name in column and gene symbol in row)", default=None)
parser.add_argument("-outdir", type=str, help="the directory to store h5ad file", default="example/")
parser.add_argument("-prefix",type=str,help="the prefix of h5ad file",default= "purified")
if __name__ == "__main__":
main() | 34.155172 | 135 | 0.649167 |
e94e1af31de28cb3ee32e1feeddbef4991bf43d4 | 1,424 | py | Python | FM_Tuning.py | RomanGutin/GEMSEC | cb2c26d4747cbd3d4c048787ca41665ef0e64155 | [
"MIT"
] | null | null | null | FM_Tuning.py | RomanGutin/GEMSEC | cb2c26d4747cbd3d4c048787ca41665ef0e64155 | [
"MIT"
] | null | null | null | FM_Tuning.py | RomanGutin/GEMSEC | cb2c26d4747cbd3d4c048787ca41665ef0e64155 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 13:56:44 2018
@author: RomanGutin
"""
import pandas as pd
import numpy as np
#Frequency Tuning Loop
amino_letter = ['A','R','D','N','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V']
length_scores =[4,8,6,6,5,7,7,4,7,5,6,8,7,8,5,5,5,9,8,5]
FM_df = pd.DataFrame(0, index= just_let.index, columns= range(0,81))
FM_score_dict = dict(zip(amino_letter,length_scores))
#splitting amino letter into new independent variables based on its length score#
fm_letter_dict ={}
for letter in amino_letter:
new_vars =[]
for i in range(FM_score_dict[letter]):
new_vars.append(letter+str(i+1))
fm_letter_dict[letter]=new_vars
#generate new FM_tuned dataframe
for seq in FM_df.index:
letter_list= list(seq)
for letter in letter_list:
for var in fm_letter_dict[letter]:
row= FM_df.loc[seq,:]
spot= row[row==0].index[0]
FM_df.loc[seq,spot]= var
FM_df= pd.read_csv('Frequency Tuned Dataset') #data after frequency tuning wit
FM_df.set_index('sequence', inplace= True)
FM_df_arr = np.array(FM_df.values, dtype=[('O', np.float)]).astype(np.float)
#New letter to weight holding the new FM tuned variables
ltw_fm_MLE={}
for amino in amino_letter:
for var in fm_letter_dict[amino]:
ltw_fm_MLE[var]= ltw_AM_n[amino]
ltw_fm_MLE = np.load('ltw_fm_MLE.npy').item()
| 30.297872 | 96 | 0.656601 |
e94e9483c973c25abe2c71d5816ab7d9b774441e | 692 | py | Python | unified_api/brokers/kafka/consumer.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | 1 | 2021-04-06T00:43:26.000Z | 2021-04-06T00:43:26.000Z | unified_api/brokers/kafka/consumer.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | null | null | null | unified_api/brokers/kafka/consumer.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | null | null | null | from kafka import KafkaConsumer
| 34.6 | 109 | 0.601156 |
e94ef8f2fd09f77bca0e59bab465fb16e55c0ca1 | 2,159 | py | Python | utils.py | mino2401200231/File-convertor | 6fb438dc5f37bf0efd78e18e4848b4cdb0331343 | [
"MIT"
] | null | null | null | utils.py | mino2401200231/File-convertor | 6fb438dc5f37bf0efd78e18e4848b4cdb0331343 | [
"MIT"
] | null | null | null | utils.py | mino2401200231/File-convertor | 6fb438dc5f37bf0efd78e18e4848b4cdb0331343 | [
"MIT"
] | 2 | 2021-08-12T06:37:52.000Z | 2021-09-05T13:03:36.000Z | # utilities
import os
from re import sub
import uuid
import subprocess
# Image To Pdf
import img2pdf
# PDF To Images
from pdf2image import convert_from_path
# PDF To Word
from pdf2docx import parse
_BASE_DIR = os.getcwd()
_BASE_DIR_FILE = os.path.join(_BASE_DIR, "files")
| 26.329268 | 117 | 0.656322 |
e950fb1913401e7e3634e1210cfe24f9fddcf950 | 2,026 | py | Python | screens/tasks/tasks.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | 80 | 2017-11-13T21:58:55.000Z | 2022-01-03T20:10:42.000Z | screens/tasks/tasks.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | null | null | null | screens/tasks/tasks.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | 21 | 2017-11-14T09:47:41.000Z | 2021-11-23T06:44:31.000Z | from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, ListProperty, BooleanProperty
from kivy.app import App
from kivy.logger import Logger
from library_widgets import TrackingScreenMixin
from utils import import_kv
import_kv(__file__)
| 35.54386 | 106 | 0.673248 |
e954754c8db1dbc45662c97eec7de33aed7d3e19 | 1,240 | py | Python | imclassify/train_model.py | AdamSpannbauer/imclassify | 27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd | [
"MIT"
] | null | null | null | imclassify/train_model.py | AdamSpannbauer/imclassify | 27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd | [
"MIT"
] | null | null | null | imclassify/train_model.py | AdamSpannbauer/imclassify | 27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd | [
"MIT"
] | null | null | null | """Train logistic regression model on hdf5 features for classification
Modified from:
https://gurus.pyimagesearch.com/topic/transfer-learning-example-dogs-and-cats/
"""
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
def train_model(h5py_db, model_output='model.pickle', percent_train=1.0):
"""Train logistic regression classifier
:param h5py_db: path to HDF5 database containing 'features', 'labels', & 'label_names'
:param model_output: path to save trained model to using pickle
:param percent_train: percent of images to be used for training (instead of testing)
:return: None; output is written to `model_output`
"""
i = int(h5py_db['labels'].shape[0] * percent_train)
# C decided with sklearn.model_selection.GridSearchCV
model = LogisticRegression(C=0.1)
model.fit(h5py_db['features'][:i], h5py_db['labels'][:i])
if percent_train < 1.0:
preds = model.predict(h5py_db['features'][i:])
print(classification_report(h5py_db['labels'][i:], preds,
target_names=h5py_db['label_names']))
with open(model_output, 'wb') as f:
f.write(pickle.dumps(model))
| 37.575758 | 90 | 0.704839 |
e955b53af943d2f078f97e589977586caea5ae03 | 1,760 | py | Python | Test/final/V5_baseline_CC_ref/aggregate.py | WangWenhao0716/ISC-Track1-Submission | 3484142c0550262c90fc229e5e0ba719c58c592d | [
"MIT"
] | 46 | 2021-10-31T08:02:51.000Z | 2022-03-11T08:42:30.000Z | Test/final/V5_baseline_CC_ref/aggregate.py | WangWenhao0716/ISC-Track1-Submission | 3484142c0550262c90fc229e5e0ba719c58c592d | [
"MIT"
] | 3 | 2021-11-18T09:35:45.000Z | 2022-03-31T01:20:34.000Z | Test/final/V5_baseline_CC_ref/aggregate.py | WangWenhao0716/ISC-Track1-Submission | 3484142c0550262c90fc229e5e0ba719c58c592d | [
"MIT"
] | 8 | 2021-12-01T08:02:08.000Z | 2022-02-26T13:29:36.000Z | import pandas as pd
v_4 = pd.read_csv('50/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_4['query_id'])
v_4['query_id'] = list(v_4['reference_id'])
v_4['reference_id'] = temp
v_5 = pd.read_csv('ibn/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_5['query_id'])
v_5['query_id'] = list(v_5['reference_id'])
v_5['reference_id'] = temp
v_6 = pd.read_csv('152/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_6['query_id'])
v_6['query_id'] = list(v_6['reference_id'])
v_6['reference_id'] = temp
v_4_query = list(v_4['query_id'])
v_4_reference = list(v_4['reference_id'])
v_4_com = []
for i in range(len(v_4)):
v_4_com.append((v_4_query[i],v_4_reference[i]))
v_5_query = list(v_5['query_id'])
v_5_reference = list(v_5['reference_id'])
v_5_com = []
for i in range(len(v_5)):
v_5_com.append((v_5_query[i],v_5_reference[i]))
v_6_query = list(v_6['query_id'])
v_6_reference = list(v_6['reference_id'])
v_6_com = []
for i in range(len(v_6)):
v_6_com.append((v_6_query[i],v_6_reference[i]))
inter_45 = list(set(v_4_com).intersection(set(v_5_com)))
inter_46 = list(set(v_4_com).intersection(set(v_6_com)))
inter_456 = list(set(inter_45).intersection(set(inter_46)))
new_456 = pd.DataFrame()
q = []
for i in range(len(inter_456)):
q.append(inter_456[i][0])
r = []
for i in range(len(inter_456)):
r.append(inter_456[i][1])
new_456['query_id'] = q
new_456['reference_id'] = r
df_2 = pd.merge(new_456, v_4, on=['query_id','reference_id'], how='inner')
df_3 = pd.merge(new_456, v_5, on=['query_id','reference_id'], how='inner')
df_4 = pd.merge(new_456, v_6, on=['query_id','reference_id'], how='inner')
fast_456 = pd.concat((df_2,df_3,df_4))
fast_456.to_csv('R-baseline-CC-234-50k.csv',index=False)
| 31.428571 | 74 | 0.710795 |
e95640499c478bef869502f2fe8e6dcadc430eb2 | 399 | py | Python | src/commands/i_stat/anticheat.py | slimsevernake/osbb-bot | 3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8 | [
"MIT"
] | 9 | 2018-08-19T12:55:58.000Z | 2021-07-17T15:38:40.000Z | src/commands/i_stat/anticheat.py | slimsevernake/osbb-bot | 3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8 | [
"MIT"
] | 124 | 2018-07-31T13:43:58.000Z | 2022-03-11T23:27:43.000Z | src/commands/i_stat/anticheat.py | slimsevernake/osbb-bot | 3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8 | [
"MIT"
] | 3 | 2019-10-21T13:18:14.000Z | 2021-02-09T11:05:10.000Z | from src.utils.cache import cache
| 23.470588 | 69 | 0.649123 |
e9569e3a4e8763ed40f2c7965c464907cae6ec57 | 744 | py | Python | tutorial/flask-api-mongo/app/services/mail_service.py | carrenolg/python | 7c1f0013d911177ce3bc2c5ea58b8e6e562b7282 | [
"Apache-2.0"
] | null | null | null | tutorial/flask-api-mongo/app/services/mail_service.py | carrenolg/python | 7c1f0013d911177ce3bc2c5ea58b8e6e562b7282 | [
"Apache-2.0"
] | null | null | null | tutorial/flask-api-mongo/app/services/mail_service.py | carrenolg/python | 7c1f0013d911177ce3bc2c5ea58b8e6e562b7282 | [
"Apache-2.0"
] | null | null | null | from threading import Thread
from flask_mail import Mail, Message
from resources.errors import InternalServerError
mail = Mail(app=None)
app = None
| 25.655172 | 66 | 0.711022 |
e9570255d9896891bde513fb7630bb22b041b8d0 | 18,541 | py | Python | vxsandbox/resources/tests/test_http.py | praekeltfoundation/vumi-sandbox | 1e2dfca8325ce98e52fe32a072749fe4cf7f448d | [
"BSD-3-Clause"
] | 1 | 2021-05-26T08:38:28.000Z | 2021-05-26T08:38:28.000Z | vxsandbox/resources/tests/test_http.py | praekelt/vumi-sandbox | 1e2dfca8325ce98e52fe32a072749fe4cf7f448d | [
"BSD-3-Clause"
] | 24 | 2015-03-04T08:33:12.000Z | 2016-08-18T07:57:12.000Z | vxsandbox/resources/tests/test_http.py | praekeltfoundation/vumi-sandbox | 1e2dfca8325ce98e52fe32a072749fe4cf7f448d | [
"BSD-3-Clause"
] | null | null | null | import base64
import json
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_NONE,
SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD)
from twisted.web.http_headers import Headers
from twisted.internet.defer import inlineCallbacks, fail, succeed
from vxsandbox.resources.http import (
HttpClientContextFactory, HttpClientPolicyForHTTPS, make_context_factory,
HttpClientResource)
from vxsandbox.resources.tests.utils import ResourceTestCaseBase
| 42.138636 | 79 | 0.644356 |
e9576153377cb8542e00446bc31a32f660d4a2a6 | 99 | py | Python | examples/port_demo.py | smilelight/lightUtils | e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac | [
"MIT"
] | 2 | 2020-01-23T02:03:19.000Z | 2020-12-13T09:05:45.000Z | examples/port_demo.py | smilelight/lightUtils | e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac | [
"MIT"
] | null | null | null | examples/port_demo.py | smilelight/lightUtils | e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac | [
"MIT"
] | null | null | null | from lightutils import get_free_tcp_port
port = get_free_tcp_port()
print(port)
print(type(port))
| 16.5 | 40 | 0.808081 |
e95a4fa6b39694c0762d544398c6a91dc4eb000f | 722 | py | Python | soundDB/__init__.py | gjoseph92/soundDB2 | 4d9cc93cc596a5089233f17b0b8be252f73e1224 | [
"CC0-1.0"
] | 3 | 2017-05-16T19:37:32.000Z | 2020-03-29T21:54:33.000Z | soundDB/__init__.py | gjoseph92/soundDB2 | 4d9cc93cc596a5089233f17b0b8be252f73e1224 | [
"CC0-1.0"
] | 19 | 2016-12-02T20:47:24.000Z | 2021-10-05T19:01:01.000Z | soundDB/__init__.py | gjoseph92/soundDB2 | 4d9cc93cc596a5089233f17b0b8be252f73e1224 | [
"CC0-1.0"
] | 2 | 2017-05-10T23:01:06.000Z | 2019-12-27T19:49:29.000Z | from .accessor import Accessor
from . import parsers
import inspect
def populateAccessors():
"""
Find all filetype-specific Accessor subclasses in the parsers file (i.e. NVSPL, SRCID, etc.) and instantiate them.
This way, one instance of each Accessor is added to the soundDB namespace under the name of the Endpoint it uses.
"""
predicate = lambda obj: inspect.isclass(obj) and issubclass(obj, Accessor) and obj is not Accessor
specificAccessorSubclasses = inspect.getmembers(parsers, predicate)
accessors = { cls.endpointName: cls for name, cls in specificAccessorSubclasses }
return accessors
globals().update(populateAccessors())
del inspect, accessor, parsers, populateAccessors
| 34.380952 | 118 | 0.756233 |
e95c3c23ff20e2cb3d818ef3d5c5a11d27117013 | 3,953 | py | Python | ipbb/models/ipbb.py | aagusti/i-pbb | 8178f68744b440f96f2c3d114c2485d728655e24 | [
"MIT"
] | null | null | null | ipbb/models/ipbb.py | aagusti/i-pbb | 8178f68744b440f96f2c3d114c2485d728655e24 | [
"MIT"
] | null | null | null | ipbb/models/ipbb.py | aagusti/i-pbb | 8178f68744b440f96f2c3d114c2485d728655e24 | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy import (
Column,
Integer,
Text,
DateTime,
SmallInteger,
BigInteger,
String,
Date,
ForeignKey,
UniqueConstraint
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship
)
from ..tools import as_timezone
from ..models import Base, CommonModel, DefaultModel, DBSession
| 39.53 | 85 | 0.698963 |
e95c5e6fc88c9d5b12bafc54c0d0afb1690c36cf | 556 | py | Python | tests/testLoadMapFromString.py | skowronskij/OGCServer | 3fd11438180944ffa43e315c6390e89437a28f4e | [
"BSD-3-Clause"
] | 90 | 2015-04-30T22:13:14.000Z | 2022-02-16T17:30:11.000Z | tests/testLoadMapFromString.py | skowronskij/OGCServer | 3fd11438180944ffa43e315c6390e89437a28f4e | [
"BSD-3-Clause"
] | 6 | 2019-09-09T06:07:27.000Z | 2020-06-17T09:52:49.000Z | tests/testLoadMapFromString.py | skowronskij/OGCServer | 3fd11438180944ffa43e315c6390e89437a28f4e | [
"BSD-3-Clause"
] | 28 | 2015-05-12T09:08:17.000Z | 2021-07-02T11:53:29.000Z | import nose
import os
from ogcserver.WMS import BaseWMSFactory
| 27.8 | 63 | 0.676259 |
e95cb362167c296066d686777e92e50fed2083ee | 977 | py | Python | core/models/transaction.py | soslaio/openme | b6e8c87279363a62992b5db14646dbaa655dc936 | [
"MIT"
] | null | null | null | core/models/transaction.py | soslaio/openme | b6e8c87279363a62992b5db14646dbaa655dc936 | [
"MIT"
] | null | null | null | core/models/transaction.py | soslaio/openme | b6e8c87279363a62992b5db14646dbaa655dc936 | [
"MIT"
] | null | null | null |
from django.db import models
from .base import Base
| 33.689655 | 107 | 0.69089 |
e95f809c079ce79cbabf21b0bd9fca926c8f6149 | 864 | py | Python | setup.py | mikemalinowski/insomnia | ea637e5eba608eacd1731239f7ddf6bb91aacc9e | [
"MIT"
] | 2 | 2019-02-28T09:58:55.000Z | 2020-03-06T05:03:34.000Z | setup.py | mikemalinowski/insomnia | ea637e5eba608eacd1731239f7ddf6bb91aacc9e | [
"MIT"
] | null | null | null | setup.py | mikemalinowski/insomnia | ea637e5eba608eacd1731239f7ddf6bb91aacc9e | [
"MIT"
] | null | null | null | import setuptools
try:
with open('README.md', 'r') as fh:
long_description = fh.read()
except:
long_description = ''
setuptools.setup(
name='blackout',
version='1.0.4',
author='Mike Malinowski',
author_email='mike@twisted.space',
description='A python package making it easy to drop a multi-module package from sys.modules',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mikemalinowski/blackout',
packages=setuptools.find_packages(),
entry_points="""
[console_scripts]
blackout = blackout:blackout
""",
py_modules=["blackout"],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| 28.8 | 99 | 0.635417 |
e960b0fabb4246bd94bb826b4cf1e4c34f2696b5 | 2,590 | py | Python | vk_music/__main__.py | w1r2p1/vk_music | 066fa623f87a6351846011c477cff2aad2943bc5 | [
"MIT"
] | 7 | 2015-01-26T08:46:12.000Z | 2020-08-29T13:07:07.000Z | vk_music/__main__.py | w1r2p1/vk_music | 066fa623f87a6351846011c477cff2aad2943bc5 | [
"MIT"
] | 3 | 2015-04-29T20:34:53.000Z | 2015-07-08T08:43:47.000Z | vk_music/__main__.py | sashasimkin/vk_music | 3814909ffd914103e80734e51b01dddb458b1bfe | [
"MIT"
] | 4 | 2016-04-24T14:09:48.000Z | 2019-11-23T14:50:46.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
from subprocess import call
from .vk_music import VkMusic
from .exceptions import AlreadyRunningError
from .defaults import SafeFsStorage
if __name__ == '__main__':
main()
| 39.846154 | 116 | 0.622008 |
e962ef78829cd251169298d5da18fd8a33cb94ba | 950 | py | Python | misc/convert.py | Fusion-Goettingen/ExtendedTargetTrackingToolbox | 945ede661e9258a8f1ca8abc00e25727fedf3ac7 | [
"MIT"
] | 40 | 2018-07-30T13:07:23.000Z | 2021-08-30T05:53:29.000Z | misc/convert.py | GitRooky/ExtendedTargetTrackingToolbox | 945ede661e9258a8f1ca8abc00e25727fedf3ac7 | [
"MIT"
] | null | null | null | misc/convert.py | GitRooky/ExtendedTargetTrackingToolbox | 945ede661e9258a8f1ca8abc00e25727fedf3ac7 | [
"MIT"
] | 21 | 2018-10-03T11:50:00.000Z | 2022-01-11T06:41:24.000Z | __author__ = "Jens Honer"
__copyright__ = "Copyright 2018, Jens Honer Tracking Toolbox"
__email__ = "-"
__license__ = "mit"
__version__ = "1.0"
__status__ = "Prototype"
import numpy as np
_bbox_sign_factors = np.asarray(
[
[1.0, 1.0],
[0.0, 1.0],
[-1.0, 1.0],
[-1.0, 0.0],
[-1.0, -1.0],
[0.0, -1.0],
[1.0, -1.0],
[1.0, 0.0],
], dtype='f4')
| 27.142857 | 93 | 0.548421 |
e96535fbd6c7f8ed1b7186f2611a4c30b772e4ba | 866 | py | Python | tbx/settings/dev.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 103 | 2015-02-24T17:58:21.000Z | 2022-03-23T08:08:58.000Z | tbx/settings/dev.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 145 | 2015-01-13T17:13:43.000Z | 2022-03-29T12:56:20.000Z | tbx/settings/dev.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 57 | 2015-01-03T12:00:37.000Z | 2022-02-09T13:11:30.000Z | from .base import * # noqa
DEBUG = True
SECURE_SSL_REDIRECT = False
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "CHANGEME!!!"
# Enable FE component library
PATTERN_LIBRARY_ENABLED = True
INTERNAL_IPS = ("127.0.0.1", "10.0.2.2")
BASE_URL = "http://localhost:8000"
# URL to direct preview requests to
PREVIEW_URL = "http://localhost:8001/preview"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
AUTH_PASSWORD_VALIDATORS = []
# Enable Wagtail's style guide in Wagtail's settings menu.
# http://docs.wagtail.io/en/stable/contributing/styleguide.html
INSTALLED_APPS += ["wagtail.contrib.styleguide"] # noqa
# Set URL for the preview iframe. Should point at Gatsby.
PREVIEW_URL = "http://localhost:8003/preview/"
MEDIA_PREFIX = BASE_URL
try:
from .local import * # noqa
except ImportError:
pass
| 23.405405 | 66 | 0.742494 |
e965d671abefc6771ef8f31d4904d2ca170eeb5c | 84 | py | Python | EKF/swig/python/test.py | fx815/EKF | ac33a6500d6cedd441758cae2f9aa7192f0f2a87 | [
"BSD-3-Clause"
] | 38 | 2017-09-03T18:27:48.000Z | 2022-01-25T04:56:57.000Z | EKF/swig/python/test.py | fx815/EKF | ac33a6500d6cedd441758cae2f9aa7192f0f2a87 | [
"BSD-3-Clause"
] | 1 | 2020-08-24T03:28:49.000Z | 2020-08-24T03:28:49.000Z | EKF/swig/python/test.py | fx815/EKF | ac33a6500d6cedd441758cae2f9aa7192f0f2a87 | [
"BSD-3-Clause"
] | 10 | 2018-05-11T18:57:27.000Z | 2022-03-10T02:53:54.000Z | import swig_example
swig_example.swig_example_hello()
swig_example.link_liba_hello() | 28 | 33 | 0.892857 |
e9667bd424694f5af16378d0dfcd7bc9fa58a7a6 | 3,356 | py | Python | src/base/local_dataset.py | wenyushi451/Deep-SAD-PyTorch | 168d31f538a50fb029739206994ea5517d907853 | [
"MIT"
] | null | null | null | src/base/local_dataset.py | wenyushi451/Deep-SAD-PyTorch | 168d31f538a50fb029739206994ea5517d907853 | [
"MIT"
] | null | null | null | src/base/local_dataset.py | wenyushi451/Deep-SAD-PyTorch | 168d31f538a50fb029739206994ea5517d907853 | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset
from torchvision.transforms import transforms
from sklearn.model_selection import train_test_split
import os
import glob
import torch
import numpy as np
from PIL import Image
import pdb
| 35.326316 | 111 | 0.56615 |
e9676f23c227a8e3dbd2af8223b0d6f349a5e56a | 408 | py | Python | envdsys/envdaq/migrations/0009_auto_20210415_2246.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 1 | 2021-11-06T19:22:53.000Z | 2021-11-06T19:22:53.000Z | envdsys/envdaq/migrations/0009_auto_20210415_2246.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 25 | 2019-06-18T20:40:36.000Z | 2021-07-23T20:56:48.000Z | envdsys/envdaq/migrations/0009_auto_20210415_2246.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-15 22:46
from django.db import migrations, models
| 21.473684 | 79 | 0.607843 |
e9681f3574652f7f41d0d0d5c77f92d6ff04b1eb | 2,020 | py | Python | works/migrations/0001_initial.py | wildcodear/wildcode_project | 95d396ad3acbed08f607f618d6ada9d04b351bd8 | [
"MIT"
] | null | null | null | works/migrations/0001_initial.py | wildcodear/wildcode_project | 95d396ad3acbed08f607f618d6ada9d04b351bd8 | [
"MIT"
] | null | null | null | works/migrations/0001_initial.py | wildcodear/wildcode_project | 95d396ad3acbed08f607f618d6ada9d04b351bd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 40.4 | 114 | 0.550495 |
e96a119d9fa6a43015c4274d98d22fcf31a25276 | 3,181 | py | Python | 2020/python/template.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2020/python/template.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | 2020/python/template.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | from tutils import pdb
from tutils import subprocess
from tutils import Counter
from tutils import partial
from tutils import reduce
from tutils import wraps
from tutils import count
from tutils import groupby
from tutils import product
from tutils import prod
from tutils import itemgetter
from tutils import Path
from tutils import ascii_lowercase
from tutils import ascii_digits
from tutils import Any
from tutils import Callable
from tutils import List
from tutils import Iterable
from tutils import IterableS
from tutils import Optional
from tutils import Sequence
from tutils import OInt
from tutils import ODict
from tutils import UListStr
from tutils import Tuple
from tutils import Union
from tutils import hexc
from tutils import compose_left
from tutils import concat
from tutils import curry
from tutils import do
from tutils import excepts
from tutils import iterate
from tutils import keyfilter
from tutils import pluck
from tutils import pipe
from tutils import sliding_window
from tutils import toolz_pick
from tutils import toolz_omit
from tutils import omit
from tutils import pick
from tutils import add_debug
from tutils import add_debug_list
from tutils import run_process
from tutils import until_stable
from tutils import oxford
from tutils import excepts_wrap
from tutils import nextwhere
from tutils import noncontinuous
from tutils import lnoncontinuous
from tutils import lfilter
from tutils import lcompact
from tutils import lmap
from tutils import lpluck
from tutils import lstrip
from tutils import splitstrip
from tutils import splitstriplines
from tutils import seq_to_dict
from tutils import split_to_dict
from tutils import c_map
from tutils import c_lmap
from tutils import is_char_az
from tutils import is_char_hex
from tutils import is_char_az09
from tutils import filter_str
from tutils import filter_az
from tutils import filter_az09
from tutils import filter_hex
from tutils import add_pprint
from tutils import add_pprinting
from tutils import make_incrementer
from tutils import adjacent_transforms
from tutils import load_input
from tutils import process_input
from tutils import tests
from tutils import load_and_process_input
from tutils import run_tests
""" END HELPER FUNCTIONS """
DAY = "00"
INPUT, TEST = f"input-{DAY}.txt", f"test-input-{DAY}.txt"
TA1 = None
TA2 = None
ANSWER1 = None
ANSWER2 = None
if __name__ == "__main__":
cli_main()
| 25.653226 | 77 | 0.786231 |
e96a9a36758616e89fb2f6e13a5fba67dd556005 | 323 | py | Python | setup.py | alkaupp/weather | 0aab40b26064ae8ebc4b0868da828a07a4c39631 | [
"MIT"
] | null | null | null | setup.py | alkaupp/weather | 0aab40b26064ae8ebc4b0868da828a07a4c39631 | [
"MIT"
] | null | null | null | setup.py | alkaupp/weather | 0aab40b26064ae8ebc4b0868da828a07a4c39631 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='weather',
version='0.1',
description='CLI frontend for querying weather',
packages=['weather'],
entry_points={
'console_scripts': ['weather = weather.__main__:main']
},
author='Aleksi Kauppila',
author_email='aleksi.kauppila@gmail.com'
)
| 20.1875 | 62 | 0.656347 |
e96abeb27deaf4502ac786cdfa144e452aa4f116 | 271 | py | Python | mordor_magic/mordor_app/admin.py | Far4Ru/mordor-magic-2 | 7082ae8cc0b12154f74f4f58f9cad8f0325a8f57 | [
"MIT"
] | null | null | null | mordor_magic/mordor_app/admin.py | Far4Ru/mordor-magic-2 | 7082ae8cc0b12154f74f4f58f9cad8f0325a8f57 | [
"MIT"
] | null | null | null | mordor_magic/mordor_app/admin.py | Far4Ru/mordor-magic-2 | 7082ae8cc0b12154f74f4f58f9cad8f0325a8f57 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
admin.site.register(CharacterEvent)
admin.site.register(Event)
admin.site.register(CharacterOwner)
admin.site.register(Character)
admin.site.register(User, UserAdmin)
| 27.1 | 47 | 0.830258 |
e96b4f43c95a1b4ce5857c21e88b3785232408aa | 9,142 | py | Python | main.py | Lmy0217/Flight | faf5045712c4d28e0ca3df408308a5e3b9bf8038 | [
"MIT"
] | 2 | 2019-03-31T01:42:29.000Z | 2019-05-16T06:31:50.000Z | main.py | Lmy0217/Flight | faf5045712c4d28e0ca3df408308a5e3b9bf8038 | [
"MIT"
] | 1 | 2019-03-31T01:45:25.000Z | 2019-04-17T05:46:35.000Z | main.py | Lmy0217/Flight | faf5045712c4d28e0ca3df408308a5e3b9bf8038 | [
"MIT"
] | 1 | 2019-03-31T01:42:34.000Z | 2019-03-31T01:42:34.000Z | #coding=utf-8
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox as mBox
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import datetime
import threading
import flight
import outlier
import analytics
#
win = tk.Tk()
win.title("")
win.resizable(0, 0)
#
tabControl = ttk.Notebook(win)
tab1 = ttk.Frame(tabControl)
tabControl.add(tab1, text='')
tab2 = ttk.Frame(tabControl)
tabControl.add(tab2, text='')
tab3 = ttk.Frame(tabControl)
tabControl.add(tab3, text='')
tabControl.pack(expand=1, fill="both")
#
monty = ttk.LabelFrame(tab1, text='')
monty.grid(column=0, row=0, padx=8, pady=4)
labelsFrame = ttk.LabelFrame(monty, text=' ')
labelsFrame.grid(column=0, row=0)
#
ttk.Label(labelsFrame, text=":").grid(column=0, row=0, sticky='W')
#
city = tk.Text(labelsFrame, width=20, height=10)
city.insert(tk.END, "'SHA', 'SIA', 'BJS', 'CAN', 'SZX', 'CTU', 'HGH', 'WUH', 'CKG', 'TAO', 'CSX', 'NKG', 'XMN', 'KMG', 'DLC', 'TSN', 'CGO', 'SYX', 'TNA', 'FOC'")
city.grid(column=1, row=0, sticky='W')
#
ttk.Label(labelsFrame, text=":").grid(column=0, row=1, sticky='W')
#
date1 = tk.StringVar()
da_days = datetime.datetime.now() + datetime.timedelta(days=1)
date1.set(da_days.strftime('%Y-%m-%d'))
date1Entered = ttk.Entry(labelsFrame, textvariable=date1)
date1Entered.grid(column=1, row=1, sticky='W')
#
ttk.Label(labelsFrame, text=":").grid(column=0, row=2, sticky='W')
#
date2 = tk.StringVar()
da_days2 = datetime.datetime.now() + datetime.timedelta(days=1)
date2.set(da_days2.strftime('%Y-%m-%d'))
date2Entered = ttk.Entry(labelsFrame, textvariable=date2)
date2Entered.grid(column=1, row=2, sticky='W')
# Log
scrolW = 91;
scrolH = 37;
scr = scrolledtext.ScrolledText(monty, width=scrolW, height=scrolH, wrap=tk.WORD)
scr.grid(column=3, row=0, sticky='WE', rowspan=5)
#
spider_flight.flight = None
#
spider = ttk.Button(labelsFrame, text="", width=10, command=run_spider_flight)
spider.grid(column=0, row=4, sticky='W')
#
#
save = ttk.Button(labelsFrame, text="", width=10, command=save_file)
save.grid(column=1, row=4, sticky='E')
for child in labelsFrame.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty.winfo_children():
child.grid_configure(padx=3, pady=1)
#
monty2 = ttk.LabelFrame(tab2, text='')
monty2.grid(column=0, row=0, padx=8, pady=4)
labelsFrame2 = ttk.LabelFrame(monty2, text=' ')
labelsFrame2.grid(column=0, row=0)
# Log
scrolW = 34;
scrolH = 25;
scr2 = scrolledtext.ScrolledText(monty2, width=scrolW, height=scrolH, wrap=tk.WORD)
scr2.grid(column=0, row=3, sticky='WE')
#
ttk.Label(labelsFrame2, text=":").grid(column=0, row=0, sticky='W')
#
data_file.outlier = None
#
data = ttk.Button(labelsFrame2, text="", width=10, command=data_file)
data.grid(column=1, row=0, sticky='E')
#
ttk.Label(labelsFrame2, text=":").grid(column=0, row=1, sticky='W')
#
diff = tk.IntVar()
diff.set(5)
diffEntered = ttk.Entry(labelsFrame2, textvariable=diff)
diffEntered.grid(column=1, row=1, sticky='W')
#
drawdiff.out = None
drawdiff.f = plt.figure()
drawdiff.canvas = FigureCanvasTkAgg(drawdiff.f, master=monty2)
drawdiff.canvas.show()
drawdiff.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
#
da = ttk.Button(labelsFrame2, text="", width=10, command=run_drawdiff)
da.grid(column=0, row=2, sticky='W')
#
#
save2 = ttk.Button(labelsFrame2, text="", width=10, command=save_file2)
save2.grid(column=1, row=2, sticky='E')
for child in labelsFrame2.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty2.winfo_children():
child.grid_configure(padx=8, pady=4)
#
monty3 = ttk.LabelFrame(tab3, text='')
monty3.grid(column=0, row=0, padx=8, pady=4)
labelsFrame3 = ttk.LabelFrame(monty3, text=' ')
labelsFrame3.grid(column=0, row=0)
# Log
scrolW = 34;
scrolH = 25;
scr3 = scrolledtext.ScrolledText(monty3, width=scrolW, height=scrolH, wrap=tk.WORD)
scr3.grid(column=0, row=3, sticky='WE')
#
ttk.Label(labelsFrame3, text=":").grid(column=0, row=0, sticky='W')
#
data_file2.analytics = None
#
data2 = ttk.Button(labelsFrame3, text="", width=10, command=data_file2)
data2.grid(column=1, row=0, sticky='E')
#
ttk.Label(labelsFrame3, text=":").grid(column=0, row=1, sticky='W')
#
days = tk.IntVar()
days.set(30)
daysEntered = ttk.Entry(labelsFrame3, textvariable=days)
daysEntered.grid(column=1, row=1, sticky='W')
#
drawpredict.out = None
drawpredict.f = plt.figure()
drawpredict.canvas = FigureCanvasTkAgg(drawpredict.f, master=monty3)
drawpredict.canvas.show()
drawpredict.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
#
pr = ttk.Button(labelsFrame3, text="", width=10, command=run_drawpredict)
pr.grid(column=0, row=2, sticky='W')
#
#
save = ttk.Button(labelsFrame3, text="", width=10, command=save_file3)
save.grid(column=1, row=2, sticky='E')
for child in labelsFrame3.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty3.winfo_children():
child.grid_configure(padx=8, pady=4)
if __name__ == "__main__":
win.mainloop()
| 27.371257 | 161 | 0.669438 |
e96b8708dc8be78814c697d042595105e2d873c2 | 80 | py | Python | Getting_Started_With_Raspberry_Pi_Pico/variable/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 665 | 2017-09-27T21:20:14.000Z | 2022-03-31T09:09:25.000Z | Getting_Started_With_Raspberry_Pi_Pico/variable/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 641 | 2017-10-03T19:46:37.000Z | 2022-03-30T18:28:46.000Z | Getting_Started_With_Raspberry_Pi_Pico/variable/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 734 | 2017-10-02T22:47:38.000Z | 2022-03-30T14:03:51.000Z | """Example of assigning a variable."""
user_name = input("What is your name? ")
| 26.666667 | 40 | 0.6875 |
e96d84302227c0aff1faeef0969afac44cd9a679 | 228 | py | Python | sitator/visualization/__init__.py | lekah/sitator | 0f9c84989758eb7b76be8104a94a8d6decd27b55 | [
"MIT"
] | 8 | 2018-10-05T18:02:24.000Z | 2021-02-22T20:24:58.000Z | sitator/visualization/__init__.py | lekah/sitator | 0f9c84989758eb7b76be8104a94a8d6decd27b55 | [
"MIT"
] | 6 | 2019-02-21T04:33:01.000Z | 2021-01-06T20:05:25.000Z | sitator/visualization/__init__.py | lekah/sitator | 0f9c84989758eb7b76be8104a94a8d6decd27b55 | [
"MIT"
] | 6 | 2018-08-11T21:43:59.000Z | 2021-12-21T06:32:12.000Z | from .common import layers, grid, plotter, DEFAULT_COLORS, set_axes_equal
from .atoms import plot_atoms, plot_points
from .SiteNetworkPlotter import SiteNetworkPlotter
from .SiteTrajectoryPlotter import SiteTrajectoryPlotter
| 28.5 | 73 | 0.855263 |
e96dd4f2640b513649fb3793b8d1056d51d5824e | 1,525 | py | Python | src/futebol_wss_agent/lib/verification.py | nerds-ufes/futebol-optical-agent | 405117b152ce96f09770ff5ca646bd18a72ee2fa | [
"Apache-2.0"
] | null | null | null | src/futebol_wss_agent/lib/verification.py | nerds-ufes/futebol-optical-agent | 405117b152ce96f09770ff5ca646bd18a72ee2fa | [
"Apache-2.0"
] | null | null | null | src/futebol_wss_agent/lib/verification.py | nerds-ufes/futebol-optical-agent | 405117b152ce96f09770ff5ca646bd18a72ee2fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Anderson Bravalheri, Univertity of Bristol
# High Performance Networks Group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 34.659091 | 74 | 0.727213 |
e96debb65a28b71e00c0a2a49cd0ca34ceacdd69 | 449 | py | Python | api/compat.py | fancystats/api | 298ae6d71fa37f649bbd61ad000767242f49a698 | [
"MIT"
] | 1 | 2015-03-20T20:35:22.000Z | 2015-03-20T20:35:22.000Z | api/compat.py | fancystats/api | 298ae6d71fa37f649bbd61ad000767242f49a698 | [
"MIT"
] | null | null | null | api/compat.py | fancystats/api | 298ae6d71fa37f649bbd61ad000767242f49a698 | [
"MIT"
] | null | null | null | """
Python 2/3 Compatibility
========================
Not sure we need to support anything but Python 2.7 at this point , but copied
this module over from flask-peewee for the time being.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
reduce = reduce
else:
text_type = str
string_types = (str, )
unichr = chr
from functools import reduce
| 17.96 | 78 | 0.639198 |
e96ffd9e458abb20cec71135158a8cf1ce09e9d1 | 888 | py | Python | ElevatorBot/commands/funStuff/ticTacToe/vsAI.py | LukasSchmid97/destinyBloodoakStats | 1420802ce01c3435ad5c283f44eb4531d9b22c38 | [
"MIT"
] | 3 | 2019-10-19T11:24:50.000Z | 2021-01-29T12:02:17.000Z | ElevatorBot/commands/funStuff/ticTacToe/vsAI.py | LukasSchmid97/destinyBloodoakStats | 1420802ce01c3435ad5c283f44eb4531d9b22c38 | [
"MIT"
] | 29 | 2019-10-14T12:26:10.000Z | 2021-07-28T20:50:29.000Z | ElevatorBot/commands/funStuff/ticTacToe/vsAI.py | LukasSchmid97/destinyBloodoakStats | 1420802ce01c3435ad5c283f44eb4531d9b22c38 | [
"MIT"
] | 2 | 2019-10-13T17:11:09.000Z | 2020-05-13T15:29:04.000Z | # from discord.ext.commands import Cog
# from discord_slash import SlashContext, cog_ext
# from discord_slash.utils.manage_commands import create_option
#
#
# class TicTacToeAI(Cog):
# def __init__(self, client):
# self.client = client
#
# @cog_ext.cog_subcommand(
# base="tictactoe",
# base_description="You know and love it - TicTacToe",
# name="computer",
# description="Try to beat me in a tic tac toe game",
# options=[
# create_option(
# name="easy_mode",
# description="Set this to true if you are too weak for the normal mode",
# option_type=5,
# required=False,
# ),
# ],
# )
# async def _tictactoe_ai(self, ctx: SlashContext, easy_mode: bool = False):
# pass
#
#
# def setup(client):
# TicTacToeAI(client)
| 29.6 | 89 | 0.581081 |
e97022aba46b50c4fc79f34b4e0641ec360d25a6 | 3,254 | bzl | Python | infra-sk/karma_test/index.bzl | bodymovin/skia-buildbot | 1570e4e48ecb330750264d4ae6a875b5e49a37fe | [
"BSD-3-Clause"
] | null | null | null | infra-sk/karma_test/index.bzl | bodymovin/skia-buildbot | 1570e4e48ecb330750264d4ae6a875b5e49a37fe | [
"BSD-3-Clause"
] | null | null | null | infra-sk/karma_test/index.bzl | bodymovin/skia-buildbot | 1570e4e48ecb330750264d4ae6a875b5e49a37fe | [
"BSD-3-Clause"
] | null | null | null | """This module defines the karma_test rule."""
load("@infra-sk_npm//@bazel/typescript:index.bzl", "ts_library")
load("@infra-sk_npm//@bazel/rollup:index.bzl", "rollup_bundle")
load("@infra-sk_npm//karma:index.bzl", _generated_karma_test = "karma_test")
def karma_test(name, srcs, deps, entry_point = None):
"""Runs unit tests in a browser with Karma and the Mocha test runner.
When executed with `bazel test`, a headless Chrome browser will be used. This supports testing
multiple karma_test targets in parallel, and works on RBE.
When executed with `bazel run`, it prints out a URL to stdout that can be opened in the browser,
e.g. to debug the tests using the browser's developer tools. Source maps are generated.
When executed with `ibazel test`, the test runner never exits, and tests will be rerun every
time a source file is changed.
When executed with `ibazel run`, it will act the same way as `bazel run`, but the tests will be
rebuilt automatically when a source file changes. Reload the browser page to see the changes.
Args:
name: The name of the target.
srcs: The *.ts test files.
deps: The ts_library dependencies for the source files.
entry_point: File in srcs to be used as the entry point to generate the JS bundle executed by
the test runner. Optional if srcs contains only one file.
"""
if len(srcs) > 1 and not entry_point:
fail("An entry_point must be specified when srcs contains more than one file.")
if entry_point and entry_point not in srcs:
fail("The entry_point must be included in srcs.")
if len(srcs) == 1:
entry_point = srcs[0]
ts_library(
name = name + "_lib",
srcs = srcs,
deps = deps + [
# Add common test dependencies for convenience.
"@infra-sk_npm//@types/mocha",
"@infra-sk_npm//@types/chai",
"@infra-sk_npm//@types/sinon",
],
)
rollup_bundle(
name = name + "_bundle",
entry_point = entry_point,
deps = [
name + "_lib",
"@infra-sk_npm//@rollup/plugin-node-resolve",
"@infra-sk_npm//@rollup/plugin-commonjs",
"@infra-sk_npm//rollup-plugin-sourcemaps",
],
format = "umd",
config_file = "//infra-sk:rollup.config.js",
)
# This rule is automatically generated by rules_nodejs from Karma's package.json file.
_generated_karma_test(
name = name,
size = "large",
data = [
name + "_bundle",
"//infra-sk/karma_test:karma.conf.js",
"@infra-sk_npm//karma-chrome-launcher",
"@infra-sk_npm//karma-sinon",
"@infra-sk_npm//karma-mocha",
"@infra-sk_npm//karma-chai",
"@infra-sk_npm//karma-chai-dom",
"@infra-sk_npm//karma-spec-reporter",
"@infra-sk_npm//mocha",
],
templated_args = [
"start",
"$(execpath //infra-sk/karma_test:karma.conf.js)",
"$$(rlocation $(location %s_bundle))" % name,
],
tags = [
# Necessary for it to work with ibazel.
"ibazel_notify_changes",
],
)
| 36.977273 | 100 | 0.609711 |
e970a8957b84490bbe0b79a62e25d6fddc55f490 | 5,894 | py | Python | stats/ClassicAnalyzerStats.py | arndff/fpl-rivals-tracker | 311b932ab7c07b03c1676e5a971df13e652a1b7b | [
"Apache-2.0"
] | 4 | 2019-02-06T10:42:50.000Z | 2021-02-17T21:09:26.000Z | stats/ClassicAnalyzerStats.py | arndff/fpl-rivals-tracker | 311b932ab7c07b03c1676e5a971df13e652a1b7b | [
"Apache-2.0"
] | null | null | null | stats/ClassicAnalyzerStats.py | arndff/fpl-rivals-tracker | 311b932ab7c07b03c1676e5a971df13e652a1b7b | [
"Apache-2.0"
] | 1 | 2021-02-17T21:09:27.000Z | 2021-02-17T21:09:27.000Z | from fileutils.fileutils import save_output_to_file, select_option_from_menu
| 33.68 | 120 | 0.588904 |
e971243f262537809157c1b4baa49f7bcb8914f9 | 88 | py | Python | xallennlp/training/__init__.py | himkt/xallennlp | 073a1475398e59c70230623016f4036432b9c186 | [
"MIT"
] | null | null | null | xallennlp/training/__init__.py | himkt/xallennlp | 073a1475398e59c70230623016f4036432b9c186 | [
"MIT"
] | null | null | null | xallennlp/training/__init__.py | himkt/xallennlp | 073a1475398e59c70230623016f4036432b9c186 | [
"MIT"
] | null | null | null | import xallennlp.training.mlflow_callback
import xallennlp.training.mlflow_checkpointer
| 29.333333 | 45 | 0.909091 |
e972ad4a4720505a28ff8ccfa9d6a0290e94f706 | 11,599 | py | Python | colabutil.py | cmcheungMOOC/colabUtil | c08da88ae56d461404960de3426344e7da49f3db | [
"MIT"
] | 1 | 2018-08-07T05:34:11.000Z | 2018-08-07T05:34:11.000Z | colabutil.py | cmcheungMOOC/colabUtil | c08da88ae56d461404960de3426344e7da49f3db | [
"MIT"
] | null | null | null | colabutil.py | cmcheungMOOC/colabUtil | c08da88ae56d461404960de3426344e7da49f3db | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""colabUtil.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KX9x-rqyj0XfUkLtfOVh8t8T_kW0hs0u
#Colab Util
This is a collection of utility functions that simplifies data science researchin using colab. I wrote this while working through *Deep Learning with Python* by Francisco Chollet.
Most of creatPyDrive is from https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a
##Usage
###Pull in py files into colab. The content will be in colabUtil folder.
```python
!pip install -U -q PyDrive
!git clone https://github.com/cmcheungMOOC/colabUtil.git
```
###Add colab directory to module path
```python
import sys
sys.path.insert(0, '/content/colabUtil')
```
###Share and enjoy!
```python
import colabutil as cu
cu.setupGlove()
cu.setupAclImdb()
cu.setupKaggleCatsAndDogs()
cu.restore('CNN_Results')
cu.save('CNN_Results')
```
##Assumptions
I have made the following assumptions to allow me to simplify my code. This code is not meant for general usage.
* Colab VMs are reliable
* Colab VMs will be recycled
These assumptions simply means that you can count on the VM to do work correctly while it is still assigned to you, but the VM will be yanked from under you. So, it is necessary to backup intermediate state information to persistent storage such as a Google drive.
The transient nature of you Colab work space means that there is little reason for complicated directory hierarchies. After all, anything you built up will vanish overnight. This means that a simple directory hierarchy supporting the tasks at hand is all you need.
##Directory Hierarchy
Colab workspace is rooted at /content. This is our defaull directory. In addition, we use /content/dataset to store downloaded datasets. Intermediate states of a ML algorithm is written onto /content. All top level content /content can be zipped up and saved. The content can be restored when needed. Note that only the latest state persists in the Google drive. Unfortuately, I know of no easy way to get the title of a Jupyter notebook. So, a user defined name need to be chosen for the backup zip file.
## Utility Functions
"""
#@title Download Dataset
import requests, os
#@title Test Download { run: "auto", vertical-output: true }
url = "" #@param ["", "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", "http://nlp.stanford.edu/data/glove.6B.zip"]
overwrite = False #@param {type:"boolean"}
if url != "":
download(url, overwrite)
os.listdir()
"""###Untar Dataset into Current Working Directory
Currently, untar only support *.tar.gz. This will be extended only if there is a real use case.
"""
import tarfile, os, shutil
#@title Test Untar { run: "auto", vertical-output: true }
gzName = "" #@param ["", "aclImdb_v1.tar.gz"]
dstDir = "" #@param ["", ".", "/content/dataset"]
if gzName != "":
d = untar(gzName, dstDir)
print(d)
print(os.listdir(d))
#@title Zip Up Content of a Specified Directory
import zipfile, os
#@title Test Zip { run: "auto" }
srcDir = "" #@param ["", ".", "/content", "/content/datalab"]
if srcDir != '':
if not os.path.isdir(srcDir):
os.mkdir(srcDir)
print(zip(srcDir))
#@title Unzip Content
import os, zipfile, shutil
#@title Test Unzip { run: "auto", vertical-output: true }
zipName = "" #@param ["", "glove.6B.zip", "/content/datalab.zip"]
dstDir = "" #@param ["", ".", "/content/dataset/glove.6B", "/content/dataset", "datalab", "a/b", "dataset/tmp"]
if zipName != "":
d = unzip(zipName, dstDir)
print(d)
print(os.listdir(d))
os.listdir(d)
#@title Setup GLOVE
#@title Test GLOVE Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupGlove()
#@title Setup ACLIMDB
#@title Test ACLIMDB Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupAclImdb()
#@title Setup Kaggle Cats and Dogs
#@title Test Kaggle Cats and Dogs Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupKaggleCatsAndDogs()
"""##Pydrive Utilities
https://gsuitedevs.github.io/PyDrive/docs/build/html/index.html
Content of a specified directory is saved to or restored from a Google drive.
Most of creatPyDrive is from https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a
"""
#@title Authenticate and Create the PyDrive Client
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
#@title Test CreatePyDrive { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
drive = createPyDrive()
os.listdir()
#@title Create & Upload a File
#@title Test UploadFile to Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
if not os.path.exists(fname):
print('Creating', fname)
with open(fname, 'w') as fp:
fp.write('abc')
uploadFile(drive, fname)
#@title Find a File by Name in the Google Drive
#@title Test Find File in Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
findFile(drive, fname)
#@title Download a File and Optionally Trash it
#@title Test Download from Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
trashIt = False #@param {type:"boolean"}
if fname != '':
print(downloadFile(drive, fname, trashIt))
#@title Google Drive Class
#@title Test Google Drive Class { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
if not os.path.exists(fname):
with open(fname, 'w') as fp:
fp.write('abc')
gd = GDrive()
gd.upload(fname)
gd.download(fname)
"""###Save and Restore the Content of a Directory"""
#@title Save Directory to Google Drive
#@title Test Directory Save { run: "auto", vertical-output: true }
srcDirName = "" #@param ["", "datalab", "/content/datalab"]
if srcDirName != '':
if not os.path.isdir(srcDirName):
os.mkdir(srcDirName)
path = os.path.join(srcDirName, 'abc.txt')
if not os.path.exists(path):
with open(path, 'w') as fp:
fp.write('abc')
save(srcDirName)
#@title Restore Directory from Google Drive
import os
#@title Test Restore Directory { run: "auto", vertical-output: true }
dstDirName = "" #@param ["", "datalab", "CNN_Results"]
import shutil
if dstDirName != '':
if os.path.isdir(dstDirName):
print('rmtree', dstDirName)
shutil.rmtree(dstDirName)
print(restore(dstDirName)) | 30.049223 | 512 | 0.691698 |
e9736a918f48d6f382688f91eb8391428a99f968 | 2,893 | py | Python | sarpy/io/product/base.py | spacefan/sarpy | 2791af86b568c8a8560275aee426a4718d5a4606 | [
"MIT"
] | 119 | 2018-07-12T22:08:17.000Z | 2022-03-24T12:11:39.000Z | sarpy/io/product/base.py | spacefan/sarpy | 2791af86b568c8a8560275aee426a4718d5a4606 | [
"MIT"
] | 72 | 2018-03-29T15:57:37.000Z | 2022-03-10T01:46:21.000Z | sarpy/io/product/base.py | spacefan/sarpy | 2791af86b568c8a8560275aee426a4718d5a4606 | [
"MIT"
] | 54 | 2018-03-27T19:57:20.000Z | 2022-03-09T20:53:11.000Z | """
Base common features for product readers
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Sequence, List, Tuple, Union
from sarpy.io.general.base import AbstractReader
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.complex.sicd_elements.SICD import SICDType
| 31.445652 | 98 | 0.59281 |
e97c7053b712437ddd9adb3801c6bf654177920e | 2,717 | py | Python | PersonManage/role/views.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | null | null | null | PersonManage/role/views.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | 3 | 2021-03-19T01:28:43.000Z | 2021-04-08T19:57:19.000Z | PersonManage/role/views.py | ahriknow/ahriknow | 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | [
"MIT"
] | null | null | null | from django.conf import settings
from redis import StrictRedis
from rest_framework.response import Response
from rest_framework.views import APIView
from PersonManage.role.models import Role
from PersonManage.role.serializer import OneRole, ManyRole
from PersonManage.jurisdiction.models import Jurisdiction
| 46.844828 | 92 | 0.560177 |
e97d491587ef3bda7620cb34a61d716763821b01 | 5,288 | py | Python | datalad_osf/utils.py | adswa/datalad-osf-2 | 25988f898ffc6f489c0855933136f39f79cf8c65 | [
"BSD-3-Clause"
] | null | null | null | datalad_osf/utils.py | adswa/datalad-osf-2 | 25988f898ffc6f489c0855933136f39f79cf8c65 | [
"BSD-3-Clause"
] | null | null | null | datalad_osf/utils.py | adswa/datalad-osf-2 | 25988f898ffc6f489c0855933136f39f79cf8c65 | [
"BSD-3-Clause"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See LICENSE file distributed along with the datalad_osf package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import json
from os import environ
from datalad.downloaders.credentials import (
Token,
UserPassword,
)
from datalad import ui
# Note: This should ultimately go into osfclient
def create_node(osf_session, title, category="data", tags=None,
public=False, parent=None, description=None):
""" Create a node on OSF
Parameters
----------
title: str
Title of the node
category: str
categorization changes how the node is displayed
on OSF, but doesn't appear to have a "real" function
tags: list of str
public: bool
whether to make the new node public
parent: str, optional
ID of an OSF parent node to create a child node for
Returns
-------
str
ID of the created node
"""
if parent:
# we have a parent, use its URL to create children
url = osf_session.build_url('nodes', parent, 'children')
else:
url = osf_session.build_url('nodes')
post_data = {"data":
{"type": "nodes",
"attributes":
{"title": title,
"category": category,
"public": public,
}
}
}
if tags:
post_data["data"]["attributes"]["tags"] = tags
if description:
post_data["data"]["attributes"]["description"] = description
response = osf_session.post(url, data=json.dumps(post_data))
# TODO: figure what errors to better deal with /
# create a better message from
response.raise_for_status()
# TODO: This should eventually return an `node` instance (see osfclient).
# Response contains all properties of the created node.
node_id = response.json()['data']['id']
# Note: Going for "html" URL here for reporting back to the user, since this
# what they would need to go to in order to proceed manually.
# There's also the flavor "self" instead, which is the node's
# API endpoint.
proj_url = response.json()["data"]["links"]["html"]
return node_id, proj_url
def delete_node(osf_session, id_):
""" Delete a node on OSF
Parameters
----------
id_: str
to be deleted node ID
"""
url = osf_session.build_url('nodes', id_)
response = osf_session.delete(url)
response.raise_for_status()
def initialize_osf_remote(remote, node,
encryption="none", autoenable="true"):
"""Initialize special remote with a given node
convenience wrapper for git-annex-initremote w/o datalad
Parameters
----------
remote: str
name for the special remote
node: str
ID of the node/component to use
encryption: str
see git-annex-initremote; mandatory option;
autoenable: str
'true' or 'false'; tells git-annex to automatically enable the
special remote on git-annex-init (particularly after a fresh git-clone
"""
init_opts = ["type=external",
"externaltype=osf",
"encryption={}".format(encryption),
"autoenable={}".format(autoenable),
"node={}".format(node)]
import subprocess
subprocess.run(["git", "annex", "initremote", remote] + init_opts)
| 31.664671 | 87 | 0.580182 |
e97dc3bd342d59f1490983b6c64ea74961cdd4e4 | 1,487 | py | Python | tpDcc/libs/qt/core/observable.py | tpDcc/tpQtLib | 26b6e893395633a1b189a1b73654891b7688648d | [
"MIT"
] | 3 | 2019-08-26T05:56:12.000Z | 2019-10-03T11:35:53.000Z | tpDcc/libs/qt/core/observable.py | tpDcc/tpQtLib | 26b6e893395633a1b189a1b73654891b7688648d | [
"MIT"
] | null | null | null | tpDcc/libs/qt/core/observable.py | tpDcc/tpQtLib | 26b6e893395633a1b189a1b73654891b7688648d | [
"MIT"
] | 1 | 2021-03-03T21:01:50.000Z | 2021-03-03T21:01:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Qt observer pattern related functions and classes
"""
from __future__ import print_function, division, absolute_import
from uuid import uuid4
from functools import partial
from Qt.QtCore import Signal, QObject
| 31.638298 | 119 | 0.462677 |
e98066a2b0d3ed3bbd8dc11131cf9f11efdf134a | 3,645 | py | Python | advent-of-code-2019/day 12/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | advent-of-code-2019/day 12/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | advent-of-code-2019/day 12/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | """Advent of Code 2019 Day 12."""
from functools import lru_cache
import re
def simulate_steps(moons, steps=None):
"""Simulate number steps of moons.
Returns moons after number of steps.
If steps is None returns cycles of moons."""
cycles = {}
initial_moons = moons
step = 0
while not steps or step < steps:
step += 1
moons = moon_motion(moons)
if steps:
continue
for axis in range(3):
if axis in cycles:
continue
if is_cycle(moons, initial_moons, axis):
cycles[axis] = step
if len(cycles) == 3:
return cycles
return moons
def is_cycle(moons, initial, axis):
"""Check if moons cycled at the axis to the initial values."""
for moon, initial in zip(moons, initial):
if (moon['position'][axis] != initial['position'][axis]
or moon['velocity'][axis] != initial['velocity'][axis]):
return False
return True
def moon_motion(initial_moons):
"""Move moons by one step."""
moons = []
for moon in initial_moons:
cur_velocity = moon['velocity']
for other_moon in initial_moons:
if moon == other_moon:
continue
velocity_change = join_with_function(
gravity_effect, moon['position'], other_moon['position'])
cur_velocity = join_with_function(
int.__add__, cur_velocity, velocity_change)
new_position = join_with_function(
int.__add__, moon['position'], cur_velocity)
moons.append({
'position': new_position,
'velocity': cur_velocity,
})
return moons
def join_with_function(func, values1, values2):
"""Join values using func function."""
return [
func(value1, value2)
for value1, value2 in zip(values1, values2)
]
def gravity_effect(position, other_position):
"""Return effect other_position has on position."""
if position == other_position:
return 0
elif position > other_position:
return -1
return 1
def find_total_energy(moons):
"""Get total energy from moons."""
return sum(get_energy(moon['position']) * get_energy(moon['velocity'])
for moon in moons)
def get_energy(values):
"""Get energy from values."""
return sum(abs(value) for value in values)
def parse_moons(lines):
"""Parse lines to dictionary with positions and velocity."""
moons = []
regex = r'([-\d]+)'
for line in lines:
position = [int(num) for num in re.findall(regex, line)]
moons.append({
'position': position,
'velocity': [0, 0, 0]
})
return moons
def get_file_contents(file):
"""Read all lines from file."""
with open(file) as f:
return f.readlines()
if __name__ == '__main__':
main()
| 27.201493 | 74 | 0.608505 |
e980cd0e0ae302b2d5e582e27e0280d700f45285 | 1,909 | py | Python | rest_framework_json_api/utils.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | rest_framework_json_api/utils.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | rest_framework_json_api/utils.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | from django.utils.encoding import force_text
from django.utils.text import slugify
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework.serializers import ListSerializer
except ImportError:
ListSerializer = type(None)
def model_to_resource_type(model):
'''Return the verbose plural form of a model name, with underscores
Examples:
Person -> "people"
ProfileImage -> "profile_image"
'''
if model is None:
return "data"
return force_text(model._meta.verbose_name_plural)
#
# String conversion
#
def camelcase(string):
'''Return a string in lowerCamelCase
Examples:
"people" -> "people"
"profile images" -> "profileImages"
'''
out = slug(string).replace('-', ' ').title().replace(' ', '')
return out[0].lower() + out[1:]
def slug(string):
'''Return a string where words are connected with hyphens'''
return slugify(force_text(string))
def snakecase(string):
'''Return a string where words are connected with underscores
Examples:
"people" -> "people"
"profile images" -> "profile_images"
'''
return slug(string).replace('-', '_')
| 20.526882 | 71 | 0.671032 |
e982548723b8fb19b5a93e5e600f9ad6d5133e1c | 2,246 | py | Python | Ui_ZhkuMainWindow.py | yujiecong/PyQt-Zhku-Client | 8fa35592cbf8af7efe8d55d4f66625cd4918a3ff | [
"MIT"
] | null | null | null | Ui_ZhkuMainWindow.py | yujiecong/PyQt-Zhku-Client | 8fa35592cbf8af7efe8d55d4f66625cd4918a3ff | [
"MIT"
] | null | null | null | Ui_ZhkuMainWindow.py | yujiecong/PyQt-Zhku-Client | 8fa35592cbf8af7efe8d55d4f66625cd4918a3ff | [
"MIT"
] | 1 | 2021-09-14T03:28:16.000Z | 2021-09-14T03:28:16.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_ZhkuMainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import qr_img_rc
| 44.92 | 81 | 0.740427 |
e9869e465ad91d2e5ca0674a3741999310e41b5c | 95 | py | Python | calheatmap/apps.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | 1 | 2020-03-15T16:14:02.000Z | 2020-03-15T16:14:02.000Z | calheatmap/apps.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | 14 | 2018-11-09T08:34:23.000Z | 2022-02-10T08:15:53.000Z | calheatmap/apps.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.833333 | 34 | 0.768421 |
e987a8021b1287256296f2282748c6e9f81dfd63 | 767 | py | Python | ntcir15_tools/eval/__init__.py | longpham28/ntcir15_tools | d5fd138a3c90dfd2c5a67ea908101fed5563484d | [
"MIT"
] | null | null | null | ntcir15_tools/eval/__init__.py | longpham28/ntcir15_tools | d5fd138a3c90dfd2c5a67ea908101fed5563484d | [
"MIT"
] | null | null | null | ntcir15_tools/eval/__init__.py | longpham28/ntcir15_tools | d5fd138a3c90dfd2c5a67ea908101fed5563484d | [
"MIT"
] | null | null | null | import numpy as np
from pyNTCIREVAL import Labeler
from pyNTCIREVAL.metrics import MSnDCG
from collections import defaultdict
from ntcir15_tools.data import en_query_ids, ja_query_ids, en_labels, ja_labels
| 24.741935 | 85 | 0.647979 |
e987c807f21477bc86678b22246d01c6112ae5c0 | 50 | py | Python | classification/cifar10/losses/__init__.py | AkibMashrur/Ensembling | bdf2f601be90070fed10db62a9c15506e1df37b6 | [
"Apache-2.0"
] | null | null | null | classification/cifar10/losses/__init__.py | AkibMashrur/Ensembling | bdf2f601be90070fed10db62a9c15506e1df37b6 | [
"Apache-2.0"
] | null | null | null | classification/cifar10/losses/__init__.py | AkibMashrur/Ensembling | bdf2f601be90070fed10db62a9c15506e1df37b6 | [
"Apache-2.0"
] | null | null | null | from .contrastive import SupConLoss, NoiseConLoss
| 25 | 49 | 0.86 |
e988aca86693a630d0af6b4768506c2e555391e5 | 71 | py | Python | Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py | EduardoJonathan0/Python | 0e4dff4703515a6454ba25c6f401960b6155f32f | [
"MIT"
] | null | null | null | Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py | EduardoJonathan0/Python | 0e4dff4703515a6454ba25c6f401960b6155f32f | [
"MIT"
] | null | null | null | Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py | EduardoJonathan0/Python | 0e4dff4703515a6454ba25c6f401960b6155f32f | [
"MIT"
] | null | null | null | C = int(input("Insira um valor: "))
Fire = (9 * C / 5) + 32
print(Fire) | 23.666667 | 35 | 0.56338 |
e9895372814e45f43f516d5ef779aac132b10fc9 | 2,145 | py | Python | notebooks/Detecting Covid-19 through Transfer Learning/src/test.py | supria68/Data-Science-Projects | 423695c130a92db1a188b3d3a13871f0f76f6f5b | [
"MIT"
] | 2 | 2020-09-16T19:37:30.000Z | 2021-11-01T17:49:36.000Z | notebooks/Detecting Covid-19 through Transfer Learning/src/test.py | supria68/Data-Science-Projects | 423695c130a92db1a188b3d3a13871f0f76f6f5b | [
"MIT"
] | null | null | null | notebooks/Detecting Covid-19 through Transfer Learning/src/test.py | supria68/Data-Science-Projects | 423695c130a92db1a188b3d3a13871f0f76f6f5b | [
"MIT"
] | 1 | 2021-11-01T17:49:37.000Z | 2021-11-01T17:49:37.000Z | """
filename: test.py
author: Supriya Sudarshan
version: 19.04.2021
description: Takes in the images and predicts (Covid or Non-Covid/Normal) using the *.h5 models
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
import random
def evaluate(img_path, model):
"""
Given the image path and model, preprocess the input image and get
predictions
"""
img = image.load_img(img_path, target_size=(224,224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
image_data = preprocess_input(x)
y_pred = model.predict(image_data)
probability = y_pred[0]
if probability[0] > 0.5:
prediction = str('%.2f' % (probability[0]*100) + '% COVID')
else:
prediction = str('%.2f' % ((1-probability[0])*100) + '% Normal')
plt.title(prediction)
plt.imshow(img)
plt.show()
if __name__ == "__main__":
# Load appropriate models
ct_model = load_model('../saved_models/chest_ct_vggmodel.h5')
xray_model = load_model('../saved_models/chest_xray_vggmodel.h5')
ultrasound_model = load_model('../saved_models/ultrasound_vggmodel.h5')
##### Predictions CT
path = '../images_for_testing/CT'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a CT image: {}'.format(img))
evaluate(path + '/'+ img, ct_model)
##### Predictions Xray
path = '../images_for_testing/Xray'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a Xray image: {}'.format(img))
evaluate(path + '/'+ img, xray_model)
##### Predictions Ultrasound
path = '../images_for_testing/Ultrasound'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a ultrasound image: {}'.format(img))
evaluate(path + '/'+ img, ultrasound_model)
| 32.014925 | 97 | 0.674592 |
e98a1dc0d5d9161eac10445f95ac9ce1dbe57950 | 348 | py | Python | projecteuler/problems/problem_41.py | hjheath/ProjectEuler | 6961fe81e2039c281ea9d4ab0bdd85611bf256a8 | [
"MIT"
] | 1 | 2015-04-25T10:37:52.000Z | 2015-04-25T10:37:52.000Z | projecteuler/problems/problem_41.py | hjheath/ProjectEuler | 6961fe81e2039c281ea9d4ab0bdd85611bf256a8 | [
"MIT"
] | null | null | null | projecteuler/problems/problem_41.py | hjheath/ProjectEuler | 6961fe81e2039c281ea9d4ab0bdd85611bf256a8 | [
"MIT"
] | null | null | null | """Problem 41 of https://projecteuler.net"""
from itertools import permutations
from projecteuler.inspectors import is_prime
def problem_41():
"""Solution to problem 41."""
# All 8 and 9 digit pandigitals are divisible by 3.
perms = [int(''.join(x)) for x in permutations('1234567')]
return max(x for x in perms if is_prime(x))
| 26.769231 | 62 | 0.698276 |
e98cb6485313bf23d0ef3116dfc0e309cd633aad | 3,064 | py | Python | preprocess/utils.py | federicozaiter/LogClass | 62c1c9c61294625bdb3d99dc01b6adc7b735c4ab | [
"MIT"
] | 159 | 2020-02-19T00:19:23.000Z | 2022-03-30T08:40:08.000Z | preprocess/utils.py | WeibinMeng/LogClass-1 | 8edbaf4377374e2aac5e7057987e1d047b83ff2f | [
"MIT"
] | 3 | 2021-06-09T04:30:35.000Z | 2022-01-09T23:26:07.000Z | preprocess/utils.py | WeibinMeng/LogClass-1 | 8edbaf4377374e2aac5e7057987e1d047b83ff2f | [
"MIT"
] | 41 | 2020-02-19T00:19:26.000Z | 2022-03-28T08:02:22.000Z | import re
import numpy as np
from tqdm import tqdm
from ..decorators import print_step
from multiprocessing import Pool
# Compiling for optimization
re_sub_1 = re.compile(r"(:(?=\s))|((?<=\s):)")
re_sub_2 = re.compile(r"(\d+\.)+\d+")
re_sub_3 = re.compile(r"\d{2}:\d{2}:\d{2}")
re_sub_4 = re.compile(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep")
re_sub_5 = re.compile(r":?(\w+:)+")
re_sub_6 = re.compile(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]")
p = re.compile(r"[^(A-Za-z)]")
| 35.627907 | 77 | 0.568864 |
e98ead08452c6bd2e01e97b70008a25d1afdf8fe | 4,494 | py | Python | examples/FasterRCNN/dataset/data_configs_dict.py | ruodingt/tensorpack | 026006457f3ecdedf23d1bb57c8610591d936b3e | [
"Apache-2.0"
] | null | null | null | examples/FasterRCNN/dataset/data_configs_dict.py | ruodingt/tensorpack | 026006457f3ecdedf23d1bb57c8610591d936b3e | [
"Apache-2.0"
] | null | null | null | examples/FasterRCNN/dataset/data_configs_dict.py | ruodingt/tensorpack | 026006457f3ecdedf23d1bb57c8610591d936b3e | [
"Apache-2.0"
] | null | null | null | import os
from dataset.data_config import DataConfig
images_data_base_dir = os.path.abspath('../../../data/datasets_coco/')
data_conf = {
DataConfig.IMAGE_BASEDIR: images_data_base_dir,
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-5.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-3.json')
}
]
}
# images_data_base_dir = os.path.abspath('../../../data/datasets_coco/')
data_conf_tooth_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json') #
}
]
}
data_conf_tooth_legacy_of = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json') #
}
]
}
data_conf_tooth_web_of = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json') #
}
]
}
data_conf_lesion_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-9-lesion.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-8-lesion.json') #
}
]
}
data_conf_gingivitis_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/gingivitis_web_490-13-ging.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-14-ging.json') #
}
]
}
| 36.536585 | 99 | 0.459947 |
e98ee77c65cf6881d1b3b3557c92ca630d8803bb | 2,905 | py | Python | beaconsite/tests/test_permissions.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 14 | 2019-09-30T12:44:17.000Z | 2022-02-04T14:45:16.000Z | beaconsite/tests/test_permissions.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 244 | 2021-03-26T15:13:15.000Z | 2022-03-31T15:48:04.000Z | beaconsite/tests/test_permissions.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | 8 | 2020-05-19T21:55:13.000Z | 2022-03-31T07:02:58.000Z | from django.urls import reverse
from projectroles.tests.test_permissions import TestProjectPermissionBase
from beaconsite.tests.factories import ConsortiumFactory, SiteFactory
| 35.864198 | 98 | 0.664028 |
e98f3c0cbfe695e09cf6acaf634dcaef0d39ab20 | 965 | py | Python | backend/forms.py | adarshrao1/Flood_detection | 4a2a7ecef178366700d5c29a13d45143eaa7cc54 | [
"CC0-1.0"
] | null | null | null | backend/forms.py | adarshrao1/Flood_detection | 4a2a7ecef178366700d5c29a13d45143eaa7cc54 | [
"CC0-1.0"
] | null | null | null | backend/forms.py | adarshrao1/Flood_detection | 4a2a7ecef178366700d5c29a13d45143eaa7cc54 | [
"CC0-1.0"
] | 5 | 2021-06-05T14:11:04.000Z | 2021-06-19T05:51:56.000Z | from django.forms import ModelForm
from backend.models import Image, Image2
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
| 26.081081 | 70 | 0.654922 |
e98f933a4b8c3a1b81125f679e51f0db2f252a76 | 22,851 | py | Python | uldaq-1.2.1/uldaq/ul_c_interface.py | Novellogiampiero/RapLib | 614d25abf402052dcaf81aa72044e3a03cb014fa | [
"Apache-2.0"
] | null | null | null | uldaq-1.2.1/uldaq/ul_c_interface.py | Novellogiampiero/RapLib | 614d25abf402052dcaf81aa72044e3a03cb014fa | [
"Apache-2.0"
] | null | null | null | uldaq-1.2.1/uldaq/ul_c_interface.py | Novellogiampiero/RapLib | 614d25abf402052dcaf81aa72044e3a03cb014fa | [
"Apache-2.0"
] | null | null | null | """
Created on Mar 7 2018
@author: MCC
"""
from ctypes import (CDLL, CFUNCTYPE, Structure, c_uint, c_int, c_longlong,
POINTER, c_double, c_char, py_object, c_ulonglong, cast,
c_char_p, c_byte)
from enum import IntEnum
from .ul_structs import DaqDeviceDescriptor, AiQueueElement, TransferStatus
from .ul_structs import DaqInChanDescriptor, MemDescriptor, DaqOutChanDescriptor, EventCallbackArgs
from .ul_enums import DaqEventType
from sys import platform
if platform.startswith('darwin'):
lib = CDLL('libuldaq.dylib')
else:
lib = CDLL('libuldaq.so')
#
# Structures
#
#
# Enums
#
# Prototypes for callbacks
InterfaceCallbackProcType = CFUNCTYPE(None, c_longlong, c_uint, c_ulonglong, POINTER(EventParams))
def interface_event_callback_function(handle, event_type, event_data, event_params):
# type: (int, DaqEventType, py_object, py_object) -> None
"""Internal function used for handling event callbacks."""
event_parameters = cast(event_params, POINTER(EventParams)).contents
user_data = event_parameters.user_data
cb = event_parameters.user_callback
cb(EventCallbackArgs(event_type, event_data, user_data))
return
# Prototypes for DAQ Device
lib.ulDevGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulDevGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulGetDaqDeviceDescriptor.argtypes = (c_longlong, POINTER(DaqDeviceDescriptor))
lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulGetDaqDeviceInventory.argtypes = (c_uint, POINTER(DaqDeviceDescriptor), POINTER(c_uint))
lib.ulConnectDaqDevice.argtypes = (c_longlong,)
lib.ulEnableEvent.argtypes = (c_longlong, c_uint, c_ulonglong, InterfaceCallbackProcType, POINTER(EventParams))
lib.ulDisableEvent.argtypes = (c_longlong, c_uint)
lib.ulMemRead.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint)
lib.ulMemWrite.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint)
lib.ulCreateDaqDevice.argtypes = (DaqDeviceDescriptor,)
lib.ulReleaseDaqDevice.argtypes = (c_longlong,)
lib.ulIsDaqDeviceConnected.argtypes = (c_longlong, POINTER(c_int))
lib.ulDisconnectDaqDevice.argtypes = (c_longlong,)
lib.ulFlashLed.argtypes = (c_longlong, c_int)
lib.ulGetInfoStr.argtypes = (c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulSetConfig.argtypes = (c_uint, c_uint, c_longlong)
lib.ulGetConfig.argtypes = (c_uint, c_uint, POINTER(c_longlong))
lib.ulGetNetDaqDeviceDescriptor.argtypes = (c_char_p, c_uint, c_char_p,
POINTER(DaqDeviceDescriptor),
c_double)
lib.ulDaqDeviceConnectionCode.argtypes = (c_uint, c_longlong)
# Prototypes for the analog input subsystem
lib.ulAIn.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, POINTER(c_double))
lib.ulAInScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulAInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulAInLoadQueue.argtypes = (c_longlong, POINTER(AiQueueElement), c_uint)
lib.ulAInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulAInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulAISetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulAIGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAISetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, c_double)
lib.ulAIGetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAIGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAInScanStop.argtypes = (c_longlong,)
lib.ulAIGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulTIn.argtypes = (c_longlong, c_int, c_uint, c_uint, POINTER(c_double))
lib.ulTInArray.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint,
POINTER(c_double))
# Prototypes for the analog output subsystem
lib.ulAOut.argtypes = (c_longlong, c_int, c_uint, c_uint, c_double)
lib.ulAOutScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulAOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulAOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulAOutScanStop.argtypes = (c_longlong,)
lib.ulAOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulAOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAOutArray.argtypes = (c_longlong, c_int, c_int, POINTER(c_uint), c_uint,
POINTER(c_double))
# Prototypes for the DAQ input subsystem
lib.ulDaqInSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint)
lib.ulDaqInScan.argtypes = (c_longlong, POINTER(DaqInChanDescriptor), c_int, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulDaqInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDaqInScanStop.argtypes = (c_longlong,)
lib.ulDaqInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDaqIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDaqIGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# Prototypes for DIO subsystem
lib.ulDIn.argtypes = (c_longlong, c_uint, POINTER(c_ulonglong))
lib.ulDOut.argtypes = (c_longlong, c_uint, c_ulonglong)
lib.ulDBitIn.argtypes = (c_longlong, c_uint, c_int, POINTER(c_uint))
lib.ulDBitOut.argtypes = (c_longlong, c_uint, c_int, c_uint)
lib.ulDInScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDOutScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDOutScanStop.argtypes = (c_longlong,)
lib.ulDInScanStop.argtypes = (c_longlong,)
lib.ulDInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulDOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulDConfigPort.argtypes = (c_longlong, c_uint, c_uint)
lib.ulDConfigBit.argtypes = (c_longlong, c_uint, c_int, c_uint)
lib.ulDIOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDIOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulDIOGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDIOSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulDInArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDOutArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong))
# prototypes for DAQ output subsystem
lib.ulDaqOutScan.argtypes = (c_longlong, POINTER(DaqOutChanDescriptor), c_int, c_int, POINTER(c_double), c_uint,
c_uint, POINTER(c_double))
lib.ulDaqOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDaqOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDaqOutScanStop.argtypes = (c_longlong,)
lib.ulDaqOutSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint)
lib.ulDaqOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDaqOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# prototypes for counter subsystem
lib.ulCIn.argtypes = (c_longlong, c_int, POINTER(c_ulonglong))
lib.ulCRead.argtypes = (c_longlong, c_int, c_uint, POINTER(c_ulonglong))
lib.ulCLoad.argtypes = (c_longlong, c_int, c_uint, c_ulonglong)
lib.ulCClear.argtypes = (c_longlong, c_int)
lib.ulCConfigScan.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint)
lib.ulCInScan.argtypes = (c_longlong, c_int, c_int, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulCInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulCInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulCInScanStop.argtypes = (c_longlong,)
lib.ulCInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulCtrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulCtrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulCtrSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulCtrGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
# Prototypes for the timer subsystem
lib.ulTmrPulseOutStart.argtypes = (c_longlong, c_int, POINTER(c_double), POINTER(c_double), c_ulonglong,
POINTER(c_double), c_uint, c_uint)
lib.ulTmrPulseOutStop.argtypes = (c_longlong, c_int)
lib.ulTmrPulseOutStatus.argtypes = (c_longlong, c_int, POINTER(c_uint))
lib.ulTmrSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulTmrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulTmrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# Other Prototypes
lib.ulGetErrMsg.argtypes = (c_uint, POINTER(c_char))
lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulMemGetInfo.argtypes = (c_longlong, c_uint, POINTER(MemDescriptor))
| 56.843284 | 136 | 0.733928 |
e991e9f5f0c1bdfb1e7229e0942eed1c870966c6 | 1,478 | py | Python | gfg/trees/sorted_ll_to_bst.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | 1 | 2019-04-18T03:29:02.000Z | 2019-04-18T03:29:02.000Z | gfg/trees/sorted_ll_to_bst.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | gfg/trees/sorted_ll_to_bst.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | """
Given a Singly Linked List which has data members sorted in ascending order.
Construct a Balanced Binary Search Tree which has same data members as the given Linked List.
"""
from typing import Optional
from binary_tree_node import Node # type: ignore
from tree_traversal import inorder # type: ignore
if __name__ == "__main__":
head = LLNode(1)
head.next = LLNode(2)
head.next.next = LLNode(3)
inorder(sorted_ll_to_bst(head))
print()
head = LLNode(1)
head.next = LLNode(2)
head.next.next = LLNode(3)
head.next.next.next = LLNode(4)
head.next.next.next.next = LLNode(5)
head.next.next.next.next.next = LLNode(6)
head.next.next.next.next.next.next = LLNode(7)
inorder(sorted_ll_to_bst(head))
print()
| 23.460317 | 93 | 0.635995 |
e9920d3efc1f0f760192d2dad03a56edd3268c51 | 556 | py | Python | uvcoverage.py | haricash/bayesian-ionized-bubbles | c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6 | [
"MIT"
] | null | null | null | uvcoverage.py | haricash/bayesian-ionized-bubbles | c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6 | [
"MIT"
] | null | null | null | uvcoverage.py | haricash/bayesian-ionized-bubbles | c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from modules.conversions import enu2uvw
data = np.load("uv-array.npy")
e = data[0,:].transpose()
n = data[1,:].transpose()
uvarray = []
for i in range(120):
u,v = enu2uvw( wavelength=1.690,
hour_angle=i/30,
declination=0,
ref_declination=-30,
ref_hour_angle=0,
e=e,
n=n)
# np.save("uv-coverage.npy",u)
uvarray.append((u,v))
np.save("uv-coverage.npy",uvarray) | 23.166667 | 41 | 0.526978 |
e99213e148fd6d67da5c28d0d36014f1bdd56a29 | 6,540 | py | Python | main.py | Bishalsarang/Leetcode-Questions | 9d0c938778343c073b631884cc38411ea0ac7cd3 | [
"MIT"
] | 6 | 2021-09-17T12:26:59.000Z | 2022-03-11T00:37:35.000Z | main.py | Bishalsarang/Leetcode-Questions | 9d0c938778343c073b631884cc38411ea0ac7cd3 | [
"MIT"
] | null | null | null | main.py | Bishalsarang/Leetcode-Questions | 9d0c938778343c073b631884cc38411ea0ac7cd3 | [
"MIT"
] | null | null | null | # Author: Bishal Sarang
import json
import os
import pickle
import time
import bs4
import colorama
import requests
from colorama import Back, Fore
from ebooklib import epub
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from utils import *
import epub_writer
# Initialize Colorama
colorama.init(autoreset=True)
options = Options()
options.headless = True
# Disable Warning, Error and Info logs
# Show only fatal errors
options.add_argument("--log-level=3")
driver = webdriver.Chrome(options=options)
# Get upto which problem it is already scraped from track.conf file
completed_upto = read_tracker("track.conf")
# Load chapters list that stores chapter info
# Store chapter info
with open('chapters.pickle', 'rb') as f:
chapters = pickle.load(f)
if __name__ == "__main__":
main()
| 40.875 | 376 | 0.657034 |
e992f77a4ff4f3363d1bcb7a821282c7065578b8 | 4,985 | py | Python | model/magenta_app.py | DesmondYuan/DeepMovement | b4f347f139d52c345b592bc712260fa579b6c9a8 | [
"MIT"
] | null | null | null | model/magenta_app.py | DesmondYuan/DeepMovement | b4f347f139d52c345b592bc712260fa579b6c9a8 | [
"MIT"
] | null | null | null | model/magenta_app.py | DesmondYuan/DeepMovement | b4f347f139d52c345b592bc712260fa579b6c9a8 | [
"MIT"
] | 1 | 2020-12-31T14:44:38.000Z | 2020-12-31T14:44:38.000Z | # Adapted from Magenta console commands
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
magenta_model = Magenta_Model("/mnt/disks/ssd_disk/final/models/",
content_square_crop=False, style_square_crop=False,
style_image_size=256, content_image_size=256)
magenta_model.process_data(style_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*",
content_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*")
magenta_model.run("/mnt/disks/ssd_disk/final/tmp/", [0., 1.])
| 39.88 | 109 | 0.664995 |
e99385b476437e2b2258af182121e6b707636676 | 4,781 | py | Python | lisa/base_tools/wget.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 48 | 2018-05-19T17:46:34.000Z | 2020-09-28T21:09:06.000Z | lisa/base_tools/wget.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 1,261 | 2018-05-17T04:32:22.000Z | 2020-11-23T17:29:13.000Z | lisa/base_tools/wget.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 133 | 2018-05-15T23:12:14.000Z | 2020-11-13T10:37:49.000Z | import re
from pathlib import PurePosixPath
from typing import TYPE_CHECKING, Optional, Type
from lisa.executable import Tool
from lisa.tools.ls import Ls
from lisa.tools.mkdir import Mkdir
from lisa.tools.powershell import PowerShell
from lisa.tools.rm import Rm
from lisa.util import LisaException, is_valid_url
if TYPE_CHECKING:
from lisa.operating_system import Posix
| 31.453947 | 87 | 0.590253 |
e995e4148b59ca5a7b4ba1e5e2c168dedb8fd4e8 | 1,787 | py | Python | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | 1 | 2021-03-10T09:40:05.000Z | 2021-03-10T09:40:05.000Z | # Load other business attributes and set meta prefix
from pandas.io.json import json_normalize
flat_cafes = json_normalize(data["businesses"],
sep="_",
record_path="categories",
meta=['name',
'alias',
'rating',
['coordinates', 'latitude'],
['coordinates', 'longitude']],
meta_prefix='biz_')
# View the data
print(flat_cafes.head())
'''
<script.py> output:
alias title biz_name biz_alias biz_rating biz_coordinates_latitude biz_coordinates_longitude
0 coffee Coffee & Tea White Noise white-noise-brooklyn-2 4.5 40.689358 -73.988415
1 coffee Coffee & Tea Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
2 coffeeroasteries Coffee Roasteries Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
3 cafes Cafes Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
4 coffee Coffee & Tea Coffee Project NY coffee-project-ny-new-york 4.5 40.726990 -73.989220
Naming meta columns can get tedious for datasets with many attributes,
and code is susceptible to breaking if column names or nesting levels change.
In such cases, you may have to write a custom function and
employ techniques like recursion to handle the data.
''' | 52.558824 | 154 | 0.493005 |
e9960edde95bcaeefa3f37767c2580e46bec455b | 2,310 | py | Python | deprecated/obsolete/src/coverinst.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 90 | 2015-04-07T10:26:53.000Z | 2022-03-07T15:14:57.000Z | deprecated/obsolete/src/coverinst.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 14 | 2015-10-13T16:25:59.000Z | 2021-01-21T18:31:03.000Z | deprecated/obsolete/src/coverinst.py | Anirban166/tstl | 73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e | [
"Apache-2.0"
] | 32 | 2015-04-07T10:41:29.000Z | 2022-02-26T05:17:28.000Z | import sys
infn = sys.argv[1]
outfn = infn.split(".py")[0]+"_INST.py"
code = []
for l in open(infn):
code.append(l)
outf = open(outfn, 'w')
outf.write("import covertool\n")
ln = 0
inComment = False
justEnded = False
currentIndent = 0
lineIndent = 0
okChangeIndent = False
skipNext = False
doNotInstrument = ["class","def","import", "elif", "else:", "except", "}", "]", ")"]
indentChangers = ["class", "def", "if", "elif", "else:", "for", "try:", "except", "while"]
skipNextChars = [",","\\"]
conditionals = ["if","elif", "else"]
for l in code:
ln += 1
ls = l.split()
if l.find('"""') != -1:
inComment = not inComment
justEnded = True
if inComment:
outf.write(l)
continue
if justEnded:
outf.write(l)
justEnded = False
continue
lineIndent = 0
for c in l:
if c != " ":
break
else:
lineIndent += 1
instrument = False
if (lineIndent > currentIndent):
if okChangeIndent and not skipNext:
currentIndent = lineIndent
instrument = True
else:
instrument = ls != []
currentIndent = lineIndent
if (ls != []) and ((ls[0] in doNotInstrument) or (ls[0][0] == "#")):
instrument = False
if (ls != []) and (ls[0] in conditionals) and (":" in l) and (ls[-1][-1] != ":"):
if ls[0] == "if":
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
ld = infn + ":" + str(ln)+":True"
sc = l.split(":")
sct = ""
started = False
for c in sc[1]:
if started or (c != " "):
started = True
sct += c
outf.write(sc[0] + ":" + "\n")
outf.write((" " * lineIndent) + ' covertool.cover("' + ld + '")\n')
outf.write((" " * lineIndent) + " " + sct + "\n")
okChangeIndent = False
skipNext = False
continue
if instrument:
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
okChangeIndent = skipNext or ((ls != []) and (ls[0] in indentChangers))
skipNext = (len(l) > 2) and (l[-2] in skipNextChars)
outf.write(l)
outf.close()
| 25.666667 | 90 | 0.490909 |
e997ebbde4fce0c730819b363c5adbce38d2664d | 8,729 | py | Python | actionkit_templates/settings.py | MoveOnOrg/actionkit-templates | 2d06ad7634fac59e352d5cd8625f3092624d30e4 | [
"Unlicense",
"MIT"
] | 8 | 2016-11-29T07:34:04.000Z | 2021-06-09T18:09:25.000Z | actionkit_templates/settings.py | MoveOnOrg/actionkit-templates | 2d06ad7634fac59e352d5cd8625f3092624d30e4 | [
"Unlicense",
"MIT"
] | 12 | 2016-12-06T17:24:58.000Z | 2022-02-21T20:11:47.000Z | actionkit_templates/settings.py | MoveOnOrg/actionkit-templates | 2d06ad7634fac59e352d5cd8625f3092624d30e4 | [
"Unlicense",
"MIT"
] | 4 | 2016-12-25T11:16:34.000Z | 2020-02-11T18:48:26.000Z | import json
import os
import sys
import time
try:
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
from django.conf.urls import url
from django.conf.urls.static import static
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, redirect
from django.template.loader import render_to_string
from django.template.base import add_to_builtins
from django.views.static import serve
from .moveon_fakeapi import mo_event_data
"""
try running with
aktemplates runserver 0.0.0.0:1234
"""
DEBUG = True
SECRET_KEY = 'who cares!'
INSTALLED_APPS = ['actionkit_templates', ]
try:
import template_debug #django-template-debug
INSTALLED_APPS.append('template_debug')
import django_extensions #django-extensions
INSTALLED_APPS.append('django_extensions')
except:
pass
#one directory down
APP_PATH = os.path.dirname(__file__)
PROJECT_ROOT_PATH = os.path.abspath(os.getcwd())
#############
# STATIC DIRECTORY
#############
#note this only works if DEBUG=True
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(PROJECT_ROOT_PATH, './static'))
STATIC_URL = os.environ.get('STATIC_URL', '/static/')
STATIC_FALLBACK = os.environ.get('STATIC_FALLBACK', False)
STATIC_LOCAL = os.environ.get('STATIC_URL', None) # an explicit local or not
#############
# TEMPLATES
#############
DEFAULT_TEMPLATES = os.path.join(APP_PATH, 'templates')
DIR_TEMPLATES = []
if os.environ.get('TEMPLATE_DIR'):
DIR_TEMPLATES.append(os.environ.get('TEMPLATE_DIR'))
else:
for d in ('./', './template_set', './_layouts', './_includes'):
dd = os.path.join(PROJECT_ROOT_PATH, d)
if os.path.exists(dd):
DIR_TEMPLATES.append(dd)
DIR_TEMPLATES.append(DEFAULT_TEMPLATES)
TEMPLATES = [
{ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': DIR_TEMPLATES,
},
]
MIDDLEWARE_CLASSES = []
add_to_builtins('actionkit_templates.templatetags.actionkit_tags')
#############
# HOME PAGE TEST
#############
def event_api_moveon_fake(request):
"""Fake representation of MoveOn events api"""
cxt = _get_context_data(request, 'events', 'WILL_USE_REFERER_HEADER', use_referer=True)
events = cxt.get('events', [])
if cxt.get('SLOW_API'):
# This allows us to test for race conditions
time.sleep(2)
if cxt.get('500_API'):
raise Exception('Cause failure to allow graceful degradation')
search_results = [mo_event_data(evt) for evt in events]
return HttpResponse(json.dumps({'events': search_results}), content_type='application/json')
#############
# URLS
#############
ROOT_URLCONF = 'actionkit_templates.settings'
urlpatterns = [
url(r'^context', login_context),
url(r'^progress', login_context, name='progress'),
url(r'^logout', logout, name="logout"),
url(r'^(?P<name>[-.\w]+)?(/(?P<page>[-.\w]+))?$', index),
url(r'^forgot/$', user_password_forgot, name='user_password_forgot'),
url(r'^cms/event/(?P<page>[-.\w]+)/search_results/', event_search_results, name='event_search_results'),
url(r'^fake/api/events', event_api_moveon_fake, name="event_api_moveon_fake"),
# ActionKit urls or {% url %} template tag:
url(r'^fake/stub/reverse', event_api_moveon_fake, name="reverse_donation"),
]
if STATIC_ROOT:
urlpatterns = (urlpatterns
+ static(STATIC_URL, document_root=STATIC_ROOT)
+ static('/resources/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './resources'))
+ static('/media/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './media'))
)
if os.path.exists(os.path.join(PROJECT_ROOT_PATH, 'local_settings.py')):
from local_settings import *
| 35.77459 | 112 | 0.643487 |
e9a055a93eab839ab9a14c3a44071ae1537f4ac6 | 1,528 | py | Python | fpga/test/fifo/fifo_tb.py | edge-analytics/fpga-sleep-tracker | 50efd114500e134297be5229775a9ec6809abb53 | [
"MIT"
] | 2 | 2021-11-05T13:27:35.000Z | 2022-03-12T04:44:03.000Z | fpga/test/fifo/fifo_tb.py | edge-analytics/fpga-sleep-tracker | 50efd114500e134297be5229775a9ec6809abb53 | [
"MIT"
] | null | null | null | fpga/test/fifo/fifo_tb.py | edge-analytics/fpga-sleep-tracker | 50efd114500e134297be5229775a9ec6809abb53 | [
"MIT"
] | null | null | null | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import ClockCycles, RisingEdge, FallingEdge, NextTimeStep, ReadWrite
N = 16
test_input = list(range(N))
# FIXME add more unit tests here
| 28.296296 | 89 | 0.630236 |
e9a05f45a351e31a1eadb205f7bd181f6ae63473 | 2,314 | py | Python | Mock-exams/02-Mock-exam/notes/notes/app/views.py | M0673N/Python-Web-Basics | cecc27f7a12f990756edcc8885290eb3b2e487b7 | [
"MIT"
] | null | null | null | Mock-exams/02-Mock-exam/notes/notes/app/views.py | M0673N/Python-Web-Basics | cecc27f7a12f990756edcc8885290eb3b2e487b7 | [
"MIT"
] | null | null | null | Mock-exams/02-Mock-exam/notes/notes/app/views.py | M0673N/Python-Web-Basics | cecc27f7a12f990756edcc8885290eb3b2e487b7 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from notes.app.forms import ProfileForm, NoteForm, NoteDeleteForm
from notes.app.models import Profile, Note
| 29.291139 | 88 | 0.600259 |
e9a09dff959ae1110da793fb71caa1d3736f73bf | 3,066 | py | Python | trainwiki.py | tomsonsgs/TRAN-MMA-master | 91bf927c64a8d813ba60ae12e61e8f44830a82cc | [
"Apache-2.0"
] | null | null | null | trainwiki.py | tomsonsgs/TRAN-MMA-master | 91bf927c64a8d813ba60ae12e61e8f44830a82cc | [
"Apache-2.0"
] | null | null | null | trainwiki.py | tomsonsgs/TRAN-MMA-master | 91bf927c64a8d813ba60ae12e61e8f44830a82cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 00:56:18 2019
@author: tang
"""
seed=102
vocab="vocab.bin"
train_file="train.bin"
dropout=0.3
hidden_size=256
embed_size=100
action_embed_size=100
field_embed_size=32
type_embed_size=32
lr_decay=0.5
beam_size=5
patience=2
lstm='lstm'
col_att='affine'
model_name='wiki'
#python -u exp.py \
# --cuda \
# --seed ${seed} \
# --mode train \
# --batch_size 64 \
# --parser wikisql_parser \
# --asdl_file asdl/lang/sql/sql_asdl.txt \
# --transition_system sql \
# --evaluator wikisql_evaluator \
# --train_file data/wikisql/${train_file} \
# --dev_file data/wikisql/dev.bin \
# --sql_db_file data/wikisql/dev.db \
# --vocab data/wikisql/${vocab} \
# --glove_embed_path data/contrib/glove.6B.100d.txt \
# --lstm ${lstm} \
# --column_att ${col_att} \
# --no_parent_state \
# --no_parent_field_embed \
# --no_parent_field_type_embed \
# --no_parent_production_embed \
# --hidden_size ${hidden_size} \
# --embed_size ${embed_size} \
# --action_embed_size ${action_embed_size} \
# --field_embed_size ${field_embed_size} \
# --type_embed_size ${type_embed_size} \
# --dropout ${dropout} \
# --patience ${patience} \
# --max_num_trial 5 \
# --lr_decay ${lr_decay} \
# --glorot_init \
# --beam_size ${beam_size} \
# --eval_top_pred_only \
# --decode_max_time_step 50 \
# --log_every 10 \
# --save_to saved_models/wikisql/${model_name}
| 28.924528 | 63 | 0.689498 |
e9a18b845016664a0d3350f6afe5c55f943340ff | 3,476 | py | Python | heritago/heritages/tests/tests_annotationdatamodel.py | SWE574-Groupago/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 6 | 2017-02-13T10:22:18.000Z | 2017-03-11T20:38:30.000Z | heritago/heritages/tests/tests_annotationdatamodel.py | SWE574-Groupago/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 172 | 2017-02-12T21:07:27.000Z | 2017-06-08T10:46:58.000Z | heritago/heritages/tests/tests_annotationdatamodel.py | SWE574-RenameMe/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 17 | 2017-02-13T08:29:37.000Z | 2017-06-29T14:43:53.000Z | import unittest
from django.test import Client
| 39.954023 | 116 | 0.561277 |
e9a26fd47a49716298a92bfa1c231de0e135e9dd | 824 | py | Python | tests/test_main.py | cesarbruschetta/julio-cesar-decrypter | 1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c | [
"BSD-2-Clause"
] | null | null | null | tests/test_main.py | cesarbruschetta/julio-cesar-decrypter | 1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c | [
"BSD-2-Clause"
] | null | null | null | tests/test_main.py | cesarbruschetta/julio-cesar-decrypter | 1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c | [
"BSD-2-Clause"
] | null | null | null | import unittest
from unittest.mock import patch
from jc_decrypter.main import process, main
| 27.466667 | 85 | 0.679612 |
e9a341910fc41cf0116d2acf9b1914cdde30cec5 | 615 | py | Python | library/tests/test_setup.py | pimoroni/mics6814-python | 73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79 | [
"MIT"
] | 6 | 2021-05-16T05:02:57.000Z | 2022-01-05T16:02:46.000Z | library/tests/test_setup.py | pimoroni/mics6814-python | 73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79 | [
"MIT"
] | 3 | 2021-09-15T10:24:56.000Z | 2022-01-24T21:16:05.000Z | library/tests/test_setup.py | pimoroni/mics6814-python | 73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79 | [
"MIT"
] | null | null | null | import mock
| 24.6 | 67 | 0.666667 |
e9a3a2aba365270bf90b9a6d7673d3d58bca51fe | 3,290 | py | Python | template_maker/data/documents.py | codeforamerica/template-maker | 66d4744c123d5b868cf259e947dc924bb5a25c9a | [
"BSD-3-Clause"
] | 9 | 2015-02-23T22:03:30.000Z | 2020-01-31T19:06:50.000Z | template_maker/data/documents.py | codeforamerica/template-maker | 66d4744c123d5b868cf259e947dc924bb5a25c9a | [
"BSD-3-Clause"
] | 37 | 2015-03-01T01:10:22.000Z | 2015-12-31T17:24:42.000Z | template_maker/data/documents.py | codeforamerica/template-maker | 66d4744c123d5b868cf259e947dc924bb5a25c9a | [
"BSD-3-Clause"
] | 2 | 2016-01-21T09:59:17.000Z | 2021-04-16T10:51:04.000Z | import datetime
from template_maker.database import db
from template_maker.generator.models import DocumentBase, DocumentPlaceholder
from template_maker.builder.models import TemplateBase, TemplatePlaceholders
from template_maker.data.placeholders import get_template_placeholders
def get_all_documents():
'''
Returns all documents currently being edited
'''
return DocumentBase.query.all()
def get_document_placeholders(document_id):
'''
Gets all the placeholders associated with a document
'''
return db.session.query(
DocumentPlaceholder.id, TemplatePlaceholders.full_name, TemplatePlaceholders.type,
TemplatePlaceholders.display_name, DocumentPlaceholder.value
).filter(DocumentPlaceholder.document_id==document_id).filter(
DocumentPlaceholder.placeholder_id==TemplatePlaceholders.id
).all()
def get_single_document(document_id):
'''
Returns a single document from a template_id
'''
return DocumentBase.query.get(document_id)
| 31.333333 | 90 | 0.730091 |
e9a3a67be8807d04ec27501d70d8ad63e1c4fad0 | 1,194 | py | Python | app/db.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | app/db.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | app/db.py | JuanDM93/fcc-fastapi-demo | 7d20f91fa96989d22426632c1ab2550f62898789 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import settings
SQLALCHEMY_DATABASE_URL = 'postgresql://{user}:{password}@{host}:{port}/{db}'.format(
user=settings.DB_USER,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
db=settings.DB_NAME
)
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
"""
while True:
try:
conn = psycopg2.connect(
host=settings.DB_HOST,
port=settings.DB_PORT,
database=settings.DB_NAME,
user=settings.DB_USER,
password=settings.DB_PASSWORD,
cursor_factory=RealDictCursor
)
cur = conn.cursor()
print("Connected to the database")
break
except (Exception, psycopg2.Error) as error:
print(error)
print(f"Retrying in {settings.SLEEP_TIME} secs")
sleep(settings.SLEEP_TIME)
"""
| 23.88 | 85 | 0.664154 |
e9a3b150e872655275d100c3ba1868368c2d52e0 | 716 | py | Python | katph/spiders/stackoverflow_spider.py | trujunzhang/katph | b71b5a7171b133fcf087f77cd612c13a966ecd61 | [
"MIT"
] | null | null | null | katph/spiders/stackoverflow_spider.py | trujunzhang/katph | b71b5a7171b133fcf087f77cd612c13a966ecd61 | [
"MIT"
] | null | null | null | katph/spiders/stackoverflow_spider.py | trujunzhang/katph | b71b5a7171b133fcf087f77cd612c13a966ecd61 | [
"MIT"
] | null | null | null | import scrapy
from scrapy.selector import Selector
from katph.items import StackItem
| 31.130435 | 76 | 0.603352 |
e9a3c9a700552b660476506eef95bc2604a7a3bc | 1,156 | py | Python | migrations/0002_user_biography_user_gender_user_phone_number_and_more.py | sepydev/django-user | 1a67caa197f9bb72ec41491cac1ae0a94385da87 | [
"MIT"
] | 1 | 2022-02-05T18:26:02.000Z | 2022-02-05T18:26:02.000Z | migrations/0002_user_biography_user_gender_user_phone_number_and_more.py | mrprocs/django-user | 1a67caa197f9bb72ec41491cac1ae0a94385da87 | [
"MIT"
] | null | null | null | migrations/0002_user_biography_user_gender_user_phone_number_and_more.py | mrprocs/django-user | 1a67caa197f9bb72ec41491cac1ae0a94385da87 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-02-07 17:53
import django.core.validators
from django.db import migrations, models
| 33.028571 | 264 | 0.601211 |
e9a5f013db2d4eef22aa1809148db7e678473ae5 | 501 | py | Python | utils/constants.py | tholiao/learning-morph-and-ctrl | 6093cc7cede3b7ab2f3304d7060815712d535a2d | [
"MIT"
] | 1 | 2022-03-10T08:17:18.000Z | 2022-03-10T08:17:18.000Z | utils/constants.py | tholiao/learning-morph-and-ctrl | 6093cc7cede3b7ab2f3304d7060815712d535a2d | [
"MIT"
] | null | null | null | utils/constants.py | tholiao/learning-morph-and-ctrl | 6093cc7cede3b7ab2f3304d7060815712d535a2d | [
"MIT"
] | null | null | null | import numpy as np
from walkers import ScalableWalker
DEFAULT_SCENE = "scenes/walker.ttt"
DEFAULT_WALKER = ScalableWalker
N_MRPH_PARAMS = [3, 3, 6]
N_CTRL_PARAMS = [4, 8, 8]
MORPHOLOGY_BOUNDS = [
[[0.7] * 3, [1.4] * 3],
[[0.7] * 3, [1.4] * 3],
[[0.7] * 6, [1.4] * 6]
]
CONTROLLER_BOUNDS = [
[[1, -np.pi, 0, 0], [45, np.pi, 1, 1]],
[[1, -np.pi, 0, 0, 0, 0, .5, .5], [45, np.pi, .4, .4, .4, .4, 1, 1]],
[[1, -np.pi, 0, 0, 0, 0, .5, .5], [45, np.pi, .4, .4, .4, .4, 1, 1]]
]
| 22.772727 | 73 | 0.493014 |
e9a6214120a911400cce37d1a1a474426ab60fe5 | 1,284 | py | Python | hardware/joystick.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null | hardware/joystick.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null | hardware/joystick.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null |
from solid import *
from solid.utils import *
import util
from util import inch_to_mm, tube, ABIT, corners, pipe
from fixings import M3
from math import tan, radians
"""
Sub-miniature analog joy-sticks.
There's not much useful in documentation of their measurements.
I'm going to treat it like a sphere with a 14mm radius, with a
12mm diameter cylinder sticking out the top.
40 degrees in any direction. The knob on the top is 20mm wide
so the hole in the panel must be at least that wide.
"""
fixing = M3
width=35.0
depth=35.0
pivot_height=9.6
panel_height=11.0
height=pivot_height+panel_height
if __name__ == '__main__':
export_scad()
| 28.533333 | 114 | 0.696262 |
e9a7d2f66b4f8dbaa2eb22e345ef51c2d6c7fe14 | 2,360 | py | Python | src/Line.py | npanuhin/BIOCAD-BWA | 50f56fd7d08b8ad1247934c902fb137f3c28cdf8 | [
"MIT"
] | null | null | null | src/Line.py | npanuhin/BIOCAD-BWA | 50f56fd7d08b8ad1247934c902fb137f3c28cdf8 | [
"MIT"
] | null | null | null | src/Line.py | npanuhin/BIOCAD-BWA | 50f56fd7d08b8ad1247934c902fb137f3c28cdf8 | [
"MIT"
] | null | null | null | from typing import List
from collections import deque
def copyCoords(self):
return Line(self.start_x, self.start_y, self.end_x, self.end_y, dots=[])
def shift(self, dx=0, dy=0):
self.start_x += dx
self.start_y += dy
self.end_x += dx
self.end_y += dy
for i in range(len(self.dots)):
self.dots[i][0] += dx
self.dots[i][1] += dy
def shiftLines(lines, count) -> List[Line]:
result = deque(lines)
for _ in range(count):
result.append(result.popleft())
return list(result)
| 25.106383 | 84 | 0.555932 |
e9a8550e13deee649e253f45b07fa459658b1f18 | 205 | py | Python | hw_asr/model/__init__.py | ArturGoldman/ASR-HW | 96494a7ce3f6661fbafb8077f15ece8c6e4b1a11 | [
"MIT"
] | null | null | null | hw_asr/model/__init__.py | ArturGoldman/ASR-HW | 96494a7ce3f6661fbafb8077f15ece8c6e4b1a11 | [
"MIT"
] | null | null | null | hw_asr/model/__init__.py | ArturGoldman/ASR-HW | 96494a7ce3f6661fbafb8077f15ece8c6e4b1a11 | [
"MIT"
] | 1 | 2021-10-29T18:46:14.000Z | 2021-10-29T18:46:14.000Z | from hw_asr.model.baseline_model import BaselineModel, BasicLSTM, BasicGRU
from hw_asr.model.QuartzNet import QuartzNet
__all__ = [
"BaselineModel",
"BasicLSTM",
"BasicGRU",
"QuartzNet"
]
| 20.5 | 74 | 0.731707 |
e9ab3dbd3f61574c06a9441f006ee914a6d3064c | 4,458 | py | Python | Fishers LDA/fishersLDA.py | Exorust/Machine-Learning-Algorithms | c634fd0a1a49ea2574f0867b591ee8a2cd401fd2 | [
"MIT"
] | null | null | null | Fishers LDA/fishersLDA.py | Exorust/Machine-Learning-Algorithms | c634fd0a1a49ea2574f0867b591ee8a2cd401fd2 | [
"MIT"
] | null | null | null | Fishers LDA/fishersLDA.py | Exorust/Machine-Learning-Algorithms | c634fd0a1a49ea2574f0867b591ee8a2cd401fd2 | [
"MIT"
] | null | null | null | '''**********************************************
CODE TO IMPLEMENT FISHER'S LDA -
Given two dimensional dataset with two classes 0 and 1,
Perform Fisher's LDA on the dataset,
Perform dimensionality reduction and find the suitable vector to project it onto,
Find the threshold value for separation of the two classes
***********************************************'''
import numpy as np
import matplotlib.pyplot as plt
import time
# to calculate the execution time of th clustering
start_time = time.time()
# reading data csv file
my_data = np.genfromtxt('datasets/dataset_3.csv', delimiter=',')
# deleting the serial number column
data=np.delete(my_data,0,1)
# separating the two classes and deleting the target variable column
class0 = data[np.nonzero(data[:,2] == 0)]
class1=data[np.nonzero(data[:,2]==1)]
class0=np.delete(class0,2,1)
class1=np.delete(class1,2,1)
# finding the mean of the the two classes
mean0=np.mean(class0,0)
mean1=np.mean(class1,0)
''' calculating the variability of the two classes using the formula :
variability=summation over points belonging to class 1((xi-mean)(xi-mean)tanspose)
'''
var0=np.zeros(1)
temp=np.array(mean0)
for i in range (class0.shape[0]) :
temp=(class0[i,:]-mean0)
var0+=np.dot(temp, temp.T)
var1=np.zeros(1)
temp=np.array(mean1)
for i in range (class1.shape[0]) :
temp=(class1[i,:]-mean1)
var1+=np.dot(temp, temp.T)
sw=var1+var0
# calculating the inverse of Sw matrix
invsw=np.array([(1/sw[0])])
# calculating the w vector using below formula
w=invsw*(mean1-mean0)
# declaring arrays for storing points' distance from the vector
dist0=np.zeros((class0.shape[0],1))
dist1=np.zeros((class1.shape[0],1))
# finding the the vector to project the points on;
# such that the means are farthest from each other
wperp=np.array([-w[1],w[0]])
# finding the norm of the w vector
norm_w=np.linalg.norm(wperp)
''' calculating the distance of original data points from the vector using the formula:
r=w.T/norm(w)
'''
for i in range(dist0.shape[0]):
dist0[i]=np.dot(wperp.T,class0[i,:])/norm_w
for i in range(dist1.shape[0]):
dist1[i]=np.dot(wperp.T,class1[i,:])/norm_w
''' declaring the arrays to store the projected points data using formula:
x_projected = x_actual-r*w/norm(w)
'''
class0proj=np.zeros((class0.shape[0],2))
class1proj=np.zeros((class1.shape[0],2))
for i in range(class0.shape[0]):
class0proj[i,:]=np.subtract((class0[i,:]),(dist0[i]*wperp.T/norm_w))
for i in range(class1.shape[0]):
class1proj[i,:]=np.subtract((class1[i,:]),(dist1[i]*wperp.T/norm_w))
# displaying the plot with the original data , projected points and line
plt.scatter(class0[:,0],class0[:,1])
plt.scatter(class1[:,0],class1[:,1])
plt.scatter(class0proj[:,0],class0proj[:,1],color='blue')
plt.scatter(class1proj[:,0],class1proj[:,1],color='red')
#concatenating the two classes into a single array
pointsproj=np.concatenate((class0proj,class1proj),axis=0)
plt.plot(pointsproj[:,0],pointsproj[:,1],'m')
# storing dimensionally reduced projected points in array using formula:
# y(x) = w.T*x
newproj0=np.zeros((class0.shape[0],1))
newproj1=np.zeros((class1.shape[0],1))
for i in range(class0.shape[0]):
newproj0[i,:]=np.dot(wperp.T,class0[i,:])
for i in range(class1.shape[0]):
newproj1[i,:]=np.dot(wperp.T,class1[i,:])
# storing the means and standard deviations of the projected points
proj0mean=np.mean(newproj0)
proj1mean=np.mean(newproj1)
proj0std=np.std(newproj0)
proj1std=np.std(newproj1)
'''
Below function "solve" to finds the threshold value separating the two
classes when dimensionally reduced -
input : m1, m2 - means of the two classes whose point of intersection needs to be found
std1, std2 - the standard deviations of the two classes
'''
threshold=solve(proj0mean,proj1mean,proj0std,proj1std)
print("Threshold value =", threshold)
print("Time taken = ",(time.time()-start_time))
plt.savefig('Results/Result3.png')
| 32.540146 | 104 | 0.685509 |
e9ad668ebc54401a790054fd2f8bfe6c1d6a7c9b | 3,071 | py | Python | study/pytorch_study/14_dropout.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | 2 | 2020-04-06T09:09:19.000Z | 2020-07-24T03:59:55.000Z | study/pytorch_study/14_dropout.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | null | null | null | study/pytorch_study/14_dropout.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import torch
n_input = 1
# n_hidden should be very big to make dropout's effect more clear
n_hidden = 100
n_output = 1
EPOCH = 1000
LR = 0.01
torch.manual_seed(1) # reproducible
N_SAMPLES = 20
# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# show data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
net_overfitting = torch.nn.Sequential(
torch.nn.Linear(n_input, n_hidden),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_hidden),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_output)
)
net_dropout = torch.nn.Sequential(
torch.nn.Linear(n_input, n_hidden),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_hidden),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_output)
)
optimizer_overfit = torch.optim.Adam(net_overfitting.parameters(), lr=LR)
optimizer_drop = torch.optim.Adam(net_dropout.parameters(), lr=LR)
loss_func = torch.nn.MSELoss()
plt.ion()
for i in range(EPOCH):
pred_overfit = net_overfitting(x)
pred_drop = net_dropout(x)
loss_overfit = loss_func(pred_overfit, y)
loss_drop = loss_func(pred_drop, y)
optimizer_overfit.zero_grad()
optimizer_drop.zero_grad()
loss_overfit.backward()
loss_drop.backward()
optimizer_overfit.step()
optimizer_drop.step()
#
if i % 10 == 0: # 10
# change to eval mode in order to fix drop out effect
net_overfitting.eval()
# parameters for dropout differ from train mode
net_dropout.eval()
# plotting
plt.cla()
test_pred_ofit = net_overfitting(test_x)
test_pred_drop = net_dropout(test_x)
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=5, alpha=0.3, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=5, alpha=0.3, label='test')
plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(), 'r-', lw=3, label='overfitting')
plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).data.numpy(),
fontdict={'size': 12, 'color': 'red'})
plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).data.numpy(),
fontdict={'size': 12, 'color': 'orange'})
plt.legend(loc='upper left');
plt.ylim((-2.5, 2.5));
plt.pause(0.1)
#
net_overfitting.train()
net_dropout.train()
plt.ioff()
plt.show() | 32.326316 | 101 | 0.652231 |
e9b1301b28dc40f613c5048548a9e3fd67d1e1a8 | 72,649 | py | Python | harmonica/twiss.py | i-a-morozov/harmonica | 546e664e59457ad9cc354d108402137e90e0d8c2 | [
"MIT"
] | null | null | null | harmonica/twiss.py | i-a-morozov/harmonica | 546e664e59457ad9cc354d108402137e90e0d8c2 | [
"MIT"
] | null | null | null | harmonica/twiss.py | i-a-morozov/harmonica | 546e664e59457ad9cc354d108402137e90e0d8c2 | [
"MIT"
] | null | null | null | """
Twiss module.
Compute twiss parameters from amplitude & phase data.
Twiss filtering & processing.
"""
import numpy
import torch
import pandas
from scipy import odr
from .util import mod, generate_pairs, generate_other
from .statistics import weighted_mean, weighted_variance
from .statistics import median, biweight_midvariance, standardize
from .anomaly import threshold, dbscan, local_outlier_factor, isolation_forest
from .decomposition import Decomposition
from .model import Model
from .table import Table
def filter_twiss(self, plane:str = 'x', *,
phase:dict={'use': True, 'threshold': 10.00},
model:dict={'use': True, 'threshold': 00.50},
value:dict={'use': True, 'threshold': 00.50},
sigma:dict={'use': True, 'threshold': 00.25},
limit:dict={'use': True, 'threshold': 05.00}) -> dict:
"""
Filter twiss for given data plane and cleaning options.
Parameters
----------
plane: str
data plane ('x' or 'y')
phase: dict
clean based on advance phase data
used if 'use' is True, remove combinations with absolute value of phase advance cotangents above threshold value
model: dict
clean based on phase advance proximity to model
used if 'use' is True, remove combinations with (x - x_model)/x_model > threshold value
value: dict
clean based on estimated twiss beta error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
sigma: dict
clean based on estimated phase advance error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
limit: dict
clean outliers outside scaled interval
used if 'use' is True
Returns
-------
mask (torch.Tensor)
"""
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a_m, b_m = self.model.ax.reshape(-1, 1), self.model.bx.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ax'], self.data_phase['bx'], self.data_phase['sigma_ax'], self.data_phase['sigma_bx']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik']
if plane == 'y':
a_m, b_m = self.model.ay.reshape(-1, 1), self.model.by.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ay'], self.data_phase['by'], self.data_phase['sigma_ay'], self.data_phase['sigma_by']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik']
if phase['use']:
cot_ij, cot_m_ij = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
cot_ik, cot_m_ik = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
mask *= phase['threshold'] > cot_ij
mask *= phase['threshold'] > cot_m_ij
mask *= phase['threshold'] > cot_ik
mask *= phase['threshold'] > cot_m_ik
if model['use']:
mask *= model['threshold'] > torch.abs((f_ij - f_m_ij)/f_m_ij)
mask *= model['threshold'] > torch.abs((f_ik - f_m_ik)/f_m_ik)
if value['use']:
mask *= value['threshold'] > torch.abs((b - b_m)/b_m)
if sigma['use']:
mask *= 1/sigma['threshold'] < torch.abs(f_ij/sigma_f_ij)
mask *= 1/sigma['threshold'] < torch.abs(f_ik/sigma_f_ik)
if limit['use']:
factor = torch.tensor(limit['threshold'], dtype=self.dtype, device=self.device)
mask *= threshold(standardize(a, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask *= threshold(standardize(b, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
return mask
def mask_range(self, limit:tuple) -> torch.Tensor:
"""
Generate weight mask based on given range limit.
Parameters
----------
limit: tuple
range limit to use, (min, max), 1 <= min <= max, mim is excluded, for full range min==max
Returns
-------
weight mask (torch.Tensor)
"""
size, length, *_ = self.shape
mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
limit_min, limit_max = limit
if limit_min == limit_max:
count = count[:limit_max]
*_, count_max = count
mask[:, :count_max] = 1
if limit_min < limit_max:
count = count[limit_min - 1:limit_max]
count_min, *_, count_max = count
mask[:, count_min:count_max] = 1
count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
limit_min, limit_max = self.limit
if limit_min == limit_max:
count = count[:limit_max]
*_, count_max = count
mask = mask[:, :count_max]
if limit_min < limit_max:
count = count[limit_min - 1:limit_max]
count_min, *_, count_max = count
mask = mask[:, count_min:count_max]
return mask
def mask_location(self, table:list) -> torch.Tensor:
"""
Generate weight mask based on given range limit.
Parameters
----------
table: list
list of locations to remove
Returns
-------
weight mask (torch.Tensor)
"""
size, length, *_ = self.combo.shape
mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
for location in table:
_, other = self.index.swapaxes(0, -1)
other = torch.mul(*(other != location).swapaxes(0, 1)).T
mask = (mask == other)
return mask.logical_not()
def mask_distance(self, function) -> torch.Tensor:
"""
Generate weight mask based on given range limit.
Parameters
----------
function: Callable
function to apply to distance data
Returns
-------
weight mask (torch.Tensor)
"""
mask = torch.stack([function(distance) for distance in self.distance])
mask = torch.stack([mask for _ in range(self.size)])
return mask
def process_twiss(self, plane:str='x', *,
weight:bool=True, mask:torch.Tensor=None) -> dict:
"""
Process twiss data.
Parameters
----------
plane: str
data plane ('x' or 'y')
weight: bool
flag to use weights
mask: torch.Tensor
mask
Returns
-------
twiss data (dict)
dict_keys(['value_a', 'sigma_a', 'error_a', 'value_b', 'sigma_b', 'error_b'])
"""
result = {}
if mask == None:
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a, sigma_a, a_m = self.data_phase['ax'], self.data_phase['sigma_ax'], self.model.ax
b, sigma_b, b_m = self.data_phase['bx'], self.data_phase['sigma_bx'], self.model.bx
if plane == 'y':
a, sigma_a, a_m = self.data_phase['ay'], self.data_phase['sigma_ay'], self.model.ay
b, sigma_b, b_m = self.data_phase['by'], self.data_phase['sigma_by'], self.model.by
if not weight:
center = weighted_mean(a, weight=mask)
spread = weighted_variance(a, weight=mask, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
center = weighted_mean(b, weight=mask)
spread = weighted_variance(b, weight=mask, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
return result
weight = (mask.to(self.dtype)/sigma_a**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(a, weight=weight)
spread = weighted_variance(a, weight=weight, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
weight = (mask.to(self.dtype)/sigma_b**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(b, weight=weight)
spread = weighted_variance(b, weight=weight, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
if plane == 'x':
self.ax, self.sigma_ax = result['value_a'], result['sigma_a']
self.bx, self.sigma_bx = result['value_b'], result['sigma_b']
if plane == 'y':
self.ay, self.sigma_ay = result['value_a'], result['sigma_a']
self.by, self.sigma_by = result['value_b'], result['sigma_b']
return result
def get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *,
refit:bool=False, factor:float=5.0,
level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None,
ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None,
transport:torch.Tensor=None, **kwargs) -> dict:
"""
Estimate twiss from tbt data using ODR fit.
Note, if no initial guesses for twiss and/or transport are given, model values will be used
This method is sensitive to noise and calibration errors
Parameters
----------
n: int
number of turns to use
x: torch.Tensor
x data
y: torch.Tensor
y data
refit: bool
flag to refit twiss using estimated invariants
factor: float
threshold factor for invariants spread
level: float
default noise level
sigma_x: torch.Tensor
x noise sigma for each signal
sigma_y: torch.Tensor
y noise sigma for each signal
ax, bx, ay, by: torch.Tensor
initial guess for twiss parameters at monitor locations
transport: torch.Tensor
transport matrices between monitor locations
Returns
-------
fit result (dict)
dict_keys(['jx', 'ax', 'bx', 'sigma_jx', 'sigma_ax', 'sigma_bx', 'jy', 'ay', 'by', 'sigma_jy', 'sigma_ay', 'sigma_by', 'mux', 'muy'])
"""
if ax is None:
ax = self.model.ax[self.model.monitor_index].cpu().numpy()
else:
ax = ax.cpu().numpy()
if bx is None:
bx = self.model.bx[self.model.monitor_index].cpu().numpy()
else:
bx = bx.cpu().numpy()
if ay is None:
ay = self.model.ay[self.model.monitor_index].cpu().numpy()
else:
ay = ay.cpu().numpy()
if by is None:
by = self.model.by[self.model.monitor_index].cpu().numpy()
else:
by = by.cpu().numpy()
if transport is None:
probe = torch.tensor(self.model.monitor_index, dtype=torch.int64, device=self.device)
other = torch.roll(probe, -1)
other[-1] += self.model.size
transport = self.model.matrix(probe, other)
copy = torch.clone(transport)
value_jx, error_jx = [], []
value_jy, error_jy = [], []
value_ax, error_ax = [], []
value_ay, error_ay = [], []
value_bx, error_bx = [], []
value_by, error_by = [], []
for i in range(self.model.monitor_count):
q1 = x[i, :n].cpu().numpy()
q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_x is not None:
s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 0, 0].cpu().numpy()
m12 = transport[i, 0, 1].cpu().numpy()
alpha, beta = ax[i], bx[i]
action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
alpha, beta, action = fit.beta
sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
value_jx.append(action)
value_ax.append(alpha)
value_bx.append(beta)
error_jx.append(sigma_action)
error_ax.append(sigma_alpha)
error_bx.append(sigma_beta)
q1 = y[i, :n].cpu().numpy()
q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_y is not None:
s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 2, 2].cpu().numpy()
m12 = transport[i, 2, 3].cpu().numpy()
alpha, beta = ay[i], by[i]
action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
alpha, beta, action = fit.beta
sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
value_jy.append(action)
value_ay.append(alpha)
value_by.append(beta)
error_jy.append(sigma_action)
error_ay.append(sigma_alpha)
error_by.append(sigma_beta)
result = {}
result['center_jx'] = None
result['spread_jx'] = None
result['center_jy'] = None
result['spread_jy'] = None
result['jx'] = 0.5*torch.tensor(value_jx, dtype=self.dtype, device=self.device)
result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
result['sigma_jx'] = 0.5*torch.tensor(error_jx, dtype=self.dtype, device=self.device)
result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
result['jy'] = 0.5*torch.tensor(value_jy, dtype=self.dtype, device=self.device)
result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
result['sigma_jy'] = 0.5*torch.tensor(error_jy, dtype=self.dtype, device=self.device)
result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
factor = torch.tensor(factor, dtype=self.dtype, device=self.device)
mask_jx = threshold(standardize(result['jx'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask_jx = mask_jx.squeeze()/(result['sigma_jx']/result['sigma_jx'].sum())**2
center_jx = weighted_mean(result['jx'], weight=mask_jx)
spread_jx = weighted_variance(result['jx'], weight=mask_jx, center=center_jx).sqrt()
mask_jy = threshold(standardize(result['jy'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask_jy = mask_jy.squeeze()/(result['sigma_jy']/result['sigma_jy'].sum())**2
center_jy = weighted_mean(result['jy'], weight=mask_jy)
spread_jy = weighted_variance(result['jy'], weight=mask_jy, center=center_jy).sqrt()
result['center_jx'] = center_jx
result['spread_jx'] = spread_jx
result['center_jy'] = center_jy
result['spread_jy'] = spread_jy
advance = []
for i in range(self.model.monitor_count):
normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
values, _ = self.model.advance_twiss(normal, transport[i])
advance.append(values)
advance = torch.stack(advance).T
result['mux'], result['muy'] = advance
if not refit:
return result
value_ax, error_ax = [], []
value_ay, error_ay = [], []
value_bx, error_bx = [], []
value_by, error_by = [], []
for i in range(self.model.monitor_count):
action = 2.0*center_jx.cpu().numpy()
q1 = x[i, :n].cpu().numpy()
q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_x is not None:
s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 0, 0].cpu().numpy()
m12 = transport[i, 0, 1].cpu().numpy()
alpha, beta = result['ax'][i].cpu().numpy(), result['bx'][i].cpu().numpy()
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
alpha, beta = fit.beta
sigma_alpha, sigma_beta = fit.sd_beta
value_ax.append(alpha)
value_bx.append(beta)
error_ax.append(sigma_alpha)
error_bx.append(sigma_beta)
action = 2.0*center_jy.cpu().numpy()
q1 = y[i, :n].cpu().numpy()
q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_y is not None:
s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 2, 2].cpu().numpy()
m12 = transport[i, 2, 3].cpu().numpy()
alpha, beta = result['ay'][i].cpu().numpy(), result['by'][i].cpu().numpy()
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
alpha, beta = fit.beta
sigma_alpha, sigma_beta = fit.sd_beta
value_ay.append(alpha)
value_by.append(beta)
error_ay.append(sigma_alpha)
error_by.append(sigma_beta)
result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
advance = []
for i in range(self.model.monitor_count):
normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
values, _ = self.model.advance_twiss(normal, transport[i])
advance.append(values)
advance = torch.stack(advance).T
result['mux'], result['muy'] = advance
return result
def get_ax(self, index:int) -> torch.Tensor:
"""
Get ax value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ax, sigma_ax] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ax(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ax[index], self.sigma_ax[index]])
def get_bx(self, index:int) -> torch.Tensor:
"""
Get bx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[bx, sigma_bx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_bx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.bx[index], self.sigma_bx[index]])
def get_fx(self, index:int) -> torch.Tensor:
"""
Get fx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fx, sigma_fx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fx[index], self.sigma_fx[index]])
def get_ay(self, index:int) -> torch.Tensor:
"""
Get ay value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ay, sigma_ay] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ay(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ay[index], self.sigma_ay[index]])
def get_by(self, index:int) -> torch.Tensor:
"""
Get by value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[by, sigma_by] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_by(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.by[index], self.sigma_by[index]])
def get_fy(self, index:int) -> torch.Tensor:
"""
Get fy value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fy, sigma_fy] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fy(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fy[index], self.sigma_fy[index]])
def get_twiss(self, index:int) -> dict:
"""
Return twiss data at given index.
Parameters
----------
index: int
index or location name
Returns
-------
twiss data (dict)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_twiss(self.model.get_index(index))
table = {}
table['ax'], table['sigma_ax'] = self.get_ax(index)
table['bx'], table['sigma_bx'] = self.get_bx(index)
table['fx'], table['sigma_fx'] = self.get_fx(index)
table['ay'], table['sigma_ay'] = self.get_ay(index)
table['by'], table['sigma_by'] = self.get_by(index)
table['fy'], table['sigma_fy'] = self.get_fy(index)
return table
def get_table(self) -> pandas.DataFrame:
"""
Return twiss data at all locations as dataframe.
Parameters
----------
None
Returns
-------
twiss data (pandas.DataFrame)
"""
df = pandas.DataFrame()
df['name'] = self.model.name
df['kind'] = self.model.kind
df['flag'] = self.flag.cpu().numpy()
df['time'] = self.model.time.cpu().numpy()
df['ax'], df['sigma_ax'] = self.ax.cpu().numpy(), self.sigma_ax.cpu().numpy()
df['bx'], df['sigma_bx'] = self.bx.cpu().numpy(), self.sigma_bx.cpu().numpy()
df['fx'], df['sigma_fx'] = self.fx.cpu().numpy(), self.sigma_fx.cpu().numpy()
df['ay'], df['sigma_ay'] = self.ay.cpu().numpy(), self.sigma_ay.cpu().numpy()
df['by'], df['sigma_by'] = self.by.cpu().numpy(), self.sigma_by.cpu().numpy()
df['fy'], df['sigma_fy'] = self.fy.cpu().numpy(), self.sigma_fy.cpu().numpy()
return df
def __repr__(self) -> str:
"""
String representation.
"""
return f'{self.__class__.__name__}({self.model}, {self.table}, {self.limit})'
def __len__(self) -> int:
"""
Number of locations.
"""
return self.size
def __call__(self, limit:int=None) -> pandas.DataFrame:
"""
Perform twiss loop with default parameters.
Parameters
----------
limit: int
range limit for virtual phase computation
Returns
-------
twiss table (pandas.DataFrame)
"""
limit = max(self.limit) if limit is None else limit
self.get_action()
self.get_twiss_from_amplitude()
self.phase_virtual(limit=limit)
self.get_twiss_from_phase()
select = {
'phase': {'use': True, 'threshold': 10.00},
'model': {'use': False, 'threshold': 00.50},
'value': {'use': False, 'threshold': 00.50},
'sigma': {'use': False, 'threshold': 00.25},
'limit': {'use': True, 'threshold': 05.00}
}
mask_x = self.filter_twiss(plane='x', **select)
mask_y = self.filter_twiss(plane='y', **select)
_ = self.process_twiss(plane='x', mask=mask_x, weight=True)
_ = self.process_twiss(plane='y', mask=mask_y, weight=True)
return self.get_table()
def matrix(self, probe:torch.Tensor, other:torch.Tensor) -> tuple:
"""
Generate uncoupled transport matrix (or matrices) for given locations.
Matrices are generated from probe to other
One-turn matrices are generated where probe == other
Input parameters should be 1D tensors with matching length
Additionaly probe and/or other input parameter can be an int or str in self.model.name (not checked)
Note, twiss parameters are treated as independent variables in error propagation
Parameters
----------
probe: torch.Tensor
probe locations
other: torch.Tensor
other locations
Returns
-------
uncoupled transport matrices and error matrices(tuple)
"""
if isinstance(probe, int):
probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
if isinstance(probe, str):
probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
if isinstance(other, int):
other = torch.tensor([other], dtype=torch.int64, device=self.device)
if isinstance(other, str):
other = torch.tensor([self.model.name.index(other)], dtype=torch.int64, device=self.device)
other[probe == other] += self.size
fx, sigma_fx = Decomposition.phase_advance(probe, other, self.table.nux, self.fx, error=True, sigma_frequency=self.table.sigma_nux, sigma_phase=self.sigma_fx)
fy, sigma_fy = Decomposition.phase_advance(probe, other, self.table.nuy, self.fy, error=True, sigma_frequency=self.table.sigma_nuy, sigma_phase=self.sigma_fy)
probe = mod(probe, self.size).to(torch.int64)
other = mod(other, self.size).to(torch.int64)
transport = self.model.matrix_uncoupled(self.ax[probe], self.bx[probe], self.ax[other], self.bx[other], fx, self.ay[probe], self.by[probe], self.ay[other], self.by[other], fy)
sigma_transport = torch.zeros_like(transport)
sigma_transport[:, 0, 0] += self.sigma_ax[probe]**2*self.bx[other]*torch.sin(fx)**2/self.bx[probe]
sigma_transport[:, 0, 0] += self.sigma_bx[probe]**2*self.bx[other]*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]**3)
sigma_transport[:, 0, 0] += self.sigma_bx[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
sigma_transport[:, 0, 0] += sigma_fx**2*self.bx[other]*(-self.ax[probe]*torch.cos(fx) + torch.sin(fx))**2/self.bx[probe]
sigma_transport[:, 0, 1] += self.sigma_bx[probe]**2*self.bx[other]*torch.sin(fx)**2/(4.0*self.bx[probe])
sigma_transport[:, 0, 1] += self.sigma_bx[other]**2*self.bx[probe]*torch.sin(fx)**2/(4.0*self.bx[other])
sigma_transport[:, 0, 1] += sigma_fx**2*self.bx[probe]*self.bx[other]*torch.cos(fx)**2
sigma_transport[:, 1, 0] += self.sigma_ax[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_ax[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_bx[probe]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]**3*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_bx[other]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other]**3)
sigma_transport[:, 1, 0] += sigma_fx**2*((1.0 + self.ax[probe]*self.ax[other])*torch.cos(fx) + (self.ax[probe] - self.ax[other])*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 1] += self.sigma_bx[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 1] += self.sigma_ax[other]**2*self.bx[probe]*torch.sin(fx)**2/self.bx[other]
sigma_transport[:, 1, 1] += self.sigma_bx[other]**2*self.bx[probe]*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[other]**3)
sigma_transport[:, 1, 1] += sigma_fx**2*self.bx[probe]*(self.ax[other]*torch.cos(fx) + torch.sin(fx))**2/self.bx[other]
sigma_transport[:, 2, 2] += self.sigma_ay[probe]**2*self.by[other]*torch.sin(fy)**2/self.by[probe]
sigma_transport[:, 2, 2] += self.sigma_by[probe]**2*self.by[other]*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]**3)
sigma_transport[:, 2, 2] += self.sigma_by[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
sigma_transport[:, 2, 2] += sigma_fy**2*self.by[other]*(-self.ay[probe]*torch.cos(fy) + torch.sin(fy))**2/self.by[probe]
sigma_transport[:, 2, 3] += self.sigma_by[probe]**2*self.by[other]*torch.sin(fy)**2/(4.0*self.by[probe])
sigma_transport[:, 2, 3] += self.sigma_by[other]**2*self.by[probe]*torch.sin(fy)**2/(4.0*self.by[other])
sigma_transport[:, 2, 3] += sigma_fy**2*self.by[probe]*self.by[other]*torch.cos(fy)**2
sigma_transport[:, 3, 2] += self.sigma_ay[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_ay[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_by[probe]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]**3*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_by[other]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other]**3)
sigma_transport[:, 3, 2] += sigma_fy**2*((1.0 + self.ay[probe]*self.ay[other])*torch.cos(fy) + (self.ay[probe] - self.ay[other])*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 3] += self.sigma_by[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
sigma_transport[:, 3, 3] += self.sigma_ay[other]**2*self.by[probe]*torch.sin(fy)**2/self.by[other]
sigma_transport[:, 3, 3] += self.sigma_by[other]**2*self.by[probe]*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[other]**3)
sigma_transport[:, 3, 3] += sigma_fy**2*self.by[probe]*(self.ay[other]*torch.cos(fy) + torch.sin(fy))**2/self.by[other]
sigma_transport.sqrt_()
return (transport.squeeze(), sigma_transport.squeeze())
def make_transport(self) -> None:
"""
Set transport matrices between adjacent locations.
self.transport[i] is a transport matrix from i to i + 1
Parameters
----------
None
Returns
-------
None
"""
probe = torch.arange(self.size, dtype=torch.int64, device=self.device)
other = 1 + probe
self.transport, _ = self.matrix(probe, other)
def matrix_transport(self, probe:int, other:int) -> torch.Tensor:
"""
Generate transport matrix from probe to other using self.transport.
Parameters
----------
probe: int
probe location
other: int
other location
Returns
-------
transport matrix (torch.Tensor)
"""
if isinstance(probe, str):
probe = self.name.index(probe)
if isinstance(other, str):
other = self.name.index(other)
if probe < other:
matrix = self.transport[probe]
for i in range(probe + 1, other):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return matrix
if probe > other:
matrix = self.transport[other]
for i in range(other + 1, probe):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return torch.inverse(matrix)
def normal(self, probe:torch.Tensor) -> tuple:
"""
Generate uncoupled normal matrix (or matrices) for given locations.
Note, twiss parameters are treated as independent variables in error propagation
Parameters
----------
probe: torch.Tensor
probe locations
Returns
-------
uncoupled normal matrices and error matrices(tuple)
"""
if isinstance(probe, int):
probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
if isinstance(probe, str):
probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
probe = mod(probe, self.size).to(torch.int64)
matrix = torch.zeros((len(probe), 4, 4), dtype=self.dtype, device=self.device)
sigma_matrix = torch.zeros_like(matrix)
matrix[:, 0, 0] = self.bx[probe].sqrt()
matrix[:, 1, 0] = -self.ax[probe]/self.bx[probe].sqrt()
matrix[:, 1, 1] = 1.0/self.bx[probe].sqrt()
matrix[:, 2, 2] = self.by[probe].sqrt()
matrix[:, 3, 2] = -self.ay[probe]/self.by[probe].sqrt()
matrix[:, 3, 3] = 1.0/self.by[probe].sqrt()
sigma_matrix[:, 0, 0] += self.sigma_bx[probe]**2/(4.0*self.bx[probe])
sigma_matrix[:, 1, 0] += self.sigma_ax[probe]**2/self.bx[probe] + self.sigma_bx[probe]**2*self.ax[probe]/(4.0*self.bx[probe]**3)
sigma_matrix[:, 1, 1] += self.sigma_bx[probe]**2/(4.0*self.bx[probe]**3)
sigma_matrix[:, 2, 2] += self.sigma_by[probe]**2/(4.0*self.by[probe])
sigma_matrix[:, 3, 2] += self.sigma_ay[probe]**2/self.by[probe] + self.sigma_by[probe]**2*self.ay[probe]/(4.0*self.by[probe]**3)
sigma_matrix[:, 3, 3] += self.sigma_by[probe]**2/(4.0*self.by[probe]**3)
return (matrix.squeeze(), sigma_matrix.sqrt().squeeze())
def main():
pass
if __name__ == '__main__':
main() | 42.360933 | 357 | 0.595218 |