hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7b4d5102e41f930744e5fe944b292c3c7d93fe14 | 2,985 | py | Python | tests/test_detailed.py | TarasRudnyk/python-telegram-bot-calendar | 6bca67e1e4ac1e9cc187d2370ba02a6df9ed60a4 | [
"MIT"
] | 44 | 2020-08-05T20:19:45.000Z | 2022-03-10T22:29:19.000Z | tests/test_detailed.py | TarasRudnyk/python-telegram-bot-calendar | 6bca67e1e4ac1e9cc187d2370ba02a6df9ed60a4 | [
"MIT"
] | 6 | 2021-01-08T16:07:24.000Z | 2022-02-15T18:39:51.000Z | tests/test_detailed.py | TarasRudnyk/python-telegram-bot-calendar | 6bca67e1e4ac1e9cc187d2370ba02a6df9ed60a4 | [
"MIT"
] | 20 | 2020-09-08T16:19:22.000Z | 2022-03-14T15:39:56.000Z | import json
import os
import sys
from datetime import date
import pytest
from dateutil.relativedelta import relativedelta
from telegram_bot_calendar import DAY, MONTH, YEAR
from telegram_bot_calendar.detailed import DetailedTelegramCalendar, NOTHING
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
# todo: fix this test to properly check generated keyboard
| 45.923077 | 121 | 0.60536 |
7b4da977c274712fe30edac1bd68cf28ac41017f | 4,964 | py | Python | django_bootstrap_generator/management/commands/generate_bootstrap.py | rapilabs/django-bootstrap-generator | 51bd0ea3eae69c04b856c1df34c5be1a4abc0385 | [
"MIT"
] | 1 | 2015-03-10T03:41:39.000Z | 2015-03-10T03:41:39.000Z | django_bootstrap_generator/management/commands/generate_bootstrap.py | rapilabs/django-bootstrap-generator | 51bd0ea3eae69c04b856c1df34c5be1a4abc0385 | [
"MIT"
] | null | null | null | django_bootstrap_generator/management/commands/generate_bootstrap.py | rapilabs/django-bootstrap-generator | 51bd0ea3eae69c04b856c1df34c5be1a4abc0385 | [
"MIT"
] | null | null | null | import collections
import types
from optparse import make_option
from django.db.models.loading import get_model
from django.core.management.base import BaseCommand, CommandError
from django.db.models.fields import EmailField, URLField, BooleanField, TextField
bs_form = """\
<form role="form" class="form-horizontal">
%s\
<div class="form-group">
<div class="col-sm-offset-2 col-sm-10">
<button class="btn btn-primary"><i class="fa fa-save"></i> Save</button>
</div>
</div>
</form>
"""
bs_field = """\
<div class="form-group">
<label for="%(id)s" class="col-sm-2 control-label">%(label)s</label>
<div class="col-sm-10">
%(field)s%(error)s
</div>
</div>
"""
bs_input = """\
<input type="%(input_type)s" %(name_attr)s="%(name)s"%(class)s id="%(id)s"%(extra)s/>"""
bs_select = """\
<select %(name_attr)s="%(name)s" class="form-control" id="%(id)s"%(extra)s>%(options)s
</select>"""
bs_option = """
<option value="%(value)s">%(label)s</option>"""
optgroup = """
<optgroup label="%(label)s">%(options)s
</optgroup>"""
bs_textarea = """\
<textarea %(name_attr)s="%(name)s" class="form-control" id="%(id)s"%(extra)s></textarea>"""
react_error = """
{errors.%(name)s}"""
| 29.724551 | 124 | 0.567889 |
7b50d8a97e5f0d3cb0ea0a3693bd90b984964582 | 291 | py | Python | logical/__init__.py | reity/logical | 476193949311824cd63bd9e279a223bcfdceb2e2 | [
"MIT"
] | null | null | null | logical/__init__.py | reity/logical | 476193949311824cd63bd9e279a223bcfdceb2e2 | [
"MIT"
] | null | null | null | logical/__init__.py | reity/logical | 476193949311824cd63bd9e279a223bcfdceb2e2 | [
"MIT"
] | null | null | null | """Gives users direct access to class and functions."""
from logical.logical import\
logical,\
nullary, unary, binary, every,\
nf_, nt_,\
uf_, id_, not_, ut_,\
bf_,\
and_, nimp_, fst_, nif_, snd_, xor_, or_,\
nor_, xnor_, nsnd_, if_, nfst_, imp_, nand_,\
bt_
| 26.454545 | 55 | 0.61512 |
7b51df476a25eb10705b2313609ccd9bca295e46 | 1,667 | py | Python | lab_15/server/main.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | lab_15/server/main.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | lab_15/server/main.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | import queue
import select
import socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)
server.bind(('localhost', 9999))
server.listen(5)
inputs = [server]
outputs = []
message_queues = {}
while inputs:
readable, writable, exceptional = select.select(
inputs, outputs, inputs)
for s in readable:
if s is server:
connection, client_address = s.accept()
connection.setblocking(0)
inputs.append(connection)
message_queues[connection] = queue.Queue()
else:
data = s.recv(1024)
if data:
message_queues[s].put(data)
if s not in outputs:
outputs.append(s)
else:
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
del message_queues[s]
for s in writable:
try:
next_msg = message_queues[s].get_nowait()
except queue.Empty:
outputs.remove(s)
else:
s.send(next_msg)
for s in exceptional:
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
del message_queues[s]
# import socket
#
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind(('localhost', 9999))
# server.listen(1)
# while True:
# client_socket, addr = server.accept()
# print(f'New connection from {addr}')
# client_socket.send('Hello there, how are you?'.encode('utf-8'))
# answer = client_socket.recv(1024)
# print(answer)
# client_socket.close()
| 26.887097 | 69 | 0.574685 |
7b51e80ec38fe59135b07b0729fc19b2c432fc68 | 1,118 | py | Python | febraban/cnab240/itau/sispag/payment/transfer.py | netosjb/febraban-python | a546fa3353d2db1546df60f6f8cc26c7c862c743 | [
"MIT"
] | 7 | 2019-07-16T11:31:50.000Z | 2019-07-29T19:49:50.000Z | febraban/cnab240/itau/sispag/payment/transfer.py | netosjb/febraban-python | a546fa3353d2db1546df60f6f8cc26c7c862c743 | [
"MIT"
] | 4 | 2020-05-07T15:34:21.000Z | 2020-11-12T21:09:34.000Z | febraban/cnab240/itau/sispag/payment/transfer.py | netosjb/febraban-python | a546fa3353d2db1546df60f6f8cc26c7c862c743 | [
"MIT"
] | 6 | 2019-12-04T00:40:10.000Z | 2020-11-05T18:39:40.000Z | from .segmentA import SegmentA
| 24.844444 | 91 | 0.658318 |
7b53b8e74efe57ed1e451add7d928562719e4e93 | 4,689 | py | Python | BroLog.py | jcwoods/BroLog | b95c91178d4038d1e363cb8c8ef9ecc64a23193f | [
"MIT"
] | null | null | null | BroLog.py | jcwoods/BroLog | b95c91178d4038d1e363cb8c8ef9ecc64a23193f | [
"MIT"
] | null | null | null | BroLog.py | jcwoods/BroLog | b95c91178d4038d1e363cb8c8ef9ecc64a23193f | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import codecs
import ipaddress as ip
import pandas as pd
from datetime import datetime as dt
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 28.591463 | 97 | 0.527831 |
7b53fdee739e7f6188764d53517bc20b22165406 | 8,288 | py | Python | test_pdf.py | ioggstream/json-forms-pdf | 9f18cf239ae892ebb0018bfd4a8f792af35ccfac | [
"BSD-3-Clause"
] | 2 | 2020-06-18T13:31:32.000Z | 2022-02-21T08:30:37.000Z | test_pdf.py | ioggstream/json-forms-pdf | 9f18cf239ae892ebb0018bfd4a8f792af35ccfac | [
"BSD-3-Clause"
] | 2 | 2020-05-25T17:31:52.000Z | 2020-06-23T17:32:03.000Z | test_pdf.py | ioggstream/json-forms-pdf | 9f18cf239ae892ebb0018bfd4a8f792af35ccfac | [
"BSD-3-Clause"
] | null | null | null | # simple_checkboxes.py
import logging
from os.path import basename
from pathlib import Path
import jsonschema
import pytest
# from reportlab.pdfbase import pdfform
import yaml
from reportlab.pdfgen import canvas
uischema = yaml.safe_load(Path("jsonforms-react-seed/src/uischema.json").read_text())
form_schema = yaml.safe_load(Path("jsonforms-react-seed/src/schema.json").read_text())
form_fields = jsonschema.RefResolver.from_schema(form_schema)
log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
DATA = {
"name": "foo",
"description": "Confirm if you have passed the subject\nHereby ...",
"done": True,
"recurrence": "Daily",
"rating": "3",
"due_date": "2020-05-21",
"recurrence_interval": 421,
}
| 30.358974 | 86 | 0.527389 |
7b549c8cf31113881352739cc51b8f9b8d3428b5 | 701 | py | Python | pos_map.py | olzama/neural-supertagging | 340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4 | [
"MIT"
] | null | null | null | pos_map.py | olzama/neural-supertagging | 340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4 | [
"MIT"
] | null | null | null | pos_map.py | olzama/neural-supertagging | 340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4 | [
"MIT"
] | null | null | null |
'''
Assuming the following tag-separated format:
VBP+RB VBP
VBZ+RB VBZ
IN+DT IN
(etc.)
''' | 23.366667 | 52 | 0.513552 |
7b580428c6c3fc7e1f2b5ec4c50a922c0d642dcf | 4,051 | py | Python | src/nti/externalization/integer_strings.py | NextThought/nti.externalization | 5a445b85fb809a7c27bf8dbe45c29032ece187d8 | [
"Apache-2.0"
] | null | null | null | src/nti/externalization/integer_strings.py | NextThought/nti.externalization | 5a445b85fb809a7c27bf8dbe45c29032ece187d8 | [
"Apache-2.0"
] | 78 | 2017-09-15T14:59:58.000Z | 2021-10-05T17:40:06.000Z | src/nti/externalization/integer_strings.py | NextThought/nti.externalization | 5a445b85fb809a7c27bf8dbe45c29032ece187d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions to represent potentially large integers as the shortest
possible human-readable and writable strings. The motivation is to be
able to take int ids as produced by an :class:`zc.intid.IIntId`
utility and produce something that can be written down and typed in by
a human. To this end, the strings produced have to be:
* One-to-one and onto the integer domain;
* As short as possible;
* While not being easily confused;
* Or accidentaly permuted
To meet those goals, we define an alphabet consisting of the ASCII
digits and upper and lowercase letters, leaving out troublesome pairs
(zero and upper and lower oh and upper queue, one and upper and lower
ell) (actually, those troublesome pairs will all map to the same
character).
We also put a version marker at the end of the string so we can evolve
this algorithm gracefully but still honor codes in the wild.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = [
'to_external_string',
'from_external_string',
]
# stdlib imports
import string
try:
maketrans = str.maketrans
except AttributeError: # Python 2
from string import maketrans # pylint:disable=no-name-in-module
translate = str.translate
# In the first version of the protocol, the version marker, which would
# come at the end, is always omitted. Subsequent versions will append
# a value that cannot be produced from the _VOCABULARY
_VERSION = '$'
# First, our vocabulary.
# Remove the letter values o and O, Q (confused with O if you're sloppy), l and L,
# and i and I, leaving the digits 1 and 0
_REMOVED = 'oOQlLiI'
_REPLACE = '0001111'
_VOCABULARY = ''.join(
reversed(sorted(list(set(string.ascii_letters + string.digits) - set(_REMOVED))))
)
# We translate the letters we removed
_TRANSTABLE = maketrans(_REMOVED, _REPLACE)
# Leaving us a base vocabulary to map integers into
_BASE = len(_VOCABULARY)
_ZERO_MARKER = '@' # Zero is special
def from_external_string(key):
"""
Turn the string in *key* into an integer.
>>> from nti.externalization.integer_strings import from_external_string
>>> from_external_string('xkr')
6773
:param str key: A native string, as produced by `to_external_string`.
(On Python 2, unicode *keys* are also valid.)
:raises ValueError: If the key is invalid or contains illegal characters.
:raises UnicodeDecodeError: If the key is a Unicode object, and contains
non-ASCII characters (which wouldn't be valid anyway)
"""
if not key:
raise ValueError("Improper key")
if not isinstance(key, str):
# Unicode keys cause problems on Python 2: The _TRANSTABLE is coerced
# to Unicode, which fails because it contains non-ASCII values.
# So instead, we encode the unicode string to ascii, which, if it is a
# valid key, will work
key = key.decode('ascii') if isinstance(key, bytes) else key.encode('ascii')
# strip the version if needed
key = key[:-1] if key[-1] == _VERSION else key
key = translate(key, _TRANSTABLE) # translate bad chars
if key == _ZERO_MARKER:
return 0
int_sum = 0
for idx, char in enumerate(reversed(key)):
int_sum += _VOCABULARY.index(char) * pow(_BASE, idx)
return int_sum
def to_external_string(integer):
"""
Turn an integer into a native string representation.
>>> from nti.externalization.integer_strings import to_external_string
>>> to_external_string(123)
'xk'
>>> to_external_string(123456789)
'kVxr5'
"""
# we won't step into the while if integer is 0
# so we just solve for that case here
if integer == 0:
return _ZERO_MARKER
result = ''
# Simple string concat benchmarks the fastest for this size data,
# among a list and an array.array( 'c' )
while integer > 0:
integer, remainder = divmod(integer, _BASE)
result = _VOCABULARY[remainder] + result
return result
| 30.923664 | 85 | 0.709701 |
7b5a89b5d003d45628ff5ec0925287bf4802eb5a | 2,882 | py | Python | chokitto.py | WKSu/chokitto | 9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8 | [
"MIT"
] | null | null | null | chokitto.py | WKSu/chokitto | 9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8 | [
"MIT"
] | null | null | null | chokitto.py | WKSu/chokitto | 9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8 | [
"MIT"
] | 1 | 2021-01-16T18:51:57.000Z | 2021-01-16T18:51:57.000Z | #!/usr/bin/python3
import argparse, os
from collections import defaultdict
from lib.data import *
from lib.exporters import *
from lib.filters import *
from lib.parsers import *
if __name__ == '__main__':
main()
| 37.428571 | 161 | 0.683553 |
7b5ac6eff9c37a22900d99e140cd578daaa0f186 | 736 | py | Python | torrent.py | olazona/colabtorrent | bbddec0fc12eab672a711769f7d071ada8235c86 | [
"Apache-2.0"
] | 3 | 2020-07-30T09:29:00.000Z | 2022-02-05T05:19:30.000Z | torrent.py | olazona/colabtorrent | bbddec0fc12eab672a711769f7d071ada8235c86 | [
"Apache-2.0"
] | null | null | null | torrent.py | olazona/colabtorrent | bbddec0fc12eab672a711769f7d071ada8235c86 | [
"Apache-2.0"
] | 4 | 2020-03-31T15:42:38.000Z | 2021-09-27T07:30:07.000Z | # -*- coding: utf-8 -*-
"""Torrent.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Uo1_KHGxOe_Dh4-DjQyEb0wROp-tMv0w
"""
TorrName = 'YOUR TORRENT NAME'
FolderPath = f'/content/drive/My Drive/{TorrName}'
TorrPath = f'{TorrName}.torrent'
init()
download(FolderPath,TorrPath) | 25.37931 | 86 | 0.71875 |
7b5b098de65214be758959ec7c9a6aae3055a94c | 3,197 | py | Python | src/py21cmmc/_21cmfast/_utils.py | BradGreig/Hybrid21CMMC | 984aa88ee4543db24095a3ba8529e1f4d0b1048d | [
"MIT"
] | null | null | null | src/py21cmmc/_21cmfast/_utils.py | BradGreig/Hybrid21CMMC | 984aa88ee4543db24095a3ba8529e1f4d0b1048d | [
"MIT"
] | null | null | null | src/py21cmmc/_21cmfast/_utils.py | BradGreig/Hybrid21CMMC | 984aa88ee4543db24095a3ba8529e1f4d0b1048d | [
"MIT"
] | null | null | null | """
Utilities that help with wrapping various C structures.
"""
| 32.622449 | 119 | 0.597435 |
7b5b0c3580d4b9ad4e0f4e13f5794c9e38d7df6a | 160 | py | Python | components/charts.py | tufts-ml/c19-dashboard | be88f91ca74dd154566bd7b36fe482565331346b | [
"MIT"
] | null | null | null | components/charts.py | tufts-ml/c19-dashboard | be88f91ca74dd154566bd7b36fe482565331346b | [
"MIT"
] | 5 | 2020-04-10T20:36:18.000Z | 2020-04-28T19:42:15.000Z | components/charts.py | tufts-ml/c19-dashboard | be88f91ca74dd154566bd7b36fe482565331346b | [
"MIT"
] | null | null | null | import dash_html_components as html
| 26.666667 | 90 | 0.725 |
7b5bf002a4841de751ef7a81520f03f1fc8e3906 | 2,144 | py | Python | lib/bes/fs/dir_util.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | lib/bes/fs/dir_util.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | lib/bes/fs/dir_util.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os, os.path as path, shutil
import datetime
from .file_match import file_match
from .file_util import file_util
| 30.197183 | 90 | 0.588619 |
7b5cbfeebbb6c1b6c874a7c85487913e09de67c0 | 24,540 | py | Python | exploit.py | yassineaboukir/CVE-2019-0708 | 4f4ff5a9eef5ed4cda92376b25667fd5272d9753 | [
"Apache-2.0"
] | null | null | null | exploit.py | yassineaboukir/CVE-2019-0708 | 4f4ff5a9eef5ed4cda92376b25667fd5272d9753 | [
"Apache-2.0"
] | null | null | null | exploit.py | yassineaboukir/CVE-2019-0708 | 4f4ff5a9eef5ed4cda92376b25667fd5272d9753 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
from struct import pack
from twisted.internet import reactor
from twisted.application.reactors import Reactor
from twisted.internet.endpoints import HostnameEndpoint
from twisted.internet.protocol import ClientFactory
from pyrdp.core.ssl import ClientTLSContext
from pyrdp.mitm.layerset import RDPLayerSet
from pyrdp.mitm.state import RDPMITMState
from pyrdp.mitm.config import MITMConfig
from pyrdp.layer import LayerChainItem, VirtualChannelLayer, DeviceRedirectionLayer, ClipboardLayer
from pyrdp.pdu import MCSAttachUserConfirmPDU, MCSAttachUserRequestPDU, MCSChannelJoinConfirmPDU, \
MCSChannelJoinRequestPDU, MCSConnectInitialPDU, MCSConnectResponsePDU, MCSDisconnectProviderUltimatumPDU, \
MCSDomainParams, MCSErectDomainRequestPDU, MCSSendDataIndicationPDU, MCSSendDataRequestPDU, \
ClientChannelDefinition, PDU, ClientExtraInfo, ClientInfoPDU, DemandActivePDU, MCSSendDataIndicationPDU, \
ShareControlHeader, ConfirmActivePDU, SynchronizePDU, ShareDataHeader, ControlPDU, DeviceRedirectionPDU, \
DeviceListAnnounceRequest, X224DataPDU, NegotiationRequestPDU, NegotiationResponsePDU, X224ConnectionConfirmPDU, \
X224ConnectionRequestPDU, X224DisconnectRequestPDU, X224ErrorPDU, NegotiationFailurePDU, MCSDomainParams, \
ClientDataPDU, GCCConferenceCreateRequestPDU, SlowPathPDU, SetErrorInfoPDU, DeviceRedirectionClientCapabilitiesPDU
from pyrdp.enum import NegotiationFailureCode, NegotiationProtocols, EncryptionMethod, ChannelOption, MCSChannelName, \
ParserMode, SegmentationPDUType, ClientInfoFlags, CapabilityType, VirtualChannelCompressionFlag, SlowPathPDUType, \
SlowPathDataType, DeviceRedirectionPacketID, DeviceRedirectionComponent, VirtualChannelPDUFlag
from pyrdp.pdu.rdp.negotiation import NegotiationRequestPDU
from pyrdp.pdu.rdp.capability import Capability
from pyrdp.parser import NegotiationRequestParser, NegotiationResponseParser, ClientConnectionParser, GCCParser, \
ServerConnectionParser
from pyrdp.mcs import MCSClientChannel
from pyrdp.logging import LOGGER_NAMES, SessionLogger
import logging
# Hard-coded constant
PAYLOAD_HEAD_ADDR = 0xfffffa8008711010 + 0x38
if __name__ == '__main__':
main()
| 44.699454 | 3,207 | 0.724165 |
7b5cf1180972e7b3ffbdfafed33eb97b3d3772b4 | 8,241 | py | Python | deep_light/random_selection_threeInputs.py | maqorbani/neural-daylighting | 753c86dfea32483a7afbf213a7b7684e070d3672 | [
"Apache-2.0"
] | 4 | 2020-08-24T03:12:22.000Z | 2020-08-27T17:13:56.000Z | deep_light/random_selection_threeInputs.py | maqorbani/neural-daylighting | 753c86dfea32483a7afbf213a7b7684e070d3672 | [
"Apache-2.0"
] | 4 | 2020-08-24T07:30:51.000Z | 2021-02-20T10:18:47.000Z | deep_light/random_selection_threeInputs.py | maqorbani/neural-daylighting | 753c86dfea32483a7afbf213a7b7684e070d3672 | [
"Apache-2.0"
] | 3 | 2020-04-08T17:37:40.000Z | 2020-08-24T07:32:52.000Z |
#
#
# Copyright (c) 2020. Yue Liu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If you find this code useful please cite:
# Predicting Annual Equirectangular Panoramic Luminance Maps Using Deep Neural Networks,
# Yue Liu, Alex Colburn and and Mehlika Inanici. 16th IBPSA International Conference and Exhibition, Building Simulation 2019.
#
#
#
import os
import numpy as np
from matplotlib import pyplot as plt
from deep_light import time_to_sun_angles
import shutil
from deep_light.genData import get_data_path
####randomly select the test data from the dataset
#### TODO make a function with source and destination subdirectoies
#finish the rest part
| 31.818533 | 138 | 0.580876 |
7b5e2470f1d47ae2237ac46314f2765d06fcd634 | 1,993 | py | Python | graphs/ops.py | andreipoe/sve-analysis-tools | 696d9a82af379564b05ce0207a6f872211a819eb | [
"MIT"
] | 2 | 2020-12-23T02:22:20.000Z | 2020-12-31T17:30:56.000Z | graphs/ops.py | andreipoe/sve-analysis-tools | 696d9a82af379564b05ce0207a6f872211a819eb | [
"MIT"
] | null | null | null | graphs/ops.py | andreipoe/sve-analysis-tools | 696d9a82af379564b05ce0207a6f872211a819eb | [
"MIT"
] | 3 | 2020-06-03T17:05:45.000Z | 2021-12-26T13:45:49.000Z | #!/usr/bin/env python3
import argparse
import sys
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import altair as alt
# Plots application `appname`
if __name__ == '__main__':
main()
| 30.661538 | 114 | 0.610637 |
7b600881aefecd926df3678ce829274a46e661ba | 18 | py | Python | schicexplorer/_version.py | joachimwolff/scHiCExplorer | 8aebb444f3968d398c260690c89c9cd0e3186f0e | [
"MIT"
] | 10 | 2019-12-09T04:11:18.000Z | 2021-03-24T15:29:06.000Z | common/walt/common/version.py | drakkar-lig/walt-python-packages | b778992e241d54b684f54715d83c4aff98a01db7 | [
"BSD-3-Clause"
] | 73 | 2016-04-29T13:17:26.000Z | 2022-03-01T15:06:48.000Z | common/walt/common/version.py | drakkar-lig/walt-python-packages | b778992e241d54b684f54715d83c4aff98a01db7 | [
"BSD-3-Clause"
] | 3 | 2019-03-18T14:27:56.000Z | 2021-06-03T12:07:02.000Z | __version__ = '7'
| 9 | 17 | 0.666667 |
7b60c81cc28198aaec6b287552d4028bec373d4b | 2,918 | py | Python | bluebottle/organizations/views.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/organizations/views.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/organizations/views.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | from rest_framework import generics
from rest_framework import filters
from bluebottle.utils.permissions import IsAuthenticated, IsOwnerOrReadOnly, IsOwner
from rest_framework_json_api.pagination import JsonApiPageNumberPagination
from rest_framework_json_api.parsers import JSONParser
from rest_framework_json_api.views import AutoPrefetchMixin
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from bluebottle.bluebottle_drf2.renderers import BluebottleJSONAPIRenderer
from bluebottle.organizations.serializers import (
OrganizationSerializer, OrganizationContactSerializer
)
from bluebottle.organizations.models import (
Organization, OrganizationContact
)
| 30.715789 | 84 | 0.778958 |
7b6172efe890112ba2bd4d2808ee33bca9779adb | 1,646 | py | Python | gen3_etl/utils/defaults.py | ohsu-comp-bio/gen3-etl | 9114f75cc8c8085111152ce0ef686a8a12f67f8e | [
"MIT"
] | 1 | 2020-01-22T17:05:58.000Z | 2020-01-22T17:05:58.000Z | gen3_etl/utils/defaults.py | ohsu-comp-bio/gen3-etl | 9114f75cc8c8085111152ce0ef686a8a12f67f8e | [
"MIT"
] | 2 | 2019-02-08T23:24:58.000Z | 2021-05-13T22:42:28.000Z | gen3_etl/utils/defaults.py | ohsu-comp-bio/gen3_etl | 9114f75cc8c8085111152ce0ef686a8a12f67f8e | [
"MIT"
] | null | null | null | from gen3_etl.utils.cli import default_argument_parser
from gen3_etl.utils.ioutils import JSONEmitter
import os
import re
DEFAULT_OUTPUT_DIR = 'output/default'
DEFAULT_EXPERIMENT_CODE = 'default'
DEFAULT_PROJECT_ID = 'default-default'
def emitter(type=None, output_dir=DEFAULT_OUTPUT_DIR, **kwargs):
"""Creates a default emitter for type."""
return JSONEmitter(os.path.join(output_dir, '{}.json'.format(type)), compresslevel=0, **kwargs)
def path_to_type(path):
"""Get the type (snakecase) of a vertex file"""
return snake_case(os.path.basename(path).split('.')[0])
def snake_case(name):
"""Converts name to snake_case."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
| 35.782609 | 99 | 0.643378 |
7b6408ae7e94f31c2cccd6d4bff3b3ad42baca0f | 6,266 | py | Python | csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py | kiudee/cs-ranking | 47cf648fa286c37b9214bbad1926004d4d7d9796 | [
"Apache-2.0"
] | 65 | 2018-02-12T13:18:13.000Z | 2021-12-18T12:01:51.000Z | csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py | kiudee/cs-ranking | 47cf648fa286c37b9214bbad1926004d4d7d9796 | [
"Apache-2.0"
] | 189 | 2018-02-13T10:11:55.000Z | 2022-03-12T16:36:23.000Z | csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py | kiudee/cs-ranking | 47cf648fa286c37b9214bbad1926004d4d7d9796 | [
"Apache-2.0"
] | 19 | 2018-03-08T15:39:31.000Z | 2020-11-18T12:46:36.000Z | import logging
import numpy as np
from sklearn.utils import check_random_state
from csrank.constants import DISCRETE_CHOICE
from csrank.dataset_reader.tag_genome_reader import critique_dist
from csrank.dataset_reader.util import get_key_for_indices
from ..tag_genome_reader import TagGenomeDatasetReader
from ...util import convert_to_label_encoding
logger = logging.getLogger(__name__)
| 42.62585 | 111 | 0.57804 |
7b6453cb804d74096b8a1f23797b8a9e957ae61c | 12,614 | py | Python | tests/conversion/converters/inside_worker_test/declarative_schema/osm_models.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 27 | 2015-03-30T14:17:26.000Z | 2022-02-19T17:30:44.000Z | tests/conversion/converters/inside_worker_test/declarative_schema/osm_models.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 483 | 2015-03-09T16:58:03.000Z | 2022-03-14T09:29:06.000Z | tests/conversion/converters/inside_worker_test/declarative_schema/osm_models.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 6 | 2015-04-07T07:38:30.000Z | 2020-04-01T12:45:53.000Z | # coding: utf-8
from geoalchemy2 import Geometry
from sqlalchemy import BigInteger, Column, DateTime, Float, Integer, SmallInteger, Table, Text, TEXT, BIGINT
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
t_osm_line = Table(
'osm_line', metadata,
Column('osm_id', BigInteger, index=True),
Column('access', Text),
Column('addr:city', Text),
Column('addr:housenumber', Text),
Column('addr:interpolation', Text),
Column('addr:place', Text),
Column('addr:postcode', Text),
Column('addr:street', Text),
Column('addr:country', Text),
Column('admin_level', Text),
Column('aerialway', Text),
Column('aeroway', Text),
Column('amenity', Text),
Column('area', Text),
Column('barrier', Text),
Column('brand', Text),
Column('bridge', Text),
Column('boundary', Text),
Column('building', Text),
Column('bus', Text),
Column('contact:phone', Text),
Column('cuisine', Text),
Column('denomination', Text),
Column('drinkable', Text),
Column('emergency', Text),
Column('entrance', Text),
Column('foot', Text),
Column('frequency', Text),
Column('generator:source', Text),
Column('height', Text),
Column('highway', Text),
Column('historic', Text),
Column('information', Text),
Column('junction', Text),
Column('landuse', Text),
Column('layer', Text),
Column('leisure', Text),
Column('man_made', Text),
Column('maxspeed', Text),
Column('military', Text),
Column('name', Text),
Column('int_name', Text),
Column('name:en', Text),
Column('name:de', Text),
Column('name:fr', Text),
Column('name:es', Text),
Column('natural', Text),
Column('office', Text),
Column('oneway', Text),
Column('opening_hours', Text),
Column('operator', Text),
Column('phone', Text),
Column('power', Text),
Column('power_source', Text),
Column('parking', Text),
Column('place', Text),
Column('population', Text),
Column('public_transport', Text),
Column('recycling:glass', Text),
Column('recycling:paper', Text),
Column('recycling:clothes', Text),
Column('recycling:scrap_metal', Text),
Column('railway', Text),
Column('ref', Text),
Column('religion', Text),
Column('route', Text),
Column('service', Text),
Column('shop', Text),
Column('sport', Text),
Column('tourism', Text),
Column('tower:type', Text),
Column('tracktype', Text),
Column('traffic_calming', Text),
Column('train', Text),
Column('tram', Text),
Column('tunnel', Text),
Column('type', Text),
Column('vending', Text),
Column('voltage', Text),
Column('water', Text),
Column('waterway', Text),
Column('website', Text),
Column('wetland', Text),
Column('width', Text),
Column('wikipedia', Text),
Column('z_order', SmallInteger),
Column('way_area', Float),
Column('osm_timestamp', DateTime),
Column('osm_version', Text),
Column('tags', HSTORE),
Column('way', Geometry(geometry_type='LineString', srid=900913), index=True)
)
t_osm_point = Table(
'osm_point', metadata,
Column('osm_id', BigInteger, index=True),
Column('access', Text),
Column('addr:city', Text),
Column('addr:housenumber', Text),
Column('addr:interpolation', Text),
Column('addr:place', Text),
Column('addr:postcode', Text),
Column('addr:street', Text),
Column('addr:country', Text),
Column('admin_level', Text),
Column('aerialway', Text),
Column('aeroway', Text),
Column('amenity', Text),
Column('area', Text),
Column('barrier', Text),
Column('brand', Text),
Column('bridge', Text),
Column('boundary', Text),
Column('building', Text),
Column('bus', Text),
Column('contact:phone', Text),
Column('cuisine', Text),
Column('denomination', Text),
Column('drinkable', Text),
Column('ele', Text),
Column('emergency', Text),
Column('entrance', Text),
Column('foot', Text),
Column('frequency', Text),
Column('generator:source', Text),
Column('height', Text),
Column('highway', Text),
Column('historic', Text),
Column('information', Text),
Column('junction', Text),
Column('landuse', Text),
Column('layer', Text),
Column('leisure', Text),
Column('man_made', Text),
Column('maxspeed', Text),
Column('military', Text),
Column('name', Text),
Column('int_name', Text),
Column('name:en', Text),
Column('name:de', Text),
Column('name:fr', Text),
Column('name:es', Text),
Column('natural', Text),
Column('office', Text),
Column('oneway', Text),
Column('opening_hours', Text),
Column('operator', Text),
Column('phone', Text),
Column('power', Text),
Column('power_source', Text),
Column('parking', Text),
Column('place', Text),
Column('population', Text),
Column('public_transport', Text),
Column('recycling:glass', Text),
Column('recycling:paper', Text),
Column('recycling:clothes', Text),
Column('recycling:scrap_metal', Text),
Column('railway', Text),
Column('ref', Text),
Column('religion', Text),
Column('route', Text),
Column('service', Text),
Column('shop', Text),
Column('sport', Text),
Column('tourism', Text),
Column('tower:type', Text),
Column('traffic_calming', Text),
Column('train', Text),
Column('tram', Text),
Column('tunnel', Text),
Column('type', Text),
Column('vending', Text),
Column('voltage', Text),
Column('water', Text),
Column('waterway', Text),
Column('website', Text),
Column('wetland', Text),
Column('width', Text),
Column('wikipedia', Text),
Column('z_order', SmallInteger),
Column('osm_timestamp', DateTime),
Column('osm_version', Text),
Column('tags', HSTORE),
Column('way', Geometry(geometry_type='Point', srid=900913), index=True)
)
t_osm_polygon = Table(
'osm_polygon', metadata,
Column('osm_id', BigInteger, index=True),
Column('access', Text),
Column('addr:city', Text),
Column('addr:housenumber', Text),
Column('addr:interpolation', Text),
Column('addr:place', Text),
Column('addr:postcode', Text),
Column('addr:street', Text),
Column('addr:country', Text),
Column('admin_level', Text),
Column('aerialway', Text),
Column('aeroway', Text),
Column('amenity', Text),
Column('area', Text),
Column('barrier', Text),
Column('brand', Text),
Column('bridge', Text),
Column('boundary', Text),
Column('building', Text),
Column('bus', Text),
Column('contact:phone', Text),
Column('cuisine', Text),
Column('denomination', Text),
Column('drinkable', Text),
Column('emergency', Text),
Column('entrance', Text),
Column('foot', Text),
Column('frequency', Text),
Column('generator:source', Text),
Column('height', Text),
Column('highway', Text),
Column('historic', Text),
Column('information', Text),
Column('junction', Text),
Column('landuse', Text),
Column('layer', Text),
Column('leisure', Text),
Column('man_made', Text),
Column('maxspeed', Text),
Column('military', Text),
Column('name', Text),
Column('int_name', Text),
Column('name:en', Text),
Column('name:de', Text),
Column('name:fr', Text),
Column('name:es', Text),
Column('natural', Text),
Column('office', Text),
Column('oneway', Text),
Column('opening_hours', Text),
Column('operator', Text),
Column('phone', Text),
Column('power', Text),
Column('power_source', Text),
Column('parking', Text),
Column('place', Text),
Column('population', Text),
Column('public_transport', Text),
Column('recycling:glass', Text),
Column('recycling:paper', Text),
Column('recycling:clothes', Text),
Column('recycling:scrap_metal', Text),
Column('railway', Text),
Column('ref', Text),
Column('religion', Text),
Column('route', Text),
Column('service', Text),
Column('shop', Text),
Column('sport', Text),
Column('tourism', Text),
Column('tower:type', Text),
Column('tracktype', Text),
Column('traffic_calming', Text),
Column('train', Text),
Column('tram', Text),
Column('tunnel', Text),
Column('type', Text),
Column('vending', Text),
Column('voltage', Text),
Column('water', Text),
Column('waterway', Text),
Column('website', Text),
Column('wetland', Text),
Column('width', Text),
Column('wikipedia', Text),
Column('z_order', SmallInteger),
Column('way_area', Float),
Column('osm_timestamp', DateTime),
Column('osm_version', Text),
Column('tags', HSTORE),
Column('way', Geometry(geometry_type='Geometry', srid=900913), index=True)
)
t_osm_roads = Table(
'osm_roads', metadata,
Column('osm_id', BigInteger, index=True),
Column('access', Text),
Column('addr:city', Text),
Column('addr:housenumber', Text),
Column('addr:interpolation', Text),
Column('addr:place', Text),
Column('addr:postcode', Text),
Column('addr:street', Text),
Column('addr:country', Text),
Column('admin_level', Text),
Column('aerialway', Text),
Column('aeroway', Text),
Column('amenity', Text),
Column('area', Text),
Column('barrier', Text),
Column('brand', Text),
Column('bridge', Text),
Column('boundary', Text),
Column('building', Text),
Column('bus', Text),
Column('contact:phone', Text),
Column('cuisine', Text),
Column('denomination', Text),
Column('drinkable', Text),
Column('emergency', Text),
Column('entrance', Text),
Column('foot', Text),
Column('frequency', Text),
Column('generator:source', Text),
Column('height', Text),
Column('highway', Text),
Column('historic', Text),
Column('information', Text),
Column('junction', Text),
Column('landuse', Text),
Column('layer', Text),
Column('leisure', Text),
Column('man_made', Text),
Column('maxspeed', Text),
Column('military', Text),
Column('name', Text),
Column('int_name', Text),
Column('name:en', Text),
Column('name:de', Text),
Column('name:fr', Text),
Column('name:es', Text),
Column('natural', Text),
Column('office', Text),
Column('oneway', Text),
Column('opening_hours', Text),
Column('operator', Text),
Column('phone', Text),
Column('power', Text),
Column('power_source', Text),
Column('parking', Text),
Column('place', Text),
Column('population', Text),
Column('public_transport', Text),
Column('recycling:glass', Text),
Column('recycling:paper', Text),
Column('recycling:clothes', Text),
Column('recycling:scrap_metal', Text),
Column('railway', Text),
Column('ref', Text),
Column('religion', Text),
Column('route', Text),
Column('service', Text),
Column('shop', Text),
Column('sport', Text),
Column('tourism', Text),
Column('tower:type', Text),
Column('tracktype', Text),
Column('traffic_calming', Text),
Column('train', Text),
Column('tram', Text),
Column('tunnel', Text),
Column('type', Text),
Column('vending', Text),
Column('voltage', Text),
Column('water', Text),
Column('waterway', Text),
Column('website', Text),
Column('wetland', Text),
Column('width', Text),
Column('wikipedia', Text),
Column('z_order', SmallInteger),
Column('way_area', Float),
Column('osm_timestamp', DateTime),
Column('osm_version', Text),
Column('tags', HSTORE),
Column('way', Geometry(geometry_type='LineString', srid=900913), index=True)
)
| 29.961995 | 108 | 0.615507 |
7b64b4f449f372c3bd3efce8e5b293075f2e5515 | 321 | py | Python | twinfield/tests/__init__.py | yellowstacks/twinfield | 2ae161a6cf6d7ff1f2c80a034b775f3d4928313d | [
"Apache-2.0"
] | 4 | 2020-12-20T23:02:33.000Z | 2022-01-13T19:40:13.000Z | twinfield/tests/__init__.py | yellowstacks/twinfield | 2ae161a6cf6d7ff1f2c80a034b775f3d4928313d | [
"Apache-2.0"
] | 9 | 2020-12-18T07:27:07.000Z | 2022-02-17T09:23:51.000Z | twinfield/tests/__init__.py | yellowstacks/twinfield | 2ae161a6cf6d7ff1f2c80a034b775f3d4928313d | [
"Apache-2.0"
] | null | null | null | import logging
from keyvault import secrets_to_environment
from twinfield import TwinfieldApi
logging.basicConfig(
format="%(asctime)s.%(msecs)03d [%(levelname)-5s] [%(name)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
secrets_to_environment("twinfield-test")
tw = TwinfieldApi()
| 20.0625 | 80 | 0.70405 |
7b64b79a9fe2fe03f8066b0d29de8f2b6617fac5 | 397 | py | Python | magic/magicdb/migrations/0002_auto_20181027_1726.py | ParserKnight/django-app-magic | bdedf7f423da558f09d5818157762f3d8ac65d93 | [
"MIT"
] | 1 | 2019-12-20T17:56:13.000Z | 2019-12-20T17:56:13.000Z | magic/magicdb/migrations/0002_auto_20181027_1726.py | ParserKnight/django-app-magic | bdedf7f423da558f09d5818157762f3d8ac65d93 | [
"MIT"
] | 11 | 2019-11-23T19:24:17.000Z | 2022-03-11T23:34:21.000Z | magic/magicdb/migrations/0002_auto_20181027_1726.py | ParserKnight/django-app-magic | bdedf7f423da558f09d5818157762f3d8ac65d93 | [
"MIT"
] | 1 | 2019-12-20T17:58:20.000Z | 2019-12-20T17:58:20.000Z | # Generated by Django 2.1.2 on 2018-10-27 17:26
from django.db import migrations, models
| 20.894737 | 74 | 0.594458 |
7b6531b31b1e432174690734541236e12b96ec5b | 385 | py | Python | dscli/storageimage.py | Taywee/dscli-python | c06786d3924912b323251d8e081edf60d3cedef0 | [
"MIT"
] | null | null | null | dscli/storageimage.py | Taywee/dscli-python | c06786d3924912b323251d8e081edf60d3cedef0 | [
"MIT"
] | null | null | null | dscli/storageimage.py | Taywee/dscli-python | c06786d3924912b323251d8e081edf60d3cedef0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2017 Taylor C. Richberger <taywee@gmx.com>
# This code is released under the license described in the LICENSE file
from __future__ import division, absolute_import, print_function, unicode_literals
| 29.615385 | 82 | 0.711688 |
7b65bb70b1e3c95b7c5e5cfddb6056ef4399ec89 | 2,968 | py | Python | crnpy/utils.py | mehradans92/crnpy | e145d63b5cf97eb3c91276000cc8fef92c35cde9 | [
"BSD-3-Clause"
] | null | null | null | crnpy/utils.py | mehradans92/crnpy | e145d63b5cf97eb3c91276000cc8fef92c35cde9 | [
"BSD-3-Clause"
] | null | null | null | crnpy/utils.py | mehradans92/crnpy | e145d63b5cf97eb3c91276000cc8fef92c35cde9 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def weighted_quantile(values, quantiles, sample_weight=None,
values_sorted=False, old_style=False):
''' Very close to numpy.percentile, but supports weights.
Note: quantiles should be in [0, 1]!
:param values: numpy.array with data
:param quantiles: array-like with many quantiles needed
:param sample_weight: array-like of the same length as `array`
:param values_sorted: bool, if True, then will avoid sorting of
initial array
:param old_style: if True, will correct output to be consistent
with numpy.percentile.
:return: numpy.array with computed quantiles.
'''
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
| 41.802817 | 148 | 0.626685 |
7b65f6a1d2bed41aa57a027424c81ab5587c97d9 | 123 | py | Python | exercise/newfile5.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | exercise/newfile5.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | exercise/newfile5.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | alist = [1,2,3,5,5,1,3]
b = set(alist)
c = tuple(alist)
print(b)
print(c)
print([x for x in b])
print([x for x in c]) | 17.571429 | 24 | 0.569106 |
7b661c16cd5afafef4aba119027c917f67774a50 | 170 | py | Python | ocp_resources/kubevirt_metrics_aggregation.py | kbidarkar/openshift-python-wrapper | 3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb | [
"Apache-2.0"
] | 9 | 2021-07-05T18:35:55.000Z | 2021-12-31T03:09:39.000Z | ocp_resources/kubevirt_metrics_aggregation.py | kbidarkar/openshift-python-wrapper | 3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb | [
"Apache-2.0"
] | 418 | 2021-07-04T13:12:09.000Z | 2022-03-30T08:37:45.000Z | ocp_resources/kubevirt_metrics_aggregation.py | kbidarkar/openshift-python-wrapper | 3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb | [
"Apache-2.0"
] | 28 | 2021-07-04T12:48:18.000Z | 2022-02-22T15:19:30.000Z | from ocp_resources.resource import NamespacedResource
| 28.333333 | 59 | 0.870588 |
7b66b53434936973c793021141732d4d9ee0ccb9 | 1,329 | py | Python | metashare/accounts/urls.py | hpusset/ELRI | c4455cff3adb920627f014f37e740665342e9cee | [
"BSD-3-Clause"
] | 1 | 2017-07-10T08:15:07.000Z | 2017-07-10T08:15:07.000Z | metashare/accounts/urls.py | hpusset/ELRI | c4455cff3adb920627f014f37e740665342e9cee | [
"BSD-3-Clause"
] | null | null | null | metashare/accounts/urls.py | hpusset/ELRI | c4455cff3adb920627f014f37e740665342e9cee | [
"BSD-3-Clause"
] | 1 | 2018-07-03T07:55:56.000Z | 2018-07-03T07:55:56.000Z | from django.conf.urls import patterns, url
from metashare.settings import DJANGO_BASE
urlpatterns = patterns('metashare.accounts.views',
url(r'create/$',
'create', name='create'),
url(r'confirm/(?P<uuid>[0-9a-f]{32})/$',
'confirm', name='confirm'),
url(r'contact/$',
'contact', name='contact'),
url(r'reset/(?:(?P<uuid>[0-9a-f]{32})/)?$',
'reset', name='reset'),
url(r'profile/$',
'edit_profile', name='edit_profile'),
url(r'editor_group_application/$',
'editor_group_application', name='editor_group_application'),
url(r'organization_application/$',
'organization_application', name='organization_application'),
url(r'update_default_editor_groups/$',
'update_default_editor_groups', name='update_default_editor_groups'),
url(r'edelivery_membership_application/$',
'edelivery_application', name='edelivery_application'),
)
urlpatterns += patterns('django.contrib.auth.views',
url(r'^profile/change_password/$', 'password_change',
{'post_change_redirect' : '/{0}accounts/profile/change_password/done/'.format(DJANGO_BASE), 'template_name': 'accounts/change_password.html'}, name='password_change'),
url(r'^profile/change_password/done/$', 'password_change_done',
{'template_name': 'accounts/change_password_done.html'}, name='password_change_done'),
)
| 42.870968 | 175 | 0.713318 |
7b6919cc089619a865a91ca1a0c07ade430d6d28 | 366 | py | Python | conduit/config/env/__init__.py | utilmeta/utilmeta-py-realworld-example-app | cf6d9e83e72323a830b2fcdb5c5eae3ebd800103 | [
"MIT"
] | null | null | null | conduit/config/env/__init__.py | utilmeta/utilmeta-py-realworld-example-app | cf6d9e83e72323a830b2fcdb5c5eae3ebd800103 | [
"MIT"
] | null | null | null | conduit/config/env/__init__.py | utilmeta/utilmeta-py-realworld-example-app | cf6d9e83e72323a830b2fcdb5c5eae3ebd800103 | [
"MIT"
] | null | null | null | from utilmeta.conf import Env
env = ServiceEnvironment(__file__)
| 22.875 | 65 | 0.650273 |
7b6a1e3587e95d7c0cf5eb9e41ba34ccfca2c19e | 437 | py | Python | sort/counting.py | haandol/dojo | c29dc54614bdfaf79eb4862ed9fa25974a0f5654 | [
"MIT"
] | null | null | null | sort/counting.py | haandol/dojo | c29dc54614bdfaf79eb4862ed9fa25974a0f5654 | [
"MIT"
] | null | null | null | sort/counting.py | haandol/dojo | c29dc54614bdfaf79eb4862ed9fa25974a0f5654 | [
"MIT"
] | null | null | null | # https://www.geeksforgeeks.org/counting-sort/
if __name__ == '__main__':
arr = [10, 7, 8, 9, 1, 5]
assert [1, 5, 7, 8, 9, 10] == sort(arr)
| 18.208333 | 46 | 0.535469 |
7b6a67db77e30e9f797bb8f8a046460eef6c1f54 | 1,316 | py | Python | uedinst/daq.py | trbritt/uedinst | e9fe1379b762be97b31ffab86a2cb149cb6291da | [
"BSD-3-Clause"
] | null | null | null | uedinst/daq.py | trbritt/uedinst | e9fe1379b762be97b31ffab86a2cb149cb6291da | [
"BSD-3-Clause"
] | null | null | null | uedinst/daq.py | trbritt/uedinst | e9fe1379b762be97b31ffab86a2cb149cb6291da | [
"BSD-3-Clause"
] | null | null | null | import nidaqmx
from . import InstrumentException
from time import sleep
| 26.32 | 86 | 0.522796 |
7b6ab2fe5bfb6e6c729ecffe273017b734826941 | 1,135 | py | Python | tests/operations_model_test.py | chlemagne/python-oop-calculator | 0259ce0f7a72faab60b058588a6838fe107e88eb | [
"MIT"
] | null | null | null | tests/operations_model_test.py | chlemagne/python-oop-calculator | 0259ce0f7a72faab60b058588a6838fe107e88eb | [
"MIT"
] | null | null | null | tests/operations_model_test.py | chlemagne/python-oop-calculator | 0259ce0f7a72faab60b058588a6838fe107e88eb | [
"MIT"
] | null | null | null | """ Unittest.
"""
import unittest
from calculator.standard.operations_model import (
UniOperation,
BiOperation,
Square,
SquareRoot,
Reciprocal,
Add,
Subtract,
Multiply,
Divide,
Modulo
)
| 21.415094 | 58 | 0.600881 |
7b6c01aa137fe5eab922023b4f7b039eaadf78f0 | 684 | py | Python | LAB5/lab/main.py | ThinkingFrog/MathStat | cd3712f4f4a59badd7f2611de64681b0e928d3db | [
"MIT"
] | null | null | null | LAB5/lab/main.py | ThinkingFrog/MathStat | cd3712f4f4a59badd7f2611de64681b0e928d3db | [
"MIT"
] | null | null | null | LAB5/lab/main.py | ThinkingFrog/MathStat | cd3712f4f4a59badd7f2611de64681b0e928d3db | [
"MIT"
] | null | null | null | from lab.distribution import DistrManager
| 28.5 | 120 | 0.557018 |
7b6c677fc5296afba3c3ed059b4dbdc0e009c7cf | 3,010 | py | Python | haplotype_plot/tests/test_plot.py | neobernad/haplotype_plot | 45d9e916f474242648baa8d8b2afe9d502302485 | [
"MIT"
] | 2 | 2021-01-09T10:43:25.000Z | 2021-02-16T17:21:08.000Z | haplotype_plot/tests/test_plot.py | neobernad/haplotype_plot | 45d9e916f474242648baa8d8b2afe9d502302485 | [
"MIT"
] | 3 | 2021-02-01T11:28:17.000Z | 2021-03-29T22:12:48.000Z | haplotype_plot/tests/test_plot.py | neobernad/haplotype_plot | 45d9e916f474242648baa8d8b2afe9d502302485 | [
"MIT"
] | null | null | null | import unittest
import logging
import os
import haplotype_plot.genotyper as genotyper
import haplotype_plot.reader as reader
import haplotype_plot.haplotyper as haplotyper
import haplotype_plot.plot as hplot
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
dir_path = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__':
unittest.main()
| 38.589744 | 108 | 0.644186 |
7b6e114171988bb11bb357d60e9671587a0a54e0 | 1,806 | py | Python | src/docknet/data_generator/chessboard_data_generator.py | Accenture/Docknet | e81eb0c5aefd080ebeebf369d41f8d3fa85ab917 | [
"Apache-2.0"
] | 2 | 2020-06-29T08:58:26.000Z | 2022-03-08T11:38:18.000Z | src/docknet/data_generator/chessboard_data_generator.py | jeekim/Docknet | eb3cad13701471a7aaeea1d573bc5608855bab52 | [
"Apache-2.0"
] | 1 | 2022-03-07T17:58:59.000Z | 2022-03-07T17:58:59.000Z | src/docknet/data_generator/chessboard_data_generator.py | jeekim/Docknet | eb3cad13701471a7aaeea1d573bc5608855bab52 | [
"Apache-2.0"
] | 3 | 2020-06-29T08:58:31.000Z | 2020-11-22T11:23:11.000Z | from typing import Tuple
import numpy as np
from docknet.data_generator.data_generator import DataGenerator
| 32.836364 | 103 | 0.606312 |
7b7181c5da71f675df29626211d629f1f9f4e5ef | 5,485 | py | Python | stereoVO/geometry/features.py | sakshamjindal/Visual-Odometry-Pipeline-in-Python | d4a8a8ee16f91a145b90c41744a85e8dd1c1d249 | [
"Apache-2.0"
] | 10 | 2021-11-01T23:56:30.000Z | 2022-03-07T08:08:25.000Z | stereoVO/geometry/features.py | sakshamjindal/StereoVO-SFM | d4a8a8ee16f91a145b90c41744a85e8dd1c1d249 | [
"Apache-2.0"
] | null | null | null | stereoVO/geometry/features.py | sakshamjindal/StereoVO-SFM | d4a8a8ee16f91a145b90c41744a85e8dd1c1d249 | [
"Apache-2.0"
] | 1 | 2021-12-02T03:15:00.000Z | 2021-12-02T03:15:00.000Z | import cv2
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['DetectionEngine']
| 40.036496 | 149 | 0.615497 |
7b72e8377f711b1cb1ee17fd39204a3883465200 | 78 | py | Python | shopify_csv/__init__.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | 1 | 2021-02-28T11:36:50.000Z | 2021-02-28T11:36:50.000Z | shopify_csv/__init__.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | null | null | null | shopify_csv/__init__.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | null | null | null | from .constants import FIELDS, PROPERTIES
from .shopify_csv import ShopifyRow
| 26 | 41 | 0.846154 |
7b745432625aa3fd9106d5e8fec7445b66115435 | 8,070 | py | Python | pgpointcloud_utils/pcformat.py | dustymugs/pgpointcloud_utils | 24193438982a8070a0aada34fca4db62688d18ba | [
"BSD-3-Clause"
] | 1 | 2016-09-04T20:44:15.000Z | 2016-09-04T20:44:15.000Z | pgpointcloud_utils/pcformat.py | dustymugs/pgpointcloud_utils | 24193438982a8070a0aada34fca4db62688d18ba | [
"BSD-3-Clause"
] | 6 | 2015-02-19T10:27:39.000Z | 2015-02-19T10:58:49.000Z | pgpointcloud_utils/pcformat.py | dustymugs/pgpointcloud_utils | 24193438982a8070a0aada34fca4db62688d18ba | [
"BSD-3-Clause"
] | null | null | null | from decimal import Decimal
import xml.etree.ElementTree as ET
from .pcexception import *
def get_dimension(self, name_or_pos):
'''
return the dimension by name or position (1-based)
'''
if isinstance(name_or_pos, int):
# position is 1-based
return self.dimensions[name_or_pos - 1]
else:
return self._dimension_lookups['name'][name_or_pos]
def get_dimension_index(self, name):
'''
return the index of the dimension by name
'''
if name not in self._dimension_lookups['name']:
return None
return self.dimensions.index(self._dimension_lookups['name'][name])
| 24.603659 | 78 | 0.53482 |
7b788c4e537ccfe9b9a19c03459ac9310b0314ff | 662 | py | Python | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
] | null | null | null | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
] | null | null | null | setup.py | yuji-koseki/django-home-urls | ef42ad08101f83c2aff941e00abd50e60c57ac51 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django_home_urls",
version="0.1.0",
author="Yuji Koseki",
author_email="pxquuqjm0k62new7q4@gmail.com",
description="Django home urlconf.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yuji-koseki/django-home-urls",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
'Framework :: Django',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 28.782609 | 58 | 0.663142 |
7b792f428ffd2ed8a9d5df151157eca526120574 | 3,553 | py | Python | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
] | null | null | null | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
] | null | null | null | lib/DataFileIO.py | cttsai1985/Kaggle-Home-Credit-Default-Risk | a378d5fcee1895a6229c740779f64b286532de8c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script provide a class to read and save files
Created on Sat July 21 2018
@author: cttsai
"""
import pandas as pd
from Utility import CheckFileExist
from LibConfigs import logger, hdf5_compress_option, fast_hdf5_compress_option
def loadCSV(self, configs={}):
"""
configs = {'name': 'file_path'}
return load_data = {'name': dataframe}
"""
logger.info("Read Data from CSV")
load_data = {}
for k, f_path in configs.items():
if not self.checkFile(f_path):
continue
load_data[k] = pd.read_csv(f_path)
logger.info("Read in {}: from {}, shape={}".format(k, f_path, load_data[k].shape))
self.data_lastet_load = load_data.copy()
return load_data
def loadHDF(self, filename, configs={}, limited_by_configs=True):
"""
"""
logger.info("Read Data from HDFS")
if not self.checkFile(filename):
return self.loadEmpty(configs)
if limited_by_configs:
logger.info("Load selected DataFrame Only")
load_data = self.readHDF(filename, configs, opt_load=True)
else: # full loaded
load_data = self.readHDF(filename, opt_load=True)
for k, v in load_data.items():
if isinstance(v, pd.DataFrame):
logger.info('memory usage on {} is {:.3f} MB'.format(k, v.memory_usage().sum() / 1024. ** 2))
self.data_lastet_load = load_data#.copy()
return load_data
| 33.205607 | 109 | 0.587672 |
7b7fac5e786fffa0981a48a959c7b50a97194205 | 885 | py | Python | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
] | null | null | null | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
] | null | null | null | tests/testSevenKing.py | yooyoo2004/RoomAI | 7f4d655581a03ded801f6c6d7d18f9fff47aa6f5 | [
"MIT"
] | 1 | 2021-08-15T16:19:01.000Z | 2021-08-15T16:19:01.000Z | #!/bin/python
from roomai.sevenking import SevenKingEnv
from roomai.sevenking import SevenKingAction
import unittest
| 30.517241 | 120 | 0.662147 |
7b817580d6dc21506efb8434e6050e6f651bf968 | 1,776 | py | Python | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
] | null | null | null | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
] | null | null | null | pyamazonlandsat/product.py | eamanu/pyamazonlandsat | cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e | [
"MIT"
] | null | null | null | import attr
import os
import tarfile
from pyamazonlandsat.utils import get_path_row_from_name
from pyamazonlandsat.downloader import Downloader
| 31.714286 | 84 | 0.614865 |
7b83240c1ea862333830ef3e4b3423db43db8c92 | 5,352 | py | Python | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
] | 2 | 2021-04-13T18:54:08.000Z | 2021-09-21T23:08:08.000Z | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
] | null | null | null | segmentation.py | IgnacioPardo/RoadTrip | 6cdded860a67bb99cc1fc81e85cd8c09eaf46431 | [
"MIT"
] | null | null | null | from __future__ import division
from skimage.segmentation import slic, mark_boundaries
from skimage.util import img_as_float
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import os
from cv2 import boundingRect
#from argparse import ArgumentParser
img_width = 50
img_height = 50
img_depth = 4
_selected_segments = set()
_current_segments = []
_current_image = []
_original_image = []
_plt_img = []
_shift = False
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--name", default="new")
args = parser.parse_args()
image_paths = os.listdir("inputs")
images = [io.imread(os.path.join("inputs", image_path)) for image_path in image_paths]
print(f"Found {len(images)} inputs")
output_path = os.path.join("datasets", args.name)
existing_segments = os.listdir(output_path)
if 'c0' in existing_segments:
false_index = existing_segments.index('c0')
true_index = len(existing_segments) - false_index
else:
false_index = len(existing_segments)
true_index = 0
print("Segmenting")
segments = [segment(image) for image in images]
for i in range(len(images)):
selection = select(images[i], segments[i])
true_padded_images, _ = padded_segments(images[i], segments[i], selection)
print(f"Saving {len(true_padded_images)} car images")
for img in true_padded_images:
# Can't save it as an image: it has an extra channel
with open(os.path.join(output_path, f"c{str(true_index)}"), 'wb') as save_file:
np.save(save_file, img)
true_index += 1
not_selection = set(range(segments[i].max())) - selection
false_padded_images, _ = padded_segments(images[i], segments[i], not_selection)
print(f"Saving {len(false_padded_images)} non-car images")
for img in false_padded_images:
with open(os.path.join(output_path, str(false_index)), 'wb') as save_file:
np.save(save_file, img)
false_index += 1
os.rename(os.path.join("inputs", image_paths[i]), os.path.join("processed", image_paths[i])) | 30.409091 | 112 | 0.734865 |
7b842e0e690c82590e6a6533bd9a6cab6937e48f | 1,797 | py | Python | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
] | null | null | null | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
] | null | null | null | benten/code/workflowgraph.py | stain/benten | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | [
"Apache-2.0"
] | null | null | null | """Parse CWL and create a JSON file describing the workflow. This dictionary
is directly suitable for display by vis.js, but can be parsed for any other
purpose."""
# Copyright (c) 2019 Seven Bridges. See LICENSE
from ..cwl.lib import ListOrMap
| 29.95 | 97 | 0.590428 |
7b882a00a99da3e2e17e41e9f577ca3003e8abd3 | 2,561 | py | Python | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
] | null | null | null | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
] | null | null | null | app/core/models.py | fxavier/abt-epts | 021a8140db32afba106a7a9e122b98452d88c225 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
| 30.488095 | 76 | 0.673565 |
7b8b21db4d1b5bb95da77aaaeac80ad479fa1496 | 477 | py | Python | reviews/migrations/0006_review_no_login.py | moshthepitt/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
] | 6 | 2015-07-28T09:36:39.000Z | 2020-08-11T17:15:18.000Z | reviews/migrations/0006_review_no_login.py | Swifilaboroka/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
] | 8 | 2015-12-17T22:56:16.000Z | 2022-01-13T00:43:16.000Z | reviews/migrations/0006_review_no_login.py | Swifilaboroka/answers | 9febf465a18c41e7a48130e987a8fd64ceae3358 | [
"MIT"
] | 3 | 2017-07-15T12:13:03.000Z | 2022-02-02T10:04:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 23.85 | 125 | 0.628931 |
7b8c7e8a4741c68754a4c124370afe960c3a82b1 | 2,191 | py | Python | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
] | 2 | 2021-01-27T18:39:12.000Z | 2021-02-01T06:57:02.000Z | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
] | null | null | null | qbism/kraus.py | heyredhat/qbism | 192333b725495c6b66582f7a7b0b4c18a2f392a4 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 04kraus.ipynb (unless otherwise specified).
__all__ = ['apply_kraus', 'partial_trace_kraus', 'povm_map']
# Cell
import numpy as np
import qutip as qt
# Cell
def apply_kraus(dm, kraus):
    r"""
    Apply a Kraus map to a density matrix $\rho$.

    The map is a collection of operators satisfying
    $\sum_{i} \hat{K}_{i}^{\dagger}\hat{K}_{i} = \hat{I}$, and transforms
    $$\rho \rightarrow \sum_{i} \hat{K}_{i}\rho\hat{K}_{i}^{\dagger} $$
    """
    # sum() starts from 0, so an empty operator list yields 0 (as before).
    return sum(op * dm * op.dag() for op in kraus)
# Cell
def partial_trace_kraus(keep, dims):
    r"""
    Construct the Kraus map corresponding to the partial trace.

    Parameters
    ----------
    keep : int or list of int
        Index (or indices) of the subsystem(s) to keep.
    dims : list of int
        Dimensions of the factors of the overall tensor-product Hilbert space.

    Returns
    -------
    list
        One Kraus operator per joint basis state of the traced-out subsystems.
        For illustration, to trace over the $i^{th}$ subsystem of $n$, one
        constructs operators
        $$ \hat{K}_{i} = I^{\otimes i - 1} \otimes \langle i \mid \otimes I^{\otimes n - i}$$.
    """
    # isinstance (rather than type(keep) == int) also accepts int subclasses,
    # e.g. numpy integer types, which the original comparison rejected.
    if isinstance(keep, int):
        keep = [keep]
    trace_over = [i for i in range(len(dims)) if i not in keep]
    # Enumerate every joint basis assignment of the traced-out subsystems:
    # each entry of `indices` maps subsystem index -> basis-state index.
    indices = [{trace_over[0]: t} for t in range(dims[trace_over[0]])]
    for i in trace_over[1:]:
        new_indices = []
        for t in range(dims[i]):
            new_indices.extend([{**j, **{i: t}} for j in indices])
        indices = new_indices
    # For each joint assignment, tensor identities on the kept subsystems with
    # bra vectors on the traced-out ones.
    return [qt.tensor(*[qt.identity(d) if i in keep else qt.basis(d, index[i]).dag() for i, d in enumerate(dims)]) for index in indices]
# Cell
def povm_map(kraus, A, B=None):
    r"""
    Represent a Kraus map as a matrix on QBist probability vectors.

    Takes a list of Kraus operators, a POVM $A$ on the initial Hilbert space,
    and a POVM $B$ on the final Hilbert space; when $B$ is omitted it defaults
    to $A$. The matrix elements are
    $$K_{j, i} = tr( \mathbb{K}(\frac{\hat{A}_{i}}{tr \hat{A}_{i}})\hat{B}_{j} ) $$
    where $\mathbb{K}(\hat{O})$ denotes the Kraus map applied to $\hat{O}$.
    """
    if B is None:
        B = A
    rows = []
    for b in B:
        rows.append([(apply_kraus(a / a.tr(), kraus) * b).tr() for a in A])
    return np.array(rows).real
7b8d3bfd9dda43412dd61ee3a956e43a5295cf1f | 78 | py | Python | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
] | null | null | null | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
] | null | null | null | Python Book/12. Complex Loops/05_sequence_2k_plus_one/sequence_2k_plus_one.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | [
"MIT"
] | null | null | null | n = int(input())
num = 1
while num <= n:
print(num)
num = num * 2 + 1 | 13 | 21 | 0.487179 |
7b8dd1f4d57db9568b64c88454ba16b6a105aa77 | 4,129 | py | Python | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
] | 14 | 2020-06-30T00:36:14.000Z | 2022-01-11T13:15:53.000Z | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
] | null | null | null | run_all_benchmark_functions.py | ntienvu/KnowingOptimumValue_BO | 42225cb9d61c1225bd757fe9dd02834a0bc7a3e6 | [
"MIT"
] | 2 | 2020-10-17T15:27:06.000Z | 2021-02-27T10:34:04.000Z | import sys
sys.path.insert(0,'..')
sys.path.insert(0,'../..')
from bayes_opt import BayesOpt,BayesOpt_KnownOptimumValue
import numpy as np
#from bayes_opt import auxiliary_functions
from bayes_opt import functions
from bayes_opt import utilities
import warnings
#from bayes_opt import acquisition_maximization
import sys
import itertools
import matplotlib.pyplot as plt
np.random.seed(6789)  # fixed RNG seed so benchmark runs are reproducible
warnings.filterwarnings("ignore")
counter = 0
# Benchmark objective functions; uncomment an append() line to include it.
myfunction_list=[]
#myfunction_list.append(functions.sincos())
#myfunction_list.append(functions.branin())
#myfunction_list.append(functions.hartman_3d())
#myfunction_list.append(functions.ackley(input_dim=5))
myfunction_list.append(functions.alpine1(input_dim=5))
#myfunction_list.append(functions.hartman_6d())
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1])))
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1,1,1,1,1,1])))
# Acquisition strategies to compare; each dict has the acquisition 'name' and
# an 'IsTGP' flag (1 = use the transformed-GP surrogate). Uncomment the
# append() lines to enable additional strategies.
acq_type_list=[]
temp={}
temp['name']='erm' # expected regret minimization
# NOTE(review): the inline comment recommends TGP for ERM, yet IsTGP is 0 — confirm intended.
temp['IsTGP']=0 # recommended to use tgp for ERM
acq_type_list.append(temp)
temp={}
temp['name']='cbm' # confidence bound minimization
temp['IsTGP']=1 # recommended to use tgp for CBM
#acq_type_list.append(temp)
#temp={}
#temp['name']='kov_mes' # MES+f*
#temp['IsTGP']=0 # we can try 'tgp'
#acq_type_list.append(temp)
temp={}
temp['name']='kov_ei' # this is EI + f*
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ucb' # vanilla UCB
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ei' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='random' # random search
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
# Plot styling: one color/marker per (function, acquisition) combination.
fig=plt.figure()
# NOTE(review): 'o' is not a valid single-letter matplotlib color — likely meant 'y' or 'orange'.
color_list=['r','b','k','m','c','g','o']
marker_list=['s','x','o','v','^','>','<']
# One experiment per (objective function, acquisition strategy) pair.
# NOTE(review): leading indentation reconstructed — the extraction stripped it;
# the final plt.* labelling lines are assumed to run once after the loop.
for idx, (myfunction,acq_type,) in enumerate(itertools.product(myfunction_list,acq_type_list)):
    print("=====================func:",myfunction.name)
    print("==================acquisition type",acq_type)
    IsTGP=acq_type['IsTGP']
    acq_name=acq_type['name']
    # Repeat each experiment nRepeat times with distinct run ids.
    nRepeat=10
    ybest=[0]*nRepeat
    MyTime=[0]*nRepeat
    MyOptTime=[0]*nRepeat
    marker=[0]*nRepeat
    bo=[0]*nRepeat
    # NOTE(review): the next line is a no-op expression, likely a leftover.
    [0]*nRepeat
    for ii in range(nRepeat):
        # Strategies that exploit the known optimum value (f*) use the
        # BayesOpt_KnownOptimumValue optimizer; all others use plain BayesOpt.
        if 'kov' in acq_name or acq_name == 'erm' or acq_name == 'cbm':
            bo[ii]=BayesOpt_KnownOptimumValue(myfunction.func,myfunction.bounds,myfunction.fstar, \
                acq_name,IsTGP,verbose=1)
        else:
            bo[ii]=BayesOpt(myfunction.func,myfunction.bounds,acq_name,verbose=1)
        # 3*D random initial points, then 10*D optimization iterations (D = input dim).
        ybest[ii],MyTime[ii]=utilities.run_experiment(bo[ii],n_init=3*myfunction.input_dim,\
            NN=10*myfunction.input_dim,runid=ii)
        MyOptTime[ii]=bo[ii].time_opt
        print("ii={} BFV={:.3f}".format(ii,myfunction.ismax*np.max(ybest[ii])))
    # Collect per-repeat results and print the summary table.
    Score={}
    Score["ybest"]=ybest
    Score["MyTime"]=MyTime
    Score["MyOptTime"]=MyOptTime
    utilities.print_result_sequential(bo,myfunction,Score,acq_type)
    ## plot the result
    # process the result
    # Simple regret per iteration: fstar minus the best value observed so far.
    y_best_sofar=[0]*len(bo)
    for uu,mybo in enumerate(bo):
        y_best_sofar[uu]=[ (myfunction.fstar - np.max(mybo.Y_ori[:ii+1]) ) for ii in range(len(mybo.Y_ori))]
        y_best_sofar[uu]=y_best_sofar[uu][3*myfunction.input_dim:] # remove the random phase for plotting purpose
    y_best_sofar=np.asarray(y_best_sofar)
    myxaxis=range(y_best_sofar.shape[1])
    # Mean +/- standard error across the nRepeat runs.
    plt.errorbar(myxaxis,np.mean(y_best_sofar,axis=0), np.std(y_best_sofar,axis=0)/np.sqrt(nRepeat),
        label=acq_type['name'],color=color_list[idx],marker=marker_list[idx])
# Figure labels, applied after all curves are drawn.
plt.ylabel("Simple Regret",fontsize=14)
plt.xlabel("Iterations",fontsize=14)
plt.legend(prop={'size': 14})
strTitle="{:s} D={:d}".format(myfunction.name,myfunction.input_dim)
plt.title(strTitle,fontsize=18)
| 25.80625 | 125 | 0.654154 |
7b8e2e97334a2cce55aad103330d605ea89ea8e4 | 2,258 | py | Python | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
] | 2 | 2021-03-19T02:23:24.000Z | 2021-12-22T15:01:46.000Z | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
] | null | null | null | coursesical/ical.py | cdfmlr/coursesical | d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba | [
"MIT"
] | null | null | null | import icalendar
import uuid
from datetime import datetime
import pytz
cst = pytz.timezone('Asia/Shanghai')
# def fCalendar():
# cal = icalendar.Calendar()
# cal.add('prodid', '-//CDFMLR//coursesical//CN')
# cal.add('VERSION', '2.0')
# cal.add('X-WR-CALNAME', 'coursesical')
# cal.add('X-APPLE-CALENDAR-COLOR', '#ff5a1d')
# cal.add('X-WR-TIMEZONE', 'Asia/Shanghai')
# return cal
| 31.361111 | 95 | 0.569088 |
7b8f6c6edc977e548344a0694966296691f0f034 | 816 | py | Python | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
] | null | null | null | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
] | null | null | null | minesweeper/test/message_tests.py | newnone/Multiplayer-Minesweeper | 054adc4a14a710dfdd479791b9d1d40df061211c | [
"MIT"
] | null | null | null | #!/usr/bin/python3.2
import unittest
from minesweeper.message import *
# Allow running this test module directly as a script; unittest.main()
# discovers and runs any TestCase classes defined at module level.
if __name__ == "__main__":
    unittest.main()
| 27.2 | 98 | 0.61152 |
7b917e46393f05ca669d8af2e30bf77af89da6ab | 1,640 | py | Python | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | setup.py | RunnerPyzza/RunnerPyzza | 47f46339ab510635120613ac683f0be462f54ca4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from distutils.core import setup
from distutils.command.build_py import build_py
import os
import shutil
import stat
from RunnerPyzza import __version__
setup(
name = 'RunnerPyzza',
version = __version__,
author = 'Marco Galardini - Emilio Potenza',
author_email = 'marco.galardini@gmail.com - emilio.potenza@gmail.com',
packages = ['RunnerPyzza','RunnerPyzza.ClientCommon', 'RunnerPyzza.Common', 'RunnerPyzza.LauncherManager', 'RunnerPyzza.ServerCommon'],
scripts = ['RPdaemon','RPlauncher','RPaddservice','RPadduser','RPpreparedir','RPsshkeys'],
#url = 'http://RunnerPyzza',
license = 'LICENSE.txt',
description = 'An easy to use queue system for laboratory networks',
long_description = open('README.txt').read(),
install_requires = ["paramiko >= 1.7.7.2", "argparse >= 1.1"],
cmdclass = {"build_py" : runner_build_py}
)
| 33.469388 | 140 | 0.646951 |
7b92c51e95df7d865e1969f7a3d0f8febc341130 | 1,142 | py | Python | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | recursion/0043_string_multiplication.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
solu = Solution()
print(solu.multiply('123', '456')) | 27.853659 | 135 | 0.565674 |
7b941b7e926180fee64edecc4bb32c18fe4b75b2 | 220 | py | Python | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | 1 | 2016-08-17T06:36:03.000Z | 2016-08-17T06:36:03.000Z | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | null | null | null | galaxy/exceptions.py | jmchilton/pulsar | 783b90cf0bce893a11c347fcaf6778b98e0bb062 | [
"Apache-2.0"
] | null | null | null | """
Here for compat. with objectstore.
"""
| 15.714286 | 47 | 0.663636 |
7b95c23e30524cab22ee7e5bbccde48a49bfd895 | 9,432 | py | Python | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
] | null | null | null | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
] | 21 | 2021-08-13T06:11:55.000Z | 2022-03-18T06:13:05.000Z | fluid/node.py | quantmind/aio-fluid | e75f91646ac9a0c9ca5679bda12319c208166d64 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import inspect
import logging
import os
import random
import time
import uuid
from abc import ABC, abstractmethod
from functools import cached_property, wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional, Tuple
from aiohttp.client import ClientConnectionError, ClientConnectorError
from aiohttp.web import Application, GracefulExit
from .log import get_logger
from .utils import close_task, dot_name, underscore
def _exit(self) -> None: # pragma: no cover
if os.getenv("PYTHON_ENV") != "test":
raise GracefulExit
class NodeWorker(NodeBase):
# FOR DERIVED CLASSES
async def work(self) -> None:
"""Main work coroutine, this is where you define the asynchronous loop.
Must be implemented by derived classes
"""
raise NotImplementedError
# API
def is_running(self) -> bool:
"""True if the Node is running"""
return bool(self._worker)
# INTERNAL
class Node(NodeWorker):
"""A nodeworker with an heartbeat work loop and ability to publish
messages into a pubsub
"""
heartbeat: float = 1
ticks: int = 0
class Worker(NodeWorker):
class TickWorker(Node):
class every:
def __init__(self, seconds: float, noise: float = 0) -> None:
self.seconds = seconds
self.noise = min(noise, seconds)
self.last = 0
self.gap = self._gap()
self.ticks = 0
| 28.155224 | 86 | 0.603584 |
7b967b35c19a8e35142f9fb160d57122b85d9056 | 860 | py | Python | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
] | null | null | null | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
] | null | null | null | python/testcase.py | AurySystem/SYAML | 7bc6e6cae023bfb8c3f2f15f0ce9d3618f879593 | [
"MIT"
] | null | null | null | import syaml
testcase = """---
key:
key: f
key2: l
nest:
inner: g
nest2:
nestted: 3
inner2: s
outnest: 3
ha: g
je: r
---
key: value
a_list:
- itema
- listlist:
- itemitem
- itemb
- key1: bweh
key2: bweh
key3: bweh
key4: bweh
- innerList:
- innerItem
- indict: reh
rar: dd
sublist:
- iteml
- itemc
-
- itm
- [44,55,66,"7t","8t","eeee"]
- ohno
- "test"
- "ending": obj
key: last of inner
- aa: aaa
- lastitem
anotherkey: value
...
"""
# Round-trip check: parse the multi-document test string, dump it back out,
# and re-parse the dump to verify the emitted text is still loadable.
a = syaml.load(testcase)
print(a)
depth = 12  # NOTE(review): unused below — presumably a leftover; confirm before removing
#a[0] = recurse(a[0])
b = syaml.dump(2,a)  # assumes syaml.dump(indent, data) — TODO confirm argument order
print(b)
print(syaml.load(b))
7b98ab75092f0df028f96b2d93df9ca2c2ab75d6 | 478 | py | Python | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
] | null | null | null | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
] | null | null | null | lib/csvtools.py | mtyates/scrapers | 1fe55314b1235a971a436a8a17f05cea22b40f49 | [
"Apache-2.0"
] | 1 | 2021-12-20T16:55:50.000Z | 2021-12-20T16:55:50.000Z | #!/usr/bin/env python
import os
import sys
| 22.761905 | 71 | 0.546025 |
7b9a53fe727088aa66ec964d3fe2b9eeb158dba7 | 543 | py | Python | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0016_auto_20200222_2336.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2020-02-22 22:36
from django.db import migrations
| 20.884615 | 49 | 0.546961 |
7b9bed79cdfa84b20637330716a10344fca07de2 | 799 | py | Python | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 1,224 | 2015-01-01T22:09:23.000Z | 2022-03-29T19:43:56.000Z | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 253 | 2015-01-14T03:45:51.000Z | 2022-02-08T01:18:19.000Z | examples.py/Basics/Shape/LoadDisplayShape.py | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 225 | 2015-01-13T18:38:33.000Z | 2022-03-30T20:27:39.000Z | """
Load and Display a Shape.
Illustration by George Brower.
(Rewritten in Python by Jonathan Feinberg.)
The loadShape() command is used to read simple SVG (Scalable Vector Graphics)
files into a Processing sketch. This library was specifically tested under
SVG files created from Adobe Illustrator. For now, we can't guarantee that
it'll work for SVGs created with anything else.
"""
# The file "bot1.svg" must be in the data folder
# of the current sketch to load successfully
# loadShape() is a Processing (Python mode) built-in that parses the SVG.
bot = loadShape("bot1.svg")
| 33.291667 | 84 | 0.700876 |
7b9c4e6a952c20a965aae8106ca3b0f977bd503c | 4,015 | py | Python | deidentify/tokenizer/tokenizer_ons.py | bbieniek/deidentify | 7021bf0540e0a7f931e65544d12a2909c79a14eb | [
"MIT"
] | 64 | 2020-01-16T16:20:47.000Z | 2022-03-31T12:59:19.000Z | deidentify/tokenizer/tokenizer_ons.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
] | 14 | 2020-01-28T08:47:06.000Z | 2022-02-12T08:32:12.000Z | deidentify/tokenizer/tokenizer_ons.py | HabibMrad/deidentify | d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60 | [
"MIT"
] | 12 | 2020-01-21T07:54:04.000Z | 2022-02-19T06:42:53.000Z | """
Custom tokenization routines for the 'ons' corpus. Special care is taken to metadata tokens such as
=== Report: 12345 === that were inserted to distinguish between multiple documents of a client.
They will be properly handled during the tokenization and sentence segmentation stage.
"""
import re
import spacy
from spacy.matcher import Matcher
from spacy.symbols import ORTH
from deidentify.tokenizer import Tokenizer
META_REGEX = re.compile(r'=== (?:Report|Answer): [0-9]+ ===\n')
TOKENIZER_SPECIAL_CASES = [
'B.Sc.',
'Co.',
'Dhr.',
'Dr.',
'M.Sc.',
'Mevr.',
'Mgr.',
'Mr.',
'Mw.',
'O.K.',
'a.u.b.',
'ca.',
'e.g.',
'etc.',
'v.d.'
]
def _metadata_sentence_segmentation(doc):
"""Custom sentence segmentation rule of the Ons corpus. It segments metadata text into separate
sentences.
Metadata consists of 10 tokens:
['=', '=', '=', 'Report|Answer', ':', 'DDDDDD', '=', '=', '=', '\n']
During sentence segmentation, we want that the metadata is always a sentence in itself.
Therefore, the first token (i.e., '=') is marked as sentence start. All other tokens
are explicitly marked as non-sentence boundaries.
To ensure that anything immediately following after metadata is a new sentece, the next token
is marked as sentence start.
"""
for i in range(len(doc)):
if not _metadata_complete(doc, i):
continue
# All metadata tokens excluding the leading '='.
meta_span = doc[i - 8: i + 1]
for meta_token in meta_span:
meta_token.is_sent_start = False
# The leading '=' is a sentence boundary
doc[i - 9].is_sent_start = True
# Any token following the metadata is also a new sentence.
doc[i + 1].is_sent_start = True
return doc
NLP = spacy.load('nl_core_news_sm')
try:
NLP.add_pipe(_metadata_sentence_segmentation, before="parser") # Insert before the parser
except ValueError:
# spacy>=3
from spacy.language import Language
Language.component('meta-sentence-segmentation')(_metadata_sentence_segmentation) # pylint: disable=E1101
NLP.add_pipe('meta-sentence-segmentation', before="parser") # Insert before the parser
for case in TOKENIZER_SPECIAL_CASES:
NLP.tokenizer.add_special_case(case, [{ORTH: case}])
NLP.tokenizer.add_special_case(case.lower(), [{ORTH: case.lower()}])
infixes = NLP.Defaults.infixes + [r'\(', r'\)', r'(?<=[\D])\/(?=[\D])']
infix_regex = spacy.util.compile_infix_regex(infixes)
NLP.tokenizer.infix_finditer = infix_regex.finditer
| 31.124031 | 109 | 0.596762 |
7b9c889768e3496393e2ee54739cb4b6ccbaab96 | 1,219 | py | Python | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | 1 | 2022-03-09T18:07:11.000Z | 2022-03-09T18:07:11.000Z | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | null | null | null | systemtest/quality/utils/models.py | IBM-Power-SystemTest/systemtest | a29e6d54500ca13f554073cc66a4a2d403ea5b14 | [
"BSD-3-Clause"
] | null | null | null |
# Django
from django.conf import Settings, settings
# APPs
from systemtest.quality import forms as quality_forms, models as quality_models
from systemtest.utils.db2 import Database
def get_quality_status(status_name: str) -> quality_models.QualityStatus:
    """Return the QualityStatus whose name exactly matches ``status_name``.

    Args:
        status_name: Exact name of the status to look up.

    Raises:
        DoesNotExist: No QualityStatus row has that name.

    Returns:
        The matching QualityStatus instance.
    """
    status_manager = quality_models.QualityStatus.objects
    return status_manager.get(name=status_name)
| 23.442308 | 79 | 0.646432 |
7b9c9c8690ed96b25a9028c69ebb2b7c65845147 | 1,849 | py | Python | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | 11 | 2016-02-08T11:45:26.000Z | 2017-05-19T16:07:31.000Z | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | 5 | 2016-02-11T22:11:54.000Z | 2016-06-09T20:54:07.000Z | cibopath/scraper.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
import logging
import aiohttp
from cibopath import readme_parser, github_api
from cibopath.templates import Template
logger = logging.getLogger('cibopath')
def fetch_template_data(username, token):
    """Yield template data for every GitHub link found in the Cookiecutter readme.

    Generator: fetches the audreyr/cookiecutter README via the GitHub API
    (authenticated with ``username``/``token``), extracts the GitHub links it
    references, downloads each linked template concurrently (at most 10
    requests in flight), and yields each successfully fetched template.

    Raises:
        CookiecutterReadmeError: the Cookiecutter readme could not be fetched.
        UnableToFindTemplateLinks: the readme contained no GitHub links.
        (Both exception classes are defined elsewhere in the package.)
    """
    # Cap concurrent GitHub requests at 10.
    semaphore = asyncio.Semaphore(10)
    loop = asyncio.get_event_loop()
    auth = aiohttp.BasicAuth(username, token)
    with aiohttp.ClientSession(loop=loop, auth=auth) as client:
        logger.debug('Load Cookiecutter readme')
        cookiecutter_readme = loop.run_until_complete(
            github_api.get_readme(semaphore, client, 'audreyr', 'cookiecutter')
        )
        if not cookiecutter_readme:
            raise CookiecutterReadmeError
        logger.debug('Find GitHub links in Cookiecutter readme')
        github_links, _ = readme_parser.read(cookiecutter_readme)
        if not github_links:
            raise UnableToFindTemplateLinks
        # One coroutine per template link; gathered in a single batch below.
        tasks = [
            github_api.get_template(semaphore, client, link)
            for link in github_links
        ]
        logger.debug('Fetch template data from links')
        results = loop.run_until_complete(asyncio.gather(*tasks))
        yield from filter(None, results)  # Ignore all invalid templates
def load_templates(username, token):
    """Build a list of Template records, one per fetched template.

    Tags parsed from each template's readme are attached in sorted order.
    """
    def _to_template(record):
        name, author, repo, context, readme = record
        _, tags = readme_parser.read(readme)
        return Template(name, author, repo, context, sorted(tags))

    return [_to_template(record) for record in fetch_template_data(username, token)]
| 29.822581 | 79 | 0.70146 |
7b9ce56039cc41fcf712d566d9141353c7327dc4 | 5,400 | py | Python | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
] | 4 | 2020-06-11T00:11:17.000Z | 2022-03-17T22:58:13.000Z | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
] | null | null | null | using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py | sjdemartini/SpikeSafePythonSamples | 60dc9cd175577e9601c0709ac471c72c5a666f1b | [
"MIT"
] | 2 | 2021-12-20T20:03:05.000Z | 2022-01-12T18:51:54.000Z | # Goal:
# Demonstrate the A/B switch functionality of the SpikeSafe PSMU while operating in DC mode
#
# Expectation:
# Channel 1 will run in DC mode with the switch set to Primary.
# Afterward the Switch be set to Auxiliary mode, in which another source may operate connected to the SpikeSafe
# After the Auxiliary source has completed operation, the switch will be set to Primary to operate the SpikeSafe in DC mode again
import sys
import time
import logging
from spikesafe_python.MemoryTableReadData import log_memory_table_read
from spikesafe_python.ReadAllEvents import log_all_events
from spikesafe_python.TcpSocket import TcpSocket
from spikesafe_python.Threading import wait
from spikesafe_python.SpikeSafeError import SpikeSafeError
from tkinter import messagebox
### set these before starting application
# SpikeSafe IP address and port number
ip_address = '10.0.0.220'
port_number = 8282
### setting up sequence log
log = logging.getLogger(__name__)
logging.basicConfig(filename='SpikeSafePythonSamples.log',format='%(asctime)s, %(levelname)s, %(message)s',datefmt='%m/%d/%Y %I:%M:%S',level=logging.INFO)
### start of main program
try:
log.info("ForceSenseSwitchSample.py started.")
# instantiate new TcpSocket to connect to SpikeSafe
tcp_socket = TcpSocket()
tcp_socket.open_socket(ip_address, port_number)
# reset to default state
tcp_socket.send_scpi_command('*RST')
log_all_events(tcp_socket)
# check that the Force Sense Selector Switch is available for this SpikeSafe. We need the switch to run this sequence
# If switch related SCPI is sent and there is no switch configured, it will result in error "386, Output Switch is not installed"
tcp_socket.send_scpi_command('OUTP1:CONN:AVAIL?')
isSwitchAvailable = tcp_socket.read_data()
if isSwitchAvailable != 'Ch:1':
raise Exception('Force Sense Selector Switch is not available, and is necessary to run this sequence.')
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
# the default switch state can be manually adjusted using SCPI, so it is best to send this command even after sending a *RST
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# set Channel 1 settings to operate in DC mode
tcp_socket.send_scpi_command('SOUR1:FUNC:SHAP DC')
tcp_socket.send_scpi_command('SOUR1:CURR:PROT 50')
tcp_socket.send_scpi_command('SOUR1:CURR 0.1')
tcp_socket.send_scpi_command('SOUR1:VOLT 20')
# log all SpikeSafe event after settings are adjusted
log_all_events(tcp_socket)
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
# When operating in DC mode, the channel must be turned off before adjusting the switch state
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# set the Force Sense Selector Switch state to Auxiliary (B) so that the Auxiliary Source will be routed to the DUT and the SpikeSafe will be disconnected
tcp_socket.send_scpi_command('OUTP1:CONN AUX')
# Show a message box so any tasks using the Auxiliary source may be performed before adjusting the switch back to Primary
# The SpikeSafe is not electrically connected to the DUT at this time
messagebox.showinfo("Auxiliary Source Active", "Force Sense Selector Switch is in Auxiliary (B) mode. Perform any tests using the auxiliary source, then close this window to adjust the switch back to Primary (A) mode.")
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# disconnect from SpikeSafe
tcp_socket.close_socket()
log.info("ForceSenseSwitchSample.py completed.\n")
except SpikeSafeError as ssErr:
# print any SpikeSafe-specific error to both the terminal and the log file, then exit the application
error_message = 'SpikeSafe error: {}\n'.format(ssErr)
log.error(error_message)
print(error_message)
sys.exit(1)
except Exception as err:
# print any general exception to both the terminal and the log file, then exit the application
error_message = 'Program error: {}\n'.format(err)
log.error(error_message)
print(error_message)
sys.exit(1) | 46.551724 | 223 | 0.696111 |
7b9d392017b7f0eb08e175d175e38cee08ff4854 | 1,886 | py | Python | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/contrib/cluster_telemetry/screenshot.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import py_utils
import time
from telemetry.page import legacy_page_test
from telemetry.util import image_util
| 33.087719 | 80 | 0.698303 |
7b9dada36fd7bad56b1a0092534a61252ce1c05e | 2,474 | py | Python | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
] | null | null | null | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
] | null | null | null | tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py | mail2nsrajesh/python-tripleoclient | 6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from tripleoclient.tests.v1.overcloud_deploy import fakes
from tripleoclient.v1 import overcloud_delete
| 36.382353 | 79 | 0.719887 |
7b9f976e658245e57765789e6e80ca7112711034 | 8,621 | py | Python | bird_view/models/agent_IAs_RL.py | magh24/carla_RL_IAs | a38fb353bd84330c6c20b9cc8e824d7bbb02cfe5 | [
"MIT"
] | 39 | 2020-03-17T10:12:49.000Z | 2022-03-12T14:18:45.000Z | bird_view/models/agent_IAs_RL.py | marintoro/LearningByCheating | a13b331ee8d69071570c97b35f1348758d658ee5 | [
"MIT"
] | null | null | null | bird_view/models/agent_IAs_RL.py | marintoro/LearningByCheating | a13b331ee8d69071570c97b35f1348758d658ee5 | [
"MIT"
] | 16 | 2020-06-11T20:15:57.000Z | 2022-03-13T01:55:16.000Z | import numpy as np
import torch
from collections import deque, namedtuple
import cv2
import os
import carla
from .model_supervised import Model_Segmentation_Traffic_Light_Supervised
from .model_RL import DQN, Orders
| 37.482609 | 95 | 0.594363 |
7b9fd2f8f9e605ca6783d2a3d4f02dcb90eb1482 | 162 | py | Python | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
] | 5 | 2020-10-12T16:21:31.000Z | 2021-12-15T20:27:22.000Z | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
] | null | null | null | Python/1079.py | Marcelalopes/Questoes-URI | e13894c1bcbcb252ed814d5b5e930d05c7a8494f | [
"MIT"
] | 5 | 2019-06-21T04:26:14.000Z | 2021-05-01T14:15:44.000Z | n = int(input())
for i in range(1 , n + 1 ):
x = input().split()
a,b,c = x
print('{:.1f}'.format((float(a) * 2 + float(b) * 3 + float(c) * 5) / 10))
| 23.142857 | 77 | 0.45679 |
7b9fd98a85b6fed6891c0ba799c31065628711f4 | 10,547 | py | Python | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
] | null | null | null | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
] | null | null | null | bin_testing/diff_fuzzing.py | KristianMika/PA193-Bech32m | 6625c3883dd4ee4db40afc0b9eae1c945544a87b | [
"MIT"
] | null | null | null | import base64
import binascii
import datetime
import os
import subprocess
import random
import sys
BECH_SYMBOLS = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
OUR_BINARY = None
LIBBECH32ENC_BINARY = None
LIBBECH32DEC_BINARY = None
NODE_REF = "node . "
# region Encoding
# endregion
# region Decoding
# endregion
# Adapted from
# https://stackoverflow.com/questions/1425493/convert-hex-to-binary
if __name__ == '__main__':
    # CLI: <our binary> <libbech32 encoder> <libbech32 decoder>
    #      <max iterations> <wall-clock budget in seconds>
    OUR_BINARY = sys.argv[1]
    LIBBECH32ENC_BINARY = sys.argv[2]
    LIBBECH32DEC_BINARY = sys.argv[3]
    FUZZ_ITERATIONS = int(sys.argv[4])
    FUZZ_SECONDS = int(sys.argv[5])
    # Fixed seed inputs for the first differential-fuzzing iteration.
    _hrp = 'v)zeod9[qg.ns)+}r}'
    _hex_str = '857e'
    _b64_str = to_base64(_hex_str.upper())
    # NOTE(review): appears to be a warm-up/sanity call before the timer
    # starts (its result is discarded) — confirm intent.
    process('a', 'ff', to_base64('FF'))
    fail_count = 0
    start_time = datetime.datetime.now()
    for _ in range(0, FUZZ_ITERATIONS):
        # process/generate_hrp/generate_hex/to_base64 are presumably
        # defined earlier in this module (not visible in this chunk).
        if not process(_hrp, _hex_str, _b64_str): fail_count += 1
        _hrp = generate_hrp()
        _hex_str = generate_hex(_hrp)
        _b64_str = to_base64(_hex_str.upper())
        # Stop early once the time budget is spent. NOTE: timedelta.seconds
        # is only the seconds *component* (wraps per day), not
        # total_seconds(); fine for budgets under 24h.
        end_time = datetime.datetime.now()
        if (end_time - start_time).seconds >= FUZZ_SECONDS:
            print(f'Fuzzing stopped after {FUZZ_SECONDS} seconds')
            break
    print("DONE")
    # Process exit status = number of failed iterations (0 means all passed).
    sys.exit(fail_count)
| 35.156667 | 114 | 0.561297 |
7ba085171ad82d0c573dcc7bfc7f5421e63a5a9f | 3,166 | py | Python | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
] | 67 | 2018-11-13T21:40:54.000Z | 2022-02-23T08:11:56.000Z | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
] | 679 | 2018-11-13T20:10:29.000Z | 2022-03-30T19:55:25.000Z | ldt/utils/usaf/bcsd_preproc/forecast_task_07.py | andrewsoong/LISF | 20e3b00a72b6b348c567d0703550f290881679b4 | [
"Apache-2.0"
] | 119 | 2018-11-08T15:53:35.000Z | 2022-03-28T10:16:01.000Z | #!/usr/bin/env python3
"""
#------------------------------------------------------------------------------
#
# SCRIPT: forecast_task_07.py
#
# PURPOSE: Combine all non-precip 6-hourly files into one file and copy BCSD
# precip files in to the same directory Based on FORECAST_TASK_07.sh.
#
# REVISION HISTORY:
# 24 Oct 2021: Ryan Zamora, first version
#
#------------------------------------------------------------------------------
"""
#
# Standard modules
#
import configparser
import os
import subprocess
import sys
#
# Local methods
#
def _usage():
"""Print command line usage."""
txt = f"[INFO] Usage: {(sys.argv[0])} current_year month_abbr config_file"
print(txt)
print("[INFO] where")
print("[INFO] current_year: Current year")
print("[INFO] month_abbr: Current month")
print("[INFO] config_file: Config file that sets up environment")
def _read_cmd_args():
"""Read command line arguments."""
if len(sys.argv) != 4:
print("[ERR] Invalid number of command line arguments!")
_usage()
sys.exit(1)
# current_year
try:
current_year = int(sys.argv[1])
except ValueError:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
if current_year < 0:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
# month_abbr
month_abbr = str(sys.argv[2])
# config_file
config_file = sys.argv[3]
if not os.path.exists(config_file):
print(f"[ERR] {config_file} does not exist!")
sys.exit(1)
return current_year, month_abbr, config_file
def read_config(config_file):
    """Parse the bcsd_preproc config file and return the ConfigParser."""
    parser = configparser.ConfigParser()
    parser.read(config_file)
    return parser
def _driver():
    """Main driver.

    Symlinks each raw CFSv2 6-hourly ensemble directory into the NMME
    linked-precip directory, mapping the 30 source members (12 + 12 + 6)
    onto NMME member numbers 1-30.
    """
    current_year, month_abbr, config_file = _read_cmd_args()
    # Setup local directories
    config = read_config(config_file)
    # Path of the main project directory
    projdir = config["bcsd_preproc"]["projdir"]
    # Number of precip ensembles needed
    # (12 + 12 + 6 = 30 entries, matching the 30 targets in range_ens_nmme)
    range_ens_fcst=list(range(1, 13)) + list(range(1,13)) + list(range(1,7))
    range_ens_nmme=range(1,31)
    # Forecast initialization date, e.g. "jun01".
    fcst_date = f"{month_abbr}01"
    # Path for where forecast files are located:
    indir=f"{projdir}/data/forecast/CFSv2_25km/raw/6-Hourly/{fcst_date}/{current_year}"
    # Path for where the linked precip files should be placed:
    outdir=f"{projdir}/data/forecast/NMME/linked_cfsv2_precip_files/{fcst_date}/{current_year}"
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    for iens, ens_value in enumerate(range_ens_fcst):
        src_file=f"{indir}/ens{ens_value}"
        dst_file=f"{outdir}/ens{range_ens_nmme[iens]}"
        # "ln -sfn": force-replace any existing link, never deref it.
        # NOTE(review): shell=True with config-derived paths is fine for a
        # trusted config file, but would be unsafe for untrusted input.
        cmd = f"ln -sfn {src_file} {dst_file}"
        print(cmd)
        returncode = subprocess.call(cmd, shell=True)
        if returncode != 0:
            print("[ERR] Problem calling creating symbolic links!")
            sys.exit(1)
    print("[INFO] Done creating symbolic links")
#
# Main Method
#
if __name__ == "__main__":
    # Run the driver only when executed as a script (not on import).
    _driver()
| 26.830508 | 95 | 0.622236 |
7ba2716e5d28bfa4af27d8788deae9d221d9561f | 271 | py | Python | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | 1_estrutura_sequencial/10_celsius_fahrenheit.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | [
"MIT"
] | null | null | null | """
10. Faça um Programa que peça a temperatura em graus Celsius, transforme e mostre em graus Fahrenheit.
"""
# Read a Celsius temperature, convert it, and print it in Fahrenheit.
temp_c = float(input('Informe o valor em Celsius (C): '))
temp_f = temp_c * 9 / 5 + 32
print('{} C igual a {:.1f} F'.format(temp_c, temp_f))
| 30.111111 | 101 | 0.682657 |
7ba27d2ca0843358d969fed10afe5cbbd1851036 | 12,178 | py | Python | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
] | 10 | 2018-09-17T02:14:34.000Z | 2021-06-17T12:16:35.000Z | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
] | null | null | null | model/modules/capsules.py | lidq92/pytorch-dynamic-routing-between-capsules | 4388cd36193348cbb10035008360330e67acdd41 | [
"MIT"
] | 2 | 2019-08-06T20:40:02.000Z | 2020-01-02T08:24:39.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
def squash(s, dim=-1, eps=1e-8):
    """
    Squashing non-linearity from "Dynamic Routing Between Capsules":
    shrinks short vectors toward zero and caps long vectors just below
    unit length:

        v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||

    Args:
        s: Capsule vector before activation
        dim: Dimension along which the norm is computed
        eps: Small constant guarding the division by the norm

    Returns:
        Tensor of the same shape as ``s`` holding the squashed vectors
    """
    sq_norm = (s * s).sum(dim=dim, keepdim=True)
    scale = sq_norm / (1 + sq_norm)
    unit = s / (sq_norm.sqrt() + eps)
    return scale * unit
| 36.029586 | 172 | 0.579323 |
7ba31e643aa2124a524e4368c26dcf7ed0147d91 | 16,807 | py | Python | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
] | 511 | 2015-10-17T09:28:28.000Z | 2022-02-20T21:58:56.000Z | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
] | 575 | 2015-10-09T11:54:09.000Z | 2021-11-22T20:50:19.000Z | ci/test_marathon_lb_dcos_e2e.py | vivint-smarthome/marathon-lb | d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9 | [
"Apache-2.0"
] | 411 | 2015-10-29T13:41:45.000Z | 2022-02-11T09:27:50.000Z | #!python3
import contextlib
import json
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from dcos_e2e import cluster
from dcos_e2e import node
from dcos_test_utils import helpers as dcos_helpers
from dcos_test_utils import iam as dcos_iam
from dcos_test_utils import enterprise as dcos_ee_api
from dcos_test_utils import dcos_api
from dcos_test_utils import package
import dcos_installer_tools
import pytest
import test_marathon_lb
DCOS_E2E_BACKEND = 'DCOS_E2E_BACKEND'
DCOS_E2E_CLUSTER_ID = 'DCOS_E2E_CLUSTER_ID'
DCOS_E2E_NODE_TRANSPORT = 'DCOS_E2E_NODE_TRANSPORT'
DCOS_LOGIN_UNAME = 'DCOS_LOGIN_UNAME'
DCOS_LOGIN_PW = 'DCOS_LOGIN_PW'
BACKEND_AWS = 'aws'
BACKEND_DOCKER = 'docker'
BACKEND_VAGRANT = 'vagrant'
MARATHON_LB_IMAGE = os.environ.get('MARATHON_LB_IMAGE',
'marathon-lb:latest')
MARATHON_LB_VERSION = os.environ.get('MARATHON_LB_VERSION',
'dev')
OSS = 'oss'
ENTERPRISE = 'enterprise'
VARIANTS = {OSS: dcos_installer_tools.DCOSVariant.OSS,
ENTERPRISE: dcos_installer_tools.DCOSVariant.ENTERPRISE}
VARIANT_VALUES = dict((value.value, value) for value in VARIANTS.values())
logging.captureWarnings(True)
# NOTE(jkoelker) Define some helpers that should eventually be upstreamed
def add_user_to_group(self, user, group):
    """PUT `user` into `group` via the IAM REST endpoint."""
    endpoint = '/groups/{}/users/{}'.format(group, user)
    return self.put(endpoint)
def delete_user_from_group(self, user, group):
    """DELETE `user` from `group`; a no-op (returns None) when not a member."""
    if self.user_in_group(user, group):
        endpoint = '/groups/{}/users/{}'.format(group, user)
        return self.delete(endpoint)
    return None
def list_group_users(self, group):
    """Return the member entries of `group` (the API response's 'array')."""
    response = self.get('/groups/{}/users'.format(group))
    response.raise_for_status()
    body = response.json()
    return body['array']
def user_in_group(self, user, group):
    """Return True if `user` (a uid) is a member of `group`.

    Uses a generator with any() so membership short-circuits on the first
    match instead of materializing the full uid list first.
    """
    members = self.list_group_users(group)
    return any(entry['user']['uid'] == user for entry in members)
# NOTE(jkoelker) Monkey patch in our helpers
# Attach convenience accessors to the dcos-test-utils session classes so
# call sites can use `session.package` / `session.secrets`.
# Package and Secrets are presumably session wrappers defined elsewhere
# in this module — not visible in this chunk; confirm.
dcos_api.DcosApiSession.package = property(
    lambda s: Package(default_url=s.default_url.copy(path='package'),
                      session=s.copy().session))
dcos_api.DcosApiSession.secrets = property(
    lambda s: Secrets(
        default_url=s.default_url.copy(path='secrets/v1'),
        session=s.copy().session))
dcos_ee_api.EnterpriseApiSession.secrets = property(
    lambda s: Secrets(
        default_url=s.default_url.copy(path='secrets/v1'),
        session=s.copy().session))
# Graft the module-level helper functions onto Iam as bound methods.
dcos_iam.Iam.add_user_to_group = add_user_to_group
dcos_iam.Iam.delete_user_from_group = delete_user_from_group
dcos_iam.Iam.list_group_users = list_group_users
dcos_iam.Iam.user_in_group = user_in_group
def test_port(app_deployment, app_port):
assert app_port == app_deployment["labels"]["HAPROXY_0_PORT"]
def test_response(app_deployment, app_port, agent_public_ip):
    """Fetch the app through marathon-lb and verify HTTP status and body."""
    content, status = test_marathon_lb.get_app_content(app_port,
                                                       agent_public_ip)
    assert status == 200
    assert content == app_deployment['name']
| 32.571705 | 76 | 0.609091 |
7ba7975d420153a385e3680b17a15d19e06af3c9 | 308 | py | Python | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
] | null | null | null | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
] | null | null | null | day1.py | danmana/adventofcode2017 | 6f80cd7c2382453b6e9d577975c2f02a024095c5 | [
"MIT"
] | null | null | null |
# Advent of Code day 1: print both part answers for each input line.
# A with-statement guarantees the handle is closed even if sumOf raises,
# unlike the previous manual open()/close() pair.
# NOTE(review): sumOf is presumably defined elsewhere in this file.
with open("./input/input1.txt", "r") as file:
    for s in file:
        s = s.strip()
        print('Part 1: ', sumOf(s, 1))
        print('Part 2: ', sumOf(s, int(len(s)/2)))
7ba983a2c839be1dfa3a88ffa4c32747f568686e | 2,123 | py | Python | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
] | null | null | null | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
] | null | null | null | tests/test_inflate.py | FilipKlaesson/cops | 67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from cops.graph import Graph
from cops.clustering import ClusterProblem, ClusterStructure, inflate_agent_clusters
| 31.686567 | 84 | 0.646726 |
7bab917bc22f4ebc30925ed7bbdf5a597c5e9ff4 | 3,243 | py | Python | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
] | null | null | null | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
] | 7 | 2019-12-27T20:59:12.000Z | 2020-01-08T22:53:42.000Z | tools/STCG/STCG.py | ambertide/SASVM | ee699ec9b585ad0fccb0b70e11dde1b225ac56c1 | [
"MIT"
] | null | null | null | import csv
from sys import argv
from os import getcwd
def generate_memory_list_view(expected_memory) -> str:
    """
    Render a memory byte-string as source code for a list of Cell objects.

    Each pair of hex characters becomes one Cell literal. A malformed
    odd-length input now yields a final one-character Cell instead of
    raising IndexError as the previous index-pair version did.

    :param expected_memory: hex byte-string, e.g. "A00023"
    :return: e.g. '[Cell("A0"),Cell("00"),Cell("23"),]'
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    body = "".join(
        f'Cell("{expected_memory[i:i + 2]}"),'
        for i in range(0, len(expected_memory), 2)
    )
    return "[" + body + "]"
def generate_test_case(file_name: str, expected_memory: str, expected_output: str) -> str:
    """
    Generate a test case string to test an *.asm file.
    :param file_name: *.asm file to test
    :param expected_memory: Expected memory as bytestring
    :param expected_output: Expected output from STDOUT
    :return: String
    """
    # The assembly program under test is embedded verbatim inside the
    # generated unittest module below.
    with open(file_name) as file:
        code = file.read()
    expected_memory_list: str = generate_memory_list_view(expected_memory)
    # NOTE: the f-string below is the *source code* of a generated test
    # module: \"\"\" escapes produce literal triple quotes around {code},
    # and {len(expected_memory)//2} bakes the memory size (in cells) into
    # the generated assembler call.
    output: str = f"""# Generated with SpaceCat TestCase Generator.
import unittest
from spacecat import assembler, simulator
from spacecat.common_utils import Cell
test_code = \"\"\"{code}\"\"\"
class AlphabetBenchmark(unittest.TestCase):
    def test_assembler(self):
        a_ = assembler.Assembler.instantiate(test_code, mem_size={len(expected_memory)//2})
        self.assertEqual({expected_memory_list}, a_.memory)
    def test_simulator(self):
        a_ = assembler.Assembler.instantiate(test_code, mem_size=128)
        s_ = simulator.Simulator(mem_size=128, register_size=16, stdout_register_indices=[15])
        s_.load_memory(a_.memory)
        output = ""
        i = 0
        for _ in s_:
            output += s_.return_stdout()
            i += 1
            if i == 10_000:
                self.fail("Failed to resolve in given CPU Cycles.")
        self.assertEqual('{expected_output}', output)
if __name__ == '__main__':
    unittest.main()
"""
    return output
if __name__ == "__main__":
    print("Generating...")
    # NOTE(review): this guard tests for *any* extra argument but then
    # reads argv[1..3]; running with only 1-2 extra args raises
    # IndexError. The intended check is presumably len(argv) >= 4 — confirm.
    if len(argv) > 1:
        relative_import_directory = argv[1]
        config_file = argv[2]
        output_directory = argv[3]
    else:
        # Defaults for running directly from the tools/STCG directory.
        relative_import_directory = "../../src/data/sample_scripts"
        config_file="test_files.csv"
        output_directory="../../src/test/integration_tests"
    # generate_from_config is presumably defined elsewhere in this file
    # (not visible in this chunk).
    generate_from_config(relative_import_directory, config_file, output_directory)
7bad82b4e2d7cbdb41d0bbaab31ed7d1164ed27e | 108 | py | Python | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
] | null | null | null | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
] | null | null | null | Crash Course Python, 2nd Edition/Chapter 3, Introducing List/YourOwnList.py | EdgarCastillo101/Crash-Course-Python-2nd-edition | 484c9096076c0ba69b1b9d78c6c974064fc1eda3 | [
"MIT"
] | null | null | null | car = ['volvo', 'toyota', 'BMW', 'Yes?']
message = f"I would like to own a {car[1].title()}"
print(message) | 27 | 51 | 0.601852 |
7baf6ff631178bc7ddca808d29592a1384d2ce35 | 10,677 | py | Python | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
] | null | null | null | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
] | null | null | null | stanCode_projects/my_drawing/my_drawing.py | ShihYesWei/stanCode-projects | 69104b7be3d8c3fbd34935c1d4e15e40961e4556 | [
"MIT"
] | null | null | null | """
File: my_drawing
Author name: Alan Chen
----------------------
This program will draw a recently famous picture of Gian, one of the main characters in Doraemon.
In the original picture, Gian is scared by something. Here, the thing that scares him is reassigned
to be the Illuminati symbol together with the string "PYTHON".
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
w = GWindow(1000, 650)
def main():
"""
Draw a scared Gian.
"""
'''
#This is for adjusting the position
for i in range(0, 1000, 100):
li = GLine(i, 0, i, 650)
locatei = GLabel(str(i))
w.add(li)
w.add(locatei, i, 20)
for j in range(0, 700, 100):
lj = GLine(0, j, 1000, j)
locatej = GLabel(str(j))
w.add(lj)
w.add(locatej, 0, j)
'''
#background
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((0, 0))
bg.add_vertex((0, 325))
bg.filled = True
bg.fill_color = 'red'
bg.color = 'red'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((0, 325))
bg.add_vertex((0, 650))
bg.filled = True
bg.fill_color = 'orange'
bg.color = 'orange'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((333, 650))
bg.add_vertex((0, 650))
bg.filled = True
bg.fill_color = 'lightgreen'
bg.color = 'lightgreen'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((333, 650))
bg.add_vertex((666, 650))
bg.filled = True
bg.fill_color = 'slategrey'
bg.color = 'slategrey'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 650))
bg.add_vertex((666, 650))
bg.filled = True
bg.fill_color = 'darkcyan'
bg.color = 'darkcyan'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 650))
bg.add_vertex((1000, 400))
bg.filled = True
bg.fill_color = 'greenyellow'
bg.color = 'greenyellow'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 400))
bg.add_vertex((1000, 200))
bg.filled = True
bg.fill_color = 'khaki'
bg.color = 'khaki'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 0))
bg.add_vertex((1000, 200))
bg.filled = True
bg.fill_color = 'mistyrose'
bg.color = 'mistyrose'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((1000, 0))
bg.add_vertex((666, 0))
bg.filled = True
bg.fill_color = 'plum'
bg.color = 'plum'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((350, 0))
bg.add_vertex((666, 0))
bg.filled = True
bg.fill_color = 'magenta'
bg.color = 'magenta'
w.add(bg)
bg = GPolygon()
bg.add_vertex((666, 325))
bg.add_vertex((350, 0))
bg.add_vertex((0, 0))
bg.filled = True
bg.fill_color = 'tomato'
bg.color = 'tomato'
w.add(bg)
#body
body = GOval(900, 200)
body.filled = True
body.fill_color = 'Steelblue'
body.color = 'blue'
w.add(body, 220, 570)
#face
lower_face = GOval(530, 380)
lower_face.filled = True
lower_face.fill_color = 'Steelblue'
lower_face.color = 'navy'
w.add(lower_face, 405, 260)
upper_face = GOval(485, 575)
upper_face.filled = True
upper_face.fill_color = 'Steelblue'
upper_face.color = 'Steelblue'
w.add(upper_face, 423, 40)
shadow_on_face = GOval(420, 330)
shadow_on_face.filled = True
shadow_on_face.fill_color = 'Cadetblue'
shadow_on_face.color = 'Cadetblue'
w.add(shadow_on_face, 455, 230)
shadow_on_face2 = GOval(390, 370)
shadow_on_face2.filled = True
shadow_on_face2.fill_color = 'Cadetblue'
shadow_on_face2.color = 'Cadetblue'
w.add(shadow_on_face2, 480, 170)
# right_eye
right_eye1 = GOval(90, 90)
right_eye1.filled = True
right_eye1.fill_color = 'powderblue'
right_eye1.color = 'black'
w.add(right_eye1, 525, 225)
right_eye2 = GOval(45, 80)
right_eye2.color = 'black'
w.add(right_eye2, 546, 231)
right_eye3 = GOval(30, 45)
right_eye3.color = 'black'
w.add(right_eye3, 552, 253)
right_eye4 = GOval(5, 10)
right_eye4.filled = True
right_eye4.fill_color = 'black'
right_eye4.color = 'black'
w.add(right_eye4, 565, 271)
# left_eye
left_eye1 = GOval(90, 90)
left_eye1.filled = True
left_eye1.fill_color = 'powderblue'
left_eye1.color = 'black'
w.add(left_eye1, 710, 230)
left_eye2 = GOval(60, 80)
left_eye2.color = 'black'
w.add(left_eye2, 725, 235)
left_eye3 = GOval(25, 50)
left_eye3.color = 'black'
w.add(left_eye3, 740, 250)
left_eye4 = GOval(5, 10)
left_eye4.filled = True
left_eye4.fill_color = 'black'
left_eye4.color = 'black'
w.add(left_eye4, 750, 270)
# nose
nose = GOval(80, 52) # 610 351
nose.filled = True
nose.fill_color = 'DarkSeaGreen'
nose.color = 'black'
w.add(nose, 610, 347)
# mouse
for i in range(10):
mouse = GOval(50, 80)
mouse.filled = True
mouse.fill_color = 'navy'
mouse.color = 'navy'
w.add(mouse, 560 + 4 * i, 430 - i)
for i in range(100):
mouse = GOval(50, 80)
mouse.filled = True
mouse.fill_color = 'navy'
mouse.color = 'navy'
w.add(mouse, 600 + i, 420)
# tongue
for i in range(15):
tongue = GOval(50, 40)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 570 + 2 * i, 470 - i)
for i in range(10):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 455)
for i in range(25):
tongue = GOval(50, 30)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 475)
for i in range(50):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 650 + i, 455)
# hair
top_hair = GOval(330, 95)
top_hair.filled = True
top_hair.fill_color = 'navy'
top_hair.color = 'navy'
w.add(top_hair, 505, 25)
bangs = GPolygon()
bangs.add_vertex((510, 82))
bangs.add_vertex((620, 82))
bangs.add_vertex((560, 147))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((580, 98))
bangs.add_vertex((690, 98))
bangs.add_vertex((635, 155))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((650, 96))
bangs.add_vertex((770, 96))
bangs.add_vertex((710, 150))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((740, 85))
bangs.add_vertex((825, 85))
bangs.add_vertex((780, 148))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
for i in range(80): # rightside
side = GOval(40, 90)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 800 + i, 55 + i ** 1.2)
for i in range(100): # leftside
side = GOval(40, 40)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 500 - i, 60 + i ** 1.2)
# right_ear
right_ear = GOval(70, 130)
right_ear.filled = True
right_ear.fill_color = 'Steelblue'
right_ear.color = 'blue'
w.add(right_ear, 395, 250)
right_inear = GOval(50, 80)
right_inear.filled = True
right_inear.fill_color = 'royalblue'
right_inear.color = 'blue'
w.add(right_inear, 410, 290)
# left_ear
left_ear = GOval(70, 130)
left_ear.filled = True
left_ear.fill_color = 'Steelblue'
left_ear.color = 'blue'
w.add(left_ear, 880, 260)
left_inear = GOval(50, 80)
left_inear.filled = True
left_inear.fill_color = 'royalblue'
left_inear.color = 'blue'
w.add(left_inear, 890, 290)
# tears
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 525, 300)
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 750, 300)
#left tears
for i in range(0, 10, 2):
tear = GOval(15, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525 - 2* i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(21, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 515 + i, 400 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(18, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525, 500 + 10 * i)
#right tears
for i in range(0, 10, 2):
tear = GOval(5, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 790 + 2 * i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(11, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 808 - i, 410 + 10 * i)
#lines
line1 = GLine(525, 175, 575, 185)
w.add(line1)
line2 = GLine(575,185, 625, 270)
w.add(line2)
line3 = GLine(710, 255, 760, 170)
w.add(line3)
line4 = GLine(651, 400, 651, 420)
w.add(line4)
line5 = GLine(630, 520, 660, 520)
w.add(line5)
# Illuminati
tri = GPolygon()
tri.add_vertex((150, 20))
tri.add_vertex((-20, 280))
tri.add_vertex((320, 280))
tri.filled = True
tri.fill_color = 'green'
w.add(tri)
up_eye = GArc(200, 120, 0, 180)
up_eye.filled = True
up_eye.fill_color = 'darkgreen'
w.add(up_eye, 50, 150)
low_eye = GArc(200, 120, -12, -167)
low_eye.filled = True
low_eye.fill_color = 'darkgreen'
low_eye.color = 'darkgreen'
w.add(low_eye, 50, 145)
eye_ball = GOval(55, 55)
eye_ball.filled = True
eye_ball.fill_color = 'black'
w.add(eye_ball, 125, 150)
py = GLabel('PYTHON')
py.font = '-50'
w.add(py, 20, 280)
if __name__ == '__main__':
    # Draw the picture only when run as a script.
    main()
| 24.156109 | 114 | 0.579189 |
7bb1307068ac9567a6b6b9039165f859f8bd8998 | 127 | py | Python | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
] | 5 | 2021-01-12T02:53:07.000Z | 2022-03-02T17:58:18.000Z | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
] | null | null | null | tests/handlers/testhandlers/not_a_handler.py | bcurnow/magicband-reader | d8afa80648abc8954abd9c3cab8f6f6b9cb260ec | [
"Apache-2.0"
] | null | null | null | """ This module doesn't provide a register method and should be skipped. This ensures that the error handling logic works. """
| 63.5 | 126 | 0.76378 |
7bb15b935b3d0af4caae284ba8b64031d24bf414 | 3,196 | py | Python | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
] | null | null | null | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
] | null | null | null | ciri/modules/reddit.py | AmarnathCJD/Cirilla-Userbot | a580f2d3442ab7ebc4497aee7e381e6e220dbf93 | [
"MIT"
] | 2 | 2022-01-01T06:58:10.000Z | 2022-01-12T15:59:38.000Z | import json
import os
import subprocess
import requests
from bs4 import BeautifulSoup
from ciri import HelpStr
from ciri.utils import ciri_cmd, eor
# Register this module's command in the userbot's global help index
# (HelpStr is imported from the ciri package at the top of the file).
HelpStr.update(
    {
        "reddit": {
            "red(ddit)": {
                "Description": "Downloads the audio and video from a reddit post.",
                "Usage": "red(ddit <url>)",
            },
        }
    }
)
| 30.730769 | 129 | 0.54005 |
7bb4c8f4351e98128f6ae2e0b66016892643437c | 2,343 | py | Python | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
] | null | null | null | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
] | null | null | null | .ipynb_checkpoints/visuals-checkpoint.py | Serenitea/DSND-Capstone-LoL | 2a29132b5e513f9dde1b2afadbc9f28b00ae952d | [
"CNRI-Python"
] | null | null | null | import requests, json
import numpy as np
import importlib
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import pdb
import warnings
warnings.filterwarnings('ignore')
| 34.455882 | 82 | 0.600512 |
7bb571ec75fa6c41fe74464726a90fe46a7374f0 | 4,373 | py | Python | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
] | null | null | null | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
] | null | null | null | components/roode/__init__.py | mgernhard/Roode | 50727e0f46d2bfc73559eb5fc73984ca87acb174 | [
"Unlicense"
] | null | null | null | from re import I
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import CONF_ID, STATE_CLASS_MEASUREMENT, UNIT_EMPTY, UNIT_METER
# DEPENDENCIES = ["i2c"]
AUTO_LOAD = ["sensor", "binary_sensor", "text_sensor"]
MULTI_CONF = True
CONF_ROODE_ID = "roode_id"
roode_ns = cg.esphome_ns.namespace("roode")
Roode = roode_ns.class_("Roode", cg.PollingComponent)
CONF_ROI_HEIGHT = 'roi_height'
CONF_ROI_WIDTH = 'roi_width'
CONF_ADVISED_SENSOR_ORIENTATION = 'advised_sensor_orientation'
CONF_CALIBRATION = "calibration"
CONF_ROI_CALIBRATION = "roi_calibration"
CONF_INVERT_DIRECTION = "invert_direction"
CONF_MAX_THRESHOLD_PERCENTAGE = "max_threshold_percentage"
CONF_MIN_THRESHOLD_PERCENTAGE = "min_threshold_percentage"
CONF_MANUAL_THRESHOLD = "manual_threshold"
CONF_THRESHOLD_PERCENTAGE = "threshold_percentage"
CONF_RESTORE_VALUES = "restore_values"
CONF_I2C_ADDRESS = "i2c_address"
CONF_SENSOR_MODE = "sensor_mode"
CONF_MANUAL = "manual"
CONF_MANUAL_ACTIVE = "manual_active"
CONF_CALIBRATION_ACTIVE = "calibration_active"
CONF_TIMING_BUDGET = "timing_budget"
TYPES = [
CONF_RESTORE_VALUES, CONF_INVERT_DIRECTION,
CONF_ADVISED_SENSOR_ORIENTATION, CONF_I2C_ADDRESS
]
# Validation schema for this component's YAML configuration.
# `calibration:` and `manual:` are mutually exclusive modes
# (cv.Exclusive with shared group "mode"); inside `manual:`, the
# sensor_mode / roi_height / roi_width / manual_threshold options must be
# given together (cv.Inclusive with shared group "manual_mode").
CONFIG_SCHEMA = (cv.Schema({
    cv.GenerateID():
    cv.declare_id(Roode),
    cv.Optional(CONF_INVERT_DIRECTION, default='false'):
    cv.boolean,
    cv.Optional(CONF_RESTORE_VALUES, default='false'):
    cv.boolean,
    cv.Optional(CONF_ADVISED_SENSOR_ORIENTATION, default='true'):
    cv.boolean,
    cv.Optional(CONF_I2C_ADDRESS, default=0x29):
    cv.uint8_t,
    cv.Exclusive(
        CONF_CALIBRATION, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
    cv.Schema({
        cv.Optional(CONF_CALIBRATION_ACTIVE, default='true'):
        cv.boolean,
        cv.Optional(CONF_MAX_THRESHOLD_PERCENTAGE, default=85):
        cv.int_range(min=50, max=100),
        cv.Optional(CONF_MIN_THRESHOLD_PERCENTAGE, default=0):
        cv.int_range(min=0, max=100),
        cv.Optional(CONF_ROI_CALIBRATION, default='false'):
        cv.boolean,
    }),
    cv.Exclusive(
        CONF_MANUAL, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
    cv.Schema({
        cv.Optional(CONF_MANUAL_ACTIVE, default='true'):
        cv.boolean,
        cv.Optional(CONF_TIMING_BUDGET, default=10):
        cv.int_range(min=10, max=1000),
        cv.Inclusive(
            CONF_SENSOR_MODE,
            "manual_mode",
            f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
        ):
        cv.int_range(min=-1, max=2),
        cv.Inclusive(
            CONF_ROI_HEIGHT,
            "manual_mode",
            f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
        ):
        cv.int_range(min=4, max=16),
        cv.Inclusive(
            CONF_ROI_WIDTH,
            "manual_mode",
            f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
        ):
        cv.int_range(min=4, max=16),
        cv.Inclusive(
            CONF_MANUAL_THRESHOLD,
            "manual_mode",
            f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
        ):
        cv.int_range(min=40, max=4000),
    }),
}).extend(cv.polling_component_schema("100ms")))
| 35.266129 | 121 | 0.69426 |
7bb63dc88642b89c018c0dffd26e55f7b04e4fc6 | 7,767 | py | Python | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
] | 2 | 2019-04-18T14:37:05.000Z | 2020-10-25T02:47:26.000Z | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
] | null | null | null | GUI/mainUI.py | nipunsampath/Folder-Maker | e52ace87a08f477e6c105a0e14f85b7886a71f8c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Projects\Python\Folder Maker\GUI\main.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
# Manual test entry point: instantiate the generated UI on a bare main
# window and hand control to the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)  # exactly one QApplication per process
    MainWindow = QtWidgets.QMainWindow()
    # NOTE(review): Ui_MainWindow is defined in the portion of this
    # pyuic5-generated module that is not visible in this chunk.
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())  # propagate Qt's exit code to the shell
| 55.085106 | 135 | 0.709927 |
7bb80c3ecc1f81bebc7a34d9d8f2cc068b53480f | 1,632 | py | Python | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
] | 1 | 2019-05-01T18:24:49.000Z | 2019-05-01T18:24:49.000Z | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
] | null | null | null | LoanPandas/code.py | yogprabhu/ga-learner-dsmp-repo | eaf27f7598f767481b08be3999024fb56612a666 | [
"MIT"
] | null | null | null | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
# Load the raw loan dataset (``path`` is supplied by the grading platform)
# and split its columns by dtype.
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)
numerical_var = bank.select_dtypes(include='number')
print(numerical_var)
# code ends here

# --------------
# code starts here
# Drop the identifier column, report missing values, then impute with 0.
# NOTE(review): ``bank_mode`` is computed but never used — the exercise
# fills NaNs with the constant 0 rather than the column modes.
banks = bank.drop(columns='Loan_ID')
print(banks.isnull().sum())
bank_mode = banks.mode()
banks = banks.fillna(0)
print(banks.isna().sum())
# code ends here

# --------------
# Code starts here
# Mean loan amount broken down by gender / marital / employment status.
avg_loan_amount = pd.pivot_table(
    data=banks,
    index=['Gender', 'Married', 'Self_Employed'],
    values='LoanAmount',
    aggfunc=np.mean,
)
# code ends here

# --------------
# code starts here
# Approval percentages for self-employed vs. non-self-employed applicants.
approved = banks['Loan_Status'] == 'Y'
loan_approved_se = banks[(banks['Self_Employed'] == 'Yes') & approved].shape[0]
loan_approved_nse = banks[(banks['Self_Employed'] == 'No') & approved].shape[0]
Loan_Status = 614  # total number of loan applications in the dataset
percentage_se = (loan_approved_se / Loan_Status) * 100
percentage_nse = (loan_approved_nse / Loan_Status) * 100
# code ends here

# --------------
# code starts here
# Convert the loan term from months to years and count long (>= 25y) loans.
_ = banks.Loan_Amount_Term.iloc[0]
loan_term = banks['Loan_Amount_Term'].map(lambda months: int(months) / 12)
banks['Loan_Amount_Term'] = banks['Loan_Amount_Term'].map(lambda months: int(months) / 12)
big_loan_term = banks[banks['Loan_Amount_Term'] >= 25].shape[0]
# code ends here

# --------------
# code starts here
# Mean income and credit history grouped by approval status.
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby = banks.groupby(by='Loan_Status')
loan_groupby = loan_groupby[columns_to_show]
mean_values = loan_groupby.agg([np.mean])
# code ends here
| 22.666667 | 127 | 0.700368 |
7bb9a05e4b4df3445a16a9d49bf23b734a000bdc | 1,718 | py | Python | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | test/espnet2/tts/feats_extract/test_energy.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | import pytest
import torch
from espnet2.tts.feats_extract.energy import Energy
| 30.140351 | 88 | 0.661816 |
7bba3197cf6ebc84a1f3034725dd0f1b29fd1b82 | 4,699 | py | Python | squad/merge.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 89 | 2018-08-25T07:59:07.000Z | 2021-05-04T06:37:27.000Z | squad/merge.py | seominjoon/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 11 | 2018-09-28T17:33:27.000Z | 2019-11-27T23:34:45.000Z | squad/merge.py | uwnlp/piqa | e18f2189c93965c94655d5cc943dcecdc2c1ea57 | [
"Apache-2.0"
] | 10 | 2018-09-19T06:48:06.000Z | 2020-04-14T20:42:06.000Z | """Official merge script for PI-SQuAD v0.1"""
from __future__ import print_function
import os
import argparse
import json
import sys
import shutil
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
import numpy.linalg
# Command-line driver: load a SQuAD-format dataset and write out predictions
# computed from precomputed context/question embedding directories.
if __name__ == '__main__':
    squad_expected_version = '1.1'
    parser = argparse.ArgumentParser(description='Official merge script for PI-SQuAD v0.1')
    parser.add_argument('data_path', help='Dataset file path')
    parser.add_argument('context_emb_dir', help='Context embedding directory')
    parser.add_argument('question_emb_dir', help='Question embedding directory')
    parser.add_argument('pred_path', help='Prediction json file path')
    parser.add_argument('--sparse', default=False, action='store_true',
                        help='Whether the embeddings are scipy.sparse or pure numpy.')
    parser.add_argument('--metric', type=str, default='ip',
                        help='ip|l1|l2 (inner product or L1 or L2 distance)')
    parser.add_argument('--progress', default=False, action='store_true', help='Show progress bar. Requires `tqdm`.')
    args = parser.parse_args()
    with open(args.data_path) as dataset_file:
        dataset_json = json.load(dataset_file)
        # A version mismatch only warns on stderr; evaluation proceeds anyway.
        if dataset_json['version'] != squad_expected_version:
            print('Evaluation expects v-' + squad_expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    # NOTE(review): get_q2c and get_predictions are defined in the part of
    # this script not visible in this chunk.
    q2c = get_q2c(dataset)
    predictions = get_predictions(args.context_emb_dir, args.question_emb_dir, q2c, sparse=args.sparse,
                                  metric=args.metric, progress=args.progress)
    with open(args.pred_path, 'w') as fp:
        json.dump(predictions, fp)
| 38.516393 | 117 | 0.613109 |
7bba6288445870de13beac5ccea088e511b9306b | 3,918 | py | Python | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
] | null | null | null | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
] | null | null | null | src/passpredict/locations.py | samtx/pass-predictor | 6577f75cd7d64bd3c12a9512880d4b29c2682b4c | [
"MIT"
] | null | null | null | from functools import cached_property
from datetime import datetime
from math import degrees, radians, sin, cos
import numpy as np
from orbit_predictor import coordinate_systems
from .utils import get_timezone_from_latlon
from .time import make_utc
from ._time import datetime2mjd
from .solar import sun_pos_mjd
from ._rotations import elevation_at_rad
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
def sun_elevation(self, d: datetime) -> float:
"""
Computes elevation angle of sun relative to location. Returns degrees.
"""
d2 = make_utc(d)
mjd = datetime2mjd(d2)
return self._sun_elevation_mjd(mjd)
def is_sunlit(self, dt: datetime) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self.sun_elevation(dt)
return el > -6
def _is_sunlit_mjd(self, mjd: float) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self._sun_elevation_mjd(mjd)
return el > -6
| 29.238806 | 88 | 0.619704 |
7bbaeab63e6d9b82f2fcd904c0c52ba80c699e2f | 4,559 | py | Python | rl_baselines/evaluation/eval_post.py | anonymous-authors-2018/robotics-repo | 385d1f3b49f8d414ab90f53c6f06b56614ae83ba | [
"MIT"
] | 5 | 2019-08-21T22:57:21.000Z | 2021-01-01T21:15:26.000Z | rl_baselines/evaluation/eval_post.py | BillChan226/POAR-SRL-4-Robot | a6a8052e105369656d34fffc4f7ca4475dcc38df | [
"MIT"
] | null | null | null | rl_baselines/evaluation/eval_post.py | BillChan226/POAR-SRL-4-Robot | a6a8052e105369656d34fffc4f7ca4475dcc38df | [
"MIT"
] | 2 | 2019-11-26T11:41:12.000Z | 2021-08-30T16:00:27.000Z |
import subprocess
import numpy as np
import pickle
import argparse
import os
from rl_baselines.student_eval import allPolicy
from srl_zoo.utils import printRed, printGreen
from rl_baselines.evaluation.cross_eval_utils import EnvsKwargs, loadConfigAndSetup, policyEval,createEnv
# Command-line entry point: evaluate a saved policy checkpoint and persist
# the resulting reward next to the training logs.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Evaluation after training")
    parser.add_argument('--log-dir',type=str, default=''
                        ,help='RL algo to use')  # NOTE(review): help text says "RL algo" but the flag is a log directory
    parser.add_argument('--task-label', type=str, default='',
                        help='task to evaluate')
    parser.add_argument('--episode', type=str, default='',
                        help='evaluation for the policy saved at this episode')
    parser.add_argument('--policy-path', type=str, default='',
                        help='policy path')
    parser.add_argument('--seed', type=int, default=0,
                        help='policy path')  # NOTE(review): help text duplicated from --policy-path; should describe the seed
    # parse_known_args: tolerate extra flags injected by external launchers
    args, unknown = parser.parse_known_args()
    # NOTE(review): policyCrossEval and saveReward are not imported above —
    # they appear to live in the part of this module not visible here.
    reward, _ = policyCrossEval(args.log_dir, args.task_label, episode=args.episode, model_path=args.policy_path,
                                num_timesteps=251, seed=args.seed)
    saveReward(args.log_dir, reward, args.task_label, save_name='episode_eval.pkl')
| 33.77037 | 130 | 0.645317 |
7bbb8601ea2e62414cb9ab4019393f8898c93e86 | 6,304 | py | Python | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #! /usr/bin/env python
import os
os.system("make clean; make; \\rm *.log log.list")
############################################
#dir1='TriggerValidation_223_HLT'
#dir2='TriggerValidation_224_HLT'
#out='223_vs_224'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11"
#sufix2 = "_IDEALV11_v1"
#label1 = "LM1_223"
#label2 = "LM1_224"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre2_HLT'
#out='224_vs_300pre2'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEALV9"
#label1 = "LM1_223"
#label2 = "LM1_300pre2"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre6_HLT'
#out='224_vs_300pre6'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEAL_30x_v1"
#label1 = "LM1_223"
#label2 = "LM1_300pre6"
############################################
dir1='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
dir2='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
out='mc1_vs_mc2'
samples=['_HLT']
prefix1 = "DQM_V0001"
prefix2 = "DQM_V0001"
sufix1 = "_R000000001"
sufix2 = "_R000000001_2"
label1 = "HLT"
label2 = "HLT"
############################################
os.system('mkdir html/'+out)
#create html index page
os.system('cp html/template/index.html html/'+out+'/index.html')
#create the cover page
# Copy the cover template, substituting the two release names at the
# template's HTML marker comments.  ``with`` guarantees both handles are
# closed even if a read/write fails (the original relied on manual close()
# calls, which leak on error).
with open('html/template/beginning.html') as inputhtml:
    with open('html/'+out+'/cover.html', 'w') as outputhtml:
        for line in inputhtml:
            if line.find('<!-- Here python will write the name of first release -->') != -1:
                outputhtml.write(dir1)
            elif line.find('<!-- Here python will write the name of second release -->') != -1:
                outputhtml.write(dir2)
            else:
                outputhtml.write(line)
#create the menu page
os.system('cp html/template/menu_beginning.html html/'+out+'/menu.html')
# Render one menu entry per sample from the template and append each to the
# menu page.
for sample in samples:
    # ``with`` closes both handles even if a write fails (the original
    # closed them manually and would leak on error).
    with open('tmp.html', 'w') as tmp1, open('html/template/menu_body.html') as tmp2:
        for line in tmp2:
            # str.replace is a no-op when 'thissample' is absent, so the
            # original if/else branches collapse into a single write.
            tmp1.write(line.replace('thissample', sample))
    os.system('more tmp.html >> html/'+out+'/menu.html')
    os.system('rm tmp.html')
os.system('more html/template/menu_end.html >> html/'+out+'/menu.html')
#run the code for each sample
for sample in samples:
file1 = dir1+'/'+prefix1+sample+sufix1+'.root'
file2 = dir2+'/'+prefix2+sample+sufix2+'.root'
outputfile = 'outputfile.root'
#create html page for this sample
inputhtml = open('html/template/comp_beginning.html')
os.system('mkdir html/'+out+'/'+sample)
outputhtml = open('html/'+out+'/'+sample+'/comparison.html','w')
# add right version names in the html
for line in inputhtml:
if line.find('<!-- Here python will write the name of first release -->') != -1: outputhtml.write(dir1)
elif line.find('<!-- Here python will write the name of second release -->') != -1: outputhtml.write(dir2)
elif line.find('<!-- Here python will write the name of the model -->') != -1: outputhtml.write(sample)
elif line.find('thissample') != -1:
newline = line.replace('thissample',sample)
outputhtml.write(newline)
else: outputhtml.write(line)
continue
inputhtml.close()
outputhtml.close()
# run the comparison
os.system('./triggerComparison.x -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
# for old names
# os.system('./triggerComparison.x --oldL1names -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
os.system('mv HLTcomparison.log html/'+out+'/'+sample)
os.system('mv L1comparison.log html/'+out+'/'+sample)
# mv root file to the html directory
os.system('mv '+outputfile+' html/'+out+'/'+sample)
# add eff and residual pulls to the html
os.system('more html/template/comp.html >> html/'+out+'/'+sample+'/comparison.html')
# link the compatibility maps
os.system('more compatibility.html >> html/'+out+'/'+sample+'/comparison.html')
# create jpg files
os.system("ls *eps > listeps.log")
listeps = open("listeps.log")
for epsfile in listeps: os.system("convert \""+epsfile[:-1]+"\" \""+epsfile[:-4]+"jpg\"")
thefile = open('html/'+out+'/'+sample+'/comparison.html',"r+")
# link HLT files
#thefile.seek(0,2)
#thefile.write('<tr><td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('HLT') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td>\n')
# link L1 files
#thefile.write('<td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('L1') != -1): #this is a plot of a trigger path
# if(epsfile.find('A_') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td></tr>\n')
#thefile.close()
# write end of the comparison web page
os.system('more html/template/end.html >> html/'+out+'/'+sample+'/comparison.html')
# move all eps and jpg files in the proper directory
os.system('mv *jpg html/'+out+'/'+sample+'/')
os.system('mv *eps html/'+out+'/'+sample+'/')
continue
os.system('\\rm listeps.log')
| 34.075676 | 153 | 0.615641 |
7bbbafe111995f8ba65d3c92e9ed6a6bf9e416f8 | 2,275 | py | Python | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
] | null | null | null | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
] | null | null | null | log_in.py | lowerx/8March | 02948996ca43dddbc2a7ffad882a21b59fbea4ed | [
"MIT"
] | null | null | null | from curses import echo
from importlib.metadata import metadata
import sqlite3
import sys
import sqlalchemy
import os
from sqlalchemy import Column, Integer, String, ForeignKey, Table, MetaData, create_engine, engine_from_config
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from database import DataBase
# SQLAlchemy declarative base for the ORM models declared in this module.
Base = declarative_base()
# def db_connect(db_name):
#     """
#     Performs database connection using database settings from settings.py.
#     Returns sqlalchemy engine instance
#     """
# Module-level smoke test: open the test database and authenticate.
name = "test"
db = DataBase(name)
passphrase = "test"
# NOTE(review): ``Authentification`` is neither defined nor imported in the
# part of this file visible here — presumably declared in the omitted section.
AuthentificationPros = Authentification(db, name, passphrase)
| 28.4375 | 110 | 0.633407 |
7bbbb84b2ea6ce8e2867ca8c352a6bb6c21ce89f | 1,602 | py | Python | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | 3 | 2021-03-19T10:36:10.000Z | 2021-09-08T01:37:47.000Z | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django_cas.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from mecc.apps.years.models import UniversityYear
| 36.409091 | 83 | 0.637953 |
7bbbe09edfcf3321edef1198ab48f96f54dee63c | 2,695 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Datatypes/KlNetwerklinkMediumtype.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
| 72.837838 | 173 | 0.634879 |
7bbde3e95bb2349d1613a331043db076b94f2cfe | 1,617 | py | Python | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
] | null | null | null | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
] | null | null | null | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | [
"BSD-2-Clause"
] | null | null | null | import logging
import threading
import time
| 26.508197 | 73 | 0.641311 |
7bbdf574388c84658ffc5b1e989b4bad6ddb075e | 9,045 | py | Python | befh/exchanges/okex_spot.py | philsong/BitcoinExchangeFH | 3c45d4be2ea2a258f132d982f62f69d649e0b083 | [
"Apache-2.0"
] | 32 | 2017-12-15T07:30:11.000Z | 2020-07-16T10:15:18.000Z | befh/exchanges/okex_spot.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
] | null | null | null | befh/exchanges/okex_spot.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
] | 20 | 2017-11-09T15:28:39.000Z | 2019-12-10T01:02:57.000Z | from befh.ws_api_socket import WebSocketApiClient
from befh.market_data import L2Depth, Trade
from befh.exchanges.gateway import ExchangeGateway
from befh.instrument import Instrument
from befh.util import Logger
from befh.clients.sql_template import SqlClientTemplate
import time
import threading
import json
from functools import partial
from datetime import datetime
# Ad-hoc smoke test: connect the OKEX BCH/BTC spot feed handler and record
# market data through the SQL client template.
if __name__ == '__main__':
    exchange_name = 'Okex'
    instmt_name = 'BCHBTC'
    instmt_code = 'BCH_BTC'  # exchange-native instrument code
    instmt = Instrument(exchange_name, instmt_name, instmt_code)
    db_client = SqlClientTemplate()
    Logger.init_log()
    # NOTE(review): ExchGwOkexSpot is declared elsewhere in this module (not
    # visible in this chunk); start() presumably returns the feed thread —
    # confirm against the class definition.
    exch = ExchGwOkexSpot([db_client])
    td = exch.start(instmt)
| 38.326271 | 116 | 0.570591 |
7bbe575d89df5cb9077767131f1bcff71b4ea2bc | 191 | py | Python | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
] | 1 | 2019-05-16T15:25:22.000Z | 2019-05-16T15:25:22.000Z | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
] | null | null | null | cloudflare_ddns/__init__.py | joshuaavalon/cloudflare-ddns | 9a79a73dc6f723d2bd9afd26289a9c990744f4e7 | [
"Apache-2.0"
] | 1 | 2019-06-17T15:22:29.000Z | 2019-06-17T15:22:29.000Z | from cloudflare_ddns.configuration import Configuration, SiteConfiguration
from cloudflare_ddns.ddns import CloudflareDDNS
# Names re-exported as the package's public API (controls ``import *``).
__all__ = ["CloudflareDDNS", "Configuration", "SiteConfiguration"]
| 38.2 | 74 | 0.848168 |
7bbe5cef3d1aeca66fb6ca826edab503eb8c860b | 587 | py | Python | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/twisted.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | from .base import PipBaseRecipe
| 32.611111 | 60 | 0.524702 |
7bbf00877f721b0c24c4e63d13a17b9fddb98274 | 250 | py | Python | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | null | null | null | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | 1 | 2022-02-23T07:34:53.000Z | 2022-02-23T07:34:53.000Z | EXC/CW1/task3/combiner.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# combiner.py
import sys
def combine(stream):
    """Sum tab-separated "<word_count>\t<line_count>" records from *stream*.

    Blank or malformed records (missing tab, non-numeric fields) are
    skipped so a single bad line cannot abort the whole streaming task
    (the original crashed with ValueError on such input).
    Returns a (word_count, line_count) tuple.
    """
    word_count = 0
    line_count = 0
    for record in stream:
        try:
            words, lines = record.strip().split('\t')
            word_count += int(words)
            line_count += int(lines)
        except ValueError:
            # wrong field count or non-numeric field: skip the record
            continue
    return word_count, line_count


def main():
    """Hadoop-streaming combiner: aggregate mapper counts from stdin."""
    word_count, line_count = combine(sys.stdin)
    print("{0}\t{1}".format(word_count, line_count))


# Guarded so importing this module no longer consumes stdin as a side effect.
if __name__ == "__main__":
    main()
7bbf1685508e5466a589c9ca9ef370e0a3b9611c | 1,376 | py | Python | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | tests/exploratory/user_data/radish/steps.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | import re
from radish.stepregistry import step
from radish import when, then
from radish.terrain import world
| 35.282051 | 106 | 0.652616 |
7bbf1d84d1d1e722a857754d78ceb86118a7eadb | 3,462 | py | Python | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
] | 2 | 2018-10-29T09:37:47.000Z | 2019-11-28T14:11:12.000Z | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
] | null | null | null | django/core/views.py | andreyvpng/askme | 65139c347a6b80f0a660ca24d6dd864e4531903a | [
"Apache-2.0"
] | 2 | 2018-09-18T14:09:46.000Z | 2019-11-28T14:11:14.000Z | from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.urls import reverse_lazy
from django.urls.base import reverse
from django.views.generic import CreateView, DeleteView, DetailView, View
from .forms import AnswerForm, QuestionForm
from .models import Answer, Like, Question
# Module-level alias for the project's active user model (resolves the
# configured AUTH_USER_MODEL once at import time).
User = get_user_model()
| 27.696 | 77 | 0.662334 |
7bc0b829008737def4ec98f701aadecbf19f6fdd | 336 | py | Python | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
] | 1 | 2019-04-14T19:58:43.000Z | 2019-04-14T19:58:43.000Z | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
] | null | null | null | setup.py | lepture/pydouban | 5b67c9f6a206a2b21539fc28b3b8658947ae1904 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
# Distribution metadata for the pydouban package; installs the single
# ``pydouban`` package.
setup(
    name='pydouban',
    version='1.0.0',
    description='Lightweight Python Douban API Library',
    author='Marvour',
    author_email='marvour@gmail.com',
    license='BSD License',
    url='http://i.shiao.org/a/pydouban',
    packages=['pydouban'],
)
| 22.4 | 58 | 0.630952 |