hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
95dba8c035cf85f4ef8f4bc3e7a7c14c268076f1 | 1,797 | py | Python | engine/src/valet/engine/search/filters/cpu_filter.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/search/filters/cpu_filter.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/search/filters/cpu_filter.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T18:54:03.000Z | 2021-10-15T18:54:03.000Z | #
# -------------------------------------------------------------------------
# Copyright (c) 2019 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
| 30.982759 | 76 | 0.606566 |
95e0d6973a04cf649a738acb651bea0fa6b7dfcd | 996 | py | Python | Inflearn_SungKim/3.MultiVariableLinearRegression/multi-variableLinearregression.py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/3.MultiVariableLinearRegression/multi-variableLinearregression.py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | Inflearn_SungKim/3.MultiVariableLinearRegression/multi-variableLinearregression.py | shinhaha/tensorflow | 4647017a727985d64c5b0addee92f0ec516952c1 | [
"MIT"
] | null | null | null | import tensorflow as tf
x1_data=[73.,93.,89.,96.,73.]
x2_data=[80.,88.,91.,98.,66.]
x3_data=[75.,93.,90.,100.,70.]
y_data=[152.,185.,180.,196.,142.]
x1=tf.placeholder(tf.float32)
x2=tf.placeholder(tf.float32)
x3=tf.placeholder(tf.float32)
Y=tf.placeholder(tf.float32)
w1=tf.Variable(tf.random_normal([1]),name='weight1')
w2=tf.Variable(tf.random_normal([1]),name='weight2')
w3=tf.Variable(tf.random_normal([1]),name='weight1')
b=tf.Variable(tf.random_normal([1]),name='bias')
hypothesis=x1*w1+x2*w2+x3*w3+b
cost=tf.reduce_mean(tf.square(hypothesis-Y))
#minimize
optimizer=tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train=optimizer.minimize(cost)
#launch graph
sess=tf.Session()
#initialize
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val,hy_val,_=sess.run([cost,hypothesis,train],
feed_dict={x1:x1_data,x2:x2_data,x3:x3_data,Y:y_data})
if step%10==0:
print(step,"Cost:",cost_val,"\nPrediction:\n",hy_val) | 32.129032 | 85 | 0.712851 |
95e18e6281085104769aa15c1a8ef9828b449526 | 1,759 | py | Python | train_model.py | sanjjayrj/Chatbot-NLTK | 2000a3c640d6624984ca4ad2457557e937d4ae05 | [
"MIT"
] | 3 | 2020-11-17T12:14:37.000Z | 2021-08-14T05:46:38.000Z | train_model.py | sanjjayrj/Chatbot-NLTK | 2000a3c640d6624984ca4ad2457557e937d4ae05 | [
"MIT"
] | null | null | null | train_model.py | sanjjayrj/Chatbot-NLTK | 2000a3c640d6624984ca4ad2457557e937d4ae05 | [
"MIT"
] | null | null | null | import pandas as pd
import nltk
import re
from nltk.stem import wordnet
from nltk import pos_tag
from nltk import word_tokenize
from datetime import datetime
data = pd.read_csv('traindata.csv', encoding='utf-8')
train_counter = 0
if __name__ == '__main__':
print("Time now: " + str(datetime.now()))
print(data.info())
print("\nData Imported...")
print("----------------------------------------------------------------------------------------------------------")
data['lemmatized text'] = data['Content'].apply(text_normalize)
print("Training Data Lemmatized..., Time now: " + str(datetime.now()))
data.to_csv('traindata.csv', encoding='utf-8', index = False)
print(data['lemmatized text'])
print(type(data['lemmatized text']))
print("\nTraining data...")
print("----------------------------------------------------------------------------------------------------------") | 37.425532 | 119 | 0.557703 |
95e2b38a9c011b08bb379e05752137d534a0a8a9 | 1,848 | py | Python | tensor_twister/server.py | iamorphen/tensor_twister | d7936efa50cf0f7f3950ff4cbb0dd3fbac310ca9 | [
"MIT"
] | null | null | null | tensor_twister/server.py | iamorphen/tensor_twister | d7936efa50cf0f7f3950ff4cbb0dd3fbac310ca9 | [
"MIT"
] | null | null | null | tensor_twister/server.py | iamorphen/tensor_twister | d7936efa50cf0f7f3950ff4cbb0dd3fbac310ca9 | [
"MIT"
] | null | null | null | import io
import logging
import queue
from collections import namedtuple
import torch
import zmq
from tensor_twister.status_codes import StatusCode
UnpackedMessage = namedtuple("UnpackedMessage", ["tensor", "name", "ip"])
def serve(host: str, port: int):
"""
Listen for incoming tensor data from clients. Print comparisons between
pairs of tensor data.
Args:
host (str): The hostname to listen on; for example "localhost"
port (int): The port to listen on; for example 5555
"""
logger = logging.getLogger(__name__)
logger.debug("libzmq version: %s", zmq.zmq_version())
logger.debug(" pyzmq version: %s", zmq.__version__)
tensor_queue = queue.Queue()
context = zmq.Context()
socket = context.socket(zmq.REP)
server_uri = f"tcp://{host}:{port}"
logger.info("Attempting to listen on %s.", server_uri)
socket.bind(server_uri)
logger.info("Listening on %s.", server_uri)
while True:
# Get the next message, blocking.
message = socket.recv_pyobj()
try:
tensor = torch.load(message.tensor)
except Exception:
socket.send_pyobj(StatusCode.TensorLoadFailure)
continue
tensor_queue.put(UnpackedMessage(tensor, message.name, message.ip))
socket.send_pyobj(StatusCode.OK)
# If the queue has at least 2 messages, compare the first 2.
if tensor_queue.qsize() >= 2:
m1 = tensor_queue.get()
m2 = tensor_queue.get()
print(f"{m1.name}@{m1.ip}: tensor min: {m1.tensor.min()}; max: {m1.tensor.max()}; mean: {m1.tensor.mean()}")
print(f"{m2.name}@{m2.ip}: tensor min: {m2.tensor.min()}; max: {m2.tensor.max()}; mean: {m2.tensor.mean()}")
print(f"t1 and t2 are {'' if (m1.tensor == m2.tensor).all() else 'not'} equal")
| 33 | 120 | 0.635281 |
95e39518b618f5551cfe1c882c8f307a7a86e276 | 6,744 | py | Python | optunity/solvers/CMAES.py | xrounder/optunity | 019182ca83fe2002083cc1ac938510cb967fd2c9 | [
"BSD-3-Clause"
] | 401 | 2015-01-08T00:56:20.000Z | 2022-03-19T09:07:12.000Z | optunity/solvers/CMAES.py | xrounder/optunity | 019182ca83fe2002083cc1ac938510cb967fd2c9 | [
"BSD-3-Clause"
] | 67 | 2015-01-08T09:13:20.000Z | 2022-01-05T23:26:36.000Z | optunity/solvers/CMAES.py | xrounder/optunity | 019182ca83fe2002083cc1ac938510cb967fd2c9 | [
"BSD-3-Clause"
] | 94 | 2015-02-04T08:35:56.000Z | 2021-10-03T12:40:35.000Z | #! /usr/bin/env python
# Copyright (c) 2014 KU Leuven, ESAT-STADIUS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import functools
from .solver_registry import register_solver
from .util import Solver, _copydoc
from . import util
_numpy_available = True
try:
import numpy as np
except ImportError:
_numpy_available = False
_deap_available = True
try:
import deap
import deap.creator
import deap.base
import deap.tools
import deap.cma
import deap.algorithms
except ImportError:
_deap_available = False
except TypeError:
# this can happen because DEAP is in Python 2
# install needs to take proper care of converting
# 2 to 3 when necessary
_deap_available = False
# CMA_ES solver requires deap > 1.0.1
# http://deap.readthedocs.org/en/latest/examples/cmaes.html
if _deap_available and _numpy_available:
CMA_ES = register_solver('cma-es', 'covariance matrix adaptation evolutionary strategy',
['CMA-ES: covariance matrix adaptation evolutionary strategy',
' ',
'This method requires the following parameters:',
'- num_generations :: number of generations to use',
'- sigma :: (optional) initial covariance, default 1',
'- Lambda :: (optional) measure of reproducibility',
'- starting point: through kwargs'
' ',
'This method is described in detail in:',
'Hansen and Ostermeier, 2001. Completely Derandomized Self-Adaptation in Evolution Strategies. Evolutionary Computation'
])(CMA_ES)
| 36.852459 | 144 | 0.631821 |
95e555ee7266bd7c5e0f103c5c42eba12b36c67d | 622 | py | Python | DailyCoding/11.py | jason71319jason/Interview-solved | 42ca93a68475952753d185c325cb55c79e2e55e1 | [
"MIT"
] | 46 | 2019-10-14T01:21:35.000Z | 2022-01-08T23:55:15.000Z | DailyCoding/11.py | jason71319jason/Interview-solved | 42ca93a68475952753d185c325cb55c79e2e55e1 | [
"MIT"
] | 53 | 2019-10-03T17:16:43.000Z | 2020-12-08T12:48:19.000Z | DailyCoding/11.py | jason71319jason/Interview-solved | 42ca93a68475952753d185c325cb55c79e2e55e1 | [
"MIT"
] | 96 | 2019-10-03T18:12:10.000Z | 2021-03-14T19:41:06.000Z | """
This problem was asked by Twitter.
Implement an autocomplete system. That is, given a query string s and a set of all possible query strings, return all strings in the set that have s as a prefix.
For example, given the query string de and the set of strings [dog, deer, deal], return [deer, deal].
Hint: Try preprocessing the dictionary into a more efficient data structure to speed up queries.
"""
print(autocomplete_bruteforce(['dog','deer','deal'], 'de')) | 28.272727 | 161 | 0.705788 |
95e79ef92334e9854cdc295c02dc16e232f812ed | 4,974 | py | Python | pyblnet/blnet_parser.py | henfri/pyblnet | 0a3a59ea39ab569d4b59be5a918736dc238bcf13 | [
"MIT"
] | 3 | 2019-03-11T12:38:43.000Z | 2022-02-18T21:40:54.000Z | pyblnet/blnet_parser.py | henfri/pyblnet | 0a3a59ea39ab569d4b59be5a918736dc238bcf13 | [
"MIT"
] | 26 | 2018-10-15T10:57:21.000Z | 2021-03-23T18:35:06.000Z | pyblnet/blnet_parser.py | henfri/pyblnet | 0a3a59ea39ab569d4b59be5a918736dc238bcf13 | [
"MIT"
] | 7 | 2018-10-03T09:39:30.000Z | 2020-03-12T19:44:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 09.08.2018
This is basically a python port of of a script by berwinter
https://github.com/berwinter/uvr1611/blob/master/lib/backend/blnet-connection.inc.php
author: Niels
"""
import struct
from datetime import datetime
# Parser constant
# 1 bit
DIGITAL_ON = 1
DIGITAL_OFF = 0
# 8 bit
SPEED_ACTIVE = 0x80
SPEED_MASK = 0x1F
# 16 bit
INT16_POSITIVE_MASK = 0xFFFF
SIGN_BIT = 0x8000
POSITIVE_VALUE_MASK = 0x0FFF
TYPE_MASK = 0x7000
TYPE_NONE = 0x0000
TYPE_DIGITAL = 0x1000
TYPE_TEMP = 0x2000
TYPE_VOLUME = 0x3000
TYPE_RADIATION = 0x4000
TYPE_RAS = 0x7000
RAS_POSITIVE_MASK = 0x01FF
# 32 bit
INT32_MASK = 0xFFFFFFFF
INT32_SIGN = 0x80000000
| 29.784431 | 85 | 0.559912 |
95e8a73a4c141ad9d18c2ea514ffb13b8b700b03 | 3,568 | py | Python | app/routers/nodes.py | yamatteo/vue-fastapi-boilerplate | 5fa3de29a6e7ec4a8df9b3a4073f462307f62cb6 | [
"MIT"
] | 2 | 2020-03-11T02:58:44.000Z | 2020-03-27T16:00:25.000Z | app/routers/nodes.py | yamatteo/vue-fastapi-boilerplate | 5fa3de29a6e7ec4a8df9b3a4073f462307f62cb6 | [
"MIT"
] | 7 | 2021-03-10T07:59:29.000Z | 2022-02-26T23:46:17.000Z | app/routers/nodes.py | yamatteo/vue-fastapi-boilerplate | 5fa3de29a6e7ec4a8df9b3a4073f462307f62cb6 | [
"MIT"
] | 1 | 2020-03-11T02:58:48.000Z | 2020-03-11T02:58:48.000Z | from typing import Optional
from typing import List
from fastapi import APIRouter, Depends, Body
from models import User, Content, Node, Group, ExternalContent
from routers import get_current_user, admin_only
from schemas import NodeAdd, NodeEdit, NodeFind
#
router = APIRouter()
| 37.166667 | 121 | 0.690583 |
95eaee2ff327784e0d2a6285027d63a294194fa5 | 283 | py | Python | Programming_Maester/Phoneketmon.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | 1 | 2021-04-01T06:19:02.000Z | 2021-04-01T06:19:02.000Z | Programming_Maester/Phoneketmon.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | null | null | null | Programming_Maester/Phoneketmon.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | null | null | null | # https://programmers.co.kr/learn/courses/30/lessons/1845
print(solution([3,1,2,3]))
print(solution([3,3,3,2,2,4]))
print(solution([3,3,3,2,2,2])) | 25.727273 | 57 | 0.618375 |
95ec274e03ce16625cb08ace26548a81e6d7c252 | 3,903 | py | Python | ooobuild/lo/xml/crypto/sax/xsax_event_keeper.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/xml/crypto/sax/xsax_event_keeper.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/xml/crypto/sax/xsax_event_keeper.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.crypto.sax
import typing
from abc import abstractmethod
from ....uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ...sax.x_document_handler import XDocumentHandler as XDocumentHandler_9b90e28
from ...wrapper.xxml_element_wrapper import XXMLElementWrapper as XXMLElementWrapper_66c0107c
__all__ = ['XSAXEventKeeper']
| 37.171429 | 206 | 0.688189 |
95ed4a727fcf9707dcfd7fa3fc1e4e7848fbb44c | 992 | py | Python | neodroidagent/common/session_factory/vertical/procedures/training/sampling/rollout.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 8 | 2017-09-13T08:28:44.000Z | 2022-01-21T15:59:19.000Z | neodroidagent/common/session_factory/vertical/procedures/training/sampling/rollout.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 4 | 2019-03-22T13:49:16.000Z | 2019-03-25T13:49:39.000Z | neodroidagent/common/session_factory/vertical/procedures/training/sampling/rollout.py | gitter-badger/agent | 3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11 | [
"Apache-2.0"
] | 3 | 2017-09-13T08:31:38.000Z | 2021-11-09T11:22:27.000Z | from itertools import count
from tqdm import tqdm
from neodroid.environments.droid_environment import VectorUnityEnvironment
| 26.810811 | 82 | 0.626008 |
95eef20a68a045c35b991c4b9eef565e70a03766 | 17,995 | py | Python | sysevr/slicer/mapping.py | Saleh-Ibtasham/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | 1 | 2021-04-12T12:59:33.000Z | 2021-04-12T12:59:33.000Z | sysevr/slicer/mapping.py | Jokers-grin/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | sysevr/slicer/mapping.py | Jokers-grin/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import copy
import os
import string
import xlrd
import pickle
from .get_tokens import *
keywords_0 = ('auto', 'typedf', 'const', 'extern', 'register', 'static', 'volatile', 'continue', 'break',
'default', 'return', 'goto', 'else', 'case')
keywords_1 = ('catch', 'sizeof', 'if', 'switch', 'while', 'for')
keywords_2 = ('memcpy', 'wmemcpy', '_memccpy', 'memmove', 'wmemmove', 'memset', 'wmemset', 'memcmp', 'wmemcmp', 'memchr',
'wmemchr', 'strncpy', 'lstrcpyn', 'wcsncpy', 'strncat', 'bcopy', 'cin', 'strcpy', 'lstrcpy', 'wcscpy', '_tcscpy',
'_mbscpy', 'CopyMemory', 'strcat', 'lstrcat', 'fgets', 'main', '_main', '_tmain', 'Winmain', 'AfxWinMain', 'getchar',
'getc', 'getch', 'getche', 'kbhit', 'stdin', 'm_lpCmdLine', 'getdlgtext', 'getpass', 'istream.get', 'istream.getline',
'istream.peek', 'istream.putback', 'streambuf.sbumpc', 'streambuf.sgetc', 'streambuf.sgetn', 'streambuf.snextc', 'streambuf.sputbackc',
'SendMessage', 'SendMessageCallback', 'SendNotifyMessage', 'PostMessage', 'PostThreadMessage', 'recv', 'recvfrom', 'Receive',
'ReceiveFrom', 'ReceiveFromEx', 'CEdit.GetLine', 'CHtmlEditCtrl.GetDHtmlDocument', 'CListBox.GetText', 'CListCtrl.GetItemText',
'CRichEditCtrl.GetLine', 'GetDlgItemText', 'CCheckListBox.GetCheck', 'DISP_FUNCTION', 'DISP_PROPERTY_EX', 'getenv', 'getenv_s', '_wgetenv',
'_wgetenv_s', 'snprintf', 'vsnprintf', 'scanf', 'sscanf', 'catgets', 'gets', 'fscanf', 'vscanf', 'vfscanf', 'printf', 'vprintf', 'CString.Format',
'CString.FormatV', 'CString.FormatMessage', 'CStringT.Format', 'CStringT.FormatV', 'CStringT.FormatMessage', 'CStringT.FormatMessageV',
'vsprintf', 'asprintf', 'vasprintf', 'fprintf', 'sprintf', 'syslog', 'swscanf', 'sscanf_s', 'swscanf_s', 'swprintf', 'malloc',
'readlink', 'lstrlen', 'strchr', 'strcmp', 'strcoll', 'strcspn', 'strerror', 'strlen', 'strpbrk', 'strrchr', 'strspn', 'strstr',
'strtok', 'strxfrm', 'kfree', '_alloca')
keywords_3 = ('_strncpy*', '_tcsncpy*', '_mbsnbcpy*', '_wcsncpy*', '_strncat*', '_mbsncat*', 'wcsncat*', 'CEdit.Get*', 'CRichEditCtrl.Get*',
'CComboBox.Get*', 'GetWindowText*', 'istream.read*', 'Socket.Receive*', 'DDX_*', '_snprintf*', '_snwprintf*')
keywords_5 = ('*malloc',)
xread = xlrd.open_workbook('./sysevr/ml_models/function.xls')
keywords_4 = []
for sheet in xread.sheets():
col = sheet.col_values(0)[1:]
keywords_4 += col
#print keywords_4
typewords_0 = ('short', 'int', 'long', 'float', 'doubule', 'char', 'unsigned', 'signed', 'void' ,'wchar_t', 'size_t', 'bool')
typewords_1 = ('struct', 'union', 'enum')
typewords_2 = ('new', 'delete')
operators = ('+', '-', '*', '/', '=', '%', '?', ':', '!=', '==', '<<', '&&', '||', '+=', '-=', '++', '--', '>>', '|=')
function = '^[_a-zA-Z][_a-zA-Z0-9]*$'
variable = '^[_a-zA-Z][_a-zA-Z0-9(->)?(\.)?]*$'
number = '[0-9]+'
stringConst = '(^\'[\s|\S]*\'$)|(^"[\s|\S]*"$)'
constValue = ['NULL', 'false', 'true']
phla = '[^a-zA-Z0-9_]'
space = '\s'
spa = ''
| 38.866091 | 160 | 0.373159 |
95ef3fa428d1d310cb953139858993824869ba27 | 1,929 | py | Python | Flatipie/widgets/cards.py | zenqiwp/Flatipie | 441b1f120d78ec072b86d1c95b381e85fbe7661d | [
"MIT"
] | 11 | 2020-11-12T08:07:56.000Z | 2021-04-27T01:42:30.000Z | Flatipie/widgets/cards.py | zenqiwp/Flatipie | 441b1f120d78ec072b86d1c95b381e85fbe7661d | [
"MIT"
] | null | null | null | Flatipie/widgets/cards.py | zenqiwp/Flatipie | 441b1f120d78ec072b86d1c95b381e85fbe7661d | [
"MIT"
] | 2 | 2020-11-12T07:08:27.000Z | 2020-11-12T08:07:58.000Z |
"""
Copyright (c) 2020 Flatipie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5.QtWidgets import QGroupBox, QGraphicsDropShadowEffect
from PyQt5.QtCore import Qt, pyqtSignal, QObject, QPoint
| 37.096154 | 79 | 0.709176 |
95ef71f8c3f9102a164ab9d3fc0c343aa7cbaaa5 | 7,035 | py | Python | lib/datasets/vrd/to_pascal_format.py | sx14/open-relation.pytorch | 3fe52a0c6129a80abbc84df53903d13b7dea05d6 | [
"MIT"
] | 2 | 2019-04-21T01:45:01.000Z | 2020-03-11T07:09:18.000Z | lib/datasets/vrd/to_pascal_format.py | sx14/open-relation.pytorch | 3fe52a0c6129a80abbc84df53903d13b7dea05d6 | [
"MIT"
] | null | null | null | lib/datasets/vrd/to_pascal_format.py | sx14/open-relation.pytorch | 3fe52a0c6129a80abbc84df53903d13b7dea05d6 | [
"MIT"
] | null | null | null | import os
import shutil
import xml.dom.minidom
| 45.980392 | 87 | 0.729495 |
95f03d2ec095743360ac14d2a11b057617f86d87 | 4,880 | py | Python | stellar/cognition/planning.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | stellar/cognition/planning.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | stellar/cognition/planning.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | """
Contains path planning logic.
"""
import math
import numpy as np
from heapq import heappush, heappop
def heuristics(a, b):
"""Heuristics function using the Euclidian Distance."""
weight = 1.0
x1, y1 = a
x2, y2 = b
distance = np.sqrt(np.square(x2-x1) + np.square(y2-y1))
# distance = math.hypot(x1 - x2, y1 - y2)
return distance
| 28.87574 | 92 | 0.527664 |
95f0995fad2f82fbbbdccca26aa5605a1c0767e1 | 84 | py | Python | libcity/model/traffic_od_prediction/__init__.py | LibCity/Bigscity-LibCity-Docs-zh_CN | 2be639c3fe7d75727ade18f473d6f625900f73f2 | [
"Apache-2.0"
] | 5 | 2021-09-28T12:32:50.000Z | 2022-02-03T09:04:35.000Z | libcity/model/traffic_od_prediction/__init__.py | aptx1231/Bigscity-TrafficDL-Docs-zh_CN | 2be639c3fe7d75727ade18f473d6f625900f73f2 | [
"Apache-2.0"
] | null | null | null | libcity/model/traffic_od_prediction/__init__.py | aptx1231/Bigscity-TrafficDL-Docs-zh_CN | 2be639c3fe7d75727ade18f473d6f625900f73f2 | [
"Apache-2.0"
] | 1 | 2021-12-16T05:10:35.000Z | 2021-12-16T05:10:35.000Z | from libcity.model.traffic_od_prediction.GEML import GEML
__all__ = [
"GEML"
]
| 14 | 57 | 0.738095 |
95f29beeb0a5add129f6eb5d02625efa724d1d4e | 699 | py | Python | core/migrations/0008_auto_20151203_1519.py | rafaelbantu/timtec | 86c51b7440a044704ed33c3e752a6cf6b15ceae3 | [
"BSD-3-Clause"
] | 21 | 2015-09-23T14:07:16.000Z | 2022-02-18T01:35:18.000Z | core/migrations/0008_auto_20151203_1519.py | rafaelbantu/timtec | 86c51b7440a044704ed33c3e752a6cf6b15ceae3 | [
"BSD-3-Clause"
] | 178 | 2016-05-10T16:16:19.000Z | 2021-12-15T20:21:21.000Z | core/migrations/0008_auto_20151203_1519.py | rafaelbantu/timtec | 86c51b7440a044704ed33c3e752a6cf6b15ceae3 | [
"BSD-3-Clause"
] | 18 | 2015-10-23T13:28:17.000Z | 2021-09-22T13:08:28.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 27.96 | 132 | 0.635193 |
95f38c9fc1b89ab08b48f547bb8603c9adde90bb | 628 | py | Python | hipshare/lib/util.py | erg0dic/hipshare | 993f0edee7e9156b7154d578ef6a4e50cfcdd632 | [
"BSD-2-Clause"
] | 1 | 2015-11-03T19:33:44.000Z | 2015-11-03T19:33:44.000Z | hipshare/lib/util.py | erg0dic/hipshare | 993f0edee7e9156b7154d578ef6a4e50cfcdd632 | [
"BSD-2-Clause"
] | 1 | 2015-11-03T19:35:19.000Z | 2015-11-03T19:35:19.000Z | hipshare/lib/util.py | erg0dic/hipshare | 993f0edee7e9156b7154d578ef6a4e50cfcdd632 | [
"BSD-2-Clause"
] | null | null | null | import json
import logging
import sys
log = logging.getLogger(__name__)
| 17.942857 | 60 | 0.598726 |
95f41e13e20cbb1290f9018c568b80de61a76953 | 95 | py | Python | vistrails/tests/resources/test_upgrades_layout/__init__.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 83 | 2015-01-05T14:50:50.000Z | 2021-09-17T19:45:26.000Z | vistrails/tests/resources/test_upgrades_layout/__init__.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 254 | 2015-01-02T20:39:19.000Z | 2018-11-28T17:16:44.000Z | vistrails/tests/resources/test_upgrades_layout/__init__.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 40 | 2015-04-17T16:46:36.000Z | 2021-09-28T22:43:24.000Z | identifier = 'org.vistrails.test.upgrades_layout'
name ='test_upgrades_layout'
version = '0.3'
| 23.75 | 49 | 0.778947 |
95f557240d66982761b806e370f3acd7dca5c78f | 480 | py | Python | python_tips_three.py | obetron/python_tips | 9b6299db4f9dbc51638bba310deaf0c1dec759fe | [
"MIT"
] | null | null | null | python_tips_three.py | obetron/python_tips | 9b6299db4f9dbc51638bba310deaf0c1dec759fe | [
"MIT"
] | null | null | null | python_tips_three.py | obetron/python_tips | 9b6299db4f9dbc51638bba310deaf0c1dec759fe | [
"MIT"
] | null | null | null |
#for all read file code at the end of the code file should closed.
f = open('test.txt', 'r')
file_contents = f.read()
f.close()
words = file_contents.split(' ')
word_count = len(words)
print(word_count)
"""
OUTPUT:
3
"""
#for all the time use a file closing it is became a big job.
with open('test.txt', 'r') as f:
#python manage the file life cycle
file_contents = f.read()
words = file_contents.split(' ')
word_count = len(words)
print(word_count)
"""
OUTPUT:
3
"""
| 17.777778 | 66 | 0.675 |
95f79173711e478161ccc6939dc1c76c706f0ae5 | 1,192 | py | Python | albert/helper.py | vvvm23/albert | 548fa03832f7e64ab79f2dfa16aa5ac42469333d | [
"MIT"
] | 1 | 2021-06-10T10:54:59.000Z | 2021-06-10T10:54:59.000Z | albert/helper.py | vvvm23/albert | 548fa03832f7e64ab79f2dfa16aa5ac42469333d | [
"MIT"
] | null | null | null | albert/helper.py | vvvm23/albert | 548fa03832f7e64ab79f2dfa16aa5ac42469333d | [
"MIT"
] | null | null | null | import torch
from einops import rearrange
"""
@openai - VD-VAE
https://github.com/openai/vdvae/blob/main/vae_helpers.py
Nice helper module as calling super.__init__() gets annoying
"""
"""
@lucidrains - Phil Wang (nystrom-attention)
https://github.com/lucidrains/nystrom-attention/blob/main/nystrom_attention/nystrom_attention.py
"""
| 27.090909 | 100 | 0.613255 |
95f8d504586e0cc5968ca2a0c621d00c07ae2c40 | 2,078 | py | Python | p2_mahjong/utils.py | yata0/Mahjong | 764cd607df715b879f3f8a54b6def55e0b7d4706 | [
"MIT"
] | null | null | null | p2_mahjong/utils.py | yata0/Mahjong | 764cd607df715b879f3f8a54b6def55e0b7d4706 | [
"MIT"
] | null | null | null | p2_mahjong/utils.py | yata0/Mahjong | 764cd607df715b879f3f8a54b6def55e0b7d4706 | [
"MIT"
] | null | null | null | # coding=utf-8
import sys
import numpy as np
from p2_mahjong.card import MahjongCard as Card
log_head = "utils.py"
CARD_USED_TYPE = ['characters', 'green', 'red', 'white', 'east', 'west', 'north', 'south',
'spring', 'summer', 'autumn', 'winter', 'mei', 'lan', 'zhu', 'ju']
card_encoding_dict = {}
card_id = 0
DIC_CHOW = {}
character_list = []
wind_list = []
dragon_list = []
card_used = {}
for _type in ['bamboo', 'characters', 'dots']:
for _trait in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
card = _type+"-"+_trait
card_encoding_dict[card] = card_id
DIC_CHOW[card_id] = 1
if _type in ['characters']:
card_used[card_id] = 1
character_list.append(card_id)
card_id += 1
for _trait in ['green', 'red', 'white']:
card = 'dragons-'+_trait
card_encoding_dict[card] = card_id
if _trait in CARD_USED_TYPE:
card_used[card_id] = 1
dragon_list.append(card_id)
card_id += 1
for _trait in ['east', 'west', 'north', 'south']:
card = 'winds-'+_trait
card_encoding_dict[card] = card_id
if _trait in CARD_USED_TYPE:
card_used[card_id] = 1
wind_list.append(card_id)
card_id += 1
for _trait in ['spring', 'summer', 'autumn', 'winter', 'mei', 'lan', 'zhu', 'ju']:
card = 'flowers-'+_trait
card_encoding_dict[card] = card_id
if _trait in CARD_USED_TYPE:
card_used[card_id] = 1
card_id += 1
card_decoding_dict = {card_encoding_dict[key]: key for key in card_encoding_dict.keys()}
| 29.685714 | 90 | 0.590472 |
95fa5390eed432169e5e44214698604b6c85fcde | 1,062 | py | Python | Chapter 01/int_sqrt.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | [
"MIT"
] | 1 | 2021-05-14T19:53:41.000Z | 2021-05-14T19:53:41.000Z | Chapter 01/int_sqrt.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | [
"MIT"
] | null | null | null | Chapter 01/int_sqrt.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | [
"MIT"
] | null | null | null | '''
class Solution:
def mySqrt(self,x) :
# Base cases
if (x == 0 or x == 1) :
return x
# Do Binary Search for integer square root
start = 1
end = x
while (start <= end) :
mid = (start + end) // 2
# If x is a perfect square
if (mid*mid == x) :
return mid
# when mid^2 is smaller than x, check if (mid+1)^2 >x
if (mid * mid < x) :
if (mid+1)*(mid+1) > x:return mid
start = mid + 1
else :
# If mid*mid is greater than x
end = mid-1
'''
sol=Solution()
for i in range(1,10):
print(i,sol.mySqrt(i))
| 24.136364 | 62 | 0.415254 |
95ff75475d347ef322808cfa526e253df07b5f81 | 13,517 | py | Python | meg_runtime/ui/manager.py | MultimediaExtensibleGit/Runtime | ba2e469666163177034e44077b02378dfc6649c9 | [
"MIT"
] | null | null | null | meg_runtime/ui/manager.py | MultimediaExtensibleGit/Runtime | ba2e469666163177034e44077b02378dfc6649c9 | [
"MIT"
] | 5 | 2020-03-24T19:59:38.000Z | 2020-04-22T03:44:43.000Z | meg_runtime/ui/manager.py | MultimediaExtensibleGit/Runtime | ba2e469666163177034e44077b02378dfc6649c9 | [
"MIT"
] | 2 | 2020-03-13T18:35:46.000Z | 2020-04-11T20:19:20.000Z | """MEG UI Manager
"""
import pkg_resources
from PyQt5 import QtCore, QtWidgets, QtGui, uic
from meg_runtime.config import Config
from meg_runtime.logger import Logger
from meg_runtime.app import App
| 41.719136 | 134 | 0.600059 |
95ffe673bc040b87ba5fb46405be623006c98d02 | 1,459 | py | Python | tests/scripts/test_s_rs_raw_shapes.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 14 | 2018-02-23T10:03:45.000Z | 2022-03-03T13:59:30.000Z | tests/scripts/test_s_rs_raw_shapes.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 59 | 2017-02-22T15:03:30.000Z | 2020-12-16T12:26:17.000Z | tests/scripts/test_s_rs_raw_shapes.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 5 | 2017-08-22T11:31:42.000Z | 2020-06-24T18:30:12.000Z | """testing
"""
from energy_demand.scripts import s_rs_raw_shapes
import numpy as np
| 31.717391 | 70 | 0.437286 |
2501aa9e0452052b19ad9fe91a29c5a969b9d03e | 1,935 | py | Python | release/davis16/evaluate.py | MSiam/segment-any-moving | 82cb782867d866d2f4eb68230edb75f613e15a02 | [
"Apache-2.0"
] | 70 | 2019-09-16T17:55:55.000Z | 2022-03-07T00:26:53.000Z | release/davis16/evaluate.py | MSiam/segment-any-moving | 82cb782867d866d2f4eb68230edb75f613e15a02 | [
"Apache-2.0"
] | 9 | 2019-09-30T09:15:11.000Z | 2021-07-21T11:33:13.000Z | release/davis16/evaluate.py | MSiam/segment-any-moving | 82cb782867d866d2f4eb68230edb75f613e15a02 | [
"Apache-2.0"
] | 5 | 2019-09-25T05:14:37.000Z | 2021-07-08T20:13:47.000Z | import argparse
import logging
import yaml
from pathlib import Path
from script_utils.common import common_setup
from release.davis16.compute_flow import link_splits
from release.helpers.misc import msg, subprocess_call
# Script entry point; main() is defined elsewhere in this module.
if __name__ == "__main__":
    main()
| 31.209677 | 74 | 0.632041 |
2503cb791f9ad674e778396da993788db1fa44bb | 4,712 | py | Python | qq/mention.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 40 | 2021-12-07T02:18:14.000Z | 2022-03-28T13:14:16.000Z | qq/mention.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 2 | 2021-12-12T17:34:29.000Z | 2021-12-17T04:43:03.000Z | qq/mention.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 5 | 2021-12-10T11:17:41.000Z | 2022-03-05T13:53:50.000Z | # The MIT License (MIT)
# Copyright (c) 2021-present foxwhite25
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
from typing import Type, TypeVar, List, TYPE_CHECKING, Any, Union
__all__ = (
'AllowedMentions',
)
if TYPE_CHECKING:
from .types.message import AllowedMentions as AllowedMentionsPayload
from .member import Member
from .role import Role
default: Any = _FakeBool()
A = TypeVar('A', bound='AllowedMentions')
| 35.164179 | 102 | 0.655136 |
2504849dd9ac056161c71352758bc395f3e09c7d | 6,395 | py | Python | cupcake/editor/autocomplete/itemkinds.py | billyeatcookies/cupcake | 2f2d1d5f8a1a454e50283547cf433cc82d1825d6 | [
"MIT"
] | 3 | 2022-03-29T12:55:24.000Z | 2022-03-30T17:06:11.000Z | cupcake/editor/autocomplete/itemkinds.py | billyeatcookies/Cupcake | 2f2d1d5f8a1a454e50283547cf433cc82d1825d6 | [
"MIT"
] | null | null | null | cupcake/editor/autocomplete/itemkinds.py | billyeatcookies/Cupcake | 2f2d1d5f8a1a454e50283547cf433cc82d1825d6 | [
"MIT"
] | null | null | null | import tkinter as tk
| 81.987179 | 120 | 0.854887 |
2505a01273ca3c6e0204f8f0d9964f3bd464761f | 1,125 | py | Python | register/migrations/0001_initial.py | duks500/Nilestone-Project | 6b45ac21f9d9e7a4839d6f616711cbe8daa6e051 | [
"bzip2-1.0.6"
] | null | null | null | register/migrations/0001_initial.py | duks500/Nilestone-Project | 6b45ac21f9d9e7a4839d6f616711cbe8daa6e051 | [
"bzip2-1.0.6"
] | null | null | null | register/migrations/0001_initial.py | duks500/Nilestone-Project | 6b45ac21f9d9e7a4839d6f616711cbe8daa6e051 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 3.0 on 2020-03-29 14:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 37.5 | 165 | 0.623111 |
250a44eb50bdd484b59b76e217165e7deeb8a326 | 9,686 | py | Python | utils/iwr6843_utils/parse_tlv.py | ApocalyVec/mGesf | 21e0bf37a9d11a3cdde86a8d54e2f6c6a2211ab5 | [
"MIT"
] | 18 | 2020-06-02T11:21:47.000Z | 2022-03-25T08:16:57.000Z | utils/iwr6843_utils/parse_tlv.py | ApocalyVec/mGesf | 21e0bf37a9d11a3cdde86a8d54e2f6c6a2211ab5 | [
"MIT"
] | 4 | 2020-06-20T13:53:44.000Z | 2021-09-11T22:58:21.000Z | utils/iwr6843_utils/parse_tlv.py | ApocalyVec/mGesf | 21e0bf37a9d11a3cdde86a8d54e2f6c6a2211ab5 | [
"MIT"
] | 6 | 2020-04-23T21:30:17.000Z | 2021-08-03T19:59:12.000Z | import struct
import sys
import math
import numpy as np
#
# TODO 1: (NOW FIXED) Find the first occurrence of magic and start from there
# TODO 2: Warn if we cannot parse a specific section and try to recover
# TODO 3: Remove error at end of file if we have only fragment of TLV
#
def parseRDheatmap(data, tlvLength, range_bins, rm_clutter=True):
"""
range bins times doppler bins times 2, doppler bins = chirps/ frame divided by num of antennas TX (3)
#default chirps per frame is (128/3) = 42 * 2 * 256
the call to replace_left_right mirror-flips left and right after reshaping.
replace_left_right is equivalent to this line from mmWave.js in the visualizer code
# rangeDoppler = rangeDoppler.slice((rangeDoppler.length + 1) / 2).concat(
# rangeDoppler.slice(0, (rangeDoppler.length + 1) / 2));
:param range_bins:
:param data: the incoming byte stream to be interpreted as range-doppler heatmap/profile
:param tlvLength:
:return:
"""
doppler_bins = (tlvLength / 2) / range_bins
rd_heatmap = struct.unpack(str(int(range_bins * doppler_bins)) + 'H', data[:tlvLength])
rd_heatmap = np.reshape(rd_heatmap, (int(range_bins), int(doppler_bins)))
overall_mean = np.mean(rd_heatmap)
if rm_clutter:
rd_heatmap = np.array([row - np.mean(row) for row in rd_heatmap])
return replace_left_right(rd_heatmap)
def parseAziheatmap(data, tlvLength, range_bins):
"""
:param range_bins:
:param data: the incoming byte stream to be interpreted as range-doppler heatmap/profile
:param tlvLength:
:return:
"""
# range_bins = 256
azi_bins = (tlvLength / 2) / range_bins
azi_heatmap = struct.unpack(str(int(range_bins * azi_bins)) + 'H', data[:tlvLength])
# azi_heatmap = [chg_val(x) for x in azi_heatmap]
azi_heatmap = np.reshape(azi_heatmap, (int(range_bins), int(azi_bins)))
# use the default order of 3 Tx's and ordering is TX0, TX1, TX2
row_indices = [7, 5, 11, 9]
qrows = 4
qcols = range_bins
rowSizeBytes = 48
q = data[:tlvLength]
qq = []
for col in range(qcols):
real = []
img = []
for row in range(qrows):
index = col * rowSizeBytes + 4 * row_indices[row]
real.append(q[index + 1] * 256 + q[index])
img.append(q[index + 3] * 256 + q[index + 2])
real = [chg_val(x) for x in real]
img = [chg_val(x) for x in img]
# convert to complex numbers
data = np.array([real, img]).transpose()
data = np.pad(data, ((0, 60), (0, 0)), 'constant', constant_values=0)
data = data[..., 0] + 1j * data[..., 1]
transformed = np.fft.fft(data)
# take the magnitude
transformed = np.absolute(transformed)
qq.append(np.concatenate((transformed[int(len(transformed) / 2):], transformed[:int(len(transformed) / 2)])))
qq = np.array(qq)
return qq
negative_rtn = False, None, None, None, None, None
def decode_iwr_tlv(in_data):
"""
Must disable range profile for the quick RD heatmap to work, this way the number of range bins will be be calculated
from the absent range profile. You can still get the range profile by inferring it from the RD heatmap
:param in_data:
:return: if no detected point at this frame, the detected point will be an empty a
"""
magic = b'\x02\x01\x04\x03\x06\x05\x08\x07'
header_length = 36
offset = in_data.find(magic)
data = in_data[offset:]
if len(data) < header_length:
return negative_rtn
try:
data_magic, version, length, platform, frameNum, cpuCycles, numObj, numTLVs = struct.unpack('Q7I',
data[
:header_length])
except struct.error:
print("Improper TLV structure found: ", (data,))
return negative_rtn
# print("Packet ID:\t%d "%(frameNum))
# print("Version:\t%x "%(version))
# print("Data Len:\t\t%d", length)
# print("TLV:\t\t%d "%(numTLVs))
# print("Detect Obj:\t%d "%(numObj))
# print("Platform:\t%X "%(platform))
if version >= 50462726 and len(data) >= length:
# if version > 0x01000005 and len(data) >= length:
try:
sub_frame_num = struct.unpack('I', data[36:40])[0]
header_length = 40
# print("Subframe:\t%d "%(subFrameNum))
pending_bytes = length - header_length
data = data[header_length:]
detected_points = None
range_profile = None
rd_heatmap = None
azi_heatmap = None
range_bins = 8
statistics = None
for i in range(numTLVs):
tlvType, tlvLength = tlvHeaderDecode(data[:8])
data = data[8:]
if tlvType == 1:
# print('Outputting Points')
detected_points = parseDetectedObjects(data, numObj,
tlvLength) # if no detected points, tlvType won't have 1
elif tlvType == 2:
# the range bins is modified in the range profile is enabled
range_profile = parseRangeProfile(data, tlvLength)
elif tlvType == 4:
# resolving static azimuth heatmap
pass
elif tlvType == 5:
# try:
# assert range_bins
# except AssertionError:
# raise Exception('Must enable range-profile while enabling range-doppler-profile, in order to'
# 'interpret the number of range bins')
rd_heatmap = parseRDheatmap(data, tlvLength, range_bins)
elif tlvType == 6:
# TODO why is the states' TLV not present?
interProcess, transmitOut, frameMargin, chirpMargin, activeCPULoad, interCPULoad = parseStats(data)
pass
elif tlvType == 7:
pass
elif tlvType == 8:
# resolving static azimuth-elevation heatmap
try:
azi_heatmap = parseAziheatmap(data, tlvLength, range_bins)
except:
print('bad azimuth')
azi_heatmap = None
pass
elif tlvType == 9: # only for AoP EV2
pass
else:
# print("Unidentified tlv type %d" % tlvType, '. Its len is ' + str(tlvLength))
n_offset = data.find(magic)
if n_offset != offset and n_offset != -1:
print('New magic found, discarding previous frame with unknown tlv')
data = data[n_offset:]
return True, data, detected_points, range_profile, rd_heatmap, azi_heatmap
data = data[tlvLength:]
pending_bytes -= (8 + tlvLength)
data = data[pending_bytes:] # data that are left
# infer range profile from heatmap is the former is not enabled
if range_profile is None and rd_heatmap is not None and len(rd_heatmap) > 0:
range_profile = rd_heatmap[:, 0]
return True, data, detected_points, range_profile, rd_heatmap, azi_heatmap
except struct.error as se:
print('Failed to parse tlv message, type = ' + str(tlvType) + ', error: ')
print(se)
pass
return negative_rtn
if __name__ == "__main__":
    # Ad-hoc smoke test: read a captured raw byte dump and align it to the
    # first magic word. NOTE(review): hard-coded absolute Windows path.
    magic = b'\x02\x01\x04\x03\x06\x05\x08\x07'
    fileName = 'D:/PycharmProjects/mmWave_gesture_iwr6843/test_data2.dat'
    rawDataFile = open(fileName, "rb")
    rawData = rawDataFile.read()
    rawDataFile.close()
    offset = rawData.find(magic)
    rawData = rawData[offset:]
    # for i in range(len(rawData/36))
    #
    # for length, frameNum in tlvHeader(rawData):
    #     print
| 39.056452 | 120 | 0.58218 |
250b9e3749e20788bc502eca2699fe190f414709 | 11,285 | py | Python | shape_generator/helpers.py | MarkusPic/SWMM_xsections_shape_generator | daf19c9508f3727e8cd12c450485f7f21f3e30ed | [
"MIT"
] | null | null | null | shape_generator/helpers.py | MarkusPic/SWMM_xsections_shape_generator | daf19c9508f3727e8cd12c450485f7f21f3e30ed | [
"MIT"
] | null | null | null | shape_generator/helpers.py | MarkusPic/SWMM_xsections_shape_generator | daf19c9508f3727e8cd12c450485f7f21f3e30ed | [
"MIT"
] | 1 | 2020-12-29T13:02:28.000Z | 2020-12-29T13:02:28.000Z | from io import StringIO
from os import path, listdir, remove
from math import radians, tan, cos, pi, atan, sin
from pandas import read_csv
import sympy as sy
import numpy as np
# these variables are used to solve symbolic mathematical equations
# x is the control variable over the height ... max(x) = H_cross_section
x = sy.Symbol('x', real=True, positive=True)
accuracy = 10
def csv(txt, comment=None):
    """
    Read the string in txt as a csv table and return the content as a DataFrame.

    The first csv column is used as the (string) index; rows without an index
    label are dropped.

    Args:
        txt (str): content of the csv table
        comment (str): character marking comment lines (passed to pandas)

    Returns:
        pandas.DataFrame: table with the first csv column as string index
    """
    df = read_csv(StringIO(txt), index_col=0, skipinitialspace=True, skip_blank_lines=True, comment=comment)
    # Drop rows with no index label, then normalise the index to strings.
    df = df[df.index.notnull()].copy()
    df.index = df.index.astype(str)
    return df
def to_xs_dict(txt, comment=None):
    """
    Read the string in txt as a csv table and return the content as a dict.

    The first non-empty, non-comment line is the header; its first cell is
    ignored and the remaining column names are stored under the key '_names'.
    Every following line maps its first cell (the profile label) to the list
    of its remaining values converted with to_num.

    Args:
        txt (str): content of the csv table
        comment (str): prefix marking comment lines to skip

    Returns:
        dict: column names under '_names' plus one entry per profile label
    """
    di = dict()
    names = []
    for line in txt.split('\n'):
        if line == '':
            continue
        elif isinstance(comment, str) and line.startswith(comment):
            continue
        elif not names:
            # Header row: drop the first cell, keep the column names.
            names = [n.strip() for n in line.split(',')[1:]]
            di['_names'] = names
        else:
            name, *values = [n.strip() for n in line.split(',')]
            di[name] = [to_num(v) for v in values]
    return di
def deg2slope(degree):
    '''
    convert an angle in degrees to a slope (:math:`\\Delta x / \\Delta y`)
    Args:
        degree (float): angle in degree
    Returns:
        float: slope of the angle
    .. figure:: images/slope.gif
        :align: center
        :alt: slope
        :figclass: align-center
        Slope
    '''
    angle = radians(degree)
    slope = tan(angle)
    return slope
def channel_end(r, end_degree):
    '''
    get the vertical end of the channel based on the radius of the channel and an end angle
    Args:
        r (float): radius of the channel
        end_degree (float): end angle in degree
    Returns:
        float: height of the channel when the circle reaches a certain angle
    .. figure:: images/channel_end.gif
        :align: center
        :alt: channel end
        :figclass: align-center
        Channel end
    '''
    # Height of a circular segment: r * (1 - cos(angle)).
    end_angle = radians(end_degree)
    return r * (1 - cos(end_angle))
def sqrt(i):
    """ Return the square root of i (equivalent to i ** (1 / 2)). """
    return i ** 0.5
def combine_input_files(shape_path, delete_original=False):
    '''combine all generated shape text files to a single inp-like text file

    When running the :func:`shape_generator.shape_generator.Profile.input_file` function, a .txt file will be created.
    Those txt files will be combined to a single file ('all_shapes.txt') with this function.
    This makes it easier to import all shapes to the .inp file.

    Args:
        shape_path (str): path where the shapes are stored
        delete_original (bool): whether to delete the original single files
    '''
    # NOTE: this module only does ``from os import path, listdir, remove`` --
    # the previous ``os.path.join`` calls raised NameError because ``os``
    # itself was never imported; ``path.join`` is used instead.
    with open(path.join(shape_path, 'all_shapes.txt'), 'w') as outfile:
        for fname in listdir(shape_path):
            if not fname.endswith('_shape.txt'):
                continue
            in_fn = path.join(shape_path, fname)
            with open(in_fn) as infile:
                outfile.write(infile.read())
                outfile.write('\n\n')
            if delete_original:
                remove(in_fn)
    print('Files are combined and originals {}deleted.'.format('' if delete_original else 'NOT '))
####################################################################################################################
####################################################################################################################
####################################################################################################################
####################################################################################################################
####################################################################################################################
| 25.823799 | 118 | 0.517767 |
250eb809dd09ad7a9b6aa51c271e231f078546da | 1,772 | py | Python | bunq/sdk/util/util.py | mwiekens/sdk_python | 9333636083bc63dca4353e8f497588f57617efec | [
"MIT"
] | null | null | null | bunq/sdk/util/util.py | mwiekens/sdk_python | 9333636083bc63dca4353e8f497588f57617efec | [
"MIT"
] | null | null | null | bunq/sdk/util/util.py | mwiekens/sdk_python | 9333636083bc63dca4353e8f497588f57617efec | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
import socket
import requests
from bunq.sdk.context.api_context import ApiContext, ApiEnvironmentType
from bunq.sdk.exception.bunq_exception import BunqException
from bunq.sdk.http.api_client import ApiClient
from bunq.sdk.model.generated import endpoint
from bunq.sdk.model.generated.endpoint import SandboxUser
__UNIQUE_REQUEST_ID = "uniqueness-is-required"
__FIELD_API_KEY = "ApiKey"
__INDEX_FIRST = 0
__FIELD_RESPONSE = "Response"
__ENDPOINT_SANDBOX_USER = "sandbox-user"
_ERROR_COULD_NOT_CREATE_NEW_SANDBOX_USER = "Could not create new sandbox user."
| 34.076923 | 79 | 0.744921 |
2510cbb743f1f29e0bb13e1ae7ff3435645c3b4d | 184 | py | Python | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | import mysql.connector as mydb
conn = mydb.connect(
host='mysql_container',
port='3306',
user='docker',
password='docker',
database='my_db'
)
conn.is_connected()
| 15.333333 | 30 | 0.657609 |
2511753f88ea48953fbf7d9fff0197ffc5356c2e | 752 | py | Python | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | from django.db import models
| 22.117647 | 60 | 0.599734 |
25132e1264d30cca913fe293f3805c8d79177d9b | 2,201 | py | Python | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe
from frappe import _
from datetime import datetime, timedelta, date, time
from frappe.utils import getdate, get_time, flt, now_datetime
from frappe.utils import escape_html
from frappe import throw, msgprint, _
| 33.348485 | 116 | 0.606997 |
2513a6b22c946cb8b820c0695cdd317c638f6bf0 | 647 | py | Python | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | null | null | null | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | 10 | 2021-07-30T14:39:05.000Z | 2021-07-30T14:39:07.000Z | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | null | null | null | '''
goalboost.model package
The goalboost model package consists of MongoEngine models along with
Marshmallow schemas. MongoEngine is our database ORM to MongoDB,
and Marshmallow is a serialization library that helps us validate, consume,
and expose these Orm objects for clients that need it at the API layer.
For MongoEngine, see http://mongoengine.org/
For Marshmallow and the MongoEngine integration piece, see:
https://marshmallow.readthedocs.org/en/latest/
https://github.com/touilleMan/marshmallow-mongoengine
'''
from flask.ext.mongoengine import MongoEngine
db = MongoEngine()
| 26.958333 | 75 | 0.789799 |
2513a8a764760e74306e494219df1291ea86952f | 3,290 | py | Python | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import logging
| 29.115044 | 89 | 0.614286 |
2515ebed6d44cdb6e775f2b149da71a36b8ce3fa | 6,270 | py | Python | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null |
import boto3
from zipfile import ZipFile
import argparse
import json
import os
import shutil
if __name__ == "__main__":
main()
| 33 | 79 | 0.601435 |
25166ab3132cfb837c187df9b62bcf91450b7109 | 6,260 | py | Python | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | null | null | null | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | 3 | 2020-08-12T06:16:40.000Z | 2020-08-17T05:44:26.000Z | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | 1 | 2020-08-04T01:56:03.000Z | 2020-08-04T01:56:03.000Z | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import functools
import os
from absl import logging
import numpy as np
import tensorflow as tf
from typing import Any, List, Optional, MutableMapping
from official.utils.misc import keras_utils
from official.vision.image_classification.pruning.pruning_base_configs import ModelPruningConfig
from tensorflow_model_optimization.python.core.keras import compat
from tensorflow_model_optimization.python.core.sparsity.keras.cprune_registry import ConstraintRegistry
def get_callbacks(model_checkpoint: bool = True,
include_tensorboard: bool = True,
time_history: bool = True,
track_lr: bool = True,
model_pruning_config: Optional[ModelPruningConfig] = None,
write_model_weights: bool = True,
batch_size: int = 0,
log_steps: int = 0,
model_dir: str = None) -> List[tf.keras.callbacks.Callback]:
"""Get all callbacks."""
model_dir = model_dir or ''
callbacks = []
if model_checkpoint:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
ckpt_full_path, save_weights_only=True, verbose=1))
if include_tensorboard:
callbacks.append(
CustomTensorBoard(
log_dir=model_dir,
track_lr=track_lr,
model_pruning_config=model_pruning_config,
write_images=write_model_weights))
if time_history:
callbacks.append(
keras_utils.TimeHistory(
batch_size,
log_steps,
logdir=model_dir if include_tensorboard else None))
return callbacks
def get_scalar_from_tensor(t: tf.Tensor) -> int:
"""Utility function to convert a Tensor to a scalar."""
t = tf.keras.backend.get_value(t)
if callable(t):
return t()
else:
return t
| 36.184971 | 103 | 0.684824 |
2516f01f8f44e4e51781ce4ffc642a90318eac4f | 129 | py | Python | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 10 | 2017-05-10T08:10:23.000Z | 2020-03-23T10:23:37.000Z | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 38 | 2017-04-26T14:13:37.000Z | 2021-06-24T11:36:38.000Z | """Initialize the index package"""
# flake8: noqa
from __future__ import absolute_import
from .base import *
from .typ import *
| 18.428571 | 38 | 0.751938 |
2519a94caf6b2f931b487b3397703da9ddf2b842 | 885 | py | Python | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
burgerPython = Saucer("Hamburguesa de Python", 0.13,
cadDescription="Barely an eigth of a byte")
print(burgerPython) | 34.038462 | 75 | 0.59661 |
2519e01a81d1d3e2c4f4e4fede4c19c82e764391 | 9,768 | py | Python | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | 2 | 2019-01-10T03:44:03.000Z | 2019-05-24T08:50:14.000Z | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from torch import nn
from resnext.resnext101_regular import ResNeXt101
| 51.141361 | 135 | 0.619676 |
251a755eafd6983caca29826a579cc38212144dd | 7,413 | py | Python | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | 'Classes and functions for creating fonts and text buttons'
#IMPORTS
import pygame
from pathlib import Path
from .core import clip_surface, load_image
from .colour import palette_swap
#IMPORTS
#VARIABLES
__all__ = ['create_font', 'TextButton']
path = Path(__file__).resolve().parent
#VARIABLES
#CREATE_FONT
def create_font(colour):
	'''A function to create small and large Font objects
	colour will be the colour of the text
	The first value in the returned tuple is the small font and the second value is the large font
	The font images use red (255, 0, 0) for the glyphs; that red is remapped
	to the requested colour. Pure black and pure grey get special handling --
	presumably because those values collide with colours already used inside
	the font images, so the colliding entries are moved out of the way and a
	background value is passed to Font (TODO confirm against Font/palette_swap)
	Returns: tuple'''
	if tuple(colour[:3]) == (0, 0, 0):
		# Black text: recolour the red glyphs to black and move the existing
		# black pixels to white; Font is told the background is now 255.
		small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (255, 255, 255)})
		large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (255, 255, 255)})
		return Font(small_font_image, background_colour=255), Font(large_font_image, background_colour=255)
	if tuple(colour[:3]) == (127, 127, 127):
		# Grey text: move the existing (127, 127, 127) pixels to (128, 128, 128).
		small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (128, 128, 128)})
		large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (128, 128, 128)})
		return Font(small_font_image, 128), Font(large_font_image, 128)
	# Any other colour: just recolour the red glyphs.
	small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3]})
	large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3]})
	return Font(small_font_image), Font(large_font_image)
#CREATE_FONT
#FONT
#FONT
#TEXTBUTTON
#SET_TEXT
#COLLIDE
def collide(self, click, check_location=None):
'''This will check collision with the mouse location and also if click is True with it
A custom location can be set with location if pygame.mouse.get_pos() is not wished to be used
The first value returns True if the mouse has collided with the button, the second one is if the mouse clicked on it
Returns: tuple'''
check_location = pygame.mouse.get_pos() if check_location is None else check_location
if self.rect.collidepoint(check_location):
if click:
return True, True
return True, False
return False, False
#COLLIDE
#RENDER
def render(self, surface, font, scroll=pygame.Vector2()):
'Renders the text from the button'
if not isinstance(font, Font):
raise TypeError('font is not a Font object')
font.render(surface, self.text, self.location, scroll)
#RENDER
#TEXTBUTTON | 37.439394 | 356 | 0.673816 |
251ac80cf768d166a984daeae7c4d2c5d7422487 | 1,814 | py | Python | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 28 | 2017-05-03T17:48:21.000Z | 2022-02-14T13:40:24.000Z | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 6 | 2017-08-21T07:52:18.000Z | 2020-07-17T16:41:44.000Z | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 3 | 2018-03-13T23:33:10.000Z | 2021-09-09T02:33:07.000Z | """
This modules contain helper function to deal with PIL / Pillow Images.
.. note::
Please note that the ``[PIL]`` (pillow) extra dependency must be installed
to allow functions from this module to work.
"""
from . import guetzli
def _to_pil_rgb_image(image):
    """Return *image* converted to the RGB color space.

    Images that are already RGB are returned unchanged. Anything else is
    pasted onto a black RGB canvas; for RGBA images the alpha band is used
    as the paste mask, so transparent areas end up black.

    :param image: the PIL image to convert
    :returns: The input image if it was already in RGB mode, or a new RGB image
        if converted.
    :raises ImportError: PIL / Pillow cannot be imported.
    """
    if image.mode == "RGB":
        return image
    from PIL import Image  # deferred so the module imports without Pillow
    image.load()
    # For RGBA input use the alpha band (band index 3 of R, G, B, A) as mask.
    alpha_mask = image.split()[3] if image.mode == "RGBA" else None
    black_canvas = Image.new("RGB", image.size, (0x00, 0x00, 0x00))
    black_canvas.paste(image, mask=alpha_mask)
    return black_canvas
def process_pil_image(image, quality=guetzli.DEFAULT_JPEG_QUALITY):
    """Generate an optimized JPEG from a PIL image.

    Images with an alpha channel (transparency) are overlaid on a black
    background first.

    :param image: the PIL image
    :param quality: the output JPEG quality (default 95)
    :returns: Optimized JPEG bytes
    :rtype: bytes
    :raises ImportError: PIL / Pillow cannot be imported.

    .. code:: python

        import pyguetzli
        from PIL import Image

        image = Image.open("./test/image.jpg")
        optimized_jpeg = pyguetzli.process_pil_image(image)
    """
    rgb_image = _to_pil_rgb_image(image)
    width, height = image.size
    return guetzli.process_rgb_bytes(rgb_image.tobytes(), width, height, quality=quality)
| 26.676471 | 79 | 0.656009 |
251c85ca611047b1b27da7153669dd51f78397d6 | 1,034 | py | Python | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=201 lang=python
#
# [201] Bitwise AND of Numbers Range
#
# https://leetcode.com/problems/bitwise-and-of-numbers-range/description/
#
# algorithms
# Medium (35.44%)
# Total Accepted: 77.3K
# Total Submissions: 217.7K
# Testcase Example: '5\n7'
#
# Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND
# of all numbers in this range, inclusive.
#
# Example 1:
#
#
# Input: [5,7]
# Output: 4
#
#
# Example 2:
#
#
# Input: [0,1]
# Output: 0
#
| 19.509434 | 78 | 0.548356 |
251cba64cfe05ed7cdba8439be4d154984b803ea | 12,053 | py | Python | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | ''' dip
@author Bardia Mojra - 1000766739
@brief ee-5323 - project -
@date 10/31/21
code based on below YouTube tutorial and Pymotw.com documentation for socket mod.
@link https://www.youtube.com/watch?v=3QiPPX-KeSc
@link https://pymotw.com/2/socket/tcp.html
python socket module documentation
@link https://docs.python.org/3/library/socket.html
@link https://docs.python.org/3/howto/sockets.html
'''
import csv
import math
import numpy as np
import os
import pygame
import pyglet
from pyglet.window import key
import pymunk
import pymunk.constraints
import pymunk.pygame_util
import pandas as pd
import pyglet.gl as gl
''' custom libs
'''
import dvm
import tcm
''' NBUG
'''
from nbug import *
''' TEST CONFIG
'''
TEST_ID = 'Test 903'
SIM_DUR = 30.0 # in seconds
OUT_DIR = '../out/'
OUT_DATA = OUT_DIR+TEST_ID+'_data.csv'
CONF_DIR = '../config/'
# cart
m_c = 0.5
all_friction = 0.2
''' pendulum 1 '''
l_1 = 0.4 # 6, 5, 4, 7 -- 4 ->
m_1 = 0.2 # 2, 3, 4 -- 1 -> stable
m_1_moment = 0.01
m_1_radius = 0.05
''' pendulum 2 '''
l_2 = 0.7 # 6, 5, 7 -- 3 -> unstable
m_2 = 0.3 # 2, 3, 4 -- 2 -> unstable
m_2_moment = 0.001
m_2_radius = 0.05
# other config
output_labels=['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2']
# control config
# K gain matrix and Nbar found from modelling via Jupyter
# K = [16.91887353, 21.12423935, 137.96378003, -3.20040325, -259.72220049, -50.48383455]
# Nbar = 17.0
K = [51.43763708,
54.12690472,
157.5467596,
-21.67111679,
-429.11603909,
-88.73125241]
Nbar = 51.5
tConfig = tcm.test_configuration(TEST_ID=TEST_ID,
OUT_DIR=OUT_DIR,
OUT_DATA=OUT_DATA,
CONF_DIR=CONF_DIR,
SIM_DUR=SIM_DUR,
output_labels=output_labels,
all_friction=all_friction,
cart_mass=m_c,
pend_1_length=l_1,
pend_1_mass=m_1,
pend_1_moment=m_1_moment,
pend_2_length=l_2,
pend_2_mass=m_2,
pend_2_moment=m_2_moment,
K=K,
Nbar=Nbar)
# log test config
tcm.pkl(tConfig)
''' MOD CONFIG
'''
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 500
# sim config
MAX_FORCE = 25
DT = 1 / 60.0
PPM = 200.0 # pxls per meter
END_ = 1000 # samples used for plotting and analysis
SHOW_ = True
cart_size = 0.3, 0.2
white_color = (0,0,0,0)
black_color = (255,255,255,255)
green_color = (0,135,0,255)
red_color = (135,0,0,255)
blue_color = (0,0,135,255)
''' main
'''
pygame.init()
# screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
surface = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
# clock = pygame.time.Clock()
window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT, vsync=False, caption='Double Inverted Pendulum Simulation')
gl.glClearColor(255,255,255,255)
# setup the space
space = pymunk.Space()
# options = pymunk.pygame_util.DrawOptions(surface)
# space.debug_draw(options)
space.gravity = 0, -9.81
# space.debug_draw(options)
fil = pymunk.ShapeFilter(group=1)
# screen.fill(pygame.Color("white"))
# options = pymunk.pygame_util.DrawOptions(screen)
# space.debug_draw(options)
# ground
ground = pymunk.Segment(space.static_body, (-4, -0.1), (4, -0.1), 0.1)
# ground.color = pygame.Color("pink")
ground.friction = all_friction
ground.filter = fil
space.add(ground)
# space.debug_draw(options)
# cart
cart_moment = pymunk.moment_for_box(m_c, cart_size)
cart_body = pymunk.Body(mass=m_c, moment=cart_moment)
cart_body.position = 0.0, cart_size[1] / 2
cart_shape = pymunk.Poly.create_box(cart_body, cart_size)
cart_shape.color = black_color
# cart_shape.color = red_color
# cart_shape.fill_color = red_color
# cart_shape.color = black_color
cart_shape.friction = ground.friction
space.add(cart_body, cart_shape)
# space.debug_draw(options)
# pendulum 1
pend_1_body = pymunk.Body(mass=m_1, moment=m_1_moment)
pend_1_body.position = cart_body.position[0], cart_body.position[1] + cart_size[1] / 2 + l_1
pend_shape = pymunk.Circle(pend_1_body, m_1_radius)
pend_shape.filter = fil
space.add(pend_1_body, pend_shape)
# joint
joint = pymunk.constraints.PivotJoint(cart_body, pend_1_body, cart_body.position + (0, cart_size[1] / 2))
joint.collide_bodies = False
space.add(joint)
# pendulum 2
pend_2_body = pymunk.Body(mass=m_2, moment=m_2_moment)
pend_2_body.position = cart_body.position[0], cart_body.position[1] + cart_size[1] / 2 + (2 * l_2)
pend_shape2 = pymunk.Circle(pend_2_body, m_2_radius)
pend_shape2.filter = fil
space.add(pend_2_body, pend_shape2)
# joint 2
joint2 = pymunk.constraints.PivotJoint(pend_1_body, pend_2_body, cart_body.position + (0, cart_size[1] / 2 + l_2))
joint2.collide_bodies = False
space.add(joint2)
# space.debug_draw(options)
print(f"cart mass = {cart_body.mass:0.1f} kg")
print(f"pendulum 1 mass = {pend_1_body.mass:0.1f} kg, pendulum moment = {pend_1_body.moment:0.3f} kg*m^2")
print(f"pendulum 2 mass = {pend_2_body.mass:0.1f} kg, pendulum moment = {pend_2_body.moment:0.3f} kg*m^2")
force = 0.0
ref = 0.0
color = (200, 200, 200, 200)
label_x = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 28)
label_th_1 = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 58)
label_th_2 = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 88)
label_force = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 118)
labels = [label_x, label_th_1, label_th_2, label_force]
# data recorder so we can compare our results to our predictions
if os.path.exists(OUT_DATA):
os.remove(OUT_DATA)
with open(OUT_DATA, 'w') as f:
output_header = str()
for i, s in enumerate(output_labels):
if i == 0:
output_header = s
else:
output_header += ', '+s
output_header += '\n'
f.write(output_header)
f.close()
currtime = 0.0
record_data = True
def simulate(_):
global currtime
if currtime > SIM_DUR:
window.close()
# nprint('_',_)
# ensure we get a consistent simulation step - ignore the input dt value
dt = DT
# simulate the world
# NOTE: using substeps will mess up gains
space.step(dt)
# populate the current state
posx = cart_body.position[0]
velx = cart_body.velocity[0]
th_1 = pend_1_body.angle
th_1v = pend_1_body.angular_velocity
th_2 = pend_2_body.angle
th_2v = pend_2_body.angular_velocity
# dump our data so we can plot
if record_data:
with open(OUT_DATA, 'a+') as f:
f.write(f"{currtime:0.5f}, {posx:0.5f}, {velx:0.5f}, {th_1:0.5f}, {th_1v:0.5f}, {th_2:0.5f}, {th_2v:0.5f} \n")
f.close()
currtime += dt
# calculate our gain based on the current state
gain = K[0] * posx + K[1] * velx + K[2] * th_1 + K[3] * th_1v + K[4] * th_2 + K[5] * th_2v
# calculate the force required
global force
force = ref * Nbar - gain
# kill our motors if our angles get out of control
if math.fabs(pend_1_body.angle) > 1.0 or math.fabs(pend_2_body.angle) > 1.0:
force = 0.0
# cap our maximum force so it doesn't go crazy
if math.fabs(force) > MAX_FORCE:
force = math.copysign(MAX_FORCE, force)
# apply force to cart center of mass
cart_body.apply_force_at_local_point((force, 0.0), (0, 0))
def update_state_label(_):
'''
function to store the current state to draw on screen
'''
label_x.text = f'x: {cart_body.position[0]:0.3f} m'
label_th_1.text = f'theta_1: {pend_1_body.angle:0.3f} rad'
label_th_2.text = f'theta_2: {pend_2_body.angle:0.3f} rad'
label_force.text = f'force: {force:0.1f} N'
def update_reference(_, newref):
global ref
ref = newref
# callback for simulation
pyglet.clock.schedule_interval(simulate, DT)
pyglet.clock.schedule_interval(update_state_label, 0.25)
# schedule some small movements by updating our reference
pyglet.clock.schedule_once(update_reference, 2, 0.2)
pyglet.clock.schedule_once(update_reference, 7, 0.6)
pyglet.clock.schedule_once(update_reference, 12, 0.2)
pyglet.clock.schedule_once(update_reference, 17, 0.0)
pyglet.app.run()
f.close()
# data recorder so we can compare our results to our predictions
# f = open(OUT_DATA, 'r')
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
# for i in test_IDs:
tConfig = tcm.unpkl(TEST_ID, CONF_DIR)
df = pd.read_csv(tConfig.out_data)
df = dvm.get_losses(df,
dataPath=tConfig.data_path,
lossPath=tConfig.loss_path)
# plot pose
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 1, 3, 5]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Position',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# plot vel
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 2, 4, 6]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Velocity',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# plot losses
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 7, 8]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Losses',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# print losses
dvm.print_losses(df)
| 31.064433 | 118 | 0.652867 |
251d295ac1daf4f6c0aa7d07697c6e03ea7c9186 | 1,128 | py | Python | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | 1 | 2017-05-26T00:18:26.000Z | 2017-05-26T00:18:26.000Z | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | null | null | null | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | null | null | null | """
A Command parser to parse over each jinja template for a given cmake command
"""
import os
from apigen.Logger import Logger
from jinja2 import Environment, PackageLoader, FileSystemLoader
| 31.333333 | 76 | 0.675532 |
251d599be91a9d5e66da8bf669765945fc72709e | 299 | py | Python | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | 5 | 2016-08-21T16:24:03.000Z | 2021-01-11T23:04:21.000Z | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | null | null | null | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | 5 | 2016-11-05T17:05:39.000Z | 2022-01-31T20:19:12.000Z | #!/usr/bin/env python
import sys
print("Please tell me your favorite color: ")
color = sys.stdin.readline()
animal = raw_input("Please tell me your favorite animal: ")
print(animal)
sys.stdout.write("Your favorite color is: " + color + " favorite animal is: " + animal + "\n")
print("*" * 10)
| 19.933333 | 94 | 0.67893 |
251e7d6fbbff67cb94790461d92eb77f3f88ed53 | 111 | py | Python | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 15 | 2015-11-29T18:53:58.000Z | 2022-03-09T15:47:30.000Z | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 29 | 2016-01-21T18:10:45.000Z | 2021-10-01T16:41:12.000Z | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 11 | 2016-01-22T14:05:51.000Z | 2022-03-09T17:49:56.000Z | # Comet VOEvent Broker.
# Event handlers.
from comet.handler.relay import *
from comet.handler.spawn import *
| 18.5 | 33 | 0.765766 |
251efaea3581632f73c0223d75becaac1ffc7162 | 954 | py | Python | measure_mate/tests/api/test_template.py | niche-tester/measure-mate | c3acba57747bcb89fe0c6b9509ec90f04a581506 | [
"MIT"
] | 15 | 2015-12-14T02:20:31.000Z | 2022-01-30T04:36:39.000Z | measure_mate/tests/api/test_template.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 1,403 | 2017-02-16T01:00:04.000Z | 2022-03-15T21:12:13.000Z | measure_mate/tests/api/test_template.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 10 | 2015-12-18T01:30:46.000Z | 2022-01-30T04:36:41.000Z | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from measure_mate.models import Template
from measure_mate.tests.factories import TemplateFactory
| 38.16 | 73 | 0.715933 |
251f5df96375dbae57ea9bdc6db0a3e28bc73439 | 658 | py | Python | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-29 11:36
from django.db import migrations, models
| 27.416667 | 97 | 0.62462 |
252023af22ef5365f5e3d2b2d4c333240848fc36 | 4,085 | py | Python | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from lib.util import normal_log_density
| 31.183206 | 98 | 0.59388 |
252143e0b4bc8782465cc8f472bab67d3793cee0 | 1,129 | py | Python | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | import importlib
import unittest
solution = importlib.import_module('2020_04_2')
| 29.710526 | 82 | 0.578388 |
252147a24fb71425db336b4bd835e50e021bad1a | 1,649 | py | Python | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 2,650 | 2020-06-01T16:31:25.000Z | 2022-03-31T07:32:41.000Z | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 199 | 2020-06-02T01:09:09.000Z | 2022-03-31T17:11:20.000Z | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 344 | 2020-06-01T16:45:21.000Z | 2022-03-30T11:15:09.000Z | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a AIL agent."""
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import rewards
from acme.agents.jax.ail.agents import AIL
from acme.agents.jax.ail.agents import DistributedAIL
from acme.agents.jax.ail.builder import AILBuilder
from acme.agents.jax.ail.config import AILConfig
from acme.agents.jax.ail.dac_agents import DAC
from acme.agents.jax.ail.dac_agents import DACConfig
from acme.agents.jax.ail.dac_agents import DistributedDAC
from acme.agents.jax.ail.gail_agents import DistributedGAIL
from acme.agents.jax.ail.gail_agents import GAIL
from acme.agents.jax.ail.gail_agents import GAILConfig
from acme.agents.jax.ail.learning import AILLearner
from acme.agents.jax.ail.networks import AILNetworks
from acme.agents.jax.ail.networks import AIRLModule
from acme.agents.jax.ail.networks import compute_ail_reward
from acme.agents.jax.ail.networks import DiscriminatorMLP
from acme.agents.jax.ail.networks import DiscriminatorModule
from acme.agents.jax.ail.networks import make_discriminator
| 45.805556 | 74 | 0.814433 |
2521a1ac6de3b8964ba83ce10e729714793f678d | 2,578 | py | Python | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 2 | 2016-12-02T02:29:01.000Z | 2019-03-03T15:48:50.000Z | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 128 | 2016-05-22T21:44:20.000Z | 2022-03-11T23:14:18.000Z | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 1 | 2017-08-20T14:14:52.000Z | 2017-08-20T14:14:52.000Z | from __future__ import print_function
from cineapp import app, db, lm
from flask_login import login_required
from flask import jsonify, session, g, url_for, request
from pywebpush import webpush, WebPushException
from cineapp.models import PushNotification
import json, traceback, sys, datetime, time
from cineapp.auth import guest_control
| 39.060606 | 202 | 0.747867 |
252492e17fae91abe1251ab7bb4d09c4949ed235 | 37,380 | py | Python | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | 1 | 2022-03-09T14:51:54.000Z | 2022-03-09T14:51:54.000Z | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | null | null | null | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | null | null | null | # generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:50:50+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Extra, Field
| 24.511475 | 640 | 0.736811 |
25249f6ffc68bd327fd5d0540e42e061ccc8880f | 4,577 | py | Python | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | null | null | null | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | 3 | 2015-01-12T09:33:30.000Z | 2015-01-29T22:56:47.000Z | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 25 15:48:52 2015
@author: Sofia
"""
import csv
import json
import os
sourceEncoding = "iso-8859-1"
targetEncoding = "utf-8"
# encode files to utf8 (source: http://stackoverflow.com/questions/191359/how-to-convert-a-file-to-utf-8-in-python)
csvfile = open('..\Data\AMFI.csv',"r")
csvfile_encoded = open("..\Data\AMFI_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
csvfile = open('..\Data\AMFI_categories.csv',"r")
csvfile_encoded = open("..\Data\AMFIcategories_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
csvfile = open('..\Data\AMFI_domains.csv',"r")
csvfile_encoded = open("..\Data\AMFIdomains_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
# open files
AMFI_books = open("..\Data\AMFI_encoded.csv","r")
AMFI_categories = open("..\Data\AMFIcategories_encoded.csv","r")
AMFI_domains = open("..\Data\AMFIdomains_encoded.csv","r")
# define fieldnames
fieldnames_books = ("Callnumber","Barcode","Title","Year","Location")
fieldnames_categories = ("Barcode","Category")
# put data in reader
reader_books = csv.DictReader(AMFI_books, fieldnames_books, delimiter=';')
reader_categories = csv.DictReader(AMFI_categories, fieldnames_categories, delimiter = ';')
reader_domains = csv.DictReader(AMFI_domains, delimiter = ';')
output = {"name": "Library of the University of Applied Sciences", "type":"parent", "total":5605, "value":50, "children": []}
# get data from reader_books
barcode_books = []
names_books = []
tags_books = []
copies = []
for books in reader_books:
barcode_books.append(books["Callnumber"])
names_books.append(books["Title"])
tags_books.append(books["Barcode"])
tags = []
size_books = len(barcode_books)
# Modify data books
for k in range(0, len(names_books), 1):
# count copies
count = names_books.count(names_books[k])
copies.append(count)
# collect unique ids
indeces = [i for i, x in enumerate(names_books) if x == names_books[k]]
if len(indeces) == 1:
tags.append(tags_books[indeces[0]])
else:
list_tags = []
for w in range(0,len(indeces),1):
tag = tags_books[indeces[w]]
list_tags.append(tag)
tags.append(list_tags)
# set copies to NaN
for t in range(1,len(indeces),1):
names_books[indeces[t]] = "NaN"
# Enter domains
barcode_domain = []
for domain in reader_domains:
output["children"].append({
"type": "domain",
"name": domain["Domain"],
"barcode": domain["Barcode"],
"value": 6,
"children": []
})
barcode_domain.append(domain["Barcode"])
# get category data
barcode_category = []
names_category = []
for category in reader_categories:
barcode_category.append(category["Barcode"])
names_category.append(category["Category"])
# Enter categories
for i in range(0,len(barcode_domain),1):
barcode_domain_values = output["children"][i]["barcode"]
for j in range(0,len(barcode_category),1):
if barcode_category[j] < barcode_domain_values:
if names_category[j] != "NaN":
output["children"][i]["children"].append({
"type":"category",
"barcode": barcode_category[j],
"value": 5,
"name": names_category[j],
"children": []
})
names_category[j] = "NaN"
# append data to output
lengths = []
codes_categories =[]
for i in range(0,len(barcode_domain),1):
lengths.append(len(output["children"][i]["children"]))
for k in range(0, lengths[i], 1):
#counter = 0
codes_categories = output["children"][i]["children"][k]["barcode"]
for j in range(0,len(names_books),1):
if barcode_books[j] < codes_categories:
if names_books[j] != "NaN":
output["children"][i]["children"][k]["children"].append({
"type":"book",
"barcode": barcode_books[j],
"tags": tags[j],
"value": 2,
"name": names_books[j],
"copies": copies[j]
})
names_books[j] = "NaN"
# write data to file
with open('../Data/tree.json', 'w') as f:
json.dump(output, f, indent=True)
| 31.136054 | 125 | 0.622023 |
2526119172205dbcc83b912e56e47b1cfd9d139b | 3,751 | py | Python | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 2,021 | 2015-02-06T07:45:08.000Z | 2022-03-30T12:26:39.000Z | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 787 | 2015-02-03T20:06:04.000Z | 2022-03-30T09:00:38.000Z | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 878 | 2015-02-04T15:29:50.000Z | 2022-03-28T16:51:44.000Z | import datetime
import os
import unittest
from io import StringIO
from tempfile import mkdtemp
from unittest.mock import patch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command as real_call_command
from django.core.management.base import CommandError
from django.test import TestCase
from whoosh.qparser import QueryParser
from haystack import connections, constants, indexes
from haystack.utils.loading import UnifiedIndex
from ..core.models import MockModel
from .test_whoosh_backend import WhooshMockSearchIndex
from .testcases import WhooshTestCase
| 33.491071 | 84 | 0.691816 |
25282fa8805725b2acc31f9c959840083384e1e2 | 2,977 | py | Python | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from cowpy import cow
import json
import sys
if __name__ == '__main__':
run_forever()
| 26.114035 | 76 | 0.543164 |
2529f17c13ced51c4629d6195cff0d46c5800cac | 7,033 | py | Python | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 115 | 2020-06-18T15:00:58.000Z | 2022-03-02T10:13:19.000Z | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 2 | 2020-11-06T11:02:31.000Z | 2021-01-22T12:44:35.000Z | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 60 | 2020-07-22T14:53:10.000Z | 2022-03-23T10:17:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 00:58:34 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and key varable values
'''
import os
import quandl
import pandas as pd
import numpy as np
import keras
from PIL import Image
#folder path
folder_path = os.path.dirname(__file__)
#date range for full dataset
str_dte = '2003-01-01'
end_dte = '2018-7-31'
date_dict = {'gte':str_dte, 'lte':end_dte}
#Dates for back-testing
start_dte = '2015-1-1'
#Create list of dates
datelist = pd.date_range(start_dte, periods=365*2).tolist()
#API key for quandl
quandl.ApiConfig.api_key = '[quandl id]'
#Parameters for the image generation
col_num_mid = 10
col_num_dte = 9
pixel_size = 100
window_size = 60
pred_window_size = 1
#model path
model_path = "model2_2DCov.h5"
model = keras.models.load_model(model_path)
#number of channel for the image
num_channel=1
#strategies parameters
curr_pnl = 10000
curr_pnl_0=curr_pnl
curr_pnl_1=curr_pnl
curr_pnl_2=curr_pnl
quant_trans_0 = 0
quant_trans_1 = 0
quant_trans_2 = 0
min_pnl = 0.0005
trading_cost = 0
trade_limit = 0.5
'''*************************************
#2. Define functions
'''
#input_X is a series of price
#output_X is a series of price expressed in pixel
'''*************************************
#3. Running the test
'''
#Get the data
tkr = 'VTV'
df =quandl.get_table('SHARADAR/SFP',date=date_dict,ticker=tkr)
df = df.sort_values(by=['date'])
df=df.reset_index(drop=True)
#write header for the log of the strategy back-testing
f = open('log.txt','w+')
f.write('strategy\tBuySell\t' + 'dte' +'\t'+ 'cost' +'\t'+ 'T+1_actual' +'\t'+ 'T+1_pred'+'\t'+ 'Quantity'+'\t'+ 'PnL'+'\n')
#loop through the dates
for pred_dte in datelist:
df_i = df.index[df['date']==pred_dte]
#make sure both start and end dates are valid
if df_i.empty:
print('no data')
continue
df_i = df_i[0]
print(pred_dte)
df_start = df_i-(window_size) #starts at zero
if df_start < 0: #in case the date inputted is not valid
print('later date')
continue
#prepare the input data
df['mid'] = (df['high'] + df['low'])/2
df_plot = df.iloc[df_start:df_i,:]
min_p = min(df_plot['mid'])
max_p = max(df_plot['mid'])
output_pixel,unit = rescale(df_plot['mid'],pixel_size,min_p,max_p)
#if no trend, then drop this data point
if min_p ==max_p:
print('no trend')
continue
#stack up for a numpy for Image Recognition
#print the historical data
img_ar = np.zeros((1,pixel_size,window_size,num_channel))
img_display = np.zeros((pixel_size,window_size,num_channel))
k=0
pix_p=0
for pix in output_pixel:
y_pos = int(pix)-1
img_ar[0][y_pos][k][num_channel-1] = 255
img_display[y_pos][k][num_channel-1] = 255
pix_p=y_pos
k+=1
img_row = img_ar/255
last_actual_p = pix_p * unit + min_p
#make prediction
pred_y = model.predict(img_row)
max_y_val = max(pred_y[0])
pred_y_img = np.zeros((pixel_size,1))
#Obtain predicted price
pred_pixel = 0
expected_p = 0
#calculate expected values
for i in range(pixel_size):
expected_p += pred_y_img[i,0] * i
if pred_y[0,i] == max_y_val:
pred_y_img[i,0] = 255
pred_pixel = i
pred_p = pred_pixel * unit + min_p
print('cost at ' + str(last_actual_p))
print('predict p be ' + str(pred_p) + ' and probability of ' + str(max_y_val))
pred_exp_p = expected_p * unit + min_p
print('expected predict p be ' + str(pred_exp_p))
y_actual_p = df.iloc[df_i+1,:]['mid']
print('actual p be '+str(y_actual_p))
#Strategy Back-Testing
#Benchmark - Strategy 0 - buy and hold
if quant_trans_0 == 0:
quant_trans_0 = curr_pnl/y_actual_p
pnl = 0-trading_cost
else:
pnl = (y_actual_p/last_actual_p-1) * quant_trans_0
curr_pnl_0 += pnl
f.write('B0\tNA\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(y_actual_p)+'\t'+ str(1)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
#Testing of strategy1
order_type = ""
quant_trans_1 = int(curr_pnl_1/last_actual_p*0.5)
if abs(pred_exp_p/last_actual_p-1)>min_pnl:
if pred_exp_p>last_actual_p:
#buy one now / long one unit
#stock_unit_1+=quant_trans_1
pnl = (y_actual_p-last_actual_p) * quant_trans_1-trading_cost
order_type = "B"
curr_pnl_1 += pnl
f.write('S1\tBuy\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_exp_p)+'\t'+ str(quant_trans_1)+'\t'+ str(y_actual_p-last_actual_p)+'\n')
elif pred_exp_p<last_actual_p:
#sell one now / short one unit
#stock_unit_1-=quant_trans_1
pnl = (last_actual_p-y_actual_p) * quant_trans_1-trading_cost
order_type = "S"
curr_pnl_1 += pnl
f.write('S1\tSell\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_exp_p)+'\t'+ str(quant_trans_1)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
else: #no trade
if order_type == "B":
pnl = (y_actual_p-last_actual_p) * quant_trans_1
else:
pnl = (last_actual_p-y_actual_p) * quant_trans_1
curr_pnl_1 += pnl
#Testing of strategy2
if max_y_val > 0.99 and abs(pred_p/last_actual_p-1)>min_pnl:
quant_trans_2 = int(curr_pnl_2/last_actual_p*0.5)
if pred_p>last_actual_p:
#buy one now / long one unit
#stock_unit_2+=quant_trans_2
order_type = "B"
curr_pnl_2 += (y_actual_p-last_actual_p) * quant_trans_2-trading_cost
f.write('S2\tBuy\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_p) +'\t'+str(quant_trans_2)+'\t'+ str(y_actual_p-last_actual_p)+'\n')
elif pred_p<last_actual_p:
#sell one now / short one unit
#stock_unit_2-=quant_trans_2
order_type = "S"
curr_pnl_2 += (last_actual_p-y_actual_p) * quant_trans_2-trading_cost
f.write('S2\tSell\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_p)+'\t'+ str(quant_trans_2)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
else: #no trade
if order_type == "B":
pnl = (y_actual_p-last_actual_p) * quant_trans_2
else:
pnl = (last_actual_p-y_actual_p) * quant_trans_2
curr_pnl_2 += pnl
#print the final result of the strategies
print(curr_pnl_0)
print(curr_pnl_1)
print(curr_pnl_2)
f.close()
'''
export CUDA_VISIBLE_DEVICES=''
tensorboard --logdir AI_Finance_book/6B_TrendFollowings/Graph/ --host localhost --port 6006
'''
| 32.560185 | 187 | 0.624485 |
252ac1c22921db6597accc034da434758be4405a | 2,589 | py | Python | lichee/dataset/field_parser/image_local_path.py | Tencent/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 91 | 2021-10-30T02:25:05.000Z | 2022-03-28T06:51:52.000Z | lichee/dataset/field_parser/image_local_path.py | zhaijunyu/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 1 | 2021-12-17T09:30:25.000Z | 2022-03-05T12:30:13.000Z | lichee/dataset/field_parser/image_local_path.py | zhaijunyu/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 17 | 2021-11-04T07:50:23.000Z | 2022-03-24T14:24:11.000Z | # -*- coding: utf-8 -*-
from lichee import plugin
from .field_parser_base import BaseFieldParser
import os
from PIL import Image
from torchvision import transforms
import torch
from lichee.utils import storage
| 30.821429 | 117 | 0.611047 |
252b421527774d5fb18e906562e999ce4cef4de4 | 2,054 | py | Python | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | 35 | 2019-01-11T00:55:19.000Z | 2021-07-14T11:44:10.000Z | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | null | null | null | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | 9 | 2019-01-11T01:42:14.000Z | 2020-03-02T05:47:18.000Z | import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision.models.inception import BasicConv2d, InceptionAux
import pretrainedmodels
from common import num_class
| 36.035088 | 108 | 0.595424 |
252c453ec6e9dc3416a26d47c38bcfb973477454 | 74 | py | Python | python/sequences.py | saedyousef/Python-scratch | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | 5 | 2020-07-20T17:47:08.000Z | 2021-08-17T18:26:25.000Z | python/sequences.py | saedyousef/CS-50 | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | null | null | null | python/sequences.py | saedyousef/CS-50 | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | 1 | 2021-06-29T19:49:46.000Z | 2021-06-29T19:49:46.000Z | name = "Saeed"
cordinates = (10.0, 20.0)
names = ["Saeed", "Bob", "Mousa"] | 24.666667 | 33 | 0.581081 |
252d0e1541c6bce0edda34974ac8e4c3861ecde4 | 2,622 | py | Python | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 9/2/14
###Function: mean peak-based retro zOR metric vs. total attack rate
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/OR_allweeks.csv
###Command Line: python Supp_zOR_totalAR.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications.csv','r')
zORin.readline() # rm header
zOR = csv.reader(zORin, delimiter=',')
allincidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
allincid = csv.reader(allincidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
### program ###
## import severity index ##
# d_nat_classif[season] = (mean retro zOR, mean early zOR)
d_nat_classif = fxn.readNationalClassifFile(zOR)
## import attack rate ##
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid = fxn.week_incidCA_processing(allincid, pop)
# dict_tot_attack[seasonnum] = total attack rate for weeks 40 to 20 by 100,000
_, d_tot_attack = fxn.contributions_CAO_to_attack(d_wk, d_incid)
# plot values
AR = [d_tot_attack[s] for s in ps]
retrozOR = [d_nat_classif[s][0] for s in ps]
earlyzOR = [d_nat_classif[s][1] for s in ps]
print 'retro corr coef', np.corrcoef(AR, retrozOR)
print 'early corr coef', np.corrcoef(AR, earlyzOR)
# draw plots
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
# mean retro zOR vs. attack rate
ax1.plot(AR, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, AR, retrozOR):
ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax1.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax1.set_xlabel(fxn.gp_attackrate, fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_ylim([-10,20])
ax1.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_totalAR.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| 32.775 | 171 | 0.71167 |
252f0a3cb8c24df7cf5db2bc1599071146727275 | 1,238 | py | Python | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | """This is the solution to Problem 1 of Project Euler."""
"""Copyright 2021 Dimitris Mantas"""
import time
def compute_all_multiples(of_number, below_number):
"""Compute all natural numbers, which are multiples of a natural number below a predefined number."""
# Register the list of said multiples.
multiples = []
for i in range(1, below_number):
if not i % of_number:
multiples.append(i)
return multiples
# These lines are for debugging purposes.
# print(compute_all_multiples(3,10))
# print(compute_all_multiples(5,10))
if __name__ == "__main__":
# This line is for debugging purposes.
# Start measuring the program runtime.
runtime = time.time()
# The resulting list is not sorted and contains the unique values the lists involved in the calculation.
# This is because the multiples of 15 are contained on both said lists.
ans = set([i for i in (compute_all_multiples(3, 1000) + compute_all_multiples(5, 1000))])
print(ans)
print(sum(ans))
# These lines are for debugging purposes.
# Compute the program runtime.
print("This problem was solved in {0} seconds.".format(time.time() - runtime))
| 30.195122 | 109 | 0.673667 |
252f723efb0474d342e7055aa1aa0011f4760543 | 3,731 | py | Python | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Colecciones - Listas
# In[1]:
l = [22, True, "una lista", [1, 2]]
mi_var = l[0] # mi_var vale 22
mi_var1 = l[3] # mi_var1 vale [1, 2]
print (mi_var)
print (mi_var1)
# In[50]:
# Si queremos acceder a un elemento de una lista incluida dentro de otra lista tendremos que
# utilizar dos veces este operador
l = ["una lista", [1, 2]]
mi_var = l[1][0] # mi_var vale 1
print (mi_var)
# In[51]:
# Tambin podemos utilizar este operador para modificar un elemento de la lista si lo colocamos en la parte izquierda
# de una asignacin
l = [22, True]
l[0] = 99 # Con esto l valdr [99, True]
print (l)
# In[52]:
# Una curiosidad sobre el operador [] de Python es que podemos utilizar tambin nmeros negativos. Si se utiliza un nmero
# negativo como ndice, esto se traduce en que el ndice empieza a contar desde el final, hacia la izquierda; es decir,
# con [-1] accederamos al ltimo elemento de la lista, con [-2] al penltimo, con [-3], al antepenltimo, y as
# sucesivamente.
l = [22, True, "una lista", [1, 2], "antepenltimo", "penltimo", "ltimo"]
print (l[-1]) # ltimo
print (l[-2]) # penltimo
print (l[-3]) # antepenltimo
# In[53]:
# Otra cosa inusual es lo que en Python se conoce como slicing o particionado, y que consiste en ampliar este mecanismo
# para permitir seleccionar porciones de la lista. Si en lugar de un nmero escribimos dos nmeros inicio y fin separados
# por dos puntos (inicio:fin) Python interpretar que queremos una lista que vaya desde la posicin inicio a la posicin
# fin, sin incluir este ltimo. Si escribimos tres nmeros (inicio:fin:salto) en lugar de dos, el tercero se utiliza para
# determinar cada cuantas posiciones aadir un elemento a la lista.
l = [99, True, "una lista", [1, 2]]
mi_var = l[0:2] # mi_var vale [99, True]
print (mi_var)
mi_var = l[0:4:2] # mi_var vale [99, "una lista"]
print (mi_var)
# In[54]:
# Hay que mencionar as mismo que no es necesario indicar el principio y el final del slicing, sino que, si estos se
# omiten, se usarn por defecto las posiciones de inicio y fin de la lista, respectivamente:
l = [99, True, "una lista"]
mi_var = l[1:] # mi_var vale [True, "una lista"]
print (mi_var)
mi_var = l[:2] # mi_var vale [99, True]
print (mi_var)
mi_var = l[:] # mi_var vale [99, True, "una lista"]
print (mi_var)
mi_var = l[::2] # mi_var vale [99, "una lista"]
print (mi_var)
# In[55]:
# Tambin podemos utilizar este mecanismo para modificar la lista:
l = [99, True, "una lista", [1, 2]]
l[0:2] = [0, 1] # l vale [0, 1, "una lista", [1, 2]]
print (l)
# # Colecciones - Tuplas
# In[57]:
# Todo lo que hemos explicado sobre las listas se aplica tambin a las tuplas, a excepcin de la forma de definirla,
# para lo que se utilizan parntesis en lugar de corchetes.
t = (1, 2, True, "python")
print (t)
# # Colecciones - Diccionarios
# In[59]:
# Los diccionarios, tambin llamados matrices asociativas, deben su nombre a que son colecciones que relacionan una clave y
# un valor. Por ejemplo, veamos un diccionario de pelculas y directores:
d = {"Love Actually": "Richard Curtis", "Kill Bill": "Tarantino", "Amlie": "Jean-Pierre Jeunet"}
# La diferencia principal entre los diccionarios y las listas o las tuplas es que a los valores almacenados en un
# diccionario se les accede no por su ndice, porque de hecho no tienen orden, sino por su clave, utilizando de nuevo
# el operador [].
mi_var = d["Love Actually"] # devuelve "Richard Curtis"
print (mi_var)
# Al igual que en listas y tuplas tambin se puede utilizar este operador para reasignar valores.
d["Kill Bill"] = "Quentin Tarantino"
mi_var = d["Kill Bill"] # devuelve "Quentin Tarantino"
print (mi_var)
| 31.091667 | 123 | 0.703029 |
2530a05e38dc4778931bafbbddc794641c581d85 | 28,045 | py | Python | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | import pytest
from itertools import product
import torch
from torch import nn
from torch.nn.utils import parameters_to_vector
from torch.utils.data import DataLoader, TensorDataset
from torchvision.models import wide_resnet50_2
from laplace import Laplace, SubnetLaplace, FullSubnetLaplace, DiagSubnetLaplace
from laplace.baselaplace import DiagLaplace
from laplace.utils import (SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask,
LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask,
ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask)
torch.manual_seed(240)
torch.set_default_tensor_type(torch.DoubleTensor)
score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask,
LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask]
layer_subnet_masks = [ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask]
all_subnet_masks = score_based_subnet_masks + layer_subnet_masks
likelihoods = ['classification', 'regression']
hessian_structures = ['full', 'diag']
| 46.976549 | 117 | 0.724764 |
253330ec00e3e989dc1286f84a83a5c56cf85fc5 | 332 | py | Python | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | """
rylog
Logging happening in a 3-dimensional Cartesian product of:
1. The logging level: [debug, info, warn, error]
2. The logging category: e.g. software event, action, output
3. The detected function/method: e.g. my_class.class_method or foo
"""
from .misc import *
from .server import *
from .client import *
| 25.538462 | 70 | 0.701807 |
2533ae4893b1c779f4471ef4511dd0dbc0e4068c | 3,701 | py | Python | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | 1 | 2021-08-13T10:55:33.000Z | 2021-08-13T10:55:33.000Z | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import timeit
import numpy as np
import sys
import random as rand
if __name__ == '__main__':
# 1.
# queue=Queue_array(8)
# string_list=['a','b','c','d','e','f','g','h']
#
# for ele in string_list:
# queue.enqueue(ele)
#
# print(queue._items)
#
# queue.enqueue('i')
#
# print('pop:',queue.dequeue())
# print('pop:', queue.dequeue())
# print('pop:', queue.dequeue())
# print(queue._items)
#
# queue.enqueue('i')
# print(queue)
#2.
queue = CircularQueue(8)
string_list=['e','f','g','h','i','j']
for ele in string_list:
queue.enqueue(ele)
print(queue._items)
for i in range(3):
print('pop:',queue.dequeue())
print(queue._items)
queue.enqueue('a')
queue.enqueue('b')
print(queue._items)
queue.enqueue('c')
queue.enqueue('d')
print(queue._items)
queue.enqueue('e')
| 19.276042 | 87 | 0.518779 |
253438c9cde5237ab336b6ebc0e8e1089525b6e7 | 1,703 | py | Python | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
import math
MAX_SPEED = 2
ACCELERATION = 0.5
DRAG = 0.3
TURN_SPEED=5
IMAGE = np.array([
[0,0,0,1,0,0,0],
[0,0,1,1,1,0,0],
[0,1,1,1,1,1,0],
[1,1,1,1,1,1,1],
[0,1,1,1,1,1,0],
[0,0,1,1,1,0,0],
[0,0,0,1,0,0,0]])
if __name__ == "__main__":
main() | 22.706667 | 81 | 0.613623 |
2536ce2ad28b7718b5111d981d1c1217ff573d5d | 1,868 | py | Python | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | 2 | 2017-10-09T16:59:01.000Z | 2017-10-10T08:38:08.000Z | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | null | null | null | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | null | null | null | # coding: utf-8
# n a1a2...an(i,ai)
# n i(i,ai) (i, 0)
# x
#
# LeetCode
# https://leetcode-cn.com/problems/container-with-most-water
list = [1,8,6,2,5,4,8,3,7]
print Solution().maxArea(list)
| 20.527473 | 68 | 0.4197 |
253841f648fa7d855056a9bf18031761bbedfe7c | 561 | py | Python | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 7 | 2018-11-13T17:39:15.000Z | 2019-03-27T04:55:24.000Z | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 72 | 2018-11-09T14:20:25.000Z | 2020-06-05T19:28:19.000Z | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 3 | 2018-11-19T19:10:39.000Z | 2019-08-23T20:52:23.000Z | # Generated by Django 2.1.3 on 2018-12-06 10:42
from django.db import migrations, models
import system.models.system_settings
| 28.05 | 134 | 0.691622 |
25388135b2590bec6c24b4f712d9da835c81c62b | 4,338 | py | Python | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 110 | 2015-07-12T15:13:18.000Z | 2022-03-28T00:58:59.000Z | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 70 | 2016-02-23T03:19:55.000Z | 2022-03-14T09:12:43.000Z | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 66 | 2015-07-10T20:43:30.000Z | 2022-02-18T01:00:33.000Z | from __future__ import division, print_function
from .trajgroup import TrajectoryGroup
from .hypath import HyPath
from .hygroup import HyGroup
def print_clusterprocedure():
"""Print clustering guide."""
print("""
In ``PySPLIT``
1. Create ``TrajectoryGroup`` with desired set of trajectories
2. ``TrajectoryGroup.make_infile()``
In ``HYSPLIT``
3. Trajectory --> Special Runs --> Clustering --> Standard
4. Adjust clustering parameters and working folder
(where output will be stored, where INFILE lives)
5. ``Run cluster analysis``
6. Determine and set appropriate number of clusters
7. Assign trajectories to clusters (``Run``)
8. ``Display Means``, ``Quit``
In ``PySPLIT``
9. ``spawn_clusters()``""")
| 27.807692 | 79 | 0.574919 |
253a183c509b499df726c22fb7b3ee45b370c6ff | 2,424 | py | Python | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 3 | 2018-12-14T02:37:10.000Z | 2020-04-30T19:07:01.000Z | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 25 | 2018-07-27T13:38:17.000Z | 2021-10-05T13:01:36.000Z | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 12 | 2018-07-09T22:52:32.000Z | 2021-11-29T19:45:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import re
import requests
import sys
sys.path.append(os.path.join(sys.path[0], "../", "lib"))
import lkft_squad_client # noqa: E402
def get_branch_from_make_kernelversion(make_kernelversion):
"""
IN: "4.4.118"
OUT: "4.4"
IN: "4.9.118-rc1"
OUT: "4.9"
"""
pattern = re.compile(r"^(\d+\.\d+).*$")
match = pattern.match(make_kernelversion)
return match.group(1)
def get_most_recent_release(builds_url):
"""
Given a list of builds that is sorted with the newest first,
return the most recent finished build.
"""
first_build = None
for build in lkft_squad_client.Builds(builds_url):
if not first_build:
first_build = build
if build["finished"]:
return build
# If none found, return first build
return first_build
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("build_url", help="API URL to developer build")
args = parser.parse_args()
(email_destination, email_subject, email_body) = get_build_report(args.build_url)
with open("email.to", "w") as f:
f.write(email_destination)
with open("email.subject", "w") as f:
f.write(email_subject)
with open("email.body", "w") as f:
f.write(email_body)
print("TO: {}".format(email_destination))
print("SUBJECT: {}".format(email_subject))
print("\n{}\n".format(email_body))
| 28.186047 | 86 | 0.664604 |
253c3b4e7dd3233e756d0a0d7809bcec3e7f9d2a | 1,507 | py | Python | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | from math import ceil, sqrt
if __name__ == "__main__":
input = 325489
print(part_one(input))
print(part_two(input))
| 25.542373 | 119 | 0.639681 |
253ce464d772dd296d3b8fca083e60adbb02df3d | 423 | py | Python | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | #022: Crie um programa que leia o nome completo de uma pessoa e mostre:
# - O nome com todas as letras maisculas e minsculas.
# - Quantas letras ao tdo (sem considerar espaos).
# - Quantas letras tem o primeiro nome.
nome = input("Qual o seu nome? ")
print(">>",nome.upper())
print(">>",nome.lower())
jnome = nome.strip()
v = jnome.count(" ")
print(">>",len(jnome)- v)
pnome = nome.split()
print(">>",len(pnome[0]))
| 28.2 | 71 | 0.664303 |
253e8b5989062bd43d076499f35aace1547716ff | 2,395 | py | Python | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | 1 | 2020-06-12T03:32:35.000Z | 2020-06-12T03:32:35.000Z | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | null | null | null | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | 1 | 2020-06-12T03:32:15.000Z | 2020-06-12T03:32:15.000Z | from typing import Optional
from pysqldump.domain.formatters import (
CSVFormatter,
DictFormatter,
JsonFormatter,
ConsoleFormatter,
)
from pysqldump.settings.base import get_config
config = get_config()
def __to_console(self, pprint: bool = False):
if pprint:
return ConsoleFormatter(
headers=self.headers, data=self.data, export_to=self.filename
).print()
return DictFormatter(
headers=self.headers, data=self.data, export_to=self.filename
).export()
def __to_csv(self, pprint: bool = False):
formatter = CSVFormatter(
headers=self.headers, data=self.data, export_to=self.filename
)
if pprint:
return formatter.print()
return formatter.export()
def __to_json(self, pprint: bool = False, json: bool = False):
formatter = JsonFormatter(
headers=self.headers, data=self.data, export_to=self.filename
)
if pprint:
formatter.print()
if self._filename and not json:
return formatter.export()
return formatter.use()
| 29.567901 | 84 | 0.617954 |
25405166ea1f14ffbb145a0fad72cb35236d7ab6 | 605 | py | Python | Mortgage Calculator.py | BokijonovM/Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | 1 | 2021-03-18T08:12:15.000Z | 2021-03-18T08:12:15.000Z | Mortgage Calculator.py | BokijonovM/Python_Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | null | null | null | Mortgage Calculator.py | BokijonovM/Python_Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | null | null | null | """**Mortgage Calculator** -
Calculate the monthly payments of a fixed term mortgage
over given Nth terms at a given interest rate. Also figure
out how long it will take the user to pay back the loan."""
months = int(input("Enter mortgage term (in months): "))
rate = float(input("Enter interest rate (in %): "))
loan = float(input("Enter loan value: "))
monthly_rate = rate / 100 / 12
payment = (monthly_rate / (1 - (1 + monthly_rate)**(-months))) * loan
print("Monthly payment for a $%.2f %s year mortgage at %.2f%% interest rate is: $%.2f" % (loan, (months / 12), rate, payment))
| 37.8125 | 127 | 0.661157 |
2540e4b774668ff785e806c6ddc07e0e515e0f5f | 172 | py | Python | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | import numpy as np | 15.636364 | 39 | 0.511628 |
25428819afdb8bcef5f733f483e2dfff517079e7 | 956 | py | Python | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | import cv2
| 21.727273 | 47 | 0.65272 |
25446e5536422db53c3887d8fec73e5ede336aa7 | 5,460 | py | Python | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | 17 | 2020-02-04T16:42:40.000Z | 2021-11-11T14:37:32.000Z | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | null | null | null | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | 4 | 2020-02-04T12:59:04.000Z | 2021-05-30T14:10:54.000Z | from brnolm.runtime.tensor_reorganization import TensorReorganizer
import torch
from torch.autograd import Variable
from .common import TestCase
| 30.502793 | 86 | 0.540842 |
254476669d60a9c34049aba84879aa1422202a6a | 25 | py | Python | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | jobname="BachelorThesis"
| 12.5 | 24 | 0.84 |
25456b38415fb42cb49ec1c612d93c5272eac7b9 | 331 | py | Python | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 269 | 2015-01-09T00:56:41.000Z | 2022-03-30T17:09:46.000Z | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 22 | 2017-04-01T13:44:56.000Z | 2018-09-10T11:48:56.000Z | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 59 | 2015-01-09T00:56:50.000Z | 2022-03-13T23:52:27.000Z | from eventsourcing.infrastructure.event_sourced_repo import EventSourcedRepository
from quantdsl.domain.model.contract_specification import ContractSpecification, ContractSpecificationRepository
| 33.1 | 111 | 0.89426 |
2545a6ce4bad291b2182fea9564fd36668358b01 | 660 | py | Python | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | from requests_html import HTMLSession
import os
import sys
writeFileName = "courseLinks.out"
writeFileStream = open(writeFileName,'w',encoding='utf-8')
session = HTMLSession()
url = 'https://www.ji.sjtu.edu.cn/academics/courses/courses-by-number/'
r = session.get(url)
for i in range(2,100):
sel = '#Faculty-information > li:nth-child(' + str(i) + ') > a'
# print(sel)
results = r.html.find(sel)
if len(results) == 0:
break;
else:
for result in results:
writeFileStream.write(result.absolute_links.pop()+'\n')
writeFileStream.close()
# #Faculty-information > li:nth-child(3) > a
| 24.444444 | 72 | 0.636364 |
254a5b1fda824a925564dbbe740873888025ca2b | 7,655 | py | Python | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | 1 | 2021-07-26T02:44:00.000Z | 2021-07-26T02:44:00.000Z | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | null | null | null | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | null | null | null | import logging
import discord
from datetime import datetime
from discord.ext import tasks, commands
from discord.ext.commands import Cog
from cogs.utils.utils import json_io_dump, json_io_load
log = logging.getLogger(__name__)
STATUS = 'status'
TIME_STARTED = 'time_started'
NAME = 'name'
GAMES = 'games'
NONE = 'none'
# Reference JSON
# {
# "player_id1": {
# "status": "a string of status",
# "time_started": "time_started_current_status",
# "games":{
# "COD MW2": "time_played",
# "Poop": "time_played"
# }
# },
# "player_id2": {
# "status": "a string of status",
# "time_started": "time_started_current_status",
# "games":{
# "COD MW2": "time_played",
# "Poop": "time_played"
# }
# }
# }
def calculate_days_minutes_seconds(self, seconds):
""" Returns the days hours minutes seconds from seconds """
# years, seconds = seconds // 31556952, seconds % 31556952
# months, seconds = seconds // 2629746, seconds % 2629746
days, seconds = seconds // 86400, seconds % 86400
hours, seconds = seconds // 3600, seconds % 3600
minutes, seconds = seconds // 60, seconds % 60
msg = '{:02d} Days, {:02d} Hours, {:02d} Minutes, {:02d} Seconds'.format(days, hours, minutes, seconds)
if days > 9000:
msg += ' ITS OVER 9000!'
if days == 69:
msg += ' Hah, nice... 69'
return msg
| 40.502646 | 142 | 0.617897 |
254ca1af527eda83d904a3bb25f7ec725799bb3b | 2,578 | py | Python | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T18:06:06.000Z | 2021-06-22T18:06:06.000Z | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | null | null | null | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from .converter import TransformConverter
from .. import linear
| 34.837838 | 114 | 0.564779 |
254d3022845aae3d1a9293a0181f060be7c09b6f | 28 | py | Python | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | from pyqt.utils import time
| 14 | 27 | 0.821429 |
254f90068c187cfc444d126472019f1e35637c92 | 1,373 | py | Python | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 192 | 2017-10-19T18:04:56.000Z | 2019-09-21T23:29:03.000Z | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 120 | 2017-10-16T09:46:07.000Z | 2019-06-20T18:34:24.000Z | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 50 | 2017-10-19T09:57:45.000Z | 2019-07-24T13:46:26.000Z | import os
def readAnn(textfolder="../data/SemEval2017Task10/"):
    '''
    Read SemEval 2017 Task 10 brat annotation (.ann) files and verify that each
    keyphrase span matches the corresponding text in the paired .txt file.

    For every "<name>.ann" file in `textfolder`, the paragraph text is read from
    "<name>.txt" and each keyphrase annotation's (start, end) character offsets
    are looked up in that text.  A message is printed for every span whose text
    does not match the annotated surface form.

    Args:
        textfolder: directory containing paired .ann / .txt files.
    '''
    for fname in os.listdir(textfolder):
        if not fname.endswith(".ann"):
            continue
        # Note: mode "rU" was removed in Python 3.11; plain text mode with
        # `with` blocks also guarantees the handles are closed.
        # Each .txt file holds a single paragraph; keep the last (only) line.
        with open(os.path.join(textfolder, fname.replace(".ann", ".txt"))) as f_text:
            text = ""
            for line in f_text:
                text = line
        # @TODO: collect all keyphrase and relation annotations, create pairs of
        # all keyphrases that appear in the same sentence for USchema style RE
        with open(os.path.join(textfolder, fname)) as f_anno:
            for line in f_anno:
                anno_inst = line.strip().split("\t")
                if len(anno_inst) != 3:
                    continue
                keytype, start, end = anno_inst[1].split(" ")
                # "-of" entries are relation annotations, not keyphrase spans.
                if keytype.endswith("-of"):
                    continue
                # look up span in text and print error message if it doesn't
                # match the .ann span text
                keyphr_text_lookup = text[int(start):int(end)]
                keyphr_ann = anno_inst[2]
                if keyphr_text_lookup != keyphr_ann:
                    print("Spans don't match for anno " + line.strip() + " in file " + fname)
# Script entry point: verify annotation spans in the default data folder.
if __name__ == '__main__':
    readAnn()
25507a35dbe62df6d608b962eb29203e902472af | 5,018 | py | Python | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 10 | 2016-05-25T08:28:39.000Z | 2020-06-04T03:19:50.000Z | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 5 | 2015-12-08T14:01:15.000Z | 2020-01-10T22:42:18.000Z | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 6 | 2015-12-10T17:24:11.000Z | 2021-03-22T16:12:17.000Z | from collections import namedtuple
import os
import sympy
import numpy as np
from means.core.model import Model
_Reaction = namedtuple('_REACTION', ['id', 'reactants', 'products', 'propensity', 'parameters'])
def read_sbml(filename):
    """
    Read the model from a SBML file.

    :param filename: SBML filename to read the model from
    :return: A tuple, consisting of :class:`~means.core.model.Model` instance,
             list of parameter values, and list of initial conditions.
    :raises IOError: if `filename` does not exist.
    :raises ValueError: if the file cannot be parsed as an SBML model.
    """
    import libsbml

    if not os.path.exists(filename):
        raise IOError('File {0!r} does not exist'.format(filename))

    reader = libsbml.SBMLReader()
    document = reader.readSBML(filename)

    sbml_model = document.getModel()
    if not sbml_model:
        raise ValueError('Cannot parse SBML model from {0!r}'.format(filename))

    species = sympy.symbols([s.getId() for s in sbml_model.getListOfSpecies()])
    initial_conditions = [s.getInitialConcentration() for s in sbml_model.getListOfSpecies()]

    compartments = sympy.symbols([s.getId() for s in sbml_model.getListOfCompartments()])
    compartment_sizes = [s.getSize() for s in sbml_model.getListOfCompartments()]

    # Materialise reactions eagerly: under Python 3 `map` returns a lazy
    # iterator, which would break the `len()` call and re-iteration below.
    reactions = [_parse_reaction(r) for r in sbml_model.getListOfReactions()]

    # getListOfParameters is an attribute of the model for SBML Level 1&2
    parameters_with_values = [(sympy.Symbol(p.getId()), p.getValue())
                              for p in sbml_model.getListOfParameters()]
    parameter_values = dict(parameters_with_values)
    parameters = [symbol for symbol, _ in parameters_with_values]
    if not parameters:
        # No global parameters: collect the reaction-local ones instead.
        track_local_parameters = True
        parameters = set()
        parameter_values = {}
    else:
        track_local_parameters = False

    stoichiometry_matrix = np.zeros((len(species), len(reactions)), dtype=int)
    propensities = []
    for reaction_index, reaction in enumerate(reactions):
        if track_local_parameters:
            for param, value in reaction.parameters:
                parameters.add(param)
                parameter_values[param] = value
        reactants = reaction.reactants
        products = reaction.products
        propensities.append(reaction.propensity)
        for species_index, species_id in enumerate(species):
            net_stoichiometry = products.get(species_id, 0) - reactants.get(species_id, 0)
            stoichiometry_matrix[species_index, reaction_index] = net_stoichiometry

    if track_local_parameters:
        # sympy does not allow sorting its parameter lists by default,
        # explicitly tell to sort by str representation
        sorted_parameters = sorted(parameters, key=str)
    else:
        sorted_parameters = parameters

    parameter_values_list = [parameter_values[p] for p in sorted_parameters]

    # We need to concatenate compartment names and parameters as in our
    # framework we cannot differentiate the two
    compartments_and_parameters = compartments + sorted_parameters
    parameter_values_list = compartment_sizes + parameter_values_list

    model = Model(species, compartments_and_parameters, propensities, stoichiometry_matrix)

    return model, parameter_values_list, initial_conditions
2551cc7f888a7265ce1f8beeca110b9348759577 | 1,123 | py | Python | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 2 | 2019-12-04T05:38:17.000Z | 2022-02-17T06:24:23.000Z | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 9 | 2019-11-11T20:01:11.000Z | 2021-09-30T00:41:52.000Z | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 4 | 2017-08-24T00:00:34.000Z | 2021-06-25T16:41:20.000Z | import pytest
import clrenv
| 27.390244 | 81 | 0.715049 |
25536ba36fdcd55ea907e174eeadb755910513a2 | 2,583 | py | Python | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | import random
import pandas as pd
import numpy as np
import json
from tqdm import *
# Convert the QA file json to output dictionary containing premise and hypothesis
# Create the output json dictionary from the input json, premise and hypothesis statement
# Script entry point: convert the CODAH fold-0 training CSV into the JSONL
# statement format expected downstream.
if __name__ == "__main__":
    convert_to_codah_statement('../data/codah/fold_0/train.csv', './data/codah/fold_0/train.jsonl')
    # NOTE(review): the commented-out lines below sketch a train/dev/test split
    # workflow, but all three calls write to the same output path — TODO confirm
    # the intended filenames before re-enabling.
    # train, dev, test = split(full_list, shuffle=True, ratio=0.2)
    # convert_to_codah_statement(train, 'train.statement.jsonl')
    # convert_to_codah_statement(dev, 'train.statement.jsonl')
    # convert_to_codah_statement(test, 'train.statement.jsonl')
    print('Hey, there!')
| 37.434783 | 99 | 0.684863 |
25541a58e6ade5999bf8649b87e0a951c63912f5 | 3,237 | py | Python | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | yaosichao0915/DeepImmuno | a2a7832f6cded9296735475c2e8fa5c9b62b3f8d | [
"MIT"
] | 20 | 2020-12-28T03:34:34.000Z | 2022-03-14T01:36:52.000Z | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | zhangjiahuan17/DeepImmuno | 5ab182429bc3276fd43be2ec8d86b72e773992ef | [
"MIT"
] | 3 | 2021-04-23T19:21:11.000Z | 2021-08-22T00:39:01.000Z | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | zhangjiahuan17/DeepImmuno | 5ab182429bc3276fd43be2ec8d86b72e773992ef | [
"MIT"
] | 11 | 2021-04-23T16:46:29.000Z | 2022-03-18T15:53:55.000Z | '''
pip install Scrapy
pip install selenium
In a folder:
scrapy startproject imgt
when running:
scrapy crawl new_imgt -o out.json
when using scrapy shell:
scrapy shell 'url'
in Ipython, you can use response.xpath or response.css to try out
object:
1. selectorlist if css('a') and there are a lot of 'a'
2. selector it will have css and xpath method
3. reponse
conda activate selenium
remember to make changes to the Python script under the spider folder
'''
'''
If encounter robot blockage error:
open setting.py and change the robot setting to False
you can specify hla in __init__, and then when call:
scrapy crawl new_imgt -a hla="HLA-A*0101" -o out.json
When encounter dynamic page, use selenium to get the page and pass it to scrapy response object
Double check using both 'inspect' and 'see source code' in a webpage, they can be different
'''
'''
cat inventory_compliant.txt | while read line; do scrapy crawl new_imgt -a hla="$line" -o "./hla_paratope/$line.json"; done
'''
import scrapy
from scrapy.crawler import CrawlerProcess
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
# if using process, you can just run a python new_imgt_spider.py
# process = CrawlerProcess()
# process.crawl(imgtSpider)
# process.start() | 33.030612 | 139 | 0.666976 |
25582433fc1391299fee92277e679b595fa40a57 | 1,667 | py | Python | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.script.provider
__all__ = ['ScriptFrameworkErrorType']
| 30.87037 | 165 | 0.721656 |
25582a95ad549fbb53f7bc9394341328228fcce8 | 38,786 | py | Python | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 171 | 2020-01-30T16:58:07.000Z | 2022-03-27T22:12:17.000Z | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 14 | 2021-05-15T02:12:09.000Z | 2022-03-16T04:16:18.000Z | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 5 | 2021-03-01T20:52:13.000Z | 2022-03-07T06:35:03.000Z | #!/usr/bin/python3
# (c) Robert Muth - see LICENSE for more info
from typing import List, Dict
import enum
from Util import cgen
# maximum number of operands in an instruction
MAX_OPERANDS = 5
# maximum number of function parameters (or results)
MAX_PARAMETERS = 64
############################################################
# Opcode Families [OF.]
#
# Each Opcode belongs to one of the families below.
# Within each family the order and kind of the operands is similar
############################################################
_OF_TO_PURPOSE = {
OPC_KIND.ALU: ["dst", "src1", "src2"],
OPC_KIND.ALU1: ["dst", "src"],
OPC_KIND.COND_BRA: ["op1", "op2", "target_bbl"],
OPC_KIND.SWITCH: ["index", "table"],
OPC_KIND.BRA: ["target_bbl"],
OPC_KIND.RET: [],
OPC_KIND.BSR: ["target_fun"],
OPC_KIND.JSR: ["target_fun_addr", "target_fun_sig"],
OPC_KIND.SYSCALL: ["target_fun_sig", "syscall_no"],
OPC_KIND.LEA: ["dst", "base", "offset"],
OPC_KIND.LEA1: ["dst", "base"],
OPC_KIND.LD: ["dst", "base", "offset"],
OPC_KIND.ST: ["base", "offset", "src"],
OPC_KIND.NOP: [],
OPC_KIND.NOP1: ["src_and_dst"],
OPC_KIND.BZERO: ["dst_addr", "width"],
OPC_KIND.BCOPY: ["dst_addr", "src_addr", "width"],
OPC_KIND.POPARG: ["dst"],
OPC_KIND.PUSHARG: ["src"],
OPC_KIND.CONV: ["dst", "src"],
OPC_KIND.MOV: ["dst", "src"],
OPC_KIND.CMP: ["dst", "src1", "src2", "cmp1", "cmp2"],
}
_OFS_CFG = {OPC_KIND.BSR, OPC_KIND.JSR, OPC_KIND.SYSCALL, OPC_KIND.SWITCH,
OPC_KIND.BRA, OPC_KIND.COND_BRA, OPC_KIND.RET}
# These instructions do not have a written register
_OFS_NO_DEF = _OFS_CFG | {OPC_KIND.ST, OPC_KIND.BCOPY, OPC_KIND.BZERO,
OPC_KIND.PUSHARG, OPC_KIND.NOP}
# These instructions have a written register
_OFS_WRITING_REGS = {
OPC_KIND.LEA, OPC_KIND.LEA1, OPC_KIND.ALU, OPC_KIND.ALU1, OPC_KIND.CMP,
OPC_KIND.MOV, OPC_KIND.CONV, OPC_KIND.LD,
OPC_KIND.POPARG, OPC_KIND.NOP1}
OAS_CFG = OA.CALL | OA.BBL_TERMINATOR
OAS_SIDE_EFFECT = OA.CALL | OA.BBL_TERMINATOR | OA.MEM_RD | OA.MEM_WR | OA.SPECIAL
############################################################
# Operand Kinds [OK.]
#
# Each instruction operates on a list of operands. Since we mimic a
# three address machine, ALU instructions usually have 3 operands,
# the destination being the first one.
# There is a large variety of operands denoting registers or immediates
# which enable some basic typing on a per operand basis.
# Additional typing constraints across the operands are enforced by "rules".
############################################################
############################################################
# Type Constraints
############################################################
############################################################
# DataType Flavors
############################################################
DK_FLAVOR_S = 0x20 # signed int
DK_FLAVOR_U = 0x40 # unsigned int
DK_FLAVOR_F = 0x60 # ieee floating point
DK_FLAVOR_A = 0x80 # data address
DK_FLAVOR_C = 0xa0 # code address
_DK_WIDTH_8 = 0
_DK_WIDTH_16 = 1
_DK_WIDTH_32 = 2
_DK_WIDTH_64 = 3
_DK_WIDTH_128 = 4
SHORT_STR_TO_RK = {x.name: x for x in DK} # this does contain the aliases
TC_TO_CHECKER = {
TC.ANY: lambda x: True,
TC.ADDR_NUM: lambda x: x.flavor() != DK_FLAVOR_C,
TC.NUM: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S, DK_FLAVOR_F},
TC.INT: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S},
TC.ADDR: lambda x: x.flavor() == DK_FLAVOR_A,
TC.CODE: lambda x: x.flavor() == DK_FLAVOR_C,
TC.SINT: lambda x: x.flavor() == DK_FLAVOR_S,
TC.UINT: lambda x: x.flavor() == DK_FLAVOR_U,
TC.ADDR_INT: RegIsAddrInt,
TC.FLT: lambda x: x.flavor() == DK_FLAVOR_F,
TC.OFFSET: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S},
# maybe change this to just U or S
}
SHORT_STR_TO_MK = {x.name: x for x in MEM_KIND}
SHORT_STR_TO_FK = {x.name: x for x in FUN_KIND}
############################################################
# Operand Value Kind Sets
############################################################
OKS_LIST = {OP_KIND.BYTES, OP_KIND.NAME_LIST, OP_KIND.BBL_TAB,
OP_KIND.TYPE_LIST}
OKS_ALLOWED_FOR_INSTRUCTIONS = {OP_KIND.REG, OP_KIND.CONST,
OP_KIND.REG_OR_CONST,
OP_KIND.FUN, OP_KIND.BBL, OP_KIND.JTB,
OP_KIND.MEM, OP_KIND.STK, OP_KIND.FIELD}
# we do not want non-scalar operands in instructions as they
# increase memory usage and complicate the code
assert not (OKS_LIST & OKS_ALLOWED_FOR_INSTRUCTIONS)
OKS_ALLOWED_FOR_DIRECTIVES = {OP_KIND.INT, OP_KIND.MEM_KIND, OP_KIND.BYTES,
OP_KIND.NAME, OP_KIND.BBL_TAB,
OP_KIND.FUN_KIND, OP_KIND.TYPE_LIST,
OP_KIND.NAME_LIST, OP_KIND.DATA_KIND, OP_KIND.FUN,
OP_KIND.MEM, OP_KIND.BBL
}
OKS_ALL = OKS_ALLOWED_FOR_INSTRUCTIONS | OKS_ALLOWED_FOR_DIRECTIVES
############################################################
# Opcode Groups
############################################################
_DIR_TO_PURPOSE = {
".mem": ["name", "alignment", "mem_kind"],
".data": ["repeat", "data"],
".addr.fun": ["width", "fun"],
".addr.mem": ["width", "mem", "offset"],
".fun": ["name", "fun_kind", "out_params", "in_params"],
".bbl": ["name"],
".reg": ["reg_kind", "names"],
".stk": ["name", "alignment", "size"],
".jtb": ["name", "size", "default_bbl", "map"],
".struct": ["name"],
".field": ["name", "alignment", "size"],
".endstruct": [],
".stk.s": ["name", "name"],
}
############################################################
# Opcode
############################################################
############################################################
# ARITHMETIC ALU 0x10
# FLOAT + INT
# note: limited address arithmetic allowed
ADD = Opcode(0x10, "add", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Addition: dst := src1 + src2",
OA.COMMUTATIVE)
# note: limited address arithmetic allowed
SUB = Opcode(0x11, "sub", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Subtraction: dst := src1 - src2
Note: `sub dst = 0 src` can be used to emulate `neg` for integers.
(for floating point use `dat = mul src -1.0`)
""")
# needs more work wrt to size
MUL = Opcode(0x12, "mul", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Multiplication: dst := src1 \\* src2",
OA.COMMUTATIVE)
DIV = Opcode(0x13, "div", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Division: dst := src1 / src2
Some day the operation might be more strictly defined as:
dst := 0 if src2 == 0 else src1 / src2""")
# cf.:
# https://www.gingerbill.org/article/2020/01/25/a-reply-to-lets-stop-copying-c/
REM = Opcode(0x14, "rem", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Modulo: dst := a % b
Some day the sign of the result might be more strictly defined.
Note: does not apply to floating point numbers""")
COPYSIGN = Opcode(0x15, "copysign", OPC_KIND.ALU, [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Set the sign of src1 to match src2 (floating point only)
Note: `copysign dst src1 0.0` can be used to emulate `abs`""")
############################################################
# LOGIC ALU 0x30
# INT ONLY (all regs are treated as unsigned except for shr/rshr
XOR = Opcode(0x18, "xor", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Bitwise exclusive or: dst := src1 ^ src2
Note: `xor dst = src1 0b111...1` can be used to emulate `not`""",
OA.COMMUTATIVE)
# note: limited address arithmetic allowed
AND = Opcode(0x19, "and", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Bitwise and: dst := src1 & src2",
OA.COMMUTATIVE)
# note: limited address arithmetic allowed
OR = Opcode(0x1a, "or", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Bitwise or: dst := src1 | src2",
OA.COMMUTATIVE)
# shift amount is determined as follows:
# use the log2(width(dst)) low order bits of src2
# e.g. for a dst of kind s8 the low order 3 bits of
# src2 will be used.
# src2 is treated as an unsigned register
SHL = Opcode(0x1b, "shl", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Shift left: dst := src1 << src2
dst: = src1 << (src2 % bitwidth(src1))""")
SHR = Opcode(0x1c, "shr", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Shift right: dst := src1 >> src2
dst: = src1 >> (src2 % bitwidth(src1))""")
# do we need both directions, do we need a reverse version?
# should we rather use a funnel shift?
# ROTL = Opcode(0x1d, "rotl", OPC_KIND.ALU,
# [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
# [TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.TBD,
# "Rotation Left")
############################################################
# CONDITIONAL BRANCHES 0x20
# do we need unordered variants for floating point?
# not beq/bne is the only operation for c_regs
BEQ = Opcode(0x20, "beq", OPC_KIND.COND_BRA,
[OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
[TC.ANY, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
"Conditional branch if equal.",
OA.COMMUTATIVE | OA.BBL_TERMINATOR)
BNE = Opcode(0x21, "bne", OPC_KIND.COND_BRA,
[OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
[TC.ANY, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
"Conditional branch if not equal.",
OA.COMMUTATIVE | OA.BBL_TERMINATOR)
BLT = Opcode(0x22, "blt", OPC_KIND.COND_BRA,
[OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
[TC.ADDR_NUM, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
"Conditional branch if greater than.",
OA.BBL_TERMINATOR)
BLE = Opcode(0x23, "ble", OPC_KIND.COND_BRA,
[OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
[TC.ADDR_NUM, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
"Conditional branch if less or equal.",
OA.BBL_TERMINATOR)
############################################################
# More Control Flow 0x28
SWITCH = Opcode(0x28, "switch", OPC_KIND.SWITCH, [OP_KIND.REG, OP_KIND.JTB],
[TC.UINT, TC.INVALID], OPC_GENUS.BASE,
"""Multi target computed jump.
The register argument must be less than the jtb `size`.
The jtb symbol must have been previously defined with the `.jtb` directive.
""",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
BRA = Opcode(0x29, "bra", OPC_KIND.BRA, [OP_KIND.BBL],
[TC.INVALID], OPC_GENUS.BASE,
"Unconditional branch.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
RET = Opcode(0x2a, "ret", OPC_KIND.RET, [],
[], OPC_GENUS.BASE,
"Return from subroutine.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
BSR = Opcode(0x2b, "bsr", OPC_KIND.BSR, [OP_KIND.FUN],
[TC.INVALID], OPC_GENUS.BASE,
"Branch to subroutine fun",
OA.CALL)
JSR = Opcode(0x2c, "jsr", OPC_KIND.JSR, [OP_KIND.REG, OP_KIND.FUN],
[TC.CODE, TC.INVALID], OPC_GENUS.BASE,
"""Jump indirectly to subroutine through register (fun describes the signature).
The signature must have been previously defined with the `.fun` directive.""",
OA.CALL)
SYSCALL = Opcode(0x2d, "syscall", OPC_KIND.SYSCALL,
[OP_KIND.FUN, OP_KIND.CONST],
[TC.INVALID, TC.UINT], OPC_GENUS.BASE,
"""Syscall to `syscall_no`. (fun describes the signature).
The signature must have been previously defined with the `.fun` directive.""",
OA.CALL)
TRAP = Opcode(0x2e, "trap", OPC_KIND.RET, [],
[], OPC_GENUS.BASE,
"Abort program.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
############################################################
# Misc 0x30
PUSHARG = Opcode(0x30, "pusharg", OPC_KIND.PUSHARG, [OP_KIND.REG_OR_CONST],
[TC.ANY], OPC_GENUS.BASE,
"push a call or return arg - must immediately precede bsr/jsr or ret.",
OA.SPECIAL)
POPARG = Opcode(0x31, "poparg", OPC_KIND.POPARG, [OP_KIND.REG],
[TC.ANY], OPC_GENUS.BASE,
"pop a call or return arg - must immediately follow fun entry or bsr/jsr.",
OA.SPECIAL)
CONV = Opcode(0x32, "conv", OPC_KIND.CONV, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.NUM], OPC_GENUS.BASE,
# TODO: specify rounding and overflow for float <-> int conversions
"""Conversion of numerical regs which do not have to be of same size. Bits may change.
If the conversion involves both a widening and a change of type, the widening is performed
first. """)
BITCAST = Opcode(0x33, "bitcast", OPC_KIND.CONV,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_SIZE_AS_PREV], OPC_GENUS.BASE,
"""Cast between regs of same size. Bits will be re-interpreted but do not change.
This is useful for manipulating addresses in unusual ways or
looking at the binary representation of floats.""")
MOV = Opcode(0x34, "mov", OPC_KIND.MOV, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Move between registers.
While a mov can be emulated via a `add dst = src 0`,
having a dedicated instruction makes some optimizations easier to
implement when combined with a canonicalization.""")
CMPEQ = Opcode(0x35, "cmpeq", OPC_KIND.CMP,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST,
OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV, TC.SAME_AS_PREV, TC.ANY,
TC.SAME_AS_PREV],
OPC_GENUS.BASE,
"""Conditional move (compare equal). dst := (cmp1 == cmp2) ? src1 : src2
Note: dst/cmp1/cmp2 may be of a different type than src1/src2.""",
OA.COMMUTATIVE)
CMPLT = Opcode(0x36, "cmplt", OPC_KIND.CMP,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST,
OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV, TC.SAME_AS_PREV, TC.ADDR_NUM,
TC.SAME_AS_PREV],
OPC_GENUS.BASE,
"""Conditional move (compare less than). dst := (cmp1 < cmp2) ? src1 : src2
Note: dst/cmp1/cmp2 may be of a different type than src1/src2.""")
# materialize addresses in a register
LEA = Opcode(0x38, "lea", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.SAME_AS_PREV, TC.OFFSET], OPC_GENUS.BASE,
"""Load effective Address. dst := base + offset
Note: dst and base are addresses but offset is not.""")
LEA_MEM = Opcode(0x39, "lea.mem", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.MEM, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load effective memory address with offset, dst := base + offset")
LEA_STK = Opcode(0x3a, "lea.stk", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.STK, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load effective stack address with offset. dst := base + offset")
LEA_FUN = Opcode(0x3b, "lea.fun", OPC_KIND.LEA1, [OP_KIND.REG, OP_KIND.FUN],
[TC.CODE, TC.INVALID], OPC_GENUS.BASE,
"Load effective function address: dst := base (note: no offset).")
############################################################
# LOAD STORE 0x60
# ld/st base address is in register, offset is immediate
# ld/st base address is register
LD = Opcode(0x40, "ld", OPC_KIND.LD,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.ADDR, TC.OFFSET], OPC_GENUS.BASE,
"Load from register base with offset. dst := RAM[base + offset]",
OA.MEM_RD)
# note: signedness of offset may matter here
LD_MEM = Opcode(0x41, "ld.mem", OPC_KIND.LD,
[OP_KIND.REG, OP_KIND.MEM, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load from memory base with offset. dst := RAM[base + offset] ",
OA.MEM_RD)
LD_STK = Opcode(0x42, "ld.stk", OPC_KIND.LD,
[OP_KIND.REG, OP_KIND.STK, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load from stack base with offset. dst := RAM[base + offset]",
OA.MEM_RD)
ST = Opcode(0x48, "st", OPC_KIND.ST,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
"Store to register base with offset. RAM[base + offset] := src",
OA.MEM_WR)
ST_MEM = Opcode(0x49, "st.mem", OPC_KIND.ST,
[OP_KIND.MEM, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INVALID, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
"Store to memory base with offset. RAM[base + offset] := src",
OA.MEM_WR)
ST_STK = Opcode(0x4a, "st.stk", OPC_KIND.ST,
[OP_KIND.STK, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INVALID, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
"Store to stack base with offset. RAM[base + offset] := src",
OA.MEM_WR)
############################################################
# FLOAT ALU OPERAND: 0x50
CEIL = Opcode(0x50, "ceil", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, toward positive infinity")
FLOOR = Opcode(0x51, "floor", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, toward negative infinity")
ROUND = Opcode(0x52, "round", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, to nearest with ties to away")
TRUNC = Opcode(0x53, "trunc", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""
Round float to integral, toward zero.
Note, frac(val) = val - trunc(val)""")
SQRT = Opcode(0x54, "sqrt", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Compute the sqrt of floating point value")
# do we need all these?
Opcode(0x58, "sin", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x59, "cos", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5a, "tan", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5b, "asin", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5c, "acos", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5d, "atan", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5e, "exp", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5f, "log", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
############################################################
# Advanced ALU
############################################################
CNTLZ = Opcode(0x60, "cntlz", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Count leading zeros.")
CNTTZ = Opcode(0x61, "cnttz", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Count trailing zeros.")
# INT SINGLE OPERAND 0xb0
# the src reg is treated as an unsigned reg
Opcode(0x62, "cntpop", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
############################################################
# Annotations
############################################################
NOP = Opcode(0x70, "nop", OPC_KIND.NOP, [],
[], OPC_GENUS.BASE,
"nop - internal use.")
NOP1 = Opcode(0x71, "nop1", OPC_KIND.NOP1, [OP_KIND.REG],
[TC.ANY], OPC_GENUS.BASE,
"nop with one reg - internal use. Can be used to `reserve` a reg for code generation.",
OA.SPECIAL)
# LINE = Opcode(0x78, "line", OPC_KIND., [OP_KIND.NAME, OP_KIND.CONST],
# [TC.ANY], OPC_GENUS.BASE,
# "",
# OA.SPECIAL)
############################################################
# Misc Experimental
############################################################
# Note, negative lengths copy downwards
Opcode(0xb8, "bcopy", OPC_KIND.BCOPY,
[OP_KIND.REG, OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.SAME_AS_PREV, TC.OFFSET], OPC_GENUS.TBD,
"TBD",
OA.MEM_WR | OA.MEM_RD)
# Note, negative lengths copy downwards
Opcode(0xba, "bzero", OPC_KIND.BZERO, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.OFFSET], OPC_GENUS.TBD,
"TBD",
OA.MEM_WR)
############################################################
# Directives 0xd
#
# do not correspond to instructions
############################################################
Directive(0x01, ".mem", [OP_KIND.NAME, OP_KIND.INT, OP_KIND.MEM_KIND],
"Add new memory region to unit")
Directive(0x02, ".data", [OP_KIND.INT, OP_KIND.BYTES],
"Add content to current memory region: multiple bytes")
Directive(0x03, ".addr.fun", [OP_KIND.INT, OP_KIND.FUN],
"Add content to current memory region: code address")
Directive(0x04, ".addr.mem", [OP_KIND.INT, OP_KIND.MEM, OP_KIND.INT],
"Add content to current memory region: "
"memory address with offset")
Directive(0x05, ".fun", [OP_KIND.NAME, OP_KIND.FUN_KIND, OP_KIND.TYPE_LIST,
OP_KIND.TYPE_LIST],
"Add new function to unit")
Directive(0x06, ".bbl", [OP_KIND.NAME],
"Add new basic block to current function")
Directive(0x07, ".reg", [OP_KIND.DATA_KIND, OP_KIND.NAME_LIST],
"Add new registers to current function")
Directive(0x08, ".stk", [OP_KIND.NAME, OP_KIND.INT, OP_KIND.INT],
"Add stack region to current function")
Directive(0x09, ".jtb",
[OP_KIND.NAME, OP_KIND.INT, OP_KIND.BBL, OP_KIND.BBL_TAB],
"bbl jump table: <name> <size> <default-bbl> <sparse-table>")
############################################################
# experimental/unimplemented
############################################################
# add/sub/rotate with carry for legalizing say 64bit regs into pairs of 32bit regs
# unreachable
# swap
# unordered comparison
# https://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
# conv int - flt (urgent)
# conv int - int (urgent)
# extract (urgent)
# insert (urgent)
# ld_l, st_C, cmpxch, cmpswp
# pow, pow2 powi
# log
# crc32c (supported by x86-64 and arm64 - using 0x1EDC6F41)
# aes ???
# ld.scaled /st.scaled: base_reg + index_reg * scale imm + offset_imm
# copysign
# prefetch
# other built-ins: cf.:
# https://github.com/llvm-mirror/compiler-rt/tree/master/lib/builtins
_GROUPS = {
0x01: "## Directives\n",
0x10: "## Basic ALU\n",
0x20: "## Conditional Branches\n",
0x28: "## Other Control Flow\n",
0x30: "## Move/Conversion\n",
0x38: "## Address Arithmetic\n",
0x40: "## Load\n",
0x48: "## Store\n",
0x50: "## Float ALU\n",
0x60: "## Advanced ALU\n",
0x70: "## Annotation\n",
0xf1: "## Misc\n",
}
# Command-line entry point: regenerate derived artefacts from the opcode
# tables.  "documentation" / "gen_h" / "gen_c" rewrite marked sections of the
# stream piped through stdin/stdout via cgen.ReplaceContent; any other
# argument falls back to Dump().  With no arguments nothing runs at all —
# NOTE(review): presumably a bare invocation should also Dump(); confirm.
if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        if sys.argv[1] == "documentation":
            cgen.ReplaceContent(_render_documentation, sys.stdin, sys.stdout)
        elif sys.argv[1] == "gen_h":
            cgen.ReplaceContent(_render_h, sys.stdin, sys.stdout)
        elif sys.argv[1] == "gen_c":
            cgen.ReplaceContent(_render_c, sys.stdin, sys.stdout)
        else:
            Dump()
255944c391999d6773e34c522056f9b52e9a85c5 | 784 | py | Python | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 3.2.7 on 2021-10-27 09:40
from django.db import migrations, models
| 25.290323 | 65 | 0.477041 |
2559a4a18ffbdf9ee00369efeb0eacf72905221a | 3,447 | py | Python | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2019-09-20T08:03:47.000Z | 2021-05-10T11:02:09.000Z | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: LAMARCK_ML/data_util/TypeShape.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from LAMARCK_ML.data_util import Shape_pb2 as LAMARCK__ML_dot_data__util_dot_Shape__pb2
from LAMARCK_ML.data_util import DType_pb2 as LAMARCK__ML_dot_data__util_dot_DType__pb2
# File descriptor for TypeShape.proto. The serialized_pb blob is the
# compiled FileDescriptorProto emitted by protoc (see the "DO NOT EDIT"
# header above) -- do not modify it by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='LAMARCK_ML/data_util/TypeShape.proto',
  package='LAMARCK_ML',
  syntax='proto3',
  serialized_pb=_b('\n$LAMARCK_ML/data_util/TypeShape.proto\x12\nLAMARCK_ML\x1a LAMARCK_ML/data_util/Shape.proto\x1a LAMARCK_ML/data_util/DType.proto\"w\n\x0eTypeShapeProto\x12\x0f\n\x07id_name\x18\x01 \x01(\t\x12)\n\tdtype_val\x18\x02 \x01(\x0b\x32\x16.LAMARCK_ML.DTypeProto\x12)\n\tshape_val\x18\x03 \x01(\x0b\x32\x16.LAMARCK_ML.ShapeProtob\x06proto3')
  ,
  dependencies=[LAMARCK__ML_dot_data__util_dot_Shape__pb2.DESCRIPTOR,LAMARCK__ML_dot_data__util_dot_DType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Message descriptor for LAMARCK_ML.TypeShapeProto with three fields:
# id_name (field 1, string), dtype_val (field 2, message),
# shape_val (field 3, message).
_TYPESHAPEPROTO = _descriptor.Descriptor(
  name='TypeShapeProto',
  full_name='LAMARCK_ML.TypeShapeProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id_name', full_name='LAMARCK_ML.TypeShapeProto.id_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dtype_val', full_name='LAMARCK_ML.TypeShapeProto.dtype_val', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='shape_val', full_name='LAMARCK_ML.TypeShapeProto.shape_val', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=120,
  serialized_end=239,
)

# Link the message-typed fields to the descriptors imported from the
# DType/Shape generated modules.
_TYPESHAPEPROTO.fields_by_name['dtype_val'].message_type = LAMARCK__ML_dot_data__util_dot_DType__pb2._DTYPEPROTO
_TYPESHAPEPROTO.fields_by_name['shape_val'].message_type = LAMARCK__ML_dot_data__util_dot_Shape__pb2._SHAPEPROTO
DESCRIPTOR.message_types_by_name['TypeShapeProto'] = _TYPESHAPEPROTO

# Concrete message class, synthesized from the descriptor at import time.
TypeShapeProto = _reflection.GeneratedProtocolMessageType('TypeShapeProto', (_message.Message,), dict(
  DESCRIPTOR = _TYPESHAPEPROTO,
  __module__ = 'LAMARCK_ML.data_util.TypeShape_pb2'
  # @@protoc_insertion_point(class_scope:LAMARCK_ML.TypeShapeProto)
  ))
_sym_db.RegisterMessage(TypeShapeProto)

# @@protoc_insertion_point(module_scope)
| 38.730337 | 354 | 0.78387 |
255a4a642a2b2e33a26ec84bb18d2413e8e4b098 | 31,149 | py | Python | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, session, redirect, url_for
import pymysql.cursors
import datetime
from pyecharts import options as opts
from pyecharts.charts import Pie,Bar
from appdef import *
#Get the airline the staff member works for
#Make sure that the user is actually staff before performing any operations
| 40.400778 | 129 | 0.596969 |
255a812a0890850fc537c0377c504771edd7d281 | 261 | py | Python | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 42 | 2020-11-12T11:34:29.000Z | 2022-01-17T11:40:29.000Z | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 1 | 2021-06-09T11:41:49.000Z | 2021-06-09T11:41:49.000Z | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 2 | 2021-03-17T18:16:15.000Z | 2021-06-08T17:29:38.000Z | from fastapi import FastAPI
# Support running both as a package module and as a standalone script:
# the absolute import works when launched from inside the app/ directory,
# the relative import when the package is imported normally.
# Fix: catch only ImportError -- the original bare ``except:`` also
# swallowed SystemExit/KeyboardInterrupt and masked unrelated errors
# raised while importing the routes modules.
try:
    from routes import analytics, templates, auth
except ImportError:
    from .routes import analytics, templates, auth

app = FastAPI()

# Mount the route groups on the application.
app.include_router(analytics.router)
app.include_router(templates.router)
app.include_router(auth.authr)
255c0c175a4464c1c647d5f7bc88a0e78a8ca610 | 873 | py | Python | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 1 | 2020-02-09T14:15:11.000Z | 2020-02-09T14:15:11.000Z | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 12 | 2018-04-16T09:09:40.000Z | 2020-04-15T07:09:15.000Z | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 7 | 2018-04-07T01:43:48.000Z | 2020-07-23T08:12:37.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
from future import standard_library
standard_library.install_aliases()
import query_releases
import unittest
# Run the standard unittest runner when this module is executed directly
# (e.g. ``python test_query_releases.py``).
if __name__ == "__main__":
    unittest.main()
| 27.28125 | 86 | 0.719359 |
255d364d93e590ec6297742e59e58cf8fe8ad6e3 | 1,211 | py | Python | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | # simplePendulum.py
# Model of a simple pendulum
import math
def simplePendulum(length = 1, angle = math.pi/4, angular_velocity = 0, DT = 0.0001, simLength = 12):
    """Integrate a simple pendulum, theta'' = -(g/L)*sin(theta), with
    the explicit Euler method.

    Parameters:
        length: pendulum length L (same units as g's meters).
        angle: initial angular displacement theta, radians.
        angular_velocity: initial angular velocity omega, rad/s.
        DT: integration time step, seconds.
        simLength: total simulated time, seconds.

    Returns a 4-tuple of parallel lists sampled every DT:
        (times, angles, angular velocities, angular accelerations).
    """
    g = 9.81  # gravitational acceleration, m/s^2
    steps = int(simLength / DT) + 1

    theta = angle
    omega = angular_velocity
    alpha = -g * math.sin(theta) / length

    times = [0]
    thetas = [theta]
    omegas = [omega]
    alphas = [alpha]

    for step in range(1, steps):
        times.append(step * DT)
        # Explicit Euler: advance position with the old velocity, then the
        # velocity with the acceleration from the previous angle.
        theta = theta + omega * DT
        thetas.append(theta)
        omega = omega + alpha * DT
        omegas.append(omega)
        alpha = -g * math.sin(theta) / length
        alphas.append(alpha)

    return times, thetas, omegas, alphas