blob_id (string, len 40) | language (string, 1 class) | repo_name (string, len 5-133) | path (string, len 2-333) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, len 0-67) | license_type (string, 2 classes) | text (string, len 12-5.47M) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
e09c7099fa358b374bb80eac3b6f80d611f99860
|
Python
|
josephdouce/alexa-find-any-film
|
/aws lambda deployment package/yaep/yaep.py
|
UTF-8
| 1,568 | 3.078125 | 3 |
[] |
no_license
|
"""
YAEP - Yet Another Environment Package
"""
import os
import ConfigParser
from .utils import SectionHeader, str_to_bool
from .exceptions import UnsetException
def env(key, default=None, convert_booleans=True, boolean_map=None,
sticky=False, type_class=unicode):
"""
Retrieves key from the environment.
"""
value = os.getenv(key, default)
if value == default and default == UnsetException:
raise UnsetException('{} was unset, but is required.'.format(
key
))
if sticky and value == default:
os.environ[key] = str(value)
if convert_booleans and isinstance(value, str):
value = str_to_bool(value, boolean_map)
# This is sort of a weird situation - if we've autoconverted
# a boolean, we don't want to change its type. If somebody
# doesn't want this behavior they should set convert_booleans
# to false.
if isinstance(value, bool):
type_class = bool
# If we've just used the default or if it's None, just return that
if value is None or value == default:
return value
else:
return type_class(value)
def populate_env(env_file='.env'):
    """Loads key/value pairs from an env file (default '.env', overridable
    via the ENV_FILE environment variable) into os.environ."""
env_file = os.getenv('ENV_FILE', env_file)
if os.path.exists(env_file):
with open(env_file) as ef:
env_data = ConfigParser.SafeConfigParser()
env_data.optionxform = str
env_data.readfp(SectionHeader(ef))
for key, value in env_data.items(SectionHeader.header):
os.environ[key] = str(value)
| true |
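A minimal usage sketch for `env` and `populate_env` above, assuming Python 2 (the module imports `ConfigParser` and defaults `type_class` to `unicode`) and assuming yaep's `str_to_bool` maps strings such as `'true'` to booleans; the variable names and values are illustrative only:

```python
import os
os.environ['DEBUG'] = 'true'
os.environ['PORT'] = '8080'

debug = env('DEBUG')                    # 'true' is auto-converted to True
port = env('PORT', type_class=int)      # coerced via type_class -> 8080
retries = env('RETRIES', default=3)     # unset: the default is returned as-is
api_key = env('API_KEY', default=UnsetException)  # unset: raises UnsetException
```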
7a681d83606749261aff6fd3b394998f0f22b311
|
Python
|
martinferianc/SentimentAnalysis-EIE3
|
/src/test.py
|
UTF-8
| 861 | 3 | 3 |
[
"MIT"
] |
permissive
|
from exceptions import NonExistantDataType, OutofBoundsError
from decision_forest import DecisionForest
def testTrees(T, X, mode="var"):
return T.test(X, mode)
def loadTrees(dataType="clean", split=None):
"""Returns a DecisionForest according to the datatype and split that
was input. If the split is None, the full decision forest is returned.
The dataType can be 'clean' or 'noisy'."""
if dataType not in ["clean", "noisy"]:
raise NonExistantDataType("'"+dataType+"' is not a valid data type")
df = DecisionForest()
if split is None:
df.load("forests/decision_forest_{}.forest".format(dataType))
else:
if split < 0 or split > 9:
raise OutofBoundsError("split '" + str(split) + "' is not available.")  # split is an int; convert before concatenating
df.load("forests/decision_forest_{}_{}.forest".format(dataType, split))
return df
| true |
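A short sketch of how `loadTrees` and `testTrees` might be called; the `forests/` paths come from the format strings above, and `X_test` is a placeholder for the caller's feature matrix:

```python
forest = loadTrees(dataType="clean")          # full forest for the clean data
fold = loadTrees(dataType="noisy", split=3)   # fold 3 (splits must be 0-9)
predictions = testTrees(fold, X_test, mode="var")
```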
a54bfa51068b337a7900f694c4cac9766d6b8704
|
Python
|
jier/DL_assignments_2019
|
/assignment_2/part1/vanilla_rnn.py
|
UTF-8
| 2,816 | 2.671875 | 3 |
[] |
no_license
|
################################################################################
# MIT License
#
# Copyright (c) 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Deep Learning Course | Fall 2019
# Date Created: 2019-09-06
################################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import sys
################################################################################
class VanillaRNN(nn.Module):
def __init__(self, seq_length, input_dim, num_hidden, num_classes, batch_size,gradient_check, device):
super(VanillaRNN, self).__init__()
# Initialization here ...
self.seq_length = seq_length
self.device = device
self.num_classes = num_classes
self.input_dim = input_dim
self.batch_size = batch_size
self.num_hidden = num_hidden
self.gradient_check = gradient_check
self.W_hx = nn.Parameter(torch.Tensor(num_hidden, input_dim), requires_grad=True)
self.W_hh = nn.Parameter(torch.Tensor(num_hidden, num_hidden), requires_grad=True)
self.W_hy = nn.Parameter(torch.Tensor(num_hidden, num_classes), requires_grad=True)
if self.gradient_check:
self.grad_hidden_list = []
# Xavier bound
bound = np.sqrt(1 / num_hidden)
# print('bound for xavier: ', bound)
for param in self.parameters():
nn.init.uniform_(param, -bound, bound)
self.b_h = nn.Parameter(torch.zeros(num_hidden, 1), requires_grad=True)
self.b_p = nn.Parameter(torch.zeros(num_classes, 1), requires_grad=True)
self.tanh = nn.Tanh()
def forward(self, x):
# Implementation here ...
hidden = torch.zeros((self.num_hidden, x.shape[0]), requires_grad=True).to(self.device)
for step in range(self.seq_length):
hidden = self.tanh(self.W_hx @ x[:,step].reshape(1, -1) + self.W_hh @ hidden + self.b_h)
if self.gradient_check:
h = torch.zeros((self.num_hidden, x.shape[0]), requires_grad=True).to(self.device)
hidden = h + hidden
self.grad_hidden_list.append(h)
out = self.W_hy.t() @ hidden + self.b_p
return out.t()
| true |
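A minimal forward/backward sketch for the module above. The shapes follow from the forward pass: `x[:, step].reshape(1, -1)` only conforms with `W_hx` of shape `(num_hidden, input_dim)` when `input_dim == 1`, so `x` is assumed to be `(batch, seq_length)` with one scalar per step; all sizes here are illustrative:

```python
import torch
import torch.nn.functional as F

model = VanillaRNN(seq_length=10, input_dim=1, num_hidden=128,
                   num_classes=10, batch_size=32,
                   gradient_check=False, device='cpu')
x = torch.randint(0, 10, (32, 10)).float()   # batch of 32 digit sequences
logits = model(x)                            # (32, 10) after the final out.t()
loss = F.cross_entropy(logits, torch.randint(0, 10, (32,)))
loss.backward()
```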
399f83c02d99c17c639037da3b50323ea29f8674
|
Python
|
deckerego/hack-clock
|
/lib/hackclock/runapp/Libs/Clock.py
|
UTF-8
| 1,980 | 3.078125 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
import threading
import datetime
import time
class Clock(threading.Thread):
    __RESOLUTION = 1000  # base tick interval in milliseconds
__running = True
__waitLock = threading.Lock()
def __init__(self):
threading.Thread.__init__(self)
self.daemon = False
self.tickFunc = None
self.additionalWait = 0
self.timedEvents = []
self.timedEventsIdx = -1
self.atTime(0, 0, None) # The EOF event
self.start()
def __del__(self):
self.__running = False
def __executeEvents(self):
if self.timedEventsIdx > -1:
now = datetime.datetime.now()
currentHash = (now.hour * 60) + now.minute
(timeHash, action) = self.timedEvents[self.timedEventsIdx]
if timeHash == currentHash:
self.timedEventsIdx = (self.timedEventsIdx + 1) % len(self.timedEvents)
if action: action()
def run(self):
while self.__running:
self.__waitLock.acquire()
totalWait = self.__RESOLUTION + self.additionalWait
self.additionalWait = 0
self.__waitLock.release()
if self.tickFunc: self.tickFunc()
self.__executeEvents()
time.sleep(totalWait / 1000.0)
def atTime(self, hour, minute, action):
timeHash = (hour * 60) + minute
event = (timeHash, action)
self.timedEvents.append(event)
self.timedEvents = sorted(self.timedEvents)
now = datetime.datetime.now()
currentHash = (now.hour * 60) + now.minute
# Find the index of the next event that should fire based on the tuple's time key
self.timedEventsIdx = next((self.timedEvents.index(evt) for evt in self.timedEvents if evt[0] >= currentHash), 0)
def onTick(self, tickFunc):
self.tickFunc = tickFunc
def waitAbout(self, seconds):
self.__waitLock.acquire()
self.additionalWait = seconds * 1000
self.__waitLock.release()
| true |
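A hedged sketch of how the `Clock` above might be wired up; the callbacks are placeholders:

```python
def tick():
    print("tick")

def wake_up():
    print("alarm!")

clock = Clock()               # the constructor starts the thread immediately
clock.onTick(tick)            # called about once per __RESOLUTION (1000 ms)
clock.atTime(7, 30, wake_up)  # fires when the wall clock reaches 07:30
clock.waitAbout(5)            # stretch the next tick by roughly 5 seconds
```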
8a8e67659328b0adb763dd8984e16a0147f9feff
|
Python
|
MGoglio2212/StreamLit
|
/ProveDurata.py
|
UTF-8
| 4,248 | 2.734375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 6 12:35:21 2020
@author: gogliom
"""
'''
Doc = convert_pdf_to_txt( r"D:\Altro\RPA\Energy\IREN\TEST CTE\CTE\esempi cte\EnelLuce30.pdf")
Doc = Doc.upper()
'''
##### THE CONVERT_PDF_TO_TXT FUNCTION MUST BE IMPORTED FROM THE OTHER PROGRAM!
import pandas as pd
import re
from ProvePDF import convert_pdf_to_txt
import numpy as np
def Durata(Doc):
PossiblePrice = []
Base = []
#Doc = convert_pdf_to_txt(Doc)
#Doc = Doc.upper()
#written as regular expressions, since . (any character) covers spaces, "E", line breaks, etc.
r1 = 'DURATA'
r2 = 'VALIDIT'
r3 = 'RINNOV'
r4 = 'PER.{,5}MESI'
r5 = 'PER.{,5}ANN'
regex = [r1,r2,r3,r4,r5]
regex = re.compile('|'.join(regex))
Base = [m.start() for m in regex.finditer(Doc)]
Base = pd.DataFrame(Base, columns = ['PositionBase'])
#capture whole numbers (months or years)
d1 = r'\s\d{1,2}\s'
d = [d1] #the regexes could overlap; put the longer ones first
#so that if one of those matches the scan stops there --> SHOULD BE HANDLED BETTER
regexNum = re.compile('|'.join(d))
NumberPos = [m.start() for m in regexNum.finditer(Doc)]
NumberValue = regexNum.findall(Doc)
NumberTuples = list(zip(NumberValue,NumberPos))
PossiblePrice = pd.DataFrame(NumberTuples, columns=['Price', 'Position'])
PossiblePrice['Price'] = PossiblePrice['Price'].str.extract('(\d+)').astype(int)
PossiblePrice = PossiblePrice[PossiblePrice['Price'].isin([1, 2, 3, 4, 6, 12, 18, 24, 36])]  # Price is int after astype(int), so compare against ints
'''
APPROACH WHERE I LOOK FOR €/KWH AND TAKE THE PRECEDING WORD, BUT IN SOME CASES THE TABLES DO NOT SPELL OUT
EURO/KWH NEXT TO THE NUMBER
ii = 0
for ww in Doc.split():
if "€/KWH" in ww or "EURO/KWH" in ww:
pw = Doc.split()[ii-1]
po = Doc.find(Doc.split()[ii-1]+" "+Doc.split()[ii])
nn = pd.DataFrame({'Price': [pw], 'Position': [po]})
PossiblePrice = PossiblePrice.append(nn)
#Positions = Positions + list(ii-1)
ii = ii + 1
#extract the numbers
PossiblePrice['Price'] = PossiblePrice.apply(lambda row: re.findall('-?\d*\,.?\d+', str(row.Price)), axis=1)
#drop any empty strings
PossiblePrice = PossiblePrice[PossiblePrice['Price'].apply(lambda row: len(row)) > 0]
'''
Base['key'] = 0
PossiblePrice['key'] = 0
Durata = Base.merge(PossiblePrice, how='outer')
Durata['dist'] = Durata.apply(lambda row: row.Position - row.PositionBase, axis = 1)
#KEEP POSITIVE DISTANCES (THE NUMBER COMES AFTER THE KEYWORD) OR VERY SMALL NEGATIVE ONES (WHERE THE KEYWORD COMES IMMEDIATELY AFTER THE NUMBER)
Durata = Durata[(Durata['dist'] > - 30) & (Durata['dist'] < 300)]
#check whether the 40 characters before or after refer to a month or a year
dur1_m = r'\bMESE\b'
dur2_m = r'\bMESI\b'
dur_m = [dur1_m, dur2_m]
regexDur_m = re.compile('|'.join(dur_m))
dur1_a = r'\bANNO\b'
dur2_a = r'\bANNI\b'
dur_a = [dur1_a, dur2_a]
regexDur_a = re.compile('|'.join(dur_a))
Durata['Intorno'] = Durata.apply(lambda row: Doc[row.Position-40:row.Position+40], axis = 1)
Durata['Mese'] = np.where(Durata['Intorno'].str.contains(regexDur_m),1,0)
Durata['Anno'] = np.where(Durata['Intorno'].str.contains(regexDur_a),1,0)
#keep only plausible durations (6, 12, 18, 24 months -- 1, 2 years); Price is int here
Dm = Durata[(Durata['Mese'] == 1) & (Durata['Price'].isin([6, 12, 18, 24]))]
Da = Durata[(Durata['Anno'] == 1) & (Durata['Price'].isin([1, 2]))]
Durata = Dm.append(Da)
Durata = Durata.nsmallest(1, 'dist')
if Durata['Anno'].all() == 1:
Durata['Price'] = Durata['Price'].apply(str) + ' anno'
elif Durata['Mese'].all() == 1:
Durata['Price'] = Durata['Price'].apply(str) + ' mesi'
else:
Durata['Price'] = Durata['Price'].apply(str) + ' anno'
#print(Prezzo)
return Durata['Price']
| true |
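The bounded-gap patterns such as `PER.{,5}MESI` carry most of the weight above: `.{,5}` tolerates up to five arbitrary characters between keyword and unit, which survives the spacing noise of PDF extraction. A small self-contained illustration on a made-up uppercased snippet:

```python
import re

regex = re.compile('|'.join(['DURATA', 'VALIDIT', 'RINNOV',
                             'PER.{,5}MESI', 'PER.{,5}ANN']))
doc = "LA FORNITURA HA DURATA DI 24 MESI E SI RINNOVA PER 12 MESI"
for m in regex.finditer(doc):
    print(m.group(), m.start())  # matches DURATA, RINNOV and 'PER 12 MESI'
```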
51d636f222b9273ca4b049b70834dcef7eb7d04e
|
Python
|
yuanxu00/Image-recognition-trash-can
|
/test_camera.py
|
UTF-8
| 599 | 2.578125 | 3 |
[] |
no_license
|
import cv2
print("test start")
camera = cv2.VideoCapture(1)
print("test open")
camera.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT,720)
print("test1")
while True:
ret, image = camera.read()
print("test2")
    # this crop depends on your actual needs:
    # the camera is a fixed-focus camera
    # while the model's input interface is [228,228]
    # so this step makes the captured image square, i.e. [A,A]
image = image[0:780,150:1050]
cv2.imshow('Webcam', image)
print("test3")
    if cv2.waitKey(1) & 0xFF == ord('q'):  # poll keys; press q to leave the loop
        break
print("test3")
camera.release()
cv2.destroyAllWindows()
| true |
a3618cd48ad08024b48f9304ece96c74b1039815
|
Python
|
kevin-kretz/Park-University
|
/CS 152 - Introduction to Python Programming/Unit 6/Assignment 2/programming_assignment_2.py
|
UTF-8
| 2,855 | 4.1875 | 4 |
[] |
no_license
|
"""
Unit 6 - Programming Assignment (2)
By Kevin Kretz | 20 July 2019
Using the data provided in the file "health-no-head.csv", print the values of the appropriate states according to the filters provided by the user.
Also print the states with the lowest and highest number of affected people.
"""
def main():
#get filter information
filter_state = input("Enter state (Empty means all): ")
filter_disease = input("Enter disease (Empty means all): ")
filter_year = input("Enter year (Empty means all): ")
#declare variables for future use
lowest = []
highest = []
total = 0
#print header
print("{:21} {:12} {:12} {:4}\n".format('State', 'Disease', 'Number', 'Year'))
#open file
file = open("health-no-head.csv", "r")
# Process each line of the file
for aline in file:
values = aline.rstrip('\n').split(',')
#rename values for easier readability
state = values[2]
disease = values[0]
number = int(values[3])
year = values[5]
#declare variables for filtering
state_matches = False
disease_matches = False
year_matches = False
line_matches = False
#filter information
#if state filter is empty or current state matches state in filter
if (filter_state == "" or filter_state.lower() == state.lower()):
state_matches = True
#if disease filter is empty or current disease matches disease in filter
if (filter_disease == "" or filter_disease.lower() == disease.lower()):
disease_matches = True
#if year filter is empty or current year matches year in filter
if (filter_year == "" or filter_year.lower() == year.lower()):
year_matches = True
        #if state, disease and year all match the filter information, set line_matches to true
        if state_matches and disease_matches and year_matches:
            line_matches = True
#if current line matches filter information provided by user
if line_matches:
#print line
print ("{:21} {:12} {:6,} {:>9}".format(state, disease, number, year))
#add number to total
total += number
#if number is the new lowest number, change lowest
if len(lowest) == 0 or number < lowest[1]:
lowest = [state, number]
#if number is the new highest, change highest
if len(highest) == 0 or number > highest[1]:
highest = [state, number]
# Close file
file.close()
#print total
print("{:21} {:12} {:6,}".format('', 'Total', total))
#if there was at least one found in the filter, print highest and lowest states.
if len(lowest) != 0:
print("\nLowest")
print("{:21} {:12} {:6,}".format(lowest[0], '', lowest[1]))
if len(highest) != 0:
print("\nHighest")
print("{:21} {:12} {:6,}".format(highest[0], '', highest[1]))
main()
| true |
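For reference, the indexing above implies a column layout for `health-no-head.csv` with the disease in column 0, the state in column 2, the count in column 3, and the year in column 5. A sketch with a made-up row (columns 1 and 4 are unknown placeholders):

```python
aline = "MEASLES,CUMULATIVE,NEW YORK,1024,NY,1928\n"  # hypothetical row
values = aline.rstrip('\n').split(',')
print(values[2], values[0], int(values[3]), values[5])
# NEW YORK MEASLES 1024 1928
```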
7b42360d0430843f69adcb8edef8c47f061cbe01
|
Python
|
jrzmnt/ActionRecognitionSmallDatasets
|
/codes/utils/svm.py
|
UTF-8
| 2,366 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import sys
sys.path.insert(0, '..')
import warnings
warnings.filterwarnings("ignore")
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import os
import argparse
from os.path import join, realpath, dirname, exists, basename, splitext
from sklearn import svm
import numpy as np
import pathfile
import progressbar
def classifier_svm(ftrain, ftest, fileout=None, **kwargs):
"""
Using features from `ftrain`, train a SVM classifier and uses `ftest` to
predict its labels.
Parameters:
-----------
ftrain : string
path to the file containing the frame, the true label and the list of features
ftest : string
path to the file containing the frame, the true label and the list of features
fileout : string
path to the output file
**kwargs : dict
contains the parameters of the SVM classifier to SKlearn
(C, cache_size, class_weight, coef0, decision_function_shape, degree, gamma,
kernel, max_iter, probability, random_state, shrinking, tol, verbose)
"""
ftrain = realpath(ftrain)
ftest = realpath(ftest)
if not fileout:
dirout = dirname(ftrain)
fname, ext = splitext(basename(ftrain))
fileout = join(dirout, fname+'.svm.txt')
# training phase
_, y, X = pathfile.extract_vectors(ftrain, with_paths=False)
logger.info('feeding SVM with data in training phase')
clf = svm.SVC(**kwargs)
clf.fit(X, y)
# testing phase
paths, y_test, X_test = pathfile.extract_vectors(ftest, with_paths=True)
logger.info('predicting classes for testing vector')
pred = clf.predict(X_test)
logger.info('saving output file as: %s' % fileout)
with open(fileout, 'w') as fout:
for path, label, p in zip(paths, y_test, pred):
fout.write('%s %d %d\n' % (path, label, p))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filetrain', metavar='file_train',
help='file containing training examples')
parser.add_argument('filetest', metavar='file_test',
help='file containing test examples')
args = parser.parse_args()
classifier_svm(args.filetrain, args.filetest)
| true |
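A sketch of calling `classifier_svm` directly; the paths are placeholders, the keyword arguments go straight through to `sklearn.svm.SVC`, and the input files are assumed to be in whatever format the project's `pathfile.extract_vectors` expects (mirroring the `path label prediction` lines the function writes out):

```python
classifier_svm('features/train.txt', 'features/test.txt',
               fileout='results/predictions.svm.txt',
               kernel='rbf', C=10.0, gamma='scale')
```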
d61b8b4662108a65ac5710d0e2899901b1e4b1c5
|
Python
|
thonny/thonny
|
/thonny/plugins/micropython/base_api_stubs/usocket.pyi
|
UTF-8
| 18,366 | 2.734375 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
"""
socket.
Descriptions taken from:
https://raw.githubusercontent.com/micropython/micropython/master/docs/library/socket.rst.
******************************
.. module:: socket
:synopsis: socket module
|see_cpython_module| :mod:`python:socket`.
This module provides access to the BSD socket interface.
.. admonition:: Difference to CPython
:class: attention
For efficiency and consistency, socket objects in MicroPython implement a `stream`
(file-like) interface directly. In CPython, you need to convert a socket to
a file-like object using `makefile()` method. This method is still supported
by MicroPython (but is a no-op), so where compatibility with CPython matters,
be sure to use it.
Socket address format(s)
------------------------
The native socket address format of the ``socket`` module is an opaque data type
returned by `getaddrinfo` function, which must be used to resolve textual address
(including numeric addresses)::
sockaddr = socket.getaddrinfo('www.micropython.org', 80)[0][-1]
# You must use getaddrinfo() even for numeric addresses
sockaddr = socket.getaddrinfo('127.0.0.1', 80)[0][-1]
# Now you can use that address
sock.connect(sockaddr)
Using `getaddrinfo` is the most efficient (both in terms of memory and processing
power) and portable way to work with addresses.
However, ``socket`` module (note the difference with native MicroPython
``socket`` module described here) provides CPython-compatible way to specify
addresses using tuples, as described below. Note that depending on a
:term:`MicroPython port`, ``socket`` module can be builtin or need to be
installed from `micropython-lib` (as in the case of :term:`MicroPython Unix port`),
and some ports still accept only numeric addresses in the tuple format,
and require to use `getaddrinfo` function to resolve domain names.
Summing up:
* Always use `getaddrinfo` when writing portable applications.
* Tuple addresses described below can be used as a shortcut for
quick hacks and interactive use, if your port supports them.
Tuple address format for ``socket`` module:
* IPv4: *(ipv4_address, port)*, where *ipv4_address* is a string with
dot-notation numeric IPv4 address, e.g. ``"8.8.8.8"``, and *port* is an
integer port number in the range 1-65535. Note that domain names are not
accepted as *ipv4_address*; they should be resolved first using
`socket.getaddrinfo()`.
* IPv6: *(ipv6_address, port, flowinfo, scopeid)*, where *ipv6_address*
is a string with colon-notation numeric IPv6 address, e.g. ``"2001:db8::1"``,
and *port* is an integer port number in the range 1-65535. *flowinfo*
must be 0. *scopeid* is the interface scope identifier for link-local
addresses. Note that domain names are not accepted as *ipv6_address*,
they should be resolved first using `socket.getaddrinfo()`. Availability
of IPv6 support depends on a :term:`MicroPython port`.
.. exception:: socket.error
MicroPython does NOT have this exception.
.. admonition:: Difference to CPython
:class: attention
CPython used to have a ``socket.error`` exception which is now deprecated,
and is an alias of `OSError`. In MicroPython, use `OSError` directly.
"""
__author__ = "Howard C Lovatt"
__copyright__ = "Howard C Lovatt, 2020 onwards."
__license__ = "MIT https://opensource.org/licenses/MIT (as used by MicroPython)."
__version__ = "7.3.9" # Version set by https://github.com/hlovatt/tag2ver
from typing import Final, Any, Literal, overload
from uio import AnyReadableBuf, AnyWritableBuf
_Address: Final = tuple[str, int] | tuple[str, int, int, int] | str
def socket(
af: int = "AF_INET", type: int = "SOCK_STREAM", proto: int = "IPPROTO_TCP", /,
) -> "Socket":
"""
Create a new socket using the given address family, socket type and
protocol number. Note that specifying *proto* in most cases is not
required (and not recommended, as some MicroPython ports may omit
``IPPROTO_*`` constants). Instead, *type* argument will select needed
protocol automatically::
# Create STREAM TCP socket
socket(AF_INET, SOCK_STREAM)
# Create DGRAM UDP socket
socket(AF_INET, SOCK_DGRAM)
"""
def getaddrinfo(
host: str, port: int, af: int = 0, type: int = 0, proto: int = 0, flags: int = 0, /,
) -> list[tuple[int, int, int, str, tuple[str, int] | tuple[str, int, int, int]]]:
"""
Translate the host/port argument into a sequence of 5-tuples that contain all the
necessary arguments for creating a socket connected to that service. Arguments
*af*, *type*, and *proto* (which have the same meaning as for the `socket()` function)
can be used to filter which kind of addresses are returned. If a parameter is not
specified or zero, all combinations of addresses can be returned (requiring
filtering on the user side).
The resulting list of 5-tuples has the following structure::
(family, type, proto, canonname, sockaddr)
The following example shows how to connect to a given url::
s = socket.socket()
# This assumes that if "type" is not specified, an address for
# SOCK_STREAM will be returned, which may be not true
s.connect(socket.getaddrinfo('www.micropython.org', 80)[0][-1])
Recommended use of filtering params::
s = socket.socket()
# Guaranteed to return an address which can be connect'ed to for
# stream operation.
s.connect(socket.getaddrinfo('www.micropython.org', 80, 0, SOCK_STREAM)[0][-1])
.. admonition:: Difference to CPython
:class: attention
CPython raises a ``socket.gaierror`` exception (`OSError` subclass) in case
of error in this function. MicroPython doesn't have ``socket.gaierror``
and raises OSError directly. Note that error numbers of `getaddrinfo()`
form a separate namespace and may not match error numbers from
the :mod:`errno` module. To distinguish `getaddrinfo()` errors, they are
represented by negative numbers, whereas standard system errors are
positive numbers (error numbers are accessible using ``e.args[0]`` property
from an exception object). The use of negative values is a provisional
detail which may change in the future.
"""
def inet_ntop(af: int, bin_addr: bytes, /) -> str:
"""
Convert a binary network address *bin_addr* of the given address family *af*
to a textual representation::
>>> socket.inet_ntop(socket.AF_INET, b"\x7f\0\0\1")
'127.0.0.1'
"""
def inet_pton(af: int, txt_addr: str, /) -> bytes:
"""
Convert a textual network address *txt_addr* of the given address family *af*
to a binary representation::
>>> socket.inet_pton(socket.AF_INET, "1.2.3.4")
b'\x01\x02\x03\x04'
"""
AF_INET: Final[int] = ...
"""
Address family types. Availability depends on a particular :term:`MicroPython port`.
"""
AF_INET6: Final[int] = ...
"""
Address family types. Availability depends on a particular :term:`MicroPython port`.
"""
SOCK_STREAM: Final[int] = ...
"""
Socket types.
"""
SOCK_DGRAM: Final[int] = ...
"""
Socket types.
"""
IPPROTO_UDP: Final[int] = ...
"""
IP protocol numbers. Availability depends on a particular :term:`MicroPython port`.
Note that you don't need to specify these in a call to `socket.socket()`,
because `SOCK_STREAM` socket type automatically selects `IPPROTO_TCP`, and
`SOCK_DGRAM` - `IPPROTO_UDP`. Thus, the only real use of these constants
is as an argument to `setsockopt()`.
"""
IPPROTO_TCP: Final[int] = ...
"""
IP protocol numbers. Availability depends on a particular :term:`MicroPython port`.
Note that you don't need to specify these in a call to `socket.socket()`,
because `SOCK_STREAM` socket type automatically selects `IPPROTO_TCP`, and
`SOCK_DGRAM` - `IPPROTO_UDP`. Thus, the only real use of these constants
is as an argument to `setsockopt()`.
"""
SOL_SOCKET: Final[int] = ...
"""
Socket option levels (an argument to `setsockopt()`). The exact
inventory depends on a :term:`MicroPython port`.
"""
SO_REUSEADDR: Final[int] = ...
"""
Socket options (an argument to `setsockopt()`). The exact
inventory depends on a :term:`MicroPython port`.
"""
IPPROTO_SEC: Final[int] = ...
"""
Special protocol value to create SSL-compatible socket.
Constants specific to WiPy:
"""
class Socket:
"""
A unix like socket, for more information see module ``socket``'s description.
The name, `Socket`, used for typing is not the same as the runtime name, `socket` (note lowercase `s`).
The reason for this difference is that the runtime uses `socket` as both a class name and as a method name and
this is not possible within code written entirely in Python and therefore not possible within typing code.
"""
def close(self) -> None:
"""
Mark the socket closed and release all resources. Once that happens, all future operations
on the socket object will fail. The remote end will receive EOF indication if
supported by protocol.
Sockets are automatically closed when they are garbage-collected, but it is recommended
to `close()` them explicitly as soon you finished working with them.
"""
def bind(self, address: _Address | bytes, /) -> None:
"""
Bind the socket to *address*. The socket must not already be bound.
"""
def listen(self, backlog: int = ..., /) -> None:
"""
Enable a server to accept connections. If *backlog* is specified, it must be at least 0
(if it's lower, it will be set to 0); and specifies the number of unaccepted connections
that the system will allow before refusing new connections. If not specified, a default
reasonable value is chosen.
"""
    def accept(self) -> tuple["Socket", Any]:
"""
Accept a connection. The socket must be bound to an address and listening for connections.
The return value is a pair (conn, address) where conn is a new socket object usable to send
and receive data on the connection, and address is the address bound to the socket on the
other end of the connection.
"""
def connect(self, address: _Address | bytes, /) -> None:
"""
Connect to a remote socket at *address*.
"""
def send(self, bytes: AnyReadableBuf, /) -> int:
"""
Send data to the socket. The socket must be connected to a remote socket.
Returns number of bytes sent, which may be smaller than the length of data
("short write").
"""
def sendall(self, bytes: AnyReadableBuf, /) -> None:
"""
Send all data to the socket. The socket must be connected to a remote socket.
Unlike `send()`, this method will try to send all of data, by sending data
chunk by chunk consecutively.
The behaviour of this method on non-blocking sockets is undefined. Due to this,
on MicroPython, it's recommended to use `write()` method instead, which
has the same "no short writes" policy for blocking sockets, and will return
number of bytes sent on non-blocking sockets.
"""
def recv(self, bufsize: int, /) -> bytes:
"""
Receive data from the socket. The return value is a bytes object representing the data
received. The maximum amount of data to be received at once is specified by bufsize.
"""
def sendto(self, bytes: AnyReadableBuf, address: _Address, /) -> int:
"""
Send data to the socket. The socket should not be connected to a remote socket, since the
destination socket is specified by *address*.
"""
def recvfrom(self, bufsize: int, /) -> tuple[bytes, Any]:
"""
Receive data from the socket. The return value is a pair *(bytes, address)* where *bytes* is a
bytes object representing the data received and *address* is the address of the socket sending
the data.
"""
def setsockopt(
self, level: int, optname: int, value: AnyReadableBuf | int, /
) -> None:
"""
Set the value of the given socket option. The needed symbolic constants are defined in the
socket module (SO_* etc.). The *value* can be an integer or a bytes-like object representing
a buffer.
"""
def settimeout(self, value: float | None, /) -> None:
"""
**Note**: Not every port supports this method, see below.
Set a timeout on blocking socket operations. The value argument can be a nonnegative floating
point number expressing seconds, or None. If a non-zero value is given, subsequent socket operations
will raise an `OSError` exception if the timeout period value has elapsed before the operation has
completed. If zero is given, the socket is put in non-blocking mode. If None is given, the socket
is put in blocking mode.
Not every :term:`MicroPython port` supports this method. A more portable and
generic solution is to use a `select.poll` object. This allows waiting on
multiple objects at the same time (and not just on sockets, but on generic
`stream` objects which support polling). Example::
# Instead of:
s.settimeout(1.0) # time in seconds
s.read(10) # may timeout
# Use:
poller = select.poll()
poller.register(s, select.POLLIN)
res = poller.poll(1000) # time in milliseconds
if not res:
# s is still not ready for input, i.e. operation timed out
.. admonition:: Difference to CPython
:class: attention
CPython raises a ``socket.timeout`` exception in case of timeout,
which is an `OSError` subclass. MicroPython raises an OSError directly
instead. If you use ``except OSError:`` to catch the exception,
your code will work both in MicroPython and CPython.
"""
def setblocking(self, value: bool, /) -> None:
"""
Set blocking or non-blocking mode of the socket: if flag is false, the socket is set to non-blocking,
else to blocking mode.
This method is a shorthand for certain `settimeout()` calls:
* ``sock.setblocking(True)`` is equivalent to ``sock.settimeout(None)``
* ``sock.setblocking(False)`` is equivalent to ``sock.settimeout(0)``
"""
@overload
def makefile(
self, mode: Literal["rb", "wb", "rwb"] = "rb", buffering: int = 0, /
) -> Socket:
"""
Return a file object associated with the socket. The exact returned type depends on the arguments
given to makefile(). The support is limited to binary modes only ('rb', 'wb', and 'rwb').
CPython's arguments: *encoding*, *errors* and *newline* are not supported.
.. admonition:: Difference to CPython
:class: attention
As MicroPython doesn't support buffered streams, the value of the *buffering*
parameter is ignored and treated as if it were 0 (unbuffered).
.. admonition:: Difference to CPython
:class: attention
Closing the file object returned by makefile() WILL close the
original socket as well.
"""
@overload
def makefile(self, mode: str, buffering: int = 0, /) -> Socket:
"""
Return a file object associated with the socket. The exact returned type depends on the arguments
given to makefile(). The support is limited to binary modes only ('rb', 'wb', and 'rwb').
CPython's arguments: *encoding*, *errors* and *newline* are not supported.
.. admonition:: Difference to CPython
:class: attention
As MicroPython doesn't support buffered streams, the value of the *buffering*
parameter is ignored and treated as if it were 0 (unbuffered).
.. admonition:: Difference to CPython
:class: attention
Closing the file object returned by makefile() WILL close the
original socket as well.
"""
@overload
def read(self) -> bytes:
"""
Read up to size bytes from the socket. Return a bytes object. If *size* is not given, it
reads all data available from the socket until EOF; as such the method will not return until
the socket is closed. This function tries to read as much data as
requested (no "short reads"). This may be not possible with
non-blocking socket though, and then less data will be returned.
"""
@overload
def read(self, size: int, /) -> bytes:
"""
Read up to size bytes from the socket. Return a bytes object. If *size* is not given, it
reads all data available from the socket until EOF; as such the method will not return until
the socket is closed. This function tries to read as much data as
requested (no "short reads"). This may be not possible with
non-blocking socket though, and then less data will be returned.
"""
@overload
def readinto(self, buf: AnyWritableBuf, /) -> int | None:
"""
Read bytes into the *buf*. If *nbytes* is specified then read at most
that many bytes. Otherwise, read at most *len(buf)* bytes. Just as
`read()`, this method follows "no short reads" policy.
Return value: number of bytes read and stored into *buf*.
"""
@overload
def readinto(self, buf: AnyWritableBuf, nbytes: int, /) -> int | None:
"""
Read bytes into the *buf*. If *nbytes* is specified then read at most
that many bytes. Otherwise, read at most *len(buf)* bytes. Just as
`read()`, this method follows "no short reads" policy.
Return value: number of bytes read and stored into *buf*.
"""
def readline(self) -> bytes:
"""
Read a line, ending in a newline character.
Return value: the line read.
"""
def write(self, buf: AnyReadableBuf, /) -> int | None:
"""
Write the buffer of bytes to the socket. This function will try to
write all data to a socket (no "short writes"). This may be not possible
with a non-blocking socket though, and returned value will be less than
the length of *buf*.
Return value: number of bytes written.
"""
| true |
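Pulling the docstrings above together, a minimal blocking HTTP GET on a MicroPython port might look like the sketch below (host and request line are placeholders):

```python
import socket

addr = socket.getaddrinfo('www.micropython.org', 80, 0, socket.SOCK_STREAM)[0][-1]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.write(b'GET / HTTP/1.0\r\nHost: www.micropython.org\r\n\r\n')
print(s.read())   # reads until EOF, per read() above
s.close()
```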
bdf4b9700d6e8f0a3920e2452e75b1d4f7b485f4
|
Python
|
jeevanmore/MyPythonPrograms
|
/py01_HR_Jumping_on_the_Cloud.py
|
UTF-8
| 128 | 3.03125 | 3 |
[] |
no_license
|
n = 6
s = ['0','0','0','0','1','0']
jumps = 0
for cloud in s:
    if cloud == '0':  # elements of s are strings, so compare against '0'
jumps += 1
print(jumps)
| true |
6326416590f9691eaceca6a7230dc2074ce9b10b
|
Python
|
OctopusHugz/holbertonschool-interview
|
/0x10-rain/0-rain.py
|
UTF-8
| 620 | 3.84375 | 4 |
[] |
no_license
|
#!/usr/bin/python3
""" This module creates a rain function """
def rain(walls):
""" Calculates how much rain water is retained """
total_rain = 0
for i in range(len(walls)):
index = walls[i]
if i > 0:
            left_list = walls[0:i + 1]  # include the current wall so min(left, right) never drops below index
left_wall_height = max(left_list)
else:
left_wall_height = index
if i < len(walls):
right_list = walls[i:]
right_wall_height = max(right_list)
else:
right_wall_height = index
total_rain += (min(left_wall_height, right_wall_height) - index)
return total_rain
| true |
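Since each side's maximum includes the current wall, every per-index contribution is non-negative and the function reproduces the classic trapping-rain-water example:

```python
print(rain([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # 6
```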
f2f3d3ba1c7463b4fa67d2bc464d30f89787f06a
|
Python
|
HarikaSatyaPreethi/opencv_programms
|
/videoplayback.py
|
UTF-8
| 764 | 2.640625 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 01:08:14 2019
@author: preethi
"""
import cv2
def main():
windowName = "video play back"
cv2.namedWindow(windowName)
filename = '/home/preethi/Documents/opencv/output/videorecord.avi'
cap = cv2.VideoCapture(filename)
while(cap.isOpened()):
ret,frame=cap.read()
print(ret)
if ret:
cv2.imshow(windowName, frame)
if cv2.waitKey(133) == 27:
break
else:
break
cv2.destroyAllWindows()
cap.release()
if __name__ == "__main__":
main()
| true |
4a942cf9efd64421ebd1f50523c7355368dcd679
|
Python
|
JJong-Min/Baekjoon-Online-Judge-Algorithm
|
/Data Structure(자료구조)/Data_Structure_6번.py
|
UTF-8
| 419 | 3.109375 | 3 |
[] |
no_license
|
import sys
inputVal = sys.stdin.readline()
values = []
while (inputVal != "고무오리 디버깅 끝"):
inputVal = sys.stdin.readline().rstrip()
if inputVal == "문제":
values.append(1)
if inputVal == "고무오리":
if len(values) == 0:
values.append(1)
values.append(1)
else:
values.pop()
print("고무오리야 사랑해") if len(values) == 0 else print("힝구")
| true |
d63161ce0f6116d37bb30dc5ed9ab7147f890e16
|
Python
|
MichaelTrzaskoma/CSCI380_Nutrifetch
|
/backend/flask_app.py
|
UTF-8
| 4,809 | 2.828125 | 3 |
[] |
no_license
|
from flask import Flask
from flask import request, jsonify
# import the things that Michael has coded
from Nutrifetch import *
app = Flask(__name__)
app.config["DEBUG"] = True
allergy_profile = {
'Email': "",
'Allergy': [],
}
profileInput = {}
@app.route('/api/v1/CSCI380/profileInput', methods=['POST'])
def assignProfile():
# post user allergy profile to the server
# INPUT: (inside request header) user email and allergy item(s) <-- array
# OUTPUT: json format allergy_profile && code 202
data = request.json
global allergy_profile
response = ""
if data['email'] != "":
email = data['email']
first_name = data['fname']
last_name = data['lname']
gender = data['gender']
age = data['age']
weight = data['weight']
allergy_profile = data['allergens']
profileInput[email] = (first_name, last_name, gender, age, weight,
allergy_profile)
userProfile(email,
first_name,
last_name,
gender,
age,
weight,
userallergens=allergy_profile)
# set the info to FireStore
response = jsonify("Data captured!")
response.status_code = 202
else:
response = jsonify("An error occured!")
response.status_code = 500
return response
@app.route('/api/v1/CSCI380/getUPCinfo', methods=['GET'])
def getUPCinfo():
# get the nutrition info from the server
# INPUT: UPC code and user email
# OUTPUT: food nutrition info and food allergy status
response = ""
# check if the upc and email is in the URL
if 'upc' in request.args and 'email' in request.args:
upc = request.args['upc']
email = request.args['email']
if upcCheck(upc):
nutrition = upcNutrition(upc)
nutrition.append(allergyCheck(upc, email))
response = jsonify(nutrition)
# response.allery = allergyCheck(upc, email)
response.status_code = 202
else:
response = "product not found"
response = jsonify(response)
response.status_code = 404
else:
response = jsonify("Error code: AX002")
response.status_code = 500
return response
# =========================== TEST ===========================
# REST API test sample
# Create some test data for our catalog in the form of a list of dictionaries.
books = [{
'id': 0,
'title': 'A Fire Upon the Deep',
'author': 'Vernor Vinge',
'first_sentence': 'The coldsleep itself was dreamless.',
'year_published': '1992'
}, {
'id': 1,
'title': 'The Ones Who Walk Away From Omelas',
'author': 'Ursula K. Le Guin',
'first_sentence':
'With a clamor of bells that set the swallows soaring, the Festival of Summer came to the city Omelas, bright-towered by the sea.',
'published': '1973'
}, {
'id': 2,
'title': 'Dhalgren',
'author': 'Samuel R. Delany',
'first_sentence': 'to wound the autumnal city.',
'published': '1975'
}]
userAcc = {
'signedIn': False,
'full_name': "",
'last_name': "",
'first_name': "",
'email': "",
'photoUrl': "",
}
@app.route('/api/v1/csci426/test', methods=['POST'])
def assignUsrAcc():
global userAcc
data = request.json
userAcc = data
return jsonify(userAcc)
@app.route('/', methods=['GET'])
def home():
return '''
<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>
'''
# A route to return all of the available entries in our catalog.
@app.route('/api/v1/resources/books/all', methods=['GET'])
def api_all():
response = jsonify(books)
response.status_code = 202
return response
@app.route('/api/v1/resources/books', methods=['GET'])
def api_bookID():
# Check if an ID was provided as part of the URL.
# If ID is provided, assign it to a variable.
# If no ID is provided, display an error in the browser.
if 'id' in request.args:
id = int(request.args['id'])
else:
response = "Error: No id field provided. Please specify an id."
response.statue_code = 204
return response
# Create an empty list for our results
results = []
# Loop through the data and match results that fit the requested ID.
# IDs are unique, but other fields might return many results
for book in books:
if book['id'] == id:
results.append(book)
# Use the jsonify function from Flask to convert our list of
# Python dictionaries to the JSON format.
response = jsonify(results)
response.status_code = 202
return response
if __name__ == '__main__':
app.run()
| true |
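Assuming the app runs at Flask's default development address, the test catalog above can be exercised with any HTTP client; a sketch using `requests` (not a dependency of this file):

```python
import requests

base = 'http://127.0.0.1:5000'
print(requests.get(base + '/api/v1/resources/books/all').json())
print(requests.get(base + '/api/v1/resources/books', params={'id': 1}).json())
```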
9170a44e2219ab020c615a365fb126d61dc08fd7
|
Python
|
sumanth-nirmal/SpeedPrediction
|
/extract_images.py
|
UTF-8
| 1,988 | 2.703125 | 3 |
[] |
no_license
|
#!/usr/bin/python
## Author: sumanth
## Date: March 23, 2017
#extract images from video
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import json
import argparse
import subprocess
# no.of images extracted
count=0
# extracted images path
data_extracted_path='./data_extracted/'
def main(video_path, data_json_path):
    # if a data json file is available
if data_json_path != "no":
#load the json data
with open(data_json_path) as data_file:
data = json.load(data_file)
# load the data
cap = cv2.VideoCapture(video_path)
    # check if the video is opened successfully
if cap.isOpened() == True:
global count
# frames per sec
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
print("Frames per second in the input video: {0}".format(fps))
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # end of video: avoid passing an empty frame to imshow
                break
            cv2.imshow('video', frame)
if data_json_path != "no":
cv2.imwrite(data_extracted_path+"%f.jpg" % data[count][0], frame)
else:
cv2.imwrite(data_extracted_path+"%f.jpg" % count, frame)
count = count + 1
if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
else:
print("the video can not be opened\ncheck the video or use different opencv version\nusing opencv version: ",cv2.__version__)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='extract images from the video, and labels if available')
parser.add_argument(
'--video',
type=str,
help='path for the video, usage: --video "./speed_challenge/drive.mp4"'
)
parser.add_argument(
'--data_json',
type=str,
        help='path for the json file if available, usage: --data_json "./speed_challenge/drive.json" or --data_json "no" if not available'
)
args = parser.parse_args()
main(args.video, args.data_json)
| true |
1396b2ba5463064ab879c70584d4a0ca10b5eda7
|
Python
|
MayankMaheshwar/DS-and-Algo-solving
|
/microsoft1.py
|
UTF-8
| 349 | 3.0625 | 3 |
[] |
no_license
|
from itertools import groupby
def solution(S):
    # write your code in Python 3.6
    if len(S) == 1:
        return 0
    mx = -2**32
    # groupby returns a single-use iterator, so take the run lengths in one
    # pass; a second loop over the same iterator would yield nothing
    for k, v in groupby(S):
        mx = max(mx, len(list(v)))
    return mx
print(solution("babaa"))
| true |
af2d558c3be84a098832e0fd65864ec3ddfe8a48
|
Python
|
starkizard/CrudeCode
|
/Codeforces/Practice/A2oJ Ladders/Ladder #14/presentFromLena.py
|
UTF-8
| 461 | 3.640625 | 4 |
[] |
no_license
|
# author: starkizard
# implementation problem: string(i) builds one row -- the digits 0..i-1, then i, then the reversed prefix (e.g. "0 1 2 1 0" for i=2)
def string(i):
s=""
for j in range(i):
s+=str(j)+" "
r=s[::-1]
s+=str(i)
s+=r
return s
n=int(input())
spaces=2*n
l=[]
for i in range(n):
l.append(" "*spaces + string(i))
spaces-=2
l+=[string(n)]+l[::-1]
for i in l:
print(i)
| true |
f6d6a5439e9a6779c1d27f5d225be9b150147fd6
|
Python
|
simonr0204/twitch_plays
|
/main.py
|
UTF-8
| 897 | 2.8125 | 3 |
[] |
no_license
|
import twitch
from keyholder import holdForSeconds as Press
from secrets import username, key
from collections import Counter
def collect_messages(t):
new_messages = t.twitch_recieve_messages()
if not new_messages:
return
else:
msgs = [message['message'].lower() for message in new_messages]
inputs = [char for message in msgs for char in message]
return inputs
def most_frequent(commands):
occurrences = Counter(commands)
return occurrences.most_common(1)[0][0]
def main():
t = twitch.Twitch()
t.twitch_connect(username, key)
while True:
inputs = collect_messages(t)
if inputs is None:
continue
command = most_frequent(inputs)
print('Executing : {}'.format(command))
Press(command, 0.1)
if __name__ == '__main__':
main()
| true |
dd8af3ae8a63861fd13c9557f70698b5f36cdd8e
|
Python
|
oVirt/ovirt-scheduler-proxy
|
/doc/plugin_samples/ksm_same_os_score.py
|
UTF-8
| 1,987 | 2.609375 | 3 |
[
"Apache-2.0"
] |
permissive
|
from __future__ import print_function, division
from ovirtsdk.api import API
import sys
class ksm_same_os_score():
"""rank hosts higher the more vms they have with similar
os to the scored vm"""
properties_validation = ''
def _get_connection(self):
# open a connection to the rest api
connection = None
try:
connection = API(url='http://localhost:8080',
username='admin@internal', password='1')
except BaseException as ex:
# letting the external proxy know there was an error
print(ex, file=sys.stderr)
return None
return connection
def _get_hosts(self, host_ids, connection):
# get all the hosts with the given ids
engine_hosts = connection.hosts.list(
query=" or ".join(["id=%s" % u for u in host_ids]))
return engine_hosts
def _get_vms(self, host_name, connection):
# get all the vms with the given host
host_vms = connection.vms.list('host='+host_name)
return host_vms
def score_host(self, vm, host, connection):
score = 0
host_vms = self._get_vms(host.name, connection)
if not host_vms:
return (host.id, 0)
for host_vm in host_vms:
if vm.get_os().get_type() == host_vm.get_os().get_type():
if vm.get_os().get_version() == \
host_vm.get_os().get_version():
score += 100
else:
score += 20
return (host.id, score / len(host_vms))
def do_score(self, hosts_ids, vm_id, args_map):
conn = self._get_connection()
if conn is None:
return
engine_hosts = self._get_hosts(hosts_ids, conn)
vm = conn.vms.get(id=vm_id)
host_scores = []
for host in engine_hosts:
host_scores.append(self.score_host(vm, host, conn))
print(host_scores)
| true |
377467355d16b6e093d9374da180434ab41591fa
|
Python
|
skailasa/practice
|
/ctc/ch04-trees-and-graphs/9-bst-sequences.py
|
UTF-8
| 2,362 | 4.25 | 4 |
[] |
no_license
|
"""
A BST is created by traversing through an array left to right and
inserting each element. Given a BST with distinct elements, print
all possible arrays that could have led to this tree.
Strategy:
- root node always first, a different first element would make a
different BST
- need to 'weave' together possible arrays and prepend root node
to find final sequence.
- Have to make sure that relative order is maintained, can do this
recursively
- Have to then implement a second recursive function to do this on
each branch of a given root.
"""
import copy
class TreeNode:
"""Simple node class for binary tree"""
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
def weave(first, second, results, prefix):
# if one list is empty add remainder to cloned prefix, and store
if not first or not second:
result = copy.deepcopy(prefix)
result.extend(first)
result.extend(second)
results.append(result)
return
# recurse with head of first added to the prefix. Removing the head
# will damage first, so we'll need to replace it
head_first = first[0]
prefix.append(head_first)
first = first[1:]
weave(first, second, results, prefix)
first = [head_first] + first
prefix.pop()
head_second = second[0]
prefix.append(head_second)
second = second[1:]
weave(first, second, results, prefix)
prefix.pop()
def all_sequences(node):
    result = []
    if not node:
        # return a single empty sequence so the weave loops below still run
        # when a child is missing; otherwise one-sided nodes would drop their
        # subtree's orderings and inner nodes would emit a bare prefix
        result.append([])
        return result
    prefix = [node.data]
    left_sequences = all_sequences(node.left)
    right_sequences = all_sequences(node.right)
    # weave together each list from left and right sides
    for left in left_sequences:
        for right in right_sequences:
            weaved = []
            weave(left, right, weaved, prefix)
            result.extend(weaved)
    return result
if __name__ == "__main__":
a = TreeNode(4)
b = TreeNode(2)
c = TreeNode(6)
#d = TreeNode(1)
#e = TreeNode(3)
#f = TreeNode(5)
#g = TreeNode(7)
# construct simple BST
a.left = b
a.right = c
#b.left = d
#b.right = e
#c.left = f
#c.right = g
print(all_sequences(a))
| true |
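Given the empty-sequence base case in `all_sequences`, the three-node tree built in `__main__` yields exactly the two insertion orders that produce it:

```python
print(all_sequences(a))  # [[4, 2, 6], [4, 6, 2]]
```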
e5f8998e8eb045c5a7e485f65c2b95c014a70481
|
Python
|
lxxue/TorchSeg
|
/model/pspnet2/cil.pspnet2.R50/split_train_val.py
|
UTF-8
| 1,933 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
import os
import numpy as np
import argparse
import glob
parser = argparse.ArgumentParser()
parser.add_argument('--trainall', default=False, action='store_true')
parser.add_argument('-v', '--verbose', default=True, action='store_true')
parser.add_argument('-s', '--size', default=0.9, action='store', type=float)  # a fractional split needs float; int('0.8') would raise
args = parser.parse_args()
dataset_path = os.path.dirname(os.path.abspath(__file__))
gt = []
img = []
test = []
# get img
for name in glob.glob('training/images/satImage*'):
img.append(name)
# get gt
for name in glob.glob('training/groundtruth/satImage*'):
gt.append(name)
# get test
for name in glob.glob('test_images/*'):
test.append(name)
img.sort()
gt.sort()
test.sort()
train_size = args.size
assert len(img) == len(gt)
idx = np.arange(len(img))
train_idx = np.random.choice(idx, int(train_size * len(img)), replace=False)
val_idx = list(set(idx) - set(train_idx))
train_idx.sort()
val_idx.sort()
if args.trainall:
train_idx = idx
if args.verbose:
print("Writing images from {}".format(dataset_path))
print("{:<10d} images {:<10d} groundtruth".format(len(img), len(gt)))
print("training size: {0:.0%}".format(train_size))
print("validation size: {0:.0%}".format(1 - train_size))
with open('train.txt', 'w') as f:
for i in train_idx:
line = img[i] + '\t' + gt[i] + '\n'
f.write(line)
if args.verbose:
print("Writing training {:3} images to {}".format(len(train_idx), f.name))
with open('val.txt', 'w') as f:
for i in val_idx:
line = img[i] + '\t' + gt[i] + '\n'
f.write(line)
if args.verbose:
print("Writing validation {:3} images to {}".format(len(val_idx), f.name))
with open('test.txt', 'w') as f:
for i in range(len(test)):
line = test[i] + '\t' + test[i] + '\n'
f.write(line)
if args.verbose:
print("Writing test {:3} images to {}".format(len(test), f.name))
| true |
afcc5ae9597a1f37b177086f4eed386b787b33ae
|
Python
|
ED2589/gesture_classify
|
/Preprocessing/LoopForAllParticipant_Preproc.py
|
UTF-8
| 18,521 | 2.796875 | 3 |
[] |
no_license
|
# Created May 24,2019 #
# Overview:
# takes each of the 19 EMG files + each of the 19 IMU files,
# and returns ,mat files of binned data (in Python it's 3D array)
##########
# Step 1 #
##########
# import raw data (.csv) into Python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
import itertools
import scipy.io
def import_data(csv_file):
"""
:param csv_file: a string i.e. name of .csv file
csv_file is the raw data (uniqueID, date, pods columns) - for 1 participant
:return: pandas data frame of imported data
"""
return pd.read_csv(csv_file)
emg_csv_list = ['F01_emg.csv','F02_emg.csv','F03_emg.csv', 'F04_emg.csv','F05_emg.csv','F06_emg.csv','F07_emg.csv', 'F08_emg.csv',
'F09_emg.csv','F10_emg.csv','P12_emg.csv','P43_emg.csv','P52_emg.csv','P63_emg.csv','P81_emg.csv','P84_emg.csv',
'P96_emg.csv','P97_emg.csv','P98_emg.csv']
imu_csv_list = ['F01_imu.csv','F02_imu.csv','F03_imu.csv', 'F04_imu.csv','F05_imu.csv','F06_imu.csv','F07_imu.csv', 'F08_imu.csv',
'F09_imu.csv','F10_imu.csv','P12_imu.csv','P43_imu.csv','P52_imu.csv','P63_imu.csv','P81_imu.csv','P84_imu.csv',
'P96_imu.csv','P97_imu.csv','P98_imu.csv']
emg_rawDF_list = []
imu_rawDF_list = []
# creates a list of 19 dataframes, by reading in the 19 emg raw files
for i in emg_csv_list: # import all 19 EMG datasets
emg_rawDF_list.append(import_data(i))
# creates a list of 19 dataframes, by reading in the 19 imu raw files
for i in imu_csv_list: # import all 19 IMU datasets
imu_rawDF_list.append(import_data(i))
# extract each df from list - IS THIS NECESSARY?
#F01EmgRaw = emg_rawDF_list[0]
#F02EmgRaw = emg_rawDF_list[1]
#F03EmgRaw = emg_rawDF_list[2]
#F04EmgRaw = emg_rawDF_list[3]
# DO THIS FOR ALL 19 PARTICIPANTS
#F01ImuRaw = imu_rawDF_list[0]
# DO THIS FOR ALL 19 PARTICIPANTS
##########
# Step 2 #
##########
# Normalize each channel column in EMG to range (0,1)
def normalize_emg(df):
'''
:param df: Raw emg df
:return: Emg df with each of pod0 columns normalized to range(0,1)
'''
ncol = df.shape[1]
df1 = df.iloc[ :, 7: ncol-1] # all pod columns
df2 = df.iloc[:,0] # uniqueID column
df1 = abs(df1)
absEMG = pd.concat([df2,df1],axis=1)
groupEMG = absEMG.groupby(['uniqueID'])
def norm_func(g):
return ((g - g.min()) / (g.max() - g.min()))
dfnorm = groupEMG.apply(norm_func) # normalizing step!
subs_1 = df[['uniqueID', 'participant', 'phase', 'trial', 'date', 'structName','Timestr']]
subs_2 = df[['TrueLabel']]
return pd.concat([subs_1, dfnorm, subs_2], axis=1)
emg_normDF_list = [] # create empty list to store normalized EMG dfs
for df in emg_rawDF_list:
emg_normDF_list.append(normalize_emg(df))
##########
# Step 3 #
##########
# Low - pass filter
def lowpass_filt(df , sf, order,cutoff):
'''
:param df: normalized Emg df OR Raw Imu df
    :param sf: sampling freq (200 Hz for Emg; 50 Hz for Imu, as passed below)
:param order: order of low pass filter
:param cutoff: cutoff freq in Hz
:return: lowpass filtered df
'''
ncol = df.shape[1]
df_a = df.iloc[:, 7: ncol - 1] # all numeric columns excluding 'TrueLabel' column
df_b = df.iloc[:, 0] # uniqueID column
df2 = pd.concat([df_b, df_a], axis=1)
df2_gp = df2.groupby(['uniqueID']) # do new filter for each different trial
df3_gp = [x for _, x in df2_gp] # list of data frames, each consisting of a different trial
for group in df3_gp:
group.drop('uniqueID', axis=1, inplace=True)
norm_param = cutoff / (sf/2)
d, c = signal.butter(order,norm_param,'low')
for group in df3_gp:
ncol = group.shape[1]
for j in range(ncol):
group.iloc[:, j] = signal.filtfilt(d, c, group.iloc[:, j], axis=0) # butterworth for each dataframe in the df3_gp (which is a list)
df3_conc = pd.concat(df3_gp)
# put the data back together, after the numerical pod columns are all low pass filtered
subs_1 = df[['uniqueID', 'participant', 'phase', 'trial', 'date', 'structName', 'Timestr']]
subs_2 = df[['TrueLabel']]
df4 = pd.concat([subs_1, df3_conc, subs_2], axis=1) # this is the LOW-PASS FILTERED EMG Data
return df4
emg_LowFiltNormDF_list = [] # create empty list to store lowpass filtered, normalized EMG dfs
# low pass for all 19
for df_emg in emg_normDF_list:
emg_LowFiltNormDF_list.append(lowpass_filt(df_emg , 200, 4,10))
imu_LowFiltDF_list = [] # create empty list to store lowpass filtered IMU dfs
for df_imu in imu_rawDF_list:
imu_LowFiltDF_list.append(lowpass_filt(df_imu , 50, 4,10))
#######
## pod4 plot check for F02 Trial 1
#FakeT_Emg_F02T1_list = FakeTimeArray(emg_normDF_list[1],0.005)[0].tolist()
#EmgF02T1pod4_Nofilt_list = list(emg_normDF_list[1].pod4[0:RowNum(emg_normDF_list[1])[0]])
#EmgF02T1pod4_filt_list = list(emg_LowFiltNormDF_list[1].pod4[0:RowNum(emg_LowFiltNormDF_list[1])[0]])
#testdf_F02T1_pod4_fakeT = pd.DataFrame({'pod4_norm':EmgF02T1pod4_Nofilt_list,'pod4_norm_lowfilt': EmgF02T1pod4_filt_list},
#index=FakeT_Emg_F02T1_list)
#LinePlot_Emg_F02T1_pod4 = testdf_F02T1_pod4_fakeT.plot.line()
#LinePlot_Emg_F02T1_pod4.set(xlabel='Time elapsed(s)', ylabel = 'Pod 4 Channel (V)')
#SepLinePlot_Emg_F02T1_pod4 = testdf_F02T1_pod4_fakeT.plot.line(subplots=True)
##########
# Step 4 #
##########
# REMOVE all rows where TrueLabel!=0 for both EMG and IMU
def RemoveZeroTrueLabel(df_filt):
'''
:param df_filt: filtered EMG or IMU df
:return: same input df but with 'TrueLabel == 0 ' rows removed
'''
return df_filt[df_filt.TrueLabel != 0]
## EMG ##
emg_Subs_NoFiltDF_list =[] # create empty list to store subsetted, NON-filtered, normalized EMG dfs
for df_emg in emg_normDF_list:
emg_Subs_NoFiltDF_list.append(RemoveZeroTrueLabel(df_emg))
emg_Subs_LowFiltNormDF_list = [] # create empty list to store subsetted, lowpass filt, normalized EMG dfs
for df_filt_emg in emg_LowFiltNormDF_list:
emg_Subs_LowFiltNormDF_list.append(RemoveZeroTrueLabel(df_filt_emg))
## IMU ##
imu_Subs_NoFiltDF_list =[] # create empty list to store subsetted, NON-filtered filt IMU dfs
for df_imu in imu_rawDF_list:
imu_Subs_NoFiltDF_list.append(RemoveZeroTrueLabel(df_imu))
imu_Subs_LowFiltNormDF_list = [] # create empty list to store subsetted, lowpass filt IMU dfs
for df_filt_imu in imu_LowFiltDF_list:
imu_Subs_LowFiltNormDF_list.append(RemoveZeroTrueLabel(df_filt_imu))
##########
# Step 5 #
##########
#### binning after subsetting ######
# Step 1: delete rows from EMG and IMU to ensure SAME starting time for EACH uniqueID group
# For detailed algorithm of function 'start_time_same' -> see 'PreprocStep1to4_F01.py' Line 375
def start_time_same(dfEMG,dfIMU):
dfEMG["Timestr"] = pd.to_datetime(dfEMG["Timestr"])
dfIMU["Timestr"] = pd.to_datetime(dfIMU["Timestr"])
df_g_EMG = dfEMG.groupby('uniqueID')
df_gp_EMG = [x for _, x in df_g_EMG]
df_g_IMU = dfIMU.groupby('uniqueID')
df_gp_IMU = [x for _, x in df_g_IMU]
list_EMG = []
list_IMU = []
for i in range(len(df_gp_EMG)):
EMG_1st_time = df_gp_EMG[i]['Timestr'].iloc[0]
IMU_1st_time = df_gp_IMU[i]['Timestr'].iloc[0]
# Case 1: if EMG earlier
if EMG_1st_time < IMU_1st_time:
# if any of the first 5 EMG time elements MATCHES IMU[0] original:
if len( df_gp_EMG[i]['Timestr'].head(5) [ df_gp_EMG[i]['Timestr'].head(5) == IMU_1st_time ] ) != 0:
# then remove all Emg[Emg < Imu[0]] i.e. keep ALL later times in EMG + last earlier element
df_gp_EMG[i] = df_gp_EMG[i] [ df_gp_EMG[i]['Timestr'] >= IMU_1st_time ]
list_EMG.append(df_gp_EMG[i])
list_IMU.append(df_gp_IMU[i])
# if not, remove all EMG < IMU[0] except for last earlier EMG Time (ensures output EMG time[0] still earlier)
elif len( df_gp_EMG[i]['Timestr'].head(5) [ df_gp_EMG[i]['Timestr'].head(5) == IMU_1st_time ] ) == 0:
last_earlier_EMG = df_gp_EMG[i]['Timestr'][df_gp_EMG[i]['Timestr'] < IMU_1st_time].max() # extract the last EARLIER element from EMG time
df_gp_EMG[i] = df_gp_EMG[i][ df_gp_EMG[i]['Timestr'] >= last_earlier_EMG] # keep last EARLIER EMG time + all other times greater than IMU[0]
list_EMG.append(df_gp_EMG[i])
list_IMU.append(df_gp_IMU[i])
# Case 2: if IMU earlier -> remove from IMU all earlier times (so 1st time in output IMU STILL later than 1st EMG time orig)
elif EMG_1st_time > IMU_1st_time:
df_gp_IMU[i] = df_gp_IMU[i] [ df_gp_IMU[i]['Timestr'] > EMG_1st_time ]
list_EMG.append(df_gp_EMG[i])
list_IMU.append(df_gp_IMU[i])
else:
list_EMG.append(df_gp_EMG[i])
list_IMU.append(df_gp_IMU[i])
return [pd.concat(list_EMG), pd.concat(list_IMU)]
emg_SameT_NoFiltDF_list =[] # create empty list to store SameTime, no tl==0 rows,NON-filtered, normalized EMG dfs
imu_SameT_NoFiltDF_list =[] # create empty list to store SameTime,no tl==0 rows,NON-filtered IMU dfs
#start_time_same(emg_Subs_NoFiltDF_list[1],imu_Subs_NoFiltDF_list[1])[0]
for (df_emg, df_imu) in itertools.zip_longest(emg_Subs_NoFiltDF_list,imu_Subs_NoFiltDF_list):
    emg_same, imu_same = start_time_same(df_emg, df_imu)  # call once and unpack both outputs
    emg_SameT_NoFiltDF_list.append(emg_same)
    imu_SameT_NoFiltDF_list.append(imu_same)
emg_SameT_LowFiltNormDF_list = [] # create empty list to store SameTime, subsetted,lowpass filt, normalized EMG dfs
imu_SameT_LowFiltNormDF_list = [] # create empty list to store SameTime, subsetted, lowpass filt, IMU dfs
#start_time_same(emg_Subs_LowFiltNormDF_list[1],imu_Subs_LowFiltNormDF_list[1])[1]
for (df_filt_emg,df_filt_imu) in itertools.zip_longest(emg_Subs_LowFiltNormDF_list,imu_Subs_LowFiltNormDF_list):
    emg_same_f, imu_same_f = start_time_same(df_filt_emg, df_filt_imu)  # call once and unpack both outputs
    emg_SameT_LowFiltNormDF_list.append(emg_same_f)
    imu_SameT_LowFiltNormDF_list.append(imu_same_f)
## check ##
#emg_a = emg_Subs_LowFiltNormDF_list[1].groupby('uniqueID')
#emg_b = [x for _, x in emg_a]
#imu_a = imu_Subs_LowFiltNormDF_list[1].groupby('uniqueID')
#imu_b = [x for _, x in imu_a]
#emg_b[0].Timestr.head(15)
#imu_b[0].Timestr.head(20)
#emg_a_t = emg_SameT_LowFiltNormDF_list[1].groupby('uniqueID')
#emg_b_t = [x for _, x in emg_a_t]
#imu_a_t = imu_SameT_LowFiltNormDF_list[1].groupby('uniqueID')
#imu_b_t = [x for _, x in imu_a_t]
#emg_b_t[0].Timestr.head(10)
#imu_b_t[0].Timestr.head(10)
#####################################
# Step 2: Binning
def binning(dfEmg, dfImu, winEmg , winImu):
'''
:param dfEmg , dfImu: EMG or IMU data (whether filtered or unfiltered)
:param winEmg, winImu: # of rows of data frame per bin : EMG -> 40 ; IMU -> 10
:return: 2 3D data frames (one of EMG , one of IMU) with 3rd dimension being # of bins
'''
    # drop the 'Timestr' column from both inputs (confirmed w/ Alex: not needed for the feature table)
    dfEmg = dfEmg.drop('Timestr', axis=1)
    dfImu = dfImu.drop('Timestr', axis=1)
Emg_Array_List = []
Imu_Array_List = []
df_Emg = dfEmg.groupby('uniqueID')
df_gp_Emg = [x for _ , x in df_Emg]
df_Imu = dfImu.groupby('uniqueID')
df_gp_Imu = [x for _, x in df_Imu]
for group in df_gp_Emg:
remEmg = group.shape[0] % winEmg # number of rows to remove (mod operation)
group.drop(group.tail(remEmg).index, inplace=True)
group = group.values.reshape(group.size // winEmg // group.shape[1], winEmg, group.shape[1]) # (number of bins, num row aka winbin, num col)
Emg_Array_List.append(group)
for group in df_gp_Imu:
        remImu = group.shape[0] % winImu # number of rows to remove (mod operation)
group.drop(group.tail(remImu).index, inplace=True)
group = group.values.reshape(group.size // winImu // group.shape[1], winImu, group.shape[1]) # (number of bins, num row aka winbin, num col)
Imu_Array_List.append(group)
for i in range(len(Emg_Array_List)):
diff_bins = Emg_Array_List[i].shape[0] - Imu_Array_List[i].shape[0]
if diff_bins > 0:
Emg_Array_List[i] = np.delete(Emg_Array_List[i] ,np.s_[-abs(diff_bins):] , axis=0)
elif diff_bins < 0:
Imu_Array_List[i] = np.delete(Imu_Array_List[i], np.s_[-abs(diff_bins):], axis=0)
else:
Emg_Array_List[i] = Emg_Array_List[i]
Imu_Array_List[i] = Imu_Array_List[i]
Emg_df = np.concatenate(Emg_Array_List , axis = 0 )
Imu_df = np.concatenate(Imu_Array_List , axis = 0)
return [Emg_df , Imu_df]
# return [ pd.Panel(Emg_df) , pd.Panel(Imu_df)]
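# Shape check for the reshape above (a toy example, independent of the pipeline data):
# 120 rows x 12 columns binned with a 40-row window gives (3 bins, 40 rows, 12 cols).
_toy = np.arange(120 * 12).reshape(120, 12)
_toy_binned = _toy.reshape(_toy.size // 40 // 12, 40, 12)
assert _toy_binned.shape == (3, 40, 12)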
emg_bin_NoFiltDF_list =[] # create empty list to store binned, SameTime, subsetted,NON-filtered, normalized EMG dfs
imu_bin_NoFiltDF_list =[] # create empty list to store binned, SameTime,subsetted,NON-filtered IMU dfs
for (df_emg,df_imu) in itertools.zip_longest(emg_SameT_NoFiltDF_list,imu_SameT_NoFiltDF_list):
    emg_binned, imu_binned = binning(df_emg, df_imu, 40, 10)  # call once and unpack both arrays
    emg_bin_NoFiltDF_list.append(emg_binned)
    imu_bin_NoFiltDF_list.append(imu_binned)
emg_bin_LowFiltNormDF_list = [] # create empty list to store binned,SameTime, subsetted,lowpass filt, normalized EMG dfs
imu_bin_LowFiltNormDF_list = [] # create empty list to store binned,SameTime, subsetted, lowpass filt, IMU dfs
for (df_filt_emg,df_filt_imu) in itertools.zip_longest(emg_SameT_LowFiltNormDF_list,imu_SameT_LowFiltNormDF_list):
    emg_binned_f, imu_binned_f = binning(df_filt_emg, df_filt_imu, 40, 10)  # call once and unpack both arrays
    emg_bin_LowFiltNormDF_list.append(emg_binned_f)
    imu_bin_LowFiltNormDF_list.append(imu_binned_f)
## check ##
# emg_a_t = emg_SameT_LowFiltNormDF_list[1].groupby('uniqueID')
# emg_b_t = [x for _, x in emg_a_t]
# imu_a_t = imu_SameT_LowFiltNormDF_list[1].groupby('uniqueID')
# imu_b_t = [x for _, x in imu_a_t]
# emg_b_t[5].shape[0] // 40
# imu_b_t[5].shape[0] // 10
# # versus
# emg_bin_NoFiltDF_list[1].shape
# imu_bin_NoFiltDF_list[1].shape
# emg_bin_LowFiltNormDF_list[1].shape
# imu_bin_LowFiltNormDF_list[1].shape
###########
## Step 6 #
###########
# export to Matlab (cell array format)
## P12 -> P97.mat ##
P12_Emg_NoFilt_Fin =emg_bin_NoFiltDF_list[0]
numcol_emg = P12_Emg_NoFilt_Fin.shape[2]
P12_Emg_NoFilt_Fin2 = np.array(P12_Emg_NoFilt_Fin[:,:,7:numcol_emg],dtype = np.float64)
P12_Emg_LowFilt_Fin =emg_bin_LowFiltNormDF_list[0]
P12_Emg_LowFilt_Fin2 = np.array(P12_Emg_LowFilt_Fin[:,:,7:numcol_emg],dtype = np.float64)
P12_Imu_NoFilt_Fin = imu_bin_NoFiltDF_list[0]
numcol_imu = P12_Imu_NoFilt_Fin.shape[2]
P12_Imu_NoFilt_Fin2 = np.array(P12_Imu_NoFilt_Fin[:,:,7:numcol_imu],dtype = np.float64)
P12_Imu_LowFilt_Fin =imu_bin_LowFiltNormDF_list[0]
P12_Imu_LowFilt_Fin2 = np.array(P12_Imu_LowFilt_Fin[:,:,7:numcol_imu],dtype = np.float64)
scipy.io.savemat('P97.mat', dict(P97Emg_NoFilt = P12_Emg_NoFilt_Fin2 , P97Imu_NoFilt = P12_Imu_NoFilt_Fin2,
P97Emg_LowFilt = P12_Emg_LowFilt_Fin2, P97Imu_LowFilt = P12_Imu_LowFilt_Fin2))
## P43 -> P98.mat ##
P43_Emg_NoFilt_Fin =emg_bin_NoFiltDF_list[1]
numcol_emg2 = P43_Emg_NoFilt_Fin.shape[2]
P43_Emg_NoFilt_Fin2 = np.array(P43_Emg_NoFilt_Fin[:,:,7:numcol_emg2],dtype = np.float64)
P43_Emg_LowFilt_Fin =emg_bin_LowFiltNormDF_list[1]
P43_Emg_LowFilt_Fin2 = np.array(P43_Emg_LowFilt_Fin[:,:,7:numcol_emg2],dtype = np.float64)
P43_Imu_NoFilt_Fin = imu_bin_NoFiltDF_list[1]
numcol_imu2 = P43_Imu_NoFilt_Fin.shape[2]
P43_Imu_NoFilt_Fin2 = np.array(P43_Imu_NoFilt_Fin[:,:,7:numcol_imu2],dtype = np.float64)
P43_Imu_LowFilt_Fin =imu_bin_LowFiltNormDF_list[1]
P43_Imu_LowFilt_Fin2 = np.array(P43_Imu_LowFilt_Fin[:,:,7:numcol_imu2],dtype = np.float64)
scipy.io.savemat('P98.mat', dict(P98Emg_NoFilt = P43_Emg_NoFilt_Fin2 , P98Imu_NoFilt = P43_Imu_NoFilt_Fin2,
P98Emg_LowFilt = P43_Emg_LowFilt_Fin2, P98Imu_LowFilt = P43_Imu_LowFilt_Fin2))
## P52 -> P96.mat ##
P52_Emg_NoFilt_Fin =emg_bin_NoFiltDF_list[2]
numcol_emg3 = P52_Emg_NoFilt_Fin.shape[2]
P52_Emg_NoFilt_Fin2 = np.array(P52_Emg_NoFilt_Fin[:,:,7:numcol_emg3],dtype = np.float64)
P52_Emg_LowFilt_Fin =emg_bin_LowFiltNormDF_list[2]
P52_Emg_LowFilt_Fin2 = np.array(P52_Emg_LowFilt_Fin[:,:,7:numcol_emg3],dtype = np.float64)
P52_Imu_NoFilt_Fin = imu_bin_NoFiltDF_list[2]
P52_Imu_NoFilt_Fin2 = np.array(P52_Imu_NoFilt_Fin[:,:,7:19],dtype = np.float64)
P52_Imu_LowFilt_Fin =imu_bin_LowFiltNormDF_list[2]
P52_Imu_LowFilt_Fin2 = np.array(P52_Imu_LowFilt_Fin[:,:,7:19],dtype = np.float64)
scipy.io.savemat('P96.mat', dict(P96Emg_NoFilt = P52_Emg_NoFilt_Fin2 , P96Imu_NoFilt = P52_Imu_NoFilt_Fin2,
P96Emg_LowFilt = P52_Emg_LowFilt_Fin2, P96Imu_LowFilt = P52_Imu_LowFilt_Fin2))
## F09.mat ##
F10_Emg_NoFilt_Fin =emg_bin_NoFiltDF_list[3]
numcol_emg4 = F10_Emg_NoFilt_Fin.shape[2]
P63_Emg_NoFilt_Fin2 = np.array(F10_Emg_NoFilt_Fin[:,:,7:numcol_emg4],dtype = np.float64)
F10_Emg_LowFilt_Fin =emg_bin_LowFiltNormDF_list[3]
P63_Emg_LowFilt_Fin2 = np.array(F10_Emg_LowFilt_Fin[:,:,7:numcol_emg4],dtype = np.float64)
F10_Imu_NoFilt_Fin = imu_bin_NoFiltDF_list[3]
P63_Imu_NoFilt_Fin2 = np.array(F10_Imu_NoFilt_Fin[:,:,7:19],dtype = np.float64)
F10_Imu_LowFilt_Fin =imu_bin_LowFiltNormDF_list[3]
P63_Imu_LowFilt_Fin2 = np.array(F10_Imu_LowFilt_Fin[:,:,7:19],dtype = np.float64)
scipy.io.savemat('F09.mat', dict(F09Emg_NoFilt = P63_Emg_NoFilt_Fin2 , F09Imu_NoFilt = P63_Imu_NoFilt_Fin2,
F09Emg_LowFilt = P63_Emg_LowFilt_Fin2, F09Imu_LowFilt = P63_Imu_LowFilt_Fin2))
## F10.mat ##
F10_Emg_NoFilt_Fin =emg_bin_NoFiltDF_list[4]
numcol_emg4 = F10_Emg_NoFilt_Fin.shape[2]
P63_Emg_NoFilt_Fin2 = np.array(F10_Emg_NoFilt_Fin[:,:,7:numcol_emg4],dtype = np.float64)
F10_Emg_LowFilt_Fin =emg_bin_LowFiltNormDF_list[4]
P63_Emg_LowFilt_Fin2 = np.array(F10_Emg_LowFilt_Fin[:,:,7:numcol_emg4],dtype = np.float64)
F10_Imu_NoFilt_Fin = imu_bin_NoFiltDF_list[4]
P63_Imu_NoFilt_Fin2 = np.array(F10_Imu_NoFilt_Fin[:,:,7:19],dtype = np.float64)
F10_Imu_LowFilt_Fin =imu_bin_LowFiltNormDF_list[4]
P63_Imu_LowFilt_Fin2 = np.array(F10_Imu_LowFilt_Fin[:,:,7:19],dtype = np.float64)
scipy.io.savemat('F10.mat', dict(F10Emg_NoFilt = P63_Emg_NoFilt_Fin2 , F10Imu_NoFilt = P63_Imu_NoFilt_Fin2,
F10Emg_LowFilt = P63_Emg_LowFilt_Fin2, F10Imu_LowFilt = P63_Imu_LowFilt_Fin2))
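# The five export blocks above repeat the same slice-and-save pattern; the loop
# below is an equivalent sketch (kept commented out so the files above are not
# rewritten; note the IMU slice is 7:19 in some blocks and 7:end in others).
# for idx, name in enumerate(['P97', 'P98', 'P96', 'F09', 'F10']):
#     emg_nf = np.array(emg_bin_NoFiltDF_list[idx][:, :, 7:], dtype=np.float64)
#     emg_lf = np.array(emg_bin_LowFiltNormDF_list[idx][:, :, 7:], dtype=np.float64)
#     imu_nf = np.array(imu_bin_NoFiltDF_list[idx][:, :, 7:19], dtype=np.float64)
#     imu_lf = np.array(imu_bin_LowFiltNormDF_list[idx][:, :, 7:19], dtype=np.float64)
#     scipy.io.savemat(name + '.mat', {name + 'Emg_NoFilt': emg_nf, name + 'Imu_NoFilt': imu_nf,
#                                      name + 'Emg_LowFilt': emg_lf, name + 'Imu_LowFilt': imu_lf})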
# check
# P43_Emg_NoFilt_Fin2[0,:,:]
# P63_Emg_LowFilt_Fin2[0,:,:]
# P63_Imu_NoFilt_Fin2[0,:,:]
# P63_Imu_LowFilt_Fin2[0,:,:]
# Line 128 of 'classifycalib_Rndfrst' gives the parameters of the Random Forest
| true |
daf9d0d5f44080cdc3e8ae121acd60681f1e3ece
|
Python
|
shendrix07/tide_station_info
|
/make_station_info.py
|
UTF-8
| 1,737 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
import pandas as pd
import csv
from geopy.geocoders import GoogleV3
station_info = pd.read_csv('station_web_info.csv')
station_info['Timezone'] = '***' # initialize
stations_by_state = station_info.groupby('State')
with open('timezones_by_state.csv') as f:
f_csv = csv.DictReader(f)
for row in f_csv:
rows_to_change = stations_by_state.get_group(row['STATE']).index
station_info.loc[rows_to_change, 'Timezone'] = row['TIMEZONE']
# now have all the timezones that can be looked up by state
# next, have to look up individual station timezones with geocoding
# for unknown timezones ('***')
stations_by_timezone = station_info.groupby('Timezone')
rows_to_geocode = stations_by_timezone.get_group('***').index
print('{} rows to geocode.'.format(len(rows_to_geocode)))
for i in rows_to_geocode:
lat = station_info.loc[i, 'Latitude']
lon = station_info.loc[i, 'Longitude']
    try:
        # geopy 1.x API (geopy 2.x renamed this to GoogleV3().reverse_timezone)
        st_timezone = GoogleV3().timezone((lat, lon)).zone
    except Exception:
print('ERROR: Could not get timezone for {}, {}'.format(
station_info.loc[i, 'StationName'],
station_info.loc[i, 'State']))
station_info.loc[i, 'Timezone'] = 'UNKNOWN'
else:
station_info.loc[i, 'Timezone'] = st_timezone
print('Geocoded {}, {} to timezone {}'.format(
station_info.loc[i, 'StationName'],
station_info.loc[i, 'State'], st_timezone))
station_info.to_csv('station_info.csv', index=False)
stations_by_timezone = station_info.groupby('Timezone')
print('Done, with the following missing timezones:')
print(repr(stations_by_timezone.get_group('UNKNOWN')))
# Will go through the csv output by hand and fill in UNKNOWN values
| true |
9334896f1e1441525ebdee3c4465a0b2005f2704
|
Python
|
StefanoBelli/foi1819
|
/changebrightness.py
|
UTF-8
| 213 | 2.890625 | 3 |
[] |
no_license
|
def changeBrightness(pict, x):
    # @param pict : Picture
    # @param x : float (positive brightens, negative darkens; JES's makeColor clamps to 0-255)
    for pix in getPixels(pict):
        newColor = makeColor( getRed(pix) + x, getGreen(pix) + x, getBlue(pix) + x)
        setColor(pix, newColor)
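# Example usage in JES (a sketch; pickAFile, makePicture and show are JES builtins):
# pic = makePicture(pickAFile())
# changeBrightness(pic, 30)   # brighten by 30; pass a negative x to darken
# show(pic)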
| true |
d8a395c904098043f361f46bacf4e97710e2aaca
|
Python
|
Skvidvardin/project_s
|
/vocab.py
|
UTF-8
| 2,045 | 2.984375 | 3 |
[] |
no_license
|
from collections import Counter, OrderedDict
import numpy as np
class OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first seen"""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__,
OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
class Vocabulary:
"""A vocabulary, assigns IDs to tokens"""
def __init__(self):
self.freqs = OrderedCounter()
self.w2i = {}
self.i2w = []
def count_token(self, t):
self.freqs[t] += 1
def add_token(self, t):
self.w2i[t] = len(self.w2i)
self.i2w.append(t)
def build(self, min_freq=0):
self.add_token("<unk>")
self.add_token("<pad>")
tok_freq = list(self.freqs.items())
tok_freq.sort(key=lambda x: x[1], reverse=True)
for tok, freq in tok_freq:
if freq >= min_freq:
self.add_token(tok)
def load_glove(glove_path, vocab, glove_dim=300):
"""
Load Glove embeddings and update vocab.
:param glove_path:
:param vocab:
:param glove_dim:
:return:
"""
vectors = []
w2i = {}
i2w = []
# Random embedding vector for unknown words
vectors.append(np.random.uniform(
-0.05, 0.05, glove_dim).astype(np.float32))
w2i["<unk>"] = 0
i2w.append("<unk>")
# Zero vector for padding
vectors.append(np.zeros(glove_dim).astype(np.float32))
w2i["<pad>"] = 1
i2w.append("<pad>")
with open(glove_path, mode="r", encoding="utf-8") as f:
for line in f:
word, vec = line.split(u' ', 1)
w2i[word] = len(vectors)
i2w.append(word)
vectors.append(np.array(vec.split(), dtype=np.float32))
# fix brackets
w2i[u'-LRB-'] = w2i.pop(u'(')
w2i[u'-RRB-'] = w2i.pop(u')')
i2w[w2i[u'-LRB-']] = u'-LRB-'
i2w[w2i[u'-RRB-']] = u'-RRB-'
vocab.w2i = w2i
vocab.i2w = i2w
return np.stack(vectors)
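# Example usage (a sketch; 'glove.840B.300d.txt' is a placeholder path, and
# load_glove replaces the vocab's mappings with the GloVe words plus <unk>/<pad>):
# v = Vocabulary()
# vectors = load_glove("glove.840B.300d.txt", v, glove_dim=300)
# assert vectors.shape[0] == len(v.w2i)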
| true |
2dab948a46427783e9ea126404ec01d97d1630bf
|
Python
|
RicardoHernandezVarela/GradingStudents-
|
/gradingStudents.py
|
UTF-8
| 1,510 | 4 | 4 |
[] |
no_license
|
def getMultiplesOfFive(num):
numToString = str(num)
if len(numToString) < 2:
numToString = '0' + numToString
zeros = len(numToString) - 1
times = int('1' + '0' * zeros)
multiple_1 = (int(numToString[0]) + 1) * times
multiple_2 = multiple_1 - 5
return [multiple_2, multiple_1]
def getRoundedGrade(multiple, grade):
rounded = grade
diff = multiple - grade
if diff < 3:
rounded = multiple
else:
rounded = grade
return rounded
def gradingStudents(grades):
roundedGrades = []
for grade in grades:
if grade % 5 == 0 or grade < 38:
roundedGrades.append(grade)
else:
multiples = getMultiplesOfFive(grade)
roundedNum = 0
if grade > multiples[0]:
roundedNum = getRoundedGrade(multiples[1], grade)
else:
roundedNum = getRoundedGrade(multiples[0], grade)
roundedGrades.append(roundedNum)
return roundedGrades
def main():
print('Grading Students script')
    totalGrades = int(input('Enter the total grades that will be rounded: '))
gradesToRound = []
for _ in range(0, totalGrades):
newGrade = int(input('Enter a grade between 0 and 100: '))
gradesToRound.append(newGrade)
print('INPUT GRADES: ', gradesToRound)
result = gradingStudents(gradesToRound)
print('OUTPUT GRADES: ', result)
# Call main() function.
if __name__ == '__main__':
main()
| true |
5f063bfeac1be4bc4411e3e86b135000ca1a5632
|
Python
|
kennycaiguo/glglue
|
/src/glglue/sample/samplecontroller.py
|
UTF-8
| 2,506 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
# encoding: utf-8
import re
import sys
import math
from OpenGL.GL import *
from ..basecontroller import BaseController
from . import targetview
from . import triangle
from . import cube
from . import coord
from logging import getLogger
logger = getLogger(__name__)
DELEGATE_PATTERN=re.compile('^on[A-Z]')
VELOCITY=0.1
def to_radian(degree):
return degree/180.0*math.pi
class Scene(object):
def __init__(self):
self.coord=coord.Coord(1.0)
self.cube=cube.Cube(0.3)
self.xrot=0
self.yrot=0
def onUpdate(self, ms):
self.yrot+=ms * VELOCITY
while self.yrot>360.0:
self.yrot-=360.0
self.xrot+=ms * VELOCITY * 0.5
while self.xrot>360.0:
self.xrot-=360.0
def draw(self):
self.coord.draw()
glRotate(math.sin(to_radian(self.yrot))*180, 0, 1, 0)
glRotate(math.sin(to_radian(self.xrot))*180, 1, 0, 0)
self.cube.draw()
class SampleController(object):
    '''
    Fixed-function OpenGL 1.5 sample controller
    '''
def __init__(self, view=None, root=None):
view=view or targetview.TargetView()
self.view=view
self.root=root or Scene()
self.isInitialized=False
self.delegate(view)
        self.delegate(self.root)
def delegate(self, to):
for name in dir(to):
if DELEGATE_PATTERN.match(name):
method = getattr(to, name)
setattr(self, name, method)
def onUpdate(self, ms):
self.root.onUpdate(ms)
def onKeyDown(self, key):
logger.debug("onKeyDown: %x", key)
if key==ord('\033'):
# Escape
sys.exit()
if key==ord('q'):
# q
sys.exit()
    def onInitialize(self, *args):
        pass
    def initialize(self):
        self.view.onResize()
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.6, 0.6, 0.4, 0.0)
        # called once before the first draw
        self.onInitialize()
    def draw(self):
        if not self.isInitialized:
            self.initialize()
            self.isInitialized=True
        # clear the OpenGL buffers
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # reset the projection matrix
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        self.view.updateProjection()
        # reset the modelview matrix
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        self.view.updateView()
        # draw the scene
        self.root.draw()
        glFlush()
| true |
3b80831e17fd5da1328b43847fe3dbf17b08f988
|
Python
|
etnpce/PhysLab2
|
/autoErr.py
|
UTF-8
| 3,774 | 3.5625 | 4 |
[] |
no_license
|
class autoErr:
__slots__ = ("val","err") # Root of all evil
def __init__(self, val, err):
self.val = val
# INVARIANT: self.err >= 0
self.err = abs(err)
    # Python has two ways to print a value:
    # str(x) calls __str__ if it exists, and is meant to be "pretty"
    # repr(x) calls __repr__ if it exists,
    # and is supposed to be a valid expression for reproducing the value
def __repr__(self):
# Python f(ormat)-strings are nice
# { ... } escapes an f-string, and puts in a value.
return f"autoErr{(self.val,self.err)}"
def __str__(self):
return f"{self.val}±{self.err}"
# Python uses "dunder methods" to implement operator overloading
def __abs__(self):#abs(x) = x.__abs__
return autoErr(abs(self.val), self.err)
def __neg__(self): # (-x) = x.__neg__
return autoErr(-self.val, self.err)
def __add__(self, oth): # a+b = a.__add__(b)
# I use this if pattern constantly to allow interaction with normal numbers
if isinstance(oth, autoErr):
return autoErr(self.val+oth.val, (self.err**2 + oth.err**2)**0.5)
else:
return autoErr(self.val+oth, self.err)
def __sub__(self, oth): # a-b = a.__minus__(b)
if isinstance(oth, autoErr):
return autoErr(self.val-oth.val, (self.err**2 + oth.err**2)**0.5)
else:
return autoErr(self.val-oth, self.err)
def __mul__(self, oth): # a*b = a.__mul__(b)
if isinstance(oth, autoErr):
val = self.val * oth.val
return autoErr(val, val*((self.err/self.val)**2 + (oth.err/oth.val)**2)**0.5)
else:
return autoErr(self.val*oth, abs(self.err*oth)) # abs to maintain invariant
def __truediv__(self, oth): # a/b = a.__truediv__(b)
# __div__ is deprecated, and exists purely for python 2 compatibility
if isinstance(oth, autoErr):
val = self.val / oth.val
return autoErr(val, val*((self.err/self.val)**2 + (oth.err/oth.val)**2)**0.5)
else:
return autoErr(self.val/oth, abs(self.err/oth))
def __pow__(self, oth): # a^b = a.__pow__(b)
if isinstance(oth, autoErr):
return NotImplemented
# Let's not do this
#l,u = oth.split()
#l = self**l
#u = self**u
#ll, lu = l.split()
#ul, uu = u.split()
#return autoErr((uu+ll)/2, (uu-ll)/2)
else:
val = self.val**oth
return autoErr(val, abs(val*oth*self.err/self.val))
# When python tries to do a binary operation, such as a+b,
# it checks both a.__add__(b) and b.__radd(a), and
# takes the first one to succeed. This allows you to make
# custom types compatible with a base type on both sides.
# Otherwise, 2*autoErr(...) would be an error.
def __radd__(self, oth): # a+b = b.__radd__(a)
return self + oth
def __rsub__(self, oth): # a-b = b.__rsub__(a)
return -self + oth
def __rmul__(self, oth): # a*b = b.__rmul__(a)
return self * oth
def __rtruediv__(self, oth): # a/b = b.__rtruediv__(a)
return self**-1 * oth
#def __rpow__(self, oth):
# These methods allow you to convert a autoErr into normal number types
def __float__(self): # float(x) = x.__float__()
return float(self.val)
def __int__(self): # int(x) = x.__int__()
return int(self.val)
    def __long__(self): # long(x) = x.__long__(); Python 2 only -- long does not exist in Python 3
        return long(self.val)
# The only non-dunder method, used for convenience
def split(self):
return (self.val - self.err, self.val + self.err)
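# Quick demonstration of the propagation rules above (a minimal sketch):
if __name__ == "__main__":
    x = autoErr(10.0, 0.5)
    y = autoErr(3.0, 0.1)
    print(x + y)            # absolute errors add in quadrature
    print(x * y)            # relative errors add in quadrature
    print((2 * x).split())  # (val - err, val + err) interval: (19.0, 21.0)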
| true |
2c3474930d5dbbcb11abd69a54cd36a8acedb39a
|
Python
|
Psyborrg/Baka-Bot
|
/Music.py
|
UTF-8
| 3,934 | 2.6875 | 3 |
[] |
no_license
|
import asyncio
import time
import discord
from discord.ext import commands
from discord import FFmpegPCMAudio
from youtube_dl import YoutubeDL
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': False,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
# List variable to hold the player objects for music
music_queue = []
class Music(commands.Cog, description="music"):
def __init__(self, bot):
self.bot = bot
#Leave command to have the bot disconnect from the channel
@commands.command(name='leave', help='Disconnect the bot from its current voice channel')
async def leave(self, ctx):
#If the bot is in a voice channel
if ctx.voice_client:
await ctx.voice_client.disconnect()
await ctx.send("Bye Bye!")
#If not in a voice channel
else:
await ctx.send("I'm not in a voice channel silly!")
# # Suppress noise about console usage from errors
# youtube_dl.utils.bug_reports_message = lambda: ''
@commands.command(name='stop', help='stops the music')
async def stop(self, ctx):
if ctx.voice_client.is_playing():
ctx.voice_client.stop()
await ctx.send('stopped the music')
@commands.command()
async def play(self, ctx, url : str):
if ctx.voice_client is None:
if ctx.author.voice:
#Connect to the message author's voice channel
channel = ctx.message.author.voice.channel
await channel.connect()
else:
await ctx.send("I'm not sure which channel to join. Do you think you could join a voice channel for me?")
with YoutubeDL(ytdl_format_options) as ydl:
info = ydl.extract_info(url, download=False)
URL = info['formats'][0]['url']
if not ctx.voice_client.is_playing():
ctx.voice_client.play(FFmpegPCMAudio(URL, **ffmpeg_options), after=lambda x: self.play_next(ctx))
await ctx.send(f"Playing: {info['title']}")
print(f"playing {info['title']}")
else:
music_queue.append(URL)
await ctx.send(f"Added {info['title']} to the queue")
print(f"Added {info['title']} to the queue")
def play_next(self, ctx):
# wait until there is nothing playing
while ctx.voice_client.is_playing():
print("I'm playing right now, stop it")
time.sleep(2)
# If there is a song in the queue, get it and then pop it from the queue
if (len(music_queue) >= 1):
url = music_queue[0]
music_queue.pop(0)
# Play the popped player, and then repeat this function after the source has ended
with YoutubeDL(ytdl_format_options) as ydl:
info = ydl.extract_info(url, download=False)
URL = info['formats'][0]['url']
ctx.voice_client.play(FFmpegPCMAudio(URL, **ffmpeg_options), after=lambda x: self.play_next(ctx))
asyncio.run_coroutine_threadsafe(ctx.send(f"playing {info['title']}"), self.bot.loop)
print(f"playing {info['title']}")
else:
asyncio.run_coroutine_threadsafe(ctx.send("No more songs in the queue"), self.bot.loop)
time.sleep(5)
asyncio.run_coroutine_threadsafe(ctx.voice_client.disconnect(), self.bot.loop)
| true |
4c31db1025e984d406ebf400088d4ff824f3a323
|
Python
|
swagatk/Raspi_codes
|
/Edukit3/pwm_motors.py
|
UTF-8
| 2,170 | 3.1875 | 3 |
[] |
no_license
|
# CamJam Edukit 3 - robotics
# Worksheet - 7: controlling motors with PWM
import RPi.GPIO as GPIO
import time
#set GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# set variables for GPIO motor Pins
pinMotorAForward = 10
pinMotorABackward = 9
pinMotorBForward = 8
pinMotorBBackward = 7
# PWM parameters
Frequency = 20
DutyCycleA = 30
DutyCycleB = 30
Stop = 0
# Set the GPIO Pin Mode
GPIO.setup(pinMotorAForward, GPIO.OUT)
GPIO.setup(pinMotorABackward, GPIO.OUT)
GPIO.setup(pinMotorBForward, GPIO.OUT)
GPIO.setup(pinMotorBBackward, GPIO.OUT)
# Set the GPIO to Software PWM at 'Frequency' Hertz
pwmMotorAForward = GPIO.PWM(pinMotorAForward, Frequency)
pwmMotorABackward = GPIO.PWM(pinMotorABackward, Frequency)
pwmMotorBForward = GPIO.PWM(pinMotorBForward, Frequency)
pwmMotorBBackward = GPIO.PWM(pinMotorBBackward, Frequency)
# set the duty cycle for software PWM - initially to 0
pwmMotorAForward.start(Stop)
pwmMotorABackward.start(Stop)
pwmMotorBForward.start(Stop)
pwmMotorBBackward.start(Stop)
def stopmotors():
pwmMotorAForward.ChangeDutyCycle(Stop)
pwmMotorABackward.ChangeDutyCycle(Stop)
pwmMotorBForward.ChangeDutyCycle(Stop)
pwmMotorBBackward.ChangeDutyCycle(Stop)
def forward():
pwmMotorAForward.ChangeDutyCycle(DutyCycleA)
pwmMotorABackward.ChangeDutyCycle(Stop)
pwmMotorBForward.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackward.ChangeDutyCycle(Stop)
def backward():
pwmMotorAForward.ChangeDutyCycle(Stop)
pwmMotorABackward.ChangeDutyCycle(DutyCycleA)
pwmMotorBForward.ChangeDutyCycle(Stop)
pwmMotorBBackward.ChangeDutyCycle(DutyCycleB)
def turnleft():
pwmMotorAForward.ChangeDutyCycle(Stop)
pwmMotorABackward.ChangeDutyCycle(DutyCycleA)
pwmMotorBForward.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackward.ChangeDutyCycle(Stop)
def turnright():
pwmMotorAForward.ChangeDutyCycle(DutyCycleA)
pwmMotorABackward.ChangeDutyCycle(Stop)
pwmMotorBForward.ChangeDutyCycle(Stop)
pwmMotorBBackward.ChangeDutyCycle(DutyCycleB)
######
forward()
time.sleep(1)
turnleft()
time.sleep(0.5)
forward()
time.sleep(1)
turnright()
time.sleep(0.5)
stopmotors()
GPIO.cleanup()
| true |
9335e57aa2ce5a8bad6b582c4a28a63b808bd118
|
Python
|
yann021/PythonAN21
|
/Jour1/Exo/Tst1.py
|
UTF-8
| 98 | 3.640625 | 4 |
[] |
no_license
|
a = 3
b = 12
c = 4
result = (b-a)//c
print("Le resultat de l'opération est " + str(result))
| true |
ef0e41dccb633376c0d52796bbd1a9aa80b539d1
|
Python
|
ScholarIndex/LinkedBooks
|
/disambiguation/primary/model_dev/supporting_functions.py
|
UTF-8
| 553 | 3.046875 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Loads checked and correct disambiguations AND a sample of references which are NOT checked.
"""
__author__ = """Giovanni Colavizza"""
import string,re
def cleanup(text):
    """
    Remove punctuation (except periods), drop tokens containing digits, and lowercase.
    :param text: a string
    :return: a string
    """
    RE_D = re.compile(r'\d')
tokens = text.split()
new_tokens = list()
for t in tokens:
if RE_D.search(t):
continue
for p in string.punctuation:
if p == ".":
continue
t=t.replace(p,"")
new_tokens.append(t.lower().strip())
return " ".join(new_tokens)
| true |
f6f9dc6c06adcc69d61b69cebdc4d522a8325205
|
Python
|
abubakarsiddik1221/SiddikR151530_DSP
|
/linearity_dtft.py
|
UTF-8
| 1,022 | 3.03125 | 3 |
[] |
no_license
|
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
import cmath as cm
k=int(input("enter number of samples for x[n]:"))
x=[]
print("enter samples for x[n]:")
for i in range(0,k):
y=int(input())
x=np.append(x,y)
print("x[n]=",x)
N=1000
j=cm.sqrt(-1)
X=[]
w=np.linspace(-np.pi,np.pi,N)
for i in range(0,N):
s=0
for n in range(0,len(x)):
s=s+(x[n]*np.exp(-n*w[i]*j))
X.append(s)
print("X[n]=",np.abs(X))
plt.subplot(411)
plt.plot(w,np.abs(X))
plt.xlabel("freq(w/pi)")
plt.ylabel("magnitude")
plt.title("magnitude spectrum")
plt.grid()
plt.subplot(412)
plt.plot(w,angle(X)/np.pi)
plt.xlabel("freq(w/pi)")
plt.ylabel("phase angle in radian")
plt.title("phase spectrum")
plt.grid()
plt.subplot(413)
plt.plot(w,real(X))
plt.xlabel("freq(w/pi)")
plt.ylabel("real part")
plt.title("real values of X")
plt.grid()
plt.subplot(414)
plt.plot(w,imag(X))
plt.xlabel("freq(w/pi)")
plt.ylabel("imaginary part")
plt.title("imaginary values of X")
plt.grid()
plt.show()
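# Vectorized cross-check (a sketch): the nested loops above compute
# X[i] = sum_n x[n] * exp(-1j * w[i] * n), which is a single matrix product.
n_idx = np.arange(len(x))
X_vec = np.exp(-1j * np.outer(w, n_idx)) @ x
assert np.allclose(X, X_vec)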
| true |
671b1f2e7960e8bcc394d17de9ba2f7f9d86310c
|
Python
|
nbearman/city-scrapers
|
/city_scrapers/spiders/det_entertainment_commission.py
|
UTF-8
| 2,784 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import re
from datetime import time
from dateutil.parser import parse
from city_scrapers.constants import COMMISSION
from city_scrapers.spider import Spider
class DetEntertainmentCommissionSpider(Spider):
name = 'det_entertainment_commission'
agency_name = 'Detroit Entertainment Commission'
timezone = 'America/Detroit'
allowed_domains = ['www.detroitsentertainmentcommission.com']
start_urls = ['https://www.detroitsentertainmentcommission.com/services']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
location = {
'name': 'Coleman A. Young Municipal Center',
'address': '2 Woodward Avenue, Detroit, MI 48226',
'neighborhood': '',
}
for item in self._parse_entries(response):
data = {
'_type': 'event',
'name': 'Entertainment Commission',
'event_description': '',
'classification': COMMISSION,
'start': self._parse_start(item, response),
'end': {
'date': None,
'time': None,
'note': ''
},
'all_day': False,
'location': location,
'documents': [],
'sources': [{
'url': response.url,
'note': ''
}],
}
data['status'] = self._generate_status(data)
data['id'] = self._generate_id(data)
yield data
def _parse_entries(self, response):
entries_list = []
month_day_xpath = response.xpath(
'//p[span[contains(string(), "Next Meeting Date")]]/following-sibling::p[span]//text()'
).extract()
for item in month_day_xpath:
valid_entry = re.match(r"\w+ \d{1,2}$", item)
if valid_entry:
entries_list.append(item)
return entries_list
def _parse_start(self, item, response):
"""
Parse start date and time.
"""
year_text = response.xpath('//p//span[contains(string(), "Meeting Dates")]/text()'
).extract_first()
        year_regex = re.search(r"\d{4}", year_text).group(0)
try:
start_date = parse(item + ', ' + year_regex)
return {'date': start_date.date(), 'time': time(17, 00), 'note': ''}
except ValueError:
return {'date': None, 'time': None, 'note': item}
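# Run with the standard Scrapy CLI from a city-scrapers checkout (a sketch):
#   scrapy crawl det_entertainment_commission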
| true |
12680f7077b4d0dda117697d5e36bc0fc735f29a
|
Python
|
Yushgoel/Python
|
/oddeven.py
|
UTF-8
| 155 | 3.96875 | 4 |
[] |
no_license
|
number = raw_input("Please enter a positive number.")
number = float(number)
remainder = number % 2.0
if remainder == 0:
print "Even"
else:
print "Odd"
| true |
b4e359fce4b5e0af6118bac4390b003e86eb3c96
|
Python
|
andresrv94/zerotomastery-twitterbot
|
/search.py
|
UTF-8
| 2,491 | 3.125 | 3 |
[] |
no_license
|
# This file will use the Tweepy Cursor API to reply to mentions, follow users that follow us, and a backup like and retweet
# imports tweepy, time, and the create_api function from config.py
import tweepy
import time
from config import create_api
# Define a follow_followers function that accepts api, checks for followers we do not follow back, and follows them
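# A minimal sketch of that function, assuming the tweepy v3-era API
# (api.followers pages through followers; each user has a `following` flag and follow()).
# Note: main() below would also need to call follow_followers(api) to activate it.
def follow_followers(api):
    for follower in tweepy.Cursor(api.followers).items():
        if not follower.following:
            print("Following {}".format(follower.name))
            follower.follow()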
# Define a check_mentions function that accepts api, keywords, and since_id, follow and reply to the user if user has mentioned us
def check_mentions(api, keywords, since_id):
new_since_id = since_id
for tweet in tweepy.Cursor(api.mentions_timeline,
since_id=since_id).items(100):
new_since_id = max(tweet.id, new_since_id)
if tweet.in_reply_to_status_id is not None:
continue
if any(keyword in tweet.text.lower() for keyword in keywords):
if not tweet.user.following:
tweet.user.follow()
api.update_status(status="\"while(!( succeed = try_again() ) )\" \nZero To Mastery, ZTMBot to the rescue! \nhttps://zerotomastery.io/",
in_reply_to_status_id=tweet.id)
return new_since_id
# Define a fav_retweet function that accepts api, create terms string to search for and use the tweepy.Cursor object to search those terms 100 times
def fav_retweet(api):
    '''
    Search for tweets matching the search criteria, like each tweet that has
    not been liked yet, and retweet each tweet that has not been retweeted yet.
    '''
search = ["#ZTM", "#Zerotomastery"]
for tweet in tweepy.Cursor(api.search, search).items(100):
        try:
            # use the status flags: calling favorite()/retweet() inside the
            # condition would perform the action while testing it
            if not tweet.favorited:
                tweet.favorite()
                print("I have liked the tweet")
            if not tweet.retweeted:
                tweet.retweet()
                print('Retweeted the tweet')
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
# Define a main function to connect to the api and create a since_id counter, call all above functions
def main():
api = create_api()
since_id = 1
keywords = ["#ZTM", "#Zerotomastery",
"#ztm", "zerotomastery", "ZeroToMastery"]
while True:
since_id = check_mentions(api, keywords, since_id)
fav_retweet(api)
time.sleep(60)
# if __name__ main, call the main function
if __name__ == "__main__":
main()
| true |
b0bc8dc7a9c96845803d99c41b3cd75baf391552
|
Python
|
Godaday/pythonDemo
|
/mergeExcel/main.py
|
UTF-8
| 2,075 | 2.921875 | 3 |
[] |
no_license
|
#pip3 install xlrd
#pip3 install xlsxwriter
import os
import xlrd
import xlsxwriter
import datetime,time
Target_Folder_Path="mergeData/" # folder holding the Excel files to merge; defaults to a subfolder of the current directory
Sheet_Index=0 # index of the sheet that holds the data; defaults to the first sheet (0)
First_Data_row=5 # first row of data (1-based), excluding the header rows above it
mergeDoneData=[] # accumulates the rows read from every file
def MergeFiles(folderPath):
    for root ,dirs,files in os.walk(folderPath):
        for name in files:
            fileFullPath =os.path.join(root,name)
            ReadExcelRow(fileFullPath)
    # write the merged result once, after all files have been read
    WriteMergeDataInNewFile(mergeDoneData)
#read the data rows of one Excel file
def ReadExcelRow(filePath):
filedata =xlrd.open_workbook(filePath)
sheets = filedata.sheets()
if len(sheets)>0:
print("reading file- :",filePath)
tragetSheet =sheets[Sheet_Index]
rowCount=0
for rownum in range(tragetSheet.nrows):
if rownum<First_Data_row-1:
continue
currentRow=tragetSheet.row_values(rownum)
if len(currentRow[1].strip())!=0 and len(currentRow[2].strip())!=0:
                currentRow.insert(0,filePath.replace(Target_Folder_Path,"")) # prepend a column with the source file name
mergeDoneData.append(currentRow)
rowCount+=1
print("Rows Count:",rowCount)
else:
print(filePath,"has 0 sheet")
return mergeDoneData
def WriteMergeDataInNewFile(mergeDoneData):
    # output file name: "合并结果" ("merged result") plus a timestamp
    newfileName= "合并结果"+time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime(time.time()))+".xlsx"
    merageDoneFile=xlsxwriter.Workbook(newfileName)
    merageDoneFile_Sheet = merageDoneFile.add_worksheet()
    for row in range(len(mergeDoneData)):
        for cell in range(len(mergeDoneData[row])):
            value=mergeDoneData[row][cell]
            merageDoneFile_Sheet.write(row,cell,value)
    merageDoneFile.close()
    print('merged excel is done, file name is',newfileName)
    os.system("start explorer %s" % os.getcwd())
if __name__=='__main__':
MergeFiles(Target_Folder_Path)
| true |
a643b53e755277aa450f0ef97e3bdc56392f6e54
|
Python
|
c0deTang/lintCode
|
/140_fast-power/fast-power.py
|
UTF-8
| 578 | 3.1875 | 3 |
[] |
no_license
|
# coding:utf-8
'''
@Copyright:LintCode
@Author: CodeWTKH
@Problem: http://www.lintcode.com/problem/fast-power
@Language: Python
@Datetime: 17-06-01 15:07
'''
class Solution:
"""
@param a, b, n: 32bit integers
@return: An integer
"""
def fastPower(self, a, b, n):
# write your code here
if n == 0:
return 1 % b
elif n == 1:
return a % b
else:
i = n // 2
s = self.fastPower(a, b, i) ** 2
if n % 2:
s = s * a
return s % b
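# Sanity check (a sketch): fastPower computes (a ** n) % b by repeated squaring,
# halving n at each step, so it must agree with Python's three-argument pow().
if __name__ == '__main__':
    assert Solution().fastPower(2, 3, 31) == pow(2, 31, 3)
    assert Solution().fastPower(10, 7, 0) == 1 % 7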
| true |
fd53c2cbd6aee3426cd501e520576f754c1de955
|
Python
|
james-claar/Python-Projects
|
/email_checker.py
|
UTF-8
| 2,434 | 2.984375 | 3 |
[] |
no_license
|
"""
A Project I created to check my email without opening a Gmail tab.
"""
import time
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from blink1.blink1 import blink1
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
LIGHT_COLOR_RGB = "#CC0000" # Flash this color upon receiving emails. Should be bright and noticeable.
OLD_EMAIL_COLOR = "#330000" # Fade to this color if there are emails but they are not new.
LONG_FADE_LENGTH_MS = 5 * 1000
FLASH_TIMES = 5
FLASH_LENGTH = 0.15
WAIT_LENGTH = 0.15
CHECK_WAIT_LENGTH = 3 # Wait this many seconds before checking emails again
def authenticate():
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
return build('gmail', 'v1', http=creds.authorize(Http()))
def get_message_number(service):
# Call the Gmail API to fetch INBOX, and return the amount of unread messages in the INBOX
results = service.users().messages().list(userId='me', labelIds=['UNREAD', 'INBOX']).execute()
messages = results.get('messages', [])
return len(messages)
def b1_flash(b1_object):
for _ in range(FLASH_TIMES - 1):
b1_object.fade_to_color(100, LIGHT_COLOR_RGB)
time.sleep(FLASH_LENGTH)
b1_object.off() # Fadeout
time.sleep(WAIT_LENGTH)
b1_object.fade_to_color(100, LIGHT_COLOR_RGB)
time.sleep(FLASH_LENGTH)
b1_object.fade_to_color(LONG_FADE_LENGTH_MS, OLD_EMAIL_COLOR)
time.sleep(LONG_FADE_LENGTH_MS / 1000) # Give the blink1 time to fade
def main():
service = authenticate()
old_message_num = 0
with blink1() as b1:
# Let user know it is working
b1.fade_to_color(100, "#005500")
time.sleep(2)
b1.off()
time.sleep(1)
while True:
message_num = get_message_number(service)
if message_num == 0:
b1.off()
elif message_num > old_message_num: # We have new messages
b1_flash(b1)
elif message_num > 0:
b1.fade_to_color(100, OLD_EMAIL_COLOR)
old_message_num = message_num
time.sleep(CHECK_WAIT_LENGTH)
if __name__ == '__main__':
main()
| true |
1525b6591117eaee608fefe3bcf9e1cdcfed9de6
|
Python
|
damlassugur/KohenenNetwork
|
/number.py
|
UTF-8
| 1,403 | 3.140625 | 3 |
[] |
no_license
|
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Gaussian distributions with different means and variances are used to generate 200 points for each of 3 classes
mu1=1000
sigma1=500
mu2=2
sigma2=6
mu3=3
sigma3=7
#the generated points were saved to .npy files
#the point sets loaded from those files were used later
def nokta(mu, sigma,x0,y0,z0):
    #generate one cluster of 200 Gaussian points centered near (x0, y0, z0)
    n1=[1 for i in range(200)]
    for i in range(200):
        x=x0+random.gauss(mu,sigma)
        y=y0+random.gauss(mu,sigma)
        z=z0+random.gauss(mu,sigma)
        n1[i]=[x,y,z]
    return np.array(n1)
n1=nokta(mu1,sigma1,10,10,-10)
n2=nokta(mu2,sigma2,-40,10,-10)
n3=nokta(mu3,sigma3,10,-20,10)
#np.save('n1.npy',n1)
#np.save('n2.npy',n2)
#np.save('n3.npy',n3)
train=[]
train.extend(n1[0:130])
train.extend(n2[0:130])
train.extend(n3[0:130])
test=[]
test.extend(n1[130:200])
test.extend(n2[130:200])
test.extend(n3[130:200])
#np.save('train1.npy', train)
#np.save('test1.npy', test)
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
ax.scatter(n1[:,0],n1[:,1],n1[:,2],c='r')
ax.scatter(n2[:,0],n2[:,1],n2[:,2],c='blue')
ax.scatter(n3[:,0],n3[:,1],n3[:,2],c='purple')
plt.title('Generated Point Sets')
plt.show()
| true |
262ec8a9fc2241422d79720605e903e873c25d4e
|
Python
|
BLOODMAGEBURT/exercise100
|
/data-structure/sort-search/5.9.插入排序.py
|
UTF-8
| 1,522 | 4.40625 | 4 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: 5.9.插入排序
Description :
Author : Administrator
date: 2019/10/12 0012
-------------------------------------------------
Change Activity:
2019/10/12 0012:
-------------------------------------------------
"""
def insertion_sort(alist):
    size = len(alist)
    # iterate from the second item through the last
    for i in range(1, size):
        current_value = alist[i]
        for j in range(i - 1, -1, -1):
            # compare with the items in front; while the previous item is larger,
            # shift it back one position until a smaller item is reached
            if alist[j] > current_value:
                alist[j + 1] = alist[j]
                # special case: the value is smaller than everything down to index 0
                if j == 0:
                    alist[0] = current_value
            else:
                alist[j + 1] = current_value
                break
    return alist
def insertion_sort2(alist):
    """
    while-loop variant of insertion sort
    :param alist:
    :return:
    """
for index in range(1, len(alist)):
current_value = alist[index]
position = index
while position > 0 and alist[position - 1] > current_value:
alist[position] = alist[position - 1]
position -= 1
alist[position] = current_value
return alist
if __name__ == '__main__':
alist = [45, 20, 15, 30, 25, 10]
print(insertion_sort2(alist))
| true |
62122b8a006e3bedd738fb04afa2e0ac85ac63a5
|
Python
|
m1m0r1/galgo
|
/galgo/samutil.py
|
UTF-8
| 25,411 | 2.53125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
import logging
from operator import attrgetter
from .interval import Interval, IntervalMixin
import pysam
from .utils import fill_text, cached_property, attrdict
from . import csamutil
from builtins import zip
import re
# deprecated
def _parse_region_old(region):
"""
Args:
region: 1-based region
Returns:
(contig, start, end) 0-based coordinate
"""
sp = region.split(':')
contig = sp[0]
if len(sp) == 1:
return (contig, None, None)
sp = sp[1].split('-')
start = int(sp[0].replace(',', ''))
if len(sp) == 1:
return (contig, start - 1, None)
return (contig, start - 1, int(sp[1].replace(',', '')))
_region_pat2 = re.compile('^([\w*:]+):(\d+)-(\d+)$')
_region_pat1 = re.compile('^([\w*:]+):(\d+)$')
_region_pat0 = re.compile('^([\w*:]+)$')
def parse_region(region):
"""
Args:
region: 1-based region
Returns:
(contig, start, end) 0-based coordinate
>>> parse_region('abc*011')
('abc*011', None, None)
>>> parse_region('abc:abc00001')
('abc:abc00001', None, None)
>>> parse_region('abc:abc00001:1001')
('abc:abc00001', 1000, None)
>>> parse_region('abc00001:1001')
('abc00001', 1000, None)
>>> parse_region('abc:abc00001:1001-2000')
('abc:abc00001', 1000, 2000)
>>> parse_region('1:1001-2000')
('1', 1000, 2000)
>>> parse_region('1:1001')
('1', 1000, None)
>>> parse_region('1')
('1', None, None)
"""
m = _region_pat2.match(region)
if m:
c, s, e = m.groups()
return (c, int(s) - 1, int(e))
m = _region_pat1.match(region)
if m:
c, s = m.groups()
return (c, int(s) - 1, None)
m = _region_pat0.match(region)
if m:
c, = m.groups()
return (c, None, None)
raise ValueError(region)
def sam_intervals(sam, regions=None):
"""
sam: pysam.Samfile
"""
ref_lens = dict(zip(sam.references, sam.lengths))
if regions is None:
        # ':' can be accidentally contained in reference names (e.g. HLA:HLA00001)
for contig in sam.references:
start = 0
end = ref_lens[contig]
yield Interval(contig, start, end, None)
else:
for r in regions:
contig, start, end = parse_region(r)
if start is None:
start = 0
if end is None:
end = ref_lens[contig]
yield Interval(contig, start, end, None)
_blank_pat1 = re.compile(r'^\s+$')
def _is_blank(st):
return bool(_blank_pat1.match(st))
# This does not work for multimapped records
class LocalInfo:
def __init__(self, sam, iv, fasta=None, skip_flags=0x004):
self.contig = iv.contig
self.start = iv.start
self.end = iv.end
if fasta:
fasta = pysam.Fastafile(fasta)
ref_bases = fasta.fetch(reference=iv.contig, start=iv.start, end=iv.end).upper()
else:
ref_bases = '?' * (iv.end - iv.start)
self._ref_bases = ref_bases
self._reads = [Read(rec) for rec in sam.fetch(reference=iv.contig, start=iv.start, end=iv.end) if not rec.flag & skip_flags]
self._bases_liss = bases_liss = [] # e.g. [['A', 'GC', '-', ...]]
for read in self._reads:
bases_lis = read.get_bases_list(iv.start, iv.end)
if bases_lis is None: # is it ok?
continue
bases_liss.append(bases_lis)
self._pos_infos = pos_infos = []
#logging.info(bases_liss[56])
for bases_lis in zip(*bases_liss):
length = max(1, max(len(b) for b in bases_lis))
pos_infos.append({'length': length, 'bases': bases_lis})
self._max_lens = lens = [r['length'] for r in pos_infos]
self.align_length = sum(lens)
def iter_column_info(self):
"""
Yields: {
'bases': ['A', 'T', 'A', '-', '-', '-', ...], # all the bases should have 1 bp width
}
"""
#logging.info(self._max_lens)
#logging.info(self._bases_liss)
assert len(self._max_lens) == len(self._pos_infos)
for l, pos_info in zip(self._max_lens, self._pos_infos):
base_bundles = [fill_text(bs, l, char='-') for bs in pos_info['bases']]
# TODO distinguish empty and padding
#logging.info((l, bases_lis))
for base_column in zip(*base_bundles):
yield {'bases': list(base_column)}
def get_left_offset(self, pos): # pos0
assert self.start <= pos < self.end
offset = pos - self.start
return sum(self._max_lens[:offset], 0)
def get_ref_seq(self):
return ''.join(self._ref_bases)
def get_ref_align(self):
filled = [fill_text(bs, l, char='-') for l, bs in zip(self._max_lens, self._ref_bases)] # fill missing bases
return filled
def iter_read_aligns(self): # TODO sorted by position bases
for read, bases_lis in zip(self._reads, self._bases_liss):
if bases_lis is None:
continue
filled = [fill_text(bs, l, char=(' ' if _is_blank(bs) else '-')) for l, bs in zip(self._max_lens, bases_lis)] # fill missing bases
yield (read, filled)
def iter_align_pairs(self):
"""
Yields: (Read, (aln, ref_aln))
"""
ref_bases = self._ref_bases
for read, bases_lis in zip(self._reads, self._bases_liss):
if bases_lis is None:
continue
b1s = []
b2s = []
for b1, b2 in zip(bases_lis, ref_bases):
fill_len = max(len(b1), len(b2))
#b1 = fill_text(b1, fill_len, char=(' ' if _is_blank(b1) else '-'))
#b2 = fill_text(b2, fill_len, char=(' ' if _is_blank(b2) else '-'))
b1 = fill_text(b1, fill_len, char=(' ' if _is_blank(b1) else '-'))
b2 = fill_text(b2, fill_len, char=(' ' if _is_blank(b2) else '-'))
b1s.append(b1)
b2s.append(b2)
seq1 = ''.join(b1s)
seq2 = ''.join(b2s)
yield read, (seq1, seq2)
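# Example usage of LocalInfo (a sketch; 'aln.bam' and 'ref.fa' are placeholder paths):
# sam = pysam.Samfile('aln.bam')
# iv = next(sam_intervals(sam, regions=['chr1:1001-2000']))
# for read, (aln, ref_aln) in LocalInfo(sam, iv, fasta='ref.fa').iter_align_pairs():
#     print(read.name, aln, ref_aln, sep='\n')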
class Cigar(csamutil.Cigar):
"""
>>> Cigar.parse('2I3M3D3M').values == [(Cigar.I, 2), (Cigar.M, 3), (Cigar.D, 3), (Cigar.M, 3)]
True
>>> Cigar.parse('3M2I3M2S').values == [(Cigar.M, 3), (Cigar.I, 2), (Cigar.M, 3), (Cigar.S, 2)]
True
>>> Cigar([(Cigar.I, 2), (Cigar.M, 3), (Cigar.D, 3), (Cigar.M, 3)]).to_str()
'2I3M3D3M'
>>> Cigar([(Cigar.M, 3), (Cigar.I, 2), (Cigar.M, 3), (Cigar.S, 2)]).to_str()
'3M2I3M2S'
>>> ' '.join(Cigar([(Cigar.I, 2), (Cigar.M, 3), (Cigar.D, 3), (Cigar.M, 3)]).to_str_tuple())
'2I 3M 3D 3M'
>>> ' '.join(Cigar([(Cigar.M, 3), (Cigar.I, 2), (Cigar.M, 3), (Cigar.S, 2)]).to_str_tuple())
'3M 2I 3M 2S'
>>> Cigar.parse('1I1I3M2D1D3M').contract().to_str()
'2I3M3D3M'
>>> Cigar.parse('3M2I3M2S').contract().to_str()
'3M2I3M2S'
Without hard clip length
>>> Cigar().read_length
0
>>> Cigar.parse('4S3M').read_length
7
>>> Cigar.parse('10H3D3M').read_length
13
>>> Cigar.parse('3M3D2M').read_length
5
>>> Cigar.parse('15M2D3I4M').read_length
22
With hard clip length
>>> Cigar().query_length
0
>>> Cigar.parse('4S3M').query_length
7
>>> Cigar.parse('10H3D3M').query_length
3
>>> Cigar.parse('3M3D2M').query_length
5
>>> Cigar.parse('15M2D3I4M').query_length
22
Reference length
>>> Cigar().ref_length
0
>>> Cigar.parse('4S3M').ref_length
3
>>> Cigar.parse('10H3D3M').ref_length
6
>>> Cigar.parse('3M3D2M').ref_length
8
>>> Cigar.parse('15M2D3I4M').ref_length
21
>>> c = Cigar()
>>> for op, l in reversed(list(Cigar.parse('1I1I3M2D1D3M'))):
... c.prepend((op, l))
>>> str(c)
'1I1I3M2D1D3M'
>>> c = Cigar()
>>> for op, l in Cigar.parse('1I1I3M2D1D3M'):
... c.append((op, l))
>>> str(c)
'1I1I3M2D1D3M'
>>> c = Cigar()
>>> for op, l in Cigar.parse('1I1I3M2D1D3M'):
... c.add((op, l))
>>> str(c)
'2I3M3D3M'
>>> Cigar.parse('101M').clips
(0, 0)
>>> Cigar.parse('10M1I90M').clips
(0, 0)
>>> Cigar.parse('10H100M10S').clips
(10, 10)
>>> Cigar.parse('62H39M').clips
(62, 0)
>>> Cigar.parse('39M62H').clips
(0, 62)
>>> Cigar.parse('20S61M20H').clips
(20, 20)
>>> Cigar.parse('10M1I90M').has_clip()
False
>>> Cigar.parse('10H100M10S').has_clip()
True
>>> Cigar.parse('62H39M').has_clip()
True
>>> Cigar.parse('2M4I2M').hard_clip_seq('ATCGATCG')
'ATCGATCG'
>>> Cigar.parse('2S4I2S').hard_clip_seq('ATCGATCG')
'ATCGATCG'
>>> Cigar.parse('2H4I2H').hard_clip_seq('ATCGATCG') # only clip when hard clip
'CGAT'
>>> Cigar.parse('2H4I2H').hard_clip_seq([0, 1, 2, 3, 4, 5, 6, 7]) # quality scores
[2, 3, 4, 5]
# >>> Cigar.parse('2H' '4I' '2H').is_consistent_with(Cigar.parse('2S' '4I' '2S'))
"""
_re_del = re.compile('-+')
def aln2cigar(aln, ref_aln=None):
"""
>>> assert aln2cigar('NNN') == Cigar.parse('3M')
>>> assert aln2cigar('NNN') != Cigar.parse('4M')
>>> assert aln2cigar('---') == Cigar.parse('3D')
>>> assert aln2cigar('---NNN') == Cigar.parse('3D3M')
>>> assert aln2cigar('NNN---') == Cigar.parse('3M3D')
>>> assert aln2cigar('---NNN--') == Cigar.parse('3D3M2D')
>>> assert aln2cigar('NNN---NN') == Cigar.parse('3M3D2M')
>>> aln1 = '--------ATATGGGCCATCT'
>>> assert aln2cigar(aln1) == Cigar.parse('8D13M')
>>> aln2 = 'ATATATATATACGGG--ATAT'
>>> assert aln2cigar(aln2) == Cigar.parse('15M2D4M')
>>> aln2cigar(aln1, aln2).to_str()
'8D7M2I4M'
>>> aln2cigar(aln2, aln1).to_str()
'8I7M2D4M'
"""
if ref_aln is None:
return _aln2cigar1(aln)
return _aln2cigar2(aln, ref_aln)
def _aln2cigar1(aln):
cigar = Cigar()
s = 0
e = len(aln)
for m in _re_del.finditer(aln):
s1 = m.start()
e1 = m.end()
#print (m, s1, e1)
if s < s1:
cigar.append((Cigar.M, s1 - s))
cigar.append((Cigar.D, e1 - s1))
s = e1
if s < e:
cigar.append((Cigar.M, e - s))
return cigar
def _aln2cigar2(aln, ref_aln):
cigar = Cigar()
op = None
l = 0
for a1, a2 in zip(aln, ref_aln):
if a1 == '-':
if op == Cigar.D:
l += 1
continue
if op is not None:
cigar.append((op, l))
op = Cigar.D
l = 1
elif a2 == '-':
if op == Cigar.I:
l += 1
continue
if op is not None:
cigar.append((op, l))
op = Cigar.I
l = 1
else:
if op == Cigar.M:
l += 1
continue
if op is not None:
cigar.append((op, l))
op = Cigar.M
l = 1
cigar.append((op, l))
return cigar
class SamInfo(csamutil.SamInfo):
"""
>>> sam_info = SamInfo(attrdict(references=['chr1'], lengths=[1000], get_tid=lambda x: 1))
>>> rec_tmpl = dict(tid=1, qname='1', pos=10, aend=110, is_unmapped=0)
>>> sam_info.get_length('chr1')
1000
>>> sam_info.get_length_tid(1)
1000
>>> r1 = sam_info.get_read_info(attrdict(rec_tmpl, cigartuples=Cigar.parse('100M').values))
>>> r1.left_overhang, r1.right_overhang, r1.overhang
(0, 0, 0)
>>> r2 = sam_info.get_read_info(attrdict(rec_tmpl, cigartuples=Cigar.parse('5S100M').values))
>>> r2.left_overhang, r2.right_overhang, r2.overhang
(5, 0, 5)
>>> r3 = sam_info.get_read_info(attrdict(rec_tmpl, cigartuples=Cigar.parse('11S100M').values))
>>> r3.left_overhang, r3.right_overhang, r3.overhang
(10, 0, 10)
>>> r4 = sam_info.get_read_info(attrdict(rec_tmpl, cigartuples=Cigar.parse('11S100M12H').values))
>>> r4.left_overhang, r4.right_overhang, r4.overhang
(10, 12, 12)
"""
class Read(IntervalMixin):
has_lclip = None
has_rclip = None
lclip = None
rclip = None
qlen = None
qalen = None
def __init__(self, rec):
self._rec = rec
self.name = rec.qname
self.start = rec.pos
self.end = rec.pos if rec.aend is None else rec.aend
self.rec = rec # pysam.AlignedRead
self._seq = rec.seq
self.unmapped = int(rec.is_unmapped)
self.nins = 0
self.ndel = 0
if not self.unmapped:
seq_len = rec.query_length # previously rec.rlen
qstart = rec.query_alignment_start # previously rec.qstart
qend = rec.query_alignment_end # previously rec.qend
self.qlen = rec.query_alignment_length # aligned length of query sequence
self.cigar = Cigar(rec.cigar)
self.lclip, self.rclip = self.cigar.clips # soft clip or hard clip
self.has_lclip = self.lclip > 0
self.has_rclip = self.rclip > 0
#self.has_lclip = (qstart > 0)
#self.has_rclip = (qend < seq_len)
for op, length in self.cigar:
if op == Cigar.D:
self.ndel += length
elif op == Cigar.I:
self.nins += length
self.mapq = rec.mapq
self.alen = 0 if self.end is None else self.end - self.start # aligned length of reference reference sequence
self.tags = dict(rec.get_tags())
self.edit = self.tags.get('NM', None)
#self.mismatches = 0
self.reverse = int(rec.is_reverse)
self.suppl = int(rec.is_supplementary)
self.read1 = int(rec.is_read1)
self.contig = self.rname = self._rec.reference_name
# set mate pattern
# ----------> .... # right unmapped
# ... <---------- # left unmapped
self.is_left = int(not rec.is_reverse) # mate is expected to be in opposite side
self.mate_miss = int(rec.mate_is_unmapped)
self.mate_tid = rec.next_reference_id
self.mate_pos = rec.next_reference_start
self.tlen = tlen = rec.tlen
# whether mate is far is determined from tlen and deviation of tlen distribution
#self.mate_end = rec.pos + tlen if tlen > 0 else (rec.pos is rec.end is None else rec.aend) - tlen # is ok?
self.mate_invert = 0
self.mate_back = 0
self.mate_jump = 0
if self.mate_miss:
return
self.mate_jump = int(rec.tid != self.mate_tid)
self.mate_invert = int(rec.mate_is_reverse == rec.is_reverse)
if not rec.is_reverse:
self.mate_back = int(rec.pnext < rec.pos)
else:
self.mate_back = int(rec.pos < rec.pnext)
    @property
    def mate_rname(self):
        return self._rec.next_reference_name
@property
def both_clipped(self):
return self.has_lclip and self.has_rclip
@cached_property
def _bases(self):
return _get_aln_columns(self._seq, self.rec.cigar)
@cached_property
def _quals(self):
return _get_aln_columns(tuple(self.rec.query_qualities), self.rec.cigar, is_qual=True)
@cached_property
def fragmented(self):
return self.suppl or 'SA' in self.tags
@cached_property
def num_frags(self):
sa = self.tags.get('SA')
if sa is None:
return 1
else:
return 1 + sa.count(';')
@cached_property
def edit_ratio(self):
return 1. * self.edit / self.alen
def _get_sublist(self, lis, start, end, missing=' '):
start_offset = start - self.start
end_offset = end - self.end
if not lis:
return None
if start_offset > 0:
lis = lis[start_offset:]
else:
lis = [missing] * (-start_offset) + lis
if end_offset < 0:
lis = lis[:end_offset]
else:
lis = lis + [missing] * end_offset
return lis
def get_bases_list(self, start, end, missing=' '):
return self._get_sublist(self._bases, start, end, missing=missing)
def get_quals_list(self, start, end, missing=None):
return self._get_sublist(self._quals, start, end, missing=missing)
def _get_aln_columns(seq, cigar, is_qual=False, del_char=None, n_char=None):
'''
Returns:
[(A|T|C|G|-|*)+] at each columns
>>> _get_aln_columns('AATCAGTA', Cigar.parse('2I3M3D3M').values)
['T', 'C', 'A', '-', '-', '-', 'G', 'T', 'A']
>>> _get_aln_columns('AATCAGTACC', Cigar.parse('3M2I3M2S').values)
['A', 'A', 'TCA', 'G', 'T', 'A']
>>> _get_aln_columns([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], Cigar.parse('3M2I3M2S').values, is_qual=True)
[(1,), (2,), (3, 4, 5), (6,), (7,), (8,)]
'''
if is_qual:
del_char = del_char or (20,)
n_char = n_char or (20,)
else:
del_char = del_char or '-'
n_char = n_char or '*'
if seq is None:
return None
# mask low qualities
#seq = ''.join(s if q >= 10 else '.' for (s, q) in zip(seq, rec.qual))
offset = 0
bases = []
for (op, length) in cigar: # TODO (show clip?)
if op in (0, 7, 8): # M, X, =
if is_qual:
bases.extend([(x,) for x in seq[offset:offset + length]])
else:
bases.extend(list(seq[offset:offset + length]))
offset += length
elif op == 1: # I
if bases: # inserted seq. at start position will be ignored
if is_qual:
bases[-1] = bases[-1] + tuple(seq[offset:offset + length]) # add to prev sequence
else:
bases[-1] = bases[-1] + seq[offset:offset + length] # add to prev sequence
offset += length
elif op == 2: # D
bases.extend([del_char] * length)
elif op == 3: # N
bases.extend([n_char] * length) # TODO this is rare case, but need revision
elif op == 4: # S
offset += length
elif op == 5: # H
pass
elif op == 6: # P
raise NotImplementedError
return bases
class ReadCounterBase(object):
__slots__ = ('start', 'end', 'window',)
def __init__(self, start, window=1):
self.start = start
self.end = start + window
def add(self, rec):
"""
Add SAM Record
"""
raise NotImplementedError
class ReadCounter(ReadCounterBase):
def __init__(self, start, window):
super(ReadCounter, self).__init__(start, window=window)
self.count = 0 # qstart is contained
self.clip = 0
self.lclip = 0
self.rclip = 0
self.bclip = 0
self.reverse = 0
self.mapq1 = 0
self.mate = MateInfoCounter()
self.covlen = 0 # covered length within the bin
self.covlen_mapq1 = 0
def add(self, rec):
self.count += 1
is_lclip = (rec.qstart > 0)
is_rclip = (rec.qend < rec.rlen)
is_mapq1 = (rec.mapq <= 1)
covlen = min(self.end, rec.aend) - max(self.start, rec.pos)
self.clip += int(is_lclip or is_rclip)
self.lclip += int(is_lclip)
self.rclip += int(is_rclip)
self.bclip += int(is_lclip and is_rclip)
self.reverse += rec.is_reverse
self.mapq1 += int(is_mapq1)
self.mate.add(rec)
self.covlen += covlen
if is_mapq1:
self.covlen_mapq1 += covlen
def __str__(self):
return '\t'.join((str(self.start), str(self.end), str(self.__dict__)))
class MateInfoCounter(ReadCounterBase):
""" Count mate info
"""
__slots__ = ('_tlen_far', 'unmapped', 'jumped', 'far', 'overlap', 'penetrate', 'rr', 'ff')
attrs = tuple(filter(lambda x: not x.startswith('_'), __slots__))
_getter = attrgetter(*attrs)
def values(self):
return self._getter(self)
def items(self):
return dict(zip(self.attrs, self.values()))
def __init__(self, tlen_far=700):
self._tlen_far = tlen_far
self.unmapped = self.jumped = self.far = self.overlap = self.penetrate = self.rr = self.ff = 0
def add(self, rec):
if rec.mate_is_unmapped:
self.unmapped += 1
return
if rec.rnext != rec.tid:
self.jumped += 1
return
# check orientation
if not rec.is_reverse:
if not rec.mate_is_reverse:
self.ff += 1
elif rec.tlen > self._tlen_far:
self.far += 1
elif rec.pnext < rec.aend:
self.overlap += 1
elif rec.tlen < 0:
self.penetrate += 1
else:
if rec.mate_is_reverse:
self.rr += 1
elif - rec.tlen > self._tlen_far:
self.far += 1
elif rec.aend - rec.pnext < - 2 * rec.tlen: # adhoc
self.overlap += 1
elif rec.aend < rec.pnext:
self.penetrate += 1
def __str__(self):
return str(self.items())
class BreakPointCounter(ReadCounterBase):
    def __init__(self, start, window):
        super(BreakPointCounter, self).__init__(start, window=window)
        self.count = 0
        self.lclips = []
        self.rclips = []
    def add(self, rec):
        self.count += 1
        is_lclip = (rec.qstart > 0)
        is_rclip = (rec.qend < rec.rlen)
        # record soft-clip breakpoint positions that fall inside this bin
        if is_lclip and self.start <= rec.pos < self.end:
            self.lclips.append(rec.pos)
        if is_rclip and self.start <= rec.aend - 1 < self.end:
            self.rclips.append(rec.aend - 1)
    def __str__(self):
        return '\t'.join((str(self.start), str(self.end), str(self.__dict__)))
class ReadCountGenerator(object):
def __init__(self, sam, rname, start=0, end=None, window=50, mass='middle', skip_flag=0x904, counter_cls=ReadCounter):
#self._samit = samit
if end is None:
rlens = dict(zip(sam.references, sam.lengths))
end = rlens[rname]
self._samit = sam.fetch(reference=rname, start=start, end=end)
self.counters = []
self.wstart = start if start % window == 0 else start // window * window # 110 -> 100, 100 -> 100, 149 -> 100 # window: 50
self.wend = end if end % window == 0 else (end // window + 1) * window # 110 -> 150, 100 -> 100, 149 -> 150 # window: 50
self.cstart = self.wstart
self.cend = self.wstart
self.window = window
self.skip_count = 0
self.skip_flag = skip_flag # unmapped, secondary or supplementary
get_read_masses = {
'start': lambda rec: (rec.pos,),
'end': lambda rec: (rec.aend - 1,), # end position of the alignment (note that aend points one past the last aligned residue)
'middle': lambda rec: ((rec.pos + rec.aend - 1) / 2.,), # middle point of the alignment
'overlap': lambda rec: range(rec.pos, rec.aend, window), # one point per window overlaped for each alignment
}
self.get_read_masses = get_read_masses[mass]
self._bulk_size = 200
self._counter_cls = counter_cls
def _flush(self):
while self.counters:
yield self._dequeue()
def _enqueue(self):
if self.wend is None:
enque_size = self._bulk_size
else:
enque_size = min(self._bulk_size, (self.wend - self.cend) // self.window)
self.counters.extend([self._counter_cls(self.cend + self.window * i, self.window) for i in xrange(enque_size)])
self.cend += self.window * enque_size
def _dequeue(self):
self.cstart += self.window
return self.counters.pop(0)
def _should_skip(self, rec):
if rec.flag & self.skip_flag:
self.skip_count += 1
return True
def __iter__(self):
try:
while 1:
rec = next(self._samit)
if self._should_skip(rec):
continue
start = rec.pos # 0-based
end = rec.aend
while self.cend < start:
for counter in self._flush():
yield counter
self._enqueue()
#yield self._dequeue()
while self.cend < end and self.cend < self.wend:
self._enqueue()
while self.counters and self.counters[0].end < start:
yield self._dequeue()
masses = self.get_read_masses(rec)
for mass in masses:
rec_index = int(mass - self.cstart) // self.window
if 0 <= rec_index < len(self.counters):
self.counters[rec_index].add(rec)
except StopIteration:
pass
except AssertionError as e:
logging.error('Invalid record was found: (pos: %s, aend: %s)', rec.pos, rec.aend)
logging.info(rec)
raise
for counter in self._flush():
yield counter
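# Usage sketch (illustrative, not part of this module): assuming pysam and an
# indexed BAM file named 'sample.bam', the generator can be consumed like so:
#
#     import pysam
#     sam = pysam.AlignmentFile('sample.bam', 'rb')
#     for counter in ReadCountGenerator(sam, 'chr1', start=0, window=50):
#         print(counter)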
| true |
4229e94b6d8e0c586fcd93e50229589b800b7fd8
|
Python
|
lagerone/advent-of-code-2020
|
/src/day4/day4.py
|
UTF-8
| 4,523 | 2.75 | 3 |
[] |
no_license
|
import logging
import os
import re
from typing import List, Optional
logging.basicConfig(level=logging.DEBUG)
def read_input(input_path: str):
input_file = open(input_path, "r")
lines = input_file.readlines()
result: List[str] = []
for line in lines:
result.append(line.strip())
return result
_VALID_HCL_PATTERN = re.compile(r"^#[0-9a-f]{6}$")
_VALID_PID_PATTERN = re.compile(r"^[0-9]{9}$")
class Passport:
def __init__(
self,
byr: Optional[str] = "",
iyr: Optional[str] = "",
eyr: Optional[str] = "",
hgt: Optional[str] = "",
hcl: Optional[str] = "",
ecl: Optional[str] = "",
pid: Optional[str] = "",
cid: Optional[str] = "",
):
""""""
self._byr = byr
self._iyr = iyr
self._eyr = eyr
self._hgt = hgt
self._hcl = hcl
self._ecl = ecl
self._pid = pid
self._cid = cid
def is_valid_hgt(self) -> bool:
if "cm" in self._hgt:
return 150 <= int(self._hgt.replace("cm", "")) <= 193
if "in" in self._hgt:
return 59 <= int(self._hgt.replace("in", "")) <= 76
return False
def is_valid_hcl(self) -> bool:
return bool(re.match(_VALID_HCL_PATTERN, self._hcl))
def is_valid_pid(self) -> bool:
return bool(re.match(_VALID_PID_PATTERN, self._pid))
def has_valid_props(self) -> bool:
req_props = [
self._byr,
self._iyr,
self._eyr,
self._hgt,
self._hcl,
self._ecl,
self._pid,
]
for p in req_props:
if not bool(p):
return False
return True
def has_valid_prop_values(self) -> bool:
if not 1920 <= int(self._byr) <= 2002:
logging.debug(f"invalid byr {self._byr}")
return False
if not 2010 <= int(self._iyr) <= 2020:
logging.debug(f"invalid iyr {self._iyr}")
return False
if not 2020 <= int(self._eyr) <= 2030:
logging.debug(f"invalid eyr {self._eyr}")
return False
if not self.is_valid_hgt():
logging.debug(f"invalid hgt {self._hgt}")
return False
if not self.is_valid_hcl():
logging.debug(f"invalid hcl {self._hcl}")
return False
        if self._ecl not in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]:
logging.debug(f"invalid ecl {self._ecl}")
return False
if not self.is_valid_pid():
logging.debug(f"invalid pid {self._pid}")
return False
return True
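def _passport_example():
    """Illustrative sketch only: the field values below are made up.

    Shows a record that passes both has_valid_props and
    has_valid_prop_values.
    """
    pp = Passport(byr="1980", iyr="2015", eyr="2025", hgt="180cm",
                  hcl="#123abc", ecl="brn", pid="000000001")
    return pp.has_valid_props() and pp.has_valid_prop_values()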
def parse_rows(rows: List[str]) -> List[List[str]]:
""""""
parsed: List[List[str]] = []
current: List[str] = []
for row in rows:
if row == "":
parsed.append(current)
current = []
continue
current.append(row)
parsed.append(current)
return parsed
def parse_passport(rows: List[str]) -> Passport:
""""""
res = dict()
for row in rows:
try:
split_res = row.split(" ")
for sr in split_res:
key, value = sr.split(":")
res[key] = value
except ValueError as e:
logging.error(f"{e}")
return Passport(
byr=res.get("byr", ""),
iyr=res.get("iyr", ""),
eyr=res.get("eyr", ""),
hgt=res.get("hgt", ""),
hcl=res.get("hcl", ""),
ecl=res.get("ecl", ""),
pid=res.get("pid", ""),
cid=res.get("cid", ""),
)
def run_1():
    input_filepath = os.path.join(
        os.path.dirname(__file__), "input.txt"
    )
entries = read_input(input_filepath)
raw_pps = parse_rows(entries)
passports = [parse_passport(pp_raw) for pp_raw in raw_pps]
valid_passports = [pp for pp in passports if pp.has_valid_props()]
logging.debug(f"{len(valid_passports)} valid passports of {len(passports)}.")
def run_2():
    input_filepath = os.path.join(
        os.path.dirname(__file__), "input.txt"
    )
entries = read_input(input_filepath)
raw_pps = parse_rows(entries)
passports = [parse_passport(pp_raw) for pp_raw in raw_pps]
valid_passports = [
pp for pp in passports if pp.has_valid_props() and pp.has_valid_prop_values()
]
logging.debug(f"{len(valid_passports)} valid passports of {len(passports)}.")
if __name__ == "__main__":
run_2()
| true |
91fa324d4550e6681d77b15f4b50fd2e3368ac1b
|
Python
|
AhosanuzzamanRoni/python-basic-a2z
|
/p-6.py
|
UTF-8
| 217 | 2.84375 | 3 |
[] |
no_license
|
name="roni"
age="23"
gpa="3.50"
name=input ("enter your name :")
name=input ("enter your age :")
name=input ("enter your gpa:")
print("student information")
print("name:"+ name)
print("age:" +age)
print("gpa :" +gpa)
| true |
472c077040d5e436a9d0b7d8e3ad13ebf4ae6d6c
|
Python
|
BTAutist/meme-generator
|
/quote_engine/docx_ingestor.py
|
UTF-8
| 866 | 2.921875 | 3 |
[] |
no_license
|
from .ingestor_interface import IngestorInterface
from .quote_model import QuoteModel
import docx
class DocxImporter(IngestorInterface):
allowed_extensions = ['docx']
@classmethod
def parse(cls, path):
""" Parses text from file, returns quote models """
if not cls.can_ingest(path):
            raise Exception('Problem ingesting .docx file. '
                            'Please check for correct format/corrupt file.')
try:
doc = docx.Document(path)
quote_models = []
for p in doc.paragraphs:
if p.text != "":
parsed = p.text.split(cls.delimiter)
quote_models.append(QuoteModel(parsed[0], parsed[1]))
return quote_models
except BaseException:
raise Exception('Error in ingesting .docx filetype')
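# Usage sketch (illustrative): given a .docx whose paragraphs each contain a
# single 'quote<delimiter>author' line, where `delimiter` is inherited from
# IngestorInterface:
#     quote_models = DocxImporter.parse('quotes.docx')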
| true |
faeaa75c9113dd53d1d426804d9d14c436cec68b
|
Python
|
bwjubrother/Algorithms
|
/Learn_advanced/5204_mergeSort.py
|
UTF-8
| 849 | 3.125 | 3 |
[] |
no_license
|
import sys
sys.stdin = open('5204.txt', 'r')
def mergeSort(arr):
if len(arr) <= 1:
return arr
else:
mid = len(arr) // 2
left = arr[:mid]
right = arr[mid:]
left = mergeSort(left)
right = mergeSort(right)
return merge(left, right)
def merge(left, right):
global cnt
result = []
if left[-1] > right[-1]:
cnt += 1
while left and right:
if left[0] <= right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
if left:
result.extend(left)
if right:
result.extend(right)
return result
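# Note: `cnt` is incremented whenever the left half's maximum exceeds the
# right half's maximum (both halves are already sorted, so left[-1] and
# right[-1] are their maxima).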
T = int(input())
for tc in range(T):
n = int(input())
arr = list(map(int, input().split()))
cnt = 0
arr = mergeSort(arr)
mid = arr[n//2]
print('#%d %d %d' % (tc+1, mid, cnt))
| true |
9b03b641978e4933b49b7ecc2918f66ae034b69b
|
Python
|
mrtbld/practice
|
/leetcode/238-product-of-array-except-self/238.py
|
UTF-8
| 953 | 3.75 | 4 |
[] |
no_license
|
class Solution:
# t:O(n), s:O(1 + output)
def productExceptSelf(self, nums):
"""For each element in nums, return the product of every other elements.
>>> _ = Solution()
>>> _.productExceptSelf([1,2,3,4])
[24, 12, 8, 6]
>>> _.productExceptSelf([1,2,3,4,0])
[0, 0, 0, 0, 24]
>>> _.productExceptSelf([2,2,2,2])
[8, 8, 8, 8]
>>> _.productExceptSelf([1]*10000)[:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
"""
products = [1] * len(nums) # t:O(n), s:O(output)
left_product = 1
right_product = 1
for i, (n, m) in enumerate(zip(nums, reversed(nums))): # t:O(n), s:O(1)
# Build products of all elements to the left of nums[i].
products[i] *= left_product # t:O(1), s:O(1)
left_product *= n
products[-i-1] *= right_product # t:O(1), s:O(1)
right_product *= m
return products
| true |
8e2d2f6d69b53c4d23a0c6afaaa1dfe20e5d3b97
|
Python
|
jmvalente/PathPlanning
|
/src/entities/robot.py
|
UTF-8
| 6,721 | 3.28125 | 3 |
[] |
no_license
|
from random import randint
from grid import Grid
import logging
#Create the robot objects
class Robot:
population = 0 #Keep track of how many robots exist, useful for assigning ID.
#We should keep a list of occupied start and end nodes so we don't give two robots the same point
startNodes = ()
goalNodes = ()
def __init__(self, start, goal):
self.start = start
self.goal = goal
self.color = self.genColor()
self.ID = 'R_' + str(Robot.population)
self.paths = []
self.bestPath = ''
Robot.population += 1
def __str__(self):
#Tell me about your mother.
return "ID : {id!s}\nStart: {st!s}\nGoal: {gl!s}\nColor: {cl!s}\nBest Path: {bp}\n" \
.format(id=self.ID, st=self.start, gl=self.goal, cl=self.color, bp=self.bestPath)
def findPaths(self, graph, start, goal, path=[]):
path = path + [start]
paths = []
if start == goal:
return [path]
        if str(start) not in graph:
            return []
for node in graph[str(start)]:
if node not in path:
newpaths = self.findPaths(graph, node, goal, path)
for newpath in newpaths:
paths.append(newpath)
if len(paths) > 9:
return paths
return paths
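    # Note: findPaths is a depth-first enumeration of simple paths that
    # returns early once more than nine paths have been collected, capping
    # the candidates considered per robot.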
@staticmethod
def genColor():
r = randint(0, 255)
g = randint(0, 255)
b = randint(0, 255)
return str((r, g, b))
@staticmethod
def genWaypoints(obstacles):
start = Robot.genWaypoint("start", obstacles)
goal = Robot.genWaypoint("goal", obstacles)
return start, goal
@staticmethod
def genWaypoint(kind, obstacles):
height, width = Grid.size
p = (randint(1, height) - 1, randint(1, width) - 1)
if kind == "start":
if p in Robot.startNodes or p in obstacles:
return Robot.genWaypoint("start", obstacles)
else: return p
elif kind == "goal":
if p in Robot.goalNodes or p in obstacles:
return Robot.genWaypoint("goal", obstacles)
else: return p
@staticmethod
def pathToDirection(path):
directions = ''
for step in range(len(path) - 1):
if path[step][0] > path[step + 1][0]:
directions += "N"
elif path[step][0] < path[step + 1][0]:
directions += "S"
elif path[step][0] == path[step + 1][0]:
if path[step][1] > path[step + 1][1]:
directions += "W"
elif path[step][1] < path[step + 1][1]:
directions += "E"
elif path[step][1] == path[step + 1][1]:
directions += "H"
return directions
@staticmethod
def getCollisionPoint(pathA, pathB, kind=1):
if kind == 1:
if len(pathA) > len(pathB):
for iNode in range(len(pathB)):
if pathA[iNode] == pathB[iNode]:
return iNode
else:
for iNode in range(len(pathA)):
if pathA[iNode] == pathB[iNode]:
return iNode
return False #If there are no collisions, return False to let us know things are OK.
if kind == 2:
if len(pathA) > len(pathB):
for iNode in range(len(pathB) - 1):
if pathA[iNode] == pathB[iNode + 1] and pathA[iNode + 1] == pathB[iNode]:
return iNode + 1
else:
for iNode in range(len(pathA) - 1):
if pathA[iNode] == pathB[iNode + 1] and pathA[iNode + 1] == pathB[iNode]:
return iNode + 1
return False
@staticmethod
def getBestPath(robotList):
robotPathPairs = set([(0, 0)]) #Initialize the set of pairs with Robot 0, Path 0
robotIndex = 1
pathIndex = 0
while robotIndex < len(robotList):
badPath = False
while True:
badPath = False
for pair in robotPathPairs:
try:
collisionPoint = Robot.getCollisionPoint(robotList[pair[0]].paths[pair[1]],
robotList[robotIndex].paths[pathIndex])
except IndexError:
pathIndex = 0
badPath = False
break
else:
if collisionPoint:
logging.warning(Robot.collisionDetails(pair, (robotIndex, pathIndex), collisionPoint))
Robot.addWait(robotList[robotIndex], pathIndex, collisionPoint)
pathIndex += 1
badPath = True
break
if not badPath:
logging.info("No collision, adding Robot {0}, Path {1} to set".format(robotIndex, pathIndex))
robotPathPairs.add((robotIndex, pathIndex))
pathIndex = 0
robotIndex += 1
badPath = False
break
#Now to update each robot with the best path
for pair in robotPathPairs:
r, p = pair
robotList[r].bestPath = Robot.pathToDirection(robotList[r].paths[p])
@staticmethod
def collisionDetails(pathA, pathB, point):
aR, aP = pathA #First robot, path
bR, bP = pathB #Second robot,path
message = "***Collision found at step {0} between R_{1}.path[{2}] and R_{3}.path[{4}]. Incrementing path index!***"
return message.format(point, aR, aP, bR, bP)
@staticmethod
def addWait(robot, path, index):
value = robot.paths[path][index - 1]
robot.paths[path].insert(index, value)
@staticmethod
###Walk Around function that will probably never be implemented
def addWalkAround(robot, path, index):
value = robot.paths[path][index - 1]
prev = robot.paths[path][index - 2]
direction = Robot.pathToDirection([prev, value])
if direction == "N" or direction == "S":
#Add east or west wait
pass
elif direction == "E" or direction == "W":
#Add North or South wait
pass
elif direction == "H":
#Try to move one direction
pass
| true |
dc68fe3acb6cdb3d3717b7a29300c9f8b956e6fa
|
Python
|
henryeleonu/time_series_queue_model
|
/arrival_queue.py
|
UTF-8
| 11,245 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
from math import sin, cos, sqrt, atan2, radians
import pandas as pd
import datetime
from matplotlib import pyplot as plt
from geopy.distance import geodesic
"""
Code developed by Henry Eleonu
"""
def get_distance_between_point(test_long, test_lat, lab_long, lab_lat):
"""
Calculates the distance between two points on the earth
Parameters:
test_long: float
Longitude of test locatation
test_lat: float
Latitude of test locatation
lab_long: float
Longitude of lab locatation
lab_lat: float
Latitude of lab locatation
Returns:
Distance: float
Disitance between two locations
"""
test = (test_lat, test_long)
lab = (lab_lat, lab_long)
return geodesic(test, lab).miles
def add_new_column(dataframe, column_name):
"""
Adds a new column to a dataframe
Parameters:
dataframe: DataFrame
pandas dataframe object
column_name: str
name of column to add
Returns:
dataframe: DataFrame
dataframe object with new column
"""
dataframe[column_name] = ""
return dataframe
def update_lab_name_with_closest_lab(tests_dataframe, labs_dataframe):
"""
Update lab_name column with name of lab closest to test location
Parameters:
tests_dataframe: DataFrame
pandas dataframe object
labs_dataframe: DataFrame
pandas dataframe object
Returns:
tests_dataframe: DataFrame
dataframe object with updated lab_name column
"""
tests_dataframe['lab_name'] = tests_dataframe.apply(lambda x: get_closest_lab_to_test(x['long'], x['lat'], labs_dataframe)[0], axis=1)
return tests_dataframe
def update_distance_from_closest_lab(tests_dataframe, labs_dataframe):
"""
Update distance_from_lab column
Parameters:
tests_dataframe: DataFrame
pandas dataframe object
labs_dataframe: DataFrame
pandas dataframe object
Returns:
tests_dataframe: DataFrame
dataframe object with updated distance_from_lab column
"""
tests_dataframe['distance_from_lab'] = tests_dataframe.apply(lambda x: get_closest_lab_to_test(x['long'], x['lat'], labs_dataframe)[1], axis=1)
return tests_dataframe
def update_time_test_arrives_lab(tests_dataframe, speed):
"""
Update time_test_arrives_lab column
Parameters:
tests_dataframe: DataFrame
pandas dataframe object
speed: Float
the speed of movement of test form test location to closest lab
Returns:
tests_dataframe: DataFrame
dataframe object with updated time_test_arrives_lab column
"""
tests_dataframe['time_test_arrives_lab'] = tests_dataframe.apply(lambda x: get_time_test_arrives_lab(x['distance_from_lab'], speed, x['time']), axis=1)
return tests_dataframe
def update_completion_time(tests_dataframe):
"""
Creates a new column, completion time, set all values to arrival time + 5hours.
Parameters:
tests_dataframe: DataFrame
pandas dataframe object
Returns:
tests_dataframe: DataFrame
dataframe object with completion_time column
"""
tests_dataframe['time_test_arrives_lab'] = pd.to_datetime(tests_dataframe['time_test_arrives_lab'])
hours = 5
processing_time = datetime.timedelta(hours = hours)
tests_dataframe['completion_time'] = tests_dataframe['time_test_arrives_lab'] + processing_time
return tests_dataframe
def update_server_size(tests_dataframe):
"""
Creates a new column, server_size, set all values to 0.
Parameters:
tests_dataframe: DataFrame
pandas dataframe object
Returns:
tests_dataframe: DataFrame
dataframe object with server_size column
"""
tests_dataframe['server_size'] = 0
return tests_dataframe
def get_closest_lab_to_test(test_long, test_lat, labs_dataframe):
"""
This function will find the closet lab to each test
Parameters:
test_long: Float
longitude of test location
test_lat: Float
latitude of test location
labs_dataframe: DataFrame
dataframe object holding lab data
Returns:
lab_name: str
name of closest lab to test location
distance: Float
distance of closest lab to test location
"""
closest_lab = {"lab_name": "", "distance": 0}
for i in range(len(labs_dataframe)) :
distance = get_distance_between_point(test_long, test_lat, labs_dataframe.loc[i, "long"], labs_dataframe.loc[i, "lat"])
if closest_lab['lab_name'] == "" or distance < closest_lab['distance']:
closest_lab['distance'] = distance
closest_lab['lab_name'] = labs_dataframe.loc[i, "lab_name"]
return closest_lab['lab_name'], closest_lab['distance']
def get_time_test_arrives_lab(distance, speed, date_time_of_test):
"""
This function gets the date and time test arrives lab
Parameters:
distance: Float
distance between test location and lab
speed: Float
speed test moves from test location to lab location
date_time_of_test: Datetime
date and time test was taken
Returns:
future_date_and_time: Datetime
date and time test arrives lab
"""
travel_time = distance/speed
hours_added = datetime.timedelta(hours = travel_time)
future_date_and_time = pd.to_datetime(date_time_of_test) + hours_added
return future_date_and_time
def create_dataframe_from_csv(path_to_csv_file):
"""
This function transforms a csv file to a pandas dataframe
Parameters:
path_to_csv_file: str
path to csv file
Returns:
df: DataFrame
dataframe object
"""
df = pd.read_csv(path_to_csv_file)
return df
def drop_missing_values_in_dataframe(dataframe):
"""
This function deletes rows with missing values
Parameters:
dataframe: DataFrame
dataframe object
Returns:
dataframe: DataFrame
dataframe object without missing values
"""
return dataframe.dropna()
def merge_arrival_and_completion_time(tests_dataframe):
"""
    This function merges the arrival time and completion time vertically
    and creates a new column, 'add', which is 1 for arrival and -1 for completion.
Arrival increments the server_size by 1 and completion decrements it by 1.
Parameters:
dataframe: DataFrame
dataframe object
Returns:
union: DataFrame
dataframe object
"""
    arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']].copy()
    completion_time_df = tests_dataframe[['completion_time', 'server_size']].copy()
    arrival_time_df['add'] = 1
    completion_time_df['add'] = -1
arrival_time_df = arrival_time_df.rename(columns={"time_test_arrives_lab":"time"})
completion_time_df = completion_time_df.rename(columns={"completion_time":"time"})
union = pd.concat([arrival_time_df, completion_time_df])
union = union.sort_values(by="time")
prev_server_size = 0
for index, row in union.iterrows():
if index == 0:
current_server_size= row['server_size'] + row['add']
prev_server_size = current_server_size
#union['server_size'] = union['server_size'] + union['add']
else:
current_server_size = prev_server_size + row['add']
prev_server_size = current_server_size
union.at[index,'server_size'] = current_server_size
#union.to_csv('union.csv')
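    # Note: the loop above keeps a running total of `add` over the
    # time-sorted rows; union['add'].cumsum() would be an equivalent
    # vectorised form.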
return union
def visualise_hourly_arrivals_at_each_lab(tests_dataframe):
"""
This function visualises the hourly arrivals of tests at each lab with respect to time
Parameters:
tests_dataframe: DataFrame
dataframe object
"""
labs_df = create_dataframe_from_csv('labs.csv')
labs_df = drop_missing_values_in_dataframe(labs_df)
list_of_labs = labs_df['lab_name'].to_list()
for lab_name in list_of_labs:
df = tests_dataframe.loc[tests_dataframe['lab_name'] == lab_name]
df.time_test_arrives_lab = pd.to_datetime(df.time_test_arrives_lab)
df = df.sort_values(by="time_test_arrives_lab")
df = df[['time_test_arrives_lab']]
df = df.reset_index().set_index('time_test_arrives_lab')
df = df.resample('H').count()
df.plot(title = 'hourly arrivals at ' + lab_name)
plt.show()
def visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe):
"""
    This function visualises the number of tests simultaneously processed at each lab with respect to time
Parameters:
tests_dataframe: DataFrame
dataframe object
"""
labs_df = create_dataframe_from_csv('labs.csv')
labs_df = drop_missing_values_in_dataframe(labs_df)
list_of_labs = labs_df['lab_name'].to_list()
for index in range(len(list_of_labs)):
df = tests_dataframe.loc[tests_dataframe['lab_name'] == list_of_labs[index]]
df = merge_arrival_and_completion_time(df)
df.plot.line(x = 'time', y = 'server_size', rot=70, title="Visualise the number of tests being simultaneously processed at " + list_of_labs[index])
plt.show()
def run_processes(path_to_tests_file, path_to_labs_file):
"""
This function serves as the pipeline with functions executed in the right order
Parameters:
path_to_tests_file: str
path to file holding tests data
path_to_labs_file: str
path to file holding labs data
"""
tests_dataframe = create_dataframe_from_csv(path_to_tests_file)
labs_dataframe = create_dataframe_from_csv(path_to_labs_file)
tests_dataframe = drop_missing_values_in_dataframe(tests_dataframe)
labs_dataframe = drop_missing_values_in_dataframe(labs_dataframe)
tests_dataframe = add_new_column(tests_dataframe, "lab_name")
tests_dataframe = add_new_column(tests_dataframe, "distance_from_lab")
tests_dataframe = add_new_column(tests_dataframe, "time_test_arrives_lab")
tests_dataframe = update_lab_name_with_closest_lab(tests_dataframe, labs_dataframe)
tests_dataframe = update_distance_from_closest_lab(tests_dataframe, labs_dataframe)
tests_dataframe = update_time_test_arrives_lab(tests_dataframe, 60)
tests_dataframe = update_completion_time(tests_dataframe)
tests_dataframe = update_server_size(tests_dataframe)
print(tests_dataframe)
visualise_hourly_arrivals_at_each_lab(tests_dataframe)
visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe)
def main():
"""
This function is the main where program execution starts
"""
run_processes('tests.csv', 'labs.csv')
if __name__ == "__main__":
main()
| true |
2d1aae770d95977f84374bdf4222e99eac41d784
|
Python
|
h4t4u/heapling
|
/heapling/model/wall.py
|
UTF-8
| 3,681 | 3.28125 | 3 |
[] |
no_license
|
from model.physicalobject import PhysicalObject
import random
from math import *
import pygame
from utility.vector import Vector
KEY = (0, 0, 0)
BLACK = (0, 0, 1)
def plane_angle_and_length(a, b):
    '''
    Returns the plane angle (radians) and the length of the segment from a to b.
    :param a: start point (x, y)
    :param b: end point (x, y)
    :return: [angle, length]; [0, 0] if the points coincide
    '''
if a!=b:
x = b[0] - a[0]
y = b[1] - a[1]
l = sqrt(x ** 2 + y ** 2)
return [(asin(y / l) if x >= 0 else (pi - asin(y / l))), l]
else:
        return [0, 0]
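# e.g. plane_angle_and_length((0, 0), (1, 1)) returns [pi / 4, sqrt(2)].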
def iter(coord1, coord2, angle_and_length_list, full_length):
    '''
    Applies one fractal iteration of the generator chain to the segment
    between coord1 and coord2, scaling lengths by length0 / full_length.
    :param coord1: segment start point
    :param coord2: segment end point
    :param angle_and_length_list: (angle, length) pairs of the generator chain
    :param full_length: total length of the generator chain
    :return: list of generated points (the segment end point is excluded)
    '''
new_list = [coord1]
angle0, length0 = plane_angle_and_length(coord1, coord2)
for angle, length in angle_and_length_list:
old_x, old_y = new_list[-1]
new_length = length * (length0 / full_length)
new_x = old_x + cos(angle + angle0) * new_length
new_y = old_y + sin(angle + angle0) * new_length
new_list.append([new_x, new_y])
return new_list[:-1]
class Wall(PhysicalObject):
def __init__(self, first_coords, last_coords, depth, number_dots, amplitude):
'''
:param first_coords: coordinates of first point
:param last_coords: coordinates of last point
:param depth: number of fractal iterations
:param number_dots: number of inner points of original poligonal chain
'''
self.amplitude = amplitude
self.first_coords = first_coords
self.last_coords = last_coords
self.number_dots = number_dots
self.depth = depth
self.variation1 = random.random()
self.variation2 = random.random()
self.time = 0
self.dots = [[0,0]]
self.width = abs(self.first_coords.x - self.last_coords.x)
self.height = amplitude*3
self.all_iterations = []
for i in range(depth):
self.all_iterations.append([first_coords, last_coords])
def step(self, dt):
self.all_iterations = []
self.time += dt
iter_number = self.depth
time = self.time
initial_list = [[0, 0]]
frequency = 2
variation1 = self.variation1
variation2 = self.variation2
length = self.last_coords.x-self.first_coords.x
vertex_number = self.number_dots
l0 = length / (vertex_number + 1)
for i in range(vertex_number):
x = self.amplitude * cos(
frequency * time * variation1 + i * 2 * pi / vertex_number) + l0 * (i + 1)
y = self.amplitude * sin(frequency * time * variation2 + i * 2 * pi / vertex_number)
initial_list.append([x, y])
initial_list.append([length, 0])
initial_angle_and_length_list = []
for i in range(vertex_number + 1):
initial_angle_and_length_list.append(plane_angle_and_length(initial_list[i], initial_list[i + 1]))
previous_list = initial_list
for i in range(iter_number):
new_list = []
for k in range(len(previous_list) - 1):
new_dots = iter(previous_list[k], previous_list[k + 1], initial_angle_and_length_list, length)
for j in new_dots:
new_list.append(j)
previous_list = new_list
previous_list.append([length, 0])
vector_list = []
for i in previous_list:
vector_list.append(Vector(*i)+self.first_coords)
self.all_iterations.append(vector_list)
self.dots = vector_list
def display(self):
surf = pygame.Surface((self.width+100, self.height))
surf.set_colorkey(KEY)
for i in range(len(self.dots) - 1):
pygame.draw.line (surf, BLACK, (self.dots[i].x - self.get_cords().x, self.dots[i].y - self.get_cords().y + round(self.height/2)),
(self.dots[i + 1].x - self.get_cords().x, self.dots[i + 1].y - self.get_cords().y + round(self.height/2)), 5)
return surf
def get_dots(self):
return self.all_iterations[2]
def get_cords(self):
return self.first_coords
if __name__ == '__main__':
pass
| true |
95fbc46c09dd69e92a2948f2b7ab5270eff794a4
|
Python
|
tate1010/Python-Neural-Network
|
/Basic Learning/Machine learning to mutiple two number.py
|
UTF-8
| 1,628 | 2.84375 | 3 |
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import random
from keras import backend as K
import tensorflow as tf
import math
from keras.callbacks import EarlyStopping, ModelCheckpoint
np.random.seed(42)
#Generate training data
input = []
output = []
realnumber =[]
model_path = "hello.h5"
# prepare callbacks
callbacks = [
EarlyStopping(
monitor='val_loss',
patience=50, # was 10
min_delta=0.1,
verbose=1),
ModelCheckpoint(
model_path,
monitor='val_loss',
save_best_only=True,
verbose=0)
]
for i in range(5000):
num1 = random.random()*100+1
num2 = random.random()*100+1
output.append([math.log(num1)+math.log(num2)])
realnumber.append([num1*num2])
input.append([math.log(num1),math.log(num2)])
input = np.array(input)
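# The model works entirely in log space: each input is (log a, log b) and the
# target is log a + log b, so learning "a times b" reduces to learning
# addition; predictions are mapped back to products with np.exp below.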
def RMSE(y_true,y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_true, y_pred))))
print(input[0:2])
print(output[0:2])
model = Sequential()
model.add(Dense(2,input_dim= 2,activation='relu'))
for i in range(20):
model.add(Dense(2,input_dim= 2, activation = "linear"))
model.add(Dense(1))
model.compile(loss='MSE', optimizer='adam')  # compile() takes no verbose argument
model.fit(input, output, epochs=5000, batch_size=512, verbose=1,
          validation_split=0.2, callbacks=callbacks)
plt.plot(np.divide((np.exp(model.predict(input))- realnumber),realnumber)*100)
plt.show()
plt.plot(np.exp(model.predict(input)))
plt.plot(realnumber)
plt.show()
print(np.exp(model.predict(np.array([[math.log(1),math.log(100000)]]))))
| true |
358d4728fefcd1a894177866ed64fef9a4da9928
|
Python
|
MangledPotat0/1DSolarWind
|
/thing.py
|
UTF-8
| 3,514 | 2.859375 | 3 |
[] |
no_license
|
from time import sleep
# List of parameters to be used
numberOfCells = 5
gr = 396.41 #solar radius^3/solar mass*seconds
initVelocity = 0
initDensity = 0.000000000000001
initEnergy = 2.1395 * (10 ** (-64)) # 3/2 kT = 2.071 E-16 joules
x0 = 0.5
dx = 0.005
dt = 0.0000001
# List of functions to be used
def calc_pressure(ncells,rho,e): #Calculating pressure
pcold = 1
ecold = 1
p = []
for i in range(0,ncells):
p.append(pcold + (2*rho[i]/3)*(e[i] - ecold))
return(p)
def calc_force(ncells,area,p,x,x0): # Calculating force
area = [1] + area
xnew = [x0] + x
force = []
for i in range(0,ncells):
if xnew[i] < 1:
force.append(p[i-1]*area[i] - p[i]*area[i] - gr*(xnew[i]+xnew[i+1]/2))
else:
force.append(p[i-1]*area[i] - p[i]*area[i] - gr/((xnew[i]+xnew[i+1])/2)**2)
print(force)
return(force)
def calc_velocity(ncells,vel,force,m,dt): # Calculating velocity
velocity = []
for i in range(0,ncells):
velocity.append(vel[i]+force[i]/m[i]*dt)
return(velocity)
def calc_position(ncells,x,vel,dt): # Calculating pressure
position = []
for i in range(0,ncells):
position.append(x[i] + vel[i]*dt)
return(position)
def calc_area():
a=1
return(a)
def calc_volume(ncells,x,area):
volume = [x[0]*area[0]]
for i in range(1,ncells):
volume.append(x[i]*area[i]-x[i-1]*area[i-1])
return(volume)
def calc_density(ncells,v):
density = []
for i in range(0,ncells):
density.append(1/v[i])
return(density)
def calc_energy(ncells,vol_old,vol,p):
energy = []
for i in range(0,ncells):
energy.append((vol[i]-vol_old[i])*p[i])
return(energy)
#Initialization
def initCells(ncells, velocity0, density0, energy0, xstep,x0):
area = [1.0001] * ncells
velocity = [velocity0] * ncells
density = [density0] * ncells
energy = [energy0] * ncells
pressure = calc_pressure(ncells,density,energy)
position = []
for i in range(1,ncells+1):
position.append(x0 + dx*i)
volume = calc_volume(ncells,position,area)
mass = []
for i in range(0,ncells):
mass.append(volume[i]*density[i])
return([area,mass,velocity,volume,density,pressure,energy,position])
def iterate(ncells,area,m,vel,vol,rho,p,e,x,dt,x0):
pressure = calc_pressure(ncells,rho,e)
force = calc_force(ncells,area,pressure,x,x0)
velocity = calc_velocity(ncells,vel,force,m,dt)
position = calc_position(ncells,x,vel,dt,)
area = [1] * ncells
volume = calc_volume(ncells,position,area)
density = calc_density(ncells,volume)
energy = calc_energy(ncells,vol,volume,pressure)
return([area,m,velocity,volume,density,pressure,energy,position])
def run(ncells,vel,rho,e,dx,dt,x0):
init = initCells(ncells,vel,rho,e,dx,x0)
cont = 1
while cont:
area = init[0]
mass = init[1]
velocity = init[2]
volume = init[3]
density = init[4]
pressure = init[5]
energy = init[6]
position = init[7]
print('Area: ',init[0], '\nMass: ',init[1], '\nVelocity: ',init[2], '\nVolume: ',init[3], '\nDensity: ',init[4], '\nPressure: ',init[5], '\nInternal Energy: ',init[6], '\nPosition: ',init[7])
init = iterate(ncells,area,mass,velocity,volume,density,pressure,energy,position,dt,x0)
cont = input('Type anytihng to contine. \n')
return
run(numberOfCells,initVelocity,initDensity,initEnergy,dx,dt,x0)
| true |
b29dab0fe94b3b9c5490f0e3ad21128e0baa3633
|
Python
|
deyh2020/Metsco_Insulator_Detection
|
/final_design/data_reader.py
|
UTF-8
| 1,428 | 2.765625 | 3 |
[] |
no_license
|
import json
from urllib import request
from PIL import Image
import numpy as np
import scipy.misc
import os
def normalize_image(file):
img = Image.open(file, 'r')
arr = np.array(img)
arr = np.floor_divide(arr, 255)
img = Image.fromarray(arr)
img.save(file)
with open('data.json') as f:
data = json.load(f)
# Output: {'name': 'Bob', 'languages': ['English', 'Fench']}
folder = '/home/isidor/Documents/keras/data/mask_data/'
os.system("find " + folder + " -name '*.jpg' -delete")
os.system("find " + folder + " -name '*.png' -delete")
print("Deleted old files")
masks = ['a','b','c', 'd', 'e', 'f']
i = 0
while i < len(data):
if len(data[i]['Label']) != 0:
image = data[i]['Labeled Data']
request.urlretrieve(image, folder + 'frames/files/image' + str(i) + '.jpg')
arr = 0
numOfObjects = len(data[i]['Label']['objects'])
for j in range(0,numOfObjects):
mask = data[i]['Label']['objects'][j]['instanceURI']
mask_file = folder + 'tmp_masks/image' + str(i) + masks[j] + '.png'
request.urlretrieve(mask, mask_file)
img = Image.open(mask_file)
if j == 0:
arr = np.asarray(img)
else:
arr = arr + np.asarray(img)
img = Image.fromarray(arr)
img.save(folder + 'masks/files/image' + str(i) + '.png')
#normalize_image(mask_file)
i = i+1
| true |
1912c2cbbb9e4638b51d7189bbb90ebd5b1d3eff
|
Python
|
rusvoskres/python_homework1
|
/dice_game_dif.py
|
UTF-8
| 2,318 | 3.984375 | 4 |
[
"MIT"
] |
permissive
|
from dice_game import Dice
class Dice_dif(Dice):
def __init__(self, N,type):
super().__init__(N)
self.type_game=type
def __str__(self):
super().__str__()
        if self.type_game == 1:
            return 'You must guess the numbers as an unordered pair'
        elif self.type_game == 2:
            return 'You must guess at least one value'
        elif self.type_game == 3:
            return 'The sums of the numbers must match'
def throw_dices(self):
        # type 1: matches as an unordered pair
        # type 2: at least one value matches
        # type 3: the sums match
        self.current_throw += 1
        if self.current_throw > self.throw_num:
            raise Exception('You have exceeded the number of attempts')
        print(f'Attempt number: {self.current_throw}')
        dice_1 = int(input('Enter the first number: '))
        dice_2 = int(input('Enter the second number: '))
        print(f'You entered: ({dice_1}, {dice_2})')
if self.type_game ==1:
if {dice_1, dice_2} == {self._hidden_num1,self._hidden_num2}:
return True
else:
return False
elif self.type_game == 2:
if dice_1 in {self._hidden_num1, self._hidden_num2} \
or dice_2 in {self._hidden_num1, self._hidden_num2}:
return True
else:
return False
elif self.type_game == 3:
if dice_1+dice_2 == self._hidden_num1+self._hidden_num2:
return True
else:
return False
if __name__ == '__main__':
dice_game=Dice_dif(5, 2)
dice_game.set_hidden_numbers()
print(dice_game)
try:
for i in range(15):
result = dice_game.throw_dices()
print(result)
if result:
                print('You won!!!')
print(dice_game.show_result())
break
    except Exception:
        print('Game over. You lost')
print(dice_game.show_result())
| true |
7b91592b4ce15bcc601ba4c91803d6dcabe94e12
|
Python
|
AntoineGachet/new_repo_python
|
/trash_code/progamme nul.py
|
UTF-8
| 110 | 3.109375 | 3 |
[] |
no_license
|
r = float(input('enter a number'))
if r < 5:
    print('well played')
else:
    print('you little shit')
| true |
5024608c4818660e938999cbab7dd0d3dcd074b4
|
Python
|
flocka12/Flocka_records
|
/DB/db_con.py
|
UTF-8
| 457 | 2.796875 | 3 |
[] |
no_license
|
''' module that connects to the database '''
import psycopg2
import os
def db_init():
"""creates db connection"""
dbname = os.getenv('DB_NAME')
user = os.getenv('DB_USER')
host = os.getenv('DB_HOST')
password = os.getenv('DB_PASSWORD')
db_conn = psycopg2.connect(
"dbname={} user={} host={} password={}".format(dbname, user, host, password),
connection_factory=None, cursor_factory=None)
return db_conn
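# Usage sketch: DB_NAME, DB_USER, DB_HOST and DB_PASSWORD must be set in the
# environment first.
#     conn = db_init()
#     cur = conn.cursor()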
| true |
b46e1d282e10b2db105ab35b199685cb69ffa081
|
Python
|
jrucl2d/Python_Coding_Test
|
/1Cycle/13장/q15.py
|
UTF-8
| 705 | 3.0625 | 3 |
[] |
no_license
|
from collections import deque
n, m, k, x = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for _ in range(m):
a, b = map(int, input().split())
graph[a].append(b)
visited = [False] * (n + 1)
answer = []
def bfs(graph, start, visited):
depth = 0
queue = deque([(start, depth)])
visited[start] = True
while queue:
v, depth = queue.popleft()
if depth == k:
answer.append(v)
for i in graph[v]:
if not visited[i]:
queue.append((i, depth + 1))
visited[i] = True
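# BFS tracks (node, depth) pairs; because BFS reaches every node at its
# minimum distance first, `answer` collects exactly the nodes whose shortest
# distance from x is k.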
bfs(graph, x, visited)
if len(answer) == 0:
print(-1)
else:
answer.sort()
for ans in answer:
print(ans)
| true |
697bd8f5be4f160aca9c1b9f45e50b650152dc00
|
Python
|
TapanBhavsar/CIFAR10_classification_production_level
|
/CIFAR10_Serving/lib/cifar10_model_trainer.py
|
UTF-8
| 4,689 | 2.71875 | 3 |
[] |
no_license
|
from model_trainer import ModelTrainer
from CIFAR10_model import CIFAR10Model
import tensorflow as tf
import time
import os
import utilities
class CIFAR10ModelTrainer(ModelTrainer):
def __init__(
self, train_data, train_labels, validation_data, validation_labels, test_data, test_labels,
):
super(CIFAR10ModelTrainer, self).__init__(
train_data=train_data,
train_labels=train_labels,
test_data=test_data,
test_labels=test_labels,
validation_data=validation_data,
validation_labels=validation_labels,
)
input_data_shape = [None]
input_label_shape = [None]
input_data_shape.extend(train_data.shape[1:])
input_label_shape.extend(train_labels.shape[1:])
self._initialize_placeholders(input_data_shape=input_data_shape, labels_shape=input_label_shape)
self.__CIFAR10_model = CIFAR10Model()
self.__session = tf.Session()
self.__initialize_variables = None
self.__model_saver = None
def train_model(self, epochs, batch_size):
model_output, model_softmax_output = self.__CIFAR10_model.build_model(self._input_data)
error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model_output, labels=self._input_labels))
optm = tf.train.AdamOptimizer(learning_rate=0.01).minimize(error)
corr = tf.equal(tf.argmax(model_output, 1), tf.argmax(self._input_labels, 1))
accuracy = tf.reduce_mean(tf.cast(corr, tf.float32))
self.__initialize_variables = tf.global_variables_initializer()
self.__session.run(self.__initialize_variables)
self.__model_saver = tf.train.Saver()
for epoch in range(epochs):
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
# devide data into mini batch
for batch in self._iterate_minibatches(self._train_data, self._train_labels, batch_size, shuffle=True):
inputs, targets = batch
# this is update weights
self.__session.run([optm], feed_dict={self._input_data: inputs, self._input_labels: targets})
# cost function
err, acc = self.__session.run(
[error, accuracy], feed_dict={self._input_data: inputs, self._input_labels: targets}
)
train_err += err
train_acc += acc
train_batches += 1
val_err = 0
val_acc = 0
val_batches = 0
# divide validation data into mini batch without shuffle
for batch in self._iterate_minibatches(
self._validation_data, self._validation_labels, batch_size, shuffle=False
):
                inputs, targets = batch
                # evaluation only: compute loss/accuracy without updating weights
                err, acc = self.__session.run(
                    [error, accuracy], feed_dict={self._input_data: inputs, self._input_labels: targets}
                )
val_err += err
val_acc += acc
val_batches += 1
# print present epoch with total number of epoch
# print training and validation loss with accuracy
print("Epoch {} of {} took {:.3f}s".format(epoch + 1, epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(val_acc / val_batches * 100))
test_err = 0
test_acc = 0
test_batches = 0
for batch in self._iterate_minibatches(self._test_data, self._test_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = self.__session.run(
[error, accuracy], feed_dict={self._input_data: inputs, self._input_labels: targets}
) # apply tensor function
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
def save_model(self, model_path):
model_folder, model_file = os.path.split(model_path)
utilities.create_folder(model_folder)
        save_path = self.__model_saver.save(self.__session, model_path)
        return save_path
| true |
6543740ba77da904da22689a103453f656cfb37b
|
Python
|
GLMF/GLMF191
|
/Dev/1_4_radio/radio.py
|
UTF-8
| 544 | 2.671875 | 3 |
[] |
no_license
|
import npyscreen
class Window(npyscreen.NPSApp):
def main(self):
Form = npyscreen.Form(name="GNU/Linux Magazine")
radio_values = ["Option 1", "Option 2", "Option 3"]
radio = Form.add(npyscreen.TitleSelectOne, max_height=len(radio_values), value = [0], name="Choix:",
values = radio_values, scroll_exit=True)
Form.edit()
npyscreen.notify_wait("Valeur saisie : " + radio.get_selected_objects()[0], title="Vérification")
if __name__ == "__main__":
App = Window()
App.run()
| true |
254584e03f6529313acaa2b3ef96ddf99166bfc6
|
Python
|
Brian-McG/HonoursDefaulterPrediction
|
/src/config/classifiers.py
|
UTF-8
| 4,192 | 2.671875 | 3 |
[] |
no_license
|
"""Configures classifiers and their status"""
from collections import OrderedDict
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from classifier_wrappers.clustering_launched_classification import ClusteringLaunchedClassifier
from classifier_wrappers.extreme_learning_machines import ExtremeLearningMachine
classifiers = OrderedDict()
def append_classifier_details(classifier_class, status, classifier_description, classifier_dict):
"""Adds classifier, data_balancer, status and classifier_description to classifier_arr as a dictionary"""
classifier_dict[classifier_description] = {"classifier": classifier_class, "status": status}
# Generic Classifiers
# Clustering-Launched Classification
clustering_launched_classifier = ClusteringLaunchedClassifier
clustering_launched_classifier_description = "Clustering-Launched Classification"
clustering_launched_classifier_enabled = False
append_classifier_details(clustering_launched_classifier, clustering_launched_classifier_enabled, clustering_launched_classifier_description, classifiers)
# Extreme learning machines
elm = ExtremeLearningMachine
elm_description = "Extreme Learning Machine"
elm_enabled = True
append_classifier_details(elm, elm_enabled, elm_description, classifiers)
# Artificial Neural network
ann = MLPClassifier
ann_description = "Artificial neural network"
ann_enabled = True
append_classifier_details(ann, ann_enabled, ann_description, classifiers)
# Support Vector Machines (with RBF kernel)
svm_rdf = svm.SVC
svm_rdf_description = "SVM (RBF)"
svm_rdf_enabled = True
append_classifier_details(svm_rdf, svm_rdf_enabled, svm_rdf_description, classifiers)
# Support Vector Machines (with linear kernel)
svm_linear = svm.SVC
svm_linear_description = "SVM (linear)"
svm_linear_enabled = True
append_classifier_details(svm_linear, svm_linear_enabled, svm_linear_description, classifiers)
# Support Vector Machines (with polynomial kernel)
svm_poly = svm.SVC
svm_poly_description = "SVM (polynomial)"
svm_poly_enabled = True
append_classifier_details(svm_poly, svm_poly_enabled, svm_poly_description, classifiers)
# Logistic Regression
logistic_regression = LogisticRegression
logistic_regression_description = "Logistic regression"
logistic_regression_enabled = True
append_classifier_details(logistic_regression, logistic_regression_enabled, logistic_regression_description, classifiers)
# Decision Tree
decision_tree = DecisionTreeClassifier
decision_tree_description = "Decision Tree"
decision_tree_enabled = True
append_classifier_details(decision_tree, decision_tree_enabled, decision_tree_description, classifiers)
# AdaBoost
adaboost = AdaBoostClassifier
adaboost_description = "AdaBoost"
adaboost_enabled = True
append_classifier_details(adaboost, adaboost_enabled, adaboost_description, classifiers)
# Random forest
random_forest = RandomForestClassifier
random_forest_description = "Random forest"
random_forest_enabled = True
append_classifier_details(random_forest, random_forest_enabled, random_forest_description, classifiers)
# K-nearest neighbours
k_nearest = KNeighborsClassifier
k_nearest_description = "K-nearest neighbours"
k_nearest_enabled = True
append_classifier_details(k_nearest, k_nearest_enabled, k_nearest_description, classifiers)
# Gaussian Naive Bayes
gaussian_naive_bayes = GaussianNB
gaussian_naive_bayes_description = "Gaussian Naive Bayes"
gaussian_naive_bayes_enabled = True
append_classifier_details(gaussian_naive_bayes, gaussian_naive_bayes_enabled, gaussian_naive_bayes_description, classifiers)
# Bernoulli Naive Bayes
bernoulli_naive_bayes = BernoulliNB
bernoulli_naive_bayes_description = "Bernoulli Naive Bayes"
bernoulli_naive_bayes_enabled = True
append_classifier_details(bernoulli_naive_bayes, bernoulli_naive_bayes_enabled, bernoulli_naive_bayes_description, classifiers)
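# Usage sketch (illustrative): instantiate only the classifiers whose status
# flag is enabled.
#     enabled = {name: entry["classifier"]() for name, entry in
#                classifiers.items() if entry["status"]}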
| true |
9b839ebf46f27bf76f7484218e5714c137215cc0
|
Python
|
rishsharma1/grokking_algorithms
|
/recursion.py
|
UTF-8
| 1,127 | 3.6875 | 4 |
[] |
no_license
|
def sum_array(array):
if len(array) == 0:
return 0
return array[0]+sum_array(array[1:])
def length(array):
if array == []:
return 0
return 1+length(array[1:])
def _max(a,b):
if a > b:
return a
return b
def max_number(array):
mid = (len(array)/2) - 1
if len(array) == 1:
return array[0]
elif len(array) == 2:
return _max(array[0],array[1])
return _max(max_number(array[:mid+1]),max_number(array[mid+1:]))
def alternate_max_number(array):
print array
if len(array) == 2:
return array[0] if array[0] > array[1] else array[1]
sub_max = alternate_max_number(array[1:])
return array[0] if array[0] > sub_max else sub_max
def main():
print sum_array([1,2,3]) == sum([1,2,3])
print sum_array(range(1,100)) == sum(range(1,100))
print sum_array([]) == sum([])
print length([1,2,3]) == len([1,2,3])
print length(range(1,100)) == len(range(1,100))
print max_number([1,11000,3,500,2,34,50,9000])
print alternate_max_number([1,11000,3,500,2,34,50,9000])
if __name__ == "__main__":
main()
| true |
363f7c33ace24d474a91cba62d966dd813a3a434
|
Python
|
katherinebroner/hackbright-intro-classes
|
/plants.py
|
UTF-8
| 554 | 3.921875 | 4 |
[] |
no_license
|
class Plants(object):
def __init__ (self, type_of_flower, flowering_or_not_flowering, color):
self.type_of_flower = type_of_flower
self.flowering_or_not_flowering = flowering_or_not_flowering
self.color = color
    def set_is_flowering(self, is_flowering):
        self.flowering_or_not_flowering = is_flowering
def flower_color(self):
print "a", self.type_of_flower, "is", self.color[0], "and", self.color[1]
sunflower = Plants("sunflower", True, ["yellow", "green"])
sunflower.set_is_flowering(True)
sunflower.flower_color()
| true |
c94f0bd2b537007520cd09c2fd5d6090cd89d177
|
Python
|
ejoerns/flamingo
|
/flamingo/core/settings.py
|
UTF-8
| 856 | 2.625 | 3 |
[
"Apache-2.0"
] |
permissive
|
import runpy
class Settings:
def __init__(self):
self._values = {}
self._modules = []
self.add('flamingo.default_settings')
def add(self, module):
self._modules.append(module)
if module.endswith('.py') or '/' in module:
values = runpy.run_path(module, init_globals=self._values)
else:
values = runpy.run_module(module, init_globals=self._values)
self._values = {k: v for k, v in values.items()
if not k.startswith('_')}
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
if name in self._values:
return self._values[name]
raise
def __dir__(self):
return super().__dir__() + list(self._values.keys())
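# Usage sketch (module path is illustrative):
#     settings = Settings()
#     settings.add('myproject.settings')  # or a file path such as 'conf/settings.py'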
| true |
ccf4adc09750dd387c31b170cc7e03c6466a4b87
|
Python
|
darrylyeo/Email-Event-Extractor
|
/utility_functions.py
|
UTF-8
| 3,032 | 2.6875 | 3 |
[] |
no_license
|
import re
import sys
import spacy
from itertools import *
from spacy.attrs import ENT_IOB
from spacy.matcher import Matcher
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def get_dates_spacy(email):
dates = []
labels = []
solo_dates = []
nlp = spacy.load('en_core_web_md')
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(email)
"""dates = [ent for ent in doc.ents if ent.label_ == 'DATE']
print(dates)"""
matcher = Matcher(nlp.vocab)
pattern = [{"IS_ALPHA": True},
{"IS_SPACE": True, "OP": "*"},
{"IS_PUNCT": True},
{"IS_SPACE": True, "OP": "*"},
{"ENT_TYPE": "DATE", "OP": "+"}]
matcher.add("DATE_PATTERN", None, pattern)
matched = matcher(doc)
results = [max(list(group), key=lambda x: x[2]) for key, group in groupby(matched, lambda prop: prop[1])]
for match_id, start, end in results:
matched_span = doc[start:end]
dates.append(matched_span)
for date in dates:
s_token = date.start
s_char = date.start_char
text = doc.text
for i in range(len(text)):
if text[s_char-1] == '\n' or text[s_char-1] == '\t':
break
s_char -= 1
for i in range(len(doc)):
if doc[s_token - 1].idx < s_char:
break
s_token -= 1
labels.append(doc[s_token:date.start+1])
for date in dates:
start_token = date.start
for i in range(len(doc)):
if doc[start_token].ent_type_ == 'DATE':
break
start_token += 1
solo_dates.append(doc[start_token:date.end])
return [(labels[i], solo_dates[i]) for i in range(len(dates))]
def get_names(email):
names = []
nlp = spacy.load('en_core_web_md')
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(email)
for ent in doc.ents:
if ent.label_ == 'PERSON':
is_chair = False
sent = ent.sent
for i in range(sent.start, sent.end):
if 'chair' in doc[i].text.lower():
is_chair = True
for i in range(ent.start, ent.end):
if doc[i].text == 'Khosmood':
is_chair = False
if is_chair:
names.append(ent)
return names
def get_ents(email):
nlp = spacy.load('en_core_web_md')
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(email)
return [ent for ent in doc.ents]
if __name__ == '__main__':
filename = sys.argv[1]
f = open(filename)
raw = f.read()
dates_spacy = get_dates_spacy(raw)
print('returns')
    for label, date in dates_spacy:
        print(label.text, '->', date.text)
| true |
4f061868679f32e0d0b88256f5d2e184cc81370e
|
Python
|
Talanvor/infa_2020_Snigir
|
/Solar/graph3.py
|
UTF-8
| 656 | 3.015625 | 3 |
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
x = []
y = []
t = 1.0
px = 0.0
py = 0.0
sx = 0.0
sy = 0.0
objects = []
with open('stats.txt', 'r') as stats:
for line in stats:
if len(line.strip()) == 0 or line[0] == '#':
r = ((px - sx)**2 + (py - sy)**2)**0.5
y.append( r )
continue
object_type = line.split()[0].lower()
if object_type == "planet": #fixed
x.append( (float(line.split()[6])**2 + float(line.split()[7])**2)**0.5 )
px = float(line.split()[4])
py = float(line.split()[5])
if object_type == "star": #fixed
sx = float(line.split()[4])
sy = float(line.split()[5])
plt.plot(x, y)
plt.grid(True)
plt.show()
| true |
26d98e214125814995375a656e485469bc6ece13
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02271/s434090235.py
|
UTF-8
| 271 | 2.78125 | 3 |
[] |
no_license
|
import itertools
n = input()
a = list(map(int, input().split()))
q = input()
aset = set()
for i in range(1, len(a) + 1):
aset |= set(sum(combi) for combi in itertools.combinations(a, i))
for m in map(int, input().split()):
print('yes' if m in aset else 'no')
| true |
d55b8e3250070c038c0ec56447bbd37d20990e53
|
Python
|
Jeevanjeethu/Practice
|
/Total/Python Learning/2nd class/xprint.py
|
UTF-8
| 38 | 3.03125 | 3 |
[] |
no_license
|
x = int(input('x'))
total = x + x
print(total)
| true |
d75d15125c609fbabd6b254346f413f310bdc1ee
|
Python
|
Richard-Kirby/otg_keyboard
|
/accel/accel.py
|
UTF-8
| 4,404 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
import smbus
import time
import math
import threading
from collections import deque
import queue
# Accelerometer class - sets up and returns accelerometer values as requested.
class Accelerometer(threading.Thread):
scaled_accel = [0, 0, 0]
address = 0x68
power_mgmt_1 = 0x6b
total_accel = 0
bus = 0
cycle_num = 5 # number of cycles to use for maximum and average
max_accel = 0 # maximum acceleration over the defined number of cycles.
avg_accel = 0 # Average acceleration over cycles
# Set up the accelerometer. Power and sensitivity settings
def __init__(self, cycle_num, accel_queue, delay = 0.01):
# Initialise the Thread.
threading.Thread.__init__(self)
# Set up the queue fed by the accelerometer
self.accel_queue = accel_queue
self.delay = delay
# Start talking to accelerometer - standard I2C stuff.
self.bus = smbus.SMBus(1) # or bus = smbus.SMBus(1) for Revision 2 boards
# Now wake the 6050 up as it starts in sleep mode
self.bus.write_byte_data(self.address, self.power_mgmt_1, 0)
# Write the setup to the accelerometer -value of 3 in AFS_SEL gives accel range of 16g.
# The register to use is 1C (28 decimal)
self.bus.write_byte_data(self.address, 0x1C, 0b00011000)
# Assign cycle num in object to the one passed in.
self.cycle_num = cycle_num
# Create the queue ot recent reads.
self.recent_reads = deque([0] * cycle_num, cycle_num)
# Read word from accelerometer
def read_word_2c(self, adr):
high = self.bus.read_byte_data(self.address, adr)
low = self.bus.read_byte_data(self.address, adr + 1)
val = (high << 8) + low
if val >= 0x8000:
return -((65535 - val) + 1)
else:
return val
# Returns the scaled values
def get_scaled_accel_values(self):
try:
# Grab Accelerometer Data
self.scaled_accel = [self.read_word_2c(0x3b) / 16384.0 * 8, self.read_word_2c(0x3d) / 16384.0 * 8,
self.read_word_2c(0x3f) / 16384.0 * 8]
except KeyboardInterrupt:
raise
except:
print("** Read failed - assume 0 accel")
self.scaled_accel = [0, 0, 0]
# Scaling is 16g, so scale the 2 bytes to get the +/-16g
# Take the square root of the squared sums to get the total acceleration. Use the opposite sign of the
# Z component.
self.total_accel = math.copysign(math.sqrt(self.scaled_accel[0] ** 2 + self.scaled_accel[1] ** 2 + self.scaled_accel[2] ** 2),
self.scaled_accel[2])
# Let the deque build up to full length before doing calculations.
if self.recent_reads.__len__() == self.cycle_num:
self.recent_reads.popleft()
self.recent_reads.append(self.total_accel)
self.max_accel = max(self.recent_reads)
else:
self.recent_reads.append(self.total_accel)
def run(self):
# Read continuously, refreshing the values each read.
try:
while True:
self.get_scaled_accel_values()
if self.max_accel > 2 and self.recent_reads.__len__() == self.cycle_num:
#print(time.time(), self.total_accel, "^^", self.max_accel, self.accel_queue.qsize())
# Send to acceleration queue
self.accel_queue.put_nowait(self.max_accel)
# Clear out the deque, so we don't report again too soon.
self.recent_reads.clear()
self.recent_reads.append(0)
                    self.max_accel = 0
time.sleep(self.delay)
except KeyboardInterrupt:
print("closing")
except:
raise
if __name__ == "__main__":
accel_que = queue.Queue()
# Create the accelerometer object.
my_accel = Accelerometer(5, accel_que, 0.005)
my_accel.start()
while True:
# Read continuously, refreshing the values each read.
if not accel_que.empty():
q_str = accel_que.get_nowait()
#print("q string {}".format(q_str))
| true |
88a39df0f62bedf8a640e6a94855ebaf8af97b74
|
Python
|
jlucangelio/adventofcode
|
/day7.py
|
UTF-8
| 1,412 | 3 | 3 |
[
"MIT"
] |
permissive
|
import sys
with open(sys.argv[1]) as f:
lines = f.readlines()
count = 0
for line in lines:
line = line.strip()
seen = []
abba = ""
tls = False
hypernet = False
abba_in_hypernet = False
for char in line:
if char == "[":
hypernet = True
seen = []
continue
if char == "]":
hypernet = False
seen = []
continue
if len(seen) == 0 or (len(seen) == 1 and char != seen[0]):
seen.append(char)
elif len(seen) == 2:
if seen[1] != char:
seen = seen[1:]
seen.append(char)
elif len(seen) == 3:
if seen[0] == char:
abba = "".join(seen) + char
if hypernet:
tls = False
abba_in_hypernet = True
break
else:
tls = True
abba = "".join(seen) + char
else:
seen = seen[2:]
seen.append(char)
# print seen, char
if tls:
print "TLS", abba, line
count += 1
elif abba_in_hypernet:
print "ABBA in hypernet", abba, line
print count
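# --- Alternative sketch (illustrative, not part of the original): the same
# ABBA test written with direct indexing, which is easier to verify than the
# rolling `seen` list above.
def has_abba(s):
    return any(s[i] == s[i + 3] and s[i + 1] == s[i + 2] and s[i] != s[i + 1]
               for i in range(len(s) - 3))
# has_abba("abba") -> True, has_abba("aaaa") -> False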
| true |
c688ffb759c080d2d33f59b4c30b9cd7a1af70f6 | Python | OpenGL-rolandpls/OpenGL-texture | /src/index.py | UTF-8 | 16,930 | 2.984375 | 3 | ["MIT"] | permissive |
# IF3260: Computer Graphics
# Texture Mapping - Immediate
# --Libraries and Packages--
import sys
import numpy
from math import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from PIL import Image
# --Camera Settings--
# Camera Angle
angle = 0.0
# Camera Coordinate
x = 0.0
y = 0.0
z = 0.0
# Camera Direction
dx = 0.0
dy = 0.0
dz = 0.0
# --Mouse Settings--
xrot = 0.0
yrot = 0.0
xdiff = 0.0
ydiff = 0.0
mouseDown = False
data = []
dim1 = []
dim2 = []
# --CLASSES--
class Camera:
def __init__(self):
self.position = (0.0, 0.0, 0.0)
self.rotation = (0.0, 0.0, 0.0)
def translate(self, dx, dy, dz):
x, y, z = self.position
self.position = (x + dx, y + dy, z + dz)
def rotate(self, dx, dy, dz):
x, y, z = self.rotation
self.rotation = (x + dx, y + dy, z + dz)
def apply(self):
glTranslate(*self.position)
glRotated(self.rotation[0], -1, 0, 0)
glRotated(self.rotation[1], 0, -1, 0)
glRotated(self.rotation[2], 0, 0, -1)
camera = Camera()
# Key Processing Unit
def processNormalKeys(key, x, y):
if (key == 27):
exit(0)
def processSpecialKeys(key, xx, yy):
    # Arrow/page keys translate the camera; the old `global x, z, dX, dZ, angle`
    # statement referenced names that are never used here and was dropped.
    movespeed = 1
if (key == GLUT_KEY_LEFT):
camera.translate(movespeed, 0, 0)
elif (key == GLUT_KEY_RIGHT):
camera.translate(-movespeed, 0, 0)
elif (key == GLUT_KEY_UP):
camera.translate(0, -movespeed, 0)
elif (key == GLUT_KEY_DOWN):
camera.translate(0, movespeed, 0)
elif (key == GLUT_KEY_PAGE_UP):
camera.translate(0, 0, movespeed)
elif (key == GLUT_KEY_PAGE_DOWN):
camera.translate(0, 0, -movespeed)
# Mouse Processing Unit
def mouseMotion(x, y):
global yrot, xrot, mouseDown
if (mouseDown):
yrot = - x + xdiff
xrot = - y - ydiff
def mouse(button, state, x, y):
global xdiff, ydiff, mouseDown
if (button == GLUT_LEFT_BUTTON and state == GLUT_DOWN):
mouseDown = True
xdiff = x + yrot
ydiff = -y - xrot
else:
mouseDown = False
def idle():
global mouseDown, xrot, yrot
if (not mouseDown):
if(xrot > 1):
xrot -= 0.005 * xrot
elif(xrot < -1):
xrot += 0.005 * -xrot
else:
xrot = 0
if(yrot > 1):
yrot -= 0.005 * yrot
elif(yrot < -1):
yrot += 0.005 * -yrot
else:
yrot = 0
# Lighting
def renderLight():
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
glShadeModel(GL_SMOOTH)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)
glEnable(GL_TEXTURE_2D)
specReflection = [1.0, 1.0, 1.0, 1.0]
glMaterialfv(GL_FRONT, GL_SPECULAR, specReflection)
glMateriali(GL_FRONT, GL_SHININESS, 30)
glLightfv(GL_LIGHT0, GL_POSITION, [2.0, 2.0, 2.0, 1.0])
def drawCar():
global data
z = 1.5
loadTexture(data[0],dim1[0],dim1[1])
#back window frame
glEnable(GL_TEXTURE_2D)
glColor3f(206/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 0.25, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 0.25, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, -1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, -1.0, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.5, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 1.5, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 0.25, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 0.25, -z+0.5)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, -z+0.5)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 0.25, z-0.5)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 0.25, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, z-0.5)
glEnd()
#top
glColor3f(240/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.5, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 1.5, z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.6, 1.5, z)
glTexCoord2f(0.0, 1.0); glVertex3f(0.6, 1.5, -z)
glEnd()
#bottom
glColor3f(190/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, -1.0, z)
glTexCoord2f(1.0, 1.0); glVertex3f(3.0, -1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(3.0, -1.0, -z)
glEnd()
#front
glColor3f(206/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(3.0, -1.0, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(3.0, 0.15, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(3.0, 0.15, z)
glTexCoord2f(0.0, 1.0); glVertex3f(3.0, -1.0, z)
glEnd()
#front cover
glColor3f(230/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(3.0, 0.15, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.2, 0.25, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(1.2, 0.25, z)
glTexCoord2f(0.0, 1.0); glVertex3f(3.0, 0.15, z)
glEnd()
#front window frame
glColor3f(235/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(0.6, 1.5, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(0.6, 1.5, z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.65, 1.42, z)
glTexCoord2f(0.0, 1.0); glVertex3f(0.65, 1.42, -z)
glTexCoord2f(0.0, 0.0); glVertex3f(1.15, 0.34, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.15, 0.34, -z+0.1)
glTexCoord2f(1.0, 1.0); glVertex3f(0.65, 1.42, -z+0.1)
glTexCoord2f(0.0, 1.0); glVertex3f(0.65, 1.42, -z)
glTexCoord2f(0.0, 0.0); glVertex3f(1.15, 0.34, z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.15, 0.34, z-0.1)
glTexCoord2f(1.0, 1.0); glVertex3f(0.65, 1.42, z-0.1)
glTexCoord2f(0.0, 1.0); glVertex3f(0.65, 1.42, z)
glTexCoord2f(0.0, 0.0); glVertex3f(1.15, 0.34, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.15, 0.34, z)
glTexCoord2f(1.0, 1.0); glVertex3f(1.2, 0.25, z)
glTexCoord2f(0.0, 1.0); glVertex3f(1.2, 0.25, -z)
glEnd()
#left above (window frame part)
glColor3f(206/255, 20/255, 55/255)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.5, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(0.6, 1.5, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.696, 1.3, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.3, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.3, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 0.25, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(-2.5, 0.25, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(-2.5, 1.3, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-1.2, 1.3, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(-1.2, 0.25, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 0.25, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.3, -z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(0.696, 1.3, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.496, 1.3, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 0.25, -z)
glEnd()
#left back
glBegin(GL_POLYGON)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.2, -1.0, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, -1.0, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 0.25, -z)
glEnd()
#left front
glBegin(GL_POLYGON)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, -z)
glTexCoord2f(1.0, 0.0); glVertex3f(3.0, 0.15, -z)
glTexCoord2f(1.0, 1.0); glVertex3f(3.0, -1.0, -z)
glTexCoord2f(0.0, 1.0); glVertex3f(1.2, -1.0, -z)
glEnd()
#right above (window frame part)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.5, z)
glTexCoord2f(1.0, 0.0); glVertex3f(0.6, 1.5, z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.696, 1.3, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.3, z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.3, z)
glTexCoord2f(1.0, 0.0); glVertex3f(-3.0, 0.25, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-2.5, 0.25, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-2.5, 1.3, z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(-1.2, 1.3, z)
glTexCoord2f(1.0, 0.0); glVertex3f(-1.2, 0.25, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 0.25, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.3, z)
glEnd()
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, z)
glTexCoord2f(1.0, 0.0); glVertex3f(0.696, 1.3, z)
glTexCoord2f(1.0, 1.0); glVertex3f(0.496, 1.3, z)
glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 0.25, z)
glEnd()
#right back
glBegin(GL_POLYGON)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, z)
glTexCoord2f(1.0, 0.0); glVertex3f(1.2, -1.0, z)
glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, -1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 0.25, z)
glEnd()
#right front
glBegin(GL_POLYGON)
glTexCoord2f(0.0, 0.0); glVertex3f(1.2, 0.25, z)
glTexCoord2f(1.0, 0.0); glVertex3f(3.0, 0.15, z)
glTexCoord2f(1.0, 1.0); glVertex3f(3.0, -1.0, z)
glTexCoord2f(0.0, 1.0); glVertex3f(1.2, -1.0, z)
glEnd()
glDisable(GL_TEXTURE_2D)
    #lights
glBegin(GL_QUADS)
glColor3f(0.9,0.9,0.9)
glVertex3f(3.006, -0.65, -z+0.101)
glVertex3f(3.006, -0.35, -z+0.101)
glVertex3f(3.006, -0.35, -z+0.601)
glVertex3f(3.006, -0.65, -z+0.601)
glVertex3f(3.006, -0.65, z-0.101)
glVertex3f(3.006, -0.35, z-0.101)
glVertex3f(3.006, -0.35, z-0.601)
glVertex3f(3.006, -0.65, z-0.601)
glColor3f(0.6,0.2,0.2)
glVertex3f(-3.006, -0.65, -z+0.101)
glVertex3f(-3.006, -0.35, -z+0.101)
glVertex3f(-3.006, -0.35, -z+0.601)
glVertex3f(-3.006, -0.65, -z+0.601)
glVertex3f(-3.006, -0.65, z-0.101)
glVertex3f(-3.006, -0.35, z-0.101)
glVertex3f(-3.006, -0.35, z-0.601)
glVertex3f(-3.006, -0.65, z-0.601)
glColor3f(0,0,0)
glVertex3f(3.006, -0.6, -z+1)
glVertex3f(3.006, -0.37, -z+1)
glVertex3f(3.006, -0.37, z-1)
glVertex3f(3.006, -0.6, z-1)
    #lights 2
glColor3f(0.6,0.6,0.6)
glVertex3f(3.005, -0.7, -z)
glVertex3f(3.005, -0.3, -z)
glVertex3f(3.005, -0.3, z)
glVertex3f(3.005, -0.7, z)
glVertex3f(-3.005, -0.7, -z)
glVertex3f(-3.005, -0.3, -z)
glVertex3f(-3.005, -0.3, z)
glVertex3f(-3.005, -0.7, z)
glVertex3f(2.9, -0.3, -z-0.0014)
glVertex3f(3.0, -0.3, -z-0.0014)
glVertex3f(3.0, -0.7, -z-0.0014)
glVertex3f(2.9, -0.7, -z-0.0014)
glVertex3f(2.9, -0.3, z+0.0014)
glVertex3f(3.0, -0.3, z+0.0014)
glVertex3f(3.0, -0.7, z+0.0014)
glVertex3f(2.9, -0.7, z+0.0014)
glColor3f(226/255, 152/255, 22/255)
glVertex3f(2.95, -0.35, z+0.0015)
glVertex3f(2.985, -0.35, z+0.0015)
glVertex3f(2.985, -0.65, z+0.0015)
glVertex3f(2.95, -0.65, z+0.0015)
glVertex3f(2.95, -0.35, -z-0.0015)
glVertex3f(2.985, -0.35, -z-0.0015)
glVertex3f(2.985, -0.65, -z-0.0015)
glVertex3f(2.95, -0.65, -z-0.0015)
glEnd()
    #front window glass (inner pane, spanning the frame corners drawn above;
    #the original quad repeated the -z vertices and was degenerate)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glEnable(GL_BLEND)
    glColor4f(90/255, 90/255, 90/255, 0.3)
    glBegin(GL_QUADS)
    glVertex3f(0.65, 1.42, -z)
    glVertex3f(0.65, 1.42, z)
    glVertex3f(1.15, 0.34, z)
    glVertex3f(1.15, 0.34, -z)
    glEnd()
glDisable(GL_BLEND)
#right window glass
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glColor4f(90/255, 90/255, 90/255, 0.3)
glBegin(GL_QUADS)
glVertex3f(-3.0, 1.5, z-0.01)
glVertex3f(0.5, 1.5, z-0.01)
glVertex3f(1.2, 0.25, z-0.01)
glVertex3f(-3.0, 0.25, z-0.01)
glEnd()
glDisable(GL_BLEND)
#front window glass
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glColor4f(90/255, 90/255, 90/255, 0.3)
glBegin(GL_QUADS)
glVertex3f(0.5, 1.5, -z)
glVertex3f(0.5, 1.5, z)
glVertex3f(1.2, 0.25, z)
glVertex3f(1.2, 0.25, -z)
glEnd()
glDisable(GL_BLEND)
#back window glass
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glColor4f(90/255, 90/255, 90/255, 0.3)
glBegin(GL_QUADS)
glVertex3f(-2.99, 0.25, -z+0.5)
glVertex3f(-2.99, 0.25, z-0.5)
glVertex3f(-2.99, 1.0, z-0.5)
glVertex3f(-2.99, 1.0, -z+0.5)
glEnd()
glDisable(GL_BLEND)
loadTexture(data[1],dim1[2],dim1[3])
glEnable(GL_TEXTURE_2D)
# Car's Wheel
glColor3f(0.0, 0.0, 0.0)
quadric = gluNewQuadric()
gluQuadricNormals(quadric, GLU_SMOOTH)
gluQuadricTexture(quadric, GL_TRUE)
glTranslatef(1.7,-1.0,-1.7)
gluCylinder(quadric,0.6,0.6,0.2,15,15)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0,0.0,0.2)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0, 0.0, -0.2)
glTranslatef(-3.3, 0.0, 0.0)
gluCylinder(quadric,0.6,0.6,0.2,15,15)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0,0.0,0.2)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0, 0.0, -0.2)
glTranslatef(0.0, 0.0, 3.2)
gluCylinder(quadric,0.6,0.6,0.2,15,15)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0,0.0,0.2)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0, 0.0, -0.2)
glTranslatef(3.3, 0.0, 0.0)
gluCylinder(quadric,0.6,0.6,0.2,15,15)
gluDisk(quadric, 0, 0.6, 15, 15)
glTranslatef(0.0,0.0,0.2)
gluDisk(quadric, 0, 0.6, 15, 15)
glColor3f(1.0, 1.0, 1.0)
gluDisk(quadric, 0.2, 0.4, 15, 15)
glTranslatef(-3.3, 0.0, 0.0)
gluDisk(quadric, 0.2, 0.4, 15, 15)
glTranslatef(0.0, 0.0, -0.2)
glTranslatef(0.0, 0.0, -3.2)
gluDisk(quadric, 0.2, 0.4, 15, 15)
glTranslatef(+3.3, 0.0, 0.0)
gluDisk(quadric, 0.2, 0.4, 15, 15)
glDisable(GL_TEXTURE_2D)
# Initialization
def InitGL(Width, Height):
glClearColor(0.0, 0.0, 0.0, 0.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# initialize texture mapping
glEnable(GL_TEXTURE_2D)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
def DrawGLScene():
#global X_AXIS,Y_AXIS,Z_AXIS
#global DIRECTION
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
renderLight()
camera.apply()
camera.rotate(xrot*0.001, 0.0, 0.0)
camera.rotate(0, yrot*0.001, 0.0)
# glBindTexture(GL_TEXTURE_2D, ID)
# Draw Car
drawCar()
# Draw Cube (multiple quads)
'''glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);
glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);
glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);
glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);
glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);
glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);
glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);
glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);
glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);
glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);
glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);
glEnd();
idle()
#X_AXIS = X_AXIS - 0.30
#Z_AXIS = Z_AXIS - 0.30'''
idle()
glutSwapBuffers()
def loadImage(filename):
image = Image.open(filename)
ix = image.size[0]
iy = image.size[1]
    data = numpy.array(list(image.getdata()), dtype=numpy.uint8)  # uint8 to match GL_UNSIGNED_BYTE
return data, ix, iy
def loadTexture(data, ix, iy):
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, ix, iy, 0, GL_RGB, GL_UNSIGNED_BYTE, data)
    # GL_REPEAT is the effective wrap mode; the GL_CLAMP values the original
    # set here were immediately overwritten and have been dropped
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
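# --- Illustrative sketch (not part of the original): with texture objects,
# each image is uploaded once with glTexImage2D and later selected with
# glBindTexture, instead of being re-uploaded every frame as loadTexture()
# does above. All GL calls are standard; the wrapper itself is hypothetical.
def makeTextureObject(data, ix, iy):
    tex_id = glGenTextures(1)                    # allocate a texture object
    glBindTexture(GL_TEXTURE_2D, tex_id)         # make it the current 2D texture
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, ix, iy, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, data) # upload once
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    return tex_id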
def main():
global data, dim1, dim2
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(640,480)
glutInitWindowPosition(200,200)
window = glutCreateWindow(b'OpenGL Python Textured Cube')
glutDisplayFunc(DrawGLScene)
glutIdleFunc(DrawGLScene)
glutKeyboardFunc(processNormalKeys)
glutSpecialFunc(processSpecialKeys)
#Mouse Function
glutMouseFunc(mouse)
glutMotionFunc(mouseMotion)
InitGL(640, 480)
arr, x, y = loadImage("../img/blue.jpg")
dim1.append(x)
dim1.append(y)
data.append(arr)
arr2, x2, y2 = loadImage("../img/wheel.jpg")
dim1.append(x2)
dim1.append(y2)
data.append(arr2)
# arr, x, y = loadImage("../img/tes.jpg")
# dim2.append(x)
# dim2.append(y)
# data.append(arr)
glutMainLoop()
if __name__ == "__main__":
main()
| true |
7e0d08ced97466efac6ee69191d7bba9df933fd2 | Python | YaooXu/Software_test | /analyzer.py | UTF-8 | 12,056 | 2.8125 | 3 | [] | no_license |
import os
import re
import ast
import json
import astunparse
import numpy as np
global source, file_parent_path, file_name
TAB_SIZE = 4
cnt = 0
all_func = {}
# Analyzes the classes and methods of library source code
class SourceAnalyser(ast.NodeVisitor):
def __init__(self):
self.all_class = {}
self.all_func = []
self.names = set()
def analyze(self, node):
self.visit(node)
def visit_Module(self, node):
self.generic_visit(node)
def visit_Name(self, node):
self.names.add(node.id)
def visit_FunctionDef(self, node):
if node.name[0] != '_':
            # skip private functions
print('func name:', node.name)
self.all_func.append(node.name)
def visit_ClassDef(self, node):
self.all_class[node.name] = []
print('ClassDef:', node.name)
for sub_node in ast.walk(node):
if isinstance(sub_node, ast.FunctionDef) and sub_node.name[0] != '_':
                # skip private methods
print('method name:', sub_node.name)
self.all_class[node.name].append(sub_node.name)
print()
def typeCastToString(castParameter):
    '''
    Converts an object of any basic type into a string.
    :param castParameter: the parameter to convert
    '''
newType = []
if type(castParameter) == int:
        # int to string
newType = str(castParameter)
elif type(castParameter) == list:
castParameter_new = []
for i in castParameter:
if type(i) == str:
newType = ','.join(castParameter)
elif type(i) == int:
i = str(i)
castParameter_new.append(i)
newType = ','.join(castParameter_new)
elif type(i) == tuple:
i = ','.join(i)
castParameter_new.append(i)
newType = ','.join(castParameter_new)
elif type(i) == dict:
i = str(i)
castParameter_new.append(i)
newType = ','.join(castParameter_new)
# return type(newType)
return newType
def in_all_func(node, all_func):
    # Checks whether the node calls a library function; different call styles
    # expose different attributes on node.value.func.
    if 'attr' in node.value.func.__dict__:
        if node.value.func.attr in all_func:
            print("Library function call: %s" % node.value.func.attr)
            return True
    elif 'id' in node.value.func.__dict__:
        if node.value.func.id in all_func:
            print("Library function call: %s" % node.value.func.id)
            return True
    print("Not a library function call")
    return False
def add_brackets(s):
    # Wraps a string in parentheses
    s = '(' + s + ')'
    return s
def add_quotes(s):
    # Wraps a string in double quotes
    s = '\"' + s + '\"'
    return s
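# --- Illustrative sketch (not part of the original): how the two helpers
# above compose into the logging statement injected by visit_Assign below.
# The argument values are made up.
def build_write_stmt(prefix_args, to_print_list):
    to_print = add_brackets(",".join(to_print_list))  # ('x*',str(s))
    fmt = add_quotes(prefix_args)                     # "input = %s %s"
    return "f.write" + add_brackets(fmt + ' % ' + to_print)
# build_write_stmt("input = %s %s", ["r\"x*\"", "str(s)"])
# -> f.write("input = %s %s" % (r"x*",str(s)))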
# Analyzes the function calls of the target code
class CallVisitor(ast.NodeVisitor):
def __init__(self):
self.all_class = {}
self.names = set()
def visit_Module(self, node):
self.generic_visit(node)
def visit_Name(self, node):
self.names.add(node.id)
def visit_ClassDef(self, node):
# self.all_class[node.name] = []
# print('ClassDef:', node.name)
# for sub_node in ast.walk(node):
# if isinstance(sub_node, ast.FunctionDef):
# print('func name:', sub_node.name)
# self.all_class[node.name].append(sub_node.name)
body = node.body
for sub_node in body:
if isinstance(sub_node, ast.FunctionDef):
self.visit_FunctionDef(sub_node)
pass
def visit_FunctionDef(self, node):
body = node.body
for sub_node in body:
if isinstance(sub_node, ast.Assign):
self.visit_Assign(sub_node)
pass
    # Visit assignment nodes
    def visit_Assign(self, node):
        global cnt
        print("\n------ visiting Assign node ------")
        line_num = node.lineno
        print("line_num = ", line_num)
        print(source[line_num - 1])
        # print(type(astunparse.dump(node)))
        col_offset = node.col_offset
        if not isinstance(node.value, ast.Call):
            print("Node does not call a method or function")
return
        if not in_all_func(node, all_func):
            # not a call into the analyzed library
            return
        # path prefix for every log file
        prefix_log_path = os.path.join(file_parent_path, 'log', file_name + "_" + str(cnt) + "_line" + str(line_num))
        # prefixes for the content written to the log
        prefix_output = "output = %s\\n"
        prefix_args = "input ="
        if 'id' not in node.value.func.__dict__:
            # calls of the form obj.func(...)
            try:
                # build the log file name
                log_name = prefix_log_path + "_" + node.value.func.value.id + "_" + node.value.func.attr + "_py%s.txt"
                print("log_name = " + log_name)
            except:
                print("Error: node attribute missing")
                return
        else:
            if node.value.func.id == 'input':
                # skip the built-in input() function
                return
            # calls of the form func(...)
            try:
                # build the log file name
                log_name = prefix_log_path + "_" + node.value.func.id + "_py%s.txt"
                print("log_name = " + log_name)
            except:
                print("Error: node attribute missing")
                return
        try:
            # the second injected line needs one extra level of indentation
            addition = "%swith open(r\"%s\"%%(sys.version[0:3]), \"w+\", encoding=\'utf8\') as f:\n%sf.write(\"%s\"%%str(%s))" % (
                " " * col_offset, log_name, " " * (col_offset + TAB_SIZE), prefix_output, node.targets[0].id)
            # print(addition)
            if '#' in source[line_num - 1]:
                # strip the trailing comment
                idx = source[line_num - 1].find('#')
                source[line_num - 1] = source[line_num - 1][:idx]
            # strip trailing whitespace on the right
            source[line_num - 1] = source[line_num - 1].rstrip()
            source[line_num - 1] += '\n'
            while source[line_num - 1][-2] != ')':
                # some calls span multiple lines; note that [-1] is '\n'
                line_num += 1
            source[line_num - 1] += '%s\n' % (addition)
            # print(source[line_num - 1])
            print("Instrumentation succeeded")
            cnt += 1
        except:
            print("Error: node attribute missing")
            return
        if len(node.value.args):
            # the call has arguments
            # f.write("input = %s %s %s" % ('x*', '-', str(s)))
            to_print_list = []  # arguments to log
            print('Argument list:')
            for arg in node.value.args:
                # add a %s to the format string for each argument
                prefix_args += " %s"
                # string literals expose .s, variable names expose .id
                if 's' in arg.__dict__:
                    print(arg.s)
                    arg.s = str(arg.s).replace('\n', r'\n')
                    # arg.s = arg.s.replace('\n', r'\n')
                    to_print_list.append("r\"%s\"" % arg.s)
                elif 'id' in arg.__dict__:
                    print(arg.id)
                    to_print_list.append("str(%s)" % arg.id)
                else:
                    print('Argument list contains a parameter that cannot be parsed')
                    return
            to_print_str = ",".join(to_print_list)  # 'x*', '-', str(s)
            to_print_str = add_brackets(to_print_str)  # ('x*', '-', str(s))
            prefix_args = add_quotes(prefix_args)  # "input = %s %s %s"
            # the statement to inject
            instrument_str = "f.write" + add_brackets(prefix_args + ' % ' + to_print_str)
            # insert it with the right indentation and a newline
            source[line_num - 1] += " " * (col_offset + TAB_SIZE) + instrument_str + '\n'
            # print(instrument_str)
def visit_Call(self, node):
pass
        # try:
        #     # xx.xx calls - does not distinguish methods from constructors
        #     # e.g. re.sub, np.ndarray
        #     print(node.func.value.id, node.func.attr)
        # except:
        #     # when is it attr, and when is it id?
        #     if 'attr' in node.func.__dict__:
        #         print(node.func.attr)
        #     else:
        #         print(node.func.id)
        #     pass
def load_json(json_path):
    # Loads every function recorded in the json index
with open(json_path, 'r') as f:
print("解析json,得到系统调用:")
data = json.load(f)
# print(data)
for i in data:
print(i)
for sub_class in data[i]['classes']:
for j in data[i]['classes'][sub_class]:
# print(j)
all_func[j] = 1
for sub_fun in data[i]['funcs']:
all_func[sub_fun] = 1
def analyse_lib(lib_pathes: list):
res = {}
for path in lib_pathes:
print('********************************************\n\n%s*' % path)
with open(path, encoding='utf8') as f:
source = f.readlines()
source = ''.join(source)
t = ast.parse(source)
analyzer = SourceAnalyser()
analyzer.analyze(t)
cur_res = {
'classes': analyzer.all_class,
'funcs': analyzer.all_func
}
        # module name without its extension
        module_name = os.path.split(path)[-1].split('.')[0]
        if module_name == '__init__':
            # some modules expose all of their API via __init__,
            # so the path needs to be split once more
            dir_path = os.path.split(path)[0]
            module_name = os.path.split(dir_path)[-1].split('.')[0]
res[module_name] = cur_res
with open('module_func_class.json', 'w', encoding='utf8') as f:
f.write(json.dumps(res, indent=4))
load_json("module_func_class.json")
print(all_func)
def get_instrument_file(file_path: str, save_path=None, over_write=True):
r"""
:param file_path: 待插装的目标文件
:param save_path: 插装后文件的保存路径,如果为None默认放到与目标文件同一目录下
:return: 插装之后的文件路径
"""
global cnt
cnt = 0
print('generating instrument file for % s' % file_path)
with open(file_path, 'r', encoding='utf8') as f:
global source, file_parent_path, file_name
source = f.readlines()
file_parent_path = os.path.split(file_path)[0]
file_name = os.path.split(file_path)[-1][0:-3]
# print(file_parent_path, file_name)
    # log directory
log_path = os.path.join(file_parent_path, 'log')
if not os.path.exists(log_path):
os.mkdir(log_path)
source_str = ''.join(source)
root = ast.parse(source_str)
# print(astunparse.dump(root))
if save_path is None:
save_path = file_parent_path + "/" + "instrument_" + file_name + ".py"
if over_write:
visitor = CallVisitor()
visitor.visit(root)
        # prepend the sys import required by the injected logging code
source[0] = 'import sys\n' + source[0]
with open(save_path, "w+", encoding='utf8') as f:
tmp = ''.join(source)
f.write(tmp)
return save_path
if __name__ == "__main__":
    lib_dir = r'E:\Anaconda\Lib'  # raw string keeps the backslashes literal
pathes = []
for root, dir, files in os.walk(lib_dir):
        # for now, only handle the .py files directly under the root
for file in files:
if file.startswith('_') or file[-2:] != 'py':
continue
path = os.path.join(root, file)
pathes.append(path)
break
pathes.append(r'C:\Users\dell\.PyCharm2019.1\system\python_stubs\-727401014\builtins.py')
analyse_lib(pathes)
    # # the set of files to instrument
    # test_pathes = []
    # # identify the functions defined by the target code itself
    #
    # for path in test_pathes:
    #     # path = r'D:\课件\大三上\软件质量测试\大作业\code\Software_test\test.py'
    #     get_instrument_file(path)
| true |
cc2b492650aedc5ec9271657e5d76b8bad6edf53 | Python | Ismgh/OCR | /neural_network_design.py | UTF-8 | 2,315 | 2.609375 | 3 | [] | no_license |
# Excerpt from an OCR neural-network experiment (Python 2). data_matrix,
# data_labels, train_indices, test_indices and OCRNeuralNetwork are assumed
# to come from the surrounding project; test() is defined first so the
# module-level loop below can call it.
def test(data_matrix, data_labels, test_indices, nn):
    avg_sum = 0
    for j in xrange(100):
        correct_guess_count = 0
        for i in test_indices:
            test = data_matrix[i]
            prediction = nn.predict(test)
            if data_labels[i] == prediction:
                correct_guess_count += 1
        avg_sum += (correct_guess_count / float(len(test_indices)))
    return avg_sum / 100
# Try various numbers of hidden nodes and see what performs best
for i in xrange(5, 50, 5):
    nn = OCRNeuralNetwork(i, data_matrix, data_labels, train_indices, False)
    performance = str(test(data_matrix, data_labels, test_indices, nn))
    print "{i} Hidden Nodes: {val}".format(i=i, val=performance)
# Fragment of OCRNeuralNetwork's back-propagation step; the enclosing method
# and the local variables y1, y2, sum1 and data are missing from this excerpt.
import numpy as np  # required by the matrix operations below
actual_vals = [0] * 10
actual_vals[data['label']] = 1
output_errors = np.mat(actual_vals).T - np.mat(y2)
hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors),
                            self.sigmoid_prime(sum1))
self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors),
                                           np.mat(data['y0']))
self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors),
                                           np.mat(y1).T)
self.hidden_layer_bias += self.LEARNING_RATE * output_errors
self.input_layer_bias += self.LEARNING_RATE * hidden_errors
# Fragments of OCRNeuralNetwork's persistence methods, shown without the
# enclosing class in this excerpt.
import json  # required by save()/_load() below
def save(self):
if not self._use_file:
return
json_neural_network = {
"theta1":[np_mat.tolist()[0] for np_mat in self.theta1],
"theta2":[np_mat.tolist()[0] for np_mat in self.theta2],
"b1":self.input_layer_bias[0].tolist()[0],
"b2":self.hidden_layer_bias[0].tolist()[0]
};
with open(OCRNeuralNetwork.NN_FILE_PATH,'w') as nnFile:
json.dump(json_neural_network, nnFile)
def _load(self):
if not self._use_file:
return
with open(OCRNeuralNetwork.NN_FILE_PATH) as nnFile:
nn = json.load(nnFile)
self.theta1 = [np.array(li) for li in nn['theta1']]
self.theta2 = [np.array(li) for li in nn['theta2']]
self.input_layer_bias = [np.array(nn['b1'][0])]
self.hidden_layer_bias = [np.array(nn['b2'][0])]
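# --- Illustrative sketch (not part of the original excerpt): minimal versions
# of the activation helpers that the back-propagation fragment above assumes
# exist on OCRNeuralNetwork (uses the numpy import added above).
def sigmoid(z):
    # logistic activation
    return 1.0 / (1.0 + np.exp(-z))
def sigmoid_prime(z):
    # derivative of the logistic, as used for hidden_errors above
    return np.multiply(sigmoid(z), 1.0 - sigmoid(z))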
| true |
6dcd7ee1176922eb32fcdaed810de63ff5e8d5dc | Python | Garyguo2011/DistanceVectorRouting | /project1/hub.py | UTF-8 | 481 | 2.59375 | 3 | [] | no_license |
from sim.api import *
from sim.basics import *
import time
class Hub (Entity):
""" A simple hub -- floods all packets """
def handle_rx (self, packet, port):
"""
Just sends the packet back out of every port except the one it came
in on.
"""
if type(packet) is DiscoveryPacket:
print ("Router: {3} DiscoveryPacket: {0} -> {1}: Latency: {2}".format(packet.src.name, packet.dst, packet.latency, str(self.name)))
        # Flood the packet out of every port except the one it arrived on.
        self.send(packet, port, flood=True)
| true |
a42d46ca65cbfa162fb0b499a1643d7be618d439 | Python | qianpeng-shen/Study_notes | /第二阶段笔记/pythonweb/day04/unix_send.py | UTF-8 | 622 | 2.65625 | 3 | [] | no_license |
from socket import *
import sys,os
# Choose the file used for communication
server_address='./test'
# Check whether './test' already exists; if it does,
# remove the stale socket file
if os.path.exists(server_address):
    os.unlink(server_address)
# Create the local (Unix-domain) socket
sockfd=socket(AF_UNIX,SOCK_STREAM)
# Bind the local socket file
sockfd.bind(server_address)
# Listen
sockfd.listen(5)
while True:
    c,addr=sockfd.accept()
    while True:
        data=c.recv(1024)
        if data:
            print(data.decode())
            c.sendall("Message received".encode())
        else:
            break
    c.close()
sockfd.close()
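# --- Illustrative client sketch (not part of the original): run this from a
# separate process to talk to the server above over the same socket file.
# from socket import *
# c = socket(AF_UNIX, SOCK_STREAM)
# c.connect('./test')
# c.send('hello'.encode())
# print(c.recv(1024).decode())
# c.close()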
| true |
c79ed3d8fb6294f539ccd2a2fdfc75ba86f50356 | Python | zerlok/nsu-prog-all | /ptycho-coursework/fp.py | UTF-8 | 14,960 | 2.515625 | 3 | [] | no_license |
#!/usr/bin/python3
from argparse import Action
from numpy import (
pi,
sin, cos, radians, arctan, arccos, exp, angle, conjugate, sqrt, conj,
array, zeros, ones, meshgrid, arange,
mean, any as np_any, sum as np_sum, std,
fft,
save as np_save,
)
import optics as o
from abstracts import Factory
from os.path import join as path_join
from settings import DEFAULT_LOWRES_FORMAT
class LED:
def __init__(self, lid, pos, k):
self.id = lid
self.pos = pos
self.k = k
def __str__(self):
return "<LED#{:03}: pos={} k=({:.2f}, {:.2f})>".format(
self.id,
self.pos,
*self.k,
)
def draw(self, data, steps, radius):
pos = tuple(int(p) for p in self.get_center_wavevec(steps, data.shape))
hw = radius.shape[0]//2
data[pos[1]-hw : pos[1]+hw, pos[0]-hw : pos[0]+hw] += radius
def get_center_wavevec(self, steps, lims):
# Center wavevecs.
return (
lims[0]/2.0 + self.k[0]/steps[0],
lims[1]/2.0 + self.k[1]/steps[1]
)
def get_wavevec_slice(self, sizes, steps, lims):
'''Returns slice of amplitude FT frequency range for specified led.'''
kxc, kyc = self.get_center_wavevec(steps, lims)
# Safe slices for x and y (lowest wavevec, highest wavevec, step)
return (
slice(
max(int(round(kxc - sizes[0]/2.0)), 0),
min(int(round(kxc + sizes[0]/2.0)), lims[0]),
1
),
slice(
max(int(round(kyc - sizes[1]/2.0)), 0),
min(int(round(kyc + sizes[1]/2.0)), lims[1]),
1
)
)
# Unsafe slices (no limits used).
# (
# slice(
# int(round(kxc - sizes[0]/2.0)),
# int(round(kxc + sizes[0]/2.0)),
# 1
# ),
# slice(
# int(round(kyc - sizes[1]/2.0)),
# int(round(kyc + sizes[1]/2.0)),
# 1
# )
# )
@Factory
class LEDSystems:
    '''Registry of available LED system implementations.'''
pass
class LEDSystem:
def walk(self):
raise NotImplementedError
def at(self, i, j):
raise NotImplementedError
@LEDSystems.product('grid', default=True, kwargs_types={'num': int, 'gap': float, 'height': float})
class LEDGrid(LEDSystem):
'''A lighting LED grid for Fourier Ptychography purposes.'''
def __init__(self, num, gap, height, wavevec):
self.num = num
self.gap = gap
self.height = height
self.width = (num-1) * gap
hw = self.width / 2.0
self.mtrx = [
LED(
lid = col + row * num,
pos = (col, row),
k = (
-wavevec * sin(arctan((col*gap - hw) / height)),
-wavevec * sin(arctan((row*gap - hw) / height))
)
)
for row in range(num)
for col in range(num)
]
def __len__(self):
return self.num * self.num
def __str__(self):
return "<LED matrix {}x{}, gap {}, height {}>".format(
self.num,
self.num,
self.gap,
self.height,
)
def __iter__(self):
return iter(self.mtrx)
def get_radius(self, na):
return na * sqrt(self.gap**2 + self.height**2) / self.gap
def walk(self):
        '''Walk through the LEDs from the centre outwards in a spiral.'''
hn = self.num // 2
x, y = hn, hn
yield self.at(x, y)
# Directions of steps.
dirs = ((0, 1), (-1, 0), (0, -1), (1, 0)) # Down, Left, Up, Right
dn = int(self.num % 2 == 0)
turn = 0
steps_to_take = 1
steps_left = steps_to_take
for i in range(1, len(self)):
if steps_left: # Just make a step
steps_left -= 1
else: # Make step and turn
dn += 1
turn += 1
steps_to_take += int(turn % 2 == 0)
steps_left = steps_to_take-1
# Get step direction and new coordinates
d = dirs[dn % 4]
x, y = x+d[0], y+d[1]
yield self.at(x, y)
def at(self, column, row):
'''Returns LED at specified grid location.'''
return self.mtrx[row * self.num + column]
@LEDSystems.product('sphere', kwargs_types={'start': int, 'end': int, 'step': int})
class LEDSphere(LEDSystem):
'''A lighting LED sphere for Reflective Fourier Ptychography.'''
def __init__(self, start, end, step, wavevec):
self.step = step
self.ring_len = 360 // step
self.sphere = [
LED(
lid = i + j * self.ring_len,
pos = (psy, phi),
k = (
-wavevec * sin(radians(phi)) * cos(radians(psy)),
-wavevec * sin(radians(phi)) * sin(radians(psy))
)
)
for (j, phi) in enumerate(range(start, end+step, step))
for (i, psy) in enumerate(range(360, 0, -step)) # ClockWise
]
def __len__(self):
        # self.sphere already holds one entry per (ring, hour) position
        return len(self.sphere)
def __str__(self):
return "<LED sphere (reflection) phi: {} -> {} by {}, total {}>".format(
self.sphere[0].pos[1],
self.sphere[-1].pos[1],
self.step,
len(self),
)
def __iter__(self):
return iter(self.sphere)
def get_radius(self, na):
return na / sin(radians(self.step))
def walk(self):
        '''Walk through the LEDs ring by ring, clockwise.'''
return iter(self)
    def at(self, ring, hr):
        '''Returns the LED at the specified ring (from centre to border) and hour position.'''
        return self.sphere[ring * self.ring_len + hr]
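# --- Illustrative sketch (not part of the original): the spiral order that
# LEDGrid.walk() produces for a 3x3 grid, handy when checking the recovery
# schedule. The wavevector value is arbitrary here; it only scales k.
# grid = LEDGrid(num=3, gap=4.0, height=90.0, wavevec=2 * pi)
# print([led.pos for led in grid.walk()])
# -> [(1, 1), (1, 2), (0, 2), (0, 1), (0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]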
class FourierPtychographySystem(o.System):
def __init__(self, leds, *args, **kwargs):
super(FourierPtychographySystem, self).__init__(*args, **kwargs)
self.leds = leds
def count_leds_overlap(self):
'''Returns the LEDs overlaping factor in Fourier space.'''
r = self.leds.get_radius(self.objective.na)
return (2.0*arccos(1.0/(2.0*r)) - sqrt(1.0 - (1.0/(2.0*r))**2) / r) / pi
def count_total_coverage(self, size=(256, 256)):
wavevec_steps = self.get_wavevec_steps(*size)
radius = self.objective.generate_ctf(*wavevec_steps)
data = zeros(size, dtype=bool)
for led in self.leds:
led.draw(data, wavevec_steps, radius)
return np_sum(data) / (size[0] * size[1])
def check_fourier_space_borders(self, low_size, high_size):
'''Checks if all wavevector slices are inside the image size in the Fourier space.'''
steps = self.get_wavevec_steps(*high_size)
for led in self.leds.walk():
sl = led.get_wavevec_slice(
sizes = low_size,
steps = steps,
lims = high_size,
)
sl = tuple(int(round(s.stop - s.start) / s.step) for s in sl)
if sl != low_size:
print("Failed to get FT slice for led: {}, images: low {}, high {}, slice {}".format(
led,
low_size,
high_size,
sl,
))
return False
return True
def get_leds_look(self, size, brightfield=False, darkfield=False, overlaps=False, factor=255):
'''Returns the image of LEDs look as from microscope.'''
wavevec_steps = self.get_wavevec_steps(*size)
radius = array([[1, 1], [1, 1]]) \
if not overlaps else \
self.objective.generate_ctf(*wavevec_steps) * 1
data = zeros(size, dtype=int)
for led in self.leds:
led.draw(data, wavevec_steps, radius)
cut = zeros(size, dtype=float)
steps = self.objective.generate_wavevec_steps(*size)
if brightfield or overlaps:
cut += self.objective.generate_ctf(*steps) * 1.0
if darkfield or overlaps:
cut += (1 - self.objective.generate_ctf(*steps)) * 0.5
return o.pack_image(data*cut*factor, size)
@Factory
class Generators:
'''Simulators of low resolution images obtaining as from optical microscope system.'''
pass
class LowresGenerator(FourierPtychographySystem):
def save_into_dir(self, result, dirname):
print("WARNING: amplitudes will be saved into image files and will loose some quality!")
low_images = [o.pack_image(data, result['size'], norm=True) for data in result['amplitudes']]
filename = path_join(dirname, DEFAULT_LOWRES_FORMAT)
for (i, img) in enumerate(low_images):
img.save(filename.format(id=i))
def save_into_npy(self, result, filename):
np_save(filename, result['amplitudes'])
@Generators.product('simple')
class SimpleObjectiveView(LowresGenerator):
'''Simple objective view generator.'''
def __init__(self, *args, **kwargs):
super(SimpleObjectiveView, self).__init__(*args, **kwargs)
def _inner_run(self, ampl):
result = super(SimpleObjectiveView, self)._inner_run(ampl)
return {
'ft': fft.fftshift(fft.fft2(ampl)),
'slices': None,
'lows_ft': None,
'amplitudes': result['amplitude'],
'size': result['amplitude'].shape,
'len': 1,
}
def save_into_dir(self, result, dirname):
img = o.pack_image(result['amplitudes'], result['size'], norm=True)
img.save(path_join(dirname, "result.png"))
@Generators.product('lowres-generator', default=True)
class LEDGenerator(LowresGenerator):
def _inner_run(self, ampl):
'''Creates a bundle of low resolution images data for each LED on grid.
Returns an array with matrices (amplitude values).'''
q2 = self.quality*self.quality
x_step, y_step = wavevec_steps = self.get_wavevec_steps(*ampl.shape)
low_size = tuple(int(i * self.quality) for i in ampl.shape)
# pupil = self.objective.generate_pass_mtrx(*self.objective.generate_wavevec_steps(*ampl.shape))
pupil = self.objective.generate_pass_mtrx(x_step, y_step)
ampl_ft = fft.fftshift(fft.fft2(ampl))
slices = [
led.get_wavevec_slice(
sizes = low_size,
steps = wavevec_steps,
lims = ampl.shape
)
for led in self.leds
]
lows_ft = array([
q2 * ampl_ft[y_slice, x_slice] * pupil
for (x_slice, y_slice) in slices
])
ampls = array([
abs(fft.ifft2(fft.ifftshift(low)))
# abs(ampl_ft[y_slice, x_slice])
for low in lows_ft
])
return {
'ft': ampl_ft,
'slices': slices,
'lows_ft': lows_ft,
'amplitudes': ampls,
'size': low_size,
'len': ampls.shape[0],
}
@Factory
class RecoveryMethods:
'''Fourier-Ptychography high resolution images recovery methods.'''
pass
@RecoveryMethods.product('fp', default=True, kwargs_types={'loops': int})
class FPRecovery(FourierPtychographySystem):
'''A simple Fourier Ptychography high resolution image recovery.'''
def __init__(self, loops, *args, **kwargs):
super(FPRecovery, self).__init__(*args, **kwargs)
self.loops = loops
self.q2 = self.quality * self.quality
self.q_2 = 1.0 / self.q2
def _inner_run(self, ampls):
self._init_recovery(ampls)
for i in range(self.loops):
self._init_recovery_iteration(i)
for led in self.leds.walk():
self._recover_for_led(led)
self._finish_recovery_iteration(i)
return self._finish_recovery()
def _init_recovery(self, ampls):
'''Initial method to build FP data before process started.'''
lowres_size = ampls[0].shape
highres_size = tuple(int(i / self.quality) for i in lowres_size)
steps = self.get_wavevec_steps(*highres_size)
ctf = self.objective.generate_ctf(*steps)
self._params = {
'lowres_size': lowres_size,
'steps': steps,
'ctf': ctf,
'ictf': 1.0 - ctf,
'measured': ampls,
'highres_ft': fft.fftshift(fft.fft2(ones(highres_size))),
'highres_size': highres_size,
'slices': [
led.get_wavevec_slice(
sizes = lowres_size,
steps = steps,
lims = highres_size,
)
for led in self.leds
]
}
def _init_recovery_iteration(self, iter_num):
'''Initial method to build FP iteration data (for every loop).'''
pass
def _recover_for_led(self, led):
'''4 steps of FP recovery flow for single led.'''
# 1st - Get highres FT part for current led.
x_slice, y_slice = self._params['slices'][led.id]
old_highres_ft_part = self._params['highres_ft'][y_slice, x_slice]
old_lowres_ft = self.q2 * old_highres_ft_part * self._params['ctf']
# 2nd - Build the I_li (low-res amplitude).
old_lowres_ampl = fft.ifft2(fft.ifftshift(old_lowres_ft))
# 3rd - Build the FT of I_mi with the known phase of I_li.
new_lowres_ampl = self.q_2 * self._params['measured'][led.id] * exp(1j * angle(old_lowres_ampl))
new_lowres_ft = fft.fftshift(fft.fft2(new_lowres_ampl))
# 4th - Add new FT of I_hi to the old one.
self._params['highres_ft'][y_slice, x_slice] = \
self._params['ictf'] * old_highres_ft_part \
+ self._params['ctf'] * new_lowres_ft
def _finish_recovery_iteration(self, iter_num):
'''Finish FP recovery iteration.'''
pass
def _finish_recovery(self):
'''Last method of the FP recovery to build final data.'''
highres_data = fft.ifft2(fft.ifftshift(self._params['highres_ft']))
return {
'ft': self._params['highres_ft'],
'amplitude': abs(highres_data),
'phase': angle(highres_data),
}
@RecoveryMethods.product('epry-fp')
class EPRYRec(FPRecovery):
def __init__(self, *args, **kwargs):
super(EPRYRec, self).__init__(*args, **kwargs)
self.alpha = 1.0
self.beta = 1.0
def _init_recovery(self, ampls):
super(EPRYRec, self)._init_recovery(ampls)
self._params['pupil'] = ones(self._params['lowres_size'], dtype=complex)
def _recover_for_led(self, led):
transfer = self._params['ctf'] * self._params['pupil']
x_slice, y_slice = self._params['slices'][led.id]
old_highres_ft_part = self._params['highres_ft'][y_slice, x_slice]
old_lowres_ft = old_highres_ft_part * transfer;
old_lowres_ampl = fft.ifft2(fft.ifftshift(old_lowres_ft));
new_lowres_ampl = self.q_2 * self._params['measured'][led.id] * exp(1j * angle(old_lowres_ampl));
new_lowres_ft = fft.fftshift(fft.fft2(new_lowres_ampl));
ft_diff = (new_lowres_ft - old_lowres_ft)
new_highres_ft_part = old_highres_ft_part \
+ self.alpha * (conj(transfer) / (abs(transfer)**2).max()) * ft_diff;
self._params['highres_ft'][y_slice, x_slice] = new_highres_ft_part
self._params['pupil'] \
+= self.beta * (conj(new_highres_ft_part) / (abs(new_highres_ft_part)**2).max()) * ft_diff;
def _finish_recovery(self):
'''Last method of the FP recovery to build the final data.'''
highres_data = fft.ifft2(fft.ifftshift(self._params['highres_ft']))
return {
'ft': self._params['highres_ft'],
'amplitude': abs(highres_data),
'phase': angle(highres_data),
'pupil': self._params['pupil'],
}
@RecoveryMethods.product('adaptive-fp')
class AdaptiveFPRecovery(FPRecovery):
def _init_recovery_iteration(self, iter_num):
self._params['i'] = iter_num
def _recover_for_led(self, led):
x_slice, y_slice = self._params['slices'][led.id]
old_highres_ft_part = self._params['highres_ft'][y_slice, x_slice]
old_lowres_ft = self.q2 * old_highres_ft_part * self._params['ctf']
old_lowres_ampl = fft.ifft2(fft.ifftshift(old_lowres_ft))
# Measured I correction.
measured_ampl = self._params['measured'][led.id]
if self._params['i']:
measured_ampl *= np_sum(abs(old_lowres_ampl)) / np_sum(measured_ampl)
new_lowres_ampl = self.q_2 * measured_ampl * exp(1j * angle(old_lowres_ampl))
new_lowres_ft = fft.fftshift(fft.fft2(new_lowres_ampl))
self._params['highres_ft'][y_slice, x_slice] = \
self._params['ictf'] * old_highres_ft_part \
+ self._params['ctf'] * new_lowres_ft
def _finish_recovery_iteration(self, iter_num):
print("CI = {} at {} step.".format(self.count_convergence(), iter_num))
pass
def count_convergence(self):
'''Count convergence index (measure the recovery goodness).'''
conv = 0.0
for led in self.leds:
x_slice, y_slice = self._params['slices'][led.id]
lowres_ampl = self._params['highres_ft'][y_slice, x_slice]
measured_ampl = self._params['measured'][led.id]
conv += mean(measured_ampl) / np_sum(abs(lowres_ampl - measured_ampl))
return conv
| true |
209eafeec0cd6b5bacb44a7231920c01c824b433 | Python | carrier-io/galloper | /galloper/database/utils.py | UTF-8 | 410 | 2.53125 | 3 | ["Apache-2.0"] | permissive |
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'sqlite')
def sqlite_utcnow(element, compiler, **kw):
return "DATETIME('now')"
| true |
a62a6642313d5b1ff0d28f4fc6705a4aa6b387b2 | Python | yxrya/Distributed-Computing-SynchBFS | /v4MB/main.py | UTF-8 | 3,887 | 2.625 | 3 | [] | no_license |
from queue import Queue
from message import Message
from process import Process
import threading
import time
import sys
import json
def launch_master_thread(n, ids, root, conn_matrix):
global id_process, id_label
print(f'In master thread. Launching {n} threads..')
initial_config = {}
q = Queue() # initially empty queue
for pid in ids:
initial_config[int(pid)] = {'parent': None,
'children':[],
'marked':False,
'sent':[],
'r':0,
'q':q
}
threadLock = threading.Lock()
r=1
while True:
print(f'********** master broadcast {r} ***********')
if r==1:
config = initial_config
latest_q = q
print(f'|| BFS LEVEL: {root}')
r = r+1
print(list(latest_q.queue))
id_process = launch_threads(ids, conn_matrix, root, latest_q, config)
done_threads = []
config={}
while True: #wait for all threads to complete one round after the first broadcast
#time.sleep(2)
if len(done_threads) == len(id_process):
#print(f'done status: {done_threads}')
break
tmp=None
#print(f'master acq {list(q.queue)}')
threadLock.acquire()
if q.qsize()!=0:
tmp = q.get()
threadLock.release()
#print(f'master rel')
            if tmp is None:
continue
if tmp.receiverID == 'Master':
config[tmp.senderID] = tmp.msg_type
latest_q = tmp.msg_type['q']
done_threads.append(tmp.senderID)
                #print(f'-----------------> Master received done from {tmp.senderID}')
else:
threadLock.acquire()
q.put(tmp)
threadLock.release()
for v in id_process.values():
v.join()
# check termination
visited = []
for key in config.keys():
if config[key]['marked']==True:
visited.append(key)
if len(visited)==n and latest_q.qsize()==0:
break
print('**********************************************')
for c in config:
print(f'{c} : Parent: {config[c]["parent"]} Children: {config[c]["children"]} Marked: {config[c]["marked"]}')
print('exiting master thread. bye!')
def launch_threads(ids, conn_matrix, root, q, config):
i=0
for p_id in ids:
id_label[i] = p_id
i += 1
for p_id, conn in zip(ids, conn_matrix):
neighbor_ids = []
for j in range(len(conn)):
if (conn[j]==1 and id_label[j]!=int(p_id)):
neighbor_ids.append(id_label[j])
process = Process(int(p_id), root, neighbor_ids, q, config)
id_process[p_id] = process
#print(id_label)
for v in id_process.values():
v.start()
return id_process
if __name__=="__main__":
assert(len(sys.argv) == 3)
n = int(sys.argv[1])
root = sys.argv[2]
with open("input.dat","r") as dat_file:
data = dat_file.readlines()
ids = data[0].strip().split(",")
for i in range(len(ids)): ids[i] = int(ids[i])
matrix_rows = data[1].strip().split(",")
connectivity_matrix = []
for row in matrix_rows:
connectivity_matrix.append(row.split())
for row in range(len(connectivity_matrix)):
for j in range(len(connectivity_matrix)):
connectivity_matrix[row][j] = int(connectivity_matrix[row][j])
id_process = {}
id_label = {}
master_thread = threading.Thread(name='master',target=launch_master_thread, args=(n, ids, root, connectivity_matrix))
master_thread.start()
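# --- Illustrative input.dat layout (inferred from the parsing above, not
# part of the original file):
# line 1: comma-separated process ids, e.g. "1,2,3,4"
# line 2: comma-separated rows of a space-separated adjacency matrix,
#         e.g. "0 1 1 0,1 0 0 1,1 0 0 1,0 1 1 0"
# Invoked as: python main.py 4 1   (n processes, then the root id)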
| true |
27b7b846c193c6386ddc974ab1a6b895a422d2bd | Python | AlejandroCamara/Learn_Python_3_the_Hard_Way | /Exercise 03. Numbers and Math/ex3-sd.py | UTF-8 | 1,512 | 4.28125 | 4 | [] | no_license |
# Exercise 03: Numbers and Math - Study Drills.
# Prints the message "I will now count my chickens:"
print("I will now count my chickens:")
# Prints "Hens" and the result of (25 + (30 / 6))
print("Hens", 25.0 + 30.0 / 6.0)
# Prints "Roosters" and the result of (100 - (25 * (3 % 4)))
print("Roosters", 100.0 - 25.0 * 3.0 % 4.0)
# Prints "Now I will count the eggs:"
print("Now I will count the eggs:")
# Prints the result of (3 + 2 + 1 -5 + (4 % 2) - (1 / 4) + 6)
print(3.0 + 2.0 + 1.0 - 5.0 + 4.0 % 2.0 - 1.0 / 4.0 + 6.0)
# Prints "Is it true that 3 + 2 < 5 - 7?"
print("Is it true that 3 + 2 < 5 - 7?")
# Prints the result of comparing (3 + 2) < (5 - 7) with a Boolean
print(3.0 + 2.0 < 5.0 - 7.0)
# Prints "What is 3 + 2?" and the result of (3 + 2)
print("What is 3 + 2?", 3.0 + 2.0)
# Prints "What is 5 - 7?" and the result of (5 - 7)
print("What is 5 - 7?", 5.0 - 7.0)
# Prints "Oh, that's why it's False."
print("Oh, that's why it's False.")
# Prints "How about some more."
print("How about some more.")
# Prints "Is it greater?" and the result of comparing (5 > -2)
print("Is it greater?", 5.0 > -2.0)
# Prints "Is it greater or equal?" and the result of comparing (5 >= -2)
print("Is it greater or equal?", 5.0 >= -2.0)
# Prints "Is it less or equal?" and the result of comparing (5 <= -2)
print("Is it less or equal?", 5.0 <= -2.0)
# Personal calculations
print("Christmas gifts:", 295 + 340)
print("Steam games:", 349.50 + 299.25 + 349.50 + 187.19)
print("Credit card fee:", 1500 / 12)
| true |
f39b599dacc067dc03b21d974a9bccc4ebdaa26b | Python | AustenA/AOC2020 | /3-1.py | UTF-8 | 378 | 3 | 3 | [] | no_license |
f = open("Day3Trees.txt", "r")
d = f.read().split("\n")
total = 0
down = 0
over = 0
height = len(d)
width = len(d[0])
downSpeed = 1
rightSpeed = 3
while True:
over = rightSpeed + over
down = downSpeed + down
if down >= height:
break
over = over%width
print(d[down][over])
if (d[down][over] == '#'):
total = total + 1
print(total)
| true |
ba2f20b365a33bd2709e3fa7fe8456dd91584df9
|
Python
|
Pranavtechie/Class-XII-Projects
|
/Section C/Group - 1 (03,04,433)/Source Code/src.py
|
UTF-8
| 12,090 | 3.21875 | 3 |
[] |
no_license
|
import mysql.connector as sql
from mysql.connector import Error
from time import sleep
import os
import datetime
from prettytable import PrettyTable
from math import fabs
house_list = ['Godavari', 'Krishna', 'Penna', 'Tungabhadra']
def Establish_Connection():
"""This function establishes connection to mysql database"""
connection = sql.connect(
host='localhost', user='root', password='student', database='medic')
mycursor = connection.cursor()
return connection, mycursor, Error
# Establishing connection to the database
conn, cursor, sqlerror = Establish_Connection()
def Cls():
"""This function clears the clears the screen in the command prompt"""
os.system('cls')
def Exit():
"""This function prints the Thank You message when a user exits the program"""
print("Thank you for using the program")
sleep(1.5)
exit()
def Main_Heading():
"""This function prints the main heading"""
print("---------- SAINIK SCHOOL KALIKIRI -----------")
print("----------- Dispensary Management -----------\n")
def Get_Admin_Username_List():
"""This function returns the list of admin usernames"""
cursor.execute("SELECT username FROM admin_user")
username_data = cursor.fetchall()
username_list = []
for row in username_data:
username_list.append(row[0])
return username_list
def Get_Admin_User_Password(username):
"""This function returns the password of a given user"""
cursor.execute(
f"SELECT password FROM admin_user WHERE username = '{username}'")
password = cursor.fetchone()[0]
return password
def Get_Admin_Name(username):
"""This function returns the name of the admin user given the username"""
cursor.execute(
f"SELECT name FROM admin_user WHERE username = '{username}'")
name = cursor.fetchone()[0]
return name
def Get_Cadet_Name(roll_no):
"""This function returns the name of the cadet based on his roll number"""
cursor.execute(f"SELECT name FROM cadet WHERE roll_no = {roll_no}")
name = cursor.fetchone()[0]
return name
def Get_Roll_No_List():
"""This function returns the list of roll no of the cadets"""
roll_list = []
cursor.execute("SELECT roll_no FROM cadet")
data = cursor.fetchall()
for row in data:
roll_list += row
return roll_list
def Get_Cadet_Password(roll_no):
"""This function returns the password for a given Roll Number"""
cursor.execute(
f"SELECT password FROM cadet_user WHERE roll_no = {roll_no}")
password = cursor.fetchone()[0]
return password
def Redirecting():
"""This function just prints redirecting on the screen"""
print("\nRedirecting", end='')
sleep(1)
print(".", end='')
sleep(1)
print(".", end='')
sleep(1)
print(".")
def Input_Date():
"""This function inputs date and returns the date"""
print("\nPlease do enter only integers for the date")
while True:
try:
day_input = int(input("Enter the Day: "))
month_input = int(input("Enter the Month: "))
year_input = int(input("Enter the Year: "))
the_date = datetime.date(year_input, month_input, day_input)
return the_date
except ValueError:
print("\nYou have entered an invalid characters. Please Try Again")
continue
def Check_Roll_No(roll_no):
"""This function checks whether the entered roll number
exists in the data and returns True if matched"""
roll_list = Get_Roll_No_List()
if roll_no in roll_list:
return True
else:
return False
def Get_Probable_Medicine(med_name):
"""This function return the probable list and table of medicines"""
cursor.execute(
f"SELECT medicine_name FROM medicine WHERE medicine_name like '%{med_name}%'")
data = cursor.fetchall()
med_list = []
med_table = PrettyTable()
med_table.field_names = ['Medicine Name']
for med in data:
med_table.add_row([med[0]])
med_list.append(med[0])
if len(med_list) == 0:
return [], []
else:
return med_list, med_table
def Check_Expiry(med_name):
"""This function checks whether the given medicine in expired or not"""
try:
cursor.execute(
f"SELECT expiry FROM medicine WHERE medicine_name = '{med_name}'")
expiry_date = cursor.fetchone()[0]
if expiry_date > datetime.date.today():
return True
else:
return False
except Error:
return 'Error'
def Check_Quantity(med_name):
"""This function checks the quantity available to issue medicine"""
cursor.execute(
f"SELECT quantity FROM medicine WHERE medicine_name = '{med_name}'")
aval_quantity = cursor.fetchone()[0]
if aval_quantity > 0:
return True
else:
return False
def Change_Medication_Status():
"""This function checks the end_date and updates the medical status of the cadet"""
try:
cursor.execute(
"SELECT roll_no, timestamp, end_date, status FROM issue_medicine WHERE status = 'Under Medication'")
data = cursor.fetchall()
for roll_no, timestamp, end_date, status in data:
if end_date < datetime.date.today():
                cursor.execute(
f"UPDATE issue_medicine SET status = 'Healthy' WHERE roll_no = {roll_no} and timestamp = '{timestamp}'")
conn.commit()
else:
pass
except sqlerror:
print("An Error Occurred while parsing and modifying the Status of the Cadet.")
def Scan_For_Expiry():
"""This function scans the entire medicines to check if any medicine expired"""
expiry_table = PrettyTable()
expiry_table.field_names = ['Medicine Name',
'Usage / Indication', 'Quantity', 'Expiry']
expiry_list = []
try:
cursor.execute(f"SELECT medicine_name FROM medicine")
data = cursor.fetchall()
for medicine in data:
check_value = Check_Expiry(medicine[0])
if not check_value:
cursor.execute(
f"SELECT * FROM medicine WHERE medicine_name = '{medicine[0]}'")
name, usage, qty, exp = cursor.fetchone()
expiry_list.append(name)
expiry_table.add_row([name, usage, qty, exp])
else:
pass
if len(expiry_list) == 0:
return False, False
else:
return expiry_table, expiry_list
except sqlerror:
print("\nAn Error Occurred while scanning for expiry")
def Get_BMI_Status(bmi):
    """This function gets the bmi status based on the bmi value"""
    # The original ranges left the gaps 24.9-25 and 29.9-30 unclassified;
    # the boundaries below are contiguous.
    if bmi <= 18.5:
        return 'Underweight'
    elif bmi < 25:
        return 'Healthy'
    elif bmi < 30:
        return 'Overweight'
    elif bmi >= 30:
        return 'Obese'
    else:
        return "Can't Be Calculated"
def Update_BMI(roll_no):
"""This function updates BMI and BMI Status of a cadet"""
try:
import mysql.connector
new = mysql.connector.connect(
host='localhost', user='root', password='student', database='medic')
new_cursor = new.cursor()
new_cursor.execute(
f"SELECT height, weight FROM medical_data WHERE roll_no = {roll_no}")
height, weight = new_cursor.fetchone()
if height and weight:
height = height/100
bmi = (weight / height**2)
bmi = round(bmi, 2)
bmi_status = Get_BMI_Status(bmi)
new_cursor.execute(
f"UPDATE medical_data SET BMI = {bmi}, BMI_status = '{bmi_status}' WHERE roll_no = {roll_no}")
new.commit()
return True
else:
return False
except sqlerror:
return False
def Input_Timing():
"""This function lets user to enter timing"""
while True:
hour_input = input("Enter the hour (24 Hour Format): ")
minute_input = input("Enter the minutes: ")
try:
hour_input = int(hour_input)
minute_input = int(minute_input)
if 0 <= hour_input <= 23 and 0 <= minute_input <= 59:
time = datetime.time(hour=hour_input, minute=minute_input)
return time
else:
print("You input exceeded the limit. Please Try Again")
sleep(1)
continue
except ValueError:
print(
"You have entered an invalid value for hours of minute. Please Try Again")
sleep(1)
continue
def Check_If_Admitted(roll_no):
"""This function checks whether a cadet is admitted or not"""
try:
cursor.execute(
f"SELECT * FROM admission WHERE roll_no = {roll_no} and status = 'Admitted'")
data = cursor.fetchone()
if data is not None:
print("\nYou can't Admit the Cadet. The cadet is already Admitted")
print(f"Admitted Cause: {data[1]}")
print(f"Admitted on: {data[3]}")
print(f"Discharge Date: {data[2]}")
return True
else:
return False
except sqlerror:
print("An Error Occurred while parsing data from the database. Please Try Again")
sleep(1.5)
def Get_Latest_Timestamp(roll_no):
"""This function gets the latest admission timestamp of a cadet"""
try:
cursor.execute(
f"SELECT timestamp FROM admission WHERE roll_no = {roll_no} and status = 'Admitted' ORDER BY timestamp DESC")
timestamp = cursor.fetchall()[0][0]
return timestamp
except sqlerror:
print("\nAn error occurred while getting admission data. Please Try Again")
sleep(1.5)
return False
def Calculate_Eye_Sight_Points():
"""This functions calculates the points for eye sight"""
for house in house_list:
cursor.execute(f"""SELECT medical_data.eye_l,medical_data.eye_r FROM medical_data,cadet
WHERE cadet.house = '{house}' and cadet.roll_no = medical_data.roll_no""")
data = cursor.fetchall()
house_total = 0
        for eye_l, eye_r in data:
            # Skip rows with a missing reading; fabs(None) would raise TypeError
            if eye_l is None or eye_r is None:
                continue
            house_total += fabs(eye_l) + fabs(eye_r)
        cursor.execute(
            f"UPDATE fit_house SET eye_sight = {house_total} WHERE house = '{house}'")
        conn.commit()
def Calculate_BMI_Points():
"""This function calculates points based on BMI"""
for house in house_list:
cursor.execute(f"""SELECT medical_data.BMI FROM medical_data,cadet
WHERE cadet.house = '{house}' and cadet.roll_no = medical_data.roll_no""")
data = cursor.fetchall()
house_bmi = 0
        for row in data:
            bmi = row[0]
            if bmi is not None:
                house_bmi += bmi
        cursor.execute(
            f"UPDATE fit_house SET BMI = {house_bmi} WHERE house = '{house}'")
        conn.commit()
def Add_Admission_Points(roll_no):
"""This function adds points the the fit house table if a cadet is admitted"""
cursor.execute(f"SELECT house FROM cadet WHERE roll_no = {roll_no}")
house = cursor.fetchone()[0]
cursor.execute(
f"UPDATE fit_house SET admission = admission + 5 WHERE house = '{house}'")
conn.commit()
def Calculate_Total_Points():
"""This function calculates the total points for the house"""
for house in house_list:
cursor.execute(
f"UPDATE fit_house SET total_points = bmi + eye_sight + admission WHERE house = '{house}'")
conn.commit()
| true |
ab8dcaaff9068711dd87fe66e3c1a736362ac5ca
|
Python
|
searow/cs249-project
|
/scws_poly_eval.py
|
UTF-8
| 4,884 | 2.5625 | 3 |
[] |
no_license
|
from utils import *
import numpy as np
from scipy.stats import spearmanr
window_sizes = range(1, 100)
embeddings_fp = './embedding_results/saved_embeddings_step_5000000_k_2_dim_150_neg_1_swind_5_contexts_1_2018-06-02-20-39-00/saved_embeddings_step_5000000_k_2_dim_150_neg_1_swind_5_contexts_1_2018-06-02-20-39-00'
# embeddings_fp = 'saved_embeddings_step_5000000_k_1_dim_300_neg_65_swind_5_contexts_1_2018-06-01-13_41_24'
corpus_fp = './data/enwik9/enwik9_tokenized_50000'
eval_fp = 'data/SCWS/ratings'
def eval_pair(eval_test, name_token,
              target_embeddings, target_mask, context_embeddings, window_size):
d = {}
for i in range(1, 3):
word_index = eval_test['word{}_index'.format(i)]
sentence = eval_test['word{}_in_context'.format(i)]
tokenized_sentence = tokenize_sentence( \
sentence, name_token)
all_meaning_embedding_list = extract_meanings_as_list(target_embeddings, \
tokenized_sentence, word_index, target_mask)
context_embedding_list = extract_contexts_as_list(\
tokenized_sentence, \
word_index, context_embeddings, window_size)
d['w{}_real_meaning'.format(i)] = get_real_meaning_embedding( \
context_embedding_list, \
all_meaning_embedding_list)
return cosine_sim(d['w1_real_meaning'], d['w2_real_meaning'])
def tokenize_sentence(sentence, name_token):
# Chris
# sentence = [0, 1, 5, 2, 1, 2, 0]
unknown_token = 0
words = sentence.split()
tokenized = []
for word in words:
if word not in name_token:
tokenized.append(unknown_token)
else:
tokenized.append(name_token[word])
return tokenized
def extract_meanings_as_list(target_embeddings, tokenized_sentence, word_index, target_mask):
k = len(target_embeddings)
meanings = []
target_token = tokenized_sentence[word_index]
is_untrained_embs = np.count_nonzero(target_mask[0][target_token]) == 0
for i in range(k):
if(is_untrained_embs or target_mask[1][target_token][i]):
meanings.append(target_embeddings[i][target_token])
return meanings
def extract_contexts_as_list(tokenized_sentence, word_index, context_emb_mat, window_size):
# Alex
# e.g. tokenized_sentence=[0, 2, 1, 3, ...]
# word_index = 1
# context_emb_mat: V x D
# Remeber to check bound when using window_size!
if word_index < window_size:
context_tokens = [token for token in tokenized_sentence[0:word_index+window_size+1] \
if token != tokenized_sentence[word_index]]
else:
context_tokens = [token for token in tokenized_sentence[word_index-window_size:word_index+window_size+1] \
if token != tokenized_sentence[word_index]]
assert(context_emb_mat.shape[0] == 1)
return [context_emb_mat[0][token] for token in context_tokens]
def get_real_meaning_embedding(context_embedding_list, all_meaning_embedding_list):
# Jack
avg_c = np.mean(context_embedding_list, axis=0)
scores = [np.dot(m, avg_c) for m in all_meaning_embedding_list]
return all_meaning_embedding_list[np.argmax(scores)]
def cosine_sim(emb1, emb2):
return np.dot(emb1, emb2)
def evaluate_spearman(window_size, eval_tests, name_token, context_embeddings,
target_counts, target_embeddings):
true_scores = []
test_scores = []
word1_UNK = 0
word2_UNK = 0
eval_UNK = 0
target_mask = filter_words(target_counts)
for eval_test in eval_tests:
if eval_test['word1'] not in name_token or eval_test['word2'] not in name_token:
continue
        score = eval_pair(eval_test, name_token,
                          target_embeddings, target_mask, context_embeddings, window_size)
test_scores.append(score)
true_scores.append(eval_test['average_human_rating'])
spearman_score = spearmanr(test_scores, true_scores)
rho_correlation = spearman_score[0]
return rho_correlation
if __name__ == '__main__':
# Load trained data.
train_loaded = load(embeddings_fp)
target_embeddings = train_loaded['target_embeddings']
target_counts = train_loaded['target_counts']
context_embeddings = train_loaded['context_embeddings']
context_counts = train_loaded['context_counts']
# Load corpus data.
corpus_loaded = load(corpus_fp)
name_token = corpus_loaded['dictionary']
token_name = corpus_loaded['reversed_dictionary']
# Load eval task.
eval_tests = load(eval_fp) # eval_test = list of dicts.
spearman_scores = []
for window in window_sizes:
score = evaluate_spearman(window, eval_tests, name_token,
context_embeddings,
target_counts, target_embeddings)
spearman_scores.append(score)
print('{} {}'.format(window, score))
| true |
1a374bf3437548133478a3ee20978e726bf6b7c3
|
Python
|
donglaiw/pytorch_connectomics
|
/connectomics/data/utils/data_affinity.py
|
UTF-8
| 4,188 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
import numpy as np
# from Janelia pyGreentea
# https://github.com/naibaf7/PyGreentea
def mknhood2d(radius=1):
    # Makes nhood structures for some commonly used dense graphs.
ceilrad = np.ceil(radius)
x = np.arange(-ceilrad,ceilrad+1,1)
y = np.arange(-ceilrad,ceilrad+1,1)
[i,j] = np.meshgrid(y,x)
idxkeep = (i**2+j**2)<=radius**2
    i = i[idxkeep].ravel()
    j = j[idxkeep].ravel()
    zeroIdx = np.ceil(len(i) / 2).astype(np.int32)
nhood = np.vstack((i[:zeroIdx],j[:zeroIdx])).T.astype(np.int32)
return np.ascontiguousarray(np.flipud(nhood))
def mknhood3d(radius=1):
    # Makes nhood structures for some commonly used dense graphs.
# The neighborhood reference for the dense graph representation we use
# nhood(1,:) is a 3 vector that describe the node that conn(:,:,:,1) connects to
# so to use it: conn(23,12,42,3) is the edge between node [23 12 42] and [23 12 42]+nhood(3,:)
# See? It's simple! nhood is just the offset vector that the edge corresponds to.
ceilrad = np.ceil(radius)
x = np.arange(-ceilrad,ceilrad+1,1)
y = np.arange(-ceilrad,ceilrad+1,1)
z = np.arange(-ceilrad,ceilrad+1,1)
[i,j,k] = np.meshgrid(z,y,x)
idxkeep = (i**2+j**2+k**2)<=radius**2
    i = i[idxkeep].ravel()
    j = j[idxkeep].ravel()
    k = k[idxkeep].ravel()
    zeroIdx = np.array(len(i) // 2).astype(np.int32)
nhood = np.vstack((k[:zeroIdx],i[:zeroIdx],j[:zeroIdx])).T.astype(np.int32)
return np.ascontiguousarray(np.flipud(nhood))
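# Sanity check (a sketch): for radius 1 this yields the three face-neighbour
# offsets (after the flipud above):
#   mknhood3d(1) -> [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]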
def mknhood3d_aniso(radiusxy=1,radiusxy_zminus1=1.8):
    # Makes nhood structures for some commonly used dense graphs.
nhoodxyz = mknhood3d(radiusxy)
nhoodxy_zminus1 = mknhood2d(radiusxy_zminus1)
nhood = np.zeros((nhoodxyz.shape[0]+2*nhoodxy_zminus1.shape[0],3),dtype=np.int32)
nhood[:3,:3] = nhoodxyz
nhood[3:,0] = -1
nhood[3:,1:] = np.vstack((nhoodxy_zminus1,-nhoodxy_zminus1))
return np.ascontiguousarray(nhood)
def seg_to_aff(seg, nhood=mknhood3d(1), pad='replicate'):
# constructs an affinity graph from a segmentation
# assume affinity graph is represented as:
# shape = (e, z, y, x)
# nhood.shape = (edges, 3)
shape = seg.shape
nEdge = nhood.shape[0]
aff = np.zeros((nEdge,)+shape,dtype=np.float32)
for e in range(nEdge):
aff[e, \
max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \
max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \
max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] = \
(seg[max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \
max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \
max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] == \
seg[max(0,nhood[e,0]):min(shape[0],shape[0]+nhood[e,0]), \
max(0,nhood[e,1]):min(shape[1],shape[1]+nhood[e,1]), \
max(0,nhood[e,2]):min(shape[2],shape[2]+nhood[e,2])] ) \
* ( seg[max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \
max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \
max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] > 0 ) \
* ( seg[max(0,nhood[e,0]):min(shape[0],shape[0]+nhood[e,0]), \
max(0,nhood[e,1]):min(shape[1],shape[1]+nhood[e,1]), \
max(0,nhood[e,2]):min(shape[2],shape[2]+nhood[e,2])] > 0 )
if nEdge==3 and pad == 'replicate': # pad the boundary affinity
aff[0,0] = (seg[0]>0).astype(aff.dtype)
aff[1,:,0] = (seg[:,0]>0).astype(aff.dtype)
aff[2,:,:,0] = (seg[:,:,0]>0).astype(aff.dtype)
return aff
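# Example usage (a sketch with a hypothetical toy volume):
#   seg = np.zeros((4, 4, 4), dtype=np.int32)
#   seg[:2] = 1; seg[2:] = 2     # two labelled blocks
#   aff = seg_to_aff(seg)        # default nhood = mknhood3d(1)
#   aff.shape                    # -> (3, 4, 4, 4), one affinity map per edge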
def blend_gaussian(sz, sigma=0.8, mu=0.0):
"""
Gaussian blending
"""
zz, yy, xx = np.meshgrid(np.linspace(-1,1,sz[0], dtype=np.float32),
np.linspace(-1,1,sz[1], dtype=np.float32),
np.linspace(-1,1,sz[2], dtype=np.float32), indexing='ij')
dd = np.sqrt(zz*zz + yy*yy + xx*xx)
ww = 1e-4 + np.exp(-( (dd-mu)**2 / ( 2.0 * sigma**2 )))
return ww
| true |
87aa5dcbbab1a117e3f0ff41f685a291efb72b74
|
Python
|
takkaria/plucker
|
/tests/test_plucker/test_extractor.py
|
UTF-8
| 1,407 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
import pytest
from typing import List
from plucker.types import JSONValue
from plucker.extractor import extract, _get_from_path
from plucker.tokeniser import ArrayToken, NameToken, Range, Token
from plucker.exceptions import ExtractError
# A fake range cos it doesn't really matter.
fr = Range(0, 0)
def test_getter():
data = {"fred": 2}
path: List[Token] = [NameToken(fr, "fred")]
assert _get_from_path(data, path) == 2
def test_getter_array():
data = {"fred": [2, 3, 4]}
path: List[Token] = [NameToken(fr, "fred")]
assert _get_from_path(data, path) == [2, 3, 4]
def test_getter_array_subs():
data = {"fred": [{"v": 2}, {"v": 3}]}
path: List[Token] = [NameToken(fr, "fred"), ArrayToken(fr), NameToken(fr, "v")]
assert _get_from_path(data, path) == [2, 3]
def test_getter_array_sub_subs():
data = [
{"fred": [{"v": 2}, {"v": 3}]},
{"fred": [{"v": 2}, {"v": 3}]},
]
path: List[Token] = [
ArrayToken(fr),
NameToken(fr, "fred"),
ArrayToken(fr),
NameToken(fr, "v"),
]
assert _get_from_path(data, path) == [[2, 3], [2, 3]]
def test_getter_array_error():
data: JSONValue = [
{"fred": []},
{"fred": []},
]
path = ".fred[].v"
with pytest.raises(ExtractError) as exc:
extract(data, path)
assert "expected fred to be 'dict' but it was 'list'" in str(exc)
| true |
9de669e2a40472f54416f858b33b56d677b88535
|
Python
|
frozenparadox99/OpenCvCourse
|
/drawOnStream.py
|
UTF-8
| 1,117 | 2.796875 | 3 |
[] |
no_license
|
import cv2
#Callback Function
def draw_rect(event,x,y,flags,params):
global p1,p2,topLeftClicked,bottomRightClicked
if event==cv2.EVENT_LBUTTONDOWN:
if topLeftClicked and bottomRightClicked:
p1=(0,0)
p2=(0,0)
topLeftClicked=False
bottomRightClicked=False
if topLeftClicked==False:
p1=(x,y)
topLeftClicked=True
elif bottomRightClicked==False:
p2=(x,y)
bottomRightClicked=True
#Global variables
p1=(0,0)
p2=(0,0)
topLeftClicked=False
bottomRightClicked=False
#Dock the callback
cv2.namedWindow('frame')
cv2.setMouseCallback('frame',draw_rect)
#Access the video
cap=cv2.VideoCapture(0)
while True:
ret,frame=cap.read()
if topLeftClicked:
cv2.circle(frame,center=p1,radius=5,color=(0,0,255),thickness=-1)
    if topLeftClicked and bottomRightClicked:
cv2.rectangle(frame,p1,p2,(0,0,255),3)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF ==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| true |
1f06329bdb958c952b45f9999504208e107b2d5f
|
Python
|
JakeAttard/2810ICTPythonExercises
|
/W1H6.py
|
UTF-8
| 933 | 3.375 | 3 |
[] |
no_license
|
from PyTest import *
##//////////////////////////// PROBLEM STATEMENT //////////////////////////
## Given a list of ints, decide which is larger of the first and //
## last elements in the list, and set all the other elements to be that //
## that value. Print the changed list. Implement functions for: //
## - reading the list //
## - finding the maximum of 2 integers //
## - setting all elements of a list to a single value //
## - printing a list //
## 1, 2, 3 -> 3, 3, 3 //
## 11, 5, 9 -> 11, 11, 11 //
## 2, 11, 3 -> 3, 3, 3 //
##/////////////////////////////////////////////////////////////////////////
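# One possible solution (a sketch; the exercise does not prescribe function
# names, so the ones below are my own choice):
def read_list():
    """Read a whitespace or comma separated list of ints from standard input."""
    return [int(x) for x in input().replace(',', ' ').split()]

def max_of_two(a, b):
    """Return the larger of two integers."""
    return a if a >= b else b

def set_all(lst, value):
    """Return a copy of the list with every element set to a single value."""
    return [value] * len(lst)

def print_list(lst):
    """Print a list as comma separated values."""
    print(', '.join(str(x) for x in lst))

# Example run (commented out so the test harness does not block on input()):
# numbers = read_list()                                            # e.g. 1, 2, 3
# numbers = set_all(numbers, max_of_two(numbers[0], numbers[-1]))
# print_list(numbers)                                              # -> 3, 3, 3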
| true |
5be89b5e1bfafc13a64772afca678879345d1543
|
Python
|
drabekj/OttoBot-Alexa-Skill
|
/test/test_add_to_watchlist.py
|
UTF-8
| 3,682 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
import unittest
from flask import json
from app import create_app, db, logger
from app.models import Stock, User, Watchlist
from test.sample_requests import *
class AddToWatchlistTestCase(unittest.TestCase):
"""This class represents the OttoBot routing test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
# binds the app to the current context
with self.app.app_context():
# create all tables
db.create_all()
# Insert testing data
Stock(test_stock_1_ticker, test_stock_1_date, close=test_stock_1_close).save()
User(test_user_id, test_user_name).save()
def test_intent_add_to_watchlist_dialog(self):
"""Test if the add to watchlist dialog works correctly including confirmation/cancelation."""
# 1st Step: start dialog "add to watchlist"
logger.info("1st step: start dialog add to watchlist")
self.add_to_watchlist_dialog_invoke()
logger.info("2st step: deny adding stock to watchlist")
self.add_to_watchlist_dialog_deny()
logger.info("3st step: confirm adding stock to watchlist")
self.add_to_watchlist_dialog_invoke()
self.add_to_watchlist_dialog_confirm()
def add_to_watchlist_dialog_invoke(self):
# Setup
request = json.dumps(intent_add_to_watchlist())
# Execute
res = self.client().post('/api/', data=request,
content_type='application/json')
# Assert
self.assertEqual(res.status_code, 200)
self.assertIn(RESPONSE_intent_add_to_watchlist_ask_confirmation,
str(res.data))
# check if stock not in watchlist just yet
with self.app.app_context():
watchlist = Watchlist.get_users_tickers(test_user_id)
for item in watchlist:
self.assertNotEqual(item, test_add_stock)
def add_to_watchlist_dialog_deny(self):
# Setup
request = json.dumps(intent_add_to_watchlist_deny())
# Execute
res = self.client().post('/api/', data=request,
content_type='application/json')
        # Assert deny response
self.assertEqual(res.status_code, 200)
self.assertIn(RESPONSE_intent_add_to_watchlist_denied, str(res.data))
# check if stock not in watchlist just yet
with self.app.app_context():
watchlist = Watchlist.get_users_tickers(test_user_id)
for item in watchlist:
self.assertNotEqual(item, test_add_stock)
def add_to_watchlist_dialog_confirm(self):
# Setup
request = json.dumps(intent_add_to_watchlist_confirm())
# Execute
res = self.client().post('/api/', data=request,
content_type='application/json')
        # Assert confirm response
self.assertEqual(res.status_code, 200)
self.assertIn(
RESPONSE_intent_add_to_watchlist_confirmed.format(test_add_stock),
str(res.data))
# check if stock was added to watchlist
with self.app.app_context():
watchlist = Watchlist.get_users_tickers(test_user_id)
for item in watchlist:
self.assertEqual(item, test_add_stock)
def tearDown(self):
"""teardown all initialized variables."""
with self.app.app_context():
# drop all tables
db.session.remove()
db.drop_all()
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
| true |
c2a180e13a684facfb74f3055a4acf620094dbca
|
Python
|
morallo/plaso
|
/tests/parsers/sqlite_plugins/twitter_ios.py
|
UTF-8
| 6,863 | 2.515625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Twitter on iOS 8+ plugin."""
import unittest
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers.sqlite_plugins import twitter_ios
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class TwitterIOSTest(test_lib.SQLitePluginTestCase):
"""Tests for Twitter on iOS 8+ database plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'twitter_ios.db'])
def testProcess(self):
"""Test the Process function on a Twitter iOS file."""
plugin = twitter_ios.TwitterIOSPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'twitter_ios.db'], plugin)
# We should have 184 events in total.
# - 25 Contacts creation events.
# - 25 Contacts update events.
# - 67 Status creation events.
# - 67 Status update events.
self.assertEqual(184, storage_writer.number_of_events)
events = list(storage_writer.GetEvents())
# Test the first contact creation event.
event = events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2007-04-22 14:42:37')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
self.assertEqual(event.screen_name, u'BBCBreaking')
self.assertEqual(event.name, u'BBC Breaking News')
self.assertEqual(event.location, u'London, UK')
self.assertEqual(event.following, 0)
self.assertEqual(event.followers_count, 19466932)
self.assertEqual(event.following_count, 3)
self.assertEqual(event.url, u'http://www.bbc.co.uk/news')
expected_description = (
u'Breaking news alerts and updates from the BBC. For news, features, '
u'analysis follow @BBCWorld (international) or @BBCNews (UK). Latest '
u'sport news @BBCSport.')
self.assertEqual(event.description, expected_description)
expected_profile_url = (
u'https://pbs.twimg.com/profile_images/'
u'460740982498013184/wIPwMwru_normal.png')
self.assertEqual(event.profile_url, expected_profile_url)
expected_message = (
u'Screen name: BBCBreaking Profile picture URL: '
u'https://pbs.twimg.com/profile_images/460740982498013184/'
u'wIPwMwru_normal.png Name: BBC Breaking News Location: London, UK '
u'Description: Breaking news alerts and updates from the BBC. For '
u'news, features, analysis follow @BBCWorld (international) or '
u'@BBCNews (UK). Latest sport news @BBCSport. URL: '
u'http://www.bbc.co.uk/news Following: No Number of followers: '
u'19466932 Number of following: 3')
expected_short_message = (
u'Screen name: BBCBreaking Description: Breaking news alerts and '
u'updates from t...')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
# Test first contact modification event.
event = events[1]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-12-02 15:35:44')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_UPDATE)
self.assertEqual(event.screen_name, u'BBCBreaking')
self.assertEqual(event.name, u'BBC Breaking News')
self.assertEqual(event.location, u'London, UK')
self.assertEqual(event.following, 0)
self.assertEqual(event.followers_count, 19466932)
self.assertEqual(event.following_count, 3)
self.assertEqual(event.url, u'http://www.bbc.co.uk/news')
expected_description = (
u'Breaking news alerts and updates from the BBC. For news, features, '
u'analysis follow @BBCWorld (international) or @BBCNews (UK). Latest '
u'sport news @BBCSport.')
self.assertEqual(event.description, expected_description)
expected_profile_url = (
u'https://pbs.twimg.com/profile_images/'
u'460740982498013184/wIPwMwru_normal.png')
self.assertEqual(event.profile_url, expected_profile_url)
expected_message = (
u'Screen name: BBCBreaking Profile picture URL: '
u'https://pbs.twimg.com/profile_images/460740982498013184/'
u'wIPwMwru_normal.png Name: BBC Breaking News Location: London, UK '
u'Description: Breaking news alerts and updates from the BBC. For '
u'news, features, analysis follow @BBCWorld (international) or '
u'@BBCNews (UK). Latest sport news @BBCSport. URL: '
u'http://www.bbc.co.uk/news Following: No Number of followers: '
u'19466932 Number of following: 3')
expected_short_message = (
u'Screen name: BBCBreaking Description: Breaking news alerts and '
u'updates from t...')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
# Test first status creation event.
event = events[50]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-09-11 11:46:16')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
self.assertEqual(event.text, u'Never forget. http://t.co/L7bjWue1A2')
self.assertEqual(event.user_id, 475222380)
self.assertEqual(event.name, u'Heather Mahalik')
self.assertEqual(event.retweet_count, 2)
self.assertEqual(event.favorite_count, 3)
self.assertEqual(event.favorited, 0)
expected_message = (
u'Name: Heather Mahalik User Id: 475222380 Message: Never forget. '
u'http://t.co/L7bjWue1A2 Favorite: No Retweet Count: 2 Favorite '
u'Count: 3')
expected_short_message = (
u'Name: Heather Mahalik Message: Never forget. http://t.co/L7bjWue1A2')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
# Test first status update event.
event = events[51]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-12-02 15:39:37')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_UPDATE)
self.assertEqual(event.text, u'Never forget. http://t.co/L7bjWue1A2')
self.assertEqual(event.user_id, 475222380)
self.assertEqual(event.name, u'Heather Mahalik')
self.assertEqual(event.retweet_count, 2)
self.assertEqual(event.favorite_count, 3)
self.assertEqual(event.favorited, 0)
expected_message = (
u'Name: Heather Mahalik User Id: 475222380 Message: Never forget. '
u'http://t.co/L7bjWue1A2 Favorite: No Retweet Count: 2 Favorite '
u'Count: 3')
expected_short_message = (
u'Name: Heather Mahalik Message: Never forget. http://t.co/L7bjWue1A2')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| true |
7c0702d25ff96613d2f0a75ec8d61b7149f2de64
|
Python
|
stefanduscher/stefanduscher
|
/Anfänger_Tag3_Schieberegler.py
|
UTF-8
| 2,764 | 3.53125 | 4 |
[] |
no_license
|
# Program: Interactive graphic with a slider
#
# Date: 02 June 2019
#
# Author: Stefan Duscher
#
# Remarks: course "Python für Anfänger" (Python for beginners), Ludwigsburg
#
# ----------------------------------------------------
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.widgets import Slider
# Query the properties of the Matplotlib library
def Abfragen_Matplotlib():
    mpl_version = mpl.__version__
    mpl_ort = mpl.__file__
    mpl_config = mpl.get_configdir()
    mpl_cache = mpl.get_cachedir()
    print("Matplotlib version:", mpl_version)
    print("Matplotlib installation location:", mpl_ort)
    print("The configuration information is located at:", mpl_config)
    print("The cache is located at:", mpl_cache)
# Define the x and y values
x = np.linspace(0, 12, 200)
y = np.sin(x)
bild = plt.figure()
# Here "bild" becomes a figure() object from Matplotlib's pyplot library.
# Properties can be assigned to this object later.
# The actual plot and the slider should be stacked on top of each other.
# With a simple 2x1 grid, however, they would both be the same height.
# We therefore create the grid with gridspec, which lets us specify a
# height ratio.
gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[8, 1])
# Gridspec partitions a window in advance so that subplots can then be
# placed in it. For details see the official documentation:
# https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.gridspec.GridSpec.html
zeichnen1 = bild.add_subplot(gs[0])
zeichnen2 = bild.add_subplot(gs[1])
# zeichnen1 and zeichnen2 are also objects; they derive from the figure
# object and give it further properties, namely that a subplot is
# assigned to the figure.
# zeichnen1 takes subplot gs[0], the upper subplot (8/9 of the height)
# zeichnen2 takes subplot gs[1], the lower subplot (1/9 of the height)
zeichnen1.plot(x, y)
# The slider fills one subplot and has a name, min, max and start value
sl = Slider(zeichnen2, 'Frequency ', 0.01, 5.0, valinit=1)
# This update function redraws the graph:
def myupdate(val):
    y = np.sin(val*x) # val is the value currently set on the slider
    zeichnen1.cla() # clear the old subplot
    zeichnen1.plot(x, y) # redraw
    bild.canvas.draw_idle()
# The main program starts here
Abfragen_Matplotlib()
# Tell the slider to call myupdate whenever its value changes
sl.on_changed(myupdate)
# Show the window (blocking) after the callback is registered
plt.show()
| true |
3a7f93912835863b36ce9326b6dcb9a72c8876f2
|
Python
|
dongupak/Basic-Python-Programming
|
/Ch18_패키지/from_import_ex.py
|
UTF-8
| 190 | 2.65625 | 3 |
[] |
no_license
|
import sys
sys.path.append("/Users/dongupak/pkgTutorial")
from math_pkg import math_op
print('100 + 200 =', math_op.add(100,200))
from str_pkg import str_op
print(str_op.upper('hello'))
| true |
e39e1b1ecb2867867113fd26aa951eca6a54d3e6
|
Python
|
gutzbenj/hackerrank_challenges
|
/warmup/birthday_cake_candles.py
|
UTF-8
| 315 | 3.03125 | 3 |
[] |
no_license
|
import sys
# Complete the birthdayCakeCandles function below.
def birthdayCakeCandles(ar):
ar_max = max(ar)
print(sum([n == ar_max for n in ar]))
sys.stdout.flush()
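# Example (hypothetical input): ar = [3, 2, 1, 3] -> prints 2, since two
# candles share the maximum height of 3.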
if __name__ == '__main__':
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
birthdayCakeCandles(ar)
| true |
3082950c9c51fb337c3ca87a5c9e6dfa519015c6
|
Python
|
Icedgarr/Machine-Learning
|
/coursework/Problemset1/Ex4.py
|
UTF-8
| 2,077 | 3.171875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 11:18:36 2017
@author: roger
"""
import numpy as np
import itertools as itt
import matplotlib.pyplot as plt
from numpy import random as rand
def proj_basis(d,D): #Projects the basis of a D-dim space to a d-dim space
W=rand.normal(0,1/d,(d,D)) #Generate a random matrix to project D-dim vectors to d-dim space
basis=np.identity(D) #Generate the basis of a D-dim space
proj_vect=np.dot(W,basis) #Project the basis
proj_vect[0]=proj_vect[0]-np.mean(proj_vect[0]) #center first component
proj_vect[1]=proj_vect[1]-np.mean(proj_vect[1]) #center second component
std_dev=np.sqrt(np.var(proj_vect[0,])) #compute the std dev of the first component
proj_vect=proj_vect/std_dev #rescale by first component
return proj_vect
d=2
i=0
rng=[10,50,100,150,200,500,1000,10000]
for D in rng: #Plot the proj basis for a rng of dimensions D into a d-dim space
i=i+1
proj_vect=proj_basis(d,D)
rnd_vect_plane=rand.normal(0,1,(2,D)) #generate random normals
plt.subplot(4,2,i) #more than one plot
plt.scatter(proj_vect[0],proj_vect[1])
plt.scatter(rnd_vect_plane[0],rnd_vect_plane[1],color="red")
plt.title("N=%d"%D) #change title
#hypercube
def proj_hypercube(d,D):
vmax=[1]*D
vmin=[-1]*D
hypercube=np.transpose(np.asarray(list(itt.product(*zip(vmin,vmax))))) #generates the vertices
W=rand.normal(0,1/d,(d,D)) #Generates the projection matrix
proj_hyp_cube=np.dot(W,hypercube) #Projects
proj_hyp_cube[0]=proj_hyp_cube[0]-np.mean(proj_hyp_cube[0])
proj_hyp_cube[1]=proj_hyp_cube[1]-np.mean(proj_hyp_cube[1])
std_dev=np.sqrt(np.var(proj_hyp_cube[1,]))
proj_hyp_cube=proj_hyp_cube/std_dev
return proj_hyp_cube
d=2
rng=[2,3,4,5,6,10]
i=0
for D in rng: #projects the hypercubes from different dimensions to a 2-dim subspace
i=i+1
proj_hyp_cube=proj_hypercube(d,D)
plt.subplot(3,2,i) #more than one plot
plt.scatter(proj_hyp_cube[0],proj_hyp_cube[1])
plt.title("D=%d"%D) #change title
| true |
6851172d0b9952f940f838f89ef0e316edb6f073
|
Python
|
ChizzaHut/PythonLog
|
/Practice 6 - List Comprehensions.py
|
UTF-8
| 174 | 3 | 3 |
[] |
no_license
|
import random
a = random.sample(range(1, 50), 10)
a.sort()
b = []
c = []
for x in a:
if x % 2 == 0:
b.append(x)
else:
c.append(x)
print(a)
print(b)
print(c)
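# The same filtering written as list comprehensions, the topic of this
# practice file; this recomputes the even/odd split and should match b and c:
evens = [x for x in a if x % 2 == 0]
odds = [x for x in a if x % 2 != 0]
print(evens == b, odds == c)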
| true |
3583759e41724e3824eb3d7df8ff1f55dfe04330
|
Python
|
bwats2/CSCIE7-Final-Project
|
/tests/beautifulsouptest3.py
|
UTF-8
| 2,594 | 3.28125 | 3 |
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
# https://stackoverflow.com/questions/24398302/bs4-featurenotfound-couldnt-find-a-tree-builder-with-the-features-you-requeste
# https://codeburst.io/web-scraping-101-with-python-beautiful-soup-bb617be1f486
NFL_TEAMS = ['Arizona Cardinals', 'Atlanta Falcons', 'Baltimore Ravens', 'Buffalo Bills', 'Carolina Panthers', 'Chicago Bears', 'Cincinnati Bengals', 'Cleveland Browns', 'Dallas Cowboys', 'Denver Broncos', 'Detroit Lions', 'Green Bay Packers', 'Houston Texans', 'Indianapolis Colts', 'Jacksonville Jaguars', 'Kansas City Chiefs', 'Los Angeles Chargers', 'Los Angeles Rams', 'Miami Dolphins', 'Minnesota Vikings', 'New England Patriots', 'New Orleans Saints', 'New York Giants', 'New York Jets', 'Oakland Raiders', 'Philadelphia Eagles', 'Pittsburgh Steelers', 'San Francisco 49ers', 'Seattle Seahawks', 'Tampa Bay Buccaneers', 'Tennessee Titans', 'Washington Redskins']
website_url = requests.get("https://en.wikipedia.org/wiki/2019_NFL_season").text
soup = BeautifulSoup(website_url,"html.parser")
# print(soup.prettify())
my_table = soup.find('table',{'class':'multicol'})
# print(my_table)
links = my_table.find_all('td')
# print(links)
teamscores = [link.text.strip() for link in links]
# for link in links:
# teamscores.append(link.text.strip())
print(teamscores)
# index = teamscores.index("New Orlean Saints")
# print(teamscores[index])
# print(teamscores[index+1])
# print(teamscores[index+2])
# Issue is that team names have added characters such as "y – New Orleans Saints"
# Maybe I can use REGEX???
# import re
# # berry_idx = [i for i, item in enumerate(NFL_TEAMS) if re.search('??????New Orlean Saints?????')]
# teamindex = [i for i, word in enumerate(teamscores) if word.endswith('New England Patriots') or word.startswith('New England Patriots')]
# print(teamindex[0])
# https://stackoverflow.com/questions/4146009/python-get-list-indexes-using-regular-expression
from collections import defaultdict
teamscoredict = defaultdict(int) # Dict to hold team and score value
for team in NFL_TEAMS:
print(team)
# Wikipedia sometimes adds characters to start or end of team name in table
teamindex = [i for i, word in enumerate(teamscores) if word.endswith(team) or word.startswith(team)]
print(teamscores[teamindex[0]])
wins = int(teamscores[teamindex[0]+1])
ties = int(teamscores[teamindex[0]+3])
    score = wins + ties / 2
print(f"wins: {wins}")
print("losses: ",teamscores[teamindex[0]+2])
print(f"ties: {ties}")
print(score)
teamscoredict[team] = score
print(teamscoredict)
| true |
9f956c93a622df14048693b428b8d1db11c6c7d1
|
Python
|
faterazer/LeetCode
|
/0022. Generate Parentheses/generate_parentheses.py
|
UTF-8
| 524 | 3.40625 | 3 |
[] |
no_license
|
from typing import List
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
ret = []
self.dfs(ret, "", n, n)
return ret
def dfs(self, ret: List[str], s: str, lpairs: int, rpairs: int) -> None:
if rpairs < lpairs:
return
if lpairs == 0 and rpairs == 0:
ret.append(s)
return
if lpairs:
self.dfs(ret, s + "(", lpairs - 1, rpairs)
if rpairs:
self.dfs(ret, s + ")", lpairs, rpairs - 1)
| true |
752852e27c7500c56dc4c7a03790ec99359e9469
|
Python
|
Tubbz-alt/sds
|
/example_problem/exact-solution.py
|
UTF-8
| 825 | 3.109375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
'''Find the exact solution of the toy dataset
'''
from itertools import combinations
import numpy as np
import pandas as pd
from time import time
START = time()
N = 24 # Population size
n = 12 # dissimilar-set size
# Load the dataset; skip the header row so pandas assigns integer column
# labels (it would otherwise read them as strings)
mtrx = pd.read_csv(f'toy-{N}x{N}-dataset.csv', skiprows=1, header=None)
# Run through all possible combinations
sums = []
for subset in combinations(mtrx.index, n):
submtrx = mtrx[list(subset)].loc[list(subset)]
sums.append(submtrx.sum().sum()/2)
sums = np.array(sums)
df = pd.DataFrame([sums.max(), sums.mean(), sums.min()], index=['max set', 'mean set', 'min set']).T
df.to_csv(f'exact-solution-N{N}-n{n}.csv', index=False)
print(f'Exact solution N = {N}, n = {n}')
print((time()-START)/60, ' min')
| true |
dc21f1d75ad28fa3f7d7fdebfd694e29872c064d
|
Python
|
tangyiheng2021/pg_plan_inspector
|
/pgpi/regression.py
|
UTF-8
| 19,527 | 2.953125 | 3 |
[] |
no_license
|
"""
regression.py
Formatted by black (https://pypi.org/project/black/)
Copyright (c) 2021, Hironobu Suzuki @ interdb.jp
"""
import math
import os
import sys
import operator
from .common import Common, Log
from .repository import Repository
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
class CalcRegression:
"""
Functions to calculate the regression params.
"""
def set_log_level(self, log_level):
self.LogLevel = log_level
def __rss(self, X, Y):
assert len(X) != 0
return math.sqrt(
sum(list(map(lambda x: x ** 2, list(map(operator.sub, X, Y)))))
) / len(X)
def scan(self, X, Y):
"""
linear regression
* Model(no bias): y = a * x
* Loss function: Mean Square Error
"""
_sumY = sum(Y)
_sumX = sum(X)
if Log.debug3 <= self.LogLevel:
print("Debug3: ----- SCAN ----")
print("Debug3: ===> X = {}".format(X))
print("Debug3: ===> Y = {}".format(Y))
if 250 * _sumY < _sumX:
# Assume that this data set is fit to a constant function.
if Log.debug3 <= self.LogLevel:
print(
"Debug3: ==> coef = 0 intercept = {}".format(
round(_sumY / len(Y), 5)
)
)
return (0, round(_sumY / len(Y), 5))
else:
if Log.debug3 <= self.LogLevel:
if _sumX == 0:
print(
"Debug3: ==> coef = 0 intercept = {}".format(
round(_sumY / len(Y), 5)
)
)
else:
print(
"Debug3: ==> coef = {} intercept = 0".format(
round(_sumY / _sumX, 5)
)
)
if _sumX == 0:
return (0, round(_sumY / len(Y), 5))
else:
return (_sumY / _sumX, 0)
def gather(self, X, Y):
"""
linear regression
* Model(no bias): y = a * x
* Loss function: Mean Square Error
"""
_sumY = sum(Y)
_sumX = sum(X)
if Log.debug3 <= self.LogLevel:
print("Debug3: ---- GATHER ----")
print("Debug3: ===> X = {}".format(X))
print("Debug3: ===> Y = {}".format(Y))
if Log.debug3 <= self.LogLevel:
if _sumX == 0:
print(
"Debug3: ==> coef = 0 intercept = {}".format(
round(_sumY / len(Y), 5)
)
)
else:
print(
"Debug3: ==> coef = {} intercept = 0".format(
round(_sumY / _sumX, 5)
)
)
if _sumX == 0:
return (0, round(_sumY / len(Y), 5))
else:
return (_sumY / _sumX, 0)
def nested_loop(self, Xouter, Xinner, Y):
"""
Multiple linear regression
* Model(no bias): Y = a * Xinner * Xouter
* Loss function: Mean Square Error
"""
"""
_sumY = 0; _sumX = 0
for i in range(0, len(Y)):
_sumY += Y[i] * Xinner[i] * Xouter[i]
_sumX += Xinner[i] **2 * Xouter[i] **2
"""
_sumY = sum(list(map(operator.mul, list(map(operator.mul, Xinner, Xouter)), Y)))
_sumX = sum(
list(
map(
operator.mul,
list(map(lambda x: x ** 2, Xinner)),
list(map(lambda x: x ** 2, Xouter)),
)
)
)
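        # Closed form for least squares: minimising sum((y - a*x)**2) with
        # x = Xinner*Xouter gives a = sum(x*y) / sum(x**2) = _sumY / _sumX.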
if Log.debug3 <= self.LogLevel:
print("Debug3: +++++ NESTED LOOP JOIN +++++")
print("Debug3: ===> Xouter = {}".format(Xouter))
print("Debug3: ===> Xinner = {}".format(Xinner))
print("Debug3: ===> Y = {}".format(Y))
if _sumX == 0:
print("Debug3: ==> coef=1")
else:
print("Debug3: ==> coef={}".format(str(round(_sumY / _sumX, 5))))
return 1 if _sumX == 0 else _sumY / _sumX
def merge_or_hash_join(self, Xouter, Xinner, Y, add_bias_0=True):
def multi_regression(Xouter, Xinner, Y, add_bias_0=True):
_X = []
_Y = []
"""Format _Y and _X"""
for i in range(0, len(Y)):
_Y.append(Y[i])
_X.append([Xouter[i], Xinner[i]])
if add_bias_0:
# Add a constraint because we assume that the bias is 0
_X.append([0, 0])
_Y.append(0)
if Log.debug3 <= self.LogLevel:
print("Debug3: ****MERGE OR HASH JOIN*****")
print("Debug3: ===> Xouter = {}".format(Xouter))
print("Debug3: ===> Xinner = {}".format(Xinner))
print("Debug3: ===> X ={}".format(_X))
print("Debug3: ===> Plan Rows ={} Y={}".format(Y, _Y))
"""
Calc regression
Multiple linear regression
* Model(no bias): Y = a1 * Xouter + a2 * Xinner
* Loss function: Mean Square Error
"""
scireg = LinearRegression()
scireg.fit(_X, _Y)
_list = scireg.coef_.tolist()
_coef = [round(_list[n], 5) for n in range(len(_list))]
_intercept = round(scireg.intercept_, 5)
"""Predict and calculate RMSE."""
_y_pred = scireg.predict(_X)
_rmse = np.sqrt(mean_squared_error(_Y, _y_pred))
del scireg
return (_coef, _intercept, _rmse)
def single_regression(X, Y, add_bias_0=True):
_X = []
_Y = []
"""Format _Y and _X"""
for i in range(0, len(Y)):
_Y.append(Y[i])
_X.append([X[i]])
if add_bias_0:
# Add a constraint because we assume that the bias is 0
_X.append([0])
_Y.append(0)
if Log.debug3 <= self.LogLevel:
print("Debug3: ****MERGE OR HASH JOIN*****")
print("Debug3: ===> X={}".format(X))
print("Debug3: ===> Plan Rows ={} Y={}".format(Y, _Y))
"""
Calc regression
Multiple linear regression
* Model: Y = a * X + b
* Loss function: Mean Square Error
"""
scireg = LinearRegression()
scireg.fit(_X, _Y)
_list = scireg.coef_.tolist()
_coef = [round(_list[n], 5) for n in range(len(_list))]
_intercept = round(scireg.intercept_, 5)
"""Predict and calculate RMSE."""
_y_pred = scireg.predict(_X)
_rmse = np.sqrt(mean_squared_error(_Y, _y_pred))
del scireg
return (_coef[0], _intercept, _rmse)
def reg(Xouter, Xinner, Y):
## Same as NestedLoop
_sumY = sum(
list(map(operator.mul, list(map(operator.mul, Xinner, Xouter)), Y))
)
_sumX = sum(
list(
map(
operator.mul,
list(map(lambda x: x ** 2, Xinner)),
list(map(lambda x: x ** 2, Xouter)),
)
)
)
_coef = 1 if _sumX == 0 else _sumY / _sumX
# Calculate MSE
_mse = (
sum(
list(
map(
lambda x: x ** 2,
list(
map(
operator.sub,
Y,
list(
map(
lambda y: _coef * y,
list(map(operator.mul, Xouter, Xinner)),
)
),
)
),
)
)
)
/ len(Y)
)
return (_coef, np.sqrt(_mse))
"""
        Calculate regression parameters.
"""
(coef, intercept, rmse) = multi_regression(Xouter, Xinner, Y)
if coef[0] < 0 or coef[1] < 0:
(coef, intercept, rmse) = multi_regression(Xouter, Xinner, Y, False)
_coef = [coef[0], coef[1]]
_reg = 0
_intercept = intercept
_rmse = rmse
(coef, intercept, rmse) = single_regression(Xouter, Y)
if coef < 0:
(coef, intercept, rmse) = single_regression(Xouter, Y, False)
if rmse < _rmse:
_coef = [coef, 0]
_intercept = intercept
_rmse = rmse
(coef, intercept, rmse) = single_regression(Xinner, Y)
if coef < 0:
(coef, intercept, rmse) = single_regression(Xinner, Y, False)
if rmse < _rmse:
_coef = [0, coef]
_intercept = intercept
_rmse = rmse
"""
# Note: This is not used because it makes the results significantly unstable.
(coef, rmse) = reg(Xouter, Xinner, Y)
if rmse < _rmse:
_coef = [0, 0]
_intercept = 0
_reg = coef
"""
if Log.debug3 <= self.LogLevel:
print(
"Debug3: ==> coef={} reg={} intercept={}".format(
_coef, _reg, _intercept
)
)
return (_coef, _reg, _intercept)
class Regression(Repository, CalcRegression):
def __init__(self, base_dir=".", log_level=Log.error):
self.ServerId = ""
self.Level = 0
self.set_base_dir(base_dir)
self.LogLevel = log_level
def __set_serverId(self, serverId):
self.ServerId = serverId
"""
Handle self.Level value.
"""
def __init_level(self):
self.Level = 0
def __incr_level(self):
self.Level += 1
def __get_level(self):
return self.Level
def __delete_objects(self, plan):
"""Delete all objects except 'Node Type' and 'Plan(s)'."""
for k in list(plan):
"""
Use list(plan) instead of plan.keys() to avoid
"RuntimeError: dictionary changed size during iteration" error.
"""
if k != "Node Type" and k != "Plans" and k != "Plan":
plan.pop(k)
return plan
def __calc_regression(self, plan, reg, queryid, planid, depth):
"""
        Calculate the regression parameters of plan, and set the results into reg.
"""
self.__incr_level()
_level = self.__get_level()
_node_type = plan["Node Type"]
"""
nested loop type
"""
for n in (
"Append",
"Merge Append",
"Recursive Union",
"Nested Loop",
"BitmapAnd",
"BitmapOr",
):
if n == _node_type:
(
_Xouter,
_Xinner,
_RR,
) = self.get_inputs(plan)
"""
Calculate the regression parameter.
"""
if Log.debug3 <= self.LogLevel:
print("Debug3: === NodeType={}".format(n))
print("Debug3: *** Y ActualRows={}".format(plan["Actual Rows"]))
print(
"Debug3: *** Xouter ={} Xinner ={}".format(_Xouter, _Xinner)
)
_Y = plan["Actual Rows"]
_coef = self.nested_loop(_Xouter, _Xinner, _Y)
"""
Set the result to the reg dict.
"""
reg.update(Coefficient=[_coef])
return
"""
hash or merge join
"""
for n in ("Merge Join", "Hash Join"):
if n == _node_type:
(
_Xouter,
_Xinner,
_RR,
) = self.get_inputs(plan)
"""
Calculate the regression parameter.
"""
if Log.debug3 <= self.LogLevel:
print(
"Debug3: HASH or MERGE depth={} RR={} queryid={} planid={}".format(
depth, _RR, queryid, planid
)
)
if Log.debug3 <= self.LogLevel:
print("Debug3: === NodeType={}".format(n))
print("Debug3: *** Y ActualRows={}".format(plan["Actual Rows"]))
print(
"Debug3: *** Xouter ={} Xinner ={}".format(_Xouter, _Xinner)
)
_Y = plan["Actual Rows"]
(_coef, _reg, _intercept) = self.merge_or_hash_join(
_Xouter, _Xinner, _Y
)
"""
Set the result to the reg dict.
"""
reg.update(Coefficient=[_coef])
reg.update(Coefficient2=[_reg])
reg.update(Intercept=[_intercept])
return
"""
scan type
"""
"""Calculate the regression parameter."""
if Log.debug3 <= self.LogLevel:
print("Debug3: === NodeType={}".format(_node_type))
print(
"Debug3: *** Plan Rows={} NormalizeParam={} NormalizePlanParam={}".format(
plan["Plan Rows"],
plan["NormalizeParam"],
plan["NormalizePlanParam"],
)
)
print("Debug3: *** Actual Rows={}".format(plan["Actual Rows"]))
(_coef, _intercept) = self.scan(plan["Plan Rows"], plan["Actual Rows"])
"""
Set the result to the reg dict.
"""
reg.update(Coefficient=[_coef])
reg.update(Intercept=[_intercept])
return
def __regression(self, Plans, reg_param, queryid, planid):
"""
Calculate the regression parameters of Plans, and Set the results into
reg_param.
Parameters
----------
Plans : dict
A plan grouped with the same queryid-planid.
reg_param : dict
A dict type skeleton with the same structure as Plans.
queryid : int
planid : int
Returns
-------
reg_param: dict
A dict which contains the regression parameter in each node.
"""
def incr(plan):
if "Node Type" in plan:
self._count += 1
def op(Plans, reg_param, queryid, planid):
if isinstance(Plans, list):
for i in range(0, len(Plans)):
incr(Plans[i])
self.__calc_regression(
Plans[i], reg_param[i], queryid, planid, self._count
)
if "Plans" in Plans[i]:
op(Plans[i]["Plans"], reg_param[i]["Plans"], queryid, planid)
return
else:
incr(Plans)
self.__calc_regression(Plans, reg_param, queryid, planid, self._count)
if "Plans" in Plans:
op(Plans["Plans"], reg_param["Plans"], queryid, planid)
return
# Main procedure.
self._count = 0
op(Plans, reg_param, queryid, planid)
"""
Public method
"""
def regression(self, serverId):
"""
Calculate the regression parameters of all serverId's query plans
in the repository.
"""
if self.check_serverId(serverId) == False:
if Log.error <= self.LogLevel:
print("Error: serverId '{}' is not registered.".format(serverId))
sys.exit(1)
self.__set_serverId(serverId)
self.set_log_level(self.LogLevel)
if Log.info <= self.LogLevel:
print("Info: Calculating regression parameters.")
"""
Check the grouping stat file.
"""
_grouping_seqid = self.get_seqid_from_grouping_stat(self.ServerId)
"""
Check the regression stat file.
"""
self.check_regression_dir(self.ServerId)
_regression_seqid = self.get_seqid_from_regression_stat(self.ServerId)
if Log.debug3 <= self.LogLevel:
print(
"Debug3: _grouping_seqid={} _regression_seqid={}".format(
_grouping_seqid, _regression_seqid
)
)
"""
Calculate the regression parameters.
"""
if _regression_seqid < _grouping_seqid:
for _hash_subdir in self.get_grouping_dir_list(self.ServerId):
_gsdirpath = self.get_grouping_subdir_path(self.ServerId, _hash_subdir)
if os.path.isdir(_gsdirpath):
_gsdirlist = self.get_grouping_subdir_list(
self.ServerId, _hash_subdir
)
for f in _gsdirlist:
_gpath = self.path(_gsdirpath, f)
_qp_id = str(f).split(".")
_queryid = _qp_id[0]
_planid = _qp_id[1]
if Log.debug3 <= self.LogLevel:
print("Debug3: >>>>>> gpath={}".format(_gpath))
_json_dict = self.read_plan_json(_gpath)
_reg_param = self.read_plan_json(_gpath)
self.delete_unnecessary_objects(
self.__delete_objects, _reg_param
)
"""
Calculate the regression parameters in each plan
and Store into _reg_param.
"""
self.__init_level()
self.__regression(
_json_dict["Plan"], _reg_param["Plan"], _queryid, _planid
)
"""
Write the result (regression parameters) to the regression
directory.
"""
_rsdirpath = self.get_regression_subdir_path(
self.ServerId, _hash_subdir
)
if os.path.exists(_rsdirpath) == False:
os.makedirs(_rsdirpath)
_rpath = self.path(_rsdirpath, f)
self.write_plan_json(_reg_param, _rpath)
if Log.debug3 <= self.LogLevel:
print("Debug3: Rpath={}".format(_rpath))
print("Debug3: reg_param={}".format(_reg_param))
"""Update stat file"""
self.update_regression_stat_file(self.ServerId, _grouping_seqid)
| true |
7a96c3968455cd93482d14650e46a4463e68f313
|
Python
|
zhao1701/tcvae
|
/tcvae/visualization.py
|
UTF-8
| 6,200 | 2.796875 | 3 |
[] |
no_license
|
#!/usr/bin/env python
"""
This module contains utilities for predicting with and inspecting
autoencoders models.
"""
import imageio
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .utils import check_path
def plot_img(img, figsize=(4, 4)):
plt.figure(figsize=figsize)
plt.imshow(img)
plt.axis('off')
def process_for_animation(img):
img *= 255
img = img.astype(np.uint8)
return img
def tile_multi_image_traversal(latent_traversals, num_rows):
"""
Combines independent latent traversals for different images into a
single traversal with images tiled together.
Parameters
----------
latent_traversals : np.ndarray
A 5-dimensional array with the following shape:
(traversal_resolution, num_samples, img_height, img_width,
num_channels).
num_rows : int
The number of rows of images when their traversals are tiled
together. Must be a divisor of the number of images.
Returns
-------
latent_traversals : np.ndarray
A 4-dimensional array with the following shape:
(traversal_resolution, num_rows*img_height, num_cols*img_width,
num_channels).
"""
traversal_resolution, num_samples, img_height, img_width, num_channels = \
latent_traversals.shape
assert (num_samples % num_rows == 0), (
'The number of rows of the stitched image must be an integer divisor '
'of the number of samples in the batch.')
num_cols = num_samples // num_rows
latent_traversals = latent_traversals.reshape(
traversal_resolution, num_rows, num_cols, img_height, img_width,
num_channels)
latent_traversals = latent_traversals.transpose(0, 1, 3, 2, 4, 5)
latent_traversals = latent_traversals.reshape(
traversal_resolution, num_rows * img_height, num_cols * img_width,
num_channels)
return latent_traversals
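# Shape sanity check (a sketch): tiling traversals of shape (5, 8, 16, 16, 3)
# with num_rows=2 gives num_cols=4 and an output of shape
# (5, 2*16, 4*16, 3) == (5, 32, 64, 3).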
def animate_traversals(traversals_dict, traversal_dir):
for latent_index, traversal in traversals_dict.items():
basename = 'traversal-{:0>2}.gif'.format(latent_index)
filename = traversal_dir / basename
imageio.mimsave(filename, traversal)
# def plot_loss_history(csv_file, html_file=None):
# csv_file = check_path(csv_file, path_type=str)
# df = pd.read_csv(csv_file)
# df = df.drop('epoch', axis='columns')
# metrics = df.columns
# num_metrics = len(metrics)
# colors = [
# 'blue', 'green', 'red', 'orange', 'purple', 'yellow']
# # Individual area plots for each metric
# area_plots = list()
# kdim = hv.Dimension('epoch', label='Epoch', range=(None, None))
# for index, metric in enumerate(metrics):
# label = metric.capitalize().replace('_', ' ')
# vdim = hv.Dimension(metric, label=label, range=(None, None))
# ylim = (df[metric].min(), df[metric].max())
# xlabel = 'Epoch' if index == (num_metrics - 1) else ''
# area_plot = hv.Area(
# (df.index, df[metric]), vdims=vdim, kdims=kdim)
# area_plot = area_plot.opts(
# ylim=ylim, color=colors[index], xlabel=xlabel)
# area_plots.append(area_plot)
# # Composition of multiple line plots for each metric
# line_plots = list()
# vdim = hv.Dimension('value', label='Value', range=(None, None))
# for index, metric in enumerate(metrics):
# label = metric.capitalize().replace('_', ' ')
# line_plot = hv.Curve(
# (df.index, df[metric]), vdims=vdim, kdims=kdim, label=label)
# line_plot = line_plot.opts(color=colors[index])
# line_plots.append(line_plot)
# overlay = hv.Overlay(line_plots).opts(xlabel='')
# # Create final layout
# all_plots = [overlay] + area_plots
# layout = hv.Layout(all_plots).cols(1).opts(
# opts.Area(width=800, height=200, alpha=0.2),
# opts.Curve(width=800, height=200)).opts(title='Training history')
# # Save HTML file
# if html_file is not None:
# html_file = check_path(html_file, path_type=str)
# hv.save(layout, html_file)
# return layout
# def plot_dist_history(csv_file, html_file=None):
# csv_file = check_path(csv_file, path_type=str)
# df = pd.read_csv(csv_file)
# width = 1000
# sigma_cols = df.columns[df.columns.str.contains('sigma')]
# mu_cols = df.columns[df.columns.str.contains('mu')]
# # Create bar plot with most recent latent standard deviations
# kdim = hv.Dimension('latents', label='Latent dimension')
# vdim = hv.Dimension('sigma', label='Current standard deviation')
# sigmas_latest = df[sigma_cols].iloc[-1]
# sigmas_latest = [
# (index.split('_')[-1], value) for index, value
# in sigmas_latest.iteritems()]
# sigma_bar_plot = hv.Bars(sigmas_latest, kdims=kdim, vdims=vdim)
# sigma_bar_plot = sigma_bar_plot.opts(width=width)
# # Create line plots of latent standard deviation history
# kdim = hv.Dimension('epoch', label='Epoch')
# sigma_line_plots = [
# hv.Curve(
# (df.index, df[col]),
# vdims=hv.Dimension(col, label='Standard deviation'),
# kdims=kdim, label='Latent {}'.format(index)).opts(alpha=0.5)
# for index, col in enumerate(sigma_cols)]
# sigma_line_overlay = hv.Overlay(sigma_line_plots).opts(
# opts.Curve(width=width, height=400, show_grid=True))
# # Create line plots of latent mean history
# mu_line_plots = [
# hv.Curve(
# (df.index, df[col]),
# vdims=hv.Dimension(col, label='Mean'),
# kdims=kdim, label='Latent {}'.format(index)).opts(alpha=0.5)
# for index, col in enumerate(mu_cols)]
# mu_line_overlay = hv.Overlay(mu_line_plots).opts(
# opts.Curve(width=width, height=400, show_grid=True))
# # Create composite layout
# layout = sigma_bar_plot + sigma_line_overlay + mu_line_overlay
# layout = layout.cols(1).opts(
# title='Latent Distribution History')
#     # Save HTML file
#     if html_file is not None:
#         html_file = check_path(html_file, path_type=str)
#         hv.save(layout, html_file)
#     return layout
| true |
53cd1ab4b5f083450ec003ed9c8ce954338c85ad
|
Python
|
WHJR-G12-Github/Template_C6_SAA1
|
/template_c6_saa1.py
|
UTF-8
| 203 | 4.09375 | 4 |
[] |
no_license
|
# Create a list 'numbers'
# Create a variable 'count'
# Initialize it to '0'
# Create a 'for' loop to access every element in the list
# Increment 'count' by '1'
# Print the value of 'count'
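# A possible solution (a sketch; the list values below are placeholders):
numbers = [4, 8, 15, 16, 23, 42]
count = 0
for number in numbers:
    count = count + 1
print(count)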
| true |
6c0629c92e02d265fed64805996f97e26d68f96a
|
Python
|
learning022/quality-report
|
/backend/hqlib/metric/product/size_metrics.py
|
UTF-8
| 3,053 | 2.53125 | 3 |
[] |
no_license
|
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
from hqlib.typing import MetricParameters, MetricValue
from ..metric_source_mixin import SonarMetric, SonarDashboardMetric
from ...domain import LowerIsBetterMetric, Product
class ProductLOC(SonarDashboardMetric, LowerIsBetterMetric):
""" Metric for measuring the size (in number of lines of code) of a product. """
name = 'Component omvang'
unit = 'regels code'
target_value = 5000
low_target_value = 6000
def value(self) -> MetricValue:
return self._metric_source.ncloc(self._sonar_id()) if self._metric_source else -1
class TotalLOC(SonarMetric, LowerIsBetterMetric):
""" Metric for measuring the total size (in number of lines of code) of several products. """
name = 'Totale omvang'
unit = 'regels code'
template = 'Het totaal aantal {unit} voor de producten {products} is {value} {unit}.'
target_value = 30000
# Maximum number of LOC to be eligible for 4 stars, see
# https://www.softwareimprovementgroup.com/wp-content/uploads/2018/05/20180509-SIG-TUViT-Evaluation-Criteria-Trusted-Product-Maintainability-Guidance-for-producers.pdf
# We use the lowest number (Python; 122000 LOC in 20 years = 30500 in 5 years)
low_target_value = 30500
def _parameters(self) -> MetricParameters:
parameters = super()._parameters()
products = self.__main_products()
parameters['products'] = ', '.join([product.name() for product in products])
return parameters
def value(self) -> MetricValue:
if not self._metric_source:
return -1
total = 0
for product in self.__main_products():
sonar_id = product.metric_source_id(self._metric_source)
if sonar_id:
product_size = self._metric_source.ncloc(sonar_id)
if product_size == -1:
return -1
total += product_size
return total
def recent_history(self) -> List[int]:
""" Subtract the minimum value from all values so that we can send more data to the Google Chart API. """
historic_values = [h for h in super().recent_history() if h is not None]
minimum_value = min(historic_values) if historic_values else 0
return [value - minimum_value for value in historic_values]
def __main_products(self) -> List[Product]:
""" Return the main products. """
return [product for product in self._project.products() if product.is_main()]
| true |
795c029a42ca3a93b03f409dccf1ea3138456403
|
Python
|
BlakeMcMurray/Bioinformatics-Algorithms
|
/Chapter 2/score.py
|
UTF-8
| 527 | 2.859375 | 3 |
[] |
no_license
|
#working (probably)
import countMatrix as CM
#calculates the score of a set of motifs
#breaks ties by first encounter of max nucleotide
def score(kmers):
    count_m = CM.count(kmers)
    total = 0
    tracker = 0
    for i in range(len(count_m)):
        m = max(count_m[i])  # count of the most frequent nucleotide in this column
        for j in range(len(count_m[i])):
            # skip the first occurrence of the max; everything else is a mismatch
            if count_m[i][j] == m and tracker == 0:
                tracker = 1
            else:
                total += count_m[i][j]
        tracker = 0
    return total
| true |