hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
255d6b11cbe644a12928787786f04d1940067a84 | 303 | py | Python | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='dropboxbackup',
version='0.1',
py_modules=['dropboxbackup'],
install_requires=[
'click',
'dropbox',
'simple-crypt'
],
entry_points='''
[console_scripts]
dropboxbackup=dropboxbackup:cli
''',
)
| 17.823529 | 39 | 0.574257 |
255e4c40128d8dd3b9bb2375467740bbfa0ffbee | 6,896 | py | Python | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | 5 | 2021-12-25T10:18:07.000Z | 2022-02-20T00:24:41.000Z | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | 2 | 2022-01-16T10:12:07.000Z | 2022-03-22T00:34:26.000Z | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | null | null | null | from os import path
from shutil import copyfile
import tvm
from tvm import relay
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCModel
from tvm.relay.transform import InferType, ToMixedPrecision
"""Copy pasted mostly from:
https://github.com/AndrewZhaoLuo/TVM-Sandbox/blob/bb209e8845440ed9f40af1b2580618196c939745/fp16_pass/benchmark_fp16.py#L1
Creates centerface autoscheduler log files, which are included in this repo so you
don't have to spend 24 hrs running the tuning script!
Run on a 2020, 13-inch macbook pro (m1 mac)
FP32:
Processing centerface_autoscheduler_30000kt_fp32_llvm
Execution time summary:
mean (ms) median (ms) max (ms) min (ms) std (ms)
33.8869 33.6213 35.0154 33.1292 0.7192
Output Names:
['output_0', 'output_1', 'output_2', 'output_3']
FP16:
Processing centerface_autoscheduler_10000kt_fp16_llvm
Execution time summary:
mean (ms) median (ms) max (ms) min (ms) std (ms)
22.3274 22.2959 23.4356 21.7442 0.4560
Output Names:
['output_0', 'output_1', 'output_2', 'output_3']
"""
if __name__ == "__main__":
benchmark_model(
get_centerface,
"centerface_autoscheduler_30000kt_fp16_llvm",
run_fp16_pass=True,
run_other_opts=True,
enable_autoscheduler=True,
try_nhwc_layout=True,
tuning_trials=30000,
target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
target_host="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
)
benchmark_model(
get_centerface,
"centerface_autoscheduler_30000kt_fp32_llvm",
run_fp16_pass=False,
run_other_opts=True,
enable_autoscheduler=True,
try_nhwc_layout=True,
tuning_trials=30000,
target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
target_host="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
)
benchmark_and_compile_so_and_whl(
get_centerface,
"centerface_autoscheduler_30000kt_fp16_llvm",
run_fp16_pass=True,
run_other_opts=True,
try_nhwc_layout=True,
target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
)
benchmark_and_compile_so_and_whl(
get_centerface,
"centerface_autoscheduler_30000kt_fp32_llvm",
run_fp16_pass=False,
run_other_opts=True,
try_nhwc_layout=True,
target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
)
| 32.074419 | 121 | 0.664588 |
255ead5625498b81a0e784e802611ba152b63d6e | 1,547 | py | Python | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
XJB Generate Images Module (/doodle)
Created on Sun Sep 1 16:03:16 2019
@author: user
"""
import os
import asyncio
import uuid
import tg_connection
gen_path = "D:/AndroidProjects/ScarletKindom/flandre-generator/wgan/sample.png"
inp_base = "D:/AndroidProjects/ScarletKindom/flandre-generator/wgan/"
sketchr_query = set()
| 27.625 | 85 | 0.659341 |
255eb03b3149f28db58ee09e23382f4784f486dd | 362 | py | Python | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | 1 | 2020-03-26T06:25:47.000Z | 2020-03-26T06:25:47.000Z | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | null | null | null | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | null | null | null | from rest_framework import viewsets, permissions
from leads.serializers import LeadSerializer
from leads.models import Lead
| 25.857143 | 58 | 0.756906 |
255edfec817ac332c0a59a30e33ffe4ca99dbfbc | 207 | py | Python | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | 1 | 2019-10-15T08:16:17.000Z | 2019-10-15T08:16:17.000Z | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | null | null | null | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | null | null | null | from flask import render_template
from . import main | 23 | 47 | 0.714976 |
255f08813afc83e4a9438097dc0b9eb5bb612867 | 408 | py | Python | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db import models
| 31.384615 | 86 | 0.801471 |
255f1d23b8f394dc79d9946c976e6a08c2991d2e | 18,476 | py | Python | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | 2 | 2020-11-29T17:00:52.000Z | 2022-01-06T19:24:23.000Z | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | null | null | null | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | null | null | null | import PIL.Image as Img
import numpy as np
from tqdm.notebook import tqdm
from PIL import ImageFilter
import tables
import time
import gc
"""
all the insert/append function for collage generator
_canvas_append takes the inserting operation, the rest are finding add_point logic
"""
| 54.662722 | 120 | 0.500758 |
255fc1c3062c1fbdf6dc873744212e8248b03800 | 190,066 | py | Python | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a client. By listing and beginning a watch from the returned resourceVersion, clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). 
A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
| 61.470246 | 3,325 | 0.643066 |
256135f3261bda49e4b410a35a4a8f8355d98ad8 | 722 | py | Python | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | 4 | 2020-01-01T15:22:55.000Z | 2020-01-10T09:34:26.000Z | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | 2 | 2020-01-01T15:16:02.000Z | 2020-01-02T13:56:29.000Z | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | null | null | null | import socket
import sys
IP_ADDR = "192.168.1.19"
TCP_PORT = 10000
if __name__ == "__main__":
# Create TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Associate the socket with the server address
server_address = (IP_ADDR, TCP_PORT)
print("Start TCP server at address {} on port {} ".format(server_address[0], server_address[1]))
sock.bind(server_address)
# Mode TCP server
sock.listen(1)
while True:
connection, client_address = sock.accept()
while True :
print("Connection from {} ".format(client_address))
data = connection.recv(16)
print("Data : %s" % data)
else:
connection.close()
| 26.740741 | 100 | 0.631579 |
25630ec4579c4b69b7aa7ebcd6033338a4cfed43 | 269 | py | Python | pandas/pandasReading02.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | pandas/pandasReading02.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | pandas/pandasReading02.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | import pandas as pd
countryInformation = pd.read_csv('resource/countryInformation.csv')
#looping row
#for index,row in countryInformation.iterrows():
#print(index, row['country_name'])
print(countryInformation.loc[countryInformation['country_name'] == 'india']) | 26.9 | 76 | 0.773234 |
256327adbdadb9819f932122ab31855bfe822e1d | 2,011 | py | Python | List Comprehensions/examples.py | mervatkheir/kite-python-blog-post-code | 9a331e5d327cd27c6ecd72926f3e74afd252efb5 | [
"MIT"
] | 238 | 2018-10-10T18:50:40.000Z | 2022-02-09T21:26:24.000Z | List Comprehensions/examples.py | mrrizal/kite-python-blog-post-code | 597f2d75b2ad5dda97e9b19f6e9c7195642e1739 | [
"MIT"
] | 38 | 2019-12-04T22:42:45.000Z | 2022-03-12T00:04:57.000Z | List Comprehensions/examples.py | mrrizal/kite-python-blog-post-code | 597f2d75b2ad5dda97e9b19f6e9c7195642e1739 | [
"MIT"
] | 154 | 2018-11-11T22:48:09.000Z | 2022-03-22T07:12:18.000Z | """
List Comprehensions Examples
"""
my_list = []
# my_list.append()
# my_list.extend()
"""
When to use ListComps
"""
phones = [
{
'number': '111-111-1111',
'label': 'phone',
'extension': '1234',
},
{
'number': '222-222-2222',
'label': 'mobile',
'extension': None,
}
]
my_phone_list = []
for phone in phones:
my_phone_list.append(phone['number'])
# List Comprehension
[phone['number'] for phone in phones]
"""
Advanced Usage
"""
# Buld an explicit nested list
table = [
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
]
fields = ['x', 'y', 'z']
rows = [1, 2, 3]
table = []
for r in rows:
row = []
for field in fields:
row.append(field)
table.append(row)
[field for field in fields]
[row for row in rows]
table = [[field for field in fields] for row in rows]
"""
Dictionary Comprehensions
"""
[{str(item): item} for item in [1, 2, 3, ]]
dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
double_dict1 = {k: v * 2 for (k, v) in dict1.items()}
dict_map = {
'apple' : 1,
'cherry': 2,
'earwax': 3,
}
{v:k for (k, v) in dict_map.items()}
items = dict_map.items()
"""
Logical Comparisons
"""
values = [1,2,3]
[i for i in values if i < 3]
[k for k, v in dict_map.items() if v < 3]
"""
Performance, Spongecase Example
"""
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
if index % 2 == 1:
spongecase_letters.append(letter.upper())
else:
spongecase_letters.append(letter)
spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
transformed_letter = spongecase(index, letter)
spongecase_letters.append(transformed_letter)
spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
| 15.960317 | 53 | 0.604674 |
2564c2f1d6dd5e44be1def881988d5a419b3038e | 2,549 | py | Python | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | 5 | 2019-06-20T09:54:04.000Z | 2021-06-15T04:22:49.000Z | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | null | null | null | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | 1 | 2019-04-19T04:52:34.000Z | 2019-04-19T04:52:34.000Z | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
| 33.986667 | 59 | 0.488035 |
256670e4e127db5ef91b0b78cc07a367f32674c1 | 884 | py | Python | utils/timer.py | YorkSu/hat | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | 1 | 2019-04-10T04:49:30.000Z | 2019-04-10T04:49:30.000Z | utils/timer.py | Suger131/HAT-tf2.0 | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | null | null | null | utils/timer.py | Suger131/HAT-tf2.0 | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | 1 | 2019-06-14T05:53:42.000Z | 2019-06-14T05:53:42.000Z | import time
| 31.571429 | 65 | 0.623303 |
2568aee40cfce9e5a8b21215e284c31ef6b2bd2a | 17,464 | py | Python | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 18 | 2015-09-28T07:16:38.000Z | 2021-01-20T13:52:19.000Z | """ Select only a part of the instances
.. todo: group instance selectors
"""
import random
import logging
from collections import defaultdict
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.tools.memoize_generator import MemoizeGenerator
_NODE_MAPPING = {"RandomInstanceSelection": InstanceSelectionNode,
"Reduce_Overrepresented_Class": ReduceOverrepresentedClassNode}
| 43.334988 | 88 | 0.584918 |
256a8cd6b55c2a6f3936b57c2975d63cfcb67d9a | 4,050 | py | Python | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 14 | 2015-09-05T20:20:50.000Z | 2021-04-08T08:53:20.000Z | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 6 | 2017-05-12T20:46:40.000Z | 2020-02-08T05:05:03.000Z | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 8 | 2017-02-13T15:38:53.000Z | 2020-11-11T20:16:58.000Z | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from contextlib import contextmanager
import imp
import posixpath
from zipfile import ZipFile
from click.testing import CliRunner
import pkginfo
import pytest
from six import PY3
def with_byte_compiled(paths):
""" Augment PATHS with paths of byte-compiled files.
"""
get_tag = getattr(imp, 'get_tag', None)
compiled = set()
for path in paths:
head, tail = posixpath.split(path)
root, ext = posixpath.splitext(tail)
if ext == '.py':
if get_tag:
root = '%s.%s' % (root, get_tag())
head = posixpath.join(head, '__pycache__')
compiled.add(posixpath.join(head, root + '.pyc'))
return compiled.union(paths)
| 27.739726 | 79 | 0.66716 |
256b83c7f65a2f6d348541c27824ba4aba67696c | 1,649 | py | Python | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | 1 | 2021-04-03T12:16:53.000Z | 2021-04-03T12:16:53.000Z | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | 6 | 2019-05-07T03:36:58.000Z | 2021-02-02T22:49:53.000Z | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
def lookup_action(self, action):
"""
Case insensitive lookup for all known actions. Returned in PascalCase
:param action:
:type action: str
:return:
:rtype: str
"""
return self._actions_set_case_insensitive_lookup.get(action.lower())
| 28.431034 | 116 | 0.62644 |
256b989b63c37dd38e854142d7a19f85d5f03b4f | 1,401 | py | Python | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | import pybullet as p
from gym import spaces
import pybullet_planning as pbp
import numpy as np
from diy_gym.addons.addon import Addon
| 31.133333 | 120 | 0.581727 |
256c5471eacba768e9791f30d6ef0762118cc682 | 181 | py | Python | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z |
S = "Mr John Smith"
print(solution(S))
| 12.066667 | 23 | 0.381215 |
256c54c224c3656056ad73a0292f2c0577a7fce0 | 1,612 | py | Python | ngraph/flex/flexargparser.py | NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | [
"Apache-2.0"
] | 18 | 2018-03-19T04:16:49.000Z | 2021-02-08T14:44:58.000Z | ngraph/flex/flexargparser.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 2 | 2019-04-16T06:41:49.000Z | 2019-05-06T14:08:13.000Z | ngraph/flex/flexargparser.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 11 | 2018-06-16T15:59:08.000Z | 2021-03-06T00:45:30.000Z | from __future__ import print_function
import ngraph.transformers as ngt
from ngraph.flex.names import flex_gpu_transformer_name
import argparse
| 36.636364 | 97 | 0.614144 |
256d49d818eb371b9cdddf6e67c307560654cf96 | 969 | py | Python | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 2 | 2020-11-12T03:08:07.000Z | 2021-10-04T22:09:48.000Z | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 2 | 2020-11-25T16:24:29.000Z | 2021-08-28T23:19:39.000Z | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 1 | 2020-11-12T03:08:10.000Z | 2020-11-12T03:08:10.000Z | """
Simple reduced order solver.
More of a no-op, in that it doesn't actually
perform a flux solution
"""
import numpy
from hydep.internal.features import FeatureCollection
from hydep.internal import TransportResult
from .lib import ReducedOrderSolver
| 26.916667 | 85 | 0.693498 |
25713c734ac79b5bf287eaff619cf02ebcde4535 | 449 | py | Python | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#
# produce ttGenEvent with all necessary ingredients
#
from TopQuarkAnalysis.TopEventProducers.producers.TopInitSubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.TopDecaySubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.TtGenEvtProducer_cfi import *
makeGenEvtTask = cms.Task(
initSubset,
decaySubset,
genEvt
)
makeGenEvt = cms.Sequence(makeGenEvtTask)
| 28.0625 | 79 | 0.830735 |
2571f7e0a4f394d6c21f691f7de829e3237dd090 | 8,442 | py | Python | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | 5 | 2020-09-19T18:05:08.000Z | 2022-01-23T14:55:07.000Z | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | null | null | null | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | 7 | 2020-09-19T18:05:11.000Z | 2021-12-28T02:41:12.000Z | import os
from os.path import join as pjoin
import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
try:
from .radam import RAdam
except (ImportError, ModuleNotFoundError) as err:
from radam import RAdam
try:
from torch.nn import Flatten
except ImportError:
def __init__(self, n_classes=2):
super(LinNet, self).__init__()
self._name = "linnet"
# HPF
self.hpf = HPF(1, 4, 5, padding=2)
self.group1 = Group1()
self.group2 = Group2()
self.group3 = Group3()
self.group4 = Group4()
self.group5 = Group5()
self.group6 = Group6()
self.classifier = Classifier(n_classes)
self.initialize_parameters()
def forward(self, x):
y = self.hpf(x)
g1 = self.group1(y)
g2 = self.group2(g1)
g3 = self.group3(g2)
g4 = self.group4(g3)
g5 = self.group5(g4)
g6 = self.group6(g5)
logits = self.classifier(g6)
return logits
def initialize_parameters(self):
"""
In the original paper, Lin et al.
Conv1d: Xavier uniform initializer with zero biases
"""
"""
[Original]
for m in self.modules():
if isinstance(m, HPF):
self.hpf.initialize_parameters()
elif isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight,
mode='fan_in',
nonlinearity='relu')
nn.init.constant_(m.bias.data, val=1e-3)
elif isinstance(m, nn.Linear):
# Zero mean Gaussian with std 0.01
nn.init.normal_(m.weight, 0.0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 1e-3)
"""
# Following settings is the same with that of BSN.
for m in self.modules():
if isinstance(m, HPF):
self.hpf.initialize_parameters()
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias.data)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias.data)
if __name__ == "__main__":
model = LinNet()
n_ch = 1
for i in range(1, 2):
x = torch.randn(1, n_ch, i*16000)
t_beg = time.time()
out = model(x)
t_end = time.time()
print("LinNet model output:", out)
print("Execution time:", t_end - t_beg)
# end of for
| 31.977273 | 98 | 0.47394 |
c24fdcfaa37586667c8318eb6776d1204e6b7822 | 6,043 | py | Python | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T23:29:13.000Z | 2019-11-02T23:29:13.000Z | import os
import sys
import unittest
from nose.importer import Importer
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| 35.757396 | 78 | 0.580837 |
c2516c459b4df1dceb074080d5a8ce6f229681ed | 16,278 | py | Python | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | 1 | 2021-08-17T13:22:54.000Z | 2021-08-17T13:22:54.000Z | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | null | null | null | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | null | null | null | from sklearn.base import clone
import pandas as pd
from abc import ABCMeta
from time import time
from datetime import datetime
import numpy as np
from sklearn.model_selection import ParameterGrid
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from mvmm.utils import get_seeds
from mvmm.multi_view.utils import linspace_zero_to, \
expspace_zero_to, polyspace_zero_to
from mvmm.multi_view.block_diag.graph.linalg import geigh_Lsym_bp_smallest
from mvmm.multi_view.block_diag.utils import asc_sort
from mvmm.clustering_measures import unsupervised_cluster_scores, \
several_unsupervised_cluster_scores, MEASURE_MIN_GOOD
def predict(self, X):
"""
Predict the labels for the data samples in X using trained model.
"""
return self.best_estimator_.predict(X)
def predict_proba(self, X):
"""
Predict posterior probability of each component given the data.
"""
return self.best_estimator_.predict_proba(X)
def sample(self, n_samples=1):
"""
Generate random samples from the fitted Gaussian distribution.
"""
return self.best_estimator_.sample(n_samples=n_samples)
def score(self, X, y=None):
"""
Compute the per-sample average log-likelihood of the given data X.
"""
return self.best_estimator_.score(X)
def score_samples(self, X):
"""
Compute the weighted log probabilities for each sample.
"""
return self.best_estimator_.score_samples(X)
| 37.42069 | 128 | 0.531699 |
c251ec2f4862db71edcfa85809de82aead64c14b | 812 | py | Python | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | """Delegate provider traversal tests."""
from dependency_injector import providers
| 24.606061 | 51 | 0.752463 |
c2531eebc4b5c56768575d213a86688eb0c965b8 | 161 | py | Python | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | null | null | null | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | 2 | 2020-05-31T20:40:25.000Z | 2020-07-15T16:51:55.000Z | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for RHG Compute Tools."""
__author__ = """Michael Delgado"""
__email__ = 'mdelgado@rhg.com'
__version__ = '0.2.1'
| 20.125 | 46 | 0.645963 |
c253281fece2f931537ba0aac860be0c88c05f35 | 481 | py | Python | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | 1 | 2021-05-25T02:46:42.000Z | 2021-05-25T02:46:42.000Z | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | null | null | null | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-12-19 17:24
from django.db import migrations, models
import django.db.models.deletion
| 24.05 | 117 | 0.640333 |
c254aa30204c44e620331c5c8033c1497466fa14 | 6,339 | py | Python | tests/logic/order_history_test.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | tests/logic/order_history_test.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | 1 | 2020-06-24T04:41:59.000Z | 2020-06-24T04:41:59.000Z | tests/logic/order_history_test.py | rirwin/stock_analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | import datetime
from sqlalchemy.orm import sessionmaker
from database import db
from database.order_history import OrderHistory
from stock_analysis.logic import order_history
from stock_analysis.logic.order_history import Order
from stock_analysis.logic.order_history import OrderHistoryLogic
from stock_analysis.logic.order_history import TickerDate
Session = sessionmaker(bind=db.engine)
| 31.073529 | 102 | 0.56602 |
c256ecf86fa244e6c6873a974253c22509fa427e | 3,380 | py | Python | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
from densenet_3d_model import DenseNet3D
def _build_tfrecord_dataset(directory, total_clip_num, batch_size, **params):
    '''
    Buffer the training dataset to TFRecordDataset with the following video shape
    [num_frames_per_clip, height, width, channel]
    ex: [16, 100, 120, 3]

    Returns a (features, labels) pair in the tf.estimator input_fn format:
    features is a dict with key 'video_clips', labels is the label tensor.
    '''
    print('Building dataset, number of clips: ' + str(total_clip_num))
    dataset = tf.data.TFRecordDataset(directory)
    # Shuffle buffer sized to the whole dataset, i.e. a full uniform shuffle.
    dataset = dataset.shuffle(buffer_size=total_clip_num)
    # _parser presumably deserializes one tf.Example into (clip, label); it is
    # defined elsewhere in this file -- confirm its signature accepts **params.
    dataset = dataset.map(
        map_func=
        lambda serialized_example: _parser(serialized_example, **params))
    # repeat() with no count makes the dataset loop indefinitely; the caller's
    # training loop is expected to bound the number of steps.
    dataset = dataset.repeat()
    # make_one_shot_iterator()/get_next() is the TF1 graph-mode iteration API.
    iterator = dataset.batch(batch_size=batch_size).make_one_shot_iterator()
    clips, labels = iterator.get_next()
    return {'video_clips': clips}, labels
| 34.845361 | 108 | 0.671598 |
c25812708f73bfd533c1e0436f576998dc88a5d4 | 1,075 | py | Python | indian_name/india_name.py | NeelShah18/api | 602dcd7bce5b3a54873a004e7847565c17ce9fc9 | [
"MIT"
] | null | null | null | indian_name/india_name.py | NeelShah18/api | 602dcd7bce5b3a54873a004e7847565c17ce9fc9 | [
"MIT"
] | null | null | null | indian_name/india_name.py | NeelShah18/api | 602dcd7bce5b3a54873a004e7847565c17ce9fc9 | [
"MIT"
] | null | null | null | import sqlite3
import sys
import os
import io
if __name__=='__main__':
main()
| 25.595238 | 130 | 0.580465 |
c25ccf7947b661f681ea864f696118baa98103e2 | 1,389 | py | Python | Lib/defcon/objects/lib.py | typemytype/defcon | 18dc8c22a1cf84c95f4f4e4f7bb6f033a062021c | [
"MIT"
] | null | null | null | Lib/defcon/objects/lib.py | typemytype/defcon | 18dc8c22a1cf84c95f4f4e4f7bb6f033a062021c | [
"MIT"
] | null | null | null | Lib/defcon/objects/lib.py | typemytype/defcon | 18dc8c22a1cf84c95f4f4e4f7bb6f033a062021c | [
"MIT"
] | null | null | null | from defcon.objects.base import BaseDictObject
if __name__ == "__main__":
import doctest
doctest.testmod() | 29.553191 | 79 | 0.673866 |
c25ef6c3bd3cdff08d9b5795b973809ddeedca03 | 93 | py | Python | Vizard/Presenter/apps.py | styinx/Vizard | f73e8478fef7017da65ed965932b4c141fef1bdd | [
"MIT"
] | null | null | null | Vizard/Presenter/apps.py | styinx/Vizard | f73e8478fef7017da65ed965932b4c141fef1bdd | [
"MIT"
] | null | null | null | Vizard/Presenter/apps.py | styinx/Vizard | f73e8478fef7017da65ed965932b4c141fef1bdd | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.5 | 33 | 0.763441 |
c25f9adb65d5dc5bc38b8b33443e4276d20956b1 | 685 | py | Python | ATM/atm_function.py | nouranHnouh/FormusWorkshop- | 7f69b9d2226209adcc6ecb208ac426eec7e86d2b | [
"MIT"
] | null | null | null | ATM/atm_function.py | nouranHnouh/FormusWorkshop- | 7f69b9d2226209adcc6ecb208ac426eec7e86d2b | [
"MIT"
] | null | null | null | ATM/atm_function.py | nouranHnouh/FormusWorkshop- | 7f69b9d2226209adcc6ecb208ac426eec7e86d2b | [
"MIT"
] | null | null | null | #this program is atm that withdraw any money amount
#allowed papers: 100,50,10,5, and the rest of requests
balance = 500
balance = withdraw(balance, 277)
balance = withdraw(balance, 30)
balance = withdraw(balance, 5)
balance = withdraw(balance, 500)
| 13.7 | 55 | 0.659854 |
c2611c72bea7ee655df6077231d5fe5c6f79d18c | 2,973 | py | Python | 2021/day.3.py | craignicol/adventofcode | 41ea3325adeb373dccc70d36a9a685eaf13359eb | [
"Apache-2.0"
] | null | null | null | 2021/day.3.py | craignicol/adventofcode | 41ea3325adeb373dccc70d36a9a685eaf13359eb | [
"Apache-2.0"
] | null | null | null | 2021/day.3.py | craignicol/adventofcode | 41ea3325adeb373dccc70d36a9a685eaf13359eb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from statistics import mode
# Pass/fail counters -- presumably updated by test_cases(), which is defined
# outside this excerpt; confirm against the full file.
tests_failed = 0
tests_executed = 0
# Sample diagnostic report (one binary reading per line) used by the tests.
example1= """00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010""".split('\n')
# Powers of two, most-significant first, for converting a sequence of binary
# digits to an integer; covers readings up to 14 bits wide.
powers = [8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
if __name__ == "__main__":
test_cases()
print(execute()) | 29.147059 | 95 | 0.667003 |
c263e873beab15ef3148ddea30b0dcbd4c5dcb1c | 6,194 | py | Python | src/propagation.py | haoningwu3639/EE229_Project_VideoStabilization | 74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d | [
"MIT"
] | 1 | 2021-06-13T06:32:29.000Z | 2021-06-13T06:32:29.000Z | src/propagation.py | haoningwu3639/EE229_Project_VideoStabilization | 74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d | [
"MIT"
] | null | null | null | src/propagation.py | haoningwu3639/EE229_Project_VideoStabilization | 74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from scipy.signal import medfilt
from utils import init_dict, l2_dst
def keypoint_transform(H, keypoint):
    """Apply a planar homography to a single 2-D point.

    Input:
        H: homography matrix of dimension (3*3)
        keypoint: the (x, y) point to be transformed
    Output:
        a 1-D array [x', y'], i.e. the dehomogenised result of H @ (x, y, 1)
    """
    # Lift the point into homogeneous coordinates (flattens a [[x, y]] input).
    homogeneous = np.append(keypoint, 1)
    hx, hy, hw = np.dot(H, homogeneous)
    # Divide by the homogeneous coordinate to get back to the image plane.
    return np.array([hx / hw, hy / hw])
def propagate(input_points, output_points, input_frame, PATCH_SIZE=16, PROP_R=300):
    """
    Estimate per-vertex motion over a PATCH_SIZE grid of input_frame.

    Input:
        input_points: feature points in input_frame matched with output_frame
        output_points: the corresponding matched feature points
        input_frame: frame whose vertex-grid motion is being estimated
        PATCH_SIZE: pixel spacing of the vertex grid
        PROP_R: radius within which a feature point influences a vertex
    Output:
        x_motion_patch, y_motion_patch: Motion patch in x-direction and y-direction for input_frame
    """
    # Grid dimensions: frame is indexed (height, width) = (rows, cols) cells.
    cols, rows = input_frame.shape[1] // PATCH_SIZE, input_frame.shape[0] // PATCH_SIZE
    x_motion = init_dict(cols, rows)
    y_motion = init_dict(cols, rows)
    temp_x_motion = init_dict(cols, rows)
    temp_y_motion = init_dict(cols, rows)
    # pre-warping with global homography
    # NOTE(review): cv2.findHomography returns (H, mask); the np.array wrapper
    # around the pair appears redundant before unpacking -- confirm.
    H, _ = np.array(cv2.findHomography(input_points, output_points, cv2.RANSAC))
    for i in range(rows):
        for j in range(cols):
            # Global-motion component: vertex minus its homography image.
            point = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
            point_trans = keypoint_transform(H, point)
            x_motion[i, j] = point.flatten()[0] - point_trans[0]
            y_motion[i, j] = point.flatten()[1] - point_trans[1]
    # distribute feature motion vectors
    for i in range(rows):
        for j in range(cols):
            vertex = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
            for in_point, out_point in zip(input_points, output_points):
                # velocity = point - feature point in current frame
                distance = l2_dst(in_point, vertex)
                if distance < PROP_R:
                    point_trans = keypoint_transform(H, in_point)
                    # NOTE(review): this overwrites the cell with a one-element
                    # list, so only the LAST nearby feature survives; .append()
                    # was presumably intended -- confirm before relying on it.
                    temp_x_motion[i, j] = [out_point[0] - point_trans[0]]
                    temp_y_motion[i, j] = [out_point[1] - point_trans[1]]
    # Apply one Median Filter on obtained motion for each vertex
    # NOTE(review): assumes init_dict seeds every cell with a non-empty list;
    # a vertex with no nearby feature would otherwise make the median index
    # below fail -- confirm against utils.init_dict.
    x_motion_patch = np.zeros((rows, cols), dtype=float)
    y_motion_patch = np.zeros((rows, cols), dtype=float)
    for key in x_motion.keys():
        temp_x_motion[key].sort()
        temp_y_motion[key].sort()
        x_motion_patch[key] = x_motion[key] + temp_x_motion[key][len(temp_x_motion[key]) // 2]
        y_motion_patch[key] = y_motion[key] + temp_y_motion[key][len(temp_y_motion[key]) // 2]
    # Apply the other Median Filter over the motion patch for outliers
    x_motion_patch = medfilt(x_motion_patch, kernel_size=[3, 3])
    y_motion_patch = medfilt(y_motion_patch, kernel_size=[3, 3])
    return x_motion_patch, y_motion_patch
def vertex_motion_path(x_path, y_path, x_motion_patch, y_motion_patch):
    """Append one accumulated-motion frame to the vertex motion paths.

    Input:
        x_path, y_path: motion paths of shape (rows, cols, t), where the last
            axis is time
        x_motion_patch, y_motion_patch: per-vertex motion of shape (rows, cols)
            for the newest frame
    Output:
        x_paths, y_paths: the paths extended along the time axis to (rows,
        cols, t + 1), where the new slice is the last position plus the motion
    """
    def extend(path, motion):
        # New position = latest position along the time axis + this frame's motion.
        latest = path[:, :, -1] + motion
        return np.concatenate((path, latest[:, :, np.newaxis]), axis=2)

    return extend(x_path, x_motion_patch), extend(y_path, y_motion_patch)
def warp_frame(frame, x_motion_patch, y_motion_patch, PATCH_SIZE=16):
    """
    Warp a frame according to a per-vertex motion grid.

    Input:
        frame is the current frame
        x_motion_patch: the motion_patch to be warped on frame along x-direction
        y_motion_patch: the motion patch to be warped on frame along y-direction
    Output:
        new_frame: a warped frame according to given motion patches x_motion_patch, y_motion_patch

    For each PATCH_SIZE x PATCH_SIZE cell, a local homography is fitted from
    the cell's four corners to the motion-displaced corners, and the remap
    coordinates of every pixel in the cell are filled from it.
    """
    # map_x/map_y hold, for every output pixel, the source coordinate sampled
    # by cv2.remap (float32 as required by OpenCV).
    map_x = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
    map_y = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
    for i in range(x_motion_patch.shape[0] - 1):
        for j in range(x_motion_patch.shape[1] - 1):
            # Pixel extents of this grid cell.
            x, y = int(j * PATCH_SIZE), int(i * PATCH_SIZE)
            x_next, y_next = int((j+1) * PATCH_SIZE), int((i+1) * PATCH_SIZE)
            # Cell corners before displacement...
            src = np.array(
                [[x, y], [x, y_next], [x_next, y], [x_next, y_next]]
            )
            # ...and after adding the per-corner motion vectors.
            dst = np.array(
                [[x + x_motion_patch[i, j], y + y_motion_patch[i, j]],
                [x + x_motion_patch[i+1, j], y_next + y_motion_patch[i+1, j]],
                [x_next + x_motion_patch[i, j+1], y + y_motion_patch[i, j+1]],
                [x_next + x_motion_patch[i+1, j+1], y_next + y_motion_patch[i+1, j+1]]]
            )
            H, _ = cv2.findHomography(src, dst, cv2.RANSAC)
            for k in range(y, y_next):
                for l in range(x, x_next):
                    # Map each pixel (l, k) through the cell homography.
                    x_res, y_res, w_res = np.dot(H, np.append(np.array([[l, k]]), 1))
                    if w_res != 0:
                        x_res, y_res = x_res / (w_res*1.0), y_res / (w_res*1.0)
                    else:
                        # Degenerate homogeneous coordinate: keep the pixel fixed.
                        x_res, y_res = l, k
                    map_x[k, l] = x_res
                    map_y[k, l] = y_res
    # repeat motion vectors for remaining frame in x-direction
    # NOTE(review): the source column index below uses shape[0] (rows); for a
    # non-square vertex grid shape[1] (cols) looks intended -- confirm.
    for j in range(PATCH_SIZE*x_motion_patch.shape[1], map_x.shape[1]):
        map_x[:, j] = map_x[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
        map_y[:, j] = map_y[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
    # repeat motion vectors for remaining frame in y-direction
    for i in range(PATCH_SIZE*x_motion_patch.shape[0], map_x.shape[0]):
        map_x[i, :] = map_x[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
        map_y[i, :] = map_y[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
    # deforms patch
    new_frame = cv2.remap(frame, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return new_frame
| 37.768293 | 121 | 0.628996 |
c264fe174bb79ece1406b41e4cb858d0735178ff | 1,140 | py | Python | plugins/cuckoo/komand_cuckoo/actions/vpn_status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/cuckoo/komand_cuckoo/actions/vpn_status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/cuckoo/komand_cuckoo/actions/vpn_status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
| 16.764706 | 57 | 0.490351 |
c267cbc162d3355bf7a9a7568e5120c20f9a8b94 | 15,306 | py | Python | src/utils/scout_compiler.py | CheckPointSW/Scour | 2f9391da45803b44181f7973e4e7c93bc2208252 | [
"MIT"
] | 152 | 2018-08-13T05:48:59.000Z | 2022-03-30T15:18:44.000Z | src/utils/scout_compiler.py | CheckPointSW/Scour | 2f9391da45803b44181f7973e4e7c93bc2208252 | [
"MIT"
] | 7 | 2019-08-29T15:24:41.000Z | 2021-05-04T06:38:49.000Z | src/utils/scout_compiler.py | CheckPointSW/Scour | 2f9391da45803b44181f7973e4e7c93bc2208252 | [
"MIT"
] | 21 | 2018-08-13T19:11:29.000Z | 2022-02-28T15:25:47.000Z | import os
import struct
from .compilation.scout_flags import *
from .compilation.scout_files import *
from .compilation.arc_intel import arcIntel
from .compilation.arc_arm import arcArm, arcArmThumb
from .compilation.arc_mips import arcMips
from .context_creator import *
###################################
## Architecture Configurations ##
###################################
# Architecture identifiers are enum-like string names so that new
# architectures can be added as features are extended.
ARC_INTEL = arcIntel.name()
ARC_ARM = arcArm.name()
ARC_ARM_THUMB = arcArmThumb.name()
ARC_MIPS = arcMips.name()
# Maps an architecture name to the class implementing its compilation logic.
arc_factory = {
    ARC_INTEL: arcIntel,
    ARC_ARM: arcArm,
    ARC_ARM_THUMB: arcArmThumb,
    ARC_MIPS: arcMips,
}
# Maps an architecture name to the tuple of compilation flags it needs; the
# flag_arc_* constants presumably come from the scout_flags star import above
# -- confirm. Thumb mode needs both the ARM flag and the Thumb flag.
arc_flags = {
    ARC_INTEL: (flag_arc_intel,),
    ARC_ARM: (flag_arc_arm,),
    ARC_ARM_THUMB: (flag_arc_arm, flag_arc_thumb),
    ARC_MIPS: (flag_arc_mips,),
}
#################
## Utilities ##
#################
def systemLine(line, logger):
    """Issue (and debug trace) a system command line.

    Args:
            line (string): cmd line to be executed
            logger (logger, elementals): logger to be used by the function (elementals)

    Return Value:
            The exit status returned by os.system(), so callers can check
            whether the command succeeded (previously it was discarded)
    """
    logger.debug(line)
    return os.system(line)
###############################
## The full Scout Compiler ##
###############################
| 43.731429 | 163 | 0.616425 |
c2683140e8ef3e0e1fef1af02d286989992c9f33 | 6,870 | py | Python | python/paddle/fluid/tests/custom_op/extension_utils.py | dingsiyu/Paddle | 2c974cc316bce4054bdf28d1f6b4c3bb8bd99d75 | [
"Apache-2.0"
] | 1 | 2021-03-16T13:40:07.000Z | 2021-03-16T13:40:07.000Z | python/paddle/fluid/tests/custom_op/extension_utils.py | dingsiyu/Paddle | 2c974cc316bce4054bdf28d1f6b4c3bb8bd99d75 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/custom_op/extension_utils.py | dingsiyu/Paddle | 2c974cc316bce4054bdf28d1f6b4c3bb8bd99d75 | [
"Apache-2.0"
] | 1 | 2021-03-16T13:40:08.000Z | 2021-03-16T13:40:08.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import copy
import glob
import warnings
import subprocess
import paddle
IS_WINDOWS = os.name == 'nt'
# TODO(Aurelius84): Need check version of gcc and g++ is same.
# After CI path is fixed, we will modify into cc.
NVCC_COMPILE_FLAGS = [
'-ccbin', 'gcc', '-DPADDLE_WITH_CUDA', '-DEIGEN_USE_GPU',
'-DPADDLE_USE_DSO', '-Xcompiler', '-fPIC', '-w', '--expt-relaxed-constexpr',
'-O3', '-DNVCC'
]
def prepare_unix_cflags(cflags):
    """
    Prepare all necessary compiled flags for nvcc compiling CUDA files.

    The nvcc base flags come first, then the caller-supplied flags, then the
    GPU-architecture flags derived from them. A new list is returned.
    """
    arch_flags = get_cuda_arch_flags(cflags)
    return NVCC_COMPILE_FLAGS + cflags + arch_flags
def add_std_without_repeat(cflags, compiler_type, use_std14=False):
    """
    Append -std=c++11/14 in cflags if without specific it before.

    ``cflags`` is modified in place; nothing is returned.
    """
    # MSVC spells the standard switch differently from gcc/clang/nvcc.
    if compiler_type == 'msvc':
        prefix = '/std:'
    else:
        prefix = '-std='
    already_specified = any(prefix in flag for flag in cflags)
    if not already_specified:
        cflags.append(prefix + ('c++14' if use_std14 else 'c++11'))
def get_cuda_arch_flags(cflags):
    """
    For an arch, say "6.1", the added compile flag will be
    ``-gencode=arch=compute_61,code=sm_61``.
    For an added "+PTX", an additional
    ``-gencode=arch=compute_xx,code=compute_xx`` is added.
    """
    # TODO(Aurelius84): derive the -gencode flags from cflags; until then no
    # architecture-specific flags are emitted.
    return list()
def normalize_extension_kwargs(kwargs, use_cuda=False):
    """
    Normalize include_dirs, library_dir and other attributes in kwargs.

    Each list-valued key is created if missing and extended with the values
    Paddle extensions require; ``kwargs`` is mutated and also returned.
    """
    assert isinstance(kwargs, dict)

    def extend_list(key, extra):
        # Merge ``extra`` into the (possibly missing) list stored under ``key``.
        values = kwargs.get(key, [])
        values.extend(extra)
        kwargs[key] = values

    # append necessary include dir path of paddle
    extend_list('include_dirs', find_paddle_includes(use_cuda))
    # append necessary lib path of paddle
    extend_list('library_dirs', find_paddle_libraries(use_cuda))
    # add runtime library dirs
    extend_list('runtime_library_dirs', find_paddle_libraries(use_cuda))
    # append compile flags
    extend_list('extra_compile_args', ['-g'])
    # append link flags
    extend_list('extra_link_args', ['-lpaddle_framework', '-lcudart'])

    kwargs['language'] = 'c++'
    return kwargs
def find_paddle_includes(use_cuda=False):
    """
    Return Paddle necessary include dir path.
    """
    # pythonXX/site-packages/paddle/include
    base_dir = paddle.sysconfig.get_include()
    # The bundled third-party headers live below the main include directory.
    return [base_dir, os.path.join(base_dir, 'third_party')]
def find_cuda_home():
    """
    Use heuristic method to find cuda path

    Search order:
      1. the ``CUDA_HOME`` / ``CUDA_PATH`` environment variables;
      2. two directories above the ``nvcc`` binary found on PATH;
      3. a platform default (``/usr/local/cuda`` on Unix, the newest NVIDIA
         GPU Computing Toolkit directory on Windows).

    Returns the CUDA root directory, or None (with a warning when paddle was
    compiled with CUDA) if no existing directory could be located.
    """
    # step 1. find in $CUDA_HOME or $CUDA_PATH
    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')

    # step 2. find path by `which nvcc`
    if cuda_home is None:
        which_cmd = 'where' if IS_WINDOWS else 'which'
        try:
            with open(os.devnull, 'w') as devnull:
                nvcc_path = subprocess.check_output(
                    [which_cmd, 'nvcc'], stderr=devnull)
                if sys.version_info[0] > 2:
                    nvcc_path = nvcc_path.decode()
                nvcc_path = nvcc_path.rstrip('\r\n')
                # for example: /usr/local/cuda/bin/nvcc
                cuda_home = os.path.dirname(os.path.dirname(nvcc_path))
        except (subprocess.CalledProcessError, OSError):
            # nvcc is not on PATH; fall back to the conventional install dirs.
            if IS_WINDOWS:
                # search from default NVIDIA GPU path
                candidate_paths = glob.glob(
                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
                if len(candidate_paths) > 0:
                    cuda_home = candidate_paths[0]
            else:
                cuda_home = "/usr/local/cuda"

    # step 3. check whether path is valid. Guard against cuda_home still being
    # None (e.g. Windows with no toolkit installed), which previously made
    # os.path.exists() raise a TypeError.
    if ((cuda_home is None or not os.path.exists(cuda_home))
            and paddle.is_compiled_with_cuda()):
        cuda_home = None
        warnings.warn(
            "Not found CUDA runtime, please use `export CUDA_HOME= XXX` to specific it."
        )

    return cuda_home
def find_paddle_libraries(use_cuda=False):
    """
    Return Paddle necessary library dir path.
    """
    # pythonXX/site-packages/paddle/libs
    paddle_lib_dirs = [paddle.sysconfig.get_lib()]
    if use_cuda:
        # NOTE(review): find_cuda_includes is not defined anywhere in this
        # module's visible portion -- this branch would raise NameError when
        # use_cuda=True; confirm where it is supposed to come from.
        cuda_dirs = find_cuda_includes()
        paddle_lib_dirs.extend(cuda_dirs)
    return paddle_lib_dirs
def append_necessary_flags(extra_compile_args, use_cuda=False):
    """
    Add necessary compile flags for gcc/nvcc compiler.

    ``extra_compile_args`` is extended in place with ``-std=c++11`` and, when
    ``use_cuda`` is True, the nvcc base flags. Nothing is returned.
    """
    necessary_flags = ['-std=c++11']

    if use_cuda:
        necessary_flags.extend(NVCC_COMPILE_FLAGS)
    # Bug fix: the computed flags were previously built but never added to
    # extra_compile_args, making this function a silent no-op.
    extra_compile_args.extend(necessary_flags)
def get_build_directory(name):
    """
    Return paddle extension root directory, default specific by `PADDLE_EXTENSION_DIR`

    Note: ``name`` is currently unused; it is kept for interface compatibility.
    """
    # Bug fix: was `os.envsiron.get(...)`, which raised AttributeError on
    # every call.
    root_extensions_directory = os.environ.get('PADDLE_EXTENSION_DIR')
    if root_extensions_directory is None:
        # TODO(Aurelius84): consider wind32/macOs
        here = os.path.abspath(__file__)
        # NOTE(review): realpath(here) resolves this *file*, not a directory;
        # presumably os.path.dirname() was intended -- confirm before relying
        # on the fallback.
        root_extensions_directory = os.path.realpath(here)
        warnings.warn(
            "$PADDLE_EXTENSION_DIR is not set, using path: {} by default."
            .format(root_extensions_directory))

    return root_extensions_directory
c268a7da87d9bb0526fdc4df929e276c63494567 | 8,095 | py | Python | HW_new_contact.py | AnastasiiaSarkisova/Home-Work-1 | 9021e9323a08f9dd96323b763aa0b0e689df24f8 | [
"Apache-2.0"
] | null | null | null | HW_new_contact.py | AnastasiiaSarkisova/Home-Work-1 | 9021e9323a08f9dd96323b763aa0b0e689df24f8 | [
"Apache-2.0"
] | null | null | null | HW_new_contact.py | AnastasiiaSarkisova/Home-Work-1 | 9021e9323a08f9dd96323b763aa0b0e689df24f8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
from group import Group
from contact import Contact
if __name__ == '__main__':
unittest.main()
| 51.891026 | 112 | 0.652625 |
c268fccca788ac0d257bd73459ca8edc55f80559 | 2,189 | py | Python | SNPdata/ExcludeDuplicates.py | ejh243/BrainFANS | 903b30516ec395e0543d217c492eeac541515197 | [
"Artistic-2.0"
] | null | null | null | SNPdata/ExcludeDuplicates.py | ejh243/BrainFANS | 903b30516ec395e0543d217c492eeac541515197 | [
"Artistic-2.0"
] | 2 | 2022-02-16T09:35:08.000Z | 2022-03-29T08:06:32.000Z | SNPdata/ExcludeDuplicates.py | ejh243/BrainFANS | 903b30516ec395e0543d217c492eeac541515197 | [
"Artistic-2.0"
] | null | null | null | ## using output of king and plink --miss identify worse performing duplicate for exclusion
## this script is run as follows
## python ExcludeDuplicates.py <king output> <plink --miss output> <output file>
## NOTE: Python 2 script (print statements and the builtin file()); do not run under Python 3.
import sys
## Load per-sample missingness (plink --miss output): key "FID^IID" -> F_MISS column.
print "Reading in sample missingness from", sys.argv[2]
sampleMissing = file(sys.argv[2], "r")
sampleMissing = sampleMissing.readlines()
sMissing = {}
for line in sampleMissing:
	line = line.strip().split()
	sMissing[line[0]+"^"+line[1]] = line[4]
print "Missingness info read for ", str(len(sMissing)), "samples"
print "\n\n"
## Build a bidirectional map of duplicate pairs reported by king.
print "Identifying duplicates from", sys.argv[1]
dupSamplesFile = file(sys.argv[1], "r")
dupSamplesFile = dupSamplesFile.readlines()
dupSamples = {}
for line in dupSamplesFile:
	line = line.strip().split()
	sample1 = line[0] + "^" + line[1]
	sample2 = line[2] + "^" + line[3]
	## need to save both ways around in case 3 way+ duplicates
	if sample1 in dupSamples:
		oldEntry = dupSamples[sample1]
		newEntry = oldEntry + [sample2]
		dupSamples[sample1] = newEntry
	else:
		dupSamples[sample1] = [sample2]
	if sample2 in dupSamples:
		oldEntry = dupSamples[sample2]
		newEntry = oldEntry + [sample1]
		dupSamples[sample2] = newEntry
	else:
		dupSamples[sample2] = [sample1]
## create unique list of duplicate samples
## (each group is sorted and joined with ";" so identical groups deduplicate)
fullList = []
for item in dupSamples:
	allDups = dupSamples[item] + [item]
	allDups.sort()
	fullList = fullList + [";".join(allDups)]
uniqList = []
unique = [uniqList.append(x) for x in fullList if x not in uniqList]
print str(len(uniqList)),"groups of duplicate samples"
uniqList.sort()
print "Writing list of samples to exclude to", sys.argv[3]
output = file(sys.argv[3], "w")
## find sample with least missingness and exclude all others
## NOTE(review): missingness values are kept as strings, so min() below
## compares lexicographically; values in scientific notation (e.g. 1e-05)
## would sort incorrectly - confirm the input format before relying on this.
for item in uniqList:
	samples = item.split(";")
	list_values = []
	for element in samples:
		if element in sMissing:
			list_values = list_values + [sMissing[element]]
		else:
			print "Couldn't find sample: ", element
	## keep the member with the lowest missingness; write the rest (FID\tIID)
	## to the exclusion file
	if len(list_values) != 0:
		indexToKeep = list_values.index(min(list_values))
		samples.remove(samples[indexToKeep])
	for each in samples:
		output.write("\t".join(each.split("^")) + "\n")
output.close()
| 25.752941 | 91 | 0.692097 |
c26ac1a91dabdb0034c28b5241ea7cfad78d438f | 3,375 | py | Python | jscatter/jscatter_test.py | flekschas/jupyter-scatter | 550eceb2311b0394caad83dbb399ed2f29e55af6 | [
"Apache-2.0"
] | 23 | 2021-02-03T02:05:47.000Z | 2022-03-17T14:53:39.000Z | jscatter/jscatter_test.py | manzt/jupyter-scatter | c38f94abfb655e03f407e7fcec80a883439796b5 | [
"Apache-2.0"
] | 5 | 2021-02-04T22:19:35.000Z | 2022-03-07T04:49:31.000Z | jscatter/jscatter_test.py | manzt/jupyter-scatter | c38f94abfb655e03f407e7fcec80a883439796b5 | [
"Apache-2.0"
] | 1 | 2021-06-15T14:14:47.000Z | 2021-06-15T14:14:47.000Z | import numpy as np
import pandas as pd
from .jscatter import Scatter, component_idx_to_name
from .utils import minmax_scale
| 31.25 | 105 | 0.647704 |
c26b638d6a13eb8cf7404de0290463e08f694482 | 10,917 | py | Python | py-world/world/main.py | Coastchb/Tacotron-2 | 0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c | [
"MIT"
] | null | null | null | py-world/world/main.py | Coastchb/Tacotron-2 | 0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c | [
"MIT"
] | null | null | null | py-world/world/main.py | Coastchb/Tacotron-2 | 0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c | [
"MIT"
] | null | null | null | import logging
import sys
from typing import Iterable
# 3rd party imports
import numpy as np
# import matplotlib.pyplot as plt
from scipy.io.wavfile import read as wavread
# local imports
from .dio import dio
from .stonemask import stonemask
from .harvest import harvest
from .cheaptrick import cheaptrick
from .d4c import d4c
from .d4cRequiem import d4cRequiem
from .get_seeds_signals import get_seeds_signals
from .synthesis import synthesis
from .synthesisRequiem import synthesisRequiem
from .swipe import swipe
| 42.644531 | 152 | 0.582944 |
c26b881427d152a0f3576dc1d7e1e0a52917ad82 | 8,165 | py | Python | src/universal_build/helpers/build_docker.py | prototypefund/universal-build | 809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7 | [
"MIT"
] | 17 | 2020-11-20T15:58:02.000Z | 2022-02-06T19:18:20.000Z | src/universal_build/helpers/build_docker.py | prototypefund/universal-build | 809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7 | [
"MIT"
] | 3 | 2021-02-17T13:47:44.000Z | 2021-10-14T13:53:15.000Z | src/universal_build/helpers/build_docker.py | prototypefund/universal-build | 809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7 | [
"MIT"
] | 6 | 2020-11-23T09:51:26.000Z | 2022-02-11T13:46:57.000Z | """Utilities to help building Docker images."""
import argparse
import os
import subprocess
from typing import List, Optional
from universal_build import build_utils
FLAG_DOCKER_IMAGE_PREFIX = "docker_image_prefix"
def parse_arguments(
    input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None
) -> dict:
    """Parse all arguments and return a sanitized & augmented dict of arguments.

    Sanitized means that, for example, the version is already checked and set
    according to our build guidelines. If arguments are not valid, the script
    run is terminated.

    Args:
        input_args (List[str], optional): Arguments used instead of the ones
            passed to the process. Defaults to `None`.
        argument_parser (argparse.ArgumentParser, optional): A parser passed as
            a parent to the default ArgumentParser so that additional flags can
            be used besides the default ones.

    Returns:
        dict: The parsed default arguments, already checked for validity.
    """
    parser = argument_parser if argument_parser is not None else argparse.ArgumentParser()

    # Flag names use dashes on the command line, underscores internally.
    docker_prefix_flag = "--" + FLAG_DOCKER_IMAGE_PREFIX.replace("_", "-")
    parser.add_argument(
        docker_prefix_flag,
        required=False,
        default="",
        help="Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. When leaving blank, the default Dockerhub Repository is used.",
    )
    return build_utils.parse_arguments(input_args=input_args, argument_parser=parser)
def check_image(
    image: str, trivy: bool = True, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
    """Run vulnerability checks on Dockerimage.

    Args:
        image (str): The name of the docker image to check.
        trivy (bool, optional): Activate trivy vulnerability check. Defaults to `True`.
        exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.

    Returns:
        subprocess.CompletedProcess: Result of the trivy run, or a placeholder
            result with returncode -1 when no scanner was executed.
    """
    build_utils.log("Run vulnerability checks on docker image:")

    trivy_usable = trivy and build_utils.command_exists(
        "trivy", exit_on_error=exit_on_error
    )
    if not trivy_usable:
        # No scanner ran; signal that with a dummy CompletedProcess.
        return subprocess.CompletedProcess(args="", returncode=-1, stdout="", stderr="")

    return build_utils.run(
        f"trivy image --timeout=20m0s --exit-code 1 --severity HIGH,CRITICAL {image}",
        exit_on_error=exit_on_error,
    )


# TODO: Implement dockl container scan
def lint_dockerfile(
    hadolint: bool = True, dockerfile: str = "Dockerfile", exit_on_error: bool = True
) -> None:
    """Run hadolint on the Dockerfile.

    Args:
        hadolint (bool, optional): Activate hadolint dockerfile linter. Defaults to `True`.
        dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` will be used.
        exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.
    """
    build_utils.log("Run linters and style checks:")

    if not hadolint:
        return
    if not build_utils.command_exists("hadolint", exit_on_error=exit_on_error):
        return

    # Pick up the repository's hadolint configuration when one is present.
    config_file_arg = "--config=.hadolint.yml" if os.path.exists(".hadolint.yml") else ""
    build_utils.run(
        f"hadolint {config_file_arg} {dockerfile}", exit_on_error=exit_on_error
    )
def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
    """Get a valid versioned image name.

    Args:
        name (str): Name of the docker image.
        tag (str): Version to use for the tag.
        image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.

    Returns:
        str: a valid docker image name based on: prefix/name:tag
    """
    image = f"{name.strip()}:{tag.strip()}"
    if image_prefix:
        # Normalize the prefix so exactly one slash separates it from the name.
        image = image_prefix.strip().rstrip("/") + "/" + image
    return image
def build_docker_image(
    name: str,
    version: str,
    build_args: str = "",
    docker_image_prefix: str = "",
    dockerfile: Optional[str] = None,
    additional_build_args: str = "",
    exit_on_error: bool = True,
) -> subprocess.CompletedProcess:
    """Build a docker image from a Dockerfile in the working directory.

    Args:
        name (str): Name of the docker image.
        version (str): Version to use as tag.
        build_args (str, optional): Add additional build arguments for docker build.
        docker_image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
        dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` wil be used.
        additional_build_args (str, optional): Currently unused; kept for interface compatibility.
        exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.

    Returns:
        subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker build` command.
    """
    # Check if docker exists on the system
    build_utils.command_exists("docker", exit_on_error=exit_on_error)

    versioned_tag = get_image_name(name=name, tag=version)
    latest_tag = get_image_name(name=name, tag="latest")

    # Bug fix: the -f option was previously concatenated without a trailing
    # space, producing a malformed command like `-f Dockerfile-t image:tag`.
    dockerfile_command = ""
    if dockerfile:
        dockerfile_command = "-f " + dockerfile + " "

    completed_process = build_utils.run(
        "docker build "
        + dockerfile_command
        + "-t "
        + versioned_tag
        + " -t "
        + latest_tag
        + " "
        + build_args
        + " ./",
        exit_on_error=exit_on_error,
    )

    if completed_process.returncode > 0:
        build_utils.log(f"Failed to build Docker image {versioned_tag}")
        return completed_process

    if docker_image_prefix:
        # Pre-tag the image with its remote (prefixed) name so a later push
        # does not need to re-tag.
        remote_versioned_tag = get_image_name(
            name=name, tag=version, image_prefix=docker_image_prefix
        )
        build_utils.run(
            "docker tag " + versioned_tag + " " + remote_versioned_tag,
            exit_on_error=exit_on_error,
        )

    return completed_process
def release_docker_image(
    name: str, version: str, docker_image_prefix: str, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
    """Push a Docker image to a repository.

    Args:
        name (str): The name of the image. Must not be prefixed!
        version (str): The tag used for the image.
        docker_image_prefix (str): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
        exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.

    Returns:
        subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker push ...` command.
    """
    # Pushing requires a working docker client.
    build_utils.command_exists("docker", exit_on_error=exit_on_error)

    # A remote prefix is mandatory for pushing; bail out early without one.
    if not docker_image_prefix:
        build_utils.log(
            "The flag --docker-image-prefix cannot be blank when pushing a Docker image."
        )
        build_utils.exit_process(build_utils.EXIT_CODE_GENERAL)

    local_tag = get_image_name(name=name, tag=version)
    remote_tag = get_image_name(
        name=name, tag=version, image_prefix=docker_image_prefix
    )

    # Re-tag the local image under the remote repository name, then push it.
    build_utils.run(
        "docker tag " + local_tag + " " + remote_tag,
        exit_on_error=exit_on_error,
    )
    completed_process = build_utils.run(
        "docker push " + remote_tag, exit_on_error=exit_on_error
    )
    if completed_process.returncode > 0:
        build_utils.log(f"Failed to release Docker image {name}:{version}")

    # Only push version with latest tag if no suffix is added (pre-release)
    if "-" not in version:
        remote_latest_tag = get_image_name(
            name=name, tag="latest", image_prefix=docker_image_prefix
        )
        build_utils.log(
            "Release Docker image with latest tag as well: " + remote_latest_tag
        )
        build_utils.run(
            "docker tag " + local_tag + " " + remote_latest_tag,
            exit_on_error=exit_on_error,
        )
        build_utils.run("docker push " + remote_latest_tag, exit_on_error=exit_on_error)

    return completed_process
| 37.113636 | 205 | 0.679731 |
c26b884dd3d4d5b8b8e569db3554db56ec68bc33 | 129 | py | Python | Code/YOLO/darkflow/darkflow/net/build.py | kalvin-osoro/ml_project | bf0bdc5719f2712682dd070045a5f1edf933a0c4 | [
"Apache-2.0"
] | null | null | null | Code/YOLO/darkflow/darkflow/net/build.py | kalvin-osoro/ml_project | bf0bdc5719f2712682dd070045a5f1edf933a0c4 | [
"Apache-2.0"
] | null | null | null | Code/YOLO/darkflow/darkflow/net/build.py | kalvin-osoro/ml_project | bf0bdc5719f2712682dd070045a5f1edf933a0c4 | [
"Apache-2.0"
] | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:ea33786bb4be2c91d879beaff23346f37c5b4b5b8504df61a909e3570d67eb08
size 5150
| 32.25 | 75 | 0.883721 |
c26c957464e23a15c778de134041cfb40b9fa636 | 1,415 | py | Python | norns/gear/migrations/0006_auto_20180522_2102.py | the-norns/norns | 8856626fb6937452c123e4629a5888a49a82c349 | [
"MIT"
] | null | null | null | norns/gear/migrations/0006_auto_20180522_2102.py | the-norns/norns | 8856626fb6937452c123e4629a5888a49a82c349 | [
"MIT"
] | 62 | 2018-05-19T22:18:01.000Z | 2018-05-26T00:13:21.000Z | norns/gear/migrations/0006_auto_20180522_2102.py | the-norns/norns | 8856626fb6937452c123e4629a5888a49a82c349 | [
"MIT"
] | 3 | 2018-05-19T18:54:28.000Z | 2018-05-21T02:14:47.000Z | # Generated by Django 2.0.5 on 2018-05-22 21:02
from django.db import migrations, models
import django.db.models.deletion
| 32.906977 | 125 | 0.582332 |
c26e8a076cd054bdeb3d8edfa2f30d5c046667f6 | 1,121 | py | Python | src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
"sort": {
1: {
"invoked": 3321960,
"usecs": 109,
"tty": 0,
"one_min_cpu": 0.54,
"process": "PIM Process",
"five_min_cpu": 0.48,
"runtime": 362874,
"pid": 368,
"five_sec_cpu": 1.03,
},
2: {
"invoked": 1466728,
"usecs": 2442,
"tty": 0,
"one_min_cpu": 0.87,
"process": "IOSv e1000",
"five_min_cpu": 2.77,
"runtime": 3582279,
"pid": 84,
"five_sec_cpu": 0.55,
},
3: {
"invoked": 116196,
"usecs": 976,
"tty": 0,
"one_min_cpu": 0.07,
"process": "OSPF-1 Hello",
"five_min_cpu": 0.07,
"runtime": 113457,
"pid": 412,
"five_sec_cpu": 0.15,
},
},
"five_sec_cpu_total": 4,
"five_min_cpu": 9,
"one_min_cpu": 4,
"nonzero_cpu_processes": ["PIM Process", "IOSv e1000", "OSPF-1 Hello"],
"five_sec_cpu_interrupts": 0,
}
| 26.069767 | 75 | 0.407672 |
c26f5c129b7cbf79a66da9961a7b6a906731cbb8 | 4,702 | py | Python | watcher_metering/publisher/publisher.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 2 | 2015-10-22T19:44:57.000Z | 2017-06-15T15:01:07.000Z | watcher_metering/publisher/publisher.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 1 | 2015-10-26T13:52:58.000Z | 2015-10-26T13:52:58.000Z | watcher_metering/publisher/publisher.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 4 | 2015-10-10T13:59:39.000Z | 2020-05-29T11:47:07.000Z | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from oslo_log import log
from six.moves.queue import Queue
from watcher_metering.publisher.base import PublisherServerBase
from watcher_metering.publisher.worker import Worker
LOG = log.getLogger(__name__)
| 38.227642 | 79 | 0.652276 |
c2702f279730a02fd61505d6c6e1275ba009f9db | 873 | py | Python | setup.py | mortenlj/earthlyw | 7aa1762c84d680abed1192f88a42d4d4b227432c | [
"MIT"
] | null | null | null | setup.py | mortenlj/earthlyw | 7aa1762c84d680abed1192f88a42d4d4b227432c | [
"MIT"
] | 16 | 2021-08-21T20:11:04.000Z | 2022-03-12T19:55:53.000Z | setup.py | mortenlj/earthlyw | 7aa1762c84d680abed1192f88a42d4d4b227432c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
# Packaging configuration for "earthlyw", a wrapper that installs the
# `earthlyw` console script from the ibidem.earthlyw package.
setup(
    name="earthlyw",
    version="0.1",
    # Packages shipped with the distribution ("ibidem" is a namespace root).
    packages=[
        "ibidem",
        "ibidem.earthlyw",
    ],
    # Runtime dependencies, capped below the next major version.
    install_requires=[
        "setuptools",
        "colorlog<6",
        "appdirs<2",
        "requests<3",
    ],
    # Development/test stack: install with `pip install earthlyw[dev]`.
    extras_require={
        "dev": [
            "tox",
            "pytest",
            'pytest-xdist',
            'pytest-sugar',
            'pytest-html',
            'pytest-cov',
        ]
    },
    namespace_packages=["ibidem"],
    zip_safe=True,
    # Metadata
    author="Morten Lied Johansen",
    author_email="mortenjo@ifi.uio.no",
    license="LGPL",
    keywords="ibidem earthly",
    url="https://github.com/mortenlj/earthlyw",
    # Entry points: expose ibidem.earthlyw.main:main as the `earthlyw` CLI.
    entry_points={
        "console_scripts": [
            "earthlyw = ibidem.earthlyw.main:main",
        ],
    },
)
| 19.4 | 51 | 0.5063 |
c2722d474ea8fa2b576a6ea93761caf6c92cb828 | 5,547 | py | Python | export_pdf_decaissement.py | Ciwara/DE-ENCAISSEMENT | bd816b40c857a768e866535b46b30ae6fb5020e9 | [
"Apache-2.0"
] | null | null | null | export_pdf_decaissement.py | Ciwara/DE-ENCAISSEMENT | bd816b40c857a768e866535b46b30ae6fb5020e9 | [
"Apache-2.0"
] | null | null | null | export_pdf_decaissement.py | Ciwara/DE-ENCAISSEMENT | bd816b40c857a768e866535b46b30ae6fb5020e9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding= UTF-8 -*-
# Fad
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
# setup the empty canvas
from io import FileIO as file
from reportlab.platypus import Flowable
# from Common.pyPdf import PdfFileWriter, PdfFileReader
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.lib import colors
from reportlab.platypus import Table, TableStyle, Paragraph
# from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from num2words import num2words
# from configuration import Config
from Common.ui.util import formatted_number
from Common.ui.util import get_temp_filename
def pdFview(filename, invoice):
    """Generate a disbursement ("decaissement") PDF for *invoice*.

    An overlay (header, amount table, amount spelled out in words, client
    number and signature labels) is drawn on top of each page of the static
    template PDF, and the merged result is written to ``<filename>.pdf``.

    Args:
        filename: Output path without extension; when falsy, a temporary
            file name is generated.
        invoice: Object exposing ``recever_name``, ``description``, ``date``,
            ``number``, ``amount`` and ``num_client``.

    Returns:
        str: Path of the generated PDF file.
    """
    styles = getSampleStyleSheet()
    # styleN = styles["BodyText"]
    styleBH = styles["Normal"]
    if not filename:
        filename = get_temp_filename('pdf')
    PDFSOURCE = 'static/encaissement_source.pdf'
    TMP_FILE = 'static/tmp.pdf'
    DEFAULT_FONT_SIZE = 11  # NOTE(review): currently unused
    FONT_BOLD = 'Helvetica-Bold'
    FONT = 'Helvetica'
    # FONT = 'Courier-Bold'
    # Input (template) PDF
    input1 = PdfFileReader(file(PDFSOURCE, "rb"))
    # Output PDF
    output = PdfFileWriter()
    # Number of pages in the template
    n_pages = input1.getNumPages()
    # Base coordinates of the overlay drawing (points, origin bottom-left).
    y = 750
    x = 40
    recever_name = Paragraph('''{}'''.format(invoice.recever_name), styleBH)
    description = Paragraph('''{}'''.format(invoice.description), styleBH)
    date_valeur = invoice.date.strftime("%d - %b - %Y")
    for i in range(n_pages):
        # Fetch the corresponding page of the template document (input1)
        page = input1.getPage(i)
        # Draw this page's overlay onto a scratch canvas file.
        p = canvas.Canvas(TMP_FILE, pagesize=A4)
        p.setFont(FONT_BOLD, 12)
        p.drawString(x + 300, y - 60, "DECAISEMENT N :")
        p.drawString(x + 300, y - 80, "BAMAKO le ")
        p.setFont(FONT, 12)
        p.drawString(x + 420, y - 60, invoice.number)
        p.drawString(x + 380, y - 80, date_valeur)
        # Build the 4x4 amount table (header row + data rows).
        ldata = []
        ht = invoice.amount
        amount = str(formatted_number(ht))
        ldata.append(['', "DESIGNATION", 'MONTANT', 'NOM'])
        ldata.append(["MONTANT", description, amount, recever_name])
        ldata.append(["TAUX", "", "", "MONTANT"])
        ldata.append(["VALEUR", "", "", amount])
        # Row/column dimensions in inches.
        row = 0.8
        col = 1.5
        btable = Table(
            ldata,
            colWidths=[col * inch, 2.8 * inch, col * inch, col * inch],
            rowHeights=[0.5 * inch, row * inch, row * inch, row * inch])
        btable.setStyle(
            TableStyle(
                [("BOX", (0, 0), (-1, -1), 0.25, colors.black),
                 ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
                 ('ALIGN', (0, 1), (-1, -1), "RIGHT"),
                 ('BACKGROUND', (0, 0), (-1, 0), colors.white),
                 ('TEXTCOLOR', (0, 0), (-1, 0), colors.black),
                 ('FONTSIZE', (0, 0), (-1, 0), 14),
                 ('FONTNAME', (0, 0), (-1, -1), FONT_BOLD),
                 # ('BACKGROUND', (1, 1), (1, 1), colors.black),
                 ('ALIGN', (1, 0), (1, -1), 'LEFT')])
        )
        # Available area for the table; wrap() computes its actual size.
        a_w = 800
        a_h = y - 320
        w, h = btable.wrap(a_w, a_h)  # w/h unused; wrap() must run before drawOn()
        btable.drawOn(p, 40, a_h)
        # Spell the amount out in French words below the table.
        ht_en_lettre = num2words(ht, lang='fr')
        # NOTE(review): y is rebound inside the loop, so positions would drift
        # on a multi-page template — presumably the template has one page.
        y = a_h - 15
        ht_en_lettre1, ht_en_lettre2 = controle_caratere(ht_en_lettre + " franc CFA", 55, 40)
        p.drawString(x, y - 30, "Arrt la prsente facture la somme de : {}".format(ht_en_lettre1.title()))
        p.drawString(x, y - 45, (ht_en_lettre2))
        y -= 90
        p.drawString(x + 230, y - 20, str(invoice.num_client))
        p.setFont(FONT_BOLD, 12)
        p.drawString(x, y, "Signature Client")
        p.drawString(x + 220, y, "Numro Client")
        p.drawString(x + 440, y, "Signature")
        p.showPage()
        # Save the overlay page to the scratch file
        p.save()
        # Reload the overlay as a "watermark" PDF
        watermark = PdfFileReader(file(TMP_FILE, "rb"))
        # Merge template page + overlay
        page.mergePage(watermark.getPage(0))
        # Append the merged page to the output document
        output.addPage(page)
    # Write the final PDF
    file_dest = filename + ".pdf"
    output_stream = file(file_dest, u"wb")
    output.write(output_stream)
    output_stream.close()
    return file_dest
def controle_caratere(lettre, nb_controle, nb_limite):
    """Split a string over two lines for the PDF layout.

    If *lettre* fits within *nb_controle* characters, it is returned whole
    as the first line and the second line is empty.  Otherwise the words
    are distributed: each word (preceded by a single space) is appended to
    the first line while that line is at most *nb_limite* characters long;
    the remaining words overflow onto the second line.

    Returns:
        tuple: (first_line, second_line)
    """
    if len(lettre) <= nb_controle:
        return lettre, u""

    first = u""
    second = u""
    for word in lettre.split(u" "):
        if len(first) <= nb_limite:
            first += u" " + word
        else:
            second += u" " + word
    return first, second
c274168a8c5a204b07518e6afd5c3fd89f5eb019 | 9,073 | py | Python | pose/datasets/real_animal_all.py | chaneyddtt/UDA-Animal-Pose | f1ebfda860a2585c60fe86ce1632e910ac97ebc5 | [
"MIT"
] | 61 | 2021-03-30T08:34:24.000Z | 2022-03-30T02:45:46.000Z | pose/datasets/real_animal_all.py | chaneyddtt/UDA-Animal-Pose | f1ebfda860a2585c60fe86ce1632e910ac97ebc5 | [
"MIT"
] | 13 | 2021-04-10T12:46:58.000Z | 2022-03-11T10:40:02.000Z | pose/datasets/real_animal_all.py | chaneyddtt/UDA-Animal-Pose | f1ebfda860a2585c60fe86ce1632e910ac97ebc5 | [
"MIT"
] | 2 | 2021-07-22T04:53:44.000Z | 2022-02-15T14:19:02.000Z | from __future__ import print_function, absolute_import
import random
import torch.utils.data as data
from pose.utils.osutils import *
from pose.utils.transforms import *
from scipy.io import loadmat
import argparse
real_animal_all.njoints = 18 # ugly but works
| 45.139303 | 136 | 0.554502 |
c276131b5adb85398eba0cb67c7e33579e3497af | 2,376 | py | Python | src/models/SLEuth_model.py | NCBI-Codeathons/SLEuth | c74c05bbc07ce4c813ee46ab66cfb6487fdc6966 | [
"MIT"
] | 2 | 2019-11-07T22:24:49.000Z | 2019-11-09T02:42:59.000Z | src/models/SLEuth_model.py | NCBI-Codeathons/STRATIFICATION-OF-SLE-PATIENT-COHORT-FOR-PRECISION-MEDICINE | c74c05bbc07ce4c813ee46ab66cfb6487fdc6966 | [
"MIT"
] | null | null | null | src/models/SLEuth_model.py | NCBI-Codeathons/STRATIFICATION-OF-SLE-PATIENT-COHORT-FOR-PRECISION-MEDICINE | c74c05bbc07ce4c813ee46ab66cfb6487fdc6966 | [
"MIT"
] | 1 | 2020-06-06T18:47:21.000Z | 2020-06-06T18:47:21.000Z | import sklearn
from sklearn.cluster import KMeans
from src.features.feature_selection import PCA_Variants2Gene_FeatureSelection
| 44.830189 | 147 | 0.676347 |
c2769ae34a085e912e6eacf2499ecd7dc14d3eeb | 492 | py | Python | cap6/ex6.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap6/ex6.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap6/ex6.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | p = [1, 4, 9, 10, 20, 25]
e1 = int(input('Primeiro elemento: '))
e2 = int(input('Segundo elemento: '))
x = 0
achou = False
primeiro = 0
while x < len(p):
if p[x] == e1:
print(f'Elemento 1 encontrado na posio {x} da lista!')
if primeiro == 0:
primeiro = 1
if p[x] == e2:
print(f'Elemento 2 encontrado na posio {x} da lista!')
if primeiro == 0:
primeiro = 2
x += 1
print(f'Foi encontrado primeiro o {primeiro} elemento!')
| 27.333333 | 64 | 0.556911 |
c276e6f457a794f0b9dc50d4ef823c9392fe5335 | 62 | py | Python | src/lk_db/ents/time/EntTime.py | nuuuwan/lk_db | ac0abfa47ba31b0d4c2c8566b3101b83749bd45d | [
"MIT"
] | null | null | null | src/lk_db/ents/time/EntTime.py | nuuuwan/lk_db | ac0abfa47ba31b0d4c2c8566b3101b83749bd45d | [
"MIT"
] | null | null | null | src/lk_db/ents/time/EntTime.py | nuuuwan/lk_db | ac0abfa47ba31b0d4c2c8566b3101b83749bd45d | [
"MIT"
] | null | null | null | from lk_db.ents.Ent import Ent
| 10.333333 | 30 | 0.709677 |
c278440f2c1e433cd58705bc68bd303258b8e21b | 8,084 | py | Python | lib/datasets/myvg.py | zhydong/faster-rcnn.pytorch | 36fa8b9718228edb4702b039deab924c40b973f5 | [
"MIT"
] | null | null | null | lib/datasets/myvg.py | zhydong/faster-rcnn.pytorch | 36fa8b9718228edb4702b039deab924c40b973f5 | [
"MIT"
] | null | null | null | lib/datasets/myvg.py | zhydong/faster-rcnn.pytorch | 36fa8b9718228edb4702b039deab924c40b973f5 | [
"MIT"
] | null | null | null | """
Visual Genome in Scene Graph Generation by Iterative Message Passing split
"""
import os
import cv2
import json
import h5py
import pickle
import numpy as np
import scipy.sparse
import os.path as osp
from datasets.imdb import imdb
from model.utils.config import cfg
from IPython import embed
if __name__ == '__main__':
    # Manual smoke test: construct the dataset wrapper and drop into an
    # interactive shell to inspect it. (Renamed the previous unprofessional
    # throwaway identifier.)
    dataset = vg_sggimp(0)
    embed(header='myvg.py in lib/datasets')
| 36.579186 | 114 | 0.588323 |
c279e40781e717d53e7e9d1d3467cb0c61eb0740 | 6,213 | py | Python | src/rfid/__init__.py | whaleygeek/SL030 | ff96337cd1619b4a5bd8097a5d5dd0455d2e1674 | [
"MIT"
] | null | null | null | src/rfid/__init__.py | whaleygeek/SL030 | ff96337cd1619b4a5bd8097a5d5dd0455d2e1674 | [
"MIT"
] | 8 | 2020-11-14T11:01:38.000Z | 2020-11-18T15:06:07.000Z | src/rfid/__init__.py | whaleygeek/SL030 | ff96337cd1619b4a5bd8097a5d5dd0455d2e1674 | [
"MIT"
] | 2 | 2020-07-23T14:41:31.000Z | 2020-11-19T13:19:38.000Z | # SL030 RFID reader driver for skpang supplied SL030 Mifare reader
# (c) 2013-2014 Thinking Binaries Ltd, David Whale
#===============================================================================
# CONFIGURATION
#
# You can change these configuration items either by editing them in this
# file, or by refering to the module by name inside your own program.
# e.g.
# import rfid
# rfid.CFGEN_GPIO = False
# set to True to detect card presence by using GPIO
# set to False to detect card presence by reading card status
CFGEN_GPIO = True
# Set to the GPIO required to monitor the tag detect (OUT) line
CFG_TAG_DETECT = 4
# The I2C address of the SL030 RFID tag reader
CFG_ADDRESS = 0x50
# How often to poll (in seconds) for a tag present
CFG_TAG_PRESENT_POLL_TIME = 0.01
# How often to poll (in seconds) for a tag absent
CFG_TAG_ABSENT_POLL_TIME = 0.5
# Set to True to throw an exception when an error is printed
# Set to False to just print the error
CFGEN_EXCEPTIONS = True
# The function called when an error occurs in this module
# you can replace this with a function of your own to handle errors
#===============================================================================
# SETUP
try:
import ci2c # python2
except ImportError:
from . import ci2c # python3
import time
CMD_SELECT_MIFARE = 0x01
CMD_GET_FIRMWARE = 0xF0
WR_RD_DELAY = 0.05
ci2c.initDefaults()
#===============================================================================
# UTILITIES
#===============================================================================
# class-based interface.
# If for some reason you had multiple SL030's with different addresses,
# you could use this to have multiple instances. It's not really written
# that way yet as CFG_ADDRESS is global, but it's easy to change if you
# did want more than one reader, or if you wanted different types of readers
# that implemented this same interface and were interchangeable at product
# install time.
# The gpio parameter in __init__ can be used to provide an alternative GPIO
# implementation or to share an application wide GPIO object.
#===============================================================================
# class-less interface
#
# Useful if you want kids to use the interface and don't want the complexity
# of classes. It also allows us to hide some of the more complex functions
# and provide simpler documentation strings
instance = SL030()
def tagIsPresent():
    """Return True if an RFID tag is currently in range of the reader."""
    return instance.tagIsPresent()
def waitTag():
    """Block until a tag comes into range of the reader."""
    instance.waitTag()
def waitNoTag():
    """Block until no tag is in range of the reader."""
    instance.waitNoTag()
def readMifare():
    """Try to read the tag as a Mifare tag. Returns False if not a Mifare."""
    return instance.readMifare()
def getUID():
    """Return the unique ID number of the card."""
    return instance.getUID()
def getUniqueId():
    """Return the unique ID number of the card as a printable string."""
    return instance.getUniqueId()
def getType():
    """Return the numeric type code of the card."""
    return instance.getType()
def getTypeName():
    """Return a human-readable name for the type of card in use."""
    return typename(instance.getType())
# END
| 26.21519 | 80 | 0.63013 |
c27a447ad8dc36c385389a565e4ed170305a8f4a | 1,293 | py | Python | parser_tool.py | kmwenja/marker | f9d9634eb1ddce3dc0ebbe6164bb87a27dc95dd9 | [
"MIT"
] | null | null | null | parser_tool.py | kmwenja/marker | f9d9634eb1ddce3dc0ebbe6164bb87a27dc95dd9 | [
"MIT"
] | null | null | null | parser_tool.py | kmwenja/marker | f9d9634eb1ddce3dc0ebbe6164bb87a27dc95dd9 | [
"MIT"
] | null | null | null | # parser.py - parses a given sentence using a given grammar definition
import sys, os
import argparse
from utils import load_grammar
def get_parser(grammar_file, *args, **kwargs):
    """Load a parser from the given grammar definition file.

    Any extra positional/keyword arguments are forwarded to ``load_grammar``.
    """
    return load_grammar(grammar_file, *args, **kwargs)
def tokenize(sentence):
    """Break a sentence string into whitespace-delimited tokens for parsing."""
    tokens = sentence.split()
    return tokens
def parse(parser, sentence):
    """Return the n-best parse trees of *sentence* produced by *parser*.

    The sentence is split on whitespace via ``tokenize`` before parsing.
    """
    return parser.nbest_parse(tokenize(sentence))
if __name__ == "__main__":
main()
| 26.387755 | 73 | 0.661253 |
c27a8632891f52402501c008dbb668b6e46297a0 | 3,821 | py | Python | pyjapt/lexing.py | alejandroklever/pyjapt | 21b11fd4b5b21cabcc59673538c473e33af9e646 | [
"MIT"
] | 8 | 2020-07-23T06:19:28.000Z | 2021-11-06T04:26:47.000Z | pyjapt/lexing.py | alejandroklever/PyJapt | 21b11fd4b5b21cabcc59673538c473e33af9e646 | [
"MIT"
] | null | null | null | pyjapt/lexing.py | alejandroklever/PyJapt | 21b11fd4b5b21cabcc59673538c473e33af9e646 | [
"MIT"
] | null | null | null | import re
from typing import List, Any, Generator, Tuple, Pattern, Optional, Callable, Dict
| 33.517544 | 111 | 0.576289 |
c27b06a45e2113932d5e033fa31486f2b933313d | 3,975 | py | Python | src/jobs/management/commands/try_retrain.py | fleur101/predict-python | d40c876d919232bbb77904e050b182c875bc36fa | [
"MIT"
] | 12 | 2018-06-27T08:09:18.000Z | 2021-10-10T22:19:04.000Z | src/jobs/management/commands/try_retrain.py | fleur101/predict-python | d40c876d919232bbb77904e050b182c875bc36fa | [
"MIT"
] | 17 | 2018-06-12T17:36:11.000Z | 2020-11-16T21:23:22.000Z | src/jobs/management/commands/try_retrain.py | fleur101/predict-python | d40c876d919232bbb77904e050b182c875bc36fa | [
"MIT"
] | 16 | 2018-08-02T14:40:17.000Z | 2021-11-12T12:28:46.000Z | import random
from django.core.management.base import BaseCommand
from pandas import Series
from src.cache.cache import put_labelled_logs
from src.core.core import get_encoded_logs
from src.jobs.models import Job
from src.jobs.tasks import prediction_task
from src.runtime.tasks import create_prediction_job
from src.utils.django_orm import duplicate_orm_row
| 38.592233 | 103 | 0.585409 |
c27ba0e5861f097686336335cdd99739a27bfdc4 | 1,646 | py | Python | flydra_camnode/flydra_camnode/camnode_utils.py | elhananby/flydra | 09b86859b1863700cdea0bbcdd4758da6c83930b | [
"Apache-2.0",
"MIT"
] | 45 | 2017-08-25T06:46:56.000Z | 2021-08-29T16:42:49.000Z | flydra_camnode/flydra_camnode/camnode_utils.py | elhananby/flydra | 09b86859b1863700cdea0bbcdd4758da6c83930b | [
"Apache-2.0",
"MIT"
] | 7 | 2017-10-16T10:46:20.000Z | 2020-12-03T16:42:55.000Z | flydra_camnode/flydra_camnode/camnode_utils.py | elhananby/flydra | 09b86859b1863700cdea0bbcdd4758da6c83930b | [
"Apache-2.0",
"MIT"
] | 21 | 2018-04-11T09:06:40.000Z | 2021-12-26T23:38:40.000Z | #emacs, this is -*-Python-*- mode
from __future__ import division
from __future__ import with_statement
import contextlib
import threading, Queue
| 27.433333 | 77 | 0.592345 |
c27c8a4376df70bee9dccb2ba1429b510d6719df | 3,439 | py | Python | app.py | Nerolation/Ethereum-Notary-Service-Prototype | ea5487a29813caee1e4be9edac495d89010c593e | [
"MIT"
] | null | null | null | app.py | Nerolation/Ethereum-Notary-Service-Prototype | ea5487a29813caee1e4be9edac495d89010c593e | [
"MIT"
] | null | null | null | app.py | Nerolation/Ethereum-Notary-Service-Prototype | ea5487a29813caee1e4be9edac495d89010c593e | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, logging, make_response, json
from ethw3 import genkey, create_chain_data, verify_chain_data, create_acct, mine, history_slice
from utils_s3 import load_from_fetchlist
# Initialize flask an other global variables
app = Flask(__name__)
address, username, addr, priv, contVer, web3Ver = None,None,None,None,None,None
sig = []
txHash = []
status,status2 = 0,0
recordDict, matchedData = {}, {}
entryList = []
if __name__ == '__main__':
app.run(debug=True)
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
| 37.791209 | 198 | 0.628962 |
c27d5e7133ef2989b1ce8e0b881cabce3f8f0dab | 8,800 | py | Python | python/dsbox/planner/common/library.py | RqS/dsbox-ta2 | 43800d4365a154684fa5b9551c2c1cd21ec7139c | [
"MIT"
] | null | null | null | python/dsbox/planner/common/library.py | RqS/dsbox-ta2 | 43800d4365a154684fa5b9551c2c1cd21ec7139c | [
"MIT"
] | null | null | null | python/dsbox/planner/common/library.py | RqS/dsbox-ta2 | 43800d4365a154684fa5b9551c2c1cd21ec7139c | [
"MIT"
] | null | null | null | import json
import os
from datetime import date
from typing import List, Dict
from d3m_metadata.metadata import PrimitiveMetadata, PrimitiveFamily, PrimitiveAlgorithmType
from d3m import index
from dsbox.planner.common.primitive import Primitive
from dsbox.schema.profile_schema import DataProfileType as dpt
from collections import defaultdict
| 42.307692 | 112 | 0.611932 |
c27e04a4ce8b186ea59a6dc9c61fb5fd29af829e | 134 | py | Python | python-lab-file/64_capitalizefirshchar.py | zshashz/py1729 | 3281ae2a20c665ebcc0d53840cc95143cbe6861b | [
"MIT"
] | 1 | 2021-01-22T09:03:59.000Z | 2021-01-22T09:03:59.000Z | python-lab-file/64_capitalizefirshchar.py | zshashz/py1729 | 3281ae2a20c665ebcc0d53840cc95143cbe6861b | [
"MIT"
] | null | null | null | python-lab-file/64_capitalizefirshchar.py | zshashz/py1729 | 3281ae2a20c665ebcc0d53840cc95143cbe6861b | [
"MIT"
] | 2 | 2021-05-04T11:29:38.000Z | 2021-11-03T13:09:48.000Z | # Program 64 : Capitalize the First Character of a String
my_string = input()
cap_string = my_string.capitalize()
print(cap_string) | 19.142857 | 57 | 0.768657 |
c27e0e6f346371d620980e3342a6a2e97c9b22a6 | 18,487 | py | Python | radiation_dose_estimator_keras.py | vsuomi/radiation-dose | 6a97e282b7b8a06cde7973ceeebb14ea3ef60f70 | [
"MIT"
] | 12 | 2019-02-27T17:07:58.000Z | 2021-12-17T20:31:31.000Z | radiation_dose_estimator_keras.py | liuhd073/radiation-dose | 6a97e282b7b8a06cde7973ceeebb14ea3ef60f70 | [
"MIT"
] | 2 | 2020-04-02T08:57:19.000Z | 2021-06-10T05:06:26.000Z | radiation_dose_estimator_keras.py | vsuomi/radiation-dose | 6a97e282b7b8a06cde7973ceeebb14ea3ef60f70 | [
"MIT"
] | 2 | 2019-09-21T12:19:08.000Z | 2020-04-28T04:30:41.000Z | # -*- coding: utf-8 -*-
'''
Created on Fri Nov 16 09:36:50 2018
@author:
Visa Suomi
Turku University Hospital
November 2018
@description:
This model is used to predict radiation dose from pre-treatment patient
parameters
'''
#%% clear variables
%reset -f
%clear
#%% import necessary libraries
import keras as k
import pandas as pd
import numpy as np
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.utils.class_weight import compute_sample_weight
import scipy as sp
import time
import os
from save_load_variables import save_load_variables
from plot_regression_performance import plot_regression_performance
from analyse_statistics import analyse_statistics
from analyse_correlation import analyse_correlation
from analyse_feature_correlation import analyse_feature_correlation
#%% define logging and data display format
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
#%% read data
df = pd.read_csv(r'C:\Users\visa\Documents\TYKS\Machine learning\Radiation dose\radiation-dose\radiation_dose_data.csv', sep = ',')
df_orig = df.copy()
#%% check for duplicates
duplicates = any(df.duplicated())
#%% create synthetic features
df['BSA'] = 0.007184 * df['paino'].pow(0.425) * df['pituus'].pow(0.725)
#%% calculate nan percent for each label
nan_percent = pd.DataFrame(df.isnull().mean() * 100, columns = ['% of NaN'])
# drop nan values
#df = df.dropna()
df = df.dropna(subset = ['paino'])
#df = df.dropna(subset = ['AHA_cto'])
#df = df.dropna(subset = ['Patient_sex'])
#df = df.dropna(subset = ['FN2BA'])
#df = df.dropna(subset = ['I20.81_I21.01_I21.11_or_I21.41'])
#df = df.dropna(subset = ['add_stent_2_tai_yli'])
#df = df.dropna(subset = ['n_tmp_3'])
#df = df.dropna(subset = ['sten_post_100'])
#df = df.dropna(subset = ['suonia_2_tai_yli'])
#df = df.dropna(subset = ['pituus'])
#%% randomise and divive data for cross-validation
# split data
split_ratio = 0.2
training_set, holdout_set = train_test_split(df, test_size = split_ratio)
validation_set, testing_set = train_test_split(holdout_set, test_size = 0.5)
del holdout_set
# obtain sizes
n_training = training_set.shape[0]
n_validation = validation_set.shape[0]
n_testing = testing_set.shape[0]
#%% calculate correlation and standard deviation matrices
std_mat, corr_mat, most_corr = analyse_correlation(training_set, 13, 'Korjattu_DAP_GYcm2')
#%% analyse individual feature correlations
analyse_feature_correlation(training_set, 'paino', 'Korjattu_DAP_GYcm2', False)
analyse_feature_correlation(training_set, 'AHA_cto', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'Patient_sex', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'FN2BA', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'I20.81_I21.01_I21.11_or_I21.41', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'add_stent_2_tai_yli', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'n_tmp_3', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'sten_post_100', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'suonia_2_tai_yli', 'Korjattu_DAP_GYcm2', True)
analyse_feature_correlation(training_set, 'pituus', 'Korjattu_DAP_GYcm2', False)
#%% analyse target
analyse_statistics(training_set[['Korjattu_DAP_GYcm2']])
#%% replace missing values in all datasets
# create dictionary for impute values based only on training data
impute_values = {'BSA': training_set['BSA'].mean(),
'paino': training_set['paino'].mean(),
'pituus': training_set['pituus'].mean(),
'ind_pci_in_stemi': training_set['ind_pci_in_stemi'].mode()[0],
'ind_flap_failure': training_set['ind_flap_failure'].mode()[0],
'ind_nstemi': training_set['ind_nstemi'].mode()[0],
'ind_diag': training_set['ind_diag'].mode()[0],
'ind_uap': training_set['ind_uap'].mode()[0],
'ind_heart_failure': training_set['ind_heart_failure'].mode()[0],
'ind_stemi_other': training_set['ind_stemi_other'].mode()[0],
'ind_stable_ap': training_set['ind_stable_ap'].mode()[0],
'ind_arrhythmia_settl': training_set['ind_arrhythmia_settl'].mode()[0],
'suonia_2_tai_yli': training_set['suonia_2_tai_yli'].mode()[0],
'lm_unprotected': training_set['lm_unprotected'].mode()[0],
'im': training_set['im'].mode()[0],
'lada': training_set['lada'].mode()[0],
'ladb': training_set['ladb'].mode()[0],
'ladc': training_set['ladc'].mode()[0],
'lcxa': training_set['lcxa'].mode()[0],
'lcxb': training_set['lcxb'].mode()[0],
'lcxc': training_set['lcxc'].mode()[0],
'ld1': training_set['ld1'].mode()[0],
'ld2': training_set['ld2'].mode()[0],
'lita': training_set['lita'].mode()[0],
'lm': training_set['lm'].mode()[0],
'lom1': training_set['lom1'].mode()[0],
'lom2': training_set['lom2'].mode()[0],
'lpd': training_set['lpd'].mode()[0],
'lpl': training_set['lpl'].mode()[0],
'ram_rv': training_set['ram_rv'].mode()[0],
'rcaa': training_set['rcaa'].mode()[0],
'rcab': training_set['rcab'].mode()[0],
'rcac': training_set['rcac'].mode()[0],
'rita': training_set['rita'].mode()[0],
'rpd': training_set['rpd'].mode()[0],
'rpl': training_set['rpl'].mode()[0],
'vgrca_ag': training_set['vgrca_ag'].mode()[0],
'vglca1_ag': training_set['vglca1_ag'].mode()[0],
'vglca2_ag': training_set['vglca2_ag'].mode()[0],
'restenosis': training_set['restenosis'].mode()[0],
'stent_dimension': training_set['stent_dimension'].mean(),
'ball_dimension': training_set['ball_dimension'].mean(),
'add_stent_1': 0,
'add_stent_2_tai_yli': 0}
# combine datasets for imputing
df = training_set.append([validation_set, testing_set])
# impute data
for key, val in impute_values.items():
df[key] = df[key].fillna(val)
del key, val
#%% fill in mutually exclusive categorical values
# obtain categorical impute values
sten_post_training = training_set[['sten_post_0', 'sten_post_25', 'sten_post_60',
'sten_post_85', 'sten_post_100']].idxmax(axis = 1)
# --- Impute one-hot coded categorical variables (post-/pre-stenosis grade and
# AHA lesion class). The modal category is computed from the TRAINING split
# only, so no information leaks from the validation/testing splits.
# NOTE(review): `impute_values`, `training_set`, `df`, `sten_post_training`
# and the split sizes are defined earlier in this script (outside this view).
impute_values['sten_post'] = sten_post_training.mode()[0]
sten_pre_training = training_set[['sten_pre_100', 'sten_pre_85',
                                  'sten_pre_60']].idxmax(axis = 1)
impute_values['sten_pre'] = sten_pre_training.mode()[0]
AHA_training = training_set[['AHA_a', 'AHA_b1', 'AHA_b2',
                             'AHA_c', 'AHA_cto']].idxmax(axis = 1)
impute_values['AHA'] = AHA_training.mode()[0]
del sten_post_training, sten_pre_training, AHA_training
# impute data
# For each one-hot group: collapse to a single categorical column (idxmax),
# fill missing rows with the training-set mode, re-expand with get_dummies,
# and restore the original column order before writing back into df.
sten_post = df[['sten_post_0', 'sten_post_25', 'sten_post_60',
                'sten_post_85', 'sten_post_100']].idxmax(axis = 1)
sten_post = sten_post.fillna(impute_values['sten_post'])
sten_post = pd.get_dummies(sten_post).astype(int)
sten_post = sten_post[['sten_post_0', 'sten_post_25', 'sten_post_60',
                       'sten_post_85', 'sten_post_100']]
df[['sten_post_0', 'sten_post_25', 'sten_post_60', 'sten_post_85',
    'sten_post_100']] = sten_post
sten_pre = df[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']].idxmax(axis = 1)
sten_pre = sten_pre.fillna(impute_values['sten_pre'])
sten_pre = pd.get_dummies(sten_pre).astype(int)
sten_pre = sten_pre[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']]
df[['sten_pre_100', 'sten_pre_85', 'sten_pre_60']] = sten_pre
AHA = df[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']].idxmax(axis = 1)
AHA = AHA.fillna(impute_values['AHA'])
AHA = pd.get_dummies(AHA).astype(int)
AHA = AHA[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']]
df[['AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']] = AHA
del sten_post, sten_pre, AHA
#%% check for nan values
# Cell-mode script: the boolean result is shown interactively, not assigned.
df.isnull().values.any()
#%% split impute data back to training, validation and testing
training_set = df[:n_training]
validation_set = df[n_training:n_training+n_validation]
testing_set = df[-n_testing:]
#%% define feature and target labels
# Active feature subset; the commented-out lists below are alternative
# feature sets kept for experimentation.
feature_labels = ['paino', 'FN2BA', 'Patient_sex',
                  'Aiempi_ohitusleikkaus', 'suonia_2_tai_yli',
                  'add_stent_2_tai_yli',
                  'sten_post_85', 'sten_post_100',
                  'I20.81_I21.01_I21.11_or_I21.41', 'I35.0',
                  'ind_nstemi', 'ind_pci_in_stemi', 'ind_stable_ap',
                  'AHA_a', 'AHA_b1', 'AHA_b2', 'AHA_c', 'AHA_cto']
#feature_labels = ['BSA', 'AHA_cto', 'FN2BA',
#                  'add_stent_2_tai_yli',
#                  'sten_post_100', 'suonia_2_tai_yli']
#feature_labels = ['paino', 'pituus', 'Patient_sex', 'Age',
#                  'I20.81_I21.01_I21.11_or_I21.41', 'I35.0', 'FN1AC', 'FN2BA',
#                  'FN2AA', 'TFC00', 'n_tmp_1', 'n_tmp_2', 'n_tmp_3',
#                  'ind_pci_in_stemi', 'ind_flap_failure', 'ind_nstemi',
#                  'ind_diag', 'ind_uap', 'ind_heart_failure', 'ind_stemi_other',
#                  'ind_stable_ap', 'ind_arrhythmia_settl', 'suonia_2_tai_yli',
#                  'lm_unprotected', 'Aiempi_ohitusleikkaus', 'im', 'lada',
#                  'ladb', 'ladc', 'lcxa', 'lcxb', 'lcxc', 'ld1', 'ld2', 'lita',
#                  'lm', 'lom1', 'lom2', 'lpd', 'lpl', 'ram_rv', 'rcaa', 'rcab',
#                  'rcac', 'rita', 'rpd', 'rpl', 'vgrca_ag', 'vglca1_ag',
#                  'vglca2_ag', 'restenosis', 'stent_dimension', 'ball_dimension',
#                  'add_stent_1', 'add_stent_2_tai_yli', 'sten_post_0',
#                  'sten_post_25', 'sten_post_60', 'sten_post_85', 'sten_post_100',
#                  'sten_pre_100', 'sten_pre_85', 'sten_pre_60', 'AHA_a', 'AHA_b1',
#                  'AHA_b2', 'AHA_c', 'AHA_cto', 'IVUS', 'OCT']
target_label = ['Korjattu_DAP_GYcm2']
#%% extract features and targets
training_features = training_set[feature_labels]
validation_features = validation_set[feature_labels]
testing_features = testing_set[feature_labels]
training_targets = training_set[target_label]
validation_targets = validation_set[target_label]
testing_targets = testing_set[target_label]
#%% calculate sample weights
# Bin the continuous target into 10 histogram bins and compute class-balanced
# weights over those bins, so sparsely populated target ranges still
# contribute to training.
hist, bin_edges = np.histogram(training_targets, bins = 10)
classes = training_targets.apply(lambda x: pd.cut(x, bin_edges, labels = False,
                                                  include_lowest = True)).values
sample_weights = compute_sample_weight('balanced', classes)
#%% scale features
# One of 'z-score' (standardise with TRAINING-set statistics), 'log' (log1p)
# or 'box-cox' (boxcox1p with fixed lambda). These are independent `if`
# blocks, not elif: exactly one matches the chosen setting.
feature_transform = 'z-score'
if feature_transform == 'z-score':
    t_mean = training_features.mean()
    t_std = training_features.std()
    training_features = (training_features - t_mean) / t_std
    validation_features = (validation_features - t_mean) / t_std
    testing_features = (testing_features - t_mean) / t_std
if feature_transform == 'log':
    training_features = np.log1p(training_features)
    validation_features = np.log1p(validation_features)
    testing_features = np.log1p(testing_features)
if feature_transform == 'box-cox':
    lmbda = 0.15
    training_features = sp.special.boxcox1p(training_features, lmbda)
    validation_features = sp.special.boxcox1p(validation_features, lmbda)
    testing_features = sp.special.boxcox1p(testing_features, lmbda)
#%% scale targets (for skewed data)
# The inverse transform is applied after prediction in the evaluation cell.
target_transform = 'log'
if target_transform == 'log':
    training_targets = np.log1p(training_targets)
    validation_targets = np.log1p(validation_targets)
    testing_targets = np.log1p(testing_targets)
if target_transform == 'box-cox':
    lmbda = 0.15
    training_targets = sp.special.boxcox1p(training_targets, lmbda)
    validation_targets = sp.special.boxcox1p(validation_targets, lmbda)
    testing_targets = sp.special.boxcox1p(testing_targets, lmbda)
#%% build and train neural network model
# define parameters
learning_rate = 0.001
n_epochs = 150
n_neurons = 64
n_layers = 2
batch_size = 5
l1_reg = 0.0
l2_reg = 0.01
batch_norm = False
dropout = None
# Fall back to unweighted training when the sample-weight cell was not run.
if 'sample_weights' not in locals():
    sample_weights = None
# build model
# Discard any model left over from a previous run of this cell.
if 'model' in locals():
    del model
model = k.models.Sequential()
# First hidden layer carries the input shape; all hidden layers are ReLU
# with L1/L2 kernel regularisation.
model.add(k.layers.Dense(n_neurons,
                         input_shape = (training_features.shape[1],),
                         kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                         activation = 'relu'))
if batch_norm is True:
    model.add(k.layers.BatchNormalization())
if dropout is not None:
    model.add(k.layers.Dropout(dropout))
# Add the remaining (n_layers - 1) hidden layers.
i = 1
while i < n_layers:
    model.add(k.layers.Dense(n_neurons,
                             kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                             activation = 'relu'))
    if batch_norm is True:
        model.add(k.layers.BatchNormalization())
    if dropout is not None:
        model.add(k.layers.Dropout(dropout))
    i += 1
# Single linear output unit: regression with MSE loss and MAE as a metric.
model.add(k.layers.Dense(1))
model.compile(optimizer = k.optimizers.Adam(lr = learning_rate),
              loss = 'mean_squared_error',
              metrics = ['mean_absolute_error'])
model.summary()
# train model
# Timestamp is reused below as the model directory name.
timestr = time.strftime('%Y%m%d-%H%M%S')
history = model.fit(training_features, training_targets, verbose = 0, callbacks = [PrintDot()],
                    batch_size = batch_size, epochs = n_epochs, sample_weight = sample_weights,
                    validation_data = (validation_features, validation_targets))
#%% evaluate model performance
# calculate loss metrics
training_loss, training_error = model.evaluate(training_features, training_targets)
validation_loss, validation_error = model.evaluate(validation_features, validation_targets)
# make predictions
# Wrap raw predictions in DataFrames so they align with the target index.
training_predictions = model.predict(training_features)
training_predictions = pd.DataFrame(training_predictions, columns = target_label,
                                    index = training_features.index, dtype = float)
validation_predictions = model.predict(validation_features)
validation_predictions = pd.DataFrame(validation_predictions, columns = target_label,
                                      index = validation_features.index, dtype = float)
# convert log targets to linear units (for skewed data)
# expm1 is the exact inverse of the log1p applied in the scaling cell.
if target_transform == 'log':
    training_targets_lin = np.expm1(training_targets)
    validation_targets_lin = np.expm1(validation_targets)
    training_predictions_lin = np.expm1(training_predictions)
    validation_predictions_lin = np.expm1(validation_predictions)
# convert box-cox targets to linear units (for skewed data)
if target_transform == 'box-cox':
    training_targets_lin = sp.special.inv_boxcox1p(training_targets, lmbda)
    validation_targets_lin = sp.special.inv_boxcox1p(validation_targets, lmbda)
    training_predictions_lin = sp.special.inv_boxcox1p(training_predictions, lmbda)
    validation_predictions_lin = sp.special.inv_boxcox1p(validation_predictions, lmbda)
# plot training performance
if (target_transform == 'log') or (target_transform == 'box-cox'):
    f1 = plot_regression_performance(history, training_targets_lin, training_predictions_lin,
                                     validation_targets_lin, validation_predictions_lin)
else:
    f1 = plot_regression_performance(history, training_targets, training_predictions,
                                     validation_targets, validation_predictions)
#%% save model
# Windows-style output path: 'Keras models\<timestamp>_TE<train err>_VE<val err>'.
model_dir = 'Keras models\\%s_TE%d_VE%d' % (timestr,
                                            round(training_error),
                                            round(validation_error))
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
f1.savefig(model_dir + '\\' + 'evaluation_metrics.pdf', dpi = 600, format = 'pdf',
           bbox_inches = 'tight', pad_inches = 0)
# Persist hyperparameters, preprocessing state and all data splits next to the
# model so the run can be reloaded/reproduced later.
variables_to_save = {'learning_rate': learning_rate,
                     'n_epochs': n_epochs,
                     'n_neurons': n_neurons,
                     'n_layers': n_layers,
                     'batch_size': batch_size,
                     'l1_reg': l1_reg,
                     'l2_reg': l2_reg,
                     'batch_norm': batch_norm,
                     'dropout': dropout,
                     'nan_percent': nan_percent,
                     'duplicates': duplicates,
                     'most_corr': most_corr,
                     'corr_mat': corr_mat,
                     'std_mat': std_mat,
                     'split_ratio': split_ratio,
                     'sample_weights': sample_weights,
                     'feature_transform': feature_transform,
                     'target_transform': target_transform,
                     'timestr': timestr,
                     'history': history,
                     'model_dir': model_dir,
                     'df': df,
                     'df_orig': df_orig,
                     'impute_values': impute_values,
                     'feature_labels': feature_labels,
                     'target_label': target_label,
                     'n_training': n_training,
                     'n_validation': n_validation,
                     'n_testing': n_testing,
                     'training_set': training_set,
                     'training_features': training_features,
                     'training_targets': training_targets,
                     'validation_set': validation_set,
                     'validation_features': validation_features,
                     'validation_targets': validation_targets,
                     'testing_set': testing_set,
                     'testing_features': testing_features,
                     'testing_targets': testing_targets}
save_load_variables(model_dir, variables_to_save, 'variables', 'save')
model.save(model_dir + '\\' + 'keras_model.h5')
| 37.960986 | 131 | 0.638611 |
c27e29fee5d31c11bd5413203986db3871f9139b | 3,610 | py | Python | gmsh_cad/emi_system_gap.py | MiroK/emi-cylinders | ccbbfa51003fc4fe8abc257dee916e229398c520 | [
"MIT"
] | null | null | null | gmsh_cad/emi_system_gap.py | MiroK/emi-cylinders | ccbbfa51003fc4fe8abc257dee916e229398c520 | [
"MIT"
] | null | null | null | gmsh_cad/emi_system_gap.py | MiroK/emi-cylinders | ccbbfa51003fc4fe8abc257dee916e229398c520 | [
"MIT"
] | 1 | 2018-05-30T14:26:59.000Z | 2018-05-30T14:26:59.000Z | from dolfin import *
# Mixed (H(div)) finite-element formulation of an EMI (intra-/extra-cellular)
# system with gap junctions, assembled with FEniCS/DOLFIN.
# Unknowns: sigma (flux, RT1), u (potential, DG0), p (interface potential,
# trace space). Assumes `from dolfin import *` earlier in the file.
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['cpp_optimize_flags'] = '-O3 -ffast-math -march=native'
parameters['ghost_mode'] = 'shared_facet'
mesh_file = 'cell_grid.h5'
comm = mpi_comm_world()
h5 = HDF5File(comm, mesh_file, 'r')
mesh = Mesh()
h5.read(mesh, 'mesh', False)
# The mesh comes in micro meters. Below it is more convenient to work in cm
mesh.coordinates()[:] *= 1E-4
# Facets in the mesh have tags 0, 1, 2, 3 One is for interfaces between
# cells and exterior, 3 are cell-cell interfaces. Two is used for marking
# boundary facets of the domain - this is where typically zero DirichletBCs
# are applied for the potential
surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1)
h5.read(surfaces, 'facet')
# The domain is split into 2 subdomains marked as 1 and 2 (cell interior,
# cell exterior). These differ by conductivities
volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
h5.read(volumes, 'physical')
cell = mesh.ufl_cell()
# We have 3 spaces S for sigma = -kappa*grad(u) [~electric field]
#                  U for potential u
#                  Q for transmebrane potential p
Sel = FiniteElement('RT', cell, 1)
Vel = FiniteElement('DG', cell, 0)
Qel = FiniteElement('Discontinuous Lagrange Trace', cell, 0)
W = FunctionSpace(mesh, MixedElement([Sel, Vel, Qel]))
sigma, u, p = TrialFunctions(W)
tau, v, q = TestFunctions(W)
# Grounding for potential
bcs = [DirichletBC(W.sub(2), Constant(0), surfaces, 2)]
# Make measures aware of subdomains
dx = Measure('dx', domain=mesh, subdomain_data=volumes)
dS = Measure('dS', domain=mesh, subdomain_data=surfaces)
ds = Measure('ds', domain=mesh, subdomain_data=surfaces)
# Normal fo the INTERIOR surface. Note that 1, 2 marking of volume makes
# 2 cells the '+' cells w.r.t to surface and n('+') would therefore be their
# outer normal (that is an outer normal of the outside). ('-') makes the orientation
# right
n = FacetNormal(mesh)('-')
# Now onto the weak form
# Electric properties of membrane and interior/exterior
C_m = Constant(1)       # 1 mu F / cm^2 @ 1
C_mcc = Constant(1.1)   # @ 3
cond_int = Constant(5)  # 5 mS / cm
cond_ext = Constant(20) # 20 mS / cm
# Time step
dt_fem = Constant(1E-3) # ms
# The source term as a function Q is coming from ODE solver. Here it is
# just some random function
Q = FunctionSpace(mesh, Qel)
p0 = interpolate(Constant(1), Q)
# And additional source on the boundary is the ionic current. For simplicity
I_ion = p0
# The source term for cell-cell interface
I_gap = 2*p0
# The system
# dS(1): cell-exterior membrane terms; dS(3): cell-cell (gap junction) terms.
a = ((1/cond_int)*inner(sigma, tau)*dx(1)+(1/cond_ext)*inner(sigma, tau)*dx(2)
     - inner(div(tau), u)*dx(1) - inner(div(tau), u)*dx(2)
     + inner(p('+'), dot(tau('+'), n))*dS(1)
     + inner(p('+'), dot(tau('+'), n))*dS(3)
     - inner(div(sigma), v)*dx(1) - inner(div(sigma), v)*dx(2)
     + inner(q('+'), dot(sigma('+'), n))*dS(1)
     + inner(q('+'), dot(sigma('+'), n))*dS(3)
     - (C_m/dt_fem)*inner(q('+'), p('+'))*dS(1)
     - (C_mcc/dt_fem)*inner(q('+'), p('+'))*dS(3))
L = (inner(q('+'), I_ion('+')-(C_m/dt_fem)*p0('+'))*dS(1)
    + inner(q('+'), I_gap('+')-(C_mcc/dt_fem)*p0('+'))*dS(3))
# Additional terms to set to zero the dofs of W.sub(2) which are not on
# the interfaces
a -= inner(p('+'), q('+'))*dS(0) + inner(p, q)*ds(2)
L -= inner(Constant(0)('+'), q('+'))*dS(0) + inner(Constant(0), q)*ds(2)
A, b = PETScMatrix(), PETScVector()
assemble_system(a, L, bcs, A_tensor=A, b_tensor=b)
info("size(A) = %d" % A.size(0))
| 38.404255 | 84 | 0.650139 |
c27e409359b0212f22ca1f835566250303f86f95 | 165 | py | Python | customers/tests/test_gemsutils.py | dcopm999/sibdev | 9dc01ed5d172869d4870c847f01d168602f31be8 | [
"MIT"
] | null | null | null | customers/tests/test_gemsutils.py | dcopm999/sibdev | 9dc01ed5d172869d4870c847f01d168602f31be8 | [
"MIT"
] | null | null | null | customers/tests/test_gemsutils.py | dcopm999/sibdev | 9dc01ed5d172869d4870c847f01d168602f31be8 | [
"MIT"
] | null | null | null | from django.test import TestCase
from customers.gems_utils import Gems
| 16.5 | 37 | 0.690909 |
c2818a738014513d0cd2309428321fbec20d821d | 2,855 | py | Python | commands/load_metadata/products.py | DataViva/dataviva-scripts | 1e36f11e2849c33b8118cefe1755d312b19c0ecd | [
"MIT"
] | 10 | 2015-05-20T14:41:23.000Z | 2020-05-27T22:36:19.000Z | commands/load_metadata/products.py | DataViva/dataviva-scripts | 1e36f11e2849c33b8118cefe1755d312b19c0ecd | [
"MIT"
] | 11 | 2018-05-17T14:30:58.000Z | 2018-09-06T21:20:34.000Z | commands/load_metadata/products.py | DataViva/dataviva-scripts | 1e36f11e2849c33b8118cefe1755d312b19c0ecd | [
"MIT"
] | 12 | 2015-07-14T13:46:41.000Z | 2019-09-20T00:47:10.000Z | import click
import pandas
import pickle
import json
from clients import s3, redis
| 33.588235 | 119 | 0.561121 |
c282ea17115bac2dbe64555ce16709d1698a3231 | 5,158 | py | Python | docker/entrypoint_benchmark.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | 88 | 2018-03-24T11:57:10.000Z | 2022-03-21T13:04:41.000Z | docker/entrypoint_benchmark.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | 558 | 2018-03-14T14:16:30.000Z | 2022-03-29T12:48:14.000Z | docker/entrypoint_benchmark.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | 41 | 2018-04-09T11:13:12.000Z | 2021-10-05T14:43:11.000Z | #!/usr/bin/env python3
import argparse
import os
import io
import subprocess
import sys
from tabulate import tabulate
def run_tests(test_dir, run_dir, log_fp, oasis_args, threshold=None):
    '''
    Run the oasislmf file-generation benchmark for every test case directory
    under `test_dir`, collect the per-stage timings and optionally fail fast
    when a run exceeds `threshold` seconds.

    test_dir   -- directory whose sub-directories each hold loc/acc/keys CSVs
    run_dir    -- output directory passed as --oasis-files-dir
    log_fp     -- file path handed to tabulate_data for the results table
    oasis_args -- extra CLI arguments appended to the oasislmf command
    threshold  -- optional max allowed total runtime (seconds) per case

    Output of each run entry in `results`

    In [3]: example_run
    Out[3]:
    {'total': 88.63,
     'oasislmf.manager.__init__': 0.0,
     'oasislmf.model_preparation.gul_inputs.get_gul_input_items': 16.05,
     'oasislmf.model_preparation.gul_inputs.write_items_file': 3.84,
     'oasislmf.model_preparation.gul_inputs.write_coverages_file': 1.88,
     'oasislmf.model_preparation.gul_inputs.write_gul_input_files': 5.94,
     'oasislmf.model_preparation.summaries.get_summary_mapping': 0.8,
     'oasislmf.model_preparation.summaries.write_mapping_file': 6.77,
     'oasislmf.model_preparation.il_inputs.get_il_input_items': 30.42,
     'oasislmf.model_preparation.il_inputs.write_fm_policytc_file': 8.49,
     'oasislmf.model_preparation.il_inputs.write_fm_profile_file': 1.59,
     'oasislmf.model_preparation.il_inputs.write_fm_programme_file': 7.52,
     'oasislmf.model_preparation.il_inputs.write_fm_xref_file': 2.98,
     'oasislmf.model_preparation.il_inputs.write_il_input_files': 21.44}
    '''
    sub_dirs = next(os.walk(test_dir))[1]
    test_data = dict()
    results = dict()
    for d in sub_dirs:
        loc_fp = os.path.join(test_dir, d, 'loc.csv')
        acc_fp = os.path.join(test_dir, d, 'acc.csv')
        keys_fp = os.path.join(test_dir, d, 'keys.csv')
        # Test-case size = number of data rows in the location file (header
        # excluded). Use a context manager so the handle is closed (the
        # original `open(loc_fp)` leaked a file descriptor per test case).
        with open(loc_fp) as loc_file:
            n_sample = sum(1 for line in loc_file) - 1
        cmd_str = f'oasislmf model generate-oasis-files -x {loc_fp} -y {acc_fp} -z {keys_fp} --oasis-files-dir {run_dir} {oasis_args} --verbose'
        test_data[n_sample] = cmd_str
    # Run the benchmarks in ascending size order.
    for t in sorted(test_data.keys()):
        print('Running: ')
        print(f"cmd = {test_data[t]}")
        print(f'size = {t}')
        print(f't_max = {threshold}')
        stdout = run_command(test_data[t])
        # NOTE(review): helper name is misspelled ('pasrse') at its
        # definition elsewhere in this file too, so the call must match.
        run = pasrse_gen_output(stdout)
        results[t] = run
        print(f"t_total = {run['total']}\n")
        # If given check that threshold isn't exceeded
        if threshold:
            if run['total'] > threshold:
                print('FAILED\n')
                tabulate_data(results, log_fp)
                sys.exit(1)
            else:
                print('PASSED\n')
    tabulate_data(results, log_fp)
    return results
if __name__ == "__main__":
    # CLI entry point: parse_args() (defined elsewhere in this file) returns
    # a dict of command-line options which are forwarded to run_tests.
    args = parse_args()
    run_tests(args['test_directory'],
              args['output_directory'],
              args['log_output'],
              args['extra_oasislmf_args'],
              args['time_threshold'])
| 35.819444 | 144 | 0.647732 |
c28326651456ece0ed2b23f9a58a7727eada65f9 | 191 | py | Python | run.py | elchingon/flask-temperature-relay-app | ef2fcdf039eb2be5ef28a53ca1727d73c0732d0f | [
"MIT"
] | null | null | null | run.py | elchingon/flask-temperature-relay-app | ef2fcdf039eb2be5ef28a53ca1727d73c0732d0f | [
"MIT"
] | null | null | null | run.py | elchingon/flask-temperature-relay-app | ef2fcdf039eb2be5ef28a53ca1727d73c0732d0f | [
"MIT"
] | null | null | null | #!/bin/env python
from app import create_app, socketio
from app.db_setup import init_db
# Build the Flask application and initialise the database schema at import
# time, then serve it through Flask-SocketIO when run as a script.
app = create_app(debug=False)
init_db()
if __name__ == '__main__':
    # Serve on port 5001; the host defaults to localhost.
    socketio.run(app, port=5001)
| 17.363636 | 36 | 0.73822 |
c2844b9558a0aad3fcd5e9e967cacb650e5737e3 | 1,427 | py | Python | GUI/index.py | Abhishek2019/Speech | 416827a02279cdafd268ef2748d4f4f52b0f0e15 | [
"MIT"
] | null | null | null | GUI/index.py | Abhishek2019/Speech | 416827a02279cdafd268ef2748d4f4f52b0f0e15 | [
"MIT"
] | null | null | null | GUI/index.py | Abhishek2019/Speech | 416827a02279cdafd268ef2748d4f4f52b0f0e15 | [
"MIT"
] | null | null | null | # from tkinter import *
# root = Tk()
# frametop = Frame(root)
# framebottom = Frame(root)
# frameleft = Frame(framebottom)
# frameright = Frame(framebottom)
# text = Text(frametop)
# scroll = Scrollbar(frametop, command=text.yview)
# btn1 = Button(frameleft, text="Course")
# btn2 = Button(frameleft, text="Abscences")
# btn3 = Button(frameright, text="Notes")
# btn4 = Button(frameright, text="Return")
# text['yscrollcommand'] = scroll.set
# frametop.pack(side=TOP, fill=BOTH, expand=1)
# framebottom.pack(side=BOTTOM, fill=BOTH, expand=1)
# frameleft.pack(side=LEFT, fill=BOTH, expand=1)
# frameright.pack(side=RIGHT, fill=BOTH, expand=1)
# text.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# scroll.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# btn1.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# btn2.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# btn3.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# btn4.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# root.mainloop()
from tkinter import *
# Minimal Tk window: a bottom button bar holding two buttons ('Reset' and
# 'Run') that stretch to share the window width equally.
root = Tk()
button_frame = Frame(root)
button_frame.pack(fill=X, side=BOTTOM)
reset_button = Button(button_frame, text='Reset')
run_button = Button(button_frame, text='Run')
# Equal column weights make both grid cells expand identically on resize.
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=1)
reset_button.grid(row=0, column=0, sticky=W+E)
run_button.grid(row=0, column=1, sticky=W+E)
root.mainloop() | 29.122449 | 63 | 0.716889 |
c284f0eeab9d175ef398f1a574b272296ad415fe | 2,270 | py | Python | tests/0400_i18n/08_update_catalogs.py | sveetch/Optimus | 983aebeccd2ada7a5a0ab96f9296d4bba1112022 | [
"MIT"
] | 2 | 2019-05-31T00:23:15.000Z | 2021-04-26T07:26:16.000Z | tests/0400_i18n/08_update_catalogs.py | sveetch/Optimus | 983aebeccd2ada7a5a0ab96f9296d4bba1112022 | [
"MIT"
] | 27 | 2015-04-21T14:43:26.000Z | 2022-01-29T00:42:53.000Z | tests/0400_i18n/08_update_catalogs.py | sveetch/Optimus | 983aebeccd2ada7a5a0ab96f9296d4bba1112022 | [
"MIT"
] | 1 | 2017-05-21T17:32:28.000Z | 2017-05-21T17:32:28.000Z | import os
import logging
import shutil
from optimus.i18n.manager import I18NManager
def test_update_catalogs_all(
    minimal_i18n_settings, caplog, temp_builds_dir, fixtures_settings
):
    """
    update_catalogs() without arguments should update the catalog (PO) file
    of every language configured in the sample project, and log one INFO
    message per updated language.
    """
    basepath = temp_builds_dir.join("i18n_update_catalogs_all")
    # Copy sample project to temporary dir
    samplename = "minimal_i18n"
    samplepath = os.path.join(fixtures_settings.fixtures_path, samplename)
    destination = os.path.join(basepath.strpath, samplename)
    shutil.copytree(samplepath, destination)
    # Get manager with settings
    settings = minimal_i18n_settings(destination)
    manager = I18NManager(settings)
    updated = manager.update_catalogs()
    assert updated == ["en_US", "fr_FR"]
    # One log record per language, in update order.
    assert caplog.record_tuples == [
        (
            "optimus",
            logging.INFO,
            "Updating catalog (PO) for language 'en_US' to {}".format(
                manager.get_po_filepath("en_US")
            ),
        ),
        (
            "optimus",
            logging.INFO,
            "Updating catalog (PO) for language 'fr_FR' to {}".format(
                manager.get_po_filepath("fr_FR")
            ),
        ),
    ]
def test_update_catalogs_one(
    minimal_i18n_settings, caplog, temp_builds_dir, fixtures_settings
):
    """
    update_catalogs() restricted to the default locale should update (and
    log) only that language's catalog, leaving the PO file in place.
    """
    basepath = temp_builds_dir.join("i18n_update_catalogs_one")
    # Copy sample project to temporary dir
    samplename = "minimal_i18n"
    samplepath = os.path.join(fixtures_settings.fixtures_path, samplename)
    destination = os.path.join(basepath.strpath, samplename)
    shutil.copytree(samplepath, destination)
    # Get manager with settings
    settings = minimal_i18n_settings(destination)
    manager = I18NManager(settings)
    updated = manager.update_catalogs([settings.LANGUAGE_CODE])
    assert updated == [settings.LANGUAGE_CODE]
    assert os.path.exists(manager.get_po_filepath(settings.LANGUAGE_CODE)) is True
    # Only the default locale ('en_US' in the sample settings) is logged.
    assert caplog.record_tuples == [
        (
            "optimus",
            logging.INFO,
            "Updating catalog (PO) for language 'en_US' to {}".format(
                manager.get_po_filepath(settings.LANGUAGE_CODE)
            ),
        ),
    ]
| 28.024691 | 82 | 0.654626 |
c2873e4c6778750f7796b281ee5e6991fa174ee7 | 3,707 | py | Python | StreamPy/TestExamplesListToStreams/test_example_element_single_in_single_out_stateful.py | AnomalyInc/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | 2 | 2017-04-27T11:04:27.000Z | 2019-02-07T21:03:32.000Z | StreamPy/TestExamplesListToStreams/test_example_element_single_in_single_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | StreamPy/TestExamplesListToStreams/test_example_element_single_in_single_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | """This module contains examples of stream_func where f_type
is 'element' and stream_func has a single input stream, and
a single output stream, and the operation is stateful.
The state captures information in the past input streams;
this information is required to append values to the tails
of the output streams.
The functions on static Python data structures are of the form:
element, state -> element, state
These functions typically have the following structure:
(1) Extract variables from the state.
(2) Compute the output and the new state.
(3) Return (output, new_state)
"""
if __package__ is None:
    # Allow running this file directly (not as a package module) by putting
    # the project root two levels up onto sys.path.
    import sys
    from os import path
    sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from functools import partial
from Stream import Stream
from Operators import stream_func
from stream_test import *
# Functions: element, state -> element, state
def cumulative_sum(v, cumulative):
    """Running-total element function for a stream.

    ``cumulative`` is the state: the sum of all values received so far on
    the input stream.  Given the next input value ``v``, the new running
    total is produced both as the output element and as the new state, so
    the n-th output is the cumulative sum of the first n inputs.
    """
    new_total = cumulative + v
    return (new_total, new_total)
def average(v, state):
    """Running-mean element function for a stream.

    The state is the pair ``(n, cum)``: how many values have been received
    on the input stream and their sum.  For the next input value ``v`` the
    function outputs the mean of the first ``n + 1`` values and returns the
    updated ``(n + 1, cum + v)`` state.
    """
    count, total = state
    count = count + 1
    total = total + v
    return (total / float(count), (count, total))
# Functions: stream -> stream.
# Each element of the output stream is f() applied to the corresponding
# element of the input stream.
# Partial applications of stream_func: fix the element function, the number
# of outputs and the initial state, leaving the input stream to be supplied
# by the caller.
stream_cumulative = partial(stream_func, f_type='element', f=cumulative_sum,
                            num_outputs=1, state=0)
stream_average = partial(stream_func, f_type='element', f=average,
                         num_outputs=1, state=(0,0.0))
if __name__ == '__main__':
    # test() is provided by `from stream_test import *` above.
    test()
| 29.656 | 77 | 0.669005 |
c288905ef79b8a14c466f9e48a449c4d916507ed | 7,815 | py | Python | summer21/mpqa_dataprocessing/mpqa3_to_dict.py | gu-sentiment-2021/sent | a3874a7286c965684d92fcf78e4091ad3a33aae1 | [
"MIT"
] | null | null | null | summer21/mpqa_dataprocessing/mpqa3_to_dict.py | gu-sentiment-2021/sent | a3874a7286c965684d92fcf78e4091ad3a33aae1 | [
"MIT"
] | null | null | null | summer21/mpqa_dataprocessing/mpqa3_to_dict.py | gu-sentiment-2021/sent | a3874a7286c965684d92fcf78e4091ad3a33aae1 | [
"MIT"
] | null | null | null | # mpqa3_to_dict helps to convert MPQA stand-off format to python dictionaries.
# It provides the following functionalities:
# 1) Clean up the MPQA 3.0 corpus
# 2) Convert an MPQA document to a dictionary
# 3) Convert an entire corpus to a dictionary
import os
import re
HAS_LIST_OF_IDS = [ # These attributes may have any number of ids. (>= 0)
"nested-source", "attitude-link", "insubstantial",
"sTarget-link", "newETarget-link", "eTarget-link",
"target-speech-link"
]
| 47.078313 | 124 | 0.583109 |
c289ff863c8d7033135d9b33b4209cc7939b98dc | 1,283 | py | Python | dev/Gems/CloudGemInGameSurvey/AWS/lambda-code/ServiceLambda/api/active_surveys_player_submissions.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemInGameSurvey/AWS/lambda-code/ServiceLambda/api/active_surveys_player_submissions.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemInGameSurvey/AWS/lambda-code/ServiceLambda/api/active_surveys_player_submissions.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import service
import CloudCanvas
import survey_utils
import survey_common
import validation_utils
import validation_common
from boto3.dynamodb.conditions import Key
| 33.763158 | 110 | 0.749805 |
c28c89d7b55ba84be47ce0c0270354c71ec824ef | 402 | py | Python | exercicio57.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio57.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio57.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | #Crie um programa que tenha uma tupla com vrias palavras (no usar acentos). Depois disso, voc deve mostrar, para cada palavra, quais so as suas vogais.
palavras=('SOPA','BATATAS','CACAU','CASTANHA','LASANHA','GOSTOSURAS','TRAVESSURAS','PARMEGIANA')
for p in palavras:
print(f'\n As Vogais de {p} so: ',end='')
for letra in p:
if letra in 'AEIOU':
print(letra, end=' ') | 57.428571 | 156 | 0.666667 |
c28cb4aefdf64fe8ea7d0c518f1c67e77950a4da | 2,534 | py | Python | marketgrab/views.py | colinmcglone/window-time | 74ed90440b9bb93fa569534c7557972242569d3a | [
"MIT"
] | null | null | null | marketgrab/views.py | colinmcglone/window-time | 74ed90440b9bb93fa569534c7557972242569d3a | [
"MIT"
] | null | null | null | marketgrab/views.py | colinmcglone/window-time | 74ed90440b9bb93fa569534c7557972242569d3a | [
"MIT"
] | null | null | null | import os
from django.conf import *
from django.shortcuts import render_to_response, render
from django.http import HttpResponse
from .models import Data, MovingAvg, Movements, Sigma
from datetime import datetime
from django.template import RequestContext
| 33.786667 | 97 | 0.609708 |
c28e4edca7ae401126ff0a6fbb8cb56b375f7be5 | 195 | py | Python | Regular Expression/Introduction/Matching Whitespace % Non-whitespace Characters/matching_whitespace_and_non-whitespace_characters.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | 2 | 2020-05-28T07:15:00.000Z | 2020-07-21T08:34:06.000Z | Regular Expression/Introduction/Matching Whitespace % Non-whitespace Characters/matching_whitespace_and_non-whitespace_characters.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null | Regular Expression/Introduction/Matching Whitespace % Non-whitespace Characters/matching_whitespace_and_non-whitespace_characters.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null |
# Method_#1
#Regex_Pattern = r"\S\S\s\S\S\s\S\S" # Do not delete 'r'.
# Method_#2
Regex_Pattern = r"(\S\S\s){2}(\S\S){1}"
import re
print(str(bool(re.search(Regex_Pattern, input()))).lower()) | 19.5 | 59 | 0.625641 |
c28f5399f5765ea45b857d48fc29ac34ed27c2a1 | 842 | py | Python | causalpy/utils/utils.py | maichmueller/CausalPy | d2dcd2c557e1ccdfa1fb77f3adbf78b0ea63c735 | [
"MIT"
] | null | null | null | causalpy/utils/utils.py | maichmueller/CausalPy | d2dcd2c557e1ccdfa1fb77f3adbf78b0ea63c735 | [
"MIT"
] | null | null | null | causalpy/utils/utils.py | maichmueller/CausalPy | d2dcd2c557e1ccdfa1fb77f3adbf78b0ea63c735 | [
"MIT"
] | null | null | null | import os
import shutil
| 24.057143 | 55 | 0.549881 |
c28fad06e8a818ac6ddee1694bd9cf847cd0da24 | 205 | py | Python | Semester 5 (PIP)/assignment1/prog4.py | MartyMiniac/ITER-Assignment | a7b355f40cc52a337ad90bb8328e54c4a9534530 | [
"MIT"
] | 14 | 2020-11-11T08:48:58.000Z | 2022-02-26T03:59:05.000Z | Semester 5 (PIP)/assignment1/prog4.py | SKSTCODE42/ITER-Assignment | a7b355f40cc52a337ad90bb8328e54c4a9534530 | [
"MIT"
] | 4 | 2020-11-12T13:31:14.000Z | 2021-06-21T05:41:34.000Z | Semester 5 (PIP)/assignment1/prog4.py | SKSTCODE42/ITER-Assignment | a7b355f40cc52a337ad90bb8328e54c4a9534530 | [
"MIT"
] | 10 | 2020-11-07T15:09:20.000Z | 2022-02-26T03:56:50.000Z | regno='1941012661'
year=2019
# print('My Regd. No is %s and I have taken admission in B. Tech. In %d.' %(regno, year))
print('My Regd. No is', regno,'and I have taken admission in B. Tech. In', year,'.' ) | 41 | 89 | 0.658537 |
c2902a4704b44239ff0cf04b847569517aff459d | 483 | py | Python | api_server/apps/speech_api/models.py | take2make/sra_api | 923193789fdbfdd1022782f92bf2f041ddc4da29 | [
"MIT"
] | null | null | null | api_server/apps/speech_api/models.py | take2make/sra_api | 923193789fdbfdd1022782f92bf2f041ddc4da29 | [
"MIT"
] | null | null | null | api_server/apps/speech_api/models.py | take2make/sra_api | 923193789fdbfdd1022782f92bf2f041ddc4da29 | [
"MIT"
] | null | null | null | """
,
"""
from django.db import models
| 26.833333 | 71 | 0.728778 |
c291f9afa8d4a69dbe3f4791438b896f2870685a | 1,690 | py | Python | setup.py | krismolendyke/den | aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d | [
"MIT"
] | 6 | 2015-06-20T21:54:21.000Z | 2017-11-29T03:00:15.000Z | setup.py | krismolendyke/den | aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d | [
"MIT"
] | 1 | 2017-02-13T09:08:54.000Z | 2017-02-13T09:33:46.000Z | setup.py | krismolendyke/den | aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d | [
"MIT"
] | null | null | null | """setuptools entry point."""
from codecs import open
from os import path
from setuptools import find_packages, setup

# Directory containing this setup.py; anchors the relative reads below so the
# build works regardless of the current working directory.
HERE = path.abspath(path.dirname(__file__))

# PyPI long description comes straight from the README (codecs.open is used so
# the encoding can be forced under Python 2.7 as well).
with open(path.join(HERE, "README.rst"), encoding="utf-8") as f:
    LONG_DESCRIPTION = f.read()

# Single-source the package version from the VERSION file that ships inside
# the package itself (see package src/den).
with open(path.join(HERE, "src", "den", "VERSION")) as version_file:
    VERSION = version_file.read().strip()

setup(
    name="den",
    version=VERSION,
    description="Den is a home for your home's data.",
    long_description=LONG_DESCRIPTION,
    author="Kris Molendyke",
    author_email="kris@k20e.com",
    url="https://git.io/k20e",
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"
    ],
    keywords="nest thermostat smoke alarm camera weather propane monitor",
    # src/ layout: packages live under src, mapped to the distribution root.
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=["backoff>=1.3.2", "influxdb>=3.0", "python-forecastio>=1.3.5", "requests>=2.0"],
    # Optional dependency groups: pip install den[dev], den[doc], etc.
    extras_require={
        "dev": [
            "tox",
            "yapf",
        ],
        "doc": [
            "Sphinx",
            "alabaster",
            "sphinx-argparse",
            "sphinx-autobuild",
        ],
        "notebook": ["jupyter", ],
        "test": [
            "coverage",
            "prospector",
            "mock",
            "responses",
        ],
    },
    package_data={},
    # include_package_data pulls in files declared via MANIFEST.in (e.g. the
    # VERSION file read above must be packaged for runtime access).
    include_package_data=True,
    data_files=[],
    test_suite="tests",
    python_requires=">=2.7",
    entry_points={"console_scripts": ["den = den.__main__:main", ], }, )
| 29.649123 | 118 | 0.57929 |
c293b3a4ad307a538250b63b7b3b8429f3fda47c | 25,807 | py | Python | apgl/util/Util.py | mathemaphysics/APGL | 6ca7c176e04017feeae00c4cee069fd126df0fbc | [
"BSD-3-Clause"
] | 13 | 2015-02-19T14:39:09.000Z | 2021-04-12T01:22:32.000Z | apgl/util/Util.py | mathemaphysics/APGL | 6ca7c176e04017feeae00c4cee069fd126df0fbc | [
"BSD-3-Clause"
] | 1 | 2020-07-29T07:09:33.000Z | 2020-07-29T07:09:33.000Z | apgl/util/Util.py | mathemaphysics/APGL | 6ca7c176e04017feeae00c4cee069fd126df0fbc | [
"BSD-3-Clause"
] | 7 | 2015-03-16T07:26:49.000Z | 2021-01-12T06:57:27.000Z | '''
Created on 31 Jul 2009
@author: charanpal
'''
from __future__ import print_function
import sys
import os
import numpy
from contextlib import contextmanager
import numpy.random as rand
import logging
import scipy.linalg
import scipy.sparse as sparse
import scipy.special
import pickle
from apgl.util.Parameter import Parameter
| 35.016282 | 209 | 0.551168 |
c29405121608c5b0e9800d088104121ec6141017 | 7,242 | py | Python | src/python/weblyzard_api/client/jesaja_ng.py | PhilippKuntschik/weblyzard_api | 415df7d2c3e625e96636ad0ab91f3ba669db64ea | [
"Apache-2.0"
] | null | null | null | src/python/weblyzard_api/client/jesaja_ng.py | PhilippKuntschik/weblyzard_api | 415df7d2c3e625e96636ad0ab91f3ba669db64ea | [
"Apache-2.0"
] | null | null | null | src/python/weblyzard_api/client/jesaja_ng.py | PhilippKuntschik/weblyzard_api | 415df7d2c3e625e96636ad0ab91f3ba669db64ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
.. codeauthor: Albert Weichselbraun <albert.weichselbraun@htwchur.ch>
.. codeauthor:: Heinz-Peter Lang <lang@weblyzard.com>
'''
from __future__ import print_function
from __future__ import unicode_literals
from eWRT.ws.rest import MultiRESTClient
from weblyzard_api.client import (
WEBLYZARD_API_URL, WEBLYZARD_API_USER, WEBLYZARD_API_PASS)
| 38.727273 | 140 | 0.609362 |
c29512076f4adfe1c703eb019e1315c92cfb30fe | 3,342 | py | Python | tasks/utilities/runner.py | faisaltheparttimecoder/carelogBackend | b0635e72338e14dad24f1ee0329212cd60a3e83a | [
"MIT"
] | 1 | 2020-04-09T11:45:14.000Z | 2020-04-09T11:45:14.000Z | tasks/utilities/runner.py | faisaltheparttimecoder/carelogBackend | b0635e72338e14dad24f1ee0329212cd60a3e83a | [
"MIT"
] | 2 | 2020-06-05T18:04:30.000Z | 2021-06-10T20:11:46.000Z | tasks/utilities/runner.py | faisaltheparttimecoder/carelogBackend | b0635e72338e14dad24f1ee0329212cd60a3e83a | [
"MIT"
] | null | null | null | import datetime, os
from django.contrib.auth.models import User
from products.lib.data_load import LoadProducts
from zendesk.lib.load_tickets import LoadTickets
from tasks.engine.maintenance import Maintenance
from tasks.models import LastRun
| 37.550562 | 124 | 0.648115 |
c298455b91c04670dd6ada8face196e4608ff57c | 1,667 | py | Python | code/mlp-test.py | asdlei99/firewall | fd2819fab4cfde9989350397300efd4321e197fa | [
"MIT"
] | 1 | 2020-03-01T21:17:01.000Z | 2020-03-01T21:17:01.000Z | code/mlp-test.py | asdlei99/firewall | fd2819fab4cfde9989350397300efd4321e197fa | [
"MIT"
] | null | null | null | code/mlp-test.py | asdlei99/firewall | fd2819fab4cfde9989350397300efd4321e197fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 10:51:14 2018
@author: peter
"""
from sklearn.feature_extraction.text import TfidfVectorizer
import os
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
import urllib.parse
from sklearn.externals import joblib
badQueries = loadFile('badqueries.txt')#
validQueries = loadFile('goodqueries.txt')#
#
badQueries = list(set(badQueries))
validQueries = list(set(validQueries))
allQueries = badQueries + validQueries
#
yBad = [1 for i in range(0, len(badQueries))]
yGood = [0 for i in range(0, len(validQueries))]
y = yBad + yGood
queries = allQueries
#TF-IDF
vectorizer = TfidfVectorizer(min_df = 0.0, analyzer="char", sublinear_tf=True, ngram_range=(1,3))
X = vectorizer.fit_transform(queries)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
badCount = len(badQueries)
validCount = len(validQueries)
#mlp
mlp = joblib.load("mlp-module.m")
predicted=mlp.predict(X_test)
print("Bad samples: %d" % badCount)
print("Good samples: %d" % validCount)
print("Accuracy: %f" % mlp.score(X_test, y_test))
print("Precision: %f" % metrics.precision_score(y_test, predicted))
print("Recall: %f" % metrics.recall_score(y_test, predicted)) | 27.327869 | 98 | 0.720456 |
c298b609bf3203502c3817910d7a265918d513ee | 3,176 | py | Python | crud.py | wileeam/discount-code-service | 74ccd0564115c636ed8d825e41d8e7d1bec33ded | [
"Apache-2.0"
] | null | null | null | crud.py | wileeam/discount-code-service | 74ccd0564115c636ed8d825e41d8e7d1bec33ded | [
"Apache-2.0"
] | null | null | null | crud.py | wileeam/discount-code-service | 74ccd0564115c636ed8d825e41d8e7d1bec33ded | [
"Apache-2.0"
] | null | null | null | import random
import string
from sqlalchemy.orm import Session
import models, schemas
| 34.521739 | 86 | 0.573992 |
c298ed848bd524b876312d8bab6fc24cd9b74131 | 28,847 | py | Python | python-obj-system.py | AlexPl292/python-obj-system | 7d0e743e4785989f54094ea0bac05e3681c308c2 | [
"MIT"
] | 54 | 2021-12-06T10:44:17.000Z | 2022-02-16T23:35:16.000Z | python-obj-system.py | AlexPl292/python-obj-system | 7d0e743e4785989f54094ea0bac05e3681c308c2 | [
"MIT"
] | 1 | 2022-01-22T07:09:16.000Z | 2022-01-22T07:09:16.000Z | python-obj-system.py | AlexPl292/python-obj-system | 7d0e743e4785989f54094ea0bac05e3681c308c2 | [
"MIT"
] | 4 | 2021-12-22T08:07:56.000Z | 2022-03-10T01:51:40.000Z | #!/usr/bin/env python3
from mdpyformat import *
import pprintex
header_md("""Python object primer for Python3 / meta classes""" )
header_md("""Introduction""", nesting = 2)
print_md("""
Python is good at creating the illusion of being a simple programming language. Sometimes this illusion fails, like when you have to deal with the import/module system [my attempts to get it](https://github.com/MoserMichael/pythonimportplayground). Another area of complexity is the object system, last week I tried to understand [python enums](https://docs.python.org/3/library/enum.html), it turns that they are built on top of [meta classes](https://github.com/python/cpython/blob/2c56c97f015a7ea81719615ddcf3c745fba5b4f3/Lib/enum.py#L511), So now I have come to realize, that I really don't know much about python and its object system. The purpose of this text is to figure out, how the python object system ticks.
""")
header_md("""The Python object system""", nesting=2)
header_md("""How objects are represented""", nesting=3)
print_md("""
Lets look at a simple python class Foo with a single base class Base, and see how objects are created and represented in memory
""")
eval_and_quote("""
# The base class. All Python3 classes have the base class of type object.
# The long form is therefore
# class Base(object):
# However Pylint will tell you, that this long form is redundant
class Base:
# Class variables are shared between all instances of the class Base, and declared like this:
base_class_var = "Base"
# The object constructor/init method, Note the first 'self' argument, which refers to the object instance.
def __init__(self):
print("calling Base.__init__")
# Object variables are specific to a given instance of Base
# Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method)
self.obj_var_base = 10
# An object method - needs to access the object instance, which is passed as first 'self' argument.
def show_base(self):
print_md("obj_var_base: ", self.obj_var_base)
# A class method/static method is called without an object instance.
@staticmethod
def make_base():
return Base()
# class Foo with a base class Base
class Foo(Base):
# Class variables are shared between all instances of the class Foo, and declared like this:
class_var = 42
class_var2 = 43
# The object constructor/init method, Note the first 'self' argument, which is the object instance.
def __init__(self):
# When not calling the base class __init__ method: the base class object variables are not added to the object !!!
# The base class __init__ adds the 'obj_var_base' member to the __dict__ member of this object instance.
# By convention: you first init the base classes, before initialising the derived class.
super().__init__()
print("calling Foo.__init__")
# Object variables are specific to a given instance of Foo
# Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method)
# Define object variable: obj_var_a
self.obj_var_a=42
# Define object variable: obj_var_b
self.obj_var_b="name"
# An object method - needs to access the object instance, which is passed as first 'self' argument.
def show_derived(self):
print_md("obj_var_a:", self.obj_var_a, "obj_var_b:", self.obj_var_b)
# A class method/static method is called without an object instance.
@staticmethod
def make_foo():
return Foo()
# Make a new object instance of type Foo class.
foo_obj=Foo()
""")
print_md("The memory address of object foo_obj is returned by the [id built-in](https://docs.python.org/3/library/functions.html#id)")
eval_and_quote('print("id(foo_obj) : ", id(foo_obj))')
print_md("If two variables have the same object id value, then they both refer to the very same object/instance!")
print_md("""
Each user defined object has a __dict__ attribute, this is a dictionary that lists all the object instance variables.
This also includes instance members that were added by the __init__ method of the base class !!
""")
eval_and_quote("""print("foo_obj.__dict__ : ", foo_obj.__dict__)""")
print_md("""
So you see that the following is exactly the same thing:
""")
eval_and_quote("""assert id(foo_obj.obj_var_a) == id( foo_obj.__dict__['obj_var_a'] ) """)
print_md("""
Wait, but where does the __dict__ attribute come from?
The [built-in getattr](https://docs.python.org/3/library/functions.html#getattr) function can return this built-in __dict__ attribute!
Interesting: the python notation object.member_name can mean different things:
1) for built-in attributes it means a call to getattr
2) for object instances (assigned in the __init__ method of the class) it means a call to retrieve the __dict__ attribute, and then a lookup of the variable name in that dictionary.
""")
print_md( """foo_obj.__dict__ and getattr(foo_obj,'__dict__',None) is the same thing! """)
eval_and_quote("""assert id(foo_obj.__dict__) == id( getattr(foo_obj,'__dict__',None) )""")
print_md("""
The getattr builtin function has a good part, its return value can be checked for None. This can be used, in order to check if the argument is an object with a __dict__ attribute.
""")
eval_and_quote("""base_obj = object()""")
print_md("An object of built-in type ", type(base_obj), " doesn't have a __dict__ member")
eval_and_quote("""assert getattr(base_obj, '__dict__', None) is None""")
eval_and_quote("""int_obj = 42""")
print_md("An object of built-in type ", type(int_obj), " doesn't have a __dict__ member")
eval_and_quote("""assert getattr(int_obj, '__dict__', None) is None""")
print_md("""
The [dir builtin](https://docs.python.org/3/library/functions.html#dir) function does different things, depending on the argument,
for regular objects it returns a "list that contains the objects attributes names, the names of its classs attributes, and recursively of the attributes of its classs base classes.",
all this is sorted alphabetically.
""")
eval_and_quote("""print("dir(foo_obj) : ", dir(foo_obj))""")
# doesn't have __slots__, how odd.
#print_md("foo_obj.__slots__ : ", foo_obj.__slots__)
header_md("""How classes are represented""", nesting=3)
print_md("""The built-in function [type](https://docs.python.org/3/library/functions.html#type), is returning the class of an object, when applied to a variable (to be more exact: type is a built-in class, and not a built-in function, more on that later)""")
eval_and_quote("""
# Make a new object instance of type Foo class.
foo_obj=Foo()
print("class of object foo_obj - type(foo_obj): ", type(foo_obj))
# That's the same as showing the __class__ member of the variable (in Python3)
print("foo_obj.__class__ :", foo_obj.__class__)
""")
print_md("""
The class is an object, it's purpose is to hold the static data that is shared between all object instances.
Each object has a built-in __class__ attribute, that refers to this class object.
Note that the name of the class includes the module name, __main__ if the class is defined in the file given as argument to the python interpreter.
Also note that the type built-in of type(foo_obj) is really the same as: str(foo_obj.__class__) (for Python3)
""")
print_md("""
Again, the built in attribute __class__ can also be accessed with the getattr built-in function.
""")
eval_and_quote( """
print("foo_obj.__class__ and getattr(foo_obj,'__class__',None) is the same thing!")
assert id(foo_obj.__class__) == id( getattr(foo_obj,'__class__',None) )
""")
print_md("""The __name__ and __qualname__ built-in attributes return the name of the class, without the module name """)
eval_and_quote( """
print("foo_boj.__class__.__name__ : ", foo_obj.__class__.__name__)
print("foo_boj.__class__.__qualname__ : ", foo_obj.__class__.__qualname__)""" )
print_md("""
To get the immediate base class list as declared in that particular class.
""")
eval_and_quote( """print("foo_obj.__class__.__bases__ :", foo_obj.__class__.__bases__)""")
print_md("""
The __mro__ member is a list of types that stands for 'method resoultion order', when searching for an instance method, this list is searched in order to resolve the method name.
The Python runtime creates this lists by enumerating all of its base classes recursively, in depth first traversal order. For each class it follows the base classes, from the left ot the right
This list is used to resolve a member function 'member_function' of an object, when you call it via: obj_ref.member_function()
""")
eval_and_quote( """print("foo_obj.__class__.__mro__ :", foo_obj.__class__.__mro__) """ )
print_md("Computing the method resolution order by hand")
eval_and_quote("""
# function to a class hierarchy, in depth first search order (like what you get in MRO - method resolution order)
def show_type_hierarchy(type_class):
def show_type_hierarchy_imp(type_class, nesting):
if len(type_class.__bases__) == 0:
return
prefix = "\t" * nesting
print( prefix + "type:", type_class.__name__ , "base types:", ",".join( map( lambda ty : ty.__name__, type_class.__bases__) ) )
#print( prefix + "str(", type_class.__name__ , ").__dict__ : ", type_class.__dict__ )
for base in type_class.__bases__:
show_type_hierarchy_imp(base, nesting+1)
if not inspect.isclass(type_class):
print("object ", str(type_class), " is not class")
return
print("show type hierarchy of class:")
show_type_hierarchy_imp(type_class, 0)
class LevelOneFirst:
pass
class LevelOneSecond:
pass
class LevelOneThird:
pass
class LevelTwoFirst(LevelOneFirst, LevelOneSecond):
pass
class LevelThree(LevelTwoFirst,LevelOneThird):
pass
show_type_hierarchy(LevelThree)
print("LevelThree.__mro__:", LevelThree.__mro__)
""")
eval_and_quote("""
print("*** mro in detail:")
for cls in foo_obj.__class__.__mro__:
print_md("\tclass-in-mro: ", str(cls), "id:", id(cls), "cls.__dict__: ", cls.__dict__)
print("*** eof mro in detail")
""")
print_md("""
The class object has a __dict__ too - here you will see all the class variables (for Foo these are class_var and class_var2) and class methods (defined with @staticmethod), but also the object methods (with the self parameter)
""")
eval_and_quote( """print("foo_obj.__class__.__dict__ : ", foo_obj.__class__.__dict__)""" )
# doen't have slots, how odd.
#print_md("foo_obj.__class__.__slots__ : ", foo_obj.__class__.__slots__)
print_md("""
Again, the [dir](https://docs.python.org/3/library/functions.html#dir) built-in function does different things, depending on the argument type
for a class object it returns a "list that contains the names of its attributes, and recursively of the attributes of its bases"
That means it displays both the names of static variables, and the names of the static functions, for the class and it's base classes.
Note that the names are sorted.
""")
eval_and_quote("""print("dir(foo_obj.__class__) : ", dir( foo_obj.__class__ ) )""")
print_md("""
The class object derives from built-in class type, you can check if an object is a class by checking if it is an instance of class 'type'!
""")
# check that foo_obj.__class__ is a type - it is derived from built-in class type
eval_and_quote("""
assert isinstance(foo_obj.__class__, type)
# same thing as
assert inspect.isclass(foo_obj.__class__)
# an object is not derived from class type.
assert not isinstance(foo_obj, type)
# same thng as
assert not inspect.isclass(foo_obj)
""")
print_md( """
Now there is much more: there is the inspect module that returns it all, a kind of rosetta stone of the python object model.
inspect.getmembers returns everything! You can see the source of inspect.getmembers [here](https://github.com/python/cpython/blob/3.10/Lib/inspect.py)
""")
eval_and_quote("""print("inspect.getmembers(foo_obj): ", inspect.getmembers(foo_obj))""")
print_md("""
Attention!
the type of the object is the class of the object (remember: the classes is an object, where the __dict__ member holds the class variables)
""")
eval_and_quote("""
print("type(foo_obj) : ", type(foo_obj))
# same thing in python3
print("str(foo_obj.__class__) : ", str(foo_obj.__class__) )""")
print_md("""
Let's look at both the type and identity of all these objects:
""")
eval_and_quote("""print("id(foo_obj) : ", id(foo_obj), " str(foo_obj) : ", str(foo_obj))""")
print_md("""
The following expressions refer to the same thing: the type of the object foo_obj, also known as the class of foo_obj
""")
eval_and_quote("""
print("type(foo_obj) :", type(foo_obj), " id(type(foo_obj)) :", id(type(foo_obj)), " type(foo_obj).__name__ : ", type(foo_obj).__name__ )
print("str(foo_obj.__class__) :", str(foo_obj.__class__), " id(foo_obj.__class__) :", id(foo_obj.__class__), "foo_obj.__class__.__name__ : ", foo_obj.__class__.__name__)
print("str(Foo) :", str(Foo), " id(Foo) :", id( Foo ), "Foo.__name__ :", Foo.__name__)
assert id(Foo) == id(type(foo_obj))
assert id(type(foo_obj)) == id(foo_obj.__class__)
""")
print_md("""
The Foo class members
""")
eval_and_quote("""
print("foo_obj.__class__.__dict__ :", foo_obj.__class__.__dict__)
print("Foo.__dict__ :", Foo.__dict__)
# everything accessible form the class
print("dir(foo_obj.__class__) :", dir( foo_obj.__class__))
""")
print_md("""
The following expressions refer to the same thing: the meta-type of the foo_obj.
""")
eval_and_quote("""
print("type(foo_obj.__class__.__class__):", type(foo_obj.__class__.__class__), " id( foo_obj.__class__.__class__ ) :" , id( foo_obj.__class__.__class__ ) , "foo_obj.__class__.__class__.__name__ : ", foo_obj.__class__.__class__.__name__ )
print("type(Foo) :", type(Foo), " id(type(Foo)) : ", id( type( Foo ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__)
print("type(Foo.__class__) :", type(Foo.__class__), " id(type(Foo.__class__)) : ", id( type( Foo.__class__ ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__)
print("type(Foo.__class__.__class__) :", type(Foo.__class__.__class__), " id(type(Foo.__class__.__class__)) :", id( type( Foo.__class__.__class__ ) ) )
assert type(Foo) == type(Foo.__class__)
assert type(Foo.__class__) == type(Foo.__class__.__class__)
""")
print_md("""
The type of the type is the metaclass - the metaclass constructs the Class object! (the class of an object is also an object!)
""")
eval_and_quote("""
print("type( type( foo_obj ) ) :", type( type( foo_obj ) ) )
print("str( foo_obj.__class__.__class__ ) :", str(foo_obj.__class__.__class__) )
""")
# result:
eval_and_quote("""
print(" metaclass members: foo_obj.__class__.__class__.__dict__ : ", foo_obj.__class__.__class__.__dict__)
print(" everything accessible form metaclass: dir( foo_obj.__class__.__class__ ) : ", dir( foo_obj.__class__.__class__) )
""")
print_md("""
Wow, any class can tell all of its derived classes! I wonder how that works...
""")
eval_and_quote("""print("Base.__subclasses__() : ", Base.__subclasses__())""")
header_md("""Object creation""", nesting=3)
print_md("""
Objects recap:
The object instance holds the __dict__ attribute of the object instance, it's value is a dictionary that holds the object instance members.
The class is an object that is shared between all object instances, and it holds the static data (class variables, class methods)
What happens upon: foo = Foo() ?
Take the type of Foo - the metaclass of Foo, the metaclass both knows how to create an instance of the class Foo, and the object instances.
A metaclass is derived from built-in class 'type', The 'type' constructor with three argument creates a new class object. [see reference](https://docs.python.org/3/library/functions.html#type)
class_obj = Foo
The metaclass is used as a 'callable' - it has a __call__ method, and can therefore be called as if it were a function (see more about callables in the course on [decorators](https://github.com/MoserMichael/python-obj-system/blob/master/decorator.md))
Now this __call__ method creates and initialises the object instance.
The implementation of __call__ now does two steps:
- Class creation is done in the [__new__](https://docs.python.org/3/reference/datamodel.html#object.__new__) method of the metaclass. The __new__ method creates the Foo class, it is called exactly once, upon class declaration (you will see this shortly, in the section on custom meta classes)
- It uses the Foo class and calls its to create and initialise the object (call the __new__ method of the Foo class, in order to create an instance of Foo, then calls the __init__ instance method of the Foo class, on order to initialise it). This all done by the __call__ method of the metaclass.
instance_of_foo = meta_class_obj.__call__()
(actually that was a bit of a simplification...
)
""")
eval_and_quote("""
# same as: foo_obj = Foo()
foo_obj = Foo.__call__()
print("foo_obj : ", foo_obj)
print("foo_obj.__dict__ : ", foo_obj.__dict__)
""")
print_md("This is the same as:")
eval_and_quote("""
class_obj = Foo
instance_of_foo = class_obj()
print("instance_of_foo : ", instance_of_foo)
print("instance_of_foo.__dict__ : ", instance_of_foo.__dict__)
""")
header_md("""Custom metaclasses""", nesting = 2)
header_md("""Metaclasses for implementing singleton objects""", nesting = 3)
print_md("""
An object can define a different way of creating itself, it can define a custom metaclass, which will do exactly the same object creation steps described in the last section.
Let's examine a custom metaclass for creating singleton objects.
""")
eval_and_quote("""
# metaclass are always derived from the type class.
# the type class has functions to create class objects
# the type class has also a default implementation of the __call__ method, for creating object instances.
class Singleton_metaclass(type):
# invoked to create the class object instance (for holding static data)
# this function is called exactly once, in order to create the class instance!
def __new__(meta_class, name, bases, cls_dict, **kwargs):
print("Singleton_metaclass: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}')
class_instance = super().__new__(meta_class, name, bases, cls_dict)
print("Singleton_metaclass: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance))
# the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class.
class_instance.__singleton_instance__ = None
return class_instance
def __call__(cls, *args, **kwargs):
# we get here to create an object instance. the class object has already been created.
print("Singleton_metaclass: __call__ args:", *args, f'kwargs: {kwargs}')
# check if the singleton has already been created.
if cls.__singleton_instance__ is None:
# create the one an only instance object.
instance = cls.__new__(cls)
# initialise the one and only instance object
instance.__init__(*args, **kwargs)
# store the singleton instance object in the class variable __singleton_instance__
cls.__singleton_instance__ = instance
# return the singleton instance
return cls.__singleton_instance__
import math
# the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo
class SquareRootOfTwo(metaclass=Singleton_metaclass):
# the __init__ method is called exactly once, when the first instance of the singleton is created.
# the square root of two is computed exactly once.
def __init__(self):
self.value = math.sqrt(2)
print("SquareRootOfTwo.__init__ self:", self)
print("creating the objects instances...")
sqrt_root_two_a = SquareRootOfTwo()
print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value)
sqrt_root_two_b = SquareRootOfTwo()
print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_two_a) == id(sqrt_root_two_b)
""")
header_md("""Passing arguments to metaclasses""", nesting = 3)
print_md(""""
Lets extend the previous singleton creating metaclass, so that it can pass parameters to the __init__ method of the object, these parameters are defined together with the metaclass specifier.
""")
eval_and_quote("""
# metaclass are always derived from the type class.
# The type class has functions to create class objects
# The type class has also a default implementation of the __call__ method, for creating object instances.
class Singleton_metaclass_with_args(type):
# invoked to create the class object instance (for holding static data)
# this function is called exactly once, in order to create the class instance!
def __new__(meta_class, name, bases, cls_dict, **kwargs):
print("Singleton_metaclass_with_args: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}')
class_instance = super().__new__(meta_class, name, bases, cls_dict)
print("Singleton_metaclass_with_args: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance))
# the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class.
class_instance.__singleton_instance__ = None
# the keywords that have been specified, are passed into the class creation method __new__.
# save them as a class variable, so as to pass them to the object constructor!
class_instance.__kwargs__ = kwargs
return class_instance
def __call__(cls, *args, **kwargs):
# we get here to create an object instance. the class object has already been created.
print("Singleton_metaclass_with_args: __call__ args:", *args, f'kwargs: {kwargs}')
# check if the singleton has already been created.
if cls.__singleton_instance__ is None:
# create the one an only instance object.
instance = cls.__new__(cls)
# initialise the one and only instance object
# pass it the keyword parameters specified for the class!
instance.__init__(*args, **cls.__kwargs__)
# store the singleton instance object in the class variable __singleton_instance__
cls.__singleton_instance__ = instance
# return the singleton instance
return cls.__singleton_instance__
import math
class AnySquareRoot:
def __init__(self, arg_val):
self.value = math.sqrt(arg_val)
# the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo
class SquareRootOfTwo(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=2):
# the init method is called with arg_num specified in the class definition (value of 2)
def __init__(self, arg_num):
super().__init__(arg_num)
class SquareRootOfThree(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=3):
# the init method is called with arg_num specified in the class definition (value of 3)
def __init__(self, arg_num):
super().__init__(arg_num)
print("creating the objects instances...")
sqrt_root_two_a = SquareRootOfTwo()
print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value)
sqrt_root_two_b = SquareRootOfTwo()
print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_two_a) == id(sqrt_root_two_b)
sqrt_root_three_a = SquareRootOfThree()
print("sqrt_three_a id(sqrt_root_three_a):", id(sqrt_root_three_a), "type(sqrt_root_three_a):", type(sqrt_root_three_a), "sqrt_root_three_a.value:", sqrt_root_three_a.value)
sqrt_root_three_b = SquareRootOfThree()
print("sqrt_three_b id(sqrt_root_three_b)", id(sqrt_root_three_b), "type(sqrt_root_three_b):", type(sqrt_root_three_b), "sqrt_root_three_b.value:", sqrt_root_three_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_three_a) == id(sqrt_root_three_b)
""")
header_md("""Metaclasses in the Python3 standard library""", nesting=2)
print_md("""
This section lists examples of meta-classes in the python standard library. Looking at the standard library of a language is often quite useful, when learning about the intricacies of a programming language.
""")
header_md("""ABCMeta class""", nesting=3)
print_md("""The purpose of this metaclass is to define abstract base classes (also known as ABC's), as defined in [PEP 3119](https://www.python.org/dev/peps/pep-3119/), the documentation for the metaclass [ABCMeta class](https://docs.python.org/3/library/abc.html#abc.ABCMeta).
A python metaclass imposes a different behavior for builtin function [isinstance](https://docs.python.org/3/library/functions.html#isinstance) and [issubclass](https://docs.python.org/3/library/functions.html#issubclass) Only classes that are [registered](https://docs.python.org/3/library/abc.html#abc.ABCMeta.register) with the metaclass, are reported as being subclasses of the given metaclass. The referenced PEP explains, why this is needed, i didn't quite understand the explanation. Would be helpful if the reader can clarify this issue.
""")
header_md("""Enum classes""", nesting=3)
print_md("""Python has support for [enum classes](https://docs.python.org/3/library/enum.html). An enum class lists a set of integer class variables, these variables can then be accessed both by their name, and by their integer value.
An example usage: Note that the class doesn't have a constructor, everything is being taken care of by the baseclass [enum.Enum](https://docs.python.org/3/library/enum.html#enum.Enum) which is making use of a meta-class in he definition of the Enum class [here](https://docs.python.org/3/library/enum.html), this metaclass [EnumMeta source code](https://github.com/python/cpython/blob/f6648e229edf07a1e4897244d7d34989dd9ea647/Lib/enum.py#L161) then creates a behind the scene dictionary, that maps the integer values to their constant names.
The advantage is, that you get an exception, when accessing an undefined constant, or name. There are also more things there, please refer to the linked [documentation](https://docs.python.org/3/library/enum.html)
""")
eval_and_quote("""
import enum
class Rainbow(enum.Enum):
RED=1
ORANGE=2
YELLOW=3
GREEN=4
BLUE=5
INDIGO=6
VIOLET=7
color=Rainbow.GREEN
print("type(Rainbow.GREEN):", type(Rainbow.GREEN))
print("The string rep Rainbow.Green.name:", Rainbow.GREEN.name, "type(Rainbow.GREEN.name):", type(Rainbow.GREEN.name))
print("The integer rep Rainbow.GREEN.value: ", Rainbow.GREEN.value, "type(Rainbow.GREEN.value):", type(Rainbow.GREEN.value))
print("Access by name: Rainbow['GREEN']:", Rainbow['GREEN'])
print("Access by value: Rainbow(4):", Rainbow(4))
# which is the same thing
assert id(Rainbow['GREEN']) == id(Rainbow(4))
""")
header_md("""Conclusion""", nesting=2)
print_md("""
Python meta-classes and decorators are very similar in their capabilities.
Both are tools for [metaprogramming](https://en.wikipedia.org/wiki/Metaprogramming), tools for modifying the program text, and treating and modifying code, as if it were data.
I would argue, that decorators are most often the easiest way of achieving the same goal.
However some things, like hooking the classification of classes and objects (implementing class methods [__instancecheck__ and __subclasscheck__](https://docs.python.org/3/reference/datamodel.html#customizing-instance-and-subclass-checks), can only be done with meta-classes.
I hope, that this course has given you a better understanding, of what is happening under the hood, which would be a good thing.
""")
print_md("*** eof tutorial ***")
| 43.707576 | 720 | 0.733456 |
c29a504870955a0bf9ae588161b47325a0b2d50b | 3,508 | py | Python | src/Speech_process/speech_to_text.py | pranayjoshi/Medico | 2508a39d58eec50f5e94f3c878c00f599fff6629 | [
"MIT"
] | 13 | 2020-09-04T09:16:15.000Z | 2021-01-27T07:03:12.000Z | src/Speech_process/speech_to_text.py | bhargavaganti/Medico | 9059c59f49211f48a27805a00807121ac6f27b27 | [
"MIT"
] | 1 | 2020-10-04T03:23:45.000Z | 2020-10-04T03:23:45.000Z | src/Speech_process/speech_to_text.py | bhargavaganti/Medico | 9059c59f49211f48a27805a00807121ac6f27b27 | [
"MIT"
] | 2 | 2020-11-27T12:25:10.000Z | 2022-01-11T06:25:33.000Z | import speech_recognition as sr #Recognition Module
import pyttsx3 #Speaking package
import json
import series_counter as s_c
engine = pyttsx3.init() #initialising pyttsx value
# NOTE(review): `speak` is not defined or imported in the code shown here —
# presumably provided by a helper module or a truncated part of this file; confirm.
speak('hi user')
# this class will act as a test printer
#this script will run all
def run_all(present_count):
    """Drive the full speech-to-text pipeline and return its printer result.

    Builds a `run_utils` helper for `present_count` and runs each stage in
    order: file paths, voice recognition, speech-to-text, then printing.
    Stage order matters — each call prepares state for the next.
    """
    pipeline = run_utils(present_count)
    pipeline.file_paths()
    pipeline.run_Vr()
    pipeline.run_all_s2t()
    pipeline.run_printer()
    return pipeline.printer()
# finally run the script
if __name__ == "__main__":
    # NOTE(review): `run()` is not defined in the code shown (only `run_all`);
    # presumably this should call `run_all(...)` or `run` lives in a truncated
    # part of the file — confirm before running.
    run()
| 29.982906 | 127 | 0.646522 |
c29a89217076d97f8ff62faec004446052c3802d | 14,431 | py | Python | consai2_game/scripts/example/actions/defense.py | ibis-ssl/consai2-ibis | 2b7d67007703fa49fc7290e92e12481ba48a9a93 | [
"MIT"
] | 4 | 2019-12-16T12:17:32.000Z | 2020-02-15T04:45:47.000Z | consai2_game/scripts/example/actions/defense.py | ibis-ssl/consai2-ibis | 2b7d67007703fa49fc7290e92e12481ba48a9a93 | [
"MIT"
] | null | null | null | consai2_game/scripts/example/actions/defense.py | ibis-ssl/consai2-ibis | 2b7d67007703fa49fc7290e92e12481ba48a9a93 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 SSL-Roots
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# coding: UTF-8
# defense.pyaction
import math
import rospy
import sys,os
from consai2_msgs.msg import BallInfo, RobotInfo
from consai2_msgs.msg import ControlTarget
from geometry_msgs.msg import Pose2D
sys.path.append(os.pardir)
from field import Field
from observer import Observer
import role
import tool
# defenserole
#
#
#
| 34.857488 | 119 | 0.654147 |
c29af6b719187be274751f6a43356244d63bdafc | 2,432 | py | Python | scripts/feature-processing/average_pool_features.py | visionjo/pykinship | 1a73490ffd6ebc6dafc6e18c558ccc7e4ce9acec | [
"MIT"
] | 12 | 2020-02-19T02:50:49.000Z | 2022-03-31T19:39:35.000Z | scripts/feature-processing/average_pool_features.py | visionjo/pykinship | 1a73490ffd6ebc6dafc6e18c558ccc7e4ce9acec | [
"MIT"
] | 68 | 2020-03-23T00:07:28.000Z | 2022-03-28T10:02:16.000Z | scripts/feature-processing/average_pool_features.py | visionjo/pykinship | 1a73490ffd6ebc6dafc6e18c558ccc7e4ce9acec | [
"MIT"
] | 3 | 2020-02-11T19:07:08.000Z | 2020-11-04T18:48:00.000Z | #
# Script to fuse features per member per family (i.e., for each FID.MID, average all encodings across feature dim).
# Any features can be fused. Here is link to ArcFace features,
# https://www.dropbox.com/s/5rbj68dqud2folu/FIDs-features.tar.gz?dl=0
#
import pickle
from pathlib import Path
import numpy as np
from tqdm import tqdm
from src.tools.features import l2_norm
dir_features = str(Path("./").home() / "datasets/rfiw2021/rfiw2021-data/FIDs-features/")
dir_out = ""
ext = "pkl" # ["pkl', 'npy']
# assume input/output directories are the same if no output is specified
dir_out = dir_out if len(dir_out) == 0 else dir_features
path_features = Path(dir_features)
dir_contents = list(path_features.glob("F????"))
normalize_features = True
do_pickle2numpy = True
# convert pkl files to npy (not required, just done if preferred).
# Average fuse all embeddings for each MID
# For every family (FID) and member (MID) directory: load all face encodings,
# average-pool them, optionally L2-normalize, and save as avg_encoding.npy.
for fid in tqdm(dir_contents):
    # for each FID
    print(f"FID: {fid}")
    for mid in fid.glob("MID*"):
        # for each member
        print(f"Fusing: {mid}")
        if not mid.is_dir():
            continue
        fout = mid / "avg_encoding.npy"
        features = []
        for face_feat in mid.glob(f"*face*.{ext}"):
            # for each face: load a single encoding (pickle or numpy)
            if ext == "pkl":
                try:
                    with open(str(face_feat), "rb") as fin:
                        feature = np.array(pickle.load(fin))
                    if do_pickle2numpy:
                        np.save(str(face_feat).replace(".pkl", ".npy"), feature)
                except Exception:
                    # BUG FIX: the original bare `except:` fell through and then
                    # appended a stale (or undefined) `feature` from a previous
                    # iteration; skip unreadable files instead.
                    print(
                        f"WARNING: Exception thrown converting pickle to npy. {face_feat}"
                    )
                    continue
            elif ext == "npy":
                feature = np.load(str(face_feat))
            else:
                # TODO : have as assert outside for loop (i.e., when value is set), but quick solution for now
                print(f"extension {ext} is unrecognizable. Options: [pkl, npy]")
                exit(0)
            features.append(feature)
        if not features:
            # BUG FIX: an empty list previously reached `features.shape[0]`
            # below and raised AttributeError; skip members with no encodings.
            print(f"WARNING: no face encodings found in {mid}")
            continue
        # Average-pool across faces; normalize afterwards if requested.
        # (Previously a False normalize flag left `features` as a list and
        # crashed on the shape check below.)
        features = np.mean(features, axis=0)
        if normalize_features:
            features = l2_norm(features[None, ...])[0]
        if features.shape[0] == 512:
            np.save(fout, features)
        else:
            print(f"ERROR saving: {fout}")
c29b8867909e2528de5c43aad2904d281f32bd76 | 454 | py | Python | python/py-collections/most-commons.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | python/py-collections/most-commons.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | python/py-collections/most-commons.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | # Python > Collections > Company Logo
# Print the number of character occurrences in descending order.
#
# https://www.hackerrank.com/challenges/most-commons/problem
#
from collections import Counter
from itertools import groupby
# Print the three most common characters of the input string, most frequent
# first; ties on frequency are broken alphabetically.
name = input()
ranking = sorted(Counter(name).items(), key=lambda item: (-item[1], item[0]))
for letter, occurrences in ranking[:3]:
    print(letter, occurrences)
c29faa589f518d5f02ac893b18dc08fe8e171da1 | 19,174 | py | Python | sdk/python/pulumiverse_astra/cdc.py | mapped/pulumi-astra | 0d12cb616ed0eeb0d9e7dd9001b94f10bd4c3e8d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumiverse_astra/cdc.py | mapped/pulumi-astra | 0d12cb616ed0eeb0d9e7dd9001b94f10bd4c3e8d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumiverse_astra/cdc.py | mapped/pulumi-astra | 0d12cb616ed0eeb0d9e7dd9001b94f10bd4c3e8d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['CdcArgs', 'Cdc']
class Cdc(pulumi.CustomResource):
    """Pulumi resource for an Astra CDC (change data capture) connector.

    Generated by the Pulumi Terraform Bridge (tfgen) — see the file header;
    the structure follows the standard Pulumi provider SDK pattern.
    """
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Accept either a typed `CdcArgs` bag or loose keyword arguments and
        # delegate to `_internal_init` in both cases.
        resource_args, opts = _utilities.get_resource_args_opts(CdcArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 database_id: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 keyspace: Optional[pulumi.Input[str]] = None,
                 table: Optional[pulumi.Input[str]] = None,
                 tenant_name: Optional[pulumi.Input[str]] = None,
                 topic_partitions: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """Register the CDC resource with the Pulumi engine.

        Raises TypeError when a required property is missing and the resource
        is not being adopted by URN, or when `opts` is not a ResourceOptions.
        """
        # Normalise and validate resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.plugin_download_url is None:
            opts.plugin_download_url = _utilities.get_plugin_download_url()
        if opts.id is None:
            # Creating a new resource (not looking one up by id): build the
            # property bag, enforcing every required input.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CdcArgs.__new__(CdcArgs)
            if database_id is None and not opts.urn:
                raise TypeError("Missing required property 'database_id'")
            __props__.__dict__["database_id"] = database_id
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            if keyspace is None and not opts.urn:
                raise TypeError("Missing required property 'keyspace'")
            __props__.__dict__["keyspace"] = keyspace
            if table is None and not opts.urn:
                raise TypeError("Missing required property 'table'")
            __props__.__dict__["table"] = table
            if tenant_name is None and not opts.urn:
                raise TypeError("Missing required property 'tenant_name'")
            __props__.__dict__["tenant_name"] = tenant_name
            if topic_partitions is None and not opts.urn:
                raise TypeError("Missing required property 'topic_partitions'")
            __props__.__dict__["topic_partitions"] = topic_partitions
            # Output-only properties, resolved by the provider after creation.
            __props__.__dict__["connector_status"] = None
            __props__.__dict__["data_topic"] = None
        super(Cdc, __self__).__init__(
            'astra:index/cdc:Cdc',
            resource_name,
            __props__,
            opts)
| 38.119284 | 134 | 0.625222 |
c2a178c1d8a1946506d66653d718f66410977163 | 2,960 | py | Python | fit_data.py | enricomeloni/covid-tools | 6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7 | [
"MIT"
] | 1 | 2020-10-06T16:03:01.000Z | 2020-10-06T16:03:01.000Z | fit_data.py | enricomeloni/covid-tools | 6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7 | [
"MIT"
] | 3 | 2022-02-13T20:21:56.000Z | 2022-02-27T10:19:23.000Z | fit_data.py | sailab-code/learning-sidarthe | 6920b8cfa0eb89bdb7e0ba96ecc74831185c44a7 | [
"MIT"
] | null | null | null | import os
from torch.optim import Adam, SGD
import skopt
import torch
from utils.data_utils import select_data
from utils.visualization_utils import plot_data_and_fit
from learning_models.logistic import Logistic
# df_file = os.path.join(os.getcwd(), "dati-regioni", "dpc-covid19-ita-regioni.csv")
# Hyper-parameter search (scikit-optimize Gaussian-process minimisation) for a
# logistic-curve fit of cumulative fatalities, then plots fit + forecast.
df_file = os.path.join(os.getcwd(), "train.csv")
area = ["China"] # list(df["denominazione_regione"].unique())
area_col_name = "Country/Region" # "denominazione_regione"
value_col_name = "Fatalities" # "deceduti"
groupby_cols = ["Date"] # ["Data"]
configs = {"optimizer": SGD, "n_epochs": 20000}
_x, _y = select_data(df_file, area, area_col_name, value_col_name, groupby_cols, file_sep=",")
validation_index = 50
# fixme china only
# First `validation_index` points are used for fitting; the rest for validation.
x = _x[:validation_index]
y = _y[:validation_index]
LOGISTIC_MODEL = Logistic((x, y), configs)
# Search space: learning rates and initial parameter values of the logistic model.
SPACE = [skopt.space.Real(1e-9, 1e-3, name='lrw', prior='log-uniform'),
         skopt.space.Real(1e-9, 1e-2, name='lrb', prior='log-uniform'),
         skopt.space.Real(5e-5, 9e-1, name='lrm', prior='log-uniform'),
         skopt.space.Real(-3.0, 3.0, name='initial_w', prior='uniform'),
         skopt.space.Real(-5.0, 5.0, name='initial_b', prior='uniform'),
         skopt.space.Real(min(y)/5, 10*max(y), name='initial_m', prior='uniform'),
         ]
# NOTE(review): `objective` is not defined in the code shown here — presumably
# it is defined elsewhere in this module/fragment; confirm before running.
res_gp = skopt.gp_minimize(objective, SPACE, n_calls=50) # n_calls is the number of repeated trials
# print(res_gp)
score = "Best score=%.4f" % res_gp.fun
result = """Best parameters:
- lrw=%.9f
- lrb=%.9f
- lrm=%.9f
- initial_w=%.6f
- initial_b=%.6f
- initial_m=%.6f""" % (res_gp.x[0], res_gp.x[1], res_gp.x[2], res_gp.x[3], res_gp.x[4], res_gp.x[5])
print(score)
print(result)
#
# Persist the best hyper-parameters per area.
base_path = os.path.join(os.getcwd(), "regioni")
if not os.path.exists(base_path):
    os.mkdir(base_path)
log_file = os.path.join(base_path, area[0] + "_best_results.txt")
with open(log_file, "w") as f:
    f.write(score)
    f.write(result)
y_hat = LOGISTIC_MODEL(LOGISTIC_MODEL.x).detach().numpy()
data = (LOGISTIC_MODEL.x.detach().numpy(), LOGISTIC_MODEL.y.detach().numpy())
future_days = 10 # number of future days (after the training window) to predict
future_x = torch.tensor([i+len(y) for i in range(future_days)]).view(-1, 1).float()
future_y = LOGISTIC_MODEL(future_x).detach().numpy()
# NOTE(review): message says "30 days" but `future_days` is 10 — confirm which is intended.
print("Error in next 30 days")
print(LOGISTIC_MODEL.eval(future_x, torch.tensor(_y[validation_index:validation_index+future_days]).float()))
future_x = future_x.detach().numpy()
save_plot_path = os.path.join(base_path, area[0] + ".png")
# plot_data_and_fit(data, fitted_data=(x, w_hat), future_data=(future_x, future_w), save_path=save_plot_path, plot_name=area[0])
data = [_x, _y]
plot_data_and_fit(data, fitted_data=(x, y_hat), future_data=(future_x, future_y), save_path=save_plot_path, plot_name=area[0])
| 33.636364 | 128 | 0.70473 |
c2a18f5087df24218cafcfe623033e7eac9d54d7 | 16,181 | py | Python | Kafka/automated/dedup_test.py | allensanborn/ChaosTestingCode | 36682e9ec70659f8e6a684e53fff6968bb5d15a2 | [
"MIT"
] | 73 | 2018-10-17T19:48:44.000Z | 2022-03-24T10:28:32.000Z | Kafka/automated/dedup_test.py | allensanborn/ChaosTestingCode | 36682e9ec70659f8e6a684e53fff6968bb5d15a2 | [
"MIT"
] | 1 | 2019-03-04T07:15:29.000Z | 2019-03-04T07:31:49.000Z | Kafka/automated/dedup_test.py | allensanborn/ChaosTestingCode | 36682e9ec70659f8e6a684e53fff6968bb5d15a2 | [
"MIT"
] | 35 | 2018-10-20T23:37:57.000Z | 2022-03-30T13:48:57.000Z | #!/usr/bin/env python
from confluent_kafka import Producer, Consumer, KafkaError
import sys
import time
import subprocess
from datetime import datetime
import threading
from collections import defaultdict
import re
import uuid
# not used at this time
# def delivery_report(err, msg):
# global messages_pos_acked, messages_neg_acked, send_count, ack_count, pos_ack_count, neg_ack_count, action_mark, action_performed, topic, test_type
# ack_count += 1
# if err:
# neg_ack_count += 1
# value = int(msg.value())
# messages_neg_acked.add(value)
# else:
# pos_ack_count += 1
# value = int(msg.value())
# messages_pos_acked.add(value)
# if ack_count % 50000 == 0:
# log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
# if ack_count > action_mark and action_performed == False:
# action_performed = True
# if test_type == "kill-leader":
# print(f"Preparing to kill partition leader: {leader}")
# r = threading.Thread(target=kill_partition_leader)
# r.start()
# else:
# print(f"Preparing to kill client connections to partition leader: {leader}")
# r = threading.Thread(target=kill_tcp_connections_of_leader)
# r.start()
# def produce():
# global send_count, ack_count, pos_ack_count, neg_ack_count, messages_sent, messages_pos_acked, partitions, leader
# dedup = dedup_enabled == "true"
# acks_mode = "all"
# bootstrap_servers = get_broker_ips()
# log(f"Producer bootstrap.servers: {bootstrap_servers}")
# producer = Producer({'bootstrap.servers': bootstrap_servers,
# 'message.send.max.retries': 3,
# 'max.in.flight.requests.per.connection': 5,
# #'enable.idempotence': dedup,
# 'default.topic.config': { 'request.required.acks': acks_mode }})
# # send the first message synchronously, to ensure everything is running ok
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# send_count += 1
# messages_sent[send_count] = list()
# producer.poll(0)
# producer.flush()
# partitions = get_isolate_from_zk_partitions(leader)
# print("Started producing")
# # send bulk of messages asynchronously in order to achieve high message rate
# while send_count < count-1:
# producer.poll(0)
# if send_count - ack_count >= 10000: # ensure we don't have more than 10k in flight at a time
# time.sleep(0.1)
# #print("Sleeping")
# else:
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# messages_sent[send_count] = list()
# send_count += 1
# # send last message in order to block until acked
# # this way we ensure all messages are acked by the end of this function
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# send_count += 1
# messages_sent[send_count] = list()
# producer.poll(0)
# time.sleep(5)
# producer.flush()
# log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
# CLI arguments: topic prefix, number of test runs, messages per run, the ack
# count at which the chaos action fires, and the chaos action type.
topic_prefix = sys.argv[1]
test_num = int(sys.argv[2])
count = int(sys.argv[3])
action_mark = int(sys.argv[4])
test_type = sys.argv[5]
leader = ""
# create log files
start_time = datetime.now().strftime('%H:%M:%S')
# NOTE(review): the "w" handles below are never closed/flushed before the same
# files are reopened in append mode — buffered header writes may land after the
# appended rows at interpreter exit; consider closing them after the header.
output_file_w = open(f"test-output/{topic_prefix}_dedup_output.txt", "w")
output_file_w.write("DedupEnabled,TestRun,SendCount,AckCount,PosAckCount,NegAckCount,Received,NotReceived,ReceivedNoAck,MsgsWithDups,DJF,DJB,JF,JB\n")
output_file = open(f"test-output/{topic_prefix}_dedup_output.txt", "a")
order_file_w = open(f"test-output/{topic_prefix}_order_output.txt", "w")
order_file_w.write("Log of duplicate blocks and out-of-order messages")
order_file = open(f"test-output/{topic_prefix}_order_output.txt", "a")
# Two passes: idempotent producer disabled, then enabled.
dedup_enabled_values = ["false", "true"]
timeout_values = [60000, 0]
# Main experiment: for each dedup setting, run `test_num` produce/chaos/consume
# cycles and log per-run duplicate/loss statistics.
# NOTE(review): `log`, `create_cluster`, `create_topic`, `get_broker_ips`,
# `produce_with_java`, `read` and `start_downed_broker` are not defined in the
# code shown here — presumably defined elsewhere in this script; confirm.
for i in range(2):
    test_run = 1
    dedup_enabled = dedup_enabled_values[i]
    # NOTE(review): `timeout` is assigned but not used in the code shown.
    timeout = timeout_values[i]
    log(f"Running {test_num} runs with deduplication enabled = {dedup_enabled}")
    create_cluster()
    while test_run <= test_num:
        # run test
        topic = f"{topic_prefix}_{str(test_run)}_dedup_{dedup_enabled}"
        leader = create_topic(topic)
        # Ordering statistics, updated during the read phase.
        duplicate_jump_forward = 0
        duplicate_jump_back = 0
        jump_forward = 0
        jump_back = 0
        # send_count = 0
        # ack_count = 0
        # pos_ack_count = 0
        # neg_ack_count = 0
        # action_performed = False
        # - CHAOS VARIABLES
        partitions = list()
        log(f"")
        log(f"Test Run #{test_run} on topic {topic} ------------")
        # - WRITE PHASE --------------------
        log("-------------------------------------------------")
        log("WRITE PHASE")
        log("-------------------------------------------------")
        # NOTE(review): these are presumably repopulated by `read()` and the
        # producer-output files below — confirm; the Python producer that used
        # to fill them is commented out above.
        messages_sent = defaultdict(list)
        messages_pos_acked = set()
        messages_neg_acked = set()
        # try:
        #     produce()
        #     print("Produce ended")
        # except KeyboardInterrupt:
        #     log("Producer cancelled")
        #     sys.exit(1)
        # except Exception as ex:
        #     template = "An exception of type {0} occurred. Arguments:{1!r}"
        #     message = template.format(type(ex).__name__, ex.args)
        #     log("The producer has failed!!!")
        #     log(message)
        #     sys.exit(1)
        pos_acked_file = f"producer-output/{topic}_pos_acked.txt"
        neg_acked_file = f"producer-output/{topic}_neg_acked.txt"
        try:
            bootstrap_servers = get_broker_ips()
            produce_with_java(topic, count, bootstrap_servers, pos_acked_file, neg_acked_file, dedup_enabled)
            log("Produce ended")
        except KeyboardInterrupt:
            log("Producer cancelled")
            sys.exit(1)
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            log("The Java producer has failed!!!")
            log(message)
            sys.exit(1)
        # - READ PHASE --------------------
        if test_type == "kill-leader":
            start_downed_broker()
            time.sleep(10)
        log("-------------------------------------------------")
        log("READ PHASE")
        log("-------------------------------------------------")
        received_count = 0
        try:
            read()
        except KeyboardInterrupt:
            log("Reader cancelled")
            sys.exit(1)
        # Reconcile what was sent/acked against what the consumer received.
        not_received = 0
        received_no_ack = 0
        msgs_with_dups = 0
        received = 0
        for msg_val, msg_ids in messages_sent.items():
            received += len(msg_ids)
            if len(msg_ids) == 0 and msg_val in messages_pos_acked:
                not_received += 1
            elif len(msg_ids) == 1 and msg_val not in messages_pos_acked:
                received_no_ack += 1
            elif len(msg_ids) > 1:
                msgs_with_dups += 1
        send_count = len(messages_sent)
        ack_count = len(messages_pos_acked) + len(messages_neg_acked)
        pos_ack_count = len(messages_pos_acked)
        neg_ack_count = len(messages_neg_acked)
        log("Results --------------------------------------------")
        log(f"Final send count: {str(send_count)}")
        log(f"Final ack count: {str(ack_count)}")
        log(f"Final positive ack count: {str(pos_ack_count)}")
        log(f"Final negative ack count: {str(neg_ack_count)}")
        log(f"Messages received: {str(received)}")
        log(f"Acked messages missing: {str(not_received)}")
        log(f"Non-acked messages received: {str(received_no_ack)}")
        log(f"Duplicates: {msgs_with_dups}")
        log(f"Duplicate Jump Forward: {duplicate_jump_forward}")
        log(f"Duplicate Jump Back: {duplicate_jump_back}")
        log(f"Non-Duplicate Jump Forward: {jump_forward}")
        log(f"Non-Duplicate Jump Back: {jump_back}")
        log("----------------------------------------------------")
        log(f"{dedup_enabled},{str(test_run)},{str(send_count)},{str(ack_count)},{str(pos_ack_count)},{str(neg_ack_count)},{str(received)},{str(not_received)},{str(received_no_ack)},{str(msgs_with_dups)},{str(duplicate_jump_forward)},{str(duplicate_jump_back)},{str(jump_forward)},{str(jump_back)}", True)
        time.sleep(20)
        test_run += 1
c2a329ec349312cdbee649e9a5eb68a195101366 | 2,374 | py | Python | libs/units/tests/test_string_reps.py | mscansian/drpexe-uploader | de17baf9085155a046b8e5f68b0b3191a2ce1847 | [
"MIT"
] | null | null | null | libs/units/tests/test_string_reps.py | mscansian/drpexe-uploader | de17baf9085155a046b8e5f68b0b3191a2ce1847 | [
"MIT"
] | null | null | null | libs/units/tests/test_string_reps.py | mscansian/drpexe-uploader | de17baf9085155a046b8e5f68b0b3191a2ce1847 | [
"MIT"
] | null | null | null | """Tests for string representations of Quantities and Units,
i.e. __repr__ and __str__"""
from units import unit
from units.predefined import define_units
from units.quantity import Quantity
from units.registry import REGISTRY
def test_quantity_repr():
    """repr() of a quantity exposes its value and unit for debugging."""
    quantity = Quantity(1, unit('m'))
    assert repr(quantity) == "Quantity(1, LeafUnit('m', True))"
def test_quantity_str():
    """str() of a quantity is a human-readable value + unit."""
    quantity = Quantity(1, unit('m'))
    assert str(quantity) == "1.00 m"
def test_leaf_unit_repr():
    """repr() of a leaf unit shows its symbol and SI flag."""
    metre = unit('m')
    assert repr(metre) == "LeafUnit('m', True)"
def test_leaf_unit_str():
    """str() of a leaf unit is just its symbol."""
    second = unit('s')
    assert str(second) == "s"
def test_composed_unit_repr():
    """repr() of a composed unit lists numerator and denominator leaf units.

    The order of the numerator units is non-deterministic, so both orderings
    are accepted.
    """
    composed = unit('m') * unit('g') / unit('s')
    expected = [
        "ComposedUnit([LeafUnit('g', True), LeafUnit('m', True)], "
        "[LeafUnit('s', True)], 1)",
        "ComposedUnit([LeafUnit('m', True), LeafUnit('g', True)], "
        "[LeafUnit('s', True)], 1)",
    ]
    assert repr(composed) in expected
def test_composed_unit_str():
    """str() of a composed unit; numerator order is non-deterministic."""
    composed_str = str(unit('m') * unit('g') / unit('s'))
    assert composed_str in ["g * m / s", "m * g / s"]
def test_named_composed_unit_repr():
    """repr() of a named composed unit shows its name and underlying unit."""
    expected = ("NamedComposedUnit('km', "
                "ComposedUnit([LeafUnit('m', True)], [], 1000), False)")
    assert repr(unit('km')) == expected
def test_named_composed_unit_str():
    """str() of a named composed unit is just its name."""
    mile = unit('mi')
    assert str(mile) == 'mi'
def setup_module(module):
    # The `module` argument is required by the py.test hook signature.
    # pylint: disable=W0613
    """py.test hook: populate the unit registry before any test here runs."""
    define_units()
def teardown_module(module):
    # The `module` argument is required by the py.test hook signature.
    # pylint: disable=W0613
    """py.test hook: empty the unit registry after all tests here have run."""
    REGISTRY.clear()
| 36.523077 | 77 | 0.607835 |
c2a47a378106287329bd3e25e1d300fbd9312bc2 | 643 | py | Python | apps/store/permissions.py | JimenezJC/cozy-exchange | 131576e8159df8bab2ff680283ed55e66abaaa1d | [
"MIT"
] | null | null | null | apps/store/permissions.py | JimenezJC/cozy-exchange | 131576e8159df8bab2ff680283ed55e66abaaa1d | [
"MIT"
] | null | null | null | apps/store/permissions.py | JimenezJC/cozy-exchange | 131576e8159df8bab2ff680283ed55e66abaaa1d | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission, SAFE_METHODS
| 33.842105 | 74 | 0.709176 |
c2a90d53c2c37e84ce26a4aecf160f999dff6816 | 969 | py | Python | fishpi/vehicle/test_quick.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | 18 | 2015-01-17T17:03:07.000Z | 2020-10-17T06:38:26.000Z | fishpi/vehicle/test_quick.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | null | null | null | fishpi/vehicle/test_quick.py | FishPi/FishPi-POCV---Command---Control | 6df8e9db29c1b4769ddedb3a89a21fadae260709 | [
"BSD-2-Clause"
] | 9 | 2015-02-14T01:42:46.000Z | 2019-08-26T20:24:36.000Z | #!/usr/bin/python
#
# FishPi - An autonomous drop in the ocean
#
# Simple test of PWM motor and servo drive
#
import logging
import raspberrypi
from time import sleep
from drive_controller import AdafruitDriveController
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
logger.addHandler(console)
print "testing drive controller..."
drive = AdafruitDriveController(debug=True, i2c_bus=raspberrypi.i2c_bus())
print "run ahead..."
drive.set_throttle(0.5)
sleep(0.5)
drive.set_throttle(1.0)
sleep(0.5)
drive.set_throttle(0.5)
sleep(2)
print "run 0%..."
drive.set_throttle(-1.0)
sleep(2)
drive.set_throttle(0.0)
sleep(2)
print "run reverse for 2 sec"
drive.set_throttle(-0.5)
sleep(0.5)
drive.set_throttle(-1.0)
sleep(2)
print "and back to neutral..."
drive.set_throttle(0.0)
sleep(5)
| 20.617021 | 78 | 0.668731 |
c2a9d8d15587245ae91d5e2b5d778ffa6fc78c2f | 13,246 | py | Python | sg_covid_impact/complexity.py | nestauk/sg_covid_impact | 0d52e643280cc6b06611759d4464dec82949ae05 | [
"MIT"
] | 2 | 2020-10-19T16:30:59.000Z | 2021-03-17T13:11:50.000Z | sg_covid_impact/complexity.py | nestauk/sg_covid_impact | 0d52e643280cc6b06611759d4464dec82949ae05 | [
"MIT"
] | 67 | 2020-10-07T09:34:38.000Z | 2021-04-06T08:46:49.000Z | sg_covid_impact/complexity.py | nestauk/sg_covid_impact | 0d52e643280cc6b06611759d4464dec82949ae05 | [
"MIT"
] | null | null | null | import logging
import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy.linalg import eig
from numba import jit
import sg_covid_impact
# from mi_scotland.utils.pandas import preview
logger = logging.getLogger(__name__)
np.seterr(all="raise") # Raise errors on floating point errors
def process_complexity(df, dataset, year, geo_type, cluster, PCI=False):
    """Calculate complexity variables aggregated over the columns.

    Calculates: size, complexity index, complexity outlook index.

    Args:
        df (pandas.DataFrame): Long dataframe.
            Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        dataset (str): Name of dataset
        year (str): Year
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on
        PCI (bool, optional): If True, calculate product complexity by
            transposing the input activity matrix

    Returns:
        pandas.DataFrame
    """
    X = df.pipe(pivot_area_cluster, cluster).fillna(0)
    if PCI:
        # Product complexity: work on the transposed activity matrix.
        X = X.T
    X.index.name = "cluster"

    size = X.sum(1).to_frame("size")

    complexity = calc_eci(create_lq(X, binary=True), sign_correction=X.sum(1))
    if PCI:
        complexity = complexity.rename(columns={"eci": "pci"})

    outlook = complexity_outlook_index(X).to_frame("poi" if PCI else "coi")

    return (
        size.join(complexity)
        .join(outlook)
        .assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
    )
def _melt_keep_index(df, value_name="value"):
""" Fully melt a dataframe keeping index, setting new index as all but `value` """
id_vars = df.index.names
return (
df.reset_index()
.melt(id_vars=id_vars, value_name=value_name)
.set_index([*id_vars, df.columns.name])
)
def process_complexity_unit(df, dataset, year, geo_type, cluster):
    """Calculate unaggregated complexity analysis variables.

    Calculates: raw value, location quotient, RCA flag, distance, proximity
    density complement (omega), opportunity outlook gain.

    Args:
        df (pandas.DataFrame): Long dataframe.
            Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        dataset (str): Name of dataset
        year (str): Year
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on

    Returns:
        pandas.DataFrame
    """
    X = df.pipe(pivot_area_cluster, cluster).fillna(0)
    X.columns.name = "cluster"
    # Index: year, location, cluster, geo_type
    # value, LQ, RCA?, distance, OOG
    value = X.pipe(_melt_keep_index, "value")
    lq = X.pipe(create_lq).pipe(_melt_keep_index, "lq")
    has_rca = (lq > 1).rename(columns={"lq": "has_rca"})
    d = X.pipe(distance).pipe(_melt_keep_index, "distance")
    omega = 1 - X.pipe(proximity_density).pipe(_melt_keep_index, "omega")
    oog = opportunity_outlook_gain(X).pipe(_melt_keep_index, "oog")
    # BUG FIX: the original ended with `.pipe(preview)`, but the `preview`
    # import is commented out at the top of the module, so this function
    # raised NameError at runtime.
    return pd.concat([value, lq, has_rca, d, omega, oog], axis=1).assign(
        year=year, geo_type=geo_type, source=dataset, cluster_type=cluster
    )
def proximity_matrix(X, threshold=1):
    """Calculate the proximity matrix between activities.

    Proximity between activities `i` and `j` is the minimum of the two
    conditional probabilities that a location with a revealed comparative
    advantage (RCA) in one also has an RCA in the other:

    .. math::
        \\phi_{ij} = \\min\\left\\{\\mathbb{P}(\\text{RCA}_i \\geq 1 |
        \\text{RCA}_j \\geq 1), \\mathbb{P}(\\text{RCA}_j \\geq 1 |
        \\text{RCA}_i \\geq 1)\\right\\}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [n x n]
    """
    rca = create_lq(X, binary=True, threshold=threshold)
    # `_proximity_matrix` is the numba-jitted kernel working on the raw array.
    values = _proximity_matrix(rca.values)
    return pd.DataFrame(values, index=rca.columns, columns=rca.columns)
def proximity_density(X, threshold=1):
    """Calculate proximity density.

    .. math:
        \\omega_{ik} = \\frac{ \\sum_j M_{ij} \\phi_{jk}}{\\sum_j \\phi_{jk}}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [m x n]
    """
    rca = create_lq(X, binary=True, threshold=threshold)
    phi = proximity_matrix(X, threshold)
    # Weighted share of each activity's proximity mass already covered by RCAs.
    weighted = rca @ phi
    return weighted / phi.sum(axis=0)
def distance(X, threshold=1):
    """Distance: 1 - proximity density, with existing capabilities as NaN.

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    rca = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    # Mask: NaN where the location already has the capability, 1 elsewhere
    mask = rca.applymap(lambda m: np.nan if m == 1 else 1)
    dist = ((1 - rca) @ phi) / phi.sum(axis=1)
    return dist * mask
def complexity_outlook_index(X, threshold=1):
    """Calculate economic complexity outlook index

    Sums, over activities, the complexity of each activity weighted by
    (1 - distance) and restricted (via ``1 - M``) to activities the
    location does not already have an RCA in.

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.Series [locations]
    """
    M = create_lq(X, threshold, binary=True)
    d = distance(X, threshold)
    # Activity complexity: ECI of the transposed RCA matrix, sign-corrected
    # against total activity per activity (column sums of X)
    PCI = calc_eci(M.T, sign_correction=X.sum(0))
    # calc_eci drops all-zero rows/cols, so PCI may cover fewer activities
    # than M; realign M and d to the surviving activities
    if PCI.shape[0] != M.shape[1]:
        M = M.loc[:, PCI.index]
        d = d.loc[:, PCI.index]
    return ((1 - d) * (1 - M) * PCI.values.T).sum(axis=1)
def opportunity_outlook_gain(X, threshold=1):
    """Calculate opportunity outlook gain

    Value for existing capabilities is NaN.

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    M = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    d = distance(X, threshold)
    # Activity complexity: ECI of the transposed RCA matrix, sign-corrected
    # against total activity per activity (column sums of X)
    PCI = calc_eci(M.T, sign_correction=X.sum(0))
    # calc_eci drops all-zero rows/cols; realign M, phi, d to the
    # activities that survived
    if PCI.shape[0] != M.shape[1]:
        M = M.loc[:, PCI.index]
        phi = phi.loc[PCI.index, PCI.index]
        d = d.loc[:, PCI.index]
    # First term: complexity reachable through proximity-weighted neighbours
    # of missing capabilities; second term subtracts complexity already
    # accounted for via (1 - distance). Entries with an existing RCA
    # (M == 1) are masked to NaN by the trailing applymap.
    return (
        (1 - M) * PCI.values.T @ (phi / phi.sum(0)) - ((1 - d) * PCI.values.T)
    ) * M.applymap(lambda x: np.nan if x == 1 else 1)
def pivot_area_cluster(df, cluster, aggfunc=sum):
    """Convert long data into a matrix, pivoting on `cluster`

    For example, take BRES/IDBR data at Local authority (LAD) geographic level
    and SIC4 sectoral level to create matrix with elements representing the
    activity level for a given LAD-SIC4 combination.

    Args:
        df (pandas.DataFrame): Long dataframe
            Expected Columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        cluster (str): Column of the sector type to pivot on
        aggfunc (function, optional): Aggregation function passed to
            `pandas.DataFrame.pivot_table`. Defaults to the builtin `sum`.

    Returns:
        pandas.DataFrame: [number areas x number cluster], row-indexed by
            ("geo_cd", "geo_nm").

    Note: Fills missing values with zero
    """
    return (
        df
        # Fill missing values with zeros
        .fillna(0)
        # Pivot to [areas x sectors]
        .pivot_table(
            index=["geo_cd", "geo_nm"],
            columns=cluster,
            values="value",
            fill_value=0,
            aggfunc=aggfunc,
        )
    )
def create_lq(X, threshold=1, binary=False):
    """Calculate the location quotient.

    Divides the share of activity in a location by the share of activity in
    the UK total.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        threshold (float, optional): Binarisation threshold.
        binary (bool, optional): If True, binarise matrix at `threshold`.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    Xm = X.values
    # All-zero rows/columns produce 0/0 -> NaN; the warning is suppressed
    # and the NaNs are mapped back to 0 by `fillna` below
    with np.errstate(invalid="ignore"):  # Accounted for divide by zero
        X = pd.DataFrame(
            (Xm * Xm.sum()) / (Xm.sum(1)[:, np.newaxis] * Xm.sum(0)),
            index=X.index,
            columns=X.columns,
        ).fillna(0)
    return (X > threshold).astype(float) if binary else X
def calc_fitness(X, n_iters):
    """Calculate the fitness metric of economic complexity.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for

    Returns:
        pandas.DataFrame

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    A = X.values
    fitness = np.ones(A.shape[0])
    # Fixed-point iteration; runs n_iters - 1 times (range starts at 1)
    for _ in range(1, n_iters):
        fitness = (A / (A / fitness[:, np.newaxis]).sum(0)).sum(1)
        fitness = fitness / fitness.mean()
    return pd.DataFrame(np.log(fitness), index=X.index, columns=["fitness"])
def calc_fit_plus(X, n_iters, correction=True):
    """Calculate the fitness+ (ECI+) metric of economic complexity

    Args:
        X (pandas.Dataframe): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for
        correction (bool, optional): If true, apply logarithmic correction.

    Returns:
        pandas.Dataframe

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    # Normalisation: arithmetic mean for binary (RCA) matrices, geometric
    # mean otherwise (`ss.gmean` — presumably scipy.stats; import not
    # visible here, confirm at file top)
    if X.dtypes[0] == bool:
        norm_mean = np.mean
    else:
        norm_mean = ss.gmean
    # Initialise with each location's total activity
    x = X.values.sum(axis=1)
    x = x / norm_mean(x)
    # Fixed-point iteration; runs n_iters - 1 times (range starts at 1)
    for n in range(1, n_iters):
        x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
        x = x / norm_mean(x)
    if correction:
        # Logarithmic correction relative to each location's share of
        # activity across all sectors
        x = np.log(x) - np.log((X / X.sum(0)).sum(1))
    else:
        pass # x = np.log(x)
    return pd.DataFrame(x, index=X.index, columns=["fit_p"])
def calc_eci(X, sign_correction=None):
    """Calculate the original economic complexity index (ECI).

    Builds the matrix H = C X P X^T from the (inverse) row and column sums,
    takes the eigenvector at index 1 of H, standardises it, and orients its
    sign to correlate positively with a diversity proxy.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        sign_correction (pd.Series, optional): Array to correlate with ECI
            to calculate sign correction. Typically, ubiquity. If None, uses
            the sum over columns of the input data.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    C = np.diag(1 / X.sum(1)) # Diagonal entries k_C (inverse row sums)
    P = np.diag(1 / X.sum(0)) # Diagonal entries k_P (inverse column sums)
    H = C @ X.values @ P @ X.T.values
    w, v = eig(H, left=False, right=True)
    # NOTE(review): this selects the eigenvector at index 1, which assumes
    # `eig` returns eigenvalues ordered by magnitude — scipy/numpy make no
    # such ordering guarantee. TODO confirm against an explicit sort of `w`.
    eci = pd.DataFrame(v[:, 1].real, index=X.index, columns=["eci"])
    # Positively correlate `sign_correction` (some proxy for diversity) w/ ECI
    if sign_correction is None:
        sign_correction = X.sum(1)
    else:
        sign_correction = sign_correction.loc[X.index]
    sign = np.sign(np.corrcoef(sign_correction, eci.eci.values)[0, 1])
    logger.info(f"CI sign: {sign}")
    # Standardise (z-score) and orient by the computed sign
    return (eci - eci.mean()) / eci.std() * sign
def _drop_zero_rows_cols(X):
    """Remove regions/entities with no activity.

    A fully-zero row or column means ECI cannot be calculated, so such
    entries are dropped (with a warning naming what was removed).
    """
    row_mask = X.sum(1) > 0
    if not row_mask.all():
        logger.warning(f"Dropping all zero rows: {X.loc[~row_mask].index.values}")
        X = X.loc[row_mask]
    col_mask = X.sum(0) > 0
    if not col_mask.all():
        logger.warning(f"Dropping all zero cols: {X.loc[:, ~col_mask].columns.values}")
        X = X.loc[:, col_mask]
    return X
def simple_diversity(X):
    """Generate two simple measures of diversity.

    The first counts how many activities a location engages in at all; the
    second counts how many activities the location has a revealed
    comparative advantage (RCA) in.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.

    Returns:
        pandas.DataFrame

    #UTILS
    """
    n_active = (X > 0).sum(axis=1).to_frame("div_n_active")
    n_rca = create_lq(X, binary=True, threshold=1).sum(axis=1).to_frame("div_n_RCA")
    return pd.concat([n_active, n_rca], axis=1)
| 29.968326 | 88 | 0.616337 |
c2aaa982479408d6fca2ceb47bf8d2f924d7e364 | 768 | py | Python | Exercicios/Ex019.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | 2 | 2021-05-21T23:17:44.000Z | 2021-05-22T04:34:37.000Z | Exercicios/Ex019.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | Exercicios/Ex019.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | '''Ex 019 - Um professor quer sortear um dos seus quatro alunos para apagar o quadro.
Faça um programa que ajude ele, lendo o nome dos alunos e escrevendo na tela o nome do escolhido.'''
print('-' * 15, '>Ex 19<', '-' * 15)
from random import choice
# Usando Random para sortear o escolhido.
# Recebendo dados.
aluno1 = str(input('Digite o nome do aluno:'))
aluno2 = str(input('Digite o nome do aluno:'))
aluno3 = str(input('Digite o nome do aluno:'))
aluno4 = str(input('Digite o nome do aluno:'))
# Criando um array para escolher um entre os informados.
lista = [aluno1, aluno2, aluno3, aluno4]
# Usando choice para sortear um dentro do array.
escolhido = choice(lista)
# Imprimindo dados na tela para o usuario.
print('O escolhido foi {}'. format(escolhido)) | 33.391304 | 100 | 0.716146 |
c2ad28eb7943b8ab5d743641a4bd509bff412fa2 | 4,807 | py | Python | evalml/preprocessing/data_splitters/balanced_classification_splitter.py | skvorekn/evalml | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | [
"BSD-3-Clause"
] | null | null | null | evalml/preprocessing/data_splitters/balanced_classification_splitter.py | skvorekn/evalml | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | [
"BSD-3-Clause"
] | null | null | null | evalml/preprocessing/data_splitters/balanced_classification_splitter.py | skvorekn/evalml | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | [
"BSD-3-Clause"
] | null | null | null | from sklearn.model_selection import StratifiedKFold
from evalml.preprocessing.data_splitters.balanced_classification_sampler import (
BalancedClassificationSampler
)
from evalml.preprocessing.data_splitters.base_splitters import (
BaseUnderSamplingSplitter
)
from evalml.preprocessing.data_splitters.training_validation_split import (
TrainingValidationSplit
)
| 60.848101 | 191 | 0.725817 |
c2afa0144857d385ec53c489e4695b2ff1d1fdcf | 1,327 | py | Python | alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 23.696429 | 65 | 0.57046 |