hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4e4e48bf1020755d5adf17a1c4aa85cf738609d6 | 23,209 | py | Python | riglib/bmi/robot_arms.py | sgowda/brain-python-interface | 708e2a5229d0496a8ce9de32bda66f0925d366d9 | [
"Apache-2.0"
] | 7 | 2015-08-25T00:28:49.000Z | 2020-04-14T22:58:51.000Z | riglib/bmi/robot_arms.py | sgowda/brain-python-interface | 708e2a5229d0496a8ce9de32bda66f0925d366d9 | [
"Apache-2.0"
] | 89 | 2020-08-03T16:54:08.000Z | 2022-03-09T19:56:19.000Z | riglib/bmi/robot_arms.py | sgowda/brain-python-interface | 708e2a5229d0496a8ce9de32bda66f0925d366d9 | [
"Apache-2.0"
] | 4 | 2016-10-05T17:54:26.000Z | 2020-08-06T15:37:09.000Z | '''
Classes implementing various kinematic chains. This module is perhaps mis-located
as it does not have a direct BMI role but rather contains code which is useful in
supporting BMI control of kinematic chains.
This code depends on the 'robot' module (https://github.com/sgowda/robotics_toolbox)
'''
import numpy as np
try:
import robot
except ImportError:
import warnings
warnings.warn("The 'robot' module cannot be found! See https://github.com/sgowda/robotics_toolbox")
import matplotlib.pyplot as plt
from collections import OrderedDict
import time
pi = np.pi
def point_to_line_segment_distance(point, segment):
    '''
    Shortest Euclidean distance from ``point`` to the line segment ``segment``.

    Used to determine collisions between robot arm links and virtual obstacles.
    Adapted from http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment

    :param point: array-like coordinates of the query point
    :param segment: pair ``(start, end)`` of array-like segment endpoints
    :return: distance as a float
    '''
    start, end = segment
    seg_sq_len = np.sum(np.abs(start - end)**2)
    # Degenerate segment: both endpoints coincide, so measure to that point.
    if seg_sq_len == 0:
        return np.linalg.norm(start - point)
    # Parametric position of the point's projection onto the infinite line;
    # values outside [0, 1] fall beyond the segment's endpoints.
    frac = np.dot(point - start, end - start)/seg_sq_len
    if frac < 0:
        closest = start
    elif frac > 1:
        closest = end
    else:
        closest = start + frac*(end - start)
    return np.linalg.norm(closest - point)
| 32.190014 | 138 | 0.561463 |
4e4e7f677ab2a0f132b93e4b1dfb1c29e362f6de | 3,622 | py | Python | src/utils/tilemap.py | Magicalbat/Metroidvania-Month-15 | a0a30fb3f531a597ced69bf76568ef26e5e88019 | [
"MIT"
] | null | null | null | src/utils/tilemap.py | Magicalbat/Metroidvania-Month-15 | a0a30fb3f531a597ced69bf76568ef26e5e88019 | [
"MIT"
] | null | null | null | src/utils/tilemap.py | Magicalbat/Metroidvania-Month-15 | a0a30fb3f531a597ced69bf76568ef26e5e88019 | [
"MIT"
] | null | null | null | import pygame
from pygame.math import Vector2
import json, math | 38.126316 | 90 | 0.549144 |
4e508c95181eba9329a23ec0f597dadfe33c7e09 | 7,295 | py | Python | src/dsnt/util.py | anibali/dsnt-pose2d | f453331a6b120f02948336555b996ac0d95bf4be | [
"Apache-2.0"
] | 12 | 2018-10-18T06:41:00.000Z | 2021-07-31T08:19:41.000Z | src/dsnt/util.py | anibali/dsnt-pose2d | f453331a6b120f02948336555b996ac0d95bf4be | [
"Apache-2.0"
] | 2 | 2019-07-15T13:36:08.000Z | 2020-03-09T04:39:08.000Z | src/dsnt/util.py | anibali/dsnt-pose2d | f453331a6b120f02948336555b996ac0d95bf4be | [
"Apache-2.0"
] | 5 | 2019-01-08T01:32:18.000Z | 2020-08-04T07:42:12.000Z | """
Miscellaneous utility functions.
"""
import random
import time
from contextlib import contextmanager
import math
import numpy as np
import torch
from PIL.ImageDraw import Draw
# Joints to connect for visualisation, giving the effect of drawing a
# basic "skeleton" of the pose.
BONES = {
'right_lower_leg': (0, 1),
'right_upper_leg': (1, 2),
'right_pelvis': (2, 6),
'left_lower_leg': (4, 5),
'left_upper_leg': (3, 4),
'left_pelvis': (3, 6),
'center_lower_torso': (6, 7),
'center_upper_torso': (7, 8),
'center_head': (8, 9),
'right_lower_arm': (10, 11),
'right_upper_arm': (11, 12),
'right_shoulder': (12, 8),
'left_lower_arm': (14, 15),
'left_upper_arm': (13, 14),
'left_shoulder': (13, 8),
}
def draw_skeleton(img, coords, joint_mask=None):
    '''Overlay a pose "skeleton" on an image (for visualisation purposes).

    Joint pairs listed in BONES are connected with lines: blue for
    left-hand-side joints, red for right-hand-side joints and magenta for
    center joints.

    Args:
        img (PIL.Image.Image): PIL image which the skeleton will be drawn over.
        coords (Tensor): 16x2 tensor containing 0-based pixel coordinates
            of joint locations. Joint indices are expected to match
            http://human-pose.mpi-inf.mpg.de/#download
        joint_mask (Tensor, optional): Mask of valid joints (invalid joints
            will be drawn with grey lines).
    '''
    canvas = Draw(img)

    for bone_name, (j1, j2) in BONES.items():
        # Pick the line colour from the bone's side prefix.
        if bone_name.startswith('center_'):
            colour = (255, 0, 255)  # Magenta
        elif bone_name.startswith('left_'):
            colour = (0, 0, 255)    # Blue
        elif bone_name.startswith('right_'):
            colour = (255, 0, 0)    # Red
        else:
            colour = (255, 255, 255)

        # Grey out the bone when either endpoint is masked out.
        if joint_mask is not None and (joint_mask[j1] == 0 or joint_mask[j2] == 0):
            colour = (100, 100, 100)

        canvas.line(
            [coords[j1, 0], coords[j1, 1], coords[j2, 0], coords[j2, 1]],
            fill=colour)
def draw_gaussian(img_tensor, x, y, sigma, normalize=False, clip_size=None):
    '''Draw a 2D Gaussian into a single-channel image tensor, in place.

    Args:
        img_tensor: Image tensor to draw to (HxW, or 1xHxW).
        x: x-coordinate of Gaussian centre (in pixels).
        y: y-coordinate of Gaussian centre (in pixels).
        sigma: Standard deviation of Gaussian (in pixels).
        normalize: Ensures values sum to 1 when True.
        clip_size: Restrict the size of the draw region.
    '''
    # Truncate (not round) the centre coordinates, matching the behaviour of
    # the reference implementations this was adapted from.
    x = int(x)
    y = int(y)

    if img_tensor.dim() == 3:
        n_chans, height, width = list(img_tensor.size())
        assert n_chans == 1, 'expected img_tensor to have one channel'
        img_tensor = img_tensor[0]
    elif img_tensor.dim() == 2:
        height, width = list(img_tensor.size())
    else:
        raise Exception('expected img_tensor to have 2 or 3 dimensions')

    # Without an explicit clip, the draw region covers the whole image.
    radius = clip_size / 2 if clip_size is not None else max(width, height)

    # Nothing to draw if the radius is tiny or the centre lies so far outside
    # the image that the draw region cannot intersect it.
    outside = (x <= -radius or y <= -radius or
               x >= (width - 1) + radius or y >= (height - 1) + radius)
    if radius < 0.5 or outside:
        return

    start_x = max(0, math.ceil(x - radius))
    end_x = min(width, int(x + radius + 1))
    start_y = max(0, math.ceil(y - radius))
    end_y = min(height, int(y + radius + 1))
    region_w = end_x - start_x
    region_h = end_y - start_y

    # `region` is a view, so the in-place ops below write into img_tensor.
    region = img_tensor[start_y:end_y, start_x:end_x]
    xs = torch.arange(start_x, end_x).type_as(img_tensor).view(1, region_w).expand_as(region)
    ys = torch.arange(start_y, end_y).type_as(img_tensor).view(region_h, 1).expand_as(region)

    # exp(-((dx^2 + dy^2) / (2 * sigma^2))), computed entirely in place.
    k = -0.5 * (1 / sigma)**2
    region.copy_((xs - x)**2)
    region.add_((ys - y)**2)
    region.mul_(k)
    region.exp_()

    if normalize:
        total = region.sum()
        if total > 0:
            region.div_(total)
def encode_heatmaps(coords, width, height, sigma=1):
    '''Convert normalised joint coordinates into Gaussian heatmaps.

    Note: `coords` is modified in place -- its normalised ([-1, 1]) values
    are converted to pixel coordinates as a side effect.

    Args:
        coords (Tensor): batch_size x n_joints x 2 normalised coordinates.
        width (int): Heatmap width in pixels.
        height (int): Heatmap height in pixels.
        sigma (float): Standard deviation of the drawn Gaussians (in pixels).

    Returns:
        Tensor: batch_size x n_joints x height x width heatmaps.
    '''
    # Normalised coordinates to pixel coordinates
    coords.add_(1)
    coords[:, :, 0].mul_(width / 2)
    coords[:, :, 1].mul_(height / 2)
    coords.add_(-0.5)
    batch_size = coords.size(0)
    n_chans = coords.size(1)
    target = torch.FloatTensor(batch_size, n_chans, height, width).zero_()
    for i in range(batch_size):
        for j in range(n_chans):
            # .item() extracts a plain Python float: calling round() directly
            # on a 0-dim Tensor raises TypeError (Tensor defines no __round__).
            x = round(coords[i, j, 0].item())
            y = round(coords[i, j, 1].item())
            draw_gaussian(target[i, j], x, y, sigma, normalize=False, clip_size=7)
    return target
def decode_heatmaps(heatmaps, use_neighbours=True):
    '''Convert heatmaps into normalised joint coordinates.

    Args:
        heatmaps (Tensor): batch_size x n_joints x height x width heatmaps.
        use_neighbours (bool): Apply the quarter-pixel neighbour offset.

    Returns:
        Tensor: batch_size x n_joints x 2 normalised ([-1, 1]) coordinates.
    '''
    coords = get_preds(heatmaps)
    _, _, height, width = list(heatmaps.size())

    if use_neighbours:
        # "To improve performance at high precision thresholds the prediction
        # is offset by a quarter of a pixel in the direction of its next highest
        # neighbor before transforming back to the original coordinate space
        # of the image"
        # - Stacked Hourglass Networks for Human Pose Estimation
        for sample_heatmaps, joint_coords in zip(heatmaps, coords):
            for j, (x, y) in enumerate(joint_coords):
                x, y = int(x), int(y)
                # Skip border pixels, where a neighbour would be out of range.
                if 0 < x < width - 1 and 0 < y < height - 1:
                    hm = sample_heatmaps[j]
                    joint_coords[j, 0] += 0.25 * np.sign(hm[y, x + 1] - hm[y, x - 1])
                    joint_coords[j, 1] += 0.25 * np.sign(hm[y + 1, x] - hm[y - 1, x])

    # Pixel coordinates to normalised coordinates (in place)
    coords.add_(0.5)
    coords[:, :, 0].mul_(2 / width)
    coords[:, :, 1].mul_(2 / height)
    coords.add_(-1)
    return coords
def seed_random_number_generators(seed):
    """Seed every random number generator used by this project.

    Covers Python's built-in `random` module, NumPy, and PyTorch (both the
    CPU generator and the generators of all CUDA devices).
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
| 31.042553 | 92 | 0.615216 |
4e50c9eaddb3cc1ea6331eb13dca8d92f32d04fe | 1,321 | py | Python | lect01_codes/lect01_eg/multiprocess_eg/mp_main.py | radiumweilei/chinahadoop-python-ai-2 | f45271b073f99b9c46de150aa87bcf4adc5feca2 | [
"Apache-2.0"
] | null | null | null | lect01_codes/lect01_eg/multiprocess_eg/mp_main.py | radiumweilei/chinahadoop-python-ai-2 | f45271b073f99b9c46de150aa87bcf4adc5feca2 | [
"Apache-2.0"
] | null | null | null | lect01_codes/lect01_eg/multiprocess_eg/mp_main.py | radiumweilei/chinahadoop-python-ai-2 | f45271b073f99b9c46de150aa87bcf4adc5feca2 | [
"Apache-2.0"
] | 1 | 2019-11-11T09:42:06.000Z | 2019-11-11T09:42:06.000Z | import os
import time
from datetime import datetime
from multiprocessing import Process, Pool
if __name__ == '__main__':
    # Print the PID of the main process so its console output can be told
    # apart from any child-process output.
    print('id', os.getpid())
    # The commented-out examples below time several ways of running a task
    # function `run_proc` (defined elsewhere in this module -- not visible
    # here; TODO confirm) ten times each, using datetime.now() deltas.
    # 1. Sequential baseline: plain function calls in this process.
    # start = datetime.now()
    # for i in range(10):
    #     run_proc(i)
    # print(':', datetime.now() - start)
    # 2. One multiprocessing.Process per task.
    # 2.1 Start all processes without waiting for them to finish.
    # start = datetime.now()
    # for i in range(10):
    #     p = Process(target=run_proc, args=(i,))
    #     p.start()
    # print(':', datetime.now() - start)
    # 2.2 join() immediately after each start() serialises the work again.
    # start = datetime.now()
    # for i in range(10):
    #     p = Process(target=run_proc, args=(i,))
    #     p.start()
    #     p.join()
    # print(':', datetime.now() - start)
    # 3. Process pools.
    # 3.1 Pool.apply blocks until each task completes (effectively serial).
    # pool = Pool()
    # start = datetime.now()
    # for i in range(10):
    #     pool.apply(func=run_proc, args=(i,))
    # pool.close()
    # pool.join()
    # print(':', datetime.now() - start)
    # 3.2 Pool.apply_async submits without blocking (runs in parallel).
    # pool = Pool()
    # start = datetime.now()
    # for i in range(10):
    #     pool.apply_async(func=run_proc, args=(i,))
    # pool.close()
    # pool.join()
    # print(':', datetime.now() - start)
4e51a3f60a853dcb91ad39c536974879ba250f9f | 268 | py | Python | strings_12/tests/test_change_case.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | null | null | null | strings_12/tests/test_change_case.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | 2 | 2019-04-15T06:29:55.000Z | 2019-04-19T17:34:32.000Z | strings_12/tests/test_change_case.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | 1 | 2019-11-19T04:51:18.000Z | 2019-11-19T04:51:18.000Z | import pytest
from ..change_case import change_case
| 22.333333 | 46 | 0.664179 |
4e533f314e5c3e66781f51a4229383e5a116f3ac | 803 | py | Python | Curso_Python_3_UDEMY/POO/desafio_carro.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_Python_3_UDEMY/POO/desafio_carro.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_Python_3_UDEMY/POO/desafio_carro.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # Exercises the Carro (car) class, defined elsewhere in this file -- not
    # visible here. Presumably Carro(180) sets a maximum speed of 180;
    # TODO confirm against the class definition.
    c1 = Carro(180)
    # Accelerate 25 times in steps of 8, printing the speed each time.
    for _ in range(25):
        print(f'Acelerando {c1.acelerar(8)}')
    # Then brake 10 times in steps of 20.
    for _ in range(10):
        print(f' reduzindo a velocidade {c1.frear(delta=20)}')
4e55091a618968c01ac26471c9cd251dd97a71d7 | 9,578 | py | Python | argocd_client/models/application_application_sync_request.py | thepabloaguilar/argocd-client | a6c4ff268a63ee6715f9f837b9225b798aa6bde2 | [
"BSD-3-Clause"
] | 1 | 2021-09-29T11:57:07.000Z | 2021-09-29T11:57:07.000Z | argocd_client/models/application_application_sync_request.py | thepabloaguilar/argocd-client | a6c4ff268a63ee6715f9f837b9225b798aa6bde2 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T00:28:57.000Z | 2020-09-09T00:28:57.000Z | argocd_client/models/application_application_sync_request.py | thepabloaguilar/argocd-client | a6c4ff268a63ee6715f9f837b9225b798aa6bde2 | [
"BSD-3-Clause"
] | 2 | 2020-10-13T18:31:59.000Z | 2021-02-15T12:52:33.000Z | # coding: utf-8
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argocd_client.configuration import Configuration
    def to_str(self):
        """Return the model as a pretty-printed string.

        Serialises via to_dict() so nested fields stay readable.
        """
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`: delegate to the pretty string form."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplicationApplicationSyncRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApplicationApplicationSyncRequest):
return True
return self.to_dict() != other.to_dict()
| 29.112462 | 200 | 0.618292 |
4e5750ff5296717b749da87c576b380ec5a0ca38 | 1,818 | py | Python | kedro/extras/decorators/retry_node.py | hfwittmann/kedro | b0d4fcd8f19b49a7916d78fd09daeb6209a7b6c6 | [
"Apache-2.0"
] | 1 | 2021-11-25T12:33:13.000Z | 2021-11-25T12:33:13.000Z | kedro/extras/decorators/retry_node.py | MerelTheisenQB/kedro | 1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827 | [
"Apache-2.0"
] | null | null | null | kedro/extras/decorators/retry_node.py | MerelTheisenQB/kedro | 1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827 | [
"Apache-2.0"
] | null | null | null | """
This module contains the retry decorator, which can be used as
``Node`` decorators to retry nodes. See ``kedro.pipeline.node.decorate``
"""
import logging
from functools import wraps
from time import sleep
from typing import Callable, Type
def retry(
    exceptions: Type[Exception] = Exception, n_times: int = 1, delay_sec: float = 0
) -> Callable:
    """
    Catches exceptions from the wrapped function at most ``n_times`` and then
    bundles and propagates them.

    **Make sure your function does not mutate the arguments**

    Args:
        exceptions: The superclass of exceptions to catch.
            By default catch all exceptions.
        n_times: At most let the function fail ``n_times`` before bundling
            the errors and propagating them. By default retry only once.
        delay_sec: Delay between failure and next retry in seconds.

    Returns:
        A decorator adding retry functionality to the wrapped function.

    """

    def _retry(func: Callable) -> Callable:
        @wraps(func)
        def _wrapper(*args, **kwargs):
            errors = []
            # Initial attempt plus up to ``n_times`` retries.
            for attempt in range(n_times + 1):
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    errors.append(exc)
                    # Don't sleep after the final failed attempt.
                    if attempt < n_times:
                        sleep(delay_sec)
            # All attempts failed: bundle the collected errors into a single
            # exception of the same type as the last failure and raise it.
            message = "; ".join(str(error) for error in errors)
            raise type(errors[-1])(
                "{} failure(s) after retrying: {}".format(len(errors), message)
            )

        return _wrapper

    return _retry
| 30.3 | 86 | 0.573707 |
4e57932c1bf27e86e563c5240b4f42764bb1b0f4 | 1,470 | py | Python | test/lmp/dset/_base/test_download_file.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | test/lmp/dset/_base/test_download_file.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | test/lmp/dset/_base/test_download_file.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | [
"Beerware"
] | null | null | null | """Test the ability to download files.
Test target:
- :py:meth:`lmp.dset._base.BaseDset.download`.
"""
import os
from typing import Callable
import pytest
import lmp.dset._base
import lmp.util.path
def test_download_as_text_file(file_path: str, file_url: str) -> None:
    """Downloading with ``mode='text'`` must create the target file on disk."""
    lmp.dset._base.BaseDset.download_file(url=file_url, download_path=file_path, mode='text')
    assert os.path.exists(file_path)
def test_download_as_binary_file(file_path: str, file_url: str) -> None:
    """Downloading with ``mode='binary'`` must create the target file on disk."""
    lmp.dset._base.BaseDset.download_file(url=file_url, download_path=file_path, mode='binary')
    assert os.path.exists(file_path)
| 30 | 112 | 0.755782 |
4e584909a422d8166030333d2adb063c0ced43a9 | 1,019 | py | Python | server/account/models.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | [
"MIT"
] | null | null | null | server/account/models.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | [
"MIT"
] | null | null | null | server/account/models.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | [
"MIT"
] | null | null | null | """account models."""
from django.contrib.auth.hashers import (
check_password, make_password
)
from django.db import models
from extension.modelutils import RandomFixedCharField
| 28.305556 | 66 | 0.693817 |
4e59aa0341bec9390bf565218344a25a6e72bf84 | 2,420 | py | Python | tests/views/test_delete.py | fvalverd/AutoApi | 3ceb320fe6a36d24032df121e335a8470fb929af | [
"MIT"
] | 6 | 2015-04-28T13:03:04.000Z | 2021-08-24T19:15:53.000Z | tests/views/test_delete.py | fvalverd/AutoApi | 3ceb320fe6a36d24032df121e335a8470fb929af | [
"MIT"
] | 6 | 2017-06-19T20:59:10.000Z | 2020-05-22T16:22:28.000Z | tests/views/test_delete.py | fvalverd/AutoApi | 3ceb320fe6a36d24032df121e335a8470fb929af | [
"MIT"
] | 2 | 2015-11-10T14:38:39.000Z | 2017-05-18T05:46:03.000Z | # -*- coding: utf-8 -*-
import json
import unittest
from .. import MoviesTest
if __name__ == '__main__':
unittest.main()
| 35.588235 | 108 | 0.638843 |
4e5ac8d618f9e77a2b39df9c4c03557f518e532c | 1,347 | py | Python | src/api/urls.py | Karim-Valeev/django-myfoods | e8750a05461616a2e7740230177a139749daac73 | [
"MIT"
] | null | null | null | src/api/urls.py | Karim-Valeev/django-myfoods | e8750a05461616a2e7740230177a139749daac73 | [
"MIT"
] | null | null | null | src/api/urls.py | Karim-Valeev/django-myfoods | e8750a05461616a2e7740230177a139749daac73 | [
"MIT"
] | null | null | null | from django.urls import path, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework.routers import SimpleRouter, DefaultRouter
from rest_framework_simplejwt import views as jwt_views
from api.views import *
# ,
router = SimpleRouter()
# CRUD endpoints for baskets, backed by BasketViewSet (basename "baskets").
router.register("baskets", BasketViewSet, "baskets")
# drf-yasg view serving the auto-generated OpenAPI schema.
# NOTE(review): the title/description below still say "Snippets API" --
# likely leftover from the drf-yasg example; confirm before release.
schema_view = get_schema_view(
    openapi.Info(
        title="Snippets API",
        default_version="v1",
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="contact@snippets.local"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
)
urlpatterns = [
    # Lightweight health-check endpoint.
    path("check/", check_api_view, name="check-api"),
    # JWT authentication: obtain and refresh token pairs.
    path("token/", jwt_views.TokenObtainPairView.as_view(), name="token-obtain-pair"),
    path("token/refresh/", jwt_views.TokenRefreshView.as_view(), name="token-refresh"),
    # Routes registered on the router above (baskets).
    *router.urls,
    # Schema downloads and interactive API documentation.
    re_path(r"swagger(?P<format>\.json|\.yaml)$", schema_view.without_ui(cache_timeout=0), name="schema-json"),
    path("swagger/", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
    path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"),
]
| 38.485714 | 111 | 0.723831 |
4e5b531d4dae58f3f455001978beda2d6160593c | 18,417 | py | Python | second/pytorch/inference_ros.py | neolixcn/nutonomy_pointpillars | 03f46f6de97c0c97d7bc98d7af3daee215d81a30 | [
"MIT"
] | 1 | 2021-06-11T00:54:48.000Z | 2021-06-11T00:54:48.000Z | second/pytorch/inference_ros.py | neolixcn/nutonomy_pointpillars | 03f46f6de97c0c97d7bc98d7af3daee215d81a30 | [
"MIT"
] | null | null | null | second/pytorch/inference_ros.py | neolixcn/nutonomy_pointpillars | 03f46f6de97c0c97d7bc98d7af3daee215d81a30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import argparse
import pathlib
import pickle
import shutil
import time
from functools import partial
import sys
sys.path.append('../')
from pathlib import Path
import fire
import numpy as np
import torch
import torch.nn as nn
import os
print(torch.__version__)
print(os.environ['PYTHONPATH'])
from google.protobuf import text_format
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from std_msgs.msg import Header
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
import torchplus
import second.data.kitti_common as kitti
from second.builder import target_assigner_builder, voxel_builder
from second.data.preprocess import merge_second_batch
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import ProgressBar
def get_paddings_indicator(actual_num, max_num, axis=0):
    """
    Build a boolean mask marking the valid (non-padding) entries of a
    padded tensor from its per-row element counts.

    :param actual_num: tensor of real element counts per padded row.
    :param max_num: the padded length (int).
    :param axis: axis along which the counts are laid out.
    :return: bool tensor whose entry at position k is True when k < count,
        e.g. counts [3, 2] with max_num 4 give
        [[True, True, True, False], [True, True, False, False]].
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Shape like `counts` but with -1 at the comparison axis, so the index
    # vector broadcasts against the counts, e.g. [1, -1].
    index_shape = [1] * len(counts.shape)
    index_shape[axis + 1] = -1
    indices = torch.arange(
        max_num, dtype=torch.int, device=counts.device).view(index_shape)
    # Broadcasted comparison: True wherever the position index falls below
    # the actual element count.
    return counts.int() > indices
def flat_nested_json_dict(json_dict, sep=".") -> dict:
    """Flatten a nested json-like dict. This function makes a shallow copy.

    Nested keys are joined with ``sep``, e.g. {"a": {"b": 1}} becomes
    {"a.b": 1}. Leaf values themselves are not copied.

    Args:
        json_dict: (possibly nested) dict to flatten.
        sep: separator placed between joined key components.

    Returns:
        A new single-level dict.
    """

    def _flatten(prefix, nested, flatted):
        # Recursively walk `nested`, accumulating flattened keys in `flatted`.
        # Defined locally so this function is self-contained.
        for key, value in nested.items():
            full_key = key if prefix is None else prefix + sep + key
            if isinstance(value, dict):
                _flatten(full_key, value, flatted)
            else:
                flatted[full_key] = value

    flatted = {}
    _flatten(None, json_dict, flatted)
    return flatted
# def evaluate(config_path,
# model_dir,
# result_path=None,
# predict_test=False,
# ckpt_path=None,
# ref_detfile=None,
# pickle_result=True,
# read_predict_pkl_path=None):
#
# model_dir = str(Path(model_dir).resolve())
# if predict_test:
# result_name = 'predict_test'
# else:
# result_name = 'eval_results'
# if result_path is None:
# model_dir = Path(model_dir)
# result_path = model_dir / result_name
# else:
# result_path = pathlib.Path(result_path)
#
# if isinstance(config_path, str):
# config = pipeline_pb2.TrainEvalPipelineConfig()
# with open(config_path, "r") as f:
# proto_str = f.read()
# text_format.Merge(proto_str, config)
# else:
# config = config_path
#
# input_cfg = config.eval_input_reader
# model_cfg = config.model.second
# train_cfg = config.train_config
# class_names = list(input_cfg.class_names)
# center_limit_range = model_cfg.post_center_limit_range
# #########################
# # Build Voxel Generator
# #########################
# voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
# bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
# box_coder = box_coder_builder.build(model_cfg.box_coder)
# target_assigner_cfg = model_cfg.target_assigner
# target_assigner = target_assigner_builder.build(target_assigner_cfg,
# bv_range, box_coder)
#
# net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
# net.cuda()
# if train_cfg.enable_mixed_precision:
# net.half()
# net.metrics_to_float()
# net.convert_norm_to_float(net)
#
# if ckpt_path is None:
# torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
# else:
# torchplus.train.restore(ckpt_path, net)
#
# eval_dataset = input_reader_builder.build(
# input_cfg,
# model_cfg,
# training=False,
# voxel_generator=voxel_generator,
# target_assigner=target_assigner)
#
# eval_dataloader = torch.utils.data.DataLoader(
# eval_dataset,
# batch_size=input_cfg.batch_size,
# shuffle=False,
# num_workers=input_cfg.num_workers,
# pin_memory=False,
# collate_fn=merge_second_batch)
#
# if train_cfg.enable_mixed_precision:
# float_dtype = torch.float16
# else:
# float_dtype = torch.float32
#
# net.eval()
# result_path_step = result_path / f"step_{net.get_global_step()}"
# result_path_step.mkdir(parents=True, exist_ok=True)
# t = time.time()
# dt_annos = []
# global_set = None
# eval_data = iter(eval_dataloader)
# example = next(eval_data)
# example = example_convert_to_torch(example, float_dtype)
# example_tuple = list(example.values())
# example_tuple[5] = torch.from_numpy(example_tuple[5])
# if (example_tuple[3].size()[0] != input_cfg.batch_size):
# continue
#
# dt_annos += predict_kitti_to_anno(
# net, example_tuple, class_names, center_limit_range,
# model_cfg.lidar_input, global_set)
# for example in iter(eval_dataloader):
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'
# # 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'
# # 8: 'image_idx', 9: 'image_shape']
#
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',
# # 4: 'anchor_mask', 5: 'pc_idx']
# example = example_convert_to_torch(example, float_dtype)
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',
# # 4: 'anchor_mask', 5: 'pc_idx']
#
# example_tuple = list(example.values())
# example_tuple[5] = torch.from_numpy(example_tuple[5])
# # example_tuple[9] = torch.from_numpy(example_tuple[9])
#
# if (example_tuple[3].size()[0] != input_cfg.batch_size):
# continue
#
# dt_annos += predict_kitti_to_anno(
# net, example_tuple, class_names, center_limit_range,
# model_cfg.lidar_input, global_set)
if __name__ == '__main__':
    # Stand-alone entry point: load a trained PointPillars network and wire
    # it into ROS so incoming point clouds are run through the detector.
    parser = argparse.ArgumentParser(description='testing')
    args = parser.parse_args()
    # NOTE(review): hard-coded, user-specific paths -- these should come from
    # CLI arguments (the parser above defines none).
    model_dir = "/nfs/nas/model/songhongli/neolix_shanghai_3828/"
    config_path = "/home/songhongli/Projects/pointpillars2/second/configs/pointpillars/xyres_16_4cls.proto"
    # Accept either a path to a pipeline config proto or a pre-parsed config.
    if isinstance(config_path, str):
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        config = config_path
    input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    class_names = list(input_cfg.class_names)
    center_limit_range = model_cfg.post_center_limit_range
    #########################
    # Build Voxel Generator
    #########################
    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    # Bird's-eye-view range: [x_min, y_min, x_max, y_max] of the cloud range.
    bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner_cfg = model_cfg.target_assigner
    target_assigner = target_assigner_builder.build(target_assigner_cfg,
                                                    bv_range, box_coder)
    # Build the network, move it to the GPU and restore the latest checkpoint.
    net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
    net.cuda()
    torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    # code added for using ROS
    rospy.init_node('pointpillars_ros_node')
    # `callback` is expected to be defined earlier in this file (not visible
    # here); presumably it runs inference on each incoming cloud. TODO confirm.
    sub_ = rospy.Subscriber("/sensor/velodyne16/all/compensator/PointCloud2", PointCloud2, callback, queue_size=1)
    pub_points = rospy.Publisher("points_modified", PointCloud2, queue_size=1)
    pub_arr_bbox = rospy.Publisher("pre_arr_bbox", BoundingBoxArray, queue_size=10)
    # pub_bbox = rospy.Publisher("voxelnet_bbox", BoundingBox, queue_size=1)
    print("[+] voxelnet_ros_node has started!")
    # Hand control to ROS: block until the node is shut down.
    rospy.spin()
4e5c7dba2e2083dcb5bc4c5689df3f572c63510f | 3,112 | py | Python | agent.py | AdamMiltonBarker/TassAI | 61ae4f208f06ea39cc5b58079175f17bf1fca4c4 | [
"MIT"
] | 1 | 2021-06-29T09:46:47.000Z | 2021-06-29T09:46:47.000Z | agent.py | AdamMiltonBarker/TassAI | 61ae4f208f06ea39cc5b58079175f17bf1fca4c4 | [
"MIT"
] | 4 | 2021-06-27T16:06:43.000Z | 2021-06-27T16:09:53.000Z | agent.py | AdamMiltonBarker/TassAI | 61ae4f208f06ea39cc5b58079175f17bf1fca4c4 | [
"MIT"
] | 2 | 2020-09-28T02:11:43.000Z | 2020-10-13T15:27:41.000Z | #!/usr/bin/env python3
""" HIAS TassAI Facial Recognition Agent.
HIAS TassAI Facial Recognition Agent processes streams from local
or remote cameras to identify known and unknown humans.
MIT License
Copyright (c) 2021 Asociación de Investigación en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker
"""
import sys
from abc import ABC, abstractmethod
from modules.AbstractAgent import AbstractAgent
from modules.helpers import helpers
from modules.model import model
from modules.read import read
from modules.stream import stream
from modules.sockets import sockets
from threading import Thread
# Instantiate the module-level agent (the `agent` class is defined earlier in
# this file -- not visible here). NOTE(review): this rebinds the name `agent`
# from the class to the instance, so the class is no longer reachable by name.
agent = agent()

if __name__ == "__main__":
    # `main` is presumably defined elsewhere in this file; TODO confirm.
    main()
| 26.151261 | 78 | 0.754499 |
4e5f6c409675e74bac8adf5ea0c951c284a25d25 | 181 | py | Python | asyncy/constants/LineConstants.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | asyncy/constants/LineConstants.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | asyncy/constants/LineConstants.py | rashmi43/platform-engine | dd9a22742bc8dc43a530ea5edef39b3c35db57c1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
| 16.454545 | 23 | 0.563536 |
4e604e0888e3f4c9cd3d2b535fdc4b7f1eabfe77 | 2,576 | py | Python | Payload_Types/apfell/mythic/agent_functions/terminals_send.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | [
"BSD-3-Clause"
] | 2 | 2021-01-28T19:35:46.000Z | 2021-04-08T12:01:48.000Z | Payload_Types/apfell/mythic/agent_functions/terminals_send.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | [
"BSD-3-Clause"
] | null | null | null | Payload_Types/apfell/mythic/agent_functions/terminals_send.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | [
"BSD-3-Clause"
] | 2 | 2020-12-29T02:34:13.000Z | 2021-06-24T04:07:38.000Z | from CommandBase import *
import json
from MythicResponseRPC import *
| 36.8 | 457 | 0.622671 |
4e6138843998a3ede92abaaa70a1ae3fc7c18aae | 711 | py | Python | losses.py | dhaulagiri0/AniGen | bd845a29e771544ade1f64b94f967d8e178952f8 | [
"MIT"
] | null | null | null | losses.py | dhaulagiri0/AniGen | bd845a29e771544ade1f64b94f967d8e178952f8 | [
"MIT"
] | null | null | null | losses.py | dhaulagiri0/AniGen | bd845a29e771544ade1f64b94f967d8e178952f8 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import backend
#DEPRECATED
# An implementation of wasserstein used for a naive implementation of WGAN
# calculate wasserstein loss
# Define the loss functions for the discriminator,
# which should be (fake_loss - real_loss).
# We will add the gradient penalty later to this loss function.
# Define the loss functions for the generator.
| 32.318182 | 74 | 0.78481 |
4e6142fd70771c11fbb624c19a0644bc6c708693 | 623 | py | Python | mephisto/plugins/math_expr.py | Kenton1989/mephisto-bot | 50a8008c99b984a453713f480fa578bf5a8353c8 | [
"MIT"
] | null | null | null | mephisto/plugins/math_expr.py | Kenton1989/mephisto-bot | 50a8008c99b984a453713f480fa578bf5a8353c8 | [
"MIT"
] | null | null | null | mephisto/plugins/math_expr.py | Kenton1989/mephisto-bot | 50a8008c99b984a453713f480fa578bf5a8353c8 | [
"MIT"
] | null | null | null | import re
import math
import numexpr as ne
MATH_CONST = {
'pi': math.pi,
'': math.pi,
'e': math.e,
'inf': math.inf,
'i': 1j,
'j': 1j,
}
SUB_MAP = {
# replace UTF char with ASCII char
'': '(',
'': ')',
'': ',',
'': '-',
'': '/',
'': '*',
'': '+',
# replace common synonym
'ln': 'log',
'lg': 'log10',
'': 'inf',
'mod': '%',
}
SUB_RE = re.compile('|'.join(re.escape(s) for s in SUB_MAP.keys()))
| 16.394737 | 67 | 0.473515 |
4e61d7b3b7f328b277a5ef816c4995021aeb1703 | 1,185 | py | Python | testemu/client/testemu_client/network.py | advaoptical/netemu | a418503d3829f206602e9360c05235626fa8bec5 | [
"Apache-2.0"
] | null | null | null | testemu/client/testemu_client/network.py | advaoptical/netemu | a418503d3829f206602e9360c05235626fa8bec5 | [
"Apache-2.0"
] | null | null | null | testemu/client/testemu_client/network.py | advaoptical/netemu | a418503d3829f206602e9360c05235626fa8bec5 | [
"Apache-2.0"
] | null | null | null | from collections import Mapping
from . import yang_models
| 23.7 | 73 | 0.571308 |
4e61d7b801c1c3cd496fc2afd8c46c182f86ceda | 666 | py | Python | asym_rlpo/representations/identity.py | abaisero/asym-porl | 8a76d920e51d783bbeeeea3cd2b02efffbb33c72 | [
"MIT"
] | 2 | 2021-08-24T22:41:36.000Z | 2021-10-31T01:55:37.000Z | asym_rlpo/representations/identity.py | abaisero/asym-porl | 8a76d920e51d783bbeeeea3cd2b02efffbb33c72 | [
"MIT"
] | null | null | null | asym_rlpo/representations/identity.py | abaisero/asym-porl | 8a76d920e51d783bbeeeea3cd2b02efffbb33c72 | [
"MIT"
] | 1 | 2021-10-13T12:27:40.000Z | 2021-10-13T12:27:40.000Z | import gym
import torch
from asym_rlpo.utils.debugging import checkraise
from .base import Representation
| 22.2 | 52 | 0.630631 |
4e630265553913112faaae0a442558c6d77373c7 | 8,885 | py | Python | src/ggplib/db/lookup.py | richemslie/ggplib | 8388678f311db4a9906d8a3aff71d3f0037b623b | [
"MIT"
] | 11 | 2019-03-02T13:49:07.000Z | 2021-12-21T17:03:05.000Z | src/ggplib/db/lookup.py | ggplib/ggplib | 8388678f311db4a9906d8a3aff71d3f0037b623b | [
"MIT"
] | 2 | 2019-05-15T18:23:50.000Z | 2019-05-19T08:13:19.000Z | src/ggplib/db/lookup.py | ggplib/ggplib | 8388678f311db4a9906d8a3aff71d3f0037b623b | [
"MIT"
] | 1 | 2020-04-02T17:35:35.000Z | 2020-04-02T17:35:35.000Z | import sys
import traceback
from ggplib.util import log
from ggplib.statemachine import builder
from ggplib.db import signature
###############################################################################
###############################################################################
def install_draughts(add_game):
' load custom c++ statemachine for draughts '
from ggplib import interface
from ggplib.non_gdl_games.draughts import desc, model
desc10 = desc.BoardDesc(10)
cpp_statemachines = interface.CppStateMachines()
model = model.create_sm_model(desc10)
for game_variant in ["draughts_10x10",
"draughts_killer_10x10",
"draughts_bt_10x10"]:
sm_create_meth = getattr(cpp_statemachines, game_variant)
add_game(game_variant, sm_create_meth(), model)
def install_hex(add_game):
' load custom c++ statemachine for draughts '
from ggplib import interface
from ggplib.non_gdl_games.hex.model import create_sm_model
cpp_statemachines = interface.CppStateMachines()
for sz in [9, 11, 13, 15, 19]:
cpp_sm = cpp_statemachines.get_hex(sz)
model = create_sm_model(sz)
add_game("hex_lg_%s" % sz, cpp_sm, model)
###############################################################################
# The API:
the_database = None
# XXX build_sm not used.
| 28.206349 | 88 | 0.566798 |
4e63749234da693d5c1f2625bba0bf9c3d524e3f | 274 | py | Python | testproject/fiber_test/tests/test_templatetags/test_fiber_version.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 143 | 2015-01-06T01:15:22.000Z | 2017-07-08T04:10:08.000Z | testproject/fiber_test/tests/test_templatetags/test_fiber_version.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 44 | 2015-01-22T14:21:32.000Z | 2017-05-31T16:59:23.000Z | testproject/fiber_test/tests/test_templatetags/test_fiber_version.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 53 | 2015-01-21T21:48:49.000Z | 2017-06-12T07:33:13.000Z | import fiber
from django.test import SimpleTestCase
from ...test_util import RenderMixin
| 30.444444 | 95 | 0.766423 |
4e658277a9b24094cf1e76fa7c348cccc93b01df | 7,352 | py | Python | main.py | dogerish/pic2html | cca9d032fb2325cb8c220cd0f5f632235d0f8c94 | [
"MIT"
] | null | null | null | main.py | dogerish/pic2html | cca9d032fb2325cb8c220cd0f5f632235d0f8c94 | [
"MIT"
] | null | null | null | main.py | dogerish/pic2html | cca9d032fb2325cb8c220cd0f5f632235d0f8c94 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import sys, re
from PIL import Image
# return the argument if it exists (converted to the same type as the default), otherwise default
default = lambda arg, defa: type(defa)(sys.argv[arg]) if len(sys.argv) > arg and sys.argv[arg] else defa
# filename of image to evaluate, default is image.jpg
IMAGE = default(1, "image.jpg")
# filename of output, default just prints it to stdout
OUTPUT = default(2, "")
# outputs in defined way based on whether or not an output file is given
if OUTPUT == "": output = print
else:
# output columns (width)
COLS = default(3, 200)
# color hues (degrees, [0-360))
COLORS = dict()
with open('colors.txt') as f:
# each line in the file
for line in f.readlines():
# means comment
if line.startswith('#'): continue
# name: hue saturation
# split bt name and values
line = line.split(':')
# split values with whitespace characters
line = [line[0], *line[1].strip().split('\t')]
# strip blank things from each piece
for i, piece in enumerate(line): line[i] = piece.strip()
# add key to COLORS
name, hue, sat = line
COLORS[name] = (None if hue == '*' else int(hue), None if sat == '*' else float(sat))
# characters for lightness values (ascending)
CHARS = " -+:!?%#&$@"
# color class
# where the output will be accumulated to
accumulator = '<body style="background-color: #000"><pre>'
# open the image
with Image.open(IMAGE) as img:
# the step to increment by each time
step = img.size[0] / COLS
# the vertical step, to account for characters not being squares
vstep = step * 15/7.81
# the current color
curcolor = None
# each row
for row in range(int(img.size[1]/vstep)):
row *= vstep
# add newline character to go to next row if this isn't the first row
accumulator += '\n'
# each column
for col in range(COLS):
col *= step
# average the colors for this location
avgcolor = Color()
colorc = 0 # color count
# within this tile/area
for y in range(int(row), int(row + vstep)):
for x in range(int(col), int(col + step)):
if x >= img.size[0]: break # break if it's out of range
# add this pixel's color to the average
avgcolor += Color(*img.getpixel((x, y)))
colorc += 1
if y >= img.size[1]: break # break if it's out of range
# turn sum into average
avgcolor /= colorc
# get the hsl version
hsl = avgcolor.hsl()
# approximate the color
apcolor = avgcolor.approx(hsl)
# pick the right character based on the lightness
char = CHARS[round(hsl[2]*(len(CHARS) - 1))]
# if it isn't already in the right color, change it
if apcolor != curcolor:
# add colored string to accumulator
accumulator += "</font>" + avgcolor.color_str(char, apcolor)
# new color
curcolor = apcolor
else:
# add character
accumulator += char
# end the elements
accumulator += "</font></pre></body>"
# output the result
output(accumulator)
| 37.131313 | 104 | 0.569777 |
4e673a1840258d5931483218139042ef6091e9ee | 485 | py | Python | coding_challenge/conftest.py | jojacobsen/coding_challenge | 94335f00f57a6c4d64cbc2b282a0ca099445e866 | [
"MIT"
] | 1 | 2022-03-06T15:40:56.000Z | 2022-03-06T15:40:56.000Z | coding_challenge/conftest.py | jojacobsen/coding_challenge | 94335f00f57a6c4d64cbc2b282a0ca099445e866 | [
"MIT"
] | null | null | null | coding_challenge/conftest.py | jojacobsen/coding_challenge | 94335f00f57a6c4d64cbc2b282a0ca099445e866 | [
"MIT"
] | null | null | null | import pytest
from coding_challenge.users.models import User
from coding_challenge.users.tests.factories import UserFactory
from coding_challenge.ship_manager.models import Ship
from coding_challenge.ship_manager.tests.factories import ShipFactory
| 22.045455 | 69 | 0.797938 |
4e699a095a31850ba8f572b972a6266ffa8b3893 | 1,909 | py | Python | swagger.py | edwardw1987/swagger | 69b868523834811537af5265b47eb0bd94c42c2f | [
"MIT"
] | null | null | null | swagger.py | edwardw1987/swagger | 69b868523834811537af5265b47eb0bd94c42c2f | [
"MIT"
] | null | null | null | swagger.py | edwardw1987/swagger | 69b868523834811537af5265b47eb0bd94c42c2f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: edward
# @Date: 2016-05-12 14:11:21
# @Last Modified by: edward
# @Last Modified time: 2016-05-12 17:29:48
from functools import partial
# api = swagger.docs(Api(app), apiVersion='0.1',
# basePath='http://localhost:5000',
# resourcePath='/',
# produces=["application/json", "text/html"],
# api_spec_url='/api/spec',
# description='A Basic API')
apis = _APIs()
operation = apis.operation
docs = apis.make_docs
get_api_spec = apis.get_spec
if __name__ == '__main__':
main()
| 25.118421 | 64 | 0.550026 |
4e69ad09d076ffa2812d0d72b1983c1392ea46e1 | 29,469 | py | Python | wicarproject/carapi/carupload_tests.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | 1 | 2018-04-20T04:58:50.000Z | 2018-04-20T04:58:50.000Z | wicarproject/carapi/carupload_tests.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | 7 | 2021-02-08T20:24:49.000Z | 2022-03-11T23:26:33.000Z | wicarproject/carapi/carupload_tests.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | null | null | null | import os
import unittest
import sqlalchemy
from flask import Flask,session,url_for,redirect
from flask_sqlalchemy import SQLAlchemy
from application import create_app ,db
import unittest
import json
from caruser.models import User, UserBank
from carupload.models import CarOption,Car,CarImage
from flask_testing import TestCase
from utilities.dao.userdao import UserDao
from utilities.dao.cardao import CarDao
from utilities.testutil import TestUtil
from freezegun import freeze_time
from datetime import datetime as dt
from datetime import timedelta
from settings import TEST_DB_URI,MONGO_URI
import urllib
from utilities.flask_tracking.documents import Tracking
from mongoengine.queryset.visitor import Q
import os
TEST_UPLOADED_FOLDER='/static/images/test_images'
# .
#
#.
#.
#.
#
| 43.083333 | 137 | 0.574468 |
4e69d114d4d24a38c5a1cd7288544f6fdd2af296 | 1,947 | py | Python | test/train_net.py | gregdeon/simple-ann | 80f1d239d15b820162d5de93766290bca81f7bd3 | [
"MIT"
] | 1 | 2018-08-07T03:27:23.000Z | 2018-08-07T03:27:23.000Z | test/train_net.py | gregdeon/simple-ann | 80f1d239d15b820162d5de93766290bca81f7bd3 | [
"MIT"
] | null | null | null | test/train_net.py | gregdeon/simple-ann | 80f1d239d15b820162d5de93766290bca81f7bd3 | [
"MIT"
] | null | null | null | # train-net.py
# Use the neural network module to detect simple signals
import numpy as np
import matplotlib.pyplot as plt
import random
from src.net import Net
def main():
""" Step 1: make dataset """
random.seed()
# Make 3 inputs - 1 base and 2 added inputs
sig_len = 10
y_base = np.array([1, 2, 3, 2, 6, 5, 0, -1, 2, 4])
y_add1 = np.array([0, 0, 1, 0, -2, 0, 0, 1, 1, 0])
y_add2 = np.array([1, 0, 0, 1, 2, -1, 0, 0, 0, 0])
# Set up a bunch of random signals to detect
y_num = 100
signal1 = np.array([random.randint(0,1) for i in range(y_num)])
signal2 = np.array([random.randint(0,1) for i in range(y_num)])
signal = np.array([signal1, signal2])
# Add up the inputs accordingly
y_list = np.zeros([y_num, len(y_base)])
for i in range(y_num):
y_sum = np.array([y_base[j] + signal1[i]*y_add1[j] + signal2[i]*y_add2[j]
for j in range(sig_len)])
y_list[i] = y_sum
# Add noise
noise = np.random.random([y_num, len(y_base)]) / 10
y_list += noise
""" Step 2: train neural network """
# Set up input and signals
input = np.array(y_list)
signal = signal.transpose()
# Set up min and max for each input
# Can give the network a good idea of input ranges or just a rough range
limits = [[y_base[i]-2, y_base[i]+2] for i in range(10)]
#limits = [[-20, 20]]*10
# Make network
net = Net(limits, 2, 2)
errorList = net.train_many(input, signal, 0.1, 100, 0.001, True)
print "\n".join(map(str, errorList))
""" Step 3: check results """
# Print results by hand
#for i in range(y_num):
# print y_list[i]
# print signal1[i]
# print signal2[i]
# print net.sim(y_list[i, :])
# Plot error vs. training epochs
plt.semilogy(errorList)
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('SSE')
plt.show() | 30.421875 | 82 | 0.580894 |
4e6a00926225b1d573a3d0b1379d0479bee2d1ba | 2,165 | py | Python | neural-network/model/gcn_lib/sparse/data_util.py | BuildJet/hongbomiao.com | 33bcbf63c51f9df725811ddbdef49d0db46146ef | [
"MIT"
] | 104 | 2019-08-09T21:27:48.000Z | 2022-03-29T11:58:36.000Z | neural-network/model/gcn_lib/sparse/data_util.py | BuildJet/hongbomiao.com | 33bcbf63c51f9df725811ddbdef49d0db46146ef | [
"MIT"
] | 4,187 | 2019-08-04T08:19:36.000Z | 2022-03-31T22:43:20.000Z | neural-network/model/gcn_lib/sparse/data_util.py | BuildJet/hongbomiao.com | 33bcbf63c51f9df725811ddbdef49d0db46146ef | [
"MIT"
] | 19 | 2019-08-06T00:53:05.000Z | 2022-01-04T05:55:48.000Z | # allowable multiple choice node and edge features
# code from https://github.com/snap-stanford/ogb/blob/master/ogb/utils/features.py
allowable_features = {
"possible_atomic_num_list": list(range(1, 119)) + ["misc"], # type: ignore
"possible_chirality_list": [
"CHI_UNSPECIFIED",
"CHI_TETRAHEDRAL_CW",
"CHI_TETRAHEDRAL_CCW",
"CHI_OTHER",
],
"possible_degree_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "misc"],
"possible_formal_charge_list": [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, "misc"],
"possible_numH_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, "misc"],
"possible_number_radical_e_list": [0, 1, 2, 3, 4, "misc"],
"possible_hybridization_list": ["SP", "SP2", "SP3", "SP3D", "SP3D2", "misc"],
"possible_is_aromatic_list": [False, True],
"possible_is_in_ring_list": [False, True],
"possible_bond_type_list": ["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC", "misc"],
"possible_bond_stereo_list": [
"STEREONONE",
"STEREOZ",
"STEREOE",
"STEREOCIS",
"STEREOTRANS",
"STEREOANY",
],
"possible_is_conjugated_list": [False, True],
}
| 35.491803 | 82 | 0.590762 |
4e6b5fa33d845c1a9c9c556d16d04f10c237dd56 | 3,401 | py | Python | ising_model/hamiltonian.py | FeiQuantumSoftware/ising_model | 6d8b177678aa953840fc01616dc7c789d9531b93 | [
"BSD-3-Clause"
] | null | null | null | ising_model/hamiltonian.py | FeiQuantumSoftware/ising_model | 6d8b177678aa953840fc01616dc7c789d9531b93 | [
"BSD-3-Clause"
] | null | null | null | ising_model/hamiltonian.py | FeiQuantumSoftware/ising_model | 6d8b177678aa953840fc01616dc7c789d9531b93 | [
"BSD-3-Clause"
] | null | null | null | """coupling Hamiltonian class def"""
from math import exp
import numpy as np
from .spinconfig import SpinConfig
| 25.007353 | 91 | 0.486622 |
4e6b7f322b294819ab5b4b631d1bcc760d50a431 | 618 | py | Python | examples/testpetsc.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | [
"MIT"
] | 2 | 2018-09-13T12:39:07.000Z | 2019-04-05T04:55:59.000Z | examples/testpetsc.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | [
"MIT"
] | null | null | null | examples/testpetsc.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | [
"MIT"
] | null | null | null |
import numpy as np
| 16.263158 | 62 | 0.469256 |
4e6dab17c24195e50268c9d3fbe8629caaa109c4 | 17,866 | py | Python | tcm-py/tcm_fabric.py | Datera/lio-utils | 0ac9091c1ff7a52d5435a4f4449e82637142e06e | [
"Apache-2.0"
] | 8 | 2015-04-02T21:44:47.000Z | 2021-07-15T08:31:28.000Z | tcm-py/tcm_fabric.py | Datera/lio-utils | 0ac9091c1ff7a52d5435a4f4449e82637142e06e | [
"Apache-2.0"
] | null | null | null | tcm-py/tcm_fabric.py | Datera/lio-utils | 0ac9091c1ff7a52d5435a4f4449e82637142e06e | [
"Apache-2.0"
] | 8 | 2015-06-18T14:30:21.000Z | 2021-03-25T19:51:03.000Z | #!/usr/bin/python
import os, sys, shutil
import subprocess as sub
import string
import re
import datetime, time
import optparse
target_root = "/sys/kernel/config/target/"
spec_root = "/var/target/fabric/"
if __name__ == "__main__":
main()
| 32.9631 | 144 | 0.640378 |
4e6eb09628c21128f6237f44dd57b2bfd0a093f8 | 12,321 | py | Python | vmflib2/games/hl2mp.py | Trainzack/vmflib2 | 9bc9803b3c8c644346b5a5eb864c0deaf544d8a6 | [
"BSD-2-Clause"
] | 1 | 2021-02-11T17:52:48.000Z | 2021-02-11T17:52:48.000Z | vmflib2/games/hl2mp.py | Trainzack/vmflib2 | 9bc9803b3c8c644346b5a5eb864c0deaf544d8a6 | [
"BSD-2-Clause"
] | null | null | null | vmflib2/games/hl2mp.py | Trainzack/vmflib2 | 9bc9803b3c8c644346b5a5eb864c0deaf544d8a6 | [
"BSD-2-Clause"
] | 1 | 2021-02-12T18:56:51.000Z | 2021-02-12T18:56:51.000Z | """
Helper classes for creating maps in any Source Engine game that uses hl2mp.fgd.
This file was auto-generated by import_fgd.py on 2020-01-19 09:11:14.977620.
"""
from vmflib2.vmf import *
| 57.306977 | 840 | 0.692152 |
4e6f333b107f38c8aaa09dc165be5dc797f0e6b5 | 317 | py | Python | src/constants.py | heyhpython/desktop | e75ffddf9526e8fd1adaca69c315005e202bf84b | [
"MIT"
] | null | null | null | src/constants.py | heyhpython/desktop | e75ffddf9526e8fd1adaca69c315005e202bf84b | [
"MIT"
] | null | null | null | src/constants.py | heyhpython/desktop | e75ffddf9526e8fd1adaca69c315005e202bf84b | [
"MIT"
] | null | null | null | import os
BASE_DIR = os.path.dirname(__file__)
__config__ = os.path.abspath(os.path.join(BASE_DIR, "../config.cfg"))
__template__ = os.path.abspath(os.path.join(BASE_DIR, "templates"))
__static__ = os.path.abspath(os.path.join(BASE_DIR, "static"))
__upload__ = os.path.abspath(os.path.join(__static__, "uploads"))
| 31.7 | 69 | 0.744479 |
4e7096429698d4dbcba3e4c9717842932c8154f8 | 1,363 | py | Python | app.py | IamSilentBot/Guardzilla | 8ca9dcda2d99cba1628b708a770a34dd726acd9e | [
"MIT"
] | 1 | 2022-02-05T22:55:50.000Z | 2022-02-05T22:55:50.000Z | app.py | IamSilentBot/Guardzilla | 8ca9dcda2d99cba1628b708a770a34dd726acd9e | [
"MIT"
] | null | null | null | app.py | IamSilentBot/Guardzilla | 8ca9dcda2d99cba1628b708a770a34dd726acd9e | [
"MIT"
] | 1 | 2022-02-21T17:47:39.000Z | 2022-02-21T17:47:39.000Z | import nextcord
from nextcord.ext import commands
import json
import os
import pymongo
import os
from keep_alive import keep_alive
# Set environment variables
# os.environ['info'] = "test:pass123"
# os.environ['TOKEN'] = "MY-AWSOME-TOKEN"
intents = nextcord.Intents.all()
TOKEN = os.environ['TOKEN']
client = nextcord.ext.commands.Bot(
command_prefix=prefix_d, intents=intents, help_command=None)
for pyFile in os.listdir("./commands"):
if pyFile.endswith(".py"):
client.load_extension(f"commands.{pyFile[:-3]}")
print(f"{pyFile[:-3]} | Loaded")
keep_alive()
client.run(TOKEN)
| 27.816327 | 117 | 0.681585 |
4e71fbacfde29db685d3ecdd9b0c31b58a97a5ef | 1,841 | py | Python | generator/contact.py | bloodes/adressbook | 52582bc8c4825987db668ab084dff32202f1e2e5 | [
"Apache-2.0"
] | null | null | null | generator/contact.py | bloodes/adressbook | 52582bc8c4825987db668ab084dff32202f1e2e5 | [
"Apache-2.0"
] | null | null | null | generator/contact.py | bloodes/adressbook | 52582bc8c4825987db668ab084dff32202f1e2e5 | [
"Apache-2.0"
] | null | null | null | from models.model_contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number_of_groups", "file"])
except getopt.GetoptError as err:
getopt.usage()
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == '-n':
n = int(a)
elif o == '-f':
f = a
testdata = [Contact(firstname='Stepan', middlename='Barantsev', lastname='Lol',
nickname='Bloodes', email1='stepan.barantsev@gmail.com')] +\
[Contact(firstname=random_string('', 10),
middlename=random_string('', 20),
lastname=random_string('', 20),
nickname=random_string('', 20),
homephone=random_string('', 20),
mobilephone=random_string('', 20),
workphone=random_string('', 20),
secondaryphone=random_string('', 20),
email1=random_string('', 20),
email2=random_string('', 20),
email3=random_string('', 20),
title=random_string('', 20),
notes=random_string('', 20),
company=random_string('', 20),
homepage=random_string('', 20),
fax=random_string('', 20))
for i in range(5)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, 'w') as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
| 33.472727 | 96 | 0.551331 |
4e72b83d81906c88261b3bf53646c5b537fd803e | 13,598 | py | Python | tick/array_test/tests/array_memory_test.py | sumau/tick | 1b56924a35463e12f7775bc0aec182364f26f2c6 | [
"BSD-3-Clause"
] | 411 | 2017-03-30T15:22:05.000Z | 2022-03-27T01:58:34.000Z | tick/array_test/tests/array_memory_test.py | saurabhdash/tick | bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | [
"BSD-3-Clause"
] | 345 | 2017-04-13T14:53:20.000Z | 2022-03-26T00:46:22.000Z | tick/array_test/tests/array_memory_test.py | saurabhdash/tick | bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | [
"BSD-3-Clause"
] | 102 | 2017-04-25T11:47:53.000Z | 2022-02-15T11:45:49.000Z | # License: BSD 3 clause
import gc
import unittest
import weakref
import numpy as np
import scipy
from scipy.sparse import csr_matrix
from tick.array.build.array import tick_double_sparse2d_from_file
from tick.array.build.array import tick_double_sparse2d_to_file
from tick.array_test.build import array_test as test
if __name__ == "__main__":
unittest.main()
| 36.652291 | 175 | 0.599941 |
4e73eaef843757f7ea7a8bbd35f9c54ff770774c | 6,878 | py | Python | chatbot/interact.py | VictorDebray/RoadBuddy | 9c62e2acd2d540caa0ebefc50af5446c0d4f864f | [
"MIT"
] | null | null | null | chatbot/interact.py | VictorDebray/RoadBuddy | 9c62e2acd2d540caa0ebefc50af5446c0d4f864f | [
"MIT"
] | null | null | null | chatbot/interact.py | VictorDebray/RoadBuddy | 9c62e2acd2d540caa0ebefc50af5446c0d4f864f | [
"MIT"
] | null | null | null | # Author: DINDIN Meryll
# Date: 15 September 2019
# Project: RoadBuddy
try: from chatbot.imports import *
except: from imports import *
| 38 | 126 | 0.604536 |
4e744f117ca425c6d830404575c231f03329052e | 20,731 | py | Python | workspace/plug/maya/lynxinode/scripts/python/lxCommand/template/nodeTemplate.py | no7hings/Lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | 2 | 2018-03-06T03:33:55.000Z | 2019-03-26T03:25:11.000Z | workspace/plug/maya/lynxinode/scripts/python/lxCommand/template/nodeTemplate.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | workspace/plug/maya/lynxinode/scripts/python/lxCommand/template/nodeTemplate.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | # encoding=utf-8
import re
#
import types
# noinspection PyUnresolvedReferences
import maya.mel as mel
# noinspection PyUnresolvedReferences
import maya.cmds as cmds
#
_objectStore = {}
#
#
#
#
#
#
#
#
#
#
#
#
#
#
def nodeAttr(self, attr):
return self.template.nodeAttr(attr)
#
#
#
#
#
def addTemplate(self, attr, template):
self.addChildTemplate(attr, template)
#
#
#
#
def beginLayout(self, label, **kwargs):
kwargs['label'] = label
cmds.setParent(self._layoutStack[-1])
cmds.frameLayout(**kwargs)
self._layoutStack.append(cmds.columnLayout(adjustableColumn=True))
#
#
#
#
#
#
#
#
def nodeAttr(self, attr=None):
if attr is None:
attr = self.attr
return self.nodeName + '.' + attr
#
#
# For Override
def setup(self):
pass
| 29.871758 | 139 | 0.589262 |
4e751966b10b05f698edd3d37469d6c2ff784045 | 31 | py | Python | bubble_io/__init__.py | jasontyping/bubble-io-python | 487dd253e85814a012df4a5a5a6a08f023517641 | [
"MIT"
] | null | null | null | bubble_io/__init__.py | jasontyping/bubble-io-python | 487dd253e85814a012df4a5a5a6a08f023517641 | [
"MIT"
] | null | null | null | bubble_io/__init__.py | jasontyping/bubble-io-python | 487dd253e85814a012df4a5a5a6a08f023517641 | [
"MIT"
] | 1 | 2020-10-25T08:31:59.000Z | 2020-10-25T08:31:59.000Z | from .bubbleio import BubbleIo
| 15.5 | 30 | 0.83871 |
4e769aee426de55532dd683d9dd832dcae724616 | 68 | py | Python | python/pandas_pbf/core.py | ccharlesgb/pandas-pbf | 8c5b1af2c291cfd485b1296a1a5ba34ddc93d995 | [
"MIT"
] | null | null | null | python/pandas_pbf/core.py | ccharlesgb/pandas-pbf | 8c5b1af2c291cfd485b1296a1a5ba34ddc93d995 | [
"MIT"
] | null | null | null | python/pandas_pbf/core.py | ccharlesgb/pandas-pbf | 8c5b1af2c291cfd485b1296a1a5ba34ddc93d995 | [
"MIT"
] | null | null | null | import pandas as pd
| 11.333333 | 36 | 0.661765 |
4e76f4bcaf6c2b3ef6bdb2c9d12ef79f80ffb1ec | 13,152 | py | Python | iceprod/server/rest/datasets.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-01-23T17:12:41.000Z | 2019-01-14T13:38:17.000Z | iceprod/server/rest/datasets.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 242 | 2016-05-09T18:46:51.000Z | 2022-03-31T22:02:29.000Z | iceprod/server/rest/datasets.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-03-27T09:13:40.000Z | 2019-01-27T10:55:30.000Z | import logging
import json
import uuid
from collections import defaultdict
import tornado.web
import tornado.httpclient
from tornado.platform.asyncio import to_asyncio_future
import pymongo
import motor
from rest_tools.client import RestClient
from iceprod.server.rest import RESTHandler, RESTHandlerSetup, authorization
from iceprod.server.util import nowstr, dataset_statuses, dataset_status_sort
logger = logging.getLogger('rest.datasets')
def setup(config, *args, **kwargs):
"""
Setup method for Dataset REST API.
Sets up any database connections or other prerequisites.
Args:
config (dict): an instance of :py:class:`iceprod.server.config`.
Returns:
list: Routes for dataset, which can be passed to :py:class:`tornado.web.Application`.
"""
cfg_rest = config.get('rest',{}).get('datasets',{})
db_cfg = cfg_rest.get('database',{})
# add indexes
db = pymongo.MongoClient(**db_cfg).datasets
if 'dataset_id_index' not in db.datasets.index_information():
db.datasets.create_index('dataset_id', name='dataset_id_index', unique=True)
handler_cfg = RESTHandlerSetup(config, *args, **kwargs)
handler_cfg.update({
'database': motor.motor_tornado.MotorClient(**db_cfg).datasets,
})
return [
(r'/datasets', MultiDatasetHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)', DatasetHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/description', DatasetDescriptionHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/status', DatasetStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/priority', DatasetPriorityHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/jobs_submitted', DatasetJobsSubmittedHandler, handler_cfg),
(r'/dataset_summaries/status', DatasetSummariesStatusHandler, handler_cfg),
]
| 34.978723 | 122 | 0.586603 |
4e79f990859c3061f129402b3a92ec843ee5ea60 | 2,938 | py | Python | backend/utils/n9e_api.py | itimor/one-ops | f1111735de252012752dfabe11598e9690c89257 | [
"MIT"
] | null | null | null | backend/utils/n9e_api.py | itimor/one-ops | f1111735de252012752dfabe11598e9690c89257 | [
"MIT"
] | 6 | 2021-03-19T10:20:05.000Z | 2021-09-22T19:30:21.000Z | backend/utils/n9e_api.py | itimor/one-ops | f1111735de252012752dfabe11598e9690c89257 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# author: itimor
import requests
import json
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
if __name__ == '__main__':
cli = FalconClient(endpoint="http://n9e.xxoo.com", user='admin', token='11871bd159bd19da9ab624d161c569e3c8')
params = {"idents": ["192.168.0.112"]}
r = cli.node['2'].endpoint_unbind.post(data=params)
print(r)
| 31.255319 | 112 | 0.566372 |
4e7a0f3a33cc53fa5588171ed4ecc80f2e9996b8 | 411 | py | Python | python/RASCUNHOS/criargrafico.py | raquelmachado4993/omundodanarrativagit | eb8cebcc74514ba8449fab5f9dc5e9a93a826850 | [
"MIT"
] | null | null | null | python/RASCUNHOS/criargrafico.py | raquelmachado4993/omundodanarrativagit | eb8cebcc74514ba8449fab5f9dc5e9a93a826850 | [
"MIT"
] | null | null | null | python/RASCUNHOS/criargrafico.py | raquelmachado4993/omundodanarrativagit | eb8cebcc74514ba8449fab5f9dc5e9a93a826850 | [
"MIT"
] | null | null | null | import matplotlib.pyplot
meses = ['Janeiro','Fevereiro','Marco','Abril','Maio','Junho']
valores = [105235, 107697, 110256, 109236, 108859, 109986]
matplotlib.pyplot.plot(meses, valores)
matplotlib.pyplot.title('Faturamento no primeiro semestre de 2017')
matplotlib.pyplot.xlabel('Meses')
matplotlib.pyplot.ylabel('Faturamento em R$')
matplotlib.pyplot.savefig('grafico.png', dpi=100)
matplotlib.pyplot.show()
| 34.25 | 67 | 0.766423 |
4e7aae2cfbb07486e48d60f16bce3de6949f6366 | 3,213 | py | Python | getHelmstedtGNDs.py | hbeyer/pcp-vd | e8b4903b4188fea5295e7709e25216f10954f23f | [
"MIT"
] | null | null | null | getHelmstedtGNDs.py | hbeyer/pcp-vd | e8b4903b4188fea5295e7709e25216f10954f23f | [
"MIT"
] | null | null | null | getHelmstedtGNDs.py | hbeyer/pcp-vd | e8b4903b4188fea5295e7709e25216f10954f23f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import mysql.connector
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "schleichkatze",
database = "helmstedt"
)
mycursor = mydb.cursor()
mycursor.execute("SELECT id, gnd FROM helmstedt.temp_prof_kat")
myresult = mycursor.fetchall()
gnds = [x[1] for x in myresult if x[1] != None]
print('|'.join(gnds))
"""
# Eine Liste (geordnet, indexiert und vernderlich)
mylist = ['Lerche', 'Schneider', 'Zimmermann', 'Kstner', 'Raabe', 'Schmidt-Glintzer', 'bURSCHEL']
mylist[len(mylist) - 1] = mylist[len(mylist) - 1].swapcase()
mylist.append('Ritter Rost')
mylist.insert(0, 'Zimmermann')
print(mylist)
"""
"""
# Ein Tupel (ist unvernderlich)
mytuple = ('Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag')
#print(mytuple[3:6])
"""
"""
# Ein Set (unindexiert und ungeordnet, Elemente sind unvernderlich, knnen aber vermehrt oder reduziert werden)
myset = {'Adenauer', 'Erhard', 'Kiesinger', 'Brandt', 'Schmidt', 'Kohl', 'Schrder', 'Merkel', 'Schulz'}
myset.remove('Schulz')
myset.add('Kramp-Karrenbauer')
for i in myset:
print(i)
"""
"""
# Ein Dictionary
mydict = {'Mann':'vyras', 'Frau':'moteris','Fisch':'uvis', 'Biber':'bebras', 'Stadt':'miestas', 'Knig':'karalius'}
for x, y in mydict.items():
print(x + ' heit auf Litauisch ' + y)
"""
"""
# Eine Datumsoperation
import time
import datetime
time = time.localtime(time.time())
print(time)
"""
"""
# Eine Funktion
def makeName(forename, surname, title=""):
result = forename + " " + surname
if title:
result = title + " " + result
return result
print(makeName("Hartmut", "Beyer", "Magister artium"))
"""
"""
# Eine Klasse
class Person:
def __init__(self, forename, surname):
self.forename = forename
self.surename = surname
person = Person('Ben', 'Gurion')
print(person.forename)
"""
"""
# Eine Klasse
class Language:
def __init__(self, code):
self.codes = {
"eng":"Englisch",
"ger":"Deutsch",
"fre":"Franzsisch",
"rus":"Russisch"
}
if code not in self.codes:
self.name = code
return
self.name = self.codes[code]
lang = Language("rus")
print(lang.name)
"""
"""
# Eine Datei aus dem Netz auslesen
import urllib.request as ur
url = "http://diglib.hab.de/edoc/ed000228/1623_06.xml"
fileobject = ur.urlopen(url)
string = fileobject.read()
print(string)
"""
"""
# Eine XML-Datei parsen
import xml.etree.ElementTree as et
tree = et.parse('test.xml')
root = tree.getroot()
nbs = root.findall('.//{http://www.tei-c.org/ns/1.0}rs')
name = ""
for ent in nbs:
if ent.get('type') == 'person':
name = str(ent.text).strip()
ref = str(ent.get('ref')).strip()
print(name + ' - ' + ref)
"""
"""
# Laden und Auslesen einer XML-Datei im Netz
import urllib.request as ur
import xml.etree.ElementTree as et
url = "http://diglib.hab.de/edoc/ed000228/1623_08.xml"
fileobject = ur.urlopen(url)
tree = et.parse(fileobject)
root = tree.getroot()
nbs = root.findall('.//{http://www.tei-c.org/ns/1.0}rs')
name = ""
for ent in nbs:
if ent.get('type') == 'person':
name = str(ent.text).strip()
ref = str(ent.get('ref')).strip()
print(name + ' - ' + ref)
"""
| 22.787234 | 116 | 0.647059 |
4e7ab87b888bbbe2383cfa6903a948e5d52465e7 | 9,035 | py | Python | tests/test_mapexplorer.py | OCHA-DAP/hdx-scraper-mapexplorer | 3fef67376815611657657c6d53ce904b8f9e4550 | [
"MIT"
] | null | null | null | tests/test_mapexplorer.py | OCHA-DAP/hdx-scraper-mapexplorer | 3fef67376815611657657c6d53ce904b8f9e4550 | [
"MIT"
] | null | null | null | tests/test_mapexplorer.py | OCHA-DAP/hdx-scraper-mapexplorer | 3fef67376815611657657c6d53ce904b8f9e4550 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Unit tests for scrapername.
'''
import difflib
import filecmp
from datetime import datetime
from os.path import join
from tempfile import gettempdir
import pytest
from hdx.hdx_configuration import Configuration
import hdx.utilities.downloader
from hdx.utilities.compare import assert_files_same
from hdx.utilities.loader import load_json
from src.acled import update_lc_acled, update_ssd_acled
from mapexplorer import get_valid_names
from src.cbpf import update_cbpf
from src.fts import update_fts
#from src.rowca import update_rowca
# def test_rowca(self, folder, downloaderrowca, valid_lc_names, replace_lc_values):
# resource_updates = dict()
# filename = 'Lake_Chad_Basin_Estimated_Population.csv'
# expected_population = join('tests', 'fixtures', filename)
# actual_population = join(folder, filename)
# resource_updates['rowca_population'] = {'path': actual_population}
# filename = 'Lake_Chad_Basin_Displaced.csv'
# expected_displaced = join('tests', 'fixtures', filename)
# actual_displaced = join(folder, filename)
# resource_updates['rowca_displaced'] = {'path': actual_displaced}
# update_rowca('http://haha/', downloaderrowca, valid_lc_names, replace_lc_values, resource_updates)
# assert filecmp.cmp(expected_population, actual_population, shallow=False) is True, 'Expected: %s and Actual: %s do not match!' % (expected_population, actual_population)
# assert filecmp.cmp(expected_displaced, actual_displaced, shallow=False) is True, 'Expected: %s and Actual: %s do not match!' % (expected_displaced, actual_displaced)
| 44.727723 | 220 | 0.657554 |
4e7bf6a5635ca159b39d2d3d8c59aa1c3e27375b | 255 | py | Python | configs/_base_/schedules/imagenet_bs256_140e.py | sty16/cell_transformer | fc3d8dd8363664381617c76fb016f14c704749d8 | [
"Apache-2.0"
] | 1 | 2022-03-15T07:36:04.000Z | 2022-03-15T07:36:04.000Z | configs/_base_/schedules/imagenet_bs256_140e.py | sty16/cell_transformer | fc3d8dd8363664381617c76fb016f14c704749d8 | [
"Apache-2.0"
] | null | null | null | configs/_base_/schedules/imagenet_bs256_140e.py | sty16/cell_transformer | fc3d8dd8363664381617c76fb016f14c704749d8 | [
"Apache-2.0"
] | null | null | null | # optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[40, 70, 90])
runner = dict(type='EpochBasedRunner', max_epochs=100)
| 36.428571 | 73 | 0.721569 |
4e81b7168137c8ead27ea61e73b96364b565fc1e | 708 | py | Python | 2016/day_06.py | nabiirah/advent-of-code | 9c7e7cae437c024aa05d9cb7f9211fd47f5226a2 | [
"MIT"
] | 24 | 2020-12-08T20:07:52.000Z | 2022-01-18T20:08:06.000Z | 2016/day_06.py | nestorhf/advent-of-code | 1bb827e9ea85e03e0720e339d10b3ed8c44d8f27 | [
"MIT"
] | null | null | null | 2016/day_06.py | nestorhf/advent-of-code | 1bb827e9ea85e03e0720e339d10b3ed8c44d8f27 | [
"MIT"
] | 10 | 2020-12-04T10:04:15.000Z | 2022-02-21T22:22:26.000Z | """ Advent of Code Day 6 - Signals and Noise"""
with open('inputs/day_06.txt', 'r') as f:
rows = [row.strip() for row in f.readlines()]
flipped = zip(*rows)
message = ''
mod_message = ''
for chars in flipped:
most_freq = ''
least_freq = ''
highest = 0
lowest = 100
for char in chars:
if chars.count(char) > highest:
highest = chars.count(char)
most_freq = char
if chars.count(char) < lowest: # Part Two
lowest = chars.count(char)
least_freq = char
message += most_freq
mod_message += least_freq
# Answer One
print("Error Corrected Message:", message)
# Answer Two
print("Modified Message:", mod_message)
| 22.83871 | 51 | 0.601695 |
4e83689fcdf6c1f0b2f2c351aa1c6fe2dad28771 | 1,846 | py | Python | tasks.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 6 | 2017-10-31T20:54:37.000Z | 2020-10-23T19:03:00.000Z | tasks.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 7 | 2020-03-24T16:14:34.000Z | 2021-03-18T20:51:37.000Z | tasks.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 1 | 2019-07-29T07:55:49.000Z | 2019-07-29T07:55:49.000Z | import hashlib
import json
import os
import pathlib
import shlex
import nbformat
from invoke import task
# Source trees handed to the code formatter task.
files_to_format = ["chmp/src", "tasks.py", "chmp/setup.py"]

# Intersphinx inventory locations -- presumably used when building the docs
# to resolve cross-references to these projects; confirm against the doc task.
inventories = [
    "http://daft-pgm.org",
    "https://matplotlib.org",
    "http://www.numpy.org",
    "https://pandas.pydata.org",
    "https://docs.python.org/3",
    "https://pytorch.org/docs/stable",
]

# Package directories whose test suites the test task runs.
directories_to_test = ["chmp", "20170813-KeywordDetection/chmp-app-kwdetect"]
def run(c, *args, **kwargs):
    """Shell-quote each argument, join them into one command string and run
    it through the invoke context *c*, returning whatever ``c.run`` returns."""
    command = " ".join(shlex.quote(token) for token in args)
    return c.run(command, **kwargs)
| 20.511111 | 84 | 0.566089 |
4e83cc92299ecc7a687b5a70cfeda857351d4ef2 | 1,016 | py | Python | WeChat/translation.py | satoukiCk/SummerRobot | a22b17fb1927dcc1aa7316e2b892f7daee484583 | [
"MIT"
] | null | null | null | WeChat/translation.py | satoukiCk/SummerRobot | a22b17fb1927dcc1aa7316e2b892f7daee484583 | [
"MIT"
] | null | null | null | WeChat/translation.py | satoukiCk/SummerRobot | a22b17fb1927dcc1aa7316e2b892f7daee484583 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import requests
import json
import random
import hashlib
# Baidu Translate API credentials -- left blank here, filled in at deployment.
KEY = ''
APPID = ''
# Endpoint of the Baidu Fanyi (translate) HTTP API.
API = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
| 25.4 | 81 | 0.541339 |
4e85a767dc1a77745686895d8e0fa92531e23594 | 210 | py | Python | bolinette/defaults/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | 4 | 2020-11-02T15:16:32.000Z | 2022-01-11T11:19:24.000Z | bolinette/defaults/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | 14 | 2021-01-04T11:06:59.000Z | 2022-03-23T17:01:49.000Z | bolinette/defaults/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | null | null | null | import bolinette.defaults.models
import bolinette.defaults.mixins
import bolinette.defaults.services
import bolinette.defaults.middlewares
import bolinette.defaults.controllers
import bolinette.defaults.topics
| 30 | 37 | 0.885714 |
4e864e061007124f810efb595fdd8cc9331ec714 | 2,040 | py | Python | kicost/currency_converter/currency_converter.py | mdeweerd/KiCost | 2f67dad0f8d3335590835a6790181fc6428086d5 | [
"MIT"
] | null | null | null | kicost/currency_converter/currency_converter.py | mdeweerd/KiCost | 2f67dad0f8d3335590835a6790181fc6428086d5 | [
"MIT"
] | null | null | null | kicost/currency_converter/currency_converter.py | mdeweerd/KiCost | 2f67dad0f8d3335590835a6790181fc6428086d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Salvador E. Tropea
# Copyright (c) 2021 Instituto Nacional de Tecnologa Industrial
# License: Apache 2.0
# Project: KiCost
# Adapted from: https://github.com/alexprengere/currencyconverter
"""
CurrencyConverter:
This is reduced version of the 'Currency Converter' by Alex Prengre.
Original project: https://github.com/alexprengere/currencyconverter
This version only supports conversions for the last exchange rates, not
historic ones.
On the other hand this version always tries to get the last rates.
"""
try:
from .default_rates import default_rates, default_date
except ImportError:
# Only useful to boostrap
default_rates = {}
default_date = ''
from .download_rates import download_rates
# Author information.
__author__ = 'Salvador Eduardo Tropea'
__webpage__ = 'https://github.com/set-soft/'
__company__ = 'INTI-CMNB - Argentina'
| 30.447761 | 77 | 0.666667 |
4e893aecc42c83f372f87792977e579561f4f1e5 | 385 | py | Python | apps/vector.py | HayesAJ83/LeafMapAppTest | 5da65d5c1958f47934453124a72ec800c0ce6a93 | [
"MIT"
] | 22 | 2021-08-10T05:11:47.000Z | 2022-02-27T14:35:30.000Z | apps/vector.py | HayesAJ83/LeafMapAppTest | 5da65d5c1958f47934453124a72ec800c0ce6a93 | [
"MIT"
] | null | null | null | apps/vector.py | HayesAJ83/LeafMapAppTest | 5da65d5c1958f47934453124a72ec800c0ce6a93 | [
"MIT"
] | 8 | 2021-10-04T13:10:32.000Z | 2021-11-17T12:32:57.000Z | import streamlit as st
import leafmap
| 22.647059 | 85 | 0.675325 |
4e89a4bc599fa5786586aa1889dcbfc722b2e1e8 | 1,225 | py | Python | Book_Ladder/web/test.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | Book_Ladder/web/test.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | Book_Ladder/web/test.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | # -*- coding = utf-8 -*-
# @Time:2021/3/1417:56
# @Author:Linyu
# @Software:PyCharm
from web.pageutils import BooksScore
from web.pageutils import BooksCount
from web.pageutils import pointsDraw
from web.pageutils import scoreRelise
from web.pageutils import messBarInfo
from web.pageutils import tagRader
from web.models import tagThree
from web.wdCloud import infoCloud
from web.priceSpider import spider
from web.models import Dict
from web.models import Modle
from web.priceSpider import spiderDD
#
isbn = "'9787020090006'"
dd = "http://search.dangdang.com/?key=%s&act=input&sort_type=sort_xlowprice_asc#J_tab"%(isbn)
ddPrice = spiderDD(dd)
print(ddPrice)
# sql = 'select title from allbook where isbn = %s'%(isbn)
# print(sql)
# testData = Modle().query(sql)
# print(testData[0][0])
# title = "''"
# sqlNum = 'select id_num from corebook where title = %s'%(title)
# id_num = Modle().query(sqlNum)
# print(id_num[0][0])
# print(scoreRelise())
# print(BooksScore())
# print(BooksCount())
# print(pointsDraw())
# messBar()
# print(messBar())
# tagRader()
# tagThree("")
# infoCloud('2')
# print(spider('9787108009821'))
# dic = Dict()
# for key in dic.keys():
# print(key)
# print(dic[key])
| 24.5 | 93 | 0.719184 |
4e8e84d17eb6d1c3fde6f5fcc279f8f8f53c1518 | 2,252 | py | Python | news/spiders/bbc.py | azzuwan/ScraperExample | 6de382df0b20414f2a55b70837b5fd41d76e8712 | [
"MIT"
] | null | null | null | news/spiders/bbc.py | azzuwan/ScraperExample | 6de382df0b20414f2a55b70837b5fd41d76e8712 | [
"MIT"
] | null | null | null | news/spiders/bbc.py | azzuwan/ScraperExample | 6de382df0b20414f2a55b70837b5fd41d76e8712 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from readability import Document
import datetime
from pprint import pprint
| 29.246753 | 96 | 0.572824 |
4e8fedac1be1849239d5c90fd7ed2234086dcfe6 | 698 | py | Python | profiles/models.py | ev-agelos/acr-server | dba11b001ae4aae6dcbb761a5c0222c6fb3b939d | [
"MIT"
] | 1 | 2021-03-11T04:25:07.000Z | 2021-03-11T04:25:07.000Z | profiles/models.py | ev-agelos/acr-server | dba11b001ae4aae6dcbb761a5c0222c6fb3b939d | [
"MIT"
] | 7 | 2020-03-06T17:37:01.000Z | 2021-09-22T17:40:10.000Z | profiles/models.py | ev-agelos/ac-rank | dba11b001ae4aae6dcbb761a5c0222c6fb3b939d | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_countries.fields import CountryField
| 29.083333 | 70 | 0.767908 |
4e90e63aeba9851ed0445a458eb6eb560cabb51f | 5,684 | py | Python | tests/unit/test_command.py | shintaii/flower | fdeb135ddb3718404c0f1e9cca73fc45181f611a | [
"BSD-3-Clause"
] | 4,474 | 2015-01-01T18:34:36.000Z | 2022-03-29T06:02:38.000Z | tests/unit/test_command.py | shintaii/flower | fdeb135ddb3718404c0f1e9cca73fc45181f611a | [
"BSD-3-Clause"
] | 835 | 2015-01-06T21:29:48.000Z | 2022-03-31T04:35:10.000Z | tests/unit/test_command.py | shintaii/flower | fdeb135ddb3718404c0f1e9cca73fc45181f611a | [
"BSD-3-Clause"
] | 980 | 2015-01-02T21:41:28.000Z | 2022-03-31T08:30:52.000Z | import os
import sys
import tempfile
import unittest
import subprocess
from unittest.mock import Mock, patch
import mock
from prometheus_client import Histogram
from flower.command import apply_options, warn_about_celery_args_used_in_flower_command, apply_env_options
from tornado.options import options
from tests.unit import AsyncHTTPTestCase
| 39.748252 | 106 | 0.645144 |
4e91557146c36922257c5f4c9ff456b0ce8b407c | 534 | py | Python | Trojan.py | alrocks29/alpha-backdoor | 16a2d0ffdb183005f687bdf19b25cc918a1f12a0 | [
"MIT"
] | null | null | null | Trojan.py | alrocks29/alpha-backdoor | 16a2d0ffdb183005f687bdf19b25cc918a1f12a0 | [
"MIT"
] | null | null | null | Trojan.py | alrocks29/alpha-backdoor | 16a2d0ffdb183005f687bdf19b25cc918a1f12a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests
import subprocess
import os
import tempfile
temp_directory = tempfile.gettempdir()
os.chdir(temp_directory)
download("http://ip/image.jpg")
subprocess.Popen("image.jpg", shell=True)
download("http://ip/backdoor.exe")
subprocess.call("backdoor.exe", shell=True)
os.remove("image.jpg")
os.remove("backdoor.exe")
| 21.36 | 44 | 0.724719 |
4e91fe4c0f8f01c97da338f53c30caedc69665c2 | 3,524 | py | Python | Assignments/Assignment_1/Q1/task1.py | Kaustubh1Verma/CS671_Deep-Learning_2019 | 062002a1369dc962feb52d3c9561a3f1153e0f84 | [
"MIT"
] | null | null | null | Assignments/Assignment_1/Q1/task1.py | Kaustubh1Verma/CS671_Deep-Learning_2019 | 062002a1369dc962feb52d3c9561a3f1153e0f84 | [
"MIT"
] | null | null | null | Assignments/Assignment_1/Q1/task1.py | Kaustubh1Verma/CS671_Deep-Learning_2019 | 062002a1369dc962feb52d3c9561a3f1153e0f84 | [
"MIT"
] | 1 | 2019-06-12T14:02:33.000Z | 2019-06-12T14:02:33.000Z | import numpy as np
import cv2
import math
import random
import os
from tempfile import TemporaryFile
from sklearn.model_selection import train_test_split
# Creating classes.
length=[7,15]
width=[1,3]
col=[]
col.append([0,0,255]) #Blue
col.append([255,0,0]) #Red
interval=15
angles=[]
x=0
while x<180:
angles.append(x)
x+=interval
dirn=1
a1=0
os.mkdir("/home/aj/Desktop/DL2")
for l in length:
a2=0 #a1 0->7,1->15
for w in width:
a3=0 #a2 0->1,1->3
for co in col:
a4=0 #a3 0->red,1->blue
for ang in angles:
flag=0
m=0
os.mkdir("/home/aj/Desktop/DL2/"+str(dirn))
while flag<1000:
img=np.zeros((28,28,3),np.uint8)
x=random.randrange((28-math.ceil(l*math.sin(math.radians(180-ang)))))
y=random.randrange((28-math.ceil(l*math.sin(math.radians(180-ang)))))
endy = y+l*math.sin(math.radians(180-ang))
endy=math.floor(endy)
endx = x+l*math.cos(math.radians(180-ang))
endx=math.floor(endx)
if(0<=endx<=28 and 0<=endy<=28):
cv2.line(img,(x,y),(endx,endy),co,w)
flag=flag+1
cv2.imwrite("/home/aj/Desktop/DL2/"+str(dirn)+"/"+str(a1)+"_"+str(a2)+"_"+str(a4)+"_"+str(a3)+"_"+str(flag)+".png",img)
dirn+=1
a4+=1
a3=a3+1
a2=a2+1
a1=a1+1
outfile = TemporaryFile()
# Creating Frames
train=[]
train_class=[]
test_class=[]
allimg=[]
label=[]
flag=0
# os.mkdir("/home/aj/Desktop/DL2/frames")
for count in range (1,97):
f=[]
# os.mkdir("/home/aj/Desktop/DL2/frames/frame_"+str(count))
f=os.listdir("/home/aj/Desktop/DL2/"+str(count))
for fi in f:
# print(fi)
n=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+fi)
n = n.reshape(2352)
allimg.append(n)
label.append(flag)
flag+=1
for i in range (0,10):
img1=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i],1)
img2=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+1],1)
img3=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+2],1)
img1f=np.concatenate((img1,img2,img3),axis=1)
img4=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+3],1)
img5=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+4],1)
img6=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+5],1)
img2f=np.concatenate((img4,img5,img6),axis=1)
img7=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+6],1)
img8=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+7],1)
img9=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+8],1)
img3f=np.concatenate((img7,img8,img9),axis=1)
imgf=np.concatenate((img1f,img2f,img3f),axis=0)
cv2.imwrite("/home/aj/Desktop/DL2/frames/frame_"+str(count)+"/"+"f"+str(i+1)+".png",imgf)
# print(allimg[0])
# print(label[0:97])
X_train, X_test, y_oldtrain, y_oldtest = train_test_split(allimg, label, test_size=0.40, random_state=42)
# print(y_oldtrain[0:10])
y_oldtrain = np.array(y_oldtrain).reshape(-1)
y_train=np.eye(96)[y_oldtrain]
y_oldtest = np.array(y_oldtest).reshape(-1)
y_test=np.eye(96)[y_oldtest]
np.savez_compressed("/home/aj/Desktop/DL2/outfile",X_train=X_train,X_test=X_test,y_train=y_train,y_test=y_test)
# Creating Video
# img_frame=[]
# for i in range (1,97):
# f=[]
# f=os.listdir("/home/aj/Desktop/DL2/frames/frame_"+str(i))
# path="/home/aj/Desktop/DL2/frames/frame_"+str(i)+"/"
# for file in f:
# img = cv2.imread(path+file)
# height,width,layers = img.shape
# size = (width,height)
# img_frame.append(img)
# out = cv2.VideoWriter("/home/aj/Desktop/DL2/assign1.mp4",0x7634706d,5, size)
# for i in range(len(img_frame)):
# out.write(img_frame[i])
# out.release() | 32.036364 | 126 | 0.653235 |
4e92e1a84cbde5bdb7f5c55409d43bffcb5d668d | 17,703 | py | Python | octopus_deploy_swagger_client/models/user_role_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/user_role_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/user_role_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
    """Returns the string representation of the model"""
    formatted = pprint.pformat(self.to_dict())
    return formatted
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to the pretty-printed dict representation.
    representation = self.to_str()
    return representation
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Equal only when the other object is the same resource type and every
    # attribute matches.
    return (isinstance(other, UserRoleResource)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Negate the equality comparison defined above.
    return not (self == other)
| 42.555288 | 2,033 | 0.673106 |
4e949a6f4b8f9d86c879098cae8dde8d91b75f85 | 10,163 | py | Python | helpers.py | jchanke/mixtape50 | 68d03034b503fd0374b9fcba1c1d5207ed7f0170 | [
"MIT"
] | 1 | 2022-03-15T11:49:54.000Z | 2022-03-15T11:49:54.000Z | helpers.py | jchanke/mixtape50 | 68d03034b503fd0374b9fcba1c1d5207ed7f0170 | [
"MIT"
] | null | null | null | helpers.py | jchanke/mixtape50 | 68d03034b503fd0374b9fcba1c1d5207ed7f0170 | [
"MIT"
] | null | null | null | """
Does the legwork of searching for matching tracks.
Contains:
(1) Search functions:
- search_message
- search_spotipy
- search_db
- search_lookup
(2) String parsers (to clean title name):
- clean_title
- remove_punctuation
(3) Creates new Spotify playlist.
- create_playlist
"""
from typing import Any, List, Dict, Union
import os
import re
import sqlite3
import time
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from announcer import MessageAnnouncer, format_sse
# Localhost URL to access the application; Flask runs on port 5000 by default
# Adapted from https://github.com/Deffro/statify/blob/dd15a6e70428bd36ecddb5d4a8ac3d82b85c9339/code/server.py#L553
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 5000

# Get environment variables
SPOTIPY_CLIENT_ID = os.getenv("SPOTIPY_CLIENT_ID")
# NOTE(review): the variable is named SPOTIPY_* but reads the SPOTIFY_* env
# var -- confirm which name the deployment actually sets.
SPOTIPY_CLIENT_SECRET = os.getenv("SPOTIFY_CLIENT_SECRET")
SPOTIPY_REDIRECT_URI = f"{CLIENT_SIDE_URL}:{PORT}/callback"
# OAuth scopes: create/modify playlists and read the user's private playlists.
SCOPE = "playlist-modify-public playlist-modify-private playlist-read-private"

# Set up Spotipy
sp = spotipy.Spotify(auth_manager = SpotifyOAuth(client_id = SPOTIPY_CLIENT_ID,
                                                 client_secret = SPOTIPY_CLIENT_SECRET,
                                                 redirect_uri = SPOTIPY_REDIRECT_URI,
                                                 scope = SCOPE,
                                                 ))

# Create ('instantiate') a MessageAnnouncer object (server-sent-event stream
# used to report search progress to the browser).
announcer = MessageAnnouncer()
"""
(1) Search functions:
- search_message
- search_spotipy
- search_db
- search_lookup
"""
def search_message(message: str, max_search_length: int = 10,
                   query_lookup: Dict[str, list] = dict(), failed_queries: set = set()) -> List[Union[list, Any]]:
    """
    search_message(message, max_search_length = 10)

    Returns a list of song names (change to ids) matching the message.
    Uses regex-style greedy search.

    Song names will be limited to [max_search_length] words (default is 10, can
    be adjusted.)

    Returns songs from Spotify API via spotipy library; if not, checks
    Spotify 1.2M songs dataset via an sqlite3 query.

    Memoizes successful queries (to query_lookup) and failed queries (to
    failed_queries).

    NOTE(review): query_lookup and failed_queries are mutable default
    arguments, so they also persist across *top-level* calls and act as a
    process-wide cache. Confirm this is intended before changing it.

    https://www.kaggle.com/rodolfofigueroa/spotify-12m-songs
    """
    # Split message into list of lower-case words
    message = remove_punctuation(message.casefold()).split()

    # Gets up to max_search_length words of message
    query_length = min(max_search_length, len(message))

    # List containing search functions to iterate over, cheapest first:
    # memoized results, then the Spotify API, then the local sqlite dataset.
    search_functions = [
        search_lookup,
        search_spotipy,
        search_db,
    ]

    # Wait 0.2 seconds to ensure /creating has loaded
    time.sleep(0.2)

    # Splits query into prefix and suffix, decrementing prefix, until
    # - prefix exactly matches a song
    # - suffix can be expressed as a list of songs
    for i in range(query_length):
        prefix, suffix = message[:query_length - i], message[query_length - i:]
        prefix, suffix = " ".join(prefix), " ".join(suffix)
        # Stream the prefix currently being tried to the browser (SSE).
        announcer.announce(format_sse(event = "add", data = prefix))

        # Only search if suffix is not known to fail
        if suffix in failed_queries:
            time.sleep(0.1)
            announcer.announce(format_sse(event = "drop", data = prefix))
            continue # back to the start of the 'for' loop

        # Looping through search functions,
        for search_function in search_functions:
            # Search for tracks matching prefix
            prefix_results = search_function(prefix, query_lookup = query_lookup)
            if prefix_results:
                # Memoize the hit so later attempts resolve via search_lookup.
                query_lookup[prefix] = prefix_results
                print(f"Try: {prefix} in {search_function.__name__.replace('search_', '')}")
                # In announcer: replace prefix, add each track in prefix_results
                announcer.announce(format_sse(event = "drop", data = prefix))
                for track in map(lambda tracks: tracks[0]["name"], prefix_results):
                    announcer.announce(format_sse(event = "add", data = remove_punctuation(clean_title(track.casefold()))))
                    time.sleep(0.1)

                # Base case: if prefix is whole message, suffix == "", so we should just return prefix
                if suffix == "":
                    print(f"All done!")
                    announcer.announce(format_sse(event = "lock in"))
                    return prefix_results

                # Recursive case: make sure suffix it can be split into songs as well
                suffix_results = search_message(suffix, max_search_length = max_search_length,
                                                query_lookup = query_lookup, failed_queries = failed_queries)

                # If both are valid, return joined list
                if suffix_results:
                    results = prefix_results + suffix_results
                    query_lookup[" ".join([prefix, suffix])] = results
                    return results

                # Suffix cannot be split into songs, drop prefix
                for track in map(lambda tracks: tracks[0]["name"], prefix_results):
                    announcer.announce(format_sse(event = "drop", data = remove_punctuation(clean_title(track.casefold()))))
                    time.sleep(0.1)
                print(f"\"{suffix}\" suffix can't be split.")
                break # suffix doesn't work, try next prefix-suffix pair

        # Prefix not found in all search functions, drop it
        # (for/else: runs only when the search-function loop was not broken)
        else:
            print(f"\"{prefix}\" doesn't work, moving on.")
            announcer.announce(format_sse(data = "prefix doesn't work, dropping it"))
            announcer.announce(format_sse(event = "drop", data = prefix))

    # Recursive case: failure -- remember it so callers skip this sub-message.
    failed_queries.add(" ".join(message))
    return []
def search_lookup(query: str, query_lookup: Dict[str, list]) -> list:
    """
    Return the memoized results for *query* from ``query_lookup`` (a cache
    built up during the initial call to search_message), or an empty list
    when the query has not been resolved before.
    """
    return query_lookup.get(query, [])
def search_spotipy(query: str, query_lookup: Dict[str, list]) -> list:
    """
    Query the Spotify API (via the spotipy ``sp`` client) for tracks whose
    normalised title matches *query* exactly.

    Returns [matches] where matches is a list of {"name", "id"} dicts, or []
    when nothing matches.

    Note: ``query_lookup`` is unused; it is accepted only so every search
    function shares the same signature.
    """
    response = sp.search(q=f"track:\"{query}\"", type="track", limit=50)
    normalised_query = remove_punctuation(query)
    matches = []
    for item in response["tracks"]["items"]:
        if remove_punctuation(clean_title(item["name"].casefold())) == normalised_query:
            matches.append({"name": item["name"], "id": item["id"]})
    return [matches] if matches else []
def search_db(query: str, query_lookup: Dict[str, list]) -> list:
    """
    Search tracks.db (the Kaggle "Spotify 1.2M songs" dataset) for tracks
    whose cleaned name matches *query* exactly.

    Returns ``[results]`` where results is a list of {"name", "id"} dicts,
    or ``[]`` when there is no match.

    Note: ``query_lookup`` is unused; it is accepted only so every search
    function shares the same signature.

    https://www.kaggle.com/rodolfofigueroa/spotify-12m-songs
    """
    # Open the connection only for the duration of the query -- the original
    # code never closed it (leaked connection), and constructed the cursor
    # via sqlite3.Cursor(conn) instead of the Connection.execute shortcut.
    tracks = sqlite3.connect("tracks.db")
    try:
        rows = tracks.execute(
            "SELECT name, id FROM tracks WHERE name_cleaned = ?",
            [remove_punctuation(query)]).fetchall()
    finally:
        tracks.close()

    results = [{"name": name, "id": track_id} for name, track_id in rows]
    return [results] if results else []
"""
(2) String parsers (to clean title name):
- clean_title
- remove_punctuation
"""
def clean_title(title):
    """
    Strip common decorations from a track title, applied in order:
      - any "(...)" or "[...]" part and the whitespace before it
      - everything from " - " onwards (e.g. " - Remastered")
      - everything from " feat.", " ft." or " featuring" onwards
        (but not words like "feature")

    https://stackoverflow.com/questions/14596884/remove-text-between-and
    """
    for pattern in (r"\s+\(.+\)",
                    r"\s+\[.+\]",
                    r" - .*",
                    r"\W+(ft[:.]?|feat[:.]|featuring)\s.*"):
        title = re.sub(pattern, "", title)
    return title
def remove_punctuation(title):
    """
    Normalise a title for comparison:
      - delete XML escape sequences: &amp; &quot; &lt; &gt; &apos;
      - replace "/" runs (medley separators) and surrounding whitespace with a space
      - replace "&" and surrounding whitespace with " and "
      - drop ASCII punctuation (everything except "&" and "/" handled above)
      - collapse repeated whitespace and strip the ends
    """
    # BUG FIX: the original pattern was &[amp|quot|lt|gt|apos]; -- a
    # character class, which only matches "&" + ONE character + ";" and
    # never the actual entities. A grouped alternation is required.
    title = re.sub(r"&(?:amp|quot|lt|gt|apos);", "", title)
    title = re.sub(r"\s*\/+\s*", " ", title)
    title = re.sub(r"\s*&\s*", " and ", title)
    title = re.sub(r"[!\"#$%'()*+,-.:;<=>?@[\\\]^_`{|}~]", "", title)
    title = re.sub(r"\s{2,}", " ", title)
    return title.strip()
"""
(3) Creates new Spotify playlist.
"""
def create_playlist(results):
    """
    Build a private Spotify playlist named "mixtape50" from the output of
    ``search_message`` (a list of candidate-track lists) and return the new
    playlist's Spotify id.
    """
    # One track per message fragment: take the first candidate of each.
    track_ids = [candidates[0]["id"] for candidates in results]

    # Create the (empty) playlist on the authenticated user's account.
    playlist = sp.user_playlist_create(
        user=sp.me()["id"],
        name="mixtape50",
        public=False,
        collaborative=False,
        description="Created with Mixtape50: https://github.com/jchanke/mixtape50."
    )
    sp.playlist_add_items(playlist_id=playlist["id"], items=track_ids)
    return playlist["id"]
4e96757c37df00a4561207275579e02e7d774aeb | 3,836 | py | Python | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | [
"Apache-2.0"
] | 7 | 2015-05-16T13:27:21.000Z | 2019-08-06T11:09:24.000Z | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | [
"Apache-2.0"
] | null | null | null | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | [
"Apache-2.0"
] | 4 | 2015-11-27T13:36:36.000Z | 2021-03-09T17:55:53.000Z | from urllib import urlencode
from urllib2 import urlopen
import simplejson
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from molly.apps.places.models import bearing_to_compass
from molly.utils.templatetags.molly_utils import humanise_distance, humanise_seconds
CYCLESTREETS_URL = 'http://www.cyclestreets.net/api/journey.json?%s'
if 'cyclestreets' not in settings.API_KEYS:
    # Cyclestreets not configured
    # NOTE(review): raising ImportError at module import time presumably makes
    # the routing framework skip this provider -- confirm the provider loader
    # actually catches ImportError.
    raise ImportError()
def generate_route(points, type):
"""
Given 2 Points, this will return a route between them. The route consists
of a dictionary with the following keys:
* error (optional, and if set means that the object contains no route),
which is a string describing any errors that occurred in plotting the
route
* total_time: An int of the number of seconds this route is estimated to
take
* total_distance: An int of the number of metres this route is expected to
take
* waypoints: A list of dictionaries, where each dictionary has 2 keys:
'instruction', which is a human-readable description of the steps to be
taken here, and 'location', which is a Point describing the route to be
taken
@param points: An ordered list of points to be included in this route
@type points: [Point]
@param type: The type of route to generate (foot, car or bike)
@type type: str
@return: A dictionary containing the route and metadata associated with it
@rtype: dict
"""
# Build Cyclestreets request:
url = CYCLESTREETS_URL % urlencode({
'key': settings.API_KEYS['cyclestreets'],
'plan': 'balanced',
'itinerarypoints': '|'.join('%f,%f' % (p[0], p[1]) for p in points)
})
json = simplejson.load(urlopen(url))
if not json:
return {
'error': _('Unable to plot route')
}
else:
summary = json['marker'][0]['@attributes']
waypoints = []
for i, waypoint in enumerate(json['marker'][1:]):
segment = waypoint['@attributes']
waypoints.append({
'instruction': _('%(instruction)s at %(name)s') % {
'instruction': capfirst(segment['turn']),
'name': segment['name']
},
'additional': _('%(direction)s for %(distance)s (taking approximately %(time)s)') % {
'direction': bearing_to_compass(int(segment['startBearing'])),
'distance': humanise_distance(segment['distance'], False),
'time': humanise_seconds(segment['time'])
},
'waypoint_type': {
'straight on': 'straight',
'turn left': 'left',
'bear left': 'slight-left',
'sharp left': 'sharp-left',
'turn right': 'right',
'bear right': 'slight-right',
'sharp right': 'sharp-right',
'double-back': 'turn-around',
}.get(segment['turn']),
'location': Point(*map(float, segment['points'].split(' ')[0].split(','))),
'path': LineString(map(lambda ps: Point(*map(float, ps.split(','))),
segment['points'].split(' ')))
})
return {
'total_time': summary['time'],
'total_distance': summary['length'],
'waypoints': waypoints,
'path': LineString(map(lambda ps: Point(*map(float, ps.split(','))), summary['coordinates'].split(' ')))
} | 40.808511 | 116 | 0.569343 |
4e99de297b6c41fb361b034e5f59be29d6569791 | 316 | py | Python | exercises/exc_G1.py | dataXcode/IPP | c9b94ad2d7dc14b01e6657a4fa555507bbc7e93b | [
"MIT"
] | null | null | null | exercises/exc_G1.py | dataXcode/IPP | c9b94ad2d7dc14b01e6657a4fa555507bbc7e93b | [
"MIT"
] | null | null | null | exercises/exc_G1.py | dataXcode/IPP | c9b94ad2d7dc14b01e6657a4fa555507bbc7e93b | [
"MIT"
] | null | null | null | num_list = [10,50,30,12,6,8,100]
print( max_min_first_last(num_list) )
| 28.727273 | 48 | 0.78481 |
4e9c35d7a10e21f257f971c50e260fb397455462 | 5,829 | py | Python | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 5 | 2017-10-19T15:11:52.000Z | 2020-03-08T07:16:21.000Z | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 182 | 2017-06-21T19:32:13.000Z | 2021-03-22T13:38:16.000Z | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | [
"MIT"
] | 1 | 2018-06-23T11:53:18.000Z | 2018-06-23T11:53:18.000Z | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
import json
from oauth2_provider.models import get_application_model
from rest_framework.test import APIClient
from test_plus.test import TestCase
from django.core import mail
from django.conf import settings
from django.contrib.auth.models import Group
from django.urls import reverse
from accelerator_abstract.models.base_clearance import (
CLEARANCE_LEVEL_GLOBAL_MANAGER,
CLEARANCE_LEVEL_STAFF
)
from impact.tests.factories import (
ClearanceFactory,
UserFactory,
)
OAuth_App = get_application_model()
API_GROUPS = [settings.V0_API_GROUP, settings.V1_API_GROUP]
DESCRIPTION_CONTENT = 'DESCRIPTION:Topics: {topics}'
LOCATION_CONTENT = 'LOCATION:{location}\\;'
LOCATION_INFO = 'LOCATION:{location}\\;{meeting_info}'
def assert_notified(self,
user,
message="",
subject="",
check_alternative=False):
'''Assert that the user received a notification.
If `message` is specified, assert that the message appears in one of
the outgoing emails to this user
'''
emails = [email for email in mail.outbox if user.email in email.to]
self.assertGreater(len(emails), 0)
if message:
if check_alternative:
self.assertTrue(any([_message_included_in_email_alternative(
email, message) for email in emails]))
else:
self.assertTrue(any([
message in email.body for email in emails]))
if subject:
self.assertIn(subject, [email.subject for email in emails])
def assert_ics_email_attachments(self, user):
'''assert that the ics email attachment exists
'''
emails = [email for email in mail.outbox if user.email in email.to]
for email in emails:
attachments = email.attachments
self.assertGreater(len(email.attachments), 0)
self.assertIn("reminder.ics",
[attachment[0] for attachment in attachments])
def assert_not_notified(self, user):
'''Assert that the specified user did not receive a notification.
'''
if mail.outbox:
self.assertNotIn(user.email, [email.to for email in mail.outbox],
msg="Found an email sent to user")
def _message_included_in_email_alternative(email, message):
return any([message in alt[0] for alt in email.alternatives])
| 36.892405 | 79 | 0.621891 |
4e9d488202a407ec4de3fade6bfb2e435ba6bb6b | 607 | py | Python | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | [
"MIT"
] | 13 | 2018-02-03T22:57:41.000Z | 2018-05-17T07:38:36.000Z | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | [
"MIT"
] | 61 | 2018-02-07T21:34:39.000Z | 2018-06-05T16:15:28.000Z | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | [
"MIT"
] | 16 | 2018-02-03T12:37:48.000Z | 2018-06-02T17:14:55.000Z | from django.db import models
from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.models.mixins import ModelReprMixin
| 27.590909 | 85 | 0.698517 |
4e9e6122ed3109b35f3efe158b363d95df381cc6 | 10,892 | py | Python | server/tree_pickler.py | michaelpeterswa/CPSC322Project-WildfireAnalysis | 872727e8c59619fcfc11aaa70367762271207dbd | [
"MIT"
] | null | null | null | server/tree_pickler.py | michaelpeterswa/CPSC322Project-WildfireAnalysis | 872727e8c59619fcfc11aaa70367762271207dbd | [
"MIT"
] | null | null | null | server/tree_pickler.py | michaelpeterswa/CPSC322Project-WildfireAnalysis | 872727e8c59619fcfc11aaa70367762271207dbd | [
"MIT"
] | 1 | 2021-04-16T21:21:25.000Z | 2021-04-16T21:21:25.000Z | import pickle
best_trees = [
{'accuracy': 0.36416184971098264, 'tree':
['Attribute', 'att1',
['Value', 'Pend Oreille',
['Leaf', 2.0, 0, 69]
],
['Value', 'Okanogan',
['Leaf', 3.0, 0, 314]
],
['Value', 'Lincoln',
['Leaf', 5.0, 0, 55]
],
['Value', 'Grant',
['Leaf', 5.0, 0, 4]
], ['Value', 'Chelan', ['Leaf', 3.0, 0, 136]], ['Value', 'Stevens', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 18]], ['Value', 'Miscellaneou', ['Leaf', 2.0, 0, 83]], ['Value', 'Lightning', ['Leaf', 2.0, 0, 43]], ['Value', 'Under Invest', ['Leaf', 5.0, 0, 6]], ['Value', 'Debris Burn', ['Leaf', 3.0, 0, 120]], ['Value', 'Children', ['Leaf', 3.0, 0, 8]], ['Value', 'None', ['Leaf', 5.0, 1, 308]], ['Value', 'Smoker', ['Leaf', 2.0, 0, 7]], ['Value', 'Logging', ['Leaf', 3.0, 0, 8]], ['Value', 'Arson', ['Leaf', 2.0, 0, 5]], ['Value', 'Undetermined', ['Leaf', 9.0, 2, 308]], ['Value', 'Railroad', ['Leaf', 4.0, 0, 7]]]], ['Value', 'Clark', ['Leaf', 3.0, 0, 20]], ['Value', 'Yakima', ['Leaf', 3.0, 0, 97]], ['Value', 'Spokane', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 23]], ['Value', 'Miscellaneou', ['Leaf', 2.0, 0, 142]], ['Value', 'Lightning', ['Leaf', 3.0, 0, 24]], ['Value', 'Under Invest', ['Leaf', 3.0, 0, 4]], ['Value', 'Debris Burn', ['Leaf', 2.0, 0, 54]], ['Value', 'Children', ['Leaf', 3.0, 0, 20]], ['Value', 'None', ['Leaf', 3.0, 3, 326]], ['Value', 'Smoker', ['Leaf', 2.0, 0, 2]], ['Value', 'Logging', ['Leaf', 2.0, 0, 3]], ['Value', 'Arson', ['Leaf', 2.0, 0, 29]], ['Value', 'Undetermined', ['Leaf', 2.0, 0, 7]], ['Value', 'Railroad', ['Leaf', 2.0, 0, 15]]]], ['Value', 'Pierce', ['Leaf', 3.0, 0, 55]], ['Value', 'Skagit', ['Leaf', 3.0, 0, 34]], ['Value', 'Grays Harbor', ['Leaf', 3.0, 0, 52]], ['Value', 'Skamania', ['Leaf', 3.0, 0, 28]], ['Value', 'King', ['Leaf', 3.0, 0, 41]], ['Value', 'Island', ['Leaf', 3.0, 0, 7]], ['Value', 'Klickitat', ['Leaf', 3.0, 0, 180]], ['Value', 'Whitman', ['Leaf', 7.0, 0, 5]], ['Value', 'Cowlitz', ['Leaf', 3.0, 0, 68]], ['Value', 'Douglas', ['Leaf', 5.0, 0, 27]], ['Value', 'Ferry', ['Leaf', 3.0, 0, 72]], ['Value', 'Mason', ['Leaf', 3.0, 0, 66]], ['Value', 'Kittitas', ['Leaf', 3.0, 0, 99]], ['Value', 'Jefferson', ['Leaf', 3.0, 0, 30]], ['Value', 'Franklin', ['Leaf', 5.0, 3, 2503]], ['Value', 'Clallam', 
['Leaf', 3.0, 0, 44]], ['Value', 'Pacific', ['Leaf', 3.0, 0, 51]], ['Value', 'Lewis', ['Leaf', 3.0, 0, 93]], ['Value', 'Thurston', ['Leaf', 2.0, 0, 59]], ['Value', 'Walla Walla', ['Leaf', 3.0, 0, 18]], ['Value', 'Snohomish', ['Leaf', 3.0, 0, 38]], ['Value', 'Asotin', ['Leaf', 4.0, 0, 23]], ['Value', 'Adams', ['Leaf', 5.0, 1, 2503]], ['Value', 'Whatcom', ['Leaf', 2.0, 0, 40]], ['Value', 'San Juan', ['Leaf', 3.0, 0, 7]], ['Value', 'Garfield', ['Leaf', 3.0, 0, 10]], ['Value', 'Columbia', ['Leaf', 3.0, 0, 14]], ['Value', 'Benton', ['Leaf', 7.0, 1, 2503]], ['Value', 'Wahkiakum', ['Leaf', 3.0, 5, 2503]], ['Value', 'No Data', ['Leaf', 4.0, 1, 2503]], ['Value', 'Kitsap', ['Leaf', 3.0, 0, 2]]]}, {'accuracy': 0.34375, 'tree': ['Attribute', 'att1', ['Value', 'Klickitat', ['Leaf', 2.0, 0, 150]], ['Value', 'Ferry', ['Leaf', 3.0, 0, 66]], ['Value', 'Okanogan', ['Leaf', 3.0, 0, 341]], ['Value', 'Clallam', ['Leaf', 3.0, 0, 53]], ['Value', 'Lewis', ['Leaf', 3.0, 0, 105]], ['Value', 'Kittitas', ['Leaf', 3.0, 0, 115]], ['Value', 'Spokane', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 31]], ['Value', 'Arson', ['Leaf', 2.0, 0, 37]], ['Value', 'Lightning', ['Leaf', 3.0, 0, 25]], ['Value', 'Miscellaneou', ['Leaf', 3.0, 0, 122]], ['Value', 'Logging', ['Leaf', 3.0, 1, 318]], ['Value', 'Under Invest', ['Leaf', 5.0, 4, 318]], ['Value', 'Debris Burn', ['Leaf', 3.0, 0, 51]], ['Value', 'Railroad', ['Leaf', 2.0, 0, 25]], ['Value', 'Children', ['Leaf', 4.0, 0, 12]], ['Value', 'Undetermined', ['Leaf', 5.0, 0, 5]], ['Value', 'Smoker', ['Leaf', 6.0, 0, 4]], ['Value', 'None', ['Leaf', 3.0, 1, 318]]]], ['Value', 'Chelan', ['Leaf', 3.0, 0, 142]], ['Value', 'Mason', ['Leaf', 3.0, 0, 69]], ['Value', 'Lincoln', ['Leaf', 3.0, 0, 79]], ['Value', 'Yakima', ['Leaf', 3.0, 0, 82]], ['Value', 'Jefferson', ['Leaf', 3.0, 0, 32]], ['Value', 'Pend Oreille', ['Leaf', 2.0, 0, 61]], ['Value', 'Stevens', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 15]], ['Value', 'Arson', ['Leaf', 
2.0, 0, 11]], ['Value', 'Lightning', ['Leaf', 3.0, 0, 33]], ['Value', 'Miscellaneou', ['Leaf', 3.0, 0, 84]], ['Value', 'Logging', ['Leaf', 3.0, 4, 290]], ['Value', 'Under Invest', ['Leaf', 5.0, 0, 4]], ['Value', 'Debris Burn', ['Leaf', 2.0, 0, 117]], ['Value', 'Railroad', ['Leaf', 2.0, 0, 6]], ['Value', 'Children', ['Leaf', 2.0, 0, 4]], ['Value', 'Undetermined', ['Leaf', 9.0, 1, 290]], ['Value', 'Smoker', ['Leaf', 2.0, 0, 10]], ['Value', 'None', ['Leaf', 5.0, 1, 290]]]], ['Value', 'Cowlitz', ['Leaf', 3.0, 0, 77]], ['Value', 'Pierce', ['Leaf', 3.0, 0, 58]], ['Value', 'King', ['Leaf', 2.0, 0, 23]], ['Value', 'Walla Walla', ['Leaf', 3.0, 0, 24]], ['Value', 'Douglas', ['Leaf', 6.0, 0, 17]], ['Value', 'Island', ['Leaf', 3.0, 0, 9]], ['Value', 'Skamania', ['Leaf', 3.0, 0, 27]], ['Value', 'Thurston', ['Leaf', 2.0, 0, 52]], ['Value', 'Columbia', ['Leaf', 3.0, 0, 15]], ['Value', 'Snohomish', ['Leaf', 3.0, 0, 36]], ['Value', 'Skagit', ['Leaf', 3.0, 0, 47]], ['Value', 'Pacific', ['Leaf', 3.0, 0, 36]], ['Value', 'Grays Harbor', ['Leaf', 2.0, 0, 56]], ['Value', 'Whatcom', ['Leaf', 3.0, 0, 37]], ['Value', 'Clark', ['Leaf', 3.0, 0, 30]], ['Value', 'Kitsap', ['Leaf', 3.0, 2, 2503]], ['Value', 'San Juan', ['Leaf', 3.0, 0, 9]], ['Value', 'Asotin', ['Leaf', 4.0, 0, 20]], ['Value', 'Garfield', ['Leaf', 3.0, 0, 7]], ['Value', 'Adams', ['Leaf', 5.0, 2, 2503]], ['Value', 'Wahkiakum', ['Leaf', 2.0, 0, 7]], ['Value', 'Whitman', ['Leaf', 5.0, 0, 5]], ['Value', 'Grant', ['Leaf', 5.0, 1, 2503]], ['Value', 'No Data', ['Leaf', 4.0, 0, 2]], ['Value', 'Benton', ['Leaf', 7.0, 1, 2503]]]}, {'accuracy': 0.33568904593639576, 'tree': ['Attribute', 'att1', ['Value', 'Stevens', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 24]], ['Value', 'Debris Burn', ['Leaf', 2.0, 0, 105]], ['Value', 'Children', ['Leaf', 3.0, 0, 4]], ['Value', 'Miscellaneou', ['Leaf', 3.0, 0, 80]], ['Value', 'Railroad', ['Leaf', 2.0, 0, 6]], ['Value', 'Undetermined', ['Leaf', 9.0, 3, 300]], ['Value', 'Logging', 
['Leaf', 3.0, 0, 9]], ['Value', 'Lightning', ['Leaf', 2.0, 0, 39]], ['Value', 'Smoker', ['Leaf', 2.0, 0, 8]], ['Value', 'None', ['Leaf', 5.0, 2, 300]], ['Value', 'Arson', ['Leaf', 3.0, 0, 15]], ['Value', 'Under Invest', ['Leaf', 3.0, 0, 5]]]], ['Value', 'Grays Harbor', ['Leaf', 2.0, 0, 49]], ['Value', 'Chelan', ['Leaf', 3.0, 0, 143]], ['Value', 'Okanogan', ['Leaf', 3.0, 0, 306]], ['Value', 'Spokane', ['Attribute', 'att2', ['Value', 'Recreation', ['Leaf', 2.0, 0, 27]], ['Value', 'Debris Burn', ['Leaf', 3.0, 0, 66]], ['Value', 'Children', ['Leaf', 2.0, 0, 10]], ['Value', 'Miscellaneou', ['Leaf', 3.0, 0, 152]], ['Value', 'Railroad', ['Leaf', 2.0, 0, 21]], ['Value', 'Undetermined', ['Leaf', 5.0, 0, 8]], ['Value', 'Logging', ['Leaf', 2.0, 0, 2]], ['Value', 'Lightning', ['Leaf', 3.0, 0, 25]], ['Value', 'Smoker', ['Leaf', 3.0, 0, 3]], ['Value', 'None', ['Leaf', 2.0, 0, 5]], ['Value', 'Arson', ['Leaf', 2.0, 0, 24]], ['Value', 'Under Invest', ['Leaf', 5.0, 2, 345]]]], ['Value', 'Cowlitz', ['Leaf', 3.0, 0, 74]], ['Value', 'Lincoln', ['Leaf', 3.0, 0, 66]], ['Value', 'Kittitas', ['Leaf', 3.0, 0, 122]], ['Value', 'Pacific', ['Leaf', 3.0, 0, 61]], ['Value', 'Skagit', ['Leaf', 3.0, 0, 57]], ['Value', 'Lewis', ['Leaf', 3.0, 0, 111]], ['Value', 'Island', ['Leaf', 3.0, 0, 8]], ['Value', 'Klickitat', ['Leaf', 2.0, 0, 193]], ['Value', 'Walla Walla', ['Leaf', 4.0, 0, 19]], ['Value', 'Jefferson', ['Leaf', 3.0, 0, 23]], ['Value', 'Garfield', ['Leaf', 7.0, 0, 6]], ['Value', 'Thurston', ['Leaf', 2.0, 0, 50]], ['Value', 'King', ['Leaf', 3.0, 0, 33]], ['Value', 'Douglas', ['Leaf', 6.0, 0, 28]], ['Value', 'Yakima', ['Leaf', 3.0, 0, 90]], ['Value', 'Mason', ['Leaf', 3.0, 0, 55]], ['Value', 'Snohomish', ['Leaf', 3.0, 0, 27]], ['Value', 'Pierce', ['Leaf', 3.0, 0, 44]], ['Value', 'Kitsap', ['Leaf', 3.0, 0, 6]], ['Value', 'Clark', ['Leaf', 3.0, 0, 18]], ['Value', 'Columbia', ['Leaf', 3.0, 0, 17]], ['Value', 'Pend Oreille', ['Leaf', 3.0, 0, 45]], ['Value', 'Skamania', ['Leaf', 3.0, 0, 27]], 
['Value', 'Asotin', ['Leaf', 7.0, 0, 17]], ['Value', 'Whatcom', ['Leaf', 3.0, 0, 39]], ['Value', 'Ferry', ['Leaf', 3.0, 0, 72]], ['Value', 'Wahkiakum', ['Leaf', 3.0, 1, 2503]], ['Value', 'Clallam', ['Leaf', 3.0, 0, 38]], ['Value', 'Adams', ['Leaf', 5.0, 3, 2503]], ['Value', 'San Juan', ['Leaf', 2.0, 0, 3]], ['Value', 'Grant', ['Leaf', 6.0, 1, 2503]], ['Value', 'No Data', ['Leaf', 4.0, 0, 2]], ['Value', 'Whitman', ['Leaf', 5.0, 0, 4]]]}, {'accuracy': 0.33390705679862304, 'tree': ['Attribute', 'att1', ['Value', 'Spokane', ['Leaf', 3.0, 0, 364]], ['Value', 'Stevens', ['Leaf', 2.0, 0, 298]], ['Value', 'Klickitat', ['Leaf', 3.0, 0, 165]], ['Value', 'Okanogan', ['Leaf', 3.0, 0, 340]], ['Value', 'Yakima', ['Leaf', 5.0, 0, 88]], ['Value', 'Chelan', ['Leaf', 3.0, 0, 110]], ['Value', 'Cowlitz', ['Leaf', 3.0, 0, 84]], ['Value', 'Thurston', ['Leaf', 2.0, 0, 78]], ['Value', 'Pend Oreille', ['Leaf', 2.0, 0, 46]], ['Value', 'Pierce', ['Leaf', 3.0, 0, 45]], ['Value', 'Mason', ['Leaf', 3.0, 0, 69]], ['Value', 'Grays Harbor', ['Leaf', 2.0, 0, 58]], ['Value', 'Douglas', ['Leaf', 6.0, 0, 33]], ['Value', 'Ferry', ['Leaf', 3.0, 0, 77]], ['Value', 'Skagit', ['Leaf', 3.0, 0, 39]], ['Value', 'Clark', ['Leaf', 2.0, 0, 28]], ['Value', 'Kittitas', ['Leaf', 3.0, 0, 108]], ['Value', 'Lewis', ['Leaf', 3.0, 0, 106]], ['Value', 'Skamania', ['Leaf', 3.0, 0, 25]], ['Value', 'King', ['Leaf', 3.0, 0, 23]], ['Value', 'Asotin', ['Leaf', 3.0, 0, 24]], ['Value', 'Snohomish', ['Leaf', 3.0, 0, 26]], ['Value', 'Pacific', ['Leaf', 2.0, 0, 36]], ['Value', 'Jefferson', ['Leaf', 3.0, 0, 29]], ['Value', 'Clallam', ['Leaf', 3.0, 0, 44]], ['Value', 'Lincoln', ['Leaf', 3.0, 0, 56]], ['Value', 'Walla Walla', ['Leaf', 3.0, 0, 18]], ['Value', 'Island', ['Leaf', 3.0, 6, 2503]], ['Value', 'Whatcom', ['Leaf', 3.0, 0, 26]], ['Value', 'Benton', ['Leaf', 7.0, 1, 2503]], ['Value', 'Kitsap', ['Leaf', 3.0, 0, 8]], ['Value', 'San Juan', ['Leaf', 2.0, 0, 14]], ['Value', 'Columbia', ['Leaf', 3.0, 0, 16]], ['Value', 'Franklin', 
['Leaf', 5.0, 1, 2503]], ['Value', 'Grant', ['Leaf', 5.0, 4, 2503]], ['Value', 'Garfield', ['Leaf', 3.0, 0, 5]], ['Value', 'Whitman', ['Leaf', 7.0, 2, 2503]], ['Value', 'Wahkiakum', ['Leaf', 2.0, 1, 2503]], ['Value', 'No Data', ['Leaf', 3.0, 1, 2503]], ['Value', 'Adams', ['Leaf', 5.0, 1, 2503]]]}]
packaged_object = best_trees
# pickle packaged_object
outfile = open("trees.p", "wb")
pickle.dump(packaged_object, outfile)
outfile.close() | 495.090909 | 10,287 | 0.484209 |
4e9e7a69ae46de63cdefe46d785a1f6e94dac1e1 | 624 | py | Python | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | [
"MIT"
] | 3 | 2021-06-20T04:35:05.000Z | 2021-10-05T06:30:09.000Z | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | [
"MIT"
] | 2 | 2021-06-13T01:19:12.000Z | 2022-03-23T04:27:05.000Z | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | [
"MIT"
] | null | null | null | import json
#Parse csv to kdb.json
with open("kdb.csv", "r", encoding="utf_8") as f:
l=[]
lines = f.readlines()
# remove the header
lines.pop(0)
for line in lines:
tmp1 = line.split('"')
if tmp1[15] == "":
tmp1[15] = " "
if not "" in set([tmp1[1], tmp1[3], tmp1[11], tmp1[13], tmp1[15], tmp1[21]]):
l.append([tmp1[1], tmp1[3], tmp1[11], tmp1[13], tmp1[15], tmp1[21]])
json_data = {}
l.pop(0)
for i in l:
json_data[i[0]] = i[1:]
enc = json.dumps(json_data,ensure_ascii=False)
with open("kdb.json", "w") as f:
f.write(enc)
print("complete")
| 20.8 | 85 | 0.540064 |
4e9e92e9363d4d32c2609f2f36539abe9b27e294 | 2,600 | py | Python | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | [
"MIT"
] | null | null | null | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | [
"MIT"
] | null | null | null | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | [
"MIT"
] | null | null | null | import os
import shutil
import tarfile
import urllib.request
import pandas as pd
CIFAR10_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
| 40.625 | 115 | 0.575385 |
4ea10581f2a6479a2145424512ce3b01dbcd78d5 | 367 | py | Python | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | [
"MIT"
] | null | null | null | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | [
"MIT"
] | null | null | null | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | [
"MIT"
] | null | null | null |
for _ in range(int(input())):
n,k=map(int,input().split())
lst=list(map(int,input().split()))
printer(n,k,lst) | 24.466667 | 70 | 0.59673 |
4ea295d60036c358d4e3b22a59c7d5d5aba282d3 | 4,119 | py | Python | grpc/clients/python/vegaapiclient/generated/commands/v1/oracles_pb2.py | ConnorChristie/api | 7e585d47bad1a5ef95ca932045b0ce70962b029a | [
"MIT"
] | 6 | 2021-05-20T15:30:46.000Z | 2022-02-22T12:06:39.000Z | grpc/clients/python/vegaapiclient/generated/commands/v1/oracles_pb2.py | ConnorChristie/api | 7e585d47bad1a5ef95ca932045b0ce70962b029a | [
"MIT"
] | 29 | 2021-03-16T11:58:05.000Z | 2021-10-05T14:04:45.000Z | grpc/clients/python/vegaapiclient/generated/commands/v1/oracles_pb2.py | ConnorChristie/api | 7e585d47bad1a5ef95ca932045b0ce70962b029a | [
"MIT"
] | 6 | 2021-05-07T06:43:02.000Z | 2022-03-29T07:18:01.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: commands/v1/oracles.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='commands/v1/oracles.proto',
package='vega.commands.v1',
syntax='proto3',
serialized_options=b'\n io.vegaprotocol.vega.commands.v1Z+code.vegaprotocol.io/vega/proto/commands/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19\x63ommands/v1/oracles.proto\x12\x10vega.commands.v1\"\xcb\x01\n\x14OracleDataSubmission\x12K\n\x06source\x18\x01 \x01(\x0e\x32\x33.vega.commands.v1.OracleDataSubmission.OracleSourceR\x06source\x12\x18\n\x07payload\x18\x02 \x01(\x0cR\x07payload\"L\n\x0cOracleSource\x12\x1d\n\x19ORACLE_SOURCE_UNSPECIFIED\x10\x00\x12\x1d\n\x19ORACLE_SOURCE_OPEN_ORACLE\x10\x01\x42O\n io.vegaprotocol.vega.commands.v1Z+code.vegaprotocol.io/vega/proto/commands/v1b\x06proto3'
)
_ORACLEDATASUBMISSION_ORACLESOURCE = _descriptor.EnumDescriptor(
name='OracleSource',
full_name='vega.commands.v1.OracleDataSubmission.OracleSource',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ORACLE_SOURCE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ORACLE_SOURCE_OPEN_ORACLE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=175,
serialized_end=251,
)
_sym_db.RegisterEnumDescriptor(_ORACLEDATASUBMISSION_ORACLESOURCE)
_ORACLEDATASUBMISSION = _descriptor.Descriptor(
name='OracleDataSubmission',
full_name='vega.commands.v1.OracleDataSubmission',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='vega.commands.v1.OracleDataSubmission.source', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='source', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='payload', full_name='vega.commands.v1.OracleDataSubmission.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='payload', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_ORACLEDATASUBMISSION_ORACLESOURCE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=251,
)
_ORACLEDATASUBMISSION.fields_by_name['source'].enum_type = _ORACLEDATASUBMISSION_ORACLESOURCE
_ORACLEDATASUBMISSION_ORACLESOURCE.containing_type = _ORACLEDATASUBMISSION
DESCRIPTOR.message_types_by_name['OracleDataSubmission'] = _ORACLEDATASUBMISSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OracleDataSubmission = _reflection.GeneratedProtocolMessageType('OracleDataSubmission', (_message.Message,), {
'DESCRIPTOR' : _ORACLEDATASUBMISSION,
'__module__' : 'commands.v1.oracles_pb2'
# @@protoc_insertion_point(class_scope:vega.commands.v1.OracleDataSubmission)
})
_sym_db.RegisterMessage(OracleDataSubmission)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.495327 | 480 | 0.786113 |
4ea392c525c167ab6e4487bc47072399df2ebdf7 | 2,788 | py | Python | non-regression-tests/config.py | etalab-ia/piaf-ml | cd006b905d4c3e6a358326a42b84179724b00e5f | [
"MIT"
] | 5 | 2021-06-22T08:51:53.000Z | 2021-12-14T17:26:32.000Z | non-regression-tests/config.py | etalab-ia/piaf-ml | cd006b905d4c3e6a358326a42b84179724b00e5f | [
"MIT"
] | 55 | 2021-06-16T07:58:16.000Z | 2021-08-30T10:30:26.000Z | non-regression-tests/config.py | etalab-ia/piaf-ml | cd006b905d4c3e6a358326a42b84179724b00e5f | [
"MIT"
] | null | null | null | import os
parameter_tuning_options = {
"experiment_name": "non-regression-tests",
# Tuning method alternatives:
# - "optimization": use bayesian optimisation
# - "grid_search"
"tuning_method": "grid_search",
# Additionnal options for the grid search method
"use_cache": False,
# Additionnal options for the optimization method
"optimization_ncalls": 10,
}
parameters_fquad = {
"k_retriever": [5],
"k_title_retriever" : [1], # must be present, but only used when retriever_type == title_bm25
"k_reader_per_candidate": [20],
"k_reader_total": [10],
"reader_model_version": ["053b085d851196110d7a83d8e0f077d0a18470be"],
"retriever_model_version": ["1a01b38498875d45f69b2a6721bf6fe87425da39"],
"dpr_model_version": ["v1.0"],
"retriever_type": ["bm25"], # Can be bm25, sbert, dpr, title or title_bm25
"squad_dataset": [
os.getenv("DATA_DIR") + "/non-regression-tests/fquad_dataset.json"
],
"filter_level": [None],
"preprocessing": [False],
"boosting" : [1], #default to 1
"split_by": ["word"], # Can be "word", "sentence", or "passage"
"split_length": [1000],
}
# A dictionnary specifying the criteria a test result must pass. Keys are
# metrics names and keys are predicates on the corresponding metric which must
# return true if the value is satisfying.
pass_criteria_fquad = {
"reader_topk_accuracy_has_answer":
# metric ~= 0.747 +/- 1%
lambda metric: abs(metric / 0.747 - 1) < 0.01
}
parameters_dila = {
"k_retriever": [1],
"k_title_retriever" : [1], # must be present, but only used when retriever_type == title_bm25
"k_reader_per_candidate": [20],
"k_reader_total": [10],
"reader_model_version": ["053b085d851196110d7a83d8e0f077d0a18470be"],
"retriever_model_version": ["1a01b38498875d45f69b2a6721bf6fe87425da39"],
"dpr_model_version": ["v1.0"],
"retriever_type": ["bm25"], # Can be bm25, sbert, dpr, title or title_bm25
"squad_dataset": [
os.getenv("SRC_DIR") + "/piaf-ml/clients/dila/knowledge_base/squad.json"],
"filter_level": [None],
"preprocessing": [False],
"boosting" : [1], #default to 1
"split_by": ["word"], # Can be "word", "sentence", or "passage"
"split_length": [1000],
}
# A dictionnary specifying the criteria a test result must pass. Keys are
# metrics names and keys are predicates on the corresponding metric which must
# return true if the value is satisfying.
pass_criteria_dila = {
"reader_topk_accuracy_has_answer":
# metric ~= 0.427 +/- 1%
lambda metric: abs(metric / 0.427 - 1) < 0.01
}
tests = [
(parameters_fquad, parameter_tuning_options, pass_criteria_fquad),
(parameters_dila, parameter_tuning_options, pass_criteria_dila),
]
| 35.74359 | 97 | 0.681133 |
4ea47fc79c5dcbec42ef206e57f938c9dff9b024 | 2,101 | py | Python | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | [
"MIT"
] | 2 | 2021-06-07T12:22:46.000Z | 2021-09-14T00:19:03.000Z | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | [
"MIT"
] | null | null | null | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | [
"MIT"
] | null | null | null | import networkx as nx
from misc import maximum_matching_all
from networkx import get_node_attributes
# Use this for non-binary graph
| 32.828125 | 86 | 0.595907 |
4ea5498deec294ffeeebf2d2ad50bbf782de71a8 | 141 | py | Python | esteid/idcard/__init__.py | thorgate/django-esteid | 4a4227b20dca7db5441a3514f724f1404575562c | [
"BSD-3-Clause"
] | 17 | 2016-03-30T09:20:19.000Z | 2022-01-17T12:04:03.000Z | esteid/idcard/__init__.py | thorgate/django-esteid | 4a4227b20dca7db5441a3514f724f1404575562c | [
"BSD-3-Clause"
] | 15 | 2016-02-22T22:49:07.000Z | 2021-11-09T12:29:35.000Z | esteid/idcard/__init__.py | thorgate/django-esteid | 4a4227b20dca7db5441a3514f724f1404575562c | [
"BSD-3-Clause"
] | 2 | 2016-07-27T10:57:52.000Z | 2017-10-05T13:15:59.000Z | __all__ = ["BaseIdCardAuthenticationView", "IdCardSigner"]
from .signer import IdCardSigner
from .views import BaseIdCardAuthenticationView
| 28.2 | 58 | 0.836879 |
4eaa032d7c85557301f7b3de83a688e4d6c318a3 | 101 | py | Python | placeable_interface.py | Alex92rus/funkyBlue | 747fdbfc72edd85556465204f0f654a5cac32c2a | [
"MIT"
] | 2 | 2020-03-07T19:46:52.000Z | 2020-03-08T09:11:02.000Z | placeable_interface.py | Alex92rus/funkyBlue | 747fdbfc72edd85556465204f0f654a5cac32c2a | [
"MIT"
] | 3 | 2020-03-07T10:09:31.000Z | 2021-01-14T08:40:27.000Z | placeable_interface.py | Alex92rus/funkyBlue | 747fdbfc72edd85556465204f0f654a5cac32c2a | [
"MIT"
] | null | null | null |
from arcade import Sprite
| 7.769231 | 27 | 0.643564 |
4eaa0630211dc9678f367337b57ebf1618235962 | 3,765 | py | Python | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | [
"MIT"
] | 1 | 2020-05-20T16:32:33.000Z | 2020-05-20T16:32:33.000Z | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | [
"MIT"
] | 23 | 2020-05-19T07:12:53.000Z | 2020-06-21T03:57:54.000Z | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | [
"MIT"
] | 1 | 2020-05-18T14:18:12.000Z | 2020-05-18T14:18:12.000Z | """RESTful API Document resource."""
from flask_restx import Resource, reqparse
from flask_restx._http import HTTPStatus
from werkzeug.datastructures import FileStorage
from ..service.document_service import (
delete_document,
edit_document,
get_all_documents,
get_document,
save_document,
)
from .dto import DocumentDTO
api = DocumentDTO.document_api
_document = DocumentDTO.document
parser = reqparse.RequestParser()
parser.add_argument("document_name", type=str, help="Document name", location="form")
parser.add_argument("file", type=FileStorage, location="files")
# @api.route("/smes/<sme_id>")
# @api.param("sme_id", "The SME id")
# @api.response(HTTPStatus.NOT_FOUND, "SME not found")
# class DocumentSME(Resource):
# @api.doc("List all documents of an SME")
# @api.marshal_list_with(_document, envelope="data")
# def get(self, sme_id):
# """List all documents of an SME."""
# if not get_sme_by_id(sme_id):
# api.abort(404)
# return get_all_sme_documents(sme_id)
| 35.186916 | 86 | 0.657371 |
4eaadef4bc857f47d228828cdfd23ca47dfe5099 | 1,405 | py | Python | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
col_shortener = {
'Q1':'confirm',
'Q2':'faculty',
'Q3':'department',
'Q4':'funders',
'Q5':'position',
'Q6':'use_software',
'Q7':'importance_software',
'Q8':'develop_own_code',
'Q9':'development_expertise',
'Q10':'sufficient_training',
'Q11':'want_to_commercialise',
'Q12':'ready_to_release',
'Q13':'hpc_use',
'Q14_1':'version_control',
'Q14_2':'unit_regression_testing',
'Q14_3':'continuous_integration',
'Q14_4':'compilation',
'Q14_5':'documentation',
'Q15':'uni_support',
'Q16':'hired_developer',
'Q17':'costed_developer',
'Q18_1':'hire_full_time_developer',
'Q18_2':'hire_pool_developer',
'Q19':'voucher',
'Q20':'consulting',
'Q21':'mailing'
}
add_an_other_category = [
'funders',
'position',
'hpc_use'
]
sort_no_further_analysis = [
'faculty',
'funders',
'position',
'hpc_use'
]
yes_no_analysis = [
'use_software',
'develop_own_code',
'sufficient_training',
'want_to_commercialise',
'ready_to_release',
'hired_developer'
]
scale_analysis = [
'importance_software',
'development_expertise',
'sufficient_training'
]
worded_scale_analysis = [
'version_control',
'continuous_integration',
'unit_regression_testing',
'hire_full_time_developer',
'hire_pool_developer'
]
| 19.788732 | 39 | 0.635587 |
4eae8bae94764d3c5a64b90797dd929834fa6067 | 1,974 | py | Python | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | [
"BSD-3-Clause"
] | 1,171 | 2017-01-17T14:01:02.000Z | 2022-03-31T23:02:57.000Z | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | [
"BSD-3-Clause"
] | 1,946 | 2017-01-22T10:19:04.000Z | 2022-03-31T17:13:03.000Z | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | [
"BSD-3-Clause"
] | 499 | 2017-01-21T11:39:29.000Z | 2022-03-23T13:57:35.000Z | import pytest
import numpy as np
from anndata import AnnData
from scipy.sparse import csr_matrix
import scanpy as sc
# test "data" for 3 cells * 4 genes
X = [
[-1, 2, 0, 0],
[1, 2, 4, 0],
[0, 2, 2, 0],
] # with gene std 1,0,2,0 and center 0,2,2,0
X_scaled = [
[-1, 2, 0, 0],
[1, 2, 2, 0],
[0, 2, 1, 0],
] # with gene std 1,0,1,0 and center 0,2,1,0
X_centered = [
[-1, 0, -1, 0],
[1, 0, 1, 0],
[0, 0, 0, 0],
] # with gene std 1,0,1,0 and center 0,0,0,0
| 35.890909 | 81 | 0.662614 |
4eae8be82d67b6164b7865425e58eaf76d1e1eba | 7,810 | py | Python | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | [
"MIT"
] | null | null | null | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | [
"MIT"
] | null | null | null | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import bilinear
import patchreg
from skimage.util import view_as_windows
MAX_FEATURES = 5000
GOOD_MATCH_PERCENT = 0.45
if __name__ == "__main__":
# draw_img()
# exit()
root = "../data/"
master_srcdata = cv2.imread(root + "OK1_1.jpg")
target_srcdata = cv2.imread(root + "NG1_1.jpg")
master3 = master_srcdata[300:4850,:,:]
# cv2.imwrite("master3.jpg", master3)
target3 = target_srcdata[720:5270,:,:]
# cv2.imwrite("target3.jpg", target3)
# padding to 1000s, at least 2000
master3_pad, target3_pad, top_pad, down_pad, left_pad, right_pad = pad_imgs(master3, target3)
# cv2.imwrite("master3_pad.jpg", master3_pad)
# cv2.imwrite("target3_pad.jpg", target3_pad)
masterpad_h, masterpad_w, _ = master3_pad.shape
master_reg_pad = bilinear_interpolation_of_patch_registration(master3_pad, target3_pad)
master3_reg = master_reg_pad[top_pad: masterpad_h-down_pad, left_pad:masterpad_w-right_pad, : ]
cv2.imwrite("master3_reg.jpg", master3_reg)
cv2.imwrite("master3.jpg", master3)
cv2.imwrite("target3.jpg", target3)
# Stage Five: high-precision feature alignment
master_reg_out = process_single_imgpart(master3_reg, target3)
cv2.imwrite("master_reg_out.jpg", master_reg_out)
master_out = process_single_imgpart(master3, target3)
cv2.imwrite("master_out.jpg", master_out) | 43.631285 | 131 | 0.705634 |
4eaf9ec2243bbc0b3558c08de925bc43b8365f96 | 1,253 | py | Python | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 1 | 2020-08-29T06:41:03.000Z | 2020-08-29T06:41:03.000Z | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 10 | 2019-07-15T05:15:38.000Z | 2020-11-25T03:14:03.000Z | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 1 | 2020-11-22T08:25:26.000Z | 2020-11-22T08:25:26.000Z | # Copyright (c) 2020
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
import pygame as pg
import ruamel.yaml
from random import choice
vec = pg.math.Vector2
| 27.23913 | 59 | 0.60016 |
4eafa55a23b75bd6783941216b9d9087a84c8b15 | 9,860 | py | Python | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | [
"Apache-2.0"
] | 1 | 2017-05-29T23:03:13.000Z | 2017-05-29T23:03:13.000Z | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | [
"Apache-2.0"
] | null | null | null | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | [
"Apache-2.0"
] | null | null | null | import fnmatch
import os
import shutil
import subprocess
import sys
import time
from collections import OrderedDict
# The config module was renamed from ``ConfigParser`` to ``configparser``
# in Python 3; alias the Python 2 spelling so the rest of the file can
# always use the Python 3 name.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
# In a frozen build ``__file__`` is absent: remember that fact and provide
# a harmless placeholder so later path manipulation does not raise.
if '__file__' not in globals():
    __is_frozen = True
    __file__ = ''
else:
    __is_frozen = False
# Built-in per-project defaults for the pman config, grouped by section.
# OrderedDict keeps a stable section/key order when the file is written.
_config_defaults = OrderedDict([
    ('general', OrderedDict([
        ('name', 'Game'),
        ('render_plugin', ''),
    ])),
    ('build', OrderedDict([
        ('asset_dir', 'assets/'),
        ('export_dir', 'game/assets/'),
        ('ignore_patterns', '*.blend1, *.blend2'),
    ])),
    ('run', OrderedDict([
        ('main_file', 'game/main.py'),
        ('auto_build', True),
        ('auto_save', True),
    ])),
])
# Per-user (machine-local) defaults, kept separate from project settings.
_user_config_defaults = OrderedDict([
    ('blender', OrderedDict([
        ('last_path', 'blender'),
        ('use_last_path', True),
    ])),
])
| 27.853107 | 131 | 0.600406 |
4eb07bf2ab74b26ed4d8db65e2b44e12fd9bf220 | 1,326 | py | Python | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | [
"Apache-2.0"
] | null | null | null | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | [
"Apache-2.0"
] | null | null | null | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | [
"Apache-2.0"
] | null | null | null | import sys
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
# Argument 1 must be your plotly username, argument 2 is your api key.
# Get those by registering for a plotly account.
# Argument 3 is the name of the input file to read data from. It must
# consist of three-line records in the form: Date \n Download \n Upload \n
plotly.tools.set_credentials_file(username=sys.argv[1], api_key=sys.argv[2])
time = []      # x axis: running sample index (1, 2, ...)
download = []  # download-speed fields, one per record
upload = []    # upload-speed fields, one per record
lnum = 1       # which line of the current 3-line record is expected next
x = 1
# FIX: the original opened the file and never closed it; the context
# manager guarantees the handle is released even if parsing fails.
with open(sys.argv[3], 'r') as readings:
    for line in readings:
        if lnum == 1:
            # Date line: not plotted directly; use a sample counter
            # for the x axis instead.
            time.append(x)
            x += 1
            lnum = 2
        elif lnum == 2:
            # NOTE(review): fixed-column slicing assumes the exact field
            # widths of the log writer -- confirm with the producer.
            download.append(line[10:15])
            lnum = 3
        elif lnum == 3:
            upload.append(line[8:12])
            lnum = 1
        else:
            # Unreachable: lnum only cycles through 1 -> 2 -> 3.
            raise SystemError('lnum internal error', lnum)
# Grouped bar chart with one download/upload pair of bars per sample.
# (A long commented-out overlaid-histogram variant was removed here.)
trace1 = go.Bar(x=time, y=download, name='Download Speed')
trace2 = go.Bar(x=time, y=upload, name='Upload Speed')
data = [trace1, trace2]
layout = go.Layout(barmode='group')
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='Network Speed Graph')
| 17 | 116 | 0.6727 |
4eb27769bbc6f1af6058f15f8a964479f5a48ebc | 484 | py | Python | crosshair/libimpl/__init__.py | mristin/CrossHair | 66a44a0d10021e0b1e2d847a677274e62ddd1e9d | [
"MIT"
] | null | null | null | crosshair/libimpl/__init__.py | mristin/CrossHair | 66a44a0d10021e0b1e2d847a677274e62ddd1e9d | [
"MIT"
] | null | null | null | crosshair/libimpl/__init__.py | mristin/CrossHair | 66a44a0d10021e0b1e2d847a677274e62ddd1e9d | [
"MIT"
] | null | null | null | from crosshair.libimpl import builtinslib
from crosshair.libimpl import collectionslib
from crosshair.libimpl import datetimelib
from crosshair.libimpl import mathlib
from crosshair.libimpl import randomlib
from crosshair.libimpl import relib
| 30.25 | 44 | 0.82438 |
4eb3cdb71cd64b8402ec42bddffc7a5a65442095 | 690 | py | Python | schedulesy/apps/ade_legacy/serializers.py | unistra/schedulesy | bcd8c42281013f02ecd5c89fba9b622f20e47761 | [
"Apache-2.0"
] | 1 | 2020-07-24T19:17:56.000Z | 2020-07-24T19:17:56.000Z | schedulesy/apps/ade_legacy/serializers.py | unistra/schedulesy | bcd8c42281013f02ecd5c89fba9b622f20e47761 | [
"Apache-2.0"
] | 1 | 2020-07-09T10:23:28.000Z | 2020-07-09T10:23:28.000Z | schedulesy/apps/ade_legacy/serializers.py | unistra/schedulesy | bcd8c42281013f02ecd5c89fba9b622f20e47761 | [
"Apache-2.0"
] | null | null | null | import re
from django.urls import reverse
from rest_framework import serializers
from schedulesy.apps.ade_legacy.models import Customization
| 26.538462 | 75 | 0.701449 |
4eb6277eff4239146619cfdeeb4696e25cfb8808 | 927 | py | Python | tests/test_operator_filter.py | gva-jjoyce/gva_data | cda990d0abb4b175025aaf16e75192bd9cc213af | [
"Apache-2.0"
] | null | null | null | tests/test_operator_filter.py | gva-jjoyce/gva_data | cda990d0abb4b175025aaf16e75192bd9cc213af | [
"Apache-2.0"
] | 24 | 2020-12-24T12:21:42.000Z | 2021-01-28T14:22:38.000Z | tests/test_operator_filter.py | gva-jjoyce/gva_data | cda990d0abb4b175025aaf16e75192bd9cc213af | [
"Apache-2.0"
] | null | null | null | """
Test Filter Operator
"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from gva.flows.operators import FilterOperator
# ``rich`` is optional: install its pretty tracebacks when available and
# fall back silently to plain tracebacks otherwise.
try:
    from rich import traceback
    traceback.install()
except ImportError:
    pass
# Manual test entry point; the test functions themselves are defined
# elsewhere in this file.
if __name__ == "__main__":
    test_filter_operator_default()
    test_filter_operator()
print('okay') | 20.152174 | 52 | 0.558792 |
4eb65c2f13bb97d8948357e8ad1093ac25bd46cd | 5,620 | py | Python | python/pynamics/quaternion.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2018-08-20T22:01:18.000Z | 2021-04-19T00:50:56.000Z | python/pynamics/quaternion.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 3 | 2017-10-24T03:10:17.000Z | 2017-10-24T03:15:27.000Z | python/pynamics/quaternion.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2017-03-03T23:04:17.000Z | 2021-03-20T20:33:53.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 10:24:05 2021
@author: danaukes
https://en.wikipedia.org/wiki/Rotation_formalisms_in_three_dimensions
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
"""
import sympy
sympy.init_printing(pretty_print=False)
from sympy import sin,cos,tan,pi,acos
import numpy
import sympy
# Symbolic scalars: (a, b, c, d) for a generic quaternion and (e, f, g, h)
# for the unit quaternion used as the rotation operator below.
a,b,c,d = sympy.symbols('a,b,c,d')
e,f,g,h = sympy.symbols('e,f,g,h')
q = sympy.Symbol('q')
# NOTE(review): Quaternion and UnitQuaternion are defined outside this
# excerpt -- this section is a symbolic smoke test of their rotation API.
v1 = Quaternion(a,b,c,d)
v12 = [b,c,d]
# Rebinds ``q`` from the bare symbol above to a unit quaternion.
q = UnitQuaternion(e,f,g,h)
# q = Quaternion.build_from_axis_angle(q, 0,0,1)
# v1 = Quaternion(0,2,3,4)
# Three routes to the same rotation: dedicated method, explicit sandwich
# product q * v * q^-1, and rotating the plain 3-component vector.
v2 = v1.rotate_by(q)
v22 = q*v1*q.inv()
v3 = q.rotate(v12)
| 27.54902 | 109 | 0.55694 |
4eb77f4c11a2d3ec08d7055fbeacf7a5223e4aad | 630 | py | Python | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 13 | 2020-07-03T01:20:54.000Z | 2021-11-22T06:06:21.000Z | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 660 | 2020-06-26T02:52:18.000Z | 2022-03-31T14:14:02.000Z | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 3 | 2020-07-12T06:18:39.000Z | 2021-06-22T06:54:47.000Z | """Adds voice category per channel
Revision ID: 6e982c9318a6
Revises: ef54f035a75c
Create Date: 2021-12-03 13:18:57.468342
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "6e982c9318a6"        # this migration's id (see docstring above)
down_revision = "ef54f035a75c"   # parent revision this one upgrades from
branch_labels = None             # not attached to a named migration branch
depends_on = None                # no cross-dependencies on other revisions
| 19.6875 | 64 | 0.655556 |
4eb7e3ada081cac1383991df8368a6295ca6cbec | 5,057 | py | Python | decompile/Scanner.py | gauravssnl/Pyc2Py-Symbian | 6e0a3e8f4bf9b470005decabb3c34f9f4723cf61 | [
"MIT"
] | 3 | 2020-03-28T11:57:46.000Z | 2021-04-16T14:10:40.000Z | decompile/Scanner.py | gauravssnl/Pyc2Py-Symbian | 6e0a3e8f4bf9b470005decabb3c34f9f4723cf61 | [
"MIT"
] | null | null | null | decompile/Scanner.py | gauravssnl/Pyc2Py-Symbian | 6e0a3e8f4bf9b470005decabb3c34f9f4723cf61 | [
"MIT"
] | 3 | 2019-04-18T14:33:36.000Z | 2021-07-07T13:44:52.000Z | __all__ = ['Token', 'Scanner', 'getscanner']
import types
__scanners = {}
| 36.912409 | 77 | 0.479533 |
4eb925716edb4ee9dd67f2ff8a8ea4fae8d882c9 | 312 | py | Python | library/fcntl_test.py | creativemindplus/skybison | d1740e08d8de85a0a56b650675717da67de171a0 | [
"CNRI-Python-GPL-Compatible"
] | 278 | 2021-08-31T00:46:51.000Z | 2022-02-13T19:43:28.000Z | library/fcntl_test.py | creativemindplus/skybison | d1740e08d8de85a0a56b650675717da67de171a0 | [
"CNRI-Python-GPL-Compatible"
] | 9 | 2021-11-05T22:28:43.000Z | 2021-11-23T08:39:04.000Z | library/fcntl_test.py | tekknolagi/skybison | bea8fc2af0a70e7203b4c19f36c14a745512a335 | [
"CNRI-Python-GPL-Compatible"
] | 12 | 2021-08-31T07:49:54.000Z | 2021-10-08T01:09:01.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import unittest
if __name__ == "__main__":
unittest.main()
| 20.8 | 76 | 0.695513 |
4eb9e46990415a6b4e9b33a746cb5c6ea0b09797 | 7,576 | py | Python | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | [
"MIT"
] | null | null | null | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | [
"MIT"
] | null | null | null | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#title :main.py
#description :Tensorflow implementation of CapsNet.
#author :Jose Chavez
#date :2019/04/30
#version :1.0
#usage :python3 main.py
#python_version :3.6.7
#==============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from capsnet import CapsNet
from tensorflow.examples.tutorials.mnist import input_data
import functools
# Load (downloading on first use) the MNIST dataset consumed by the
# train/test routines defined elsewhere in this file.
mnist = input_data.read_data_sets('MNIST_data/')
batch_size = 10
# Fresh graph plus fixed TF/NumPy seeds so runs are reproducible.
tf.reset_default_graph()
tf.random.set_random_seed(0)
np.random.seed(0)
# Path used when saving/restoring model weights.
checkpoint_file = './tmp/model.ckpt'
if __name__ == '__main__':
    # Evaluate a CapsNet built with rounds=3 (presumably the dynamic-routing
    # iteration count -- confirm against the CapsNet class). The commented
    # lines switch the entry point to training or reconstruction instead.
    tf.reset_default_graph()
    model = CapsNet(rounds = 3)
    #train(model, False, 50)
    test(model)
    #reconstruction(model, 5)
| 35.905213 | 161 | 0.574314 |
4eb9f86a4d0753268d20bf93e74403357afd1729 | 7,707 | py | Python | legacy/otc_mp.py | kimSooHyun950921/Heuristics | 97757aebdaf1290c371b84596757de00742d9f5c | [
"Apache-2.0"
] | 3 | 2020-06-26T05:29:20.000Z | 2021-03-26T22:11:24.000Z | legacy/otc_mp.py | kimSooHyun950921/Heuristics | 97757aebdaf1290c371b84596757de00742d9f5c | [
"Apache-2.0"
] | 1 | 2021-08-23T20:51:27.000Z | 2021-08-23T20:51:27.000Z | legacy/otc_mp.py | kimSooHyun950921/Heuristics | 97757aebdaf1290c371b84596757de00742d9f5c | [
"Apache-2.0"
] | null | null | null | import os
import sys
import time
import decimal
import sqlite3
import multiprocessing
from secret import rpc_user, rpc_password
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import cluster_db_query as cdq
import db_query as dq
rpc_ip = '127.0.0.1'
rpc_port = '8332'
timeout = 300
def get_min_cluster_num(addr, flag=0):
    """DON'T USE -- known-broken helper kept for reference only.

    Intended behaviour (the original docstring was partially garbled):
      flag == 0: return the smallest cluster number seen for ``addr``;
      flag == 1: return the smallest cluster number greater than -1.

    The body is inconsistent: the cdq lookup is immediately discarded and
    ``addr_set`` is never defined in this module, so calling this function
    raises NameError.
    """
    cluster_num_list = cdq.get_min_cluster(addr)
    # BUG: the lookup result above is thrown away by this rebinding.
    cluster_num_list = list()
    # BUG: ``addr_set`` is undefined here (NameError at runtime); the loop
    # variable also shadows the ``addr`` parameter.
    for addr in addr_set.keys():
        cluster_num_list.append(addr_set[addr])
    sort_cls_num_list = sorted(cluster_num_list)
    if flag == 0:
        return sort_cls_num_list[0]
    elif flag == 1:
        # First number above -1; implicitly returns None when none qualify.
        for num in sort_cls_num_list:
            if num > -1:
                return num
def is_utxo(address, tx):
    """Report whether transaction ``tx`` still has unspent outputs.

    Fetches the UTXO entries of ``tx`` via :func:`get_utxo` and returns
    ``(True, utxo_list)`` when any exist, ``(False, None)`` otherwise.
    ``address`` is accepted for signature symmetry with the sibling
    ``is_*`` predicates but is not consulted here.
    """
    utxo_list = get_utxo(tx)
    # FIX: the original wrote ``utxo_list > 0``; comparing a list with an
    # int raises TypeError on Python 3. A plain truthiness test expresses
    # the intent (non-empty result) and also works if get_utxo ever
    # returns a count instead of a list.
    if utxo_list:
        return True, utxo_list
    return False, None
def is_first(address, tx):
    """Return True when ``tx`` is the first transaction in which
    ``address`` ever appeared, according to the cluster database."""
    earliest_tx = cdq.find_tx_first_appeared_address(address)
    return earliest_tx == tx
def is_power_of_ten(address, tx):
    """Return True when the value moved to ``address`` in ``tx`` carries
    at least four decimal places.

    The value is fetched via ``cdq.find_addr_value`` and its decimal-place
    count is read from ``Decimal.as_tuple().exponent`` (e.g. '0.0001' has
    exponent -4, so abs(exponent) == 4 and the function returns True).
    """
    value = cdq.find_addr_value(address, tx)
    # FIXES two defects in the original line:
    #   * it referenced an undefined name ``a`` instead of ``value``
    #     (guaranteed NameError);
    #   * ``exponent`` is a namedtuple field, not a method, so calling it
    #     raised ``TypeError: 'int' object is not callable``.
    num_of_decimal = abs(decimal.Decimal(str(value)).as_tuple().exponent)
    return num_of_decimal >= 4
def add_db(c_dict):
    """Persist cluster assignments for each address group in ``c_dict``.

    (Reconstructed from a partially garbled original docstring.)
    For every group of addresses:
      1. if none are clustered yet (the only cluster number is -1),
         allocate a fresh number (current max + 1) and assign it to all;
      2. otherwise reuse the first existing (non -1) cluster number and
         merge the other clusters into it.
    """
    for _, addrs in c_dict.items():
        cluster_num_list = sorted(list(cdq.get_cluster_number(addrs)))
        if len(cluster_num_list) == 1 and cluster_num_list[0] == -1:
            # Nothing clustered yet: mint a brand-new cluster id.
            cluster_num = cdq.get_max_clustered() + 1
            execute_list = list(zip([cluster_num]*len(addrs), addrs))
            cdq.update_cluster_many(execute_list)
        else:
            # Pick the first real (non -1) cluster number as merge target.
            cluster_num = -1
            for num in cluster_num_list:
                if num != -1:
                    cluster_num = num
                    break
            for num in cluster_num_list:
                if num != cluster_num:
                    # Addresses of the cluster being merged away.
                    addr = cdq.find_addr_from_cluster_num(num)
                else:
                    addr = addrs
                execute_list = list(zip([cluster_num]*len(addr), addr))
                cdq.update_cluster_many(execute_list)
if __name__=="__main__":
main() | 30.705179 | 101 | 0.552615 |
4ebb360ae9b11a1457dfb35575d9b1a3c0b33203 | 6,240 | py | Python | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | [
"MIT"
] | 11 | 2020-04-15T07:47:34.000Z | 2022-03-30T21:47:36.000Z | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | [
"MIT"
] | 20 | 2020-08-09T00:11:49.000Z | 2021-09-11T11:34:02.000Z | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | [
"MIT"
] | 6 | 2020-02-21T04:45:19.000Z | 2021-07-18T22:13:55.000Z | from typing import Optional, List
from pydantic import Field
from pydantic.main import BaseModel
from inoft_vocal_framework.utils.formatters import normalize_intent_name
| 37.365269 | 149 | 0.680769 |
4ebc1c80bd48bd6945b5be017cbcc2dddcc7d826 | 589 | py | Python | emailtemplates/admin.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | [
"MIT"
] | 1 | 2015-05-18T13:51:08.000Z | 2015-05-18T13:51:08.000Z | emailtemplates/admin.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | [
"MIT"
] | null | null | null | emailtemplates/admin.py | mpasternak/django-emailtemplates | 529e0120c8c3a58605257eff893df636a5cbf8d0 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from django.contrib import admin
from emailtemplates.models import EmailTemplate
from emailtemplates.models import MailServerFailure
# Wire both models into the Django admin with their customised ModelAdmin
# classes (EmailTemplateAdmin / MailServerFailureAdmin are defined
# elsewhere in this file).
admin.site.register(EmailTemplate, EmailTemplateAdmin)
admin.site.register(MailServerFailure, MailServerFailureAdmin)
| 26.772727 | 63 | 0.726655 |
4ebf96b0cd05bc2eb3a4a7d33d2460323ab21921 | 1,073 | py | Python | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | [
"MIT"
] | null | null | null | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | [
"MIT"
] | null | null | null | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#--------------------------------#
"""
File name: TPEX_STOCKBOT/main.py
Author: WEI-TA KUAN
Date created: 12/9/2021
Date last modified: 9/10/2021
Version: 1.0
Python Version: 3.8.8
Status: Developing
"""
#--------------------------------#
from scraping_data import stock_daily_scraping, tpex_holiday
import pickle
import datetime
# Today's year and date strings, formatted to match the holiday table.
year = datetime.datetime.today().strftime("%Y")
today = datetime.datetime.today().strftime("%Y/%m/%d")
# Cached TPEX market-holiday table, pickled to disk by tpex_holiday.
# NOTE(review): the lookup key rendered below as "" was originally a
# non-ASCII column name that was lost in transit -- confirm the real key.
holiday = pickle.load(open("assets/tpex_holiday.pkl",'rb'))
# update the market close date for each year
# (the ``while True`` runs exactly once; it exists only so the body reads
# as a refresh-then-continue step before the unconditional ``break``)
while True:
    if year != holiday[""][0].split("/")[0]:
        print("Update Holiday")
        tpex_holiday.get_holiday()
        holiday = pickle.load(open("assets/tpex_holiday.pkl",'rb'))
    break
# Dont run the code if the market is close
# Skip weekends (weekday 5/6) and any date listed in the holiday table.
if (today != holiday[""]).any() and datetime.datetime.today().weekday() not in [5, 6]:
    print("Run 360 TPEX Stockbot...")
    # run the daily scraping method to store today stock data
stock_daily_scraping.daily_scraping() | 29 | 90 | 0.649581 |
4ec073c949edac61a57ee7d6306e6b0a094db09d | 3,959 | py | Python | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | [
"Apache-2.0"
] | null | null | null | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | [
"Apache-2.0"
] | null | null | null | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | [
"Apache-2.0"
] | null | null | null | """
list twikis:
List all L1 Trigger Offline Twikis
Usage:
list twikis [check=1]
Parameters:
check: force a check of the twiki URL before printing.
Useful when adding new entries. Default: 0
"""
import logging
import urllib
import hepshell
# Module-level logger for this CLI sub-command.
LOG = logging.getLogger(__name__)
# Common prefix of the CERN twiki page URLs (each entry below still stores
# its full URL explicitly).
URL_PREFIX = 'https://twiki.cern.ch/twiki/bin/view/'
# Registry of L1-trigger offline twikis: display name -> url + one-line
# description. Per the module docstring, ``list twikis check=1`` can verify
# each URL before printing -- useful when adding new entries here.
TWIKIS = {
    'L1T offline DEV': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TOfflineDev',
        'description': 'Instructions for L1 offline software development',
    },
    'L1T Calo Upgrade Offline Analysis': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1CaloUpgradeOfflineAnalysis',
        'description': 'Some CaloL2 analysis workflows are detailed here',
    },
    'L1T phase 2': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2',
        'description': 'In preparation ! ',
    },
    'L1T phase 2 interface specs': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2InterfaceSpecifications',
        'description': 'Working definitions of Trigger Primitive inputs',
    },
    'CSC trigger emulator timing': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/CSCDigitizationTiming',
        'description': 'Simulation of signal times for CSC',
    },
    'L1 Trigger Emulator Stage 2 Upgrade Instructions': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TStage2Instructions',
        'description': 'L1 Trigger Emulator Stage 2 Upgrade Instructions',
    },
    'Offline DQM': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/DQMOffline',
        'description': 'Twiki meant to give you a basic understanding of Offline DQM',
    },
    'L1T DQM DEV': {
        'url': 'https://twiki.cern.ch/twiki/bin/view/Sandbox/L1TDQMModuleDev',
        'description': 'L1T DQM Module Development Guide',
    }
}
| 32.186992 | 97 | 0.605961 |
4ec139a98dfaa140655178c0f7864e5e8a59aecf | 1,528 | py | Python | examples/surrogates/corrnoise.py | manu-mannattil/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | [
"BSD-3-Clause"
] | 118 | 2017-06-21T08:38:07.000Z | 2022-03-29T05:39:44.000Z | examples/surrogates/corrnoise.py | tanmaymaloo/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | [
"BSD-3-Clause"
] | 2 | 2018-06-17T03:49:53.000Z | 2019-10-21T14:45:01.000Z | examples/surrogates/corrnoise.py | tanmaymaloo/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | [
"BSD-3-Clause"
] | 35 | 2018-06-16T22:41:24.000Z | 2022-02-19T19:42:45.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""IAAFT surrogates for correlated noise.
The properties of linearly correlated noise can be captured quite
accurately by IAAFT surrogates. Thus, they cannot easily fool
a dimension estimator (here we use Takens's maximum likelihood estimator
for the correlation dimension) if surrogate analysis is performed
additionally.
"""
import matplotlib.pyplot as plt
import numpy as np
from nolitsa import surrogates, d2, noise, delay
# Correlated test series: Gaussian noise smoothed by noise.sma (window
# half-width 100), then trimmed to the segment picked by
# surrogates.mismatch -- presumably to match the series end points for the
# FFT-based surrogate generation (confirm against nolitsa docs).
x = noise.sma(np.random.normal(size=(2 ** 12)), hwin=100)
ends = surrogates.mismatch(x)[0]
x = x[ends[0]:ends[1]]
# Autocorrelation time: first lag where the autocorrelation drops below
# 1/e; reused as both the embedding delay and the Theiler window.
act = np.argmax(delay.acorr(x) < 1 / np.e)
mle = np.empty(19)
# Compute 19 IAAFT surrogates and compute the correlation sum.
for k in range(19):
    y = surrogates.iaaft(x)[0]
    r, c = d2.c2_embed(y, dim=[7], tau=act, window=act)[0]
    # Compute the Takens MLE.
    r_mle, mle_surr = d2.ttmle(r, c)
    # Read the estimate off at scale ~0.5 * std of the surrogate.
    i = np.argmax(r_mle > 0.5 * np.std(y))
    mle[k] = mle_surr[i]
    plt.loglog(r, c, color='#BC8F8F')
# Same estimate for the original series, for comparison.
r, c = d2.c2_embed(x, dim=[7], tau=act, window=act)[0]
# Compute the Takens MLE.
r_mle, true_mle = d2.ttmle(r, c)
i = np.argmax(r_mle > 0.5 * np.std(x))
true_mle = true_mle[i]
plt.title('IAAFT surrogates for correlated noise')
plt.xlabel('Distance $r$')
plt.ylabel('Correlation sum $C(r)$')
plt.loglog(r, c, color='#000000')
plt.figure(2)
plt.title('Takens\'s MLE for correlated noise')
plt.xlabel(r'$D_\mathrm{MLE}$')
# Short ticks: the 19 surrogate MLEs; tall tick: the original series' MLE.
plt.vlines(mle, 0.0, 0.5)
plt.vlines(true_mle, 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)
plt.show()
| 26.807018 | 72 | 0.685864 |
4ec177a61c4b2700cdcadf9e2506e37171a32c85 | 1,853 | py | Python | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | 4 | 2022-03-09T05:20:46.000Z | 2022-03-13T11:18:58.000Z | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | null | null | null | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | 1 | 2022-03-09T05:21:53.000Z | 2022-03-09T05:21:53.000Z | import time
from unittest import TestCase
from app.pubmed.source_entrez import *
| 36.333333 | 115 | 0.636805 |
4ec3208965da07154e57bd52236ae75fc871d372 | 776 | py | Python | src/nltkproperties.py | marufzubery/Red-List-Bot | 6c9f737ede6d4c823693476fa7b7b85bf4dcf5a8 | [
"Apache-2.0"
] | null | null | null | src/nltkproperties.py | marufzubery/Red-List-Bot | 6c9f737ede6d4c823693476fa7b7b85bf4dcf5a8 | [
"Apache-2.0"
] | null | null | null | src/nltkproperties.py | marufzubery/Red-List-Bot | 6c9f737ede6d4c823693476fa7b7b85bf4dcf5a8 | [
"Apache-2.0"
] | null | null | null |
import nltk
import numpy as np
from nltk.stem.porter import PorterStemmer
nltk.download('punkt')
stemmer = PorterStemmer()
# splitting a string into words, punctuation and numbers
# generating the root form the words ex: universe - univers, university - univers
# put all these words in a bag to be used later
| 24.25 | 81 | 0.725515 |
4ec4dd9e5afd36d15c0c2a204aed4c3badf824b1 | 1,799 | py | Python | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | [
"Unlicense"
] | null | null | null | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | [
"Unlicense"
] | null | null | null | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import os
import click
from bank_api import create_app, db, models, utils
# Build the Flask app from the FLASK_CONFIG environment variable, falling
# back to the 'default' configuration when it is unset or empty.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
if __name__ == '__main__':
    # Development entry point only (debug server); use a WSGI server in
    # production.
    app.run(debug=True)
| 32.125 | 105 | 0.612007 |
4ec57a734fd6e6ba23c2187e7f9b9d79eb49894f | 742 | py | Python | src/push_api_clientpy/__init__.py | coveo/push-api-client.py | bc4e7a6befbaed14ac16863cc25ff43ef41525d8 | [
"MIT"
] | null | null | null | src/push_api_clientpy/__init__.py | coveo/push-api-client.py | bc4e7a6befbaed14ac16863cc25ff43ef41525d8 | [
"MIT"
] | 1 | 2022-02-09T11:59:17.000Z | 2022-02-09T11:59:17.000Z | src/push_api_clientpy/__init__.py | coveo/push-api-client.py | bc4e7a6befbaed14ac16863cc25ff43ef41525d8 | [
"MIT"
] | null | null | null | import sys
# ``importlib.metadata`` entered the stdlib in Python 3.8; older
# interpreters use the ``importlib_metadata`` backport instead.
if sys.version_info[:2] >= (3, 8):
    # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
    from importlib.metadata import PackageNotFoundError, version  # pragma: no cover
else:
    from importlib_metadata import PackageNotFoundError, version  # pragma: no cover
try:
    # Change here if project is renamed and does not equal the package name
    dist_name = "coveo-push-api-client.py"
    # Resolve the installed distribution's version once, at import time.
    __version__ = version(dist_name)
except PackageNotFoundError:  # pragma: no cover
    # Running from a source tree without the package being installed.
    __version__ = "unknown"
finally:
    # Keep the module namespace clean: these helpers are not public API.
    del version, PackageNotFoundError
from .document import *
from .documentbuilder import *
from .source import *
from .platformclient import *
from .securityidentitybuilder import *
| 32.26087 | 85 | 0.745283 |
4ec7963e75127ea8afb2b3034873981f0b12657f | 296 | py | Python | loaddd.py | Sharingsky/resrep | a173d1bc256b75b2c902024929e406863ce48b9b | [
"MIT"
] | null | null | null | loaddd.py | Sharingsky/resrep | a173d1bc256b75b2c902024929e406863ce48b9b | [
"MIT"
] | null | null | null | loaddd.py | Sharingsky/resrep | a173d1bc256b75b2c902024929e406863ce48b9b | [
"MIT"
] | null | null | null | import os
import sys
# Project root whose immediate subdirectories should become importable.
rootpath = "D:/_1work/pycharmcode/pycharmproject/resrep"
# Put the project root and its subdirectories ahead of the inherited
# sys.path entries, then re-append the original entries at the end.
syspath = sys.path
sys.path = []
sys.path.append(rootpath)
# BUGFIX: the original concatenated ``rootpath + i`` without a path
# separator, producing paths like ".../resrepfoo"; os.path.join builds the
# intended ".../resrep/foo". Hidden entries (leading '.') are skipped.
sys.path.extend([os.path.join(rootpath, i) for i in os.listdir(rootpath) if i[0] != "."])
sys.path.extend(syspath)
print(sys.path) | 32.888889 | 98 | 0.790541 |
4ecc15d4ccded89291e34497472b06937ec1df8b | 18,554 | py | Python | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | [
"MIT"
] | null | null | null | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | [
"MIT"
] | null | null | null | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | [
"MIT"
] | null | null | null | import argparse
import os
import shutil
import time
import sys
import sklearn
import sklearn.metrics
import torch
torch.cuda.init()
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from AlexNet import *
from voc_dataset import *
from utils import *
import wandb
# Toggle Weights & Biases logging for this run.
USE_WANDB = True # use flags, wandb is not convenient for debugging
# Names of every lowercase, non-dunder, callable attribute exposed by
# torchvision.models, i.e. the available model constructors.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', default='localizer_alexnet')
parser.add_argument(
'-j',
'--workers',
default=4,
type=int,
metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument(
'--epochs',
default=30,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument(
'--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument(
'-b',
'--batch-size',
default=256,
type=int,
metavar='N',
help='mini-batch size (default: 256)')
parser.add_argument(
'--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument(
'--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument(
'--weight-decay',
'--wd',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument(
'--print-freq',
'-p',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument(
'--eval-freq',
default=2,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument(
'--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument(
'-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument(
'--world-size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument(
'--dist-url',
default='tcp://224.66.41.62:23456',
type=str,
help='url used to set up distributed training')
parser.add_argument(
'--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--vis', action='store_true')
# Globals shared with the train/validate routines defined elsewhere in
# this file.
best_prec1 = 0    # best validation precision@1 seen so far
cntr_train = 0    # step counter for train-side logging
cntr_val = 0      # step counter for validation-side logging
#TODO: You can add input arguments if you wish
# TODO: You can make changes to this function if you wish (not necessary)
if __name__ == '__main__':
    main()
| 32.955595 | 129 | 0.579929 |