hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c97fb65ad57fdb00bbfc2ec6a2a6804e6bf866f3 | 50 | py | Python | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | __name__ = "{{Name}}"
__version__ = "{{Version}}"
| 16.666667 | 27 | 0.6 |
c9801e27d75fc448c57278f4f2febd70cf000239 | 3,203 | py | Python | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 5 | 2016-09-06T10:29:24.000Z | 2017-02-22T14:07:48.000Z | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 66 | 2016-09-06T06:40:24.000Z | 2022-03-11T23:18:05.000Z | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 3 | 2016-10-06T15:17:38.000Z | 2016-12-04T13:25:53.000Z | import json
# PyQt imports
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWebChannel import QWebChannel
# Local includes
from .ui.widget_ui import Ui_Dialog
from alfred import data_rc
import alfred.alfred_globals as ag
from alfred.modules.api.view_components import ARow, AColumn, ACard, AHeading
| 37.244186 | 138 | 0.67468 |
c98046d6e476b2db7f4e9b5014b73851b0a58d74 | 5,573 | py | Python | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | import re
from utils import *
def get_token_type(self, token):
if self.is_keyword(token):
return 'keyword'
elif self.is_symbol(token):
return 'symbol'
elif self.is_identifier(token):
return 'identifier'
elif self.is_int(token):
return 'integerConstant'
elif self.is_string(token):
return 'stringConstant'
def tokenize(self):
self.filter()
for line in self._data:
segments = self.split_line_by_symbols(line)
for segment in segments:
current_type = self.get_token_type(segment)
if current_type is not None:
self._types.append(current_type)
self._tokens.append(segment)
if current_type not in {'stringConstant', 'integerConstant'}:
current_type = current_type.lower()
else:
if current_type == 'stringConstant':
current_type = 'stringConstant'
self._tokens[-1] = self._tokens[-1].strip('\"')
segment = segment.strip('\"')
else:
current_type = 'integerConstant'
if segment in {'<', '>', '\"', '&'}:
self._tokens[-1] = self.convert_lt_gt_quot_amp(segment)
segment = self.convert_lt_gt_quot_amp(segment)
self._xml.append('<' + current_type + '> ' + segment + ' </' + current_type + '>')
elif len(segment.strip()):
print(segment)
raise Exception("Invalid Token")
self._xml.append('</tokens>')
| 33.981707 | 102 | 0.466535 |
c98092ff02eaf3078402f8fe2053638da3880d53 | 1,115 | py | Python | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | 2 | 2021-02-15T20:38:03.000Z | 2021-12-15T12:42:54.000Z | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | null | null | null | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | null | null | null | """
Main module of a program.
"""
import folium
from tools import find_coords, user_input
def creating_map():
"""
Creates HTML page for a given data.
"""
year, coords = user_input()
locations = find_coords(year, coords)
mp = folium.Map(location=coords, zoom_start=10)
mp.add_child(folium.Marker(
location=coords,
popup="You are here",
icon=folium.Icon(color='red',
icon_color='lightgray',
icon='home')))
for location in locations:
mp.add_child(folium.Marker(
location=[location[1][0], location[1][1]],
popup=location[0],
icon=folium.Icon(color='green',
icon_color='white',
icon='cloud')))
folium.PolyLine(locations=[(coords[0], coords[1]),
location[1]], color='orange').add_to(mp)
mp.save('nearest_films.html')
print("Map succesfully generated")
if __name__ == "__main__":
creating_map()
# print(find_coords(2017, (52.4081812, -1.510477)))
| 27.195122 | 75 | 0.552466 |
c98373f93bfe070f74725f6b7462934da5ef570c | 1,366 | py | Python | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | null | null | null | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | 25 | 2021-12-08T07:20:11.000Z | 2021-12-10T12:07:05.000Z | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | null | null | null | from ptCrypt.Symmetric.Modes.Mode import Mode
from ptCrypt.Symmetric.BlockCipher import BlockCipher
from ptCrypt.Symmetric.Paddings.Padding import Padding
| 35.947368 | 170 | 0.666179 |
c983d81c361719032d41d5bf9ca26fcce754a0f2 | 1,335 | py | Python | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 8 | 2021-08-25T01:08:09.000Z | 2022-01-18T12:44:41.000Z | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | null | null | null | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 1 | 2022-03-13T22:36:18.000Z | 2022-03-13T22:36:18.000Z | #!/usr/bin/env python3 # Originally python2
# Sample from https://www.collabora.com/news-and-blog/blog/2019/05/14/an-ebpf-overview-part-5-tracing-user-processes/
# Python program with embedded C eBPF program
from bcc import BPF, USDT
import sys
bpf = """
#include <uapi/linux/ptrace.h>
BPF_PERF_OUTPUT(events);
struct file_transf {
char client_ip_str[20];
char file_path[300];
u32 file_size;
u64 timestamp;
};
int trace_file_transfers(struct pt_regs *ctx, char *ipstrptr, char *pathptr, u32 file_size) {
struct file_transf ft = {0};
ft.file_size = file_size;
ft.timestamp = bpf_ktime_get_ns();
bpf_probe_read(&ft.client_ip_str, sizeof(ft.client_ip_str), (void *)ipstrptr);
bpf_probe_read(&ft.file_path, sizeof(ft.file_path), (void *)pathptr);
events.perf_submit(ctx, &ft, sizeof(ft));
return 0;
};
"""
u = USDT(pid=int(sys.argv[1]))
u.enable_probe(probe="file_transfer", fn_name="trace_file_transfers")
b = BPF(text=bpf, usdt_contexts=[u])
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
| 31.046512 | 117 | 0.702622 |
c984c4501d6e403db82fdd8d7b4131f8e313f048 | 1,004 | py | Python | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pytest
from sudoku import SudokuLine, build_rows, build_columns
from sudoku import SudokuSquare, build_squares
| 23.348837 | 56 | 0.657371 |
c985d647edcaf8c1a409b8e34f91d4add29cf574 | 1,424 | py | Python | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | import os
import json
import datetime
from pytz import timezone, utc
def update_total_score(name_list_dict, score_rules, now_kst_aware, penalty_const=.1):
"""
Update Total Score when scheduled day written in "ScoreRule.json"
:param name_list_dict: This contains contestants score info loaded from "namelist.json"
:param score_rules: Score rules loaded from "ScoreRule.json"
:param now_kst_aware: Current Aware Time(UTC difference info stored) for Korea/Seoul(+9:00)
:return: None
"""
current_time = str(now_kst_aware)
name_list = name_list_dict['namelist']
# Read Score Rules and Calculate total score
for rule in score_rules:
date_rule = datetime.datetime.strptime(rule['date'], '%Y-%m-%d %H:%M:%S')
if now_kst_aware.month == date_rule.month and now_kst_aware.day == date_rule.day:
name_list_dict['total_score_update_time'] = current_time
print("Today is {} Update scheduled as {}".format(rule["var_name"], rule['date']))
# Todo: change 'avg_accuracy' to 'last_accuracy'
for info in name_list:
info[rule["var_name"]] = info['avg_accuracy']
for info in name_list:
total_score = 0
for rule in score_rules:
total_score += info[rule['var_name']] * rule['weight']
total_score -= info["penalty"] * penalty_const
info['total_score'] = round(total_score, 5) | 45.935484 | 95 | 0.67486 |
c98644a1740c0b9a2213d68e9dafb7bed9e7032f | 3,500 | py | Python | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 41 | 2018-10-05T21:48:33.000Z | 2022-02-16T10:24:39.000Z | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 9 | 2018-10-21T14:45:01.000Z | 2022-02-25T14:25:29.000Z | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 9 | 2018-10-16T07:00:51.000Z | 2022-02-17T13:10:47.000Z | import os
import gzip
import pickle
import h5py
import numpy as np
import theano
from utils.misc import get_file_names_in_dir
from utils.vocab import UNK
class Conll05Loader(Loader):
class Conll12Loader(Loader):
def load_emb(path):
word_list = []
emb = []
with open(path) as f:
for line in f:
line = line.rstrip().split()
word_list.append(line[0])
emb.append(line[1:])
emb = np.asarray(emb, dtype=theano.config.floatX)
if UNK not in word_list:
word_list = [UNK] + word_list
unk_vector = np.mean(emb, axis=0)
emb = np.vstack((unk_vector, emb))
return word_list, emb
def load_pickle(fn):
with gzip.open(fn, 'rb') as gf:
return pickle.load(gf)
def load_key_value_format(fn):
data = []
with open(fn, 'r') as f:
for line in f:
key, value = line.rstrip().split()
data.append((key, int(value)))
return data
| 26.315789 | 69 | 0.513714 |
a309e90ac2f88ea56edc2aaeacb9b7f74fba3681 | 591 | py | Python | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | null | null | null | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | 3 | 2020-02-11T23:29:05.000Z | 2021-06-10T21:03:42.000Z | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | 2 | 2019-01-24T20:39:31.000Z | 2019-01-29T07:42:27.000Z | from django.urls import path
from .views import (
home,
MachineDetailView,
MachineListView,
DryRunDataDetailView,
MachineLastDataView,
)
urlpatterns = [
path('', MachineListView.as_view(), name='home-view'),
path('', MachineListView.as_view(), name='machine-list-view'),
path('machine/<int:pk>', MachineDetailView.as_view(), name='machine-detail-view'),
path('machine/<int:pk>/last', MachineLastDataView.as_view(), name='machine-last-data-view'),
path('machine/run_data/<int:pk>', DryRunDataDetailView.as_view(), name='dry-run-data-detail-view'),
]
| 32.833333 | 103 | 0.698816 |
a30a5b9c466fd79c98aae5b462aff3ba4ea72d40 | 480 | py | Python | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | """
Python version 3.6.7
OS Linux Ubuntu 18.04.1 LTS
Created: 30/11/2018 17:12
Finished: 30/11/2018 19:
Author: Adrian Garrido Garcia
"""
import sys
from wall.builder import build_a_wall
if __name__ == '__main__':
try:
build_a_wall(sys.argv[1], sys.argv[2])
except IndexError:
rows = input("Please, give me the number of wall rows: ")
bricks = input("Please, give me the number of bricks for every wall row: ")
build_a_wall(rows, bricks)
| 25.263158 | 83 | 0.672917 |
a30c417b3a747422a1fa92c8a3a68fa2a0ddf883 | 2,770 | py | Python | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | import os
import csv
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
def transform(snippet):
''' stack & noralization '''
snippet = np.concatenate(snippet, axis=-1)
snippet = torch.from_numpy(snippet).permute(2, 0, 1).contiguous().float()
snippet = snippet.mul_(2.).sub_(255).div(255)
snippet = snippet.view(-1,3,snippet.size(1),snippet.size(2)).permute(1,0,2,3)
return snippet
# from gist.github.com/MFreidank/821cc87b012c53fade03b0c7aba13958 | 37.432432 | 136 | 0.615884 |
a30c4f34a3721276b10ca7d81878d13ffef5c2e3 | 1,342 | py | Python | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | 2 | 2017-01-30T01:33:07.000Z | 2017-02-12T22:00:19.000Z | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | 90 | 2017-02-02T01:56:30.000Z | 2017-05-07T02:58:46.000Z | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-19 08:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 34.410256 | 114 | 0.551416 |
a30c6a24fddc04808da5db4f7c0d305641d9674e | 8,505 | py | Python | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | 2 | 2020-07-06T23:25:12.000Z | 2021-09-21T23:11:55.000Z | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | null | null | null | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | null | null | null | # generate_tfrecords.py
# Note: substantial portions of this code, expecially the create_tf_example() function, are credit to Dat Tran
# see his website here: https://towardsdatascience.com/how-to-train-your-own-object-detector-with-tensorflows-object-detector-api-bec72ecfe1d9
# and his GitHub here: https://github.com/CDahmsTemp/TensorFlow_Tut_3_Object_Detection_Walk-through/blob/master/1_xml_to_csv.py
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple
# module-level variables ##############################################################################################
# input training CSV file and training images directory
TRAIN_CSV_FILE_LOC = os.getcwd() + "/training_data/" + "Bbox_info_CSV_output_Train.csv"
TRAIN_IMAGES_DIR = os.getcwd() + "/Train_Output"
# input test CSV file and test images directory
EVAL_CSV_FILE_LOC = os.getcwd() + "/training_data/" + "Bbox_info_CSV_output_Evaluation.csv"
TEST_IMAGES_DIR = os.getcwd() + "/Evaluation_Output"
# training and testing output .tfrecord files
TRAIN_TFRECORD_FILE_LOC = os.getcwd() + "/training_data/" + "train.tfrecord"
EVAL_TFRECORD_FILE_LOC = os.getcwd() + "/training_data/" + "eval.tfrecord"
#######################################################################################################################
# end main
#######################################################################################################################
# end function
#######################################################################################################################
# end function
#######################################################################################################################
# end function
#######################################################################################################################
# end function
#######################################################################################################################
# end function
#######################################################################################################################
if __name__ == '__main__':
main() | 45 | 142 | 0.642563 |
a30d6af902c1a8c64022ae0458cac17dd1fa6032 | 6,398 | py | Python | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | import gevent.monkey
gevent.monkey.patch_all()
import os
from logging import getLogger
#from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.gevent import GeventScheduler as Scheduler
from couchdb import Server, Session
from couchdb.http import Unauthorized, extract_credentials
from datetime import datetime, timedelta
#from openprocurement.chronograph.jobstores import CouchDBJobStore
from openprocurement.chronograph.design import sync_design
from openprocurement.chronograph.scheduler import push
from openprocurement.chronograph.utils import add_logging_context
from pyramid.config import Configurator
from pytz import timezone
from pyramid.events import ApplicationCreated, ContextFound
from pbkdf2 import PBKDF2
LOGGER = getLogger(__name__)
TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev')
SECURITY = {u'admins': {u'names': [], u'roles': ['_admin']}, u'members': {u'names': [], u'roles': ['_admin']}}
VALIDATE_DOC_ID = '_design/_auth'
VALIDATE_DOC_UPDATE = """function(newDoc, oldDoc, userCtx){
if(newDoc._deleted) {
throw({forbidden: 'Not authorized to delete this document'});
}
if(userCtx.roles.indexOf('_admin') !== -1 && newDoc.indexOf('_design/') === 0) {
return;
}
if(userCtx.name === '%s') {
return;
} else {
throw({forbidden: 'Only authorized user may edit the database'});
}
}"""
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.add_subscriber(add_logging_context, ContextFound)
config.include('pyramid_exclog')
config.add_route('home', '/')
config.add_route('resync_all', '/resync_all')
config.add_route('resync_back', '/resync_back')
config.add_route('resync', '/resync/{tender_id}')
config.add_route('recheck', '/recheck/{tender_id}')
config.add_route('calendar', '/calendar')
config.add_route('calendar_entry', '/calendar/{date}')
config.add_route('streams', '/streams')
config.scan(ignore='openprocurement.chronograph.tests')
config.add_subscriber(start_scheduler, ApplicationCreated)
config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))
db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
server = Server(settings.get('couchdb.url'), session=Session(retry_delays=range(60)))
if 'couchdb.admin_url' not in settings and server.resource.credentials:
try:
server.version()
except Unauthorized:
server = Server(extract_credentials(settings.get('couchdb.url'))[0], session=Session(retry_delays=range(60)))
config.registry.couchdb_server = server
if 'couchdb.admin_url' in settings and server.resource.credentials:
aserver = Server(settings.get('couchdb.admin_url'), session=Session(retry_delays=range(10)))
users_db = aserver['_users']
if SECURITY != users_db.security:
LOGGER.info("Updating users db security", extra={'MESSAGE_ID': 'update_users_security'})
users_db.security = SECURITY
username, password = server.resource.credentials
user_doc = users_db.get('org.couchdb.user:{}'.format(username), {'_id': 'org.couchdb.user:{}'.format(username)})
if not user_doc.get('derived_key', '') or PBKDF2(password, user_doc.get('salt', ''), user_doc.get('iterations', 10)).hexread(int(len(user_doc.get('derived_key', '')) / 2)) != user_doc.get('derived_key', ''):
user_doc.update({
"name": username,
"roles": [],
"type": "user",
"password": password
})
LOGGER.info("Updating chronograph db main user", extra={'MESSAGE_ID': 'update_chronograph_main_user'})
users_db.save(user_doc)
security_users = [username, ]
if db_name not in aserver:
aserver.create(db_name)
db = aserver[db_name]
SECURITY[u'members'][u'names'] = security_users
if SECURITY != db.security:
LOGGER.info("Updating chronograph db security", extra={'MESSAGE_ID': 'update_chronograph_security'})
db.security = SECURITY
auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
if auth_doc.get('validate_doc_update') != VALIDATE_DOC_UPDATE % username:
auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
LOGGER.info("Updating chronograph db validate doc", extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
db.save(auth_doc)
# sync couchdb views
sync_design(db)
db = server[db_name]
else:
if db_name not in server:
server.create(db_name)
db = server[db_name]
# sync couchdb views
sync_design(db)
config.registry.db = db
jobstores = {
#'default': CouchDBJobStore(database=db_name, client=server)
}
#executors = {
#'default': ThreadPoolExecutor(5),
#'processpool': ProcessPoolExecutor(5)
#}
job_defaults = {
'coalesce': False,
'max_instances': 3
}
config.registry.api_url = settings.get('api.url')
config.registry.callback_url = settings.get('callback.url')
scheduler = Scheduler(jobstores=jobstores,
#executors=executors,
job_defaults=job_defaults,
timezone=TZ)
if 'jobstore_db' in settings:
scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
config.registry.scheduler = scheduler
# scheduler.remove_all_jobs()
# scheduler.start()
resync_all_job = scheduler.get_job('resync_all')
now = datetime.now(TZ)
if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
if resync_all_job:
args = resync_all_job.args
else:
args = [settings.get('callback.url') + 'resync_all', None]
run_date = now + timedelta(seconds=60)
scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
id='resync_all', args=args,
replace_existing=True, misfire_grace_time=60 * 60)
return config.make_wsgi_app()
| 44.124138 | 215 | 0.664739 |
a30f4fc2ab1f50558de3a730d24cdd2bc794f650 | 1,078 | py | Python | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 12 | 2016-03-05T16:40:16.000Z | 2019-10-27T07:48:12.000Z | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 1 | 2016-03-03T16:54:59.000Z | 2016-03-09T12:14:33.000Z | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 2 | 2015-12-01T08:01:07.000Z | 2019-10-27T07:48:19.000Z | #code which sends many setBlock commands all in one go, to see if there was
# a performance improvement.. It sent them a lot quicker, but you still had to wait
# for minecraft to catch up
import mcpi.minecraft as minecraft
import mcpi.block as block
import mcpi.util as util
from time import time, sleep
mc = minecraft.Minecraft.create()
starttime = time()
blocksToSet = []
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
blocksToSet.append((x,y,z,block.DIAMOND_BLOCK.id))
endtime = time()
print(endtime - starttime)
setManyBlocks(mc, blocksToSet)
sleep(5)
starttime = time()
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
mc.setBlock(x,y,z,block.DIRT.id)
endtime = time()
print(endtime - starttime)
| 25.069767 | 83 | 0.666976 |
a31137aa372b035d450bbbaac3b873065c92d845 | 2,059 | py | Python | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | null | null | null | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | 1 | 2021-02-22T18:06:52.000Z | 2021-02-22T18:06:52.000Z | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | null | null | null | from django import template
from django.conf import settings
from django.utils.translation import ugettext as _
from django.forms.forms import pretty_name
#from yats.diff import generate_patch_html
import re
try:
import json
except ImportError:
from django.utils import simplejson as json
register = template.Library()
register.filter('prettify', prettify)
register.filter('contains', contains)
register.filter('numberToTicketURL', numberToTicketURL)
register.tag('diff', do_diff)
| 27.824324 | 107 | 0.599806 |
a3140847cb4f8ae37f600751b8e796fb5fea58ee | 129 | py | Python | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | 1 | 2018-06-21T18:21:24.000Z | 2018-06-21T18:21:24.000Z | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | """Test the binding of names when a circular zaimportuj shares the same name jako an
attribute."""
z .rebinding2 zaimportuj util
| 32.25 | 84 | 0.782946 |
a3156184194412b6c58e7f98504a56f1d8eea1bf | 1,132 | py | Python | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 125 | 2017-08-10T18:09:55.000Z | 2022-03-29T10:14:31.000Z | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 4 | 2018-01-19T05:42:58.000Z | 2019-03-07T06:18:52.000Z | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 79 | 2017-08-15T00:40:36.000Z | 2022-02-26T10:20:24.000Z | #!/usr/bin/env python
# Python Network Programming Cookbook, Second Edition -- Chapter - 8
# This program is optimized for Python 2.7.12 and Python 3.5.2.
# It may run on any other version with/without modifications.
import os
from scapy.all import *
pkts = []
count = 0
pcapnum = 0
if __name__ == '__main__':
print ("Started packet capturing and dumping... Press CTRL+C to exit")
sniff(prn=write_cap)
test_dump_file()
| 24.085106 | 74 | 0.590106 |
a316647d2535cb3b325343092f5d4ee583cf738e | 2,862 | py | Python | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 4 | 2015-03-04T11:35:15.000Z | 2017-05-05T04:00:26.000Z | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 16 | 2015-02-14T00:56:03.000Z | 2015-02-19T22:23:13.000Z | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 3 | 2015-08-25T13:20:17.000Z | 2020-05-10T19:02:18.000Z | import pyramid_crud
import pytest
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import ISessionFactory
def test_check_session_no_factory(config):
with pytest.raises(ConfigurationError):
pyramid_crud.check_session(config)
def test_parse_options_from_settings_defaults():
settings = pyramid_crud.parse_options_from_settings({}, 'crud.')
ref_settings = {'static_url_prefix': '/static/crud'}
assert settings == ref_settings
def test_includeme_no_session(config):
pyramid_crud.includeme(config)
with pytest.raises(ConfigurationError):
config.commit()
def test_includeme_session_correct_order(config):
config.action(('pyramid_crud', 'session_test'), register)
pyramid_crud.includeme(config)
config.commit()
def test_includeme_session_wrong_order(config):
config.action(('pyramid_crud', 'session_test'), register, order=2)
pyramid_crud.includeme(config)
with pytest.raises(ConfigurationError):
config.commit()
| 30.446809 | 74 | 0.759609 |
a3168c69f4eb9f2ba122306fee2a6890c6f1230e | 1,621 | py | Python | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-02-28T07:43:59.000Z | 2021-02-28T07:43:59.000Z | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | null | null | null | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-03-03T03:52:21.000Z | 2021-03-03T03:52:21.000Z | """
You are given a sorted array in ascending order that is rotated at some unknown pivot
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]) and a target value.
Write a function that returns the target value's index. If the target value is not present
in the array, return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
""" | 25.730159 | 91 | 0.58359 |
a31d402b111d9ee652386e79f628f7e0ddffa959 | 987 | py | Python | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | 3 | 2021-01-18T22:10:05.000Z | 2022-01-07T21:46:34.000Z | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | null | null | null | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | 2 | 2020-10-21T01:27:34.000Z | 2021-01-02T23:51:02.000Z | import discord
from discord.ext import commands
| 37.961538 | 140 | 0.64843 |
a31f5b674099dd26d6054dab2dbff6ca679ee640 | 8,215 | py | Python | torch/ao/quantization/fx/fusion_patterns.py | li-ang/pytorch | 17f3179d607b9a2eac5efdfc36673e89f70e6628 | [
"Intel"
] | 1 | 2022-02-15T07:07:31.000Z | 2022-02-15T07:07:31.000Z | torch/ao/quantization/fx/fusion_patterns.py | xiaozhoushi/pytorch | 7dba88dfdb414def252531027658afe60409291d | [
"Intel"
] | null | null | null | torch/ao/quantization/fx/fusion_patterns.py | xiaozhoushi/pytorch | 7dba88dfdb414def252531027658afe60409291d | [
"Intel"
] | null | null | null | import torch
from torch.fx.graph import Node
from .pattern_utils import (
register_fusion_pattern,
)
from .utils import _parent_name
from .quantization_types import QuantizerCls, NodePattern, Pattern
from ..fuser_method_mappings import get_fuser_method
from ..fuser_method_mappings import get_fuser_method_new
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Union
from .match_utils import MatchAllNode
# ----------------------------
# Fusion Pattern Registrations
# ----------------------------
# Base Pattern Handler
| 48.89881 | 118 | 0.684967 |
a31fadf9b33e9208ee29c713435331b8514e5684 | 9,786 | py | Python | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 38 | 2021-08-19T18:07:49.000Z | 2022-02-28T10:41:29.000Z | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 1 | 2021-10-30T14:43:18.000Z | 2021-11-13T01:18:53.000Z | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 5 | 2021-08-20T05:12:42.000Z | 2022-01-13T06:14:27.000Z | import numpy as np
import torch
import torch.nn as nn
import torchvision
EPS = 1e-7
| 41.466102 | 128 | 0.54251 |
a31fd12f7a47de59f2c6b4b5f49ad1fb2f1eaf44 | 244 | py | Python | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | 2 | 2018-01-19T08:16:13.000Z | 2019-08-15T12:26:08.000Z | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | null | null | null | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | 1 | 2019-08-15T12:26:11.000Z | 2019-08-15T12:26:11.000Z | #!/usr/bin python
#coding:utf-8
#
# 10^7
import random
RANGE = 10000000
f = open('../test/input/bitSort.input','w')
for i in random.sample(range(RANGE),RANGE):
f.write(str(i) + '\n')
f.close()
print 'generator input file success!' | 15.25 | 43 | 0.672131 |
a323da1e6144f951fab0d4c366a9e8d27bf93ca5 | 46,478 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
| 39.48853 | 146 | 0.650652 |
a3256f1d5ce64484739511b64bf4572f8dcbb09c | 407 | py | Python | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | 3 | 2021-04-04T19:32:29.000Z | 2022-02-10T05:25:27.000Z | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | null | null | null | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | null | null | null | import nibabel as nib
import numpy as np
from glob import glob
| 22.611111 | 44 | 0.638821 |
a3259ed1f24efeaecf755551060f140ed167c93c | 576 | py | Python | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | import os
import unittest
from doublebook.ebook import Ebook
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Allow running this test module directly; verbosity=3 prints each test
# name as it runs instead of the default dot-per-test output.
if __name__ == '__main__':
    unittest.main(verbosity=3)
a32a2a3e9e3b2d5447dc0047b1a00f9577f9eedc | 13,924 | py | Python | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 155 | 2019-02-04T20:24:35.000Z | 2020-07-10T00:14:37.000Z | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 25 | 2019-02-15T05:36:02.000Z | 2020-07-10T08:06:00.000Z | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 13 | 2019-04-09T09:32:55.000Z | 2020-07-07T20:56:46.000Z | #!/usr/bin/env python
"""
run this every time you upgrade the godot-base version to generate new matching github workflows
You must be in this directory, and in the modules subfolder of godot (just as if you would install this project into godot)
usage:
python build_github_actions.py --godot-version "3.4.4-stable" --godot-github-folder ../../.github --ECMAS-github-folder .github
"""
import argparse
import yaml
import os
import subprocess
from dataclasses import dataclass, field
from typing import Dict, List, Any
import copy
# https://stackoverflow.com/a/33300001 + some changes
yaml.add_representer(str, str_presenter)
# to use with safe_dump:
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
# END https://stackoverflow.com/a/33300001
def fix_all_actions(ECMAS_github_folder: str, actions: List[str]) -> List[str]:
    """
    Rewrite composite-action YAML files in place so every scripted step is
    duplicated once per supported shell ("sh" and "msys2 {0}"), guarded by an
    ``if: inputs.shell == ...`` condition, and expose a ``shell`` input on the
    action.  Returns the sorted base names (path component after the first
    '/') of the actions that gained the ``shell`` input.

    This can be simplified once:
    https://github.com/actions/runner/pull/1767
    is completed
    """
    actions_that_require_shell_set = set()
    for action_base_fn in actions:
        full_action_fn = os.path.join(ECMAS_github_folder, action_base_fn)
        # NOTE(review): the read handle is never closed explicitly; relies on
        # CPython refcounting. Safe here, but a with-block would be cleaner.
        data = yaml.safe_load(open(full_action_fn))
        new_steps = []
        for step in data["runs"]["steps"]:
            if "shell" in step:
                # Fan the step out: one copy per shell, each gated by an
                # `if` expression comparing against the new `shell` input.
                for shell in ["sh", "msys2 {0}"]:
                    cp_step = copy.deepcopy(step)
                    cp_step["shell"] = shell
                    cp_step["if"] = f"inputs.shell == '{shell}'"
                    new_steps.append(cp_step)
                # Declare the `shell` input on the action (idempotent:
                # re-assigning the same mapping on later steps is harmless).
                data["inputs"]["shell"] = {"description": "the shell to run this under", "default": "sh"}
                actions_that_require_shell_set.add(action_base_fn)
            else:
                # Non-script steps (e.g. `uses:` steps) pass through untouched.
                new_steps.append(step)
            # new_steps.append(step)
            # Uncomment this when github actions updated
            # if "shell" in step:
            #     step["shell"] = "${{ inputs.shell }}"
            #     data["inputs"]["shell"] = {"description": "the shell to run this under", "default": "sh"}
            # new_steps.append(step)
        data["runs"]["steps"] = new_steps
        # Write the transformed document back over the original file.
        with open(full_action_fn, "w") as fh:
            yaml.dump(data, fh, sort_keys=False, allow_unicode=True)
    return list(sorted([x.split("/")[1] for x in actions_that_require_shell_set]))
if __name__ == "__main__":
main()
| 40.242775 | 214 | 0.568946 |
a32ad9de709c3a24f830152b0d7a35e9a5113527 | 10,061 | py | Python | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-11-13T17:26:59.000Z | 2021-03-19T15:11:26.000Z | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-10-28T10:22:35.000Z | 2021-01-27T17:33:58.000Z | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 3 | 2021-01-26T07:58:03.000Z | 2021-03-11T13:28:34.000Z | from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Set, Tuple, cast
from sqlalchemy import column
from panoramic.cli.husky.common.enum import EnumHelper
from panoramic.cli.husky.core.taxonomy.aggregations import AggregationDefinition
from panoramic.cli.husky.core.taxonomy.enums import AggregationType, TaxonTypeEnum
from panoramic.cli.husky.core.taxonomy.models import Taxon
from panoramic.cli.husky.core.taxonomy.override_mapping.types import (
OverrideMappingTelData,
)
from panoramic.cli.husky.core.tel.exceptions import TelExpressionException
from panoramic.cli.husky.core.tel.result import PostFormula, PreFormula, TaxonToTemplate
from panoramic.cli.husky.core.tel.sql_formula import SqlFormulaTemplate, SqlTemplate
from panoramic.cli.husky.core.tel.tel_dialect import TaxonTelDialect
from panoramic.cli.husky.service.context import HuskyQueryContext
from panoramic.cli.husky.service.filter_builder.filter_clauses import FilterClause
from panoramic.cli.husky.service.types.api_data_request_types import BlendingDataRequest
from panoramic.cli.husky.service.utils.exceptions import (
HuskyInvalidTelException,
InvalidRequest,
)
from panoramic.cli.husky.service.utils.taxon_slug_expression import (
TaxonExpressionStr,
TaxonMap,
)
| 43.743478 | 125 | 0.657191 |
a32bb9ecf389628aa17fb222486d5eb8bc144dcb | 13,836 | py | Python | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | import os
import re
import six
import h5py
import json
import logging
import tensorflow.keras as keras
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.distribute import distributed_file_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util import serialization
def load_optimizer_weights(model, filepath):
    """Loads optimizer weights to compiled model from hdf5 file.

    Mirrors the optimizer-restoring half of ``keras.models.load_model``:
    the optimizer's slot variables are created first, then the values saved
    under the ``optimizer_weights`` HDF5 group are assigned to them.  Any
    failure is logged as a warning and the model keeps a freshly
    initialized optimizer instead of raising.

    Arguments:
        model: Compiled model whose optimizer state should be restored.
        filepath: Path to an HDF5 file, or an already-open ``h5py.File``.

    Returns:
        The same ``model`` instance, for convenience.
    """
    # Only close the handle if we opened it ourselves; a caller-supplied
    # h5py.File stays open for the caller.
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode='r')
    else:
        f = filepath
    try:
        if model.optimizer and 'optimizer_weights' in f:
            try:
                # Slot variables must exist before set_weights() can fill them.
                model.optimizer._create_all_weights(model.trainable_variables)
            except (NotImplementedError, AttributeError):
                # Fixed: the original message contained a bare '{}' placeholder
                # that was never substituted, so the literal braces were logged.
                # Use logging's lazy %-style arguments instead.
                logging.warning(
                    'Error when creating the weights of optimizer %s, making it '
                    'impossible to restore the saved optimizer state. As a result, '
                    'your model is starting with a freshly initialized optimizer.',
                    model.optimizer)
            optimizer_weight_values = hdf5_format.load_optimizer_weights_from_hdf5_group(f)
            try:
                model.optimizer.set_weights(optimizer_weight_values)
            except ValueError:
                # Saved slot shapes/counts no longer match the current model.
                logging.warning('Error in loading the saved optimizer '
                                'state. As a result, your model is '
                                'starting with a freshly initialized '
                                'optimizer.')
    finally:
        if opened_new_file:
            f.close()
    return model
| 44.632258 | 92 | 0.626337 |
a32cb578d2151333cae6b68de0344e2c78b2c29d | 875 | py | Python | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 7 | 2018-01-17T20:26:59.000Z | 2022-03-23T08:12:00.000Z | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 511 | 2017-10-21T17:59:50.000Z | 2022-03-28T18:49:21.000Z | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 2 | 2018-05-02T08:27:42.000Z | 2020-08-17T18:42:49.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 06:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from wapps.utils import get_image_model
| 28.225806 | 121 | 0.693714 |
a32cce774b4abbb45c4fa0ea764a45099fab3182 | 6,888 | py | Python | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import climt
from pylab import *
# Replicates the behavior of the online radiation calculator
# see maths.ucd.ie/~rca
scheme = 'ccm3'
Insolation = 337. #* .75
Imbalance = 30.
Albedo = 30. /100.
CO2 = 350.
CH4 = 1.7 + 1.e-9
N2O = 0. + 1.e-9
TropoHeight = 16.
LapseRate = -6.5
LapseRate_RH = -6.5
BLRelHum = 80. / 100.
LTRelHum = 80. / 100.
UTRelHum = 80. / 100.
STRelHum = 1.e-16 / 100.
TropoHeight_RH = 16.
RH_control = 0
T0_RH = 20. + 273.15
Drop_size = 10.
Cloud_frac_hi = 0.
Cloud_frac_lo = 0.
Cloud_water_hi = 0.
Cloud_water_lo = 0.
zen = 60.
if Cloud_water_lo == 0.: Cloud_frac_lo = 0.
if Cloud_water_hi == 0.: Cloud_frac_hi = 0.
# instantiate radiation objects, get number of levels
r=climt.radiation(scheme=scheme)
nlev=r.nlev
# define some fixed profiles
SurfPres = 1000.
Pressure = ( arange(nlev)+ 0.5 ) * SurfPres/nlev
cldf = zeros( nlev, 'd') # Cloud frac
clwp = zeros( nlev, 'd') # Cloud liquid water path
cloud_lev_hi = int(nlev*0.2) # put high cloud roughly at 200 mb
cloud_lev_lo = int(nlev*0.8) # put low cloud roughly at 800 mb
cldf[cloud_lev_lo] = Cloud_frac_lo
cldf[cloud_lev_hi] = Cloud_frac_hi
clwp[cloud_lev_lo] = Cloud_water_lo
clwp[cloud_lev_hi] = Cloud_water_hi
# dictionary for input into rad call
input={}
input['ps'] = SurfPres
input['lev'] = Pressure
input['cldf'] = cldf
input['clwp'] = clwp
input['solin'] = Insolation
input['r_liq'] = Drop_size + Pressure*0.
input['r_ice'] = Drop_size + Pressure*0.
input['aldir'] = Albedo
input['aldif'] = Albedo
input['asdir'] = Albedo
input['asdif'] = Albedo
input['co2'] = CO2
input['ch4'] = CH4
input['n2o'] = N2O
input['zen'] = zen
#input['o3'] = Pressure*0. + 1.e-16
# functions
def profiles(SurfTemp):
    """
    Compute temp and humidity profiles
    Stratosphere is isothermal

    Given a surface temperature SurfTemp (K), returns (T, q, z): temperature,
    specific humidity and height (km) arrays on the module-level ``Pressure``
    grid.  Reads many module globals (LapseRate, TropoHeight, the relative
    humidity settings, RH_control, the radiation object ``r``, ...).
    """
    # parameters
    Rd = r['Rd']
    g = r['g']
    # assume near-surface temp is 1 K less than surface
    T0 = SurfTemp - 1.
    # scale height (assume dry atmos) see Holton pg. 21
    # Tmean is a lapse-rate-weighted mean temperature of the troposphere.
    Tmean = (T0**2*TropoHeight + LapseRate*T0*TropoHeight**2
             + LapseRate**2*TropoHeight**3)/(T0*TropoHeight +
             LapseRate*TropoHeight**2/2)
    # Same mean, but for the fixed-RH reference profile (T0_RH, LapseRate_RH).
    Tmean_RH = (T0_RH**2*TropoHeight_RH + LapseRate_RH*T0_RH*TropoHeight_RH**2
                + LapseRate_RH**2*TropoHeight_RH**3)/(T0_RH*TropoHeight_RH +
                LapseRate_RH*TropoHeight_RH**2/2)
    H = Rd*Tmean/g * 1.e-3 # [km]
    H_RH = Rd*Tmean_RH/g * 1.e-3 # [km]
    # now compute profiles
    # Hypsometric heights of each pressure level and of the 900/700 mb
    # boundaries separating boundary layer / lower / upper troposphere.
    z = -H*log(Pressure/SurfPres)
    z900 = -H*log(900./SurfPres)
    z700 = -H*log(700./SurfPres)
    T = T0 + LapseRate*z
    Tstrat = T0 + LapseRate*TropoHeight
    q = zeros(nlev, 'd')
    for k in range(nlev-1,-1,-1): # compute from bottom up
        if z[k] <= z900:
            q[k] = climt.thermodyn.qs(T[k],Pressure[k])*BLRelHum
        elif z[k] > z900 and z[k] <= z700:
            q[k] = climt.thermodyn.qs(T[k],Pressure[k])*LTRelHum
        elif z[k] > z700 and z[k] <= TropoHeight:
            q[k] = climt.thermodyn.qs(T[k],Pressure[k])*UTRelHum
        else:
            # Above the tropopause: isothermal stratosphere, (nearly) dry.
            T[k] = Tstrat
            q[k] = 1.e-9 #climt.thermodyn.qsflatau(T[k],Pressure[k],2)*STRelHum
        # Clamp humidity to near-zero below -80 C regardless of layer.
        if T[k] < 273.-80.: q[k] = 1.e-9
    # correct humidity if necessary
    # When RH_control is set, humidity is re-derived from the fixed reference
    # temperature profile (T0_RH / LapseRate_RH) instead of the actual one.
    if RH_control:
        z_RH = -H_RH*log(Pressure/SurfPres)
        T_RH = T0_RH + LapseRate_RH*z_RH
        z900 = -H_RH*log(900./SurfPres)
        z700 = -H_RH*log(700./SurfPres)
        q = zeros(nlev, 'd')
        for k in range(nlev-1,-1,-1): # compute from bottom up
            if z_RH[k] <= z900:
                q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*BLRelHum
            elif z_RH[k] > z900 and z_RH[k] <= z700:
                q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*LTRelHum
            elif z_RH[k] > z700 and z_RH[k] <= TropoHeight_RH:
                q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*UTRelHum
            else:
                # NOTE(review): copies the level above downward; assumes the
                # topmost level was already set on a previous iteration.
                T_RH[k] = T_RH[k+1]
                q[k] = climt.thermodyn.qsflatau(T_RH[k],Pressure[k],2)*STRelHum
    return T, q, z
# Now compute equil surf temp assuming low albedo
try:
Teq = climt.mathutil.ridder_root(TOAFlux, (173.15,353.15), accuracy=0.1)
except climt.mathutil.BracketingException, err:
if str(err) == 'initial interval does not bracket a root: root probably to the right of interval':
print '<P><font color="red"><b>Equilibrium surface temperature exceeds 80 <sup>o</sup>C.</font>'
if str(err) == 'initial interval does not bracket a root: root probably to the left of interval':
print '<P><font color="blue"><b>Equilibrium surface temperature less than -100 <sup>o</sup>C.</font>'
sys.exit(1)
T,q,z = profiles(Teq)
input['T'] = T
input['Ts'] = Teq
input['q'] = q
r(**input)
# print results
print
print 'Equilibrium near-surface air temperature is %4.1f degC (%4.1f K)' % ((Teq-273.15-1.),Teq-1.)
print
print r['SwToaCf'],r['LwToaCf'],Teq
sys.exit()
print 'Profiles'
print("lev p z T q LW flux LW heating SW flux SW heating cld frac cld water\n")
for i in range(r.nlev):
print("%3i %6.1f %7.2f %6.1f %6.2f %10.2f %6.2f %10.2f %6.2f %6.1f %6.1f" % \
(i, Pressure[i], z[i], T[i], q[i], r['lwflx'][i], r['lwhr'][i], r['swflx'][i], r['swhr'][i] , cldf[i], clwp[i]))
# make plot
subplot(231)
T = T-273.15
plot(T,z, 'b-o',linewidth=1,ms=3)
#title(r'$\rm{Temperature} (^__\rm{o}\rm{C})$')
title('Temperature (C)',fontsize=10)
ylabel('height (km)',fontsize=10)
setlims(T,z)
subplot(232)
plot(q,z, 'b-o',linewidth=1,ms=3)
title('Specific humidity (g/kg)',fontsize=10)
setlims(q,z)
subplot(233)
plot(clwp,z, 'b-o',linewidth=1,ms=3)
title('Cloud water path (g/m2)',fontsize=10)
setlims(clwp,z)
ax=subplot(234)
ax.xaxis.set_major_locator(MultipleLocator(50))
plot(r['lwflx'],z, 'b-o',linewidth=1,ms=3)
title('Longwave flux (W/m2)',fontsize=10)
ylabel('height (km)',fontsize=10)
setlims(r['lwflx'],z)
ax=subplot(235)
ax.xaxis.set_major_locator(MultipleLocator(50))
plot(r['swflx'],z, 'b-o',linewidth=1,ms=3)
title('Shortwave flux (W/m2)',fontsize=10)
setlims(r['swflx'],z)
subplot(236)
plot(r['lwhr'],z,'b-o', r['swhr'],z,'r-o', r['swhr']+r['lwhr'],z,'k-o',linewidth=1,ms=3)
title('Heating rates (K/day)',fontsize=10)
legend(('LW', 'SW', 'Total'), 'upper left')
x=r['lwhr'].tolist()
x.extend(r['swhr'].tolist())
x.extend((r['lwhr']+r['swhr']).tolist())
setlims(array(x),z)
#savefig(os.path.join(ImageDir,TimeStamp),dpi=100)
show()
| 30.613333 | 123 | 0.612079 |
a32d307b1fe682f59762c7e5a70a9d45122fc794 | 117 | py | Python | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 1,160 | 2015-05-02T15:13:20.000Z | 2022-03-31T20:04:28.000Z | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 19 | 2015-04-20T13:47:00.000Z | 2021-07-07T13:00:42.000Z | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 257 | 2015-04-01T21:42:33.000Z | 2022-03-10T11:57:51.000Z | {
"ev_get_bg_color" : {
"repl_text" : ("(self, color, ea) -> int", "(self, ea) -> int or None"),
}
}
| 19.5 | 80 | 0.452991 |
a32d410f0fad03a9c0fdccb975ef58812fe45a3f | 4,576 | py | Python | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | 1 | 2020-05-21T23:56:57.000Z | 2020-05-21T23:56:57.000Z | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | #pca model n componentes
from sklearn.decomposition import PCA
import numpy as np
from pylab import rcParams
import matplotlib.pyplot as plt
import pandas as pd
def pca_model_n_components(df, n_components):
    """Fit a PCA limited to a fixed number of components.

    Args:
        df: dataframe to decompose.
        n_components: number of principal components to keep.

    Returns:
        Tuple of (the fitted PCA instance, the transformed data).
    """
    model = PCA(n_components)
    transformed = model.fit_transform(df)
    return model, transformed
def pca_model(df):
    """Fit a full PCA (all components retained).

    Args:
        df: dataframe to decompose.

    Returns:
        Tuple of (the fitted PCA instance, the transformed data).
    """
    model = PCA()
    transformed = model.fit_transform(df)
    return model, transformed
def get_min_components_variance(df, retain_variance):
    """Return the smallest component count reaching a variance target.

    Args:
        df: dataframe to decompose.
        retain_variance: cumulative explained-variance ratio to reach.

    Returns:
        Minimum number of principal components whose cumulative explained
        variance is at least ``retain_variance``.
    """
    fitted_pca, _ = pca_model(df)
    cumulative_variance = np.cumsum(fitted_pca.explained_variance_ratio_)
    qualifying_counts = np.where(cumulative_variance >= retain_variance)[0] + 1
    return min(qualifying_counts)
def plot_curve_min_components_variance(df, mode="cumulative_variance"):
    '''
    Definition:
        plot curve of variance of pca
    args:
        dataframe and mode to be plotted (cumulative_variance or variance)
    returns:
        None, only plot the curve
    '''
    rcParams['figure.figsize'] = 12, 8
    pca, pca_transformed = pca_model(df)
    fig = plt.figure()
    explained_variance = pca.explained_variance_ratio_
    cumulative_sum = np.cumsum(explained_variance)
    n_components = len(explained_variance)
    ind = np.arange(n_components)
    ax = plt.subplot(111)
    if mode == "cumulative_variance":
        title = "Explained Cumulative Variance per Principal Component"
        ylabel = "Cumulative Variance (%)"
        ax.plot(ind, cumulative_sum)
        # For each retention threshold, draw guide lines at the minimum
        # component count that reaches it.  The marks are derived from the
        # cumulative sum computed above instead of re-fitting the PCA once
        # per threshold (the original called get_min_components_variance
        # seven times, each refitting the model): same values, one fit.
        for order, threshold in enumerate([0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99], start=1):
            mark = int(np.where(cumulative_sum >= threshold)[0].min()) + 1
            plt.hlines(y=threshold, xmin=0, xmax=mark, color='green',
                       linestyles='dashed', zorder=order)
            plt.vlines(x=mark, ymin=0, ymax=threshold, color='green',
                       linestyles='dashed', zorder=order + 6)
    else:
        title = "Variance per Principal Component"
        ylabel = "Variance (%)"
        ax.plot(ind, explained_variance)
    ax.set_xlabel("Number of principal components")
    ax.set_ylabel(ylabel)
    plt.title(title)
def report_features(feature_names, pca, component_number):
    """Report the original features' weights within one principal component.

    Args:
        feature_names: names matching the columns the PCA was fitted on.
        pca: fitted PCA model exposing ``components_``.
        component_number: index of the component to report on.

    Returns:
        DataFrame indexed by feature name with a single ``weight`` column,
        sorted by weight in ascending order.
    """
    component = pca.components_[component_number]
    # Build name -> weight mapping (later duplicates of a name win, as before),
    # then order the entries by weight.
    feature_to_weight = dict(zip(feature_names, component))
    ordered = sorted(feature_to_weight.items(), key=lambda item: item[1])
    report = pd.DataFrame(ordered, columns=["feature", "weight"])
    return report.set_index("feature")
| 29.908497 | 101 | 0.671547 |
a32ec2ac9f37deceb74746f32c5ce3fa89c08ee8 | 4,446 | py | Python | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | import datetime
import numpy as np
import json
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
import spacy
from media_analyzer import database
NUM_TOPICS = 20
if __name__ == "__main__":
compute()
| 33.938931 | 97 | 0.634278 |
a3306511794dd745848ecb0131b99b481e38843f | 669 | py | Python | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 125 | 2021-10-01T19:05:26.000Z | 2021-10-03T13:32:42.000Z | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 201 | 2021-10-30T20:40:01.000Z | 2022-03-22T17:26:28.000Z | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 294 | 2021-10-01T18:46:05.000Z | 2021-10-03T14:25:07.000Z | import urllib2
public_ip = "None"
target_url = "http://ip.42.pl/raw"
public_ip = get_public_ip(target_url)
if not "None" in public_ip:
print("Your Public IP address is: %s") % (str(public_ip))
else:
print("Your Public IP address was not found") | 31.857143 | 72 | 0.730942 |
a3325c6fb73e3191f10fa77771bfdc292d1ff768 | 2,586 | py | Python | scraper.py | squash-bit/Automate-Whatsapp-News | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 4 | 2020-11-21T19:08:56.000Z | 2021-05-06T13:09:45.000Z | scraper.py | squash-bit/Agent-Wallie | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 1 | 2021-05-06T19:26:06.000Z | 2021-05-06T19:26:06.000Z | scraper.py | squash-bit/Agent-Wallie | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 1 | 2021-05-06T13:25:08.000Z | 2021-05-06T13:25:08.000Z | # import necessary modules
import os
import re
import requests
import newspaper
from bs4 import BeautifulSoup
from newspaper import Article
from newspaper import Config
from article_summarizer import summarizer
from time import sleep
# clean data
| 39.181818 | 144 | 0.593968 |
a3340d73b31131cbb0f369140b3afe55408788f6 | 1,351 | py | Python | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | from aria.models import Genus, Species, Subspecies
from django import forms
from django.forms import inlineformset_factory
from .templates.templates import createTextInput, createSelectInput
| 29.369565 | 67 | 0.634345 |
a3349b6abd791f21baf0e781406ef6802460401f | 285 | py | Python | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 3 | 2017-04-30T17:44:53.000Z | 2018-02-03T06:02:11.000Z | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 10 | 2021-03-18T20:17:19.000Z | 2022-03-11T23:14:19.000Z | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | null | null | null | import math
print(angle(20))
i = 0
for m in range(0,1440*60):
a = angle(m) / 360
d = a - math.floor(a)
if (d < 0.00001):
print(a, math.floor(a), d, d == 0.0)
i += 1
print(i)
for m in range(25):
print(360*m/5.5)
| 14.25 | 44 | 0.508772 |
a33556dfd1ea6c5a377213bf148dae18a67adec5 | 4,038 | py | Python | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_encrypt08.py
# Test some error conditions with the libsodium encryption extension.
#
import wiredtiger, wttest
from wtscenario import make_scenarios
#
# Test sodium encryption configuration.
# This exercises the error paths in the encryptor's customize method when
# used for system (not per-table) encryption.
#
if __name__ == '__main__':
wttest.run()
| 43.419355 | 87 | 0.698613 |
a336bdbfb6767de53ac20167cacab792872e5ecf | 1,779 | py | Python | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path, include, reverse_lazy
from django.views.generic.base import RedirectView
from rest_framework import permissions
from rest_framework.authtoken import views
from rest_framework.routers import DefaultRouter
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from .users.views import UserViewSet, UserCreateViewSet
router = DefaultRouter()
router.register(r"users", UserViewSet)
router.register(r"users", UserCreateViewSet)
urlpatterns = [
path("admin/", admin.site.urls),
path("api/v1/", include(router.urls)),
path("api-token-auth/", views.obtain_auth_token),
path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
# the 'api-root' from django rest-frameworks default router
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
re_path(r"^$", RedirectView.as_view(url=reverse_lazy("api-root"), permanent=False)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = "{{ cookiecutter.app_title }}"
admin.site.site_title = "{{ cookiecutter.app_title }} Admin Portal"
admin.site.index_title = "{{ cookiecutter.app_title }} Admin"
# Swagger
api_info = openapi.Info(
title="{{ cookiecutter.app_title }} API",
default_version="v1",
description="API Documentation for {{ cookiecutter.app_title }}",
contact=openapi.Contact(email="{{ cookiecutter.email }}"),
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
] | 35.58 | 88 | 0.754918 |
a33868010eb5e7ae344ef9b1e3fe0336947b0c2f | 4,260 | py | Python | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=line-too-long, no-member
from __future__ import print_function
import arrow
import requests
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.utils import timezone
from django.utils.text import slugify
from passive_data_kit_external_sensors.models import SensorRegion, Sensor, SensorLocation, SensorDataPayload, SensorModel
| 38.035714 | 141 | 0.603286 |
a339496b618754603c49253c77c1461b236400c0 | 37,496 | py | Python | fgcm/fgcmConfig.py | erykoff/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 5 | 2018-02-02T15:36:46.000Z | 2021-05-11T21:54:49.000Z | fgcm/fgcmConfig.py | erykoff/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 1 | 2021-08-19T19:56:33.000Z | 2021-08-19T19:56:33.000Z | fgcm/fgcmConfig.py | lsst/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 10 | 2019-01-09T22:50:04.000Z | 2020-02-12T16:36:27.000Z | import numpy as np
import os
import sys
import yaml
from .fgcmUtilities import FocalPlaneProjectorFromOffsets
from .fgcmLogger import FgcmLogger
def _setVarsFromDict(self, d):
    """Copy every key/value pair of ``d`` onto ``self`` as attributes.

    A key is accepted only when an attribute of the same name is declared
    on the class itself; any other key raises ``AttributeError``.
    """
    for name, value in d.items():
        if name not in type(self).__dict__:
            raise AttributeError("Unknown config variable: %s" % (name))
        setattr(self, name, value)
def validate(self):
    """Invoke the ``validate`` hook of every class-level attribute.

    Walks the attributes declared on ``type(self)`` and calls
    ``attribute.validate(name)`` on each; attributes without a usable
    ``validate`` member are skipped silently (the ``AttributeError`` is
    swallowed).
    """
    class_attrs = type(self).__dict__
    for attr_name in class_attrs:
        try:
            class_attrs[attr_name].validate(attr_name)
        except AttributeError:
            pass
def _setDefaultLengths(self):
    """
    No-op placeholder; presumably overridden by subclasses to set default
    list/array lengths — TODO confirm against the full class definition.
    """
    pass
def _convertDictToBandList(self, inputDict, dtype, default,
                           required=False, ndarray=False, dictName=''):
    """
    Expand a per-band dict into a band-ordered list or ndarray.

    Parameters
    ----------
    inputDict : `dict`
        Mapping from band name to value.
    dtype : `type`
        Element type of the output when ``ndarray`` is True.
    default : value of dtype
        Value used for bands missing from ``inputDict``.
    required : `bool`, optional
        Require every band in ``self.bands`` to appear in ``inputDict``.
    ndarray : `bool`, optional
        Return an ndarray (True) or a plain list (False).
    dictName : `str`, optional
        Name of the dict, used in the error message when ``required`` fails.

    Returns
    -------
    bandOrderedList : `ndarray` or `list`

    Raises
    ------
    RuntimeError
        If ``required`` is True and any band is missing from ``inputDict``.
    """
    nBands = len(self.bands)
    # Seed the output with the default for every band.  The
    # zeros-plus-default form is kept deliberately so numpy's usual type
    # promotion applies when ``default`` and ``dtype`` differ.
    if ndarray:
        bandValues = np.zeros(nBands, dtype=dtype) + default
    else:
        bandValues = [default] * nBands
    if required:
        for band in self.bands:
            if band not in inputDict:
                raise RuntimeError("All bands must be listed in %s" % (dictName))
    # Overwrite the defaults for every band actually present in the input.
    for index, band in enumerate(self.bands):
        if band in inputDict:
            bandValues[index] = inputDict[band]
    return bandValues
| 46.063882 | 139 | 0.618759 |
a339e1e76e5e76805cb412ba27d8dde8548e8e54 | 1,474 | py | Python | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | 6 | 2021-06-29T11:26:49.000Z | 2022-01-20T18:12:47.000Z | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | null | null | null | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | 9 | 2018-06-28T13:06:35.000Z | 2021-06-20T18:21:58.000Z | #Bibeta in action.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import beta
from scipy.stats import randint
a_1 = np.linspace(0,10,100)
a_2 = np.linspace(0,10,100)
b_1 = np.linspace(0,10,100)
b_2 = np.linspace(0,10,100)
pi = np.linspace(0,1,10)
input_space = np.linspace(0,1,1000)
pi_rvs = randint.rvs(0,10)
a_1_rvs = randint.rvs(0,100)
a_2_rvs = randint.rvs(0,100)
b_1_rvs = randint.rvs(0,100)
b_2_rvs = randint.rvs(0,100)
a = a_1[a_1_rvs]
b = b_1[b_1_rvs]
a1 = a_1[a_1_rvs]
a2 = a_2[a_2_rvs]
b1 = b_1[b_1_rvs]
b2 = b_2[b_2_rvs]
p = pi[pi_rvs]
beta_cdf = beta.cdf(input_space,a,b)
bibeta_cdf = p*beta.cdf(input_space,a1,b1) + (1-p)*beta.cdf(input_space,a2,b2)
plot_1D_function(input_space, input_space, 'Input Space')
plot_1D_function(input_space, beta_cdf, 'Beta cdf')
plot_1D_function(input_space, bibeta_cdf, 'Bibeta cdf')
plot_1D_function(input_space, fixed_objective_function(input_space), 'Objective')
plot_1D_function(input_space, fixed_objective_function(beta_cdf), 'Warped Objective Beta')
plot_1D_function(input_space, fixed_objective_function(bibeta_cdf), 'Warped Objective Bibeta')
| 29.48 | 94 | 0.720488 |
a33b33e393caf1662689964e235489b7f1ad1bdc | 1,350 | py | Python | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | 1 | 2021-04-18T16:09:17.000Z | 2021-04-18T16:09:17.000Z | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | 6 | 2018-09-03T06:46:39.000Z | 2019-05-25T21:42:17.000Z | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from scatteringmatrix import __version__
# Use the README verbatim as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata.  The version string is imported from the package itself
# (scatteringmatrix.__version__) so it lives in a single place.
setup(name='scatteringmatrix',
      version=__version__,
      description='Optical scattering matrix library',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='Andrew G. Flood',
      author_email='andrew.flood@mail.utoronto.ca',
      url='https://github.com/agflood/scatteringmatrix',
      license='MIT',
      # Trove classifiers shown on PyPI.
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Science/Research',
          'Topic :: Scientific/Engineering',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
      ],
      keywords='optics scattering matrix photonics',
      packages=find_packages(exclude=('tests', 'docs', 'sphinx')),
      py_modules=["scatteringmatrix"],
      install_requires=['numpy','scipy'],
      python_requires='>=3',
      zip_safe=False)
| 39.705882 | 66 | 0.60963 |
a33e27da81bb7bce9e69f05f780fb86f4b322234 | 501 | py | Python | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | 1 | 2021-05-22T17:29:17.000Z | 2021-05-22T17:29:17.000Z | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | null | null | null | tests/test_trinomial.py | caltechlibrary/trinomial | 0313e16400aca50f54bfe79080a10a1977702e9a | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import sys
from time import time
try:
thisdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(thisdir, '..'))
except:
sys.path.append('..')
import trinomial
| 22.772727 | 56 | 0.686627 |
a33e48a10e11e80e4f5b747b61f4474ae03b0923 | 667 | py | Python | openprocurement/tender/openeu/views/award_complaint_document.py | leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | 1 | 2016-02-02T09:55:08.000Z | 2016-02-02T09:55:08.000Z | openprocurement/tender/openeu/views/award_complaint_document.py | Leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | 2 | 2021-03-26T00:35:37.000Z | 2022-03-21T22:21:31.000Z | openprocurement/tender/openeu/views/award_complaint_document.py | leits/openprocurement.tender.openeu | 6e8b6650a23761cc09794030583206029a2928e8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from openprocurement.api.utils import opresource
from openprocurement.tender.openua.views.award_complaint_document import TenderUaAwardComplaintDocumentResource
| 51.307692 | 111 | 0.773613 |
a33e4ece404ced51ee4f1506f207476b0d455c63 | 2,398 | py | Python | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 147 | 2019-12-23T02:52:04.000Z | 2022-03-06T16:30:43.000Z | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 4 | 2020-12-18T12:47:21.000Z | 2021-05-21T02:18:01.000Z | pymic/layer/activation.py | vincentme/PyMIC | 5cbbca7d0a19232be647086d4686ceea523f45ee | [
"Apache-2.0"
] | 32 | 2020-01-08T13:48:50.000Z | 2022-03-12T06:31:13.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
| 31.552632 | 67 | 0.607173 |
a33eb973d0edc831eea7bb11066042e56e9c2e88 | 3,359 | py | Python | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | 9 | 2022-03-15T02:02:30.000Z | 2022-03-18T16:16:59.000Z | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | null | null | null | ui/flowlayout.py | amadotejada/self-portal | c508fb120548f3eb65e872d08a823d3942fc650d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Amado Tejada
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt5.QtCore import QPoint, QRect, QSize, Qt
from PyQt5.QtWidgets import QLayout, QSizePolicy
| 31.688679 | 112 | 0.575171 |
a340df3cf71eb1be1675fbe29cece65cbcc98d43 | 3,183 | py | Python | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 1 | 2020-04-18T11:16:02.000Z | 2020-04-18T11:16:02.000Z | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 6 | 2020-04-13T18:38:04.000Z | 2022-03-12T00:55:56.000Z | methods/smartdumpRJ.py | wdempsey/sense2stop-lvm | ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2 | [
"CECILL-B"
] | 1 | 2020-07-02T04:47:00.000Z | 2020-07-02T04:47:00.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:29:26 2020
@author: Walter Dempsey & Jamie Yap
"""
#%%
###############################################################################
# Build a RJMCMC class
###############################################################################
from pymc import Stochastic, Deterministic, Node, StepMethod
from numpy import ma, random, where
from numpy.random import random
from copy import deepcopy
| 31.514851 | 88 | 0.602576 |
a34205264c406b528a6fcfa5ac69debf00a2b02c | 2,021 | py | Python | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | null | null | null | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | 7 | 2018-09-08T20:07:43.000Z | 2021-12-13T19:54:53.000Z | tests/test_slack_user.py | tomcooperca/mlb-slack-tracker | bbfd8ed6f0c345d5286813a6cd4b04e0557a762a | [
"MIT"
] | null | null | null | from unittest.mock import MagicMock
from slack.user import User
from baseball.team import Team
reusableUser = User(token='blah', id='UB00123', team=None)
testTeam = Team(abbreviation='CN', location='City Name',
full_name='City Name Players', record='0W-162L', division='CL Beast',
wins=0, losses=162, standing=5, todays_game_text='CN@BOB',
todays_game_score='1-0')
| 34.844828 | 74 | 0.730332 |
a342151afcda4ba72f2d257247a2de01de22ba98 | 1,934 | py | Python | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | 2 | 2018-02-05T01:27:07.000Z | 2018-06-10T02:02:25.000Z | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | null | null | null | tmuxp/testsuite/test_workspacefreezer.py | wrongwaycn/tmuxp | 367cca3eb1b3162bb7e4801fe752b520f1f8eefa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import logging
import time
import kaptan
from .. import Window, config, exc
from ..workspacebuilder import WorkspaceBuilder, freeze
from .helpers import TmuxTestCase
logger = logging.getLogger(__name__)
current_dir = os.path.abspath(os.path.dirname(__file__))
example_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))
| 23.585366 | 80 | 0.609617 |
a343d4acc1180ec43471b02424e6695cc4893a9e | 9,132 | py | Python | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 36 | 2020-11-19T07:23:42.000Z | 2022-03-30T03:35:57.000Z | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 4 | 2021-01-30T09:49:10.000Z | 2021-12-05T12:49:11.000Z | libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v15.py | Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | [
"Apache-2.0"
] | 6 | 2020-11-23T07:54:47.000Z | 2021-07-09T07:20:15.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
GCL + OMEGA = 180 / 512.
{'0.6': {'ground-track-field': 0.573582489319409, 'harbor': 0.3891521609424017, 'bridge': 0.2563337419887201, 'small-vehicle': 0.5648505388890961, 'plane': 0.8953705097216129, 'baseball-diamond': 0.6304525425142407, 'tennis-court': 0.9068133847959017, 'roundabout': 0.5504477682851595, 'storage-tank': 0.7818913345802345, 'swimming-pool': 0.39985514157699587, 'mAP': 0.5792389738191542, 'soccer-ball-field': 0.624200360919821, 'basketball-court': 0.5216235844619704, 'large-vehicle': 0.5246429570098051, 'ship': 0.7314627227976299, 'helicopter': 0.3379053694843169},
'0.8': {'ground-track-field': 0.2640926811979444, 'harbor': 0.0994356798615974, 'bridge': 0.09090909090909091, 'small-vehicle': 0.14845898197949595, 'plane': 0.5189377689746963, 'baseball-diamond': 0.14224201616818288, 'tennis-court': 0.7850084962037644, 'roundabout': 0.2161224596513639, 'storage-tank': 0.4032224420253035, 'swimming-pool': 0.021645021645021644, 'mAP': 0.25175554640925113, 'soccer-ball-field': 0.38894355893358884, 'basketball-court': 0.361673373734271, 'large-vehicle': 0.08588614768791838, 'ship': 0.18384638625743577, 'helicopter': 0.06590909090909092},
'mmAP': 0.35923286694026607,
'0.7': {'ground-track-field': 0.4385066163040262, 'harbor': 0.2004849369462918, 'bridge': 0.13189991198289955, 'small-vehicle': 0.41173024457583235, 'plane': 0.7905792123899915, 'baseball-diamond': 0.33846255142519494, 'tennis-court': 0.9031235090086663, 'roundabout': 0.45296468077000096, 'storage-tank': 0.6792869554877644, 'swimming-pool': 0.1969023557455042, 'mAP': 0.4448856961613535, 'soccer-ball-field': 0.5147552299156577, 'basketball-court': 0.47906270045099153, 'large-vehicle': 0.3334752568068329, 'ship': 0.5709906745500424, 'helicopter': 0.23106060606060608},
'0.9': {'ground-track-field': 0.013986013986013986, 'harbor': 0.002932551319648094, 'bridge': 0.000282326369282891, 'small-vehicle': 0.0031978072179077205, 'plane': 0.12144979203802733, 'baseball-diamond': 0.09090909090909091, 'tennis-court': 0.3105592596206337, 'roundabout': 0.09090909090909091, 'storage-tank': 0.043532372020744114, 'swimming-pool': 0.00029231218941829873, 'mAP': 0.05292676216204492, 'soccer-ball-field': 0.05524475524475524, 'basketball-court': 0.045454545454545456, 'large-vehicle': 0.006060606060606061, 'ship': 0.009090909090909092, 'helicopter': 0.0},
'0.65': {'ground-track-field': 0.5256384950288536, 'harbor': 0.2916501930015581, 'bridge': 0.17809220559814648, 'small-vehicle': 0.5129586251041002, 'plane': 0.8894034686906369, 'baseball-diamond': 0.5249010996303538, 'tennis-court': 0.9050013758244457, 'roundabout': 0.504625741843787, 'storage-tank': 0.7537275931713616, 'swimming-pool': 0.2889168538278225, 'mAP': 0.5213593647460195, 'soccer-ball-field': 0.5539343130129118, 'basketball-court': 0.5139638068449094, 'large-vehicle': 0.4321755180088217, 'ship': 0.6335125302514466, 'helicopter': 0.3118886513511373},
'0.5': {'ground-track-field': 0.5817047190853409, 'harbor': 0.5423160296407179, 'bridge': 0.37985530785380944, 'small-vehicle': 0.6212558927508246, 'plane': 0.8991382954230245, 'baseball-diamond': 0.6884909042118417, 'tennis-court': 0.9074714532809276, 'roundabout': 0.6247024980791215, 'storage-tank': 0.7908352165588822, 'swimming-pool': 0.5101446981453137, 'mAP': 0.6433669597686625, 'soccer-ball-field': 0.709771501950316, 'basketball-court': 0.5437748871261118, 'large-vehicle': 0.6161368250574863, 'ship': 0.8084240148818748, 'helicopter': 0.4264821524843431},
'0.55': {'ground-track-field': 0.575700748371701, 'harbor': 0.48360728773857997, 'bridge': 0.32298317197853993, 'small-vehicle': 0.6060592932618177, 'plane': 0.8978626322707085, 'baseball-diamond': 0.657004331905233, 'tennis-court': 0.907337369076047, 'roundabout': 0.6011977619793185, 'storage-tank': 0.7885043330695543, 'swimming-pool': 0.48472692462266914, 'mAP': 0.6140150681924789, 'soccer-ball-field': 0.6472686724945429, 'basketball-court': 0.5309924718578253, 'large-vehicle': 0.5552623519506533, 'ship': 0.750600756135258, 'helicopter': 0.40111791617473436},
'0.95': {'ground-track-field': 0.0, 'harbor': 0.0, 'bridge': 0.0, 'small-vehicle': 0.00010078613182826043, 'plane': 0.004102785575469661, 'baseball-diamond': 0.0, 'tennis-court': 0.09090909090909091, 'roundabout': 0.0016835016835016834, 'storage-tank': 0.003621876131836291, 'swimming-pool': 0.0, 'mAP': 0.007933510175509946, 'soccer-ball-field': 0.018181818181818184, 'basketball-court': 0.0, 'large-vehicle': 0.00025826446280991736, 'ship': 0.00014452955629426219, 'helicopter': 0.0},
'0.85': {'ground-track-field': 0.12179691653375865, 'harbor': 0.00818181818181818, 'bridge': 0.011363636363636364, 'small-vehicle': 0.020008904011782284, 'plane': 0.3041595005123823, 'baseball-diamond': 0.10876623376623376, 'tennis-court': 0.6415239979360767, 'roundabout': 0.1266637317484775, 'storage-tank': 0.21079632046855917, 'swimming-pool': 0.004329004329004329, 'mAP': 0.1360229133672777, 'soccer-ball-field': 0.17866004962779156, 'basketball-court': 0.18620689655172412, 'large-vehicle': 0.02561482058270067, 'ship': 0.07928485690820646, 'helicopter': 0.012987012987012986},
'0.75': {'ground-track-field': 0.38324233567107485, 'harbor': 0.11957411957411958, 'bridge': 0.10577255444175597, 'small-vehicle': 0.2773328982910034, 'plane': 0.6717961393802804, 'baseball-diamond': 0.18744781108289382, 'tennis-court': 0.80974614279133, 'roundabout': 0.3273415371813541, 'storage-tank': 0.5539919596357566, 'swimming-pool': 0.0639939770374553, 'mAP': 0.3408238746009085, 'soccer-ball-field': 0.4580894506562955, 'basketball-court': 0.42804302074314954, 'large-vehicle': 0.2186913819763849, 'ship': 0.3686584269144099, 'helicopter': 0.13863636363636364}}
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_DCL_G_2x_20200929'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 2000
SAVE_WEIGHTS_INTE = 20673 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTATrain' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
OMEGA = 180 / 512.
ANGLE_MODE = 1
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180 # 90 or 180
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
| 71.34375 | 583 | 0.727551 |
a34438fcd2d05af774f8b7d208037ebd093f49f3 | 1,488 | py | Python | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 6 | 2016-07-20T07:37:07.000Z | 2022-01-14T06:35:26.000Z | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 1 | 2020-03-29T05:13:58.000Z | 2020-03-29T05:13:58.000Z | test.py | KyleJeong/ast_calculator | cf65ad76739839ac4b3df36b82862612d6bd4492 | [
"MIT"
] | 1 | 2020-03-29T04:29:36.000Z | 2020-03-29T04:29:36.000Z | """
Test cases for AST calculator
"""
from unittest import TestCase
from calc import evaluate
| 23.619048 | 62 | 0.511425 |
a3446186d570e00d5586a746c5e62060ac9246b6 | 315 | py | Python | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 2 | 2019-06-10T19:30:06.000Z | 2020-04-30T01:05:04.000Z | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | null | null | null | app/main/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 3 | 2019-01-23T21:37:29.000Z | 2020-04-08T13:22:29.000Z | from flask import request
from app.main.extensions import cache
| 19.6875 | 83 | 0.733333 |
a344e089a4efbd0afbd6c50e23cff7269d7dd9c8 | 1,931 | py | Python | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 16 | 2015-05-04T18:47:33.000Z | 2021-02-03T17:10:40.000Z | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 4 | 2015-09-08T14:48:31.000Z | 2016-09-09T09:49:41.000Z | gum/managers.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 2 | 2015-05-04T18:39:23.000Z | 2016-04-18T14:35:47.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from gum.utils import elasticsearch_connection
| 32.183333 | 86 | 0.658726 |
a34991845be5613841f0b124224655a27cd95755 | 1,732 | py | Python | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | null | null | null | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | null | null | null | app.py | u-aaa/House-_prediction_model | 4808b4aefb802520a7ccd878c342699093e6942d | [
"MIT"
] | 1 | 2021-09-23T19:42:36.000Z | 2021-09-23T19:42:36.000Z | import pickle
import json
import numpy as np
from flask import Flask, request, jsonify
app = Flask(__name__)
with open('models/regressor.pkl', 'rb') as f:
model = pickle.load(f)
def __process_input(posted_data) -> np.array:
    '''
    Parse a JSON request body into the 2D feature array the model expects.

    :param posted_data: raw JSON string of the form {"features": [...]} where
        the value is either a single feature vector (13 numbers) or a list of
        such vectors.
    :return: np.array of shape (n_samples, 13), or None when the payload is
        missing, malformed, or has the wrong shape.
    '''
    try:
        payload = json.loads(posted_data)
        features = np.array(payload['features'])
        # Anything deeper than a batch of vectors is rejected outright.
        if features.ndim > 2:
            return None
        # Promote a single feature vector to a one-row batch.
        if features.ndim == 1:
            features = features.reshape(1, -1)
        # Only a proper 2D batch with exactly 13 features per row is valid.
        # The explicit ndim check also rejects 0-d scalar payloads, which
        # previously slipped past and raised an uncaught IndexError on
        # shape[-1].
        if features.ndim == 2 and features.shape[-1] == 13:
            return features
        return None
    except (KeyError, TypeError, ValueError):
        # KeyError: no "features" key; TypeError: posted_data not str/bytes;
        # ValueError: invalid JSON (json.JSONDecodeError) or a ragged array.
        return None
if __name__ == '__main__':
app.run()
| 28.393443 | 105 | 0.639723 |
a34ab44ceb198f7ffec0e7a91a4d37823eb68330 | 4,734 | py | Python | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | sobit_bringup/src/joint_state_listner.py | Choi-Laboratory/sobit_blue | 81a228390e1726653d54f33afb3fbb1c1fdb5b24 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
import math
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from sobit_bringup.msg import *
#--------------------------------
motion = [0]*21
TIME = 0.1
serial_joint = Serial_motion()
state_jointstate = JointState()
state_jointstate.name =["L_wheel","R_wheel","L_shoulder_roll","L_shoulder_pitch","L_elbow_yaw","L_shoulder_pitch","R_shoulder_roll","R_shoulder_pitch","R_elbow_yaw","R_elbow_pitch","neck_pitch","neck_roll","neck_yaw","L_hand_twist","L_hand_thumb","L_hand_index","L_hand_mid","L_hand_ring","L_hand_pinky","R_hand_twist"]
####[]-------------------------------------------------------------------
####[JOINT_STATE CALLBACK]-------------------------------------------------------------------------------------
####[]#################################################################################################################
if __name__ == '__main__':
rospy.init_node('joint_listner')
sub = rospy.Subscriber('sobit/joint_states', JointState, callback1) #joint_state
rospy.spin()
| 37.872 | 322 | 0.603929 |
a34d2a23f38ff576e6a5ef0f805165729d2fc6ef | 2,789 | py | Python | scalex/metrics.py | jsxlei/SCALEX | 021c6d35a0cebeaa1f59ea53b9b9e22015ce6e5f | [
"MIT"
] | 11 | 2021-04-09T02:46:29.000Z | 2022-01-04T16:42:44.000Z | scale/metrics.py | QingZhan98/SCALE_v2 | 69bb02beee40ec085684335f356798d4dcb53fbc | [
"MIT"
] | 2 | 2021-04-18T02:30:18.000Z | 2022-03-05T10:40:00.000Z | scale/metrics.py | QingZhan98/SCALE_v2 | 69bb02beee40ec085684335f356798d4dcb53fbc | [
"MIT"
] | 4 | 2021-03-29T12:34:47.000Z | 2022-03-06T12:42:45.000Z | #!/usr/bin/env python
"""
# Author: Xiong Lei
# Created Time : Thu 10 Jan 2019 07:38:10 PM CST
# File Name: metrics.py
# Description:
"""
import numpy as np
import scipy
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
def batch_entropy_mixing_score(data, batches, n_neighbors=100, n_pools=100, n_samples_per_pool=100):
    """
    Calculate batch entropy mixing score
    Algorithm
    -----
    * 1. Calculate the regional mixing entropies at the location of 100 randomly chosen cells from all batches
    * 2. Define 100 nearest neighbors for each randomly chosen cell
    * 3. Calculate the mean mixing entropy as the mean of the regional entropies
    * 4. Repeat above procedure for 100 iterations with different randomly chosen cells.
    Parameters
    ----------
    data
        np.array of shape nsamples x nfeatures.
    batches
        batch labels of nsamples.
    n_neighbors
        The number of nearest neighbors for each randomly chosen cell. By default, n_neighbors=100.
    n_samples_per_pool
        The number of randomly chosen cells from all batches per iteration. By default, n_samples_per_pool=100.
    n_pools
        The number of iterations with different randomly chosen cells. By default, n_pools=100.
    Returns
    -------
    Batch entropy mixing score
    """
    # print("Start calculating Entropy mixing score")
    # Cannot ask for more neighbors than there are other points.
    n_neighbors = min(n_neighbors, len(data) - 1)
    # +1 neighbor because each point's self-edge is subtracted out below.
    nne = NearestNeighbors(n_neighbors=1 + n_neighbors, n_jobs=8)
    nne.fit(data)
    # Sparse adjacency matrix of the kNN graph with self-loops removed.
    kmatrix = nne.kneighbors_graph(data) - scipy.sparse.identity(data.shape[0])
    score = 0
    batches_ = np.unique(batches)
    N_batches = len(batches_)
    if N_batches < 2:
        raise ValueError("Should be more than one cluster for batch mixing")
    # NOTE(review): P (global batch proportions) is computed but never used
    # afterwards — presumably a leftover from a frequency-weighted variant.
    P = np.zeros(N_batches)
    for i in range(N_batches):
        P[i] = np.mean(batches == batches_[i])
    for t in range(n_pools):
        # Sample cells (with replacement) whose neighborhoods get scored.
        indices = np.random.choice(np.arange(data.shape[0]), size=n_samples_per_pool)
        # For sampled cell i, nonzero()[0] == i selects its rows of the kNN
        # submatrix and nonzero()[1] yields the neighbor indices; `entropy`
        # is expected to be defined elsewhere in this module — TODO confirm.
        score += np.mean([entropy(batches[kmatrix[indices].nonzero()[1]
                           [kmatrix[indices].nonzero()[0] == i]])
                           for i in range(n_samples_per_pool)])
    Score = score / float(n_pools)
    # Normalized by log2(#batches); assuming `entropy` is base-2 this caps a
    # perfectly mixed dataset at ~1 — confirm against entropy's definition.
    return Score / float(np.log2(N_batches))
from sklearn.metrics import silhouette_score | 34.8625 | 114 | 0.639656 |
a34d8a9377344d0edaae44dd4947affb31816584 | 189 | py | Python | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | src/miu/colorbilding.py | memowe/miniciti-user | ce1cbccdb693c4916217e1395eacb14f06d536a1 | [
"MIT"
] | null | null | null | from miniciti.bilding import Bilding
| 18.9 | 36 | 0.650794 |
a34e461868bd92e65252352e4554823a69ea35c7 | 2,603 | py | Python | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | examples/data/create_data.py | fdabek1/EHR-Functions | e6bd0b6fa213930358c4a19be31c459ac7430ca9 | [
"MIT"
] | null | null | null | import pandas as pd
import random
import time
# Source: https://stackoverflow.com/a/553320/556935
def str_time_prop(start, end, date_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, date_format))
etime = time.mktime(time.strptime(end, date_format))
ptime = stime + prop * (etime - stime)
return time.strftime(date_format, time.localtime(ptime))
if __name__ == '__main__':
random.seed(3)
basic()
encounters()
| 31.743902 | 102 | 0.579332 |
a350ecde028977958b337223398f9351c3e4bbec | 1,317 | py | Python | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | contests/ccpc20qhd/f超时.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
#ccpc20qhd-f =>
#,,
#=-
#(>0)?
#DFS,? ?
#
O(V+E)..
Runtime Error?
:
1: DFS
2: !
'''
t = int(input())
for i in range(t):
n,m = list(map(int,input().split()))
l = [list(map(int,input().split())) for _ in range(m)]
print('Case #%d: %s'%((i+1), f(n,l)))
| 23.105263 | 82 | 0.430524 |
a3525d2e36b057b387fd2a242a0be1258c2a7481 | 2,920 | py | Python | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | null | null | null | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | 1 | 2021-10-19T08:09:44.000Z | 2021-10-19T08:09:44.000Z | test/feature_extraction/list_counter_test.py | tmhatton/MLinPractice | 759706e13181cec864d6aa8ece9ae7042f083e4c | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from code.feature_extraction.list_counter import PhotosNum, URLsNum, HashtagNum, MentionNum, TokenNum
from code.util import COLUMN_PHOTOS, COLUMN_URLS, COLUMN_HASHTAGS, COLUMN_MENTIONS
if __name__ == '__main__':
unittest.main()
| 30.736842 | 161 | 0.667466 |
a352f55dcd4b6a9dcf2653a39663d590b4d79e27 | 926 | py | Python | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 4 | 2022-03-05T20:51:38.000Z | 2022-03-15T17:10:22.000Z | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | null | null | null | tests/test_2_promethee.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 1 | 2022-03-08T13:45:22.000Z | 2022-03-08T13:45:22.000Z | import unittest
from operator import index
from EasyMCDM.models.Promethee import Promethee
if __name__ == '__main__':
unittest.main() | 42.090909 | 161 | 0.565875 |
a354ea47baa38abfde41024d2fd179d6d96966cf | 1,207 | py | Python | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 1 | 2021-05-25T09:33:28.000Z | 2021-05-25T09:33:28.000Z | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | 6 | 2020-06-06T01:59:09.000Z | 2021-06-10T20:17:56.000Z | testbed_frontend/api/emulation/emulation_handler.py | Ncu-software-research-center/IIOT-testbed | b4c8f91d1fd1e596d2262152ce99afeb22976f7a | [
"Apache-2.0"
] | null | null | null | '''
Vortex OpenSplice
This software and documentation are Copyright 2006 to TO_YEAR ADLINK
Technology Limited, its affiliated companies and licensors. All rights
reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from functools import wraps
import redis
from api.emulation import (
Config,
EmulationStatus
)
| 32.621622 | 92 | 0.729909 |
a3551761361e06ddd937cee500aed18df74cd70f | 2,027 | py | Python | torchtraining/functional/metrics/regression.py | szymonmaszke/torchtraining | 1ddf169325b7239d6d6686b20072a406b69a0180 | [
"MIT"
] | 3 | 2020-08-26T06:11:58.000Z | 2020-08-27T08:11:15.000Z | torchtraining/functional/metrics/regression.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2020-08-25T19:19:43.000Z | 2020-08-25T19:19:43.000Z | torchtraining/functional/metrics/regression.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2021-04-15T18:55:57.000Z | 2021-04-15T18:55:57.000Z | import typing
import torch
from .. import utils
| 27.026667 | 84 | 0.693143 |
a3552615d55b8131f79fc858dd41da8c30cf2d71 | 6,028 | py | Python | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/game/systems/puzzle/hold.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | 1 | 2019-11-27T18:00:00.000Z | 2019-11-27T18:00:00.000Z | #!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""hold.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
"""
import pieces as _pieces
import utils.const as _const
import utils.layouter as _layouter
| 31.233161 | 77 | 0.535169 |
a35602a1c5d4bcf343e77bdb5e4000c799357ee5 | 347 | py | Python | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | 8 | 2019-11-15T18:15:56.000Z | 2020-02-03T18:05:05.000Z | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | from urllib.parse import urlparse
| 21.6875 | 44 | 0.570605 |
a35af943a1738408edb737fd87daf987635bfda0 | 1,554 | py | Python | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | null | null | null | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | null | null | null | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | [
"Apache-2.0"
] | 2 | 2021-09-28T00:24:21.000Z | 2022-03-09T13:38:29.000Z | import cv2
import numpy as np
# draw_ped() function to draw bounding box with top labeled text | 39.846154 | 135 | 0.47426 |
a35b39c11aff2330ec7aa6556e235a658417a015 | 2,204 | py | Python | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | [
"Apache-2.0"
] | null | null | null | """
Code modified from:
apps.fishandwhistle.net/archives/1155
"""
from __future__ import print_function
import serial
import sys
import glob
port_list = {}
def identifyPort(port):
"""
tests the port and identifies what device is attached to it from probing it
:param port:
:return: a port list dict with the tho porst for 'GPS' and 'Sonar'
"""
global port_list
try:
with serial.Serial(port, baudrate=4800, timeout=1) as ser:
# read 10 lines from the serial output
for i in range(10):
line = ser.readline().decode('ascii', errors='replace')
msg = line.split(',')
if msg[0] == '$GPRMC':
port_list['GPS'] = port
return
elif msg[0] == '$SDDBT':
port_list['Sonar'] = port
return
except Exception as e:
print(e)
def _scan_ports():
"""
scan the ports on various devices including Windows, linux, and OSX
:return:
"""
if sys.platform.startswith('win'):
print("scan Windows")
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
print("scan Linux")
# this excludes your current terminal "/dev/tty"
patterns = ('/dev/tty[A-Za-z]*', '/dev/ttyUSB*')
ports = [glob.glob(pattern) for pattern in patterns]
ports = [item for sublist in ports for item in sublist] # flatten
elif sys.platform.startswith('darwin'):
print("scan Darwin")
patterns = ('/dev/*serial*', '/dev/ttyUSB*', '/dev/ttyS*')
ports = [glob.glob(pattern) for pattern in patterns]
ports = [item for sublist in ports for item in sublist] # flatten
else:
raise EnvironmentError('Unsupported platform')
return ports
def getPorts():
"""
get the ports
:return: return the ports dict
"""
ports = _scan_ports()
print(ports)
for port in ports:
identifyPort(port)
global port_list
return port_list
if __name__ == "__main__":
test()
| 26.878049 | 79 | 0.583031 |
a35c7cddf46b7abcc142f392526fdba0c6a3aa7e | 112 | py | Python | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | test/python/test_utils.py | AdityaSidharta/docker_base | ccdbe7f10e4ddb329dfc106734b3e58226cd0b05 | [
"MIT"
] | null | null | null | from app.python.utils import get_datetime
| 18.666667 | 42 | 0.776786 |
a35f847cfae16fa50a6998fa4b3afcf7165085cb | 883 | py | Python | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 955 | 2018-05-16T17:10:12.000Z | 2022-03-30T20:14:26.000Z | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 490 | 2018-05-16T08:00:22.000Z | 2022-03-28T21:14:39.000Z | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 243 | 2018-05-17T11:07:24.000Z | 2022-03-27T18:01:07.000Z | from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.plugins.runners import RunnersPluginRegister
from nornir.plugins.inventory import SimpleInventory
from nornir.plugins.runners import SerialRunner, ThreadedRunner
from nornir_utils.plugins.inventory import YAMLInventory
| 33.961538 | 65 | 0.737259 |
a3644bcfb5d4ed17d821b83cba8aacde7ddfe23f | 1,411 | py | Python | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | 2 | 2019-07-13T14:15:30.000Z | 2020-01-04T10:44:47.000Z | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | null | null | null | setup.py | hodgestar/tesseract-control-software | 41f47a4b901a0069f1745c90abe28f0778704b0e | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="tessled",
version="0.0.1",
url='http://github.com/hodgestar/tesseract-control-software',
license='MIT',
description="Tesseract control software and simulator.",
long_description=open('README.rst', 'r').read(),
author='Simon Cross',
author_email='hodgestar+tesseract@gmail.com',
packages=find_packages(),
include_package_data=True,
install_requires=[
'click',
'numpy',
'pillow',
'zmq',
],
extras_require={
'simulator': ['faulthandler', 'pygame_cffi', 'PyOpenGL'],
'spidev': ['wiringpi', 'spidev'],
},
entry_points={ # Optional
'console_scripts': [
'tesseract-effectbox=tessled.effectbox:main',
'tesseract-simulator=tessled.simulator:main',
'tesseract-spidev-driver=tessled.spidev_driver:main',
],
},
scripts=[
'bin/tesseract-runner',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Games/Entertainment',
],
)
| 30.673913 | 65 | 0.593196 |
a364c15aa063e5f5b9ce9b053b0dc00b7991aba9 | 45 | py | Python | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | null | null | null | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | null | null | null | config.py | grimpy/glunit | ed8b8fabc8539abe94a9bf93418b95d006283066 | [
"MIT"
] | 1 | 2019-03-02T12:32:40.000Z | 2019-03-02T12:32:40.000Z | GITLAB_URL = "XXXXXX"
GITLAB_TOKEN = "XXXXX"
| 15 | 22 | 0.733333 |
a3670442cb8f6ed8744f92e8d59bbfa74b3455a4 | 481 | py | Python | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | null | null | null | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | 2 | 2015-10-08T15:41:56.000Z | 2015-10-08T15:50:48.000Z | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
def Epoch(data):
'''Patching Epoch timestamps.'''
for record in data:
record['last_updated'] = time.strftime('%Y-%m-%d', time.localtime(record['last_updated']))
return data
def Date(data):
'''Patching date stamps.'''
for record in data:
m = time.strptime(record['month_en'], '%B')
m = time.strftime('%m', m)
record['date'] = '{year}-{month}'.format(year=record['year'], month=m)
return data
| 20.041667 | 94 | 0.619543 |
a3686d9e544eb4ac435a125dc81bd7efb5af661e | 1,875 | py | Python | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | [
"Apache-2.0"
] | null | null | null | from nltk.tokenize import TweetTokenizer
import io
dir_path = "../../data/nlmap/"
train_en_path = dir_path + "nlmaps.train.en"
train_mrl_path = dir_path + "nlmaps.train.mrl"
test_en_path = dir_path + "nlmaps.test.en"
test_mrl_path = dir_path + "nlmaps.test.mrl"
train_txt = dir_path + "train.txt"
test_txt = dir_path + "test.txt"
train_en_result, train_mrl_result = read_nlmap_data(train_en_path, train_mrl_path)
test_en_result, test_mrl_result = read_nlmap_data(test_en_path, test_mrl_path)
process_results(train_en_result, train_mrl_result, train_txt)
process_results(test_en_result, test_mrl_result, test_txt) | 29.296875 | 82 | 0.6208 |
a3697ddf813bc6d7c74b1660f1c7cbb233952678 | 2,228 | py | Python | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 13 | 2020-09-25T16:48:06.000Z | 2022-01-31T01:36:33.000Z | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 5 | 2021-01-19T09:36:59.000Z | 2022-03-25T06:56:08.000Z | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | [
"FTL",
"Xnet",
"X11"
] | 7 | 2020-09-24T01:15:52.000Z | 2022-03-23T06:50:55.000Z | states = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA',
'GJ', 'HR', 'HP', 'JH', 'KA', 'KL',
'MP', 'MH', 'MN', 'ML', 'MZ', 'NL',
'OD', 'PB', 'RJ', 'SK', 'TN', 'TS',
'TR', 'UP', 'UK', 'WB', 'AN', 'CH',
'DD', 'DL', 'JK', 'LA', 'LD', 'PY']
def detect_text(path):
"""Detects text in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
#with open('results.txt', 'w', encoding='utf8') as f:
#result=""
#for text in texts:
# result+=text.description
# result+='\n"{}"'.format(text.description)
#vertices = (['({},{})'.format(vertex.x, vertex.y)
# for vertex in text.bounding_poly.vertices])
#result+='bounds: {}'.format(','.join(vertices))
#f.write(result)
plate = preprocess(texts[0].description)
plate = resultplate(plate)
print(plate)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
detect_text('numberplate.jpg')
| 30.520548 | 75 | 0.508528 |
a36a1929d767b48efa4751ceab577496580f2e66 | 667 | py | Python | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
from distutils.util import convert_path
main_ns = {}
vpath = convert_path('photorename/version.py')
with open(vpath) as vfile:
exec(vfile.read(), main_ns)
setup(
name='photorename',
version=main_ns['__version__'],
description='bulk rename photos in a dictionary',
author='Robert Lehmann',
author_email='lehmrob@posteo.net',
url='https://github.com/lehmrob',
packages=['photorename'],
entry_points = {
'console_scripts': ['phore=photorename.cli:main'],
},
install_requires=[
'exif',
],
test_suite='nose.collector',
tests_require=['nose'],
)
| 23.821429 | 58 | 0.667166 |
a36a758d49817dccb80abed74b7ead8eedf80c06 | 456 | py | Python | python_loop/groom_lt1.py | hesthers/self-python-practice- | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | 1 | 2021-09-07T00:26:56.000Z | 2021-09-07T00:26:56.000Z | python_loop/groom_lt1.py | hesthers/self-python-practice | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | null | null | null | python_loop/groom_lt1.py | hesthers/self-python-practice | 79e7a86385e599ca430af761f533f6eaf90aa448 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# UTF-8 encoding when using korean
import numpy as np
import math
input_l = []
while True:
user_input = int(input(''))
input_l.append(user_input)
if len(input_l[1:]) == input_l[0]:
#user_input = user_input.split('\n')
cnt_input = []
for i in range(1, len(input_l)):
if np.sqrt(input_l[i])-math.isqrt(input_l[i]) == 0:
cnt_input.append(input_l[i])
else:
pass
break
else:
next
print(len(cnt_input))
| 17.538462 | 54 | 0.640351 |
a36c8b3504ed6254b18b5d9848ed7acfd15782c9 | 454 | py | Python | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 8 | 2021-06-20T17:59:22.000Z | 2021-09-15T05:28:45.000Z | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 2 | 2021-12-02T20:33:08.000Z | 2021-12-28T14:23:17.000Z | setup.py | hasangchun/ContextNet | da2515bb506a304186c9f579b251be8f086c541a | [
"Apache-2.0"
] | 3 | 2022-02-02T10:36:01.000Z | 2022-03-03T09:04:37.000Z | from setuptools import setup, find_packages
setup(
name='ContextNet',
version='latest',
packages=find_packages(),
description='ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context',
author='Sangchun Ha',
author_email='seomk9896@naver.com',
url='https://github.com/hasangchun/ContextNet',
install_requires=[
'torch>=1.4.0',
],
python_requires='>=3.6',
) | 28.375 | 123 | 0.698238 |
a36d0ac9736ee7f0f87c898553b9622f6343c622 | 130 | py | Python | katas/kyu_7/product_of_main_diagonal.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/kyu_7/product_of_main_diagonal.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/kyu_7/product_of_main_diagonal.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | from operator import mul
| 21.666667 | 67 | 0.730769 |
a36e85cc522d69fee1eb9747d2afca83c85e094a | 1,643 | py | Python | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z | from __future__ import annotations
import toolcli
from ctc.protocols import curve_utils
| 26.934426 | 80 | 0.593427 |
a3724c66e413effcdf21b1d39aedb643be084706 | 218 | py | Python | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | [
"MIT"
] | null | null | null | """
db constants
"""
DB_HOST = 'localhost'
DB_PORT = 28015
# Database is cavilling
DB_NAME = 'cavilling'
DB_TABLE_CAVILLS = 'cavills'
DB_TABLE_HAIRDOS = 'hairdos'
DB_TABLE_POLRUS = 'polrus'
DB_TABLE_USERS = 'users'
| 14.533333 | 28 | 0.733945 |
a3725144f31da3c2b8b26f9fa9ea6b635892f533 | 76,062 | py | Python | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | 1 | 2021-04-11T05:53:38.000Z | 2021-04-11T05:53:38.000Z | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | null | null | null | socfakerservice/api/api.py | MSAdministrator/soc-faker-service | f3c401f8d6931d1c421f594dc8f1137f2ad984aa | [
"MIT"
] | 1 | 2021-04-11T05:53:42.000Z | 2021-04-11T05:53:42.000Z | from flask import Blueprint, render_template, request, abort, jsonify, Response
from socfakerservice import status, HTMLRenderer, set_renderers
from socfakerservice.model import TokenModel
from socfaker import SocFaker
socfaker = SocFaker()
api_bp = Blueprint(
'api',
__name__
)
### AGENT ROUTES ###
### ALERT ROUTES ###
### ALERT ROUTES ###
### APPLICATION ROUTES ###
### APPLICATION ROUTES ###
### CLOUD ROUTES ###
### CLOUD ROUTES ###
### COMPUTER ROUTES ###
### COMPUTER ROUTES ###
### CONTAINER ROUTES ###
### CONTAINER ROUTES ###
### DNS ROUTES ###
### DNS ROUTES ###
### EMPLOYEE ROUTES ###
### EMPLOYEE ROUTES ###
### FILE ROUTES ###
### FILE ROUTES ###
### FILE ROUTES ###
### LOCATION ROUTES ###
### LOCATION ROUTES ###
### LOGS ROUTES ###
### LOGS ROUTES ###
### NETWORK ROUTES ###
### NETWORK ROUTES ###
### OPERATING_SYSTEM ROUTES ###
### OPERATING_SYSTEM ROUTES ###
### ORGANIZATION ROUTES ###
### ORGANIZATION ROUTES ###
### PCAP ROUTES ###
### PCAP ROUTES ###
### REGISTRY ROUTES ###
### REGISTRY ROUTES ###
### TIMESTAMP ROUTES ###
### TIMESTAMP ROUTES ###
### USER_AGENT ROUTES ###
### USER_AGENT ROUTES ###
### VULNERABILITY ROUTES ###
### VULNERABILITY ROUTES ###
### WORDS ROUTES ###
### WORDS ROUTES ###
### PRODUCT ROUTES ###
### PRODUCTS - AZURE - VM - DETAILS ###
### PRODUCTS - AZURE - VM - DETAILS ###
### PRODUCTS - AZURE - VM - METRICS ###
### PRODUCTS - AZURE - VM - METRICS ###
### PRODUCTS - AZURE - VM - TOPOLOGY ###
### PRODUCTS - AZURE - VM - TOPOLOGY ###
### PRODUCTS - ELASTIC ###
### PRODUCTS - ELASTIC ###
### PRODUCTS - QUALYSGUARD ###
### PRODUCTS - QUALYSGUARD ###
### PRODUCTS - SERVICENOW ###
| 28.349609 | 162 | 0.639623 |
a37355a19aa8f440bb3300c6b512a843d8e672aa | 3,494 | py | Python | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 28 | 2019-06-18T15:56:53.000Z | 2021-11-09T13:11:13.000Z | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 2 | 2018-10-24T01:09:56.000Z | 2018-11-08T07:13:48.000Z | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | [
"Apache-2.0"
] | 8 | 2019-01-11T01:12:15.000Z | 2021-03-12T10:15:43.000Z | # coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from jdit.trainer.single.classification import ClassificationTrainer
from jdit.model import Model
from jdit.optimizer import Optimizer
from jdit.dataset import FashionMNIST
from jdit.parallel import SupParallelTrainer
def build_task_trainer(unfixed_params):
"""build a task just like FashionClassTrainer.
:param unfixed_params:
:return:
"""
logdir = unfixed_params['logdir']
gpu_ids_abs = unfixed_params["gpu_ids_abs"]
depth = unfixed_params["depth"]
lr = unfixed_params["lr"]
batch_size = 32
opt_name = "RMSprop"
lr_decay = 0.94
decay_position= 1
position_type = "epoch"
weight_decay = 2e-5
momentum = 0
nepochs = 100
num_class = 10
torch.backends.cudnn.benchmark = True
mnist = FashionMNIST(root="datasets/fashion_data", batch_size=batch_size, num_workers=2)
net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method="kaiming", verbose=False)
opt = Optimizer(net.parameters(), opt_name, lr_decay, decay_position, position_type=position_type,
lr=lr, weight_decay=weight_decay, momentum=momentum)
Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)
return Trainer
def trainerParallel():
unfixed_params = [
{'task_id': 1, 'gpu_ids_abs': [],
'depth': 4, 'lr': 1e-3,
},
{'task_id': 1, 'gpu_ids_abs': [],
'depth': 8, 'lr': 1e-2,
},
{'task_id': 2, 'gpu_ids_abs': [],
'depth': 4, 'lr': 1e-2,
},
{'task_id': 2, 'gpu_ids_abs': [],
'depth': 8, 'lr': 1e-3,
},
]
tp = SupParallelTrainer(unfixed_params, build_task_trainer)
return tp
def start_fashionClassPrarallelTrainer(run_type="debug"):
tp = trainerParallel()
tp.train()
if __name__ == '__main__':
start_fashionClassPrarallelTrainer()
| 32.351852 | 105 | 0.634516 |
a37644a1e11006bb540b7235f3216f75efbca584 | 5,711 | py | Python | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | 1 | 2022-01-13T21:46:40.000Z | 2022-01-13T21:46:40.000Z | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | null | null | null | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | [
"MIT"
] | null | null | null | #Initialize packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.model_selection as model_selection
from sklearn import linear_model
import sklearn.metrics as metrics
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from sklearn.feature_selection import RFE
from sklearn.impute import KNNImputer
import warnings
#Read in data
df = pd.read_csv(r'\movie_data_final.csv')
#If revenue is less than $5000 set to NA
df.loc[df['revenue'] <= 5000,'revenue'] = np.nan
#Impute missing reveneue using KNN (ignoring date and name columns)
imputer = KNNImputer(n_neighbors=2)
df.iloc[: , 2:] = imputer.fit_transform(df.iloc[: , 2:])
#Drop columns that cause problems with the modeling aspect
df=df.drop(['Logged_Date','Name','Logged_Year'], axis=1)
######################## Transformations ########################
#Plot correlation matrix
corrMatrix = df.corr()
plt.subplots(figsize=(20,15))
sns_plot = sns.heatmap(corrMatrix,cmap="RdBu",annot=True)
fig = sns_plot.get_figure()
fig.savefig("jupyter_heatmap.png")
#Scale non-boolean features
df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']] = StandardScaler().fit_transform(df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']])
#Plot potenitally problematic features
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);
#Remove outliers and replace with mean
replace = df['runtime'].mean()
df.loc[df['runtime'] >= 2,'runtime'] = np.nan
df['runtime'] = np.where(df['runtime'].isna(),replace,df['runtime'])
#Same process but with popularity
replace = df['popularity'].mean()
df.loc[df['popularity'] >= 2,'popularity'] = np.nan
df['popularity'] = np.where(df['popularity'].isna(),replace,df['popularity'])
#Transform problematic columns
df['movie_sentiment'] = df['movie_sentiment']**(1./3.)
#Recode bad values to mean
df.replace([np.inf, -np.inf], np.nan, inplace=True)
replace = df['movie_sentiment'].mean()
df['movie_sentiment'] = np.where(df['movie_sentiment'].isna(),replace,df['movie_sentiment'])
#Plot again to see change in features after transformation
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);
############ Research Question: Which factors impact revenue the most? ############
#Train Test Split
X=df.drop('revenue', axis=1)
y=df[['revenue']]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.3, random_state=24)
###### 1.1 OLS ######
lm = linear_model.LinearRegression()
lm.fit(X_train, y_train)
ols_fitted = lm.predict(X_test)
#Calculate R Squared
print("OLS R Squared: %s" % round(metrics.r2_score(y_test, ols_fitted),2))
###### 1.2 Elastic Net ######
search=model_selection.GridSearchCV(estimator=linear_model.ElasticNet(),param_grid={'alpha':np.logspace(-5,2,8),'l1_ratio':[.2,.4,.6,.8]},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10)
search.fit(X_train,y_train)
print(search.best_params_)
enet=linear_model.ElasticNet(normalize=True,alpha=0.001,l1_ratio=0.8)
enet.fit(X_train, y_train)
enet_fitted = enet.predict(X_test)
#Calculate R Squared
print("Elastic Net R Squared: %s" % round(metrics.r2_score(y_test, enet_fitted),2))
###### 1.3 RF ######
warnings.simplefilter("ignore")
nof_list=np.arange(1,37)
high_score=0
nof=0
score_list =[]
#Variable to store the optimum features
for n in range(len(nof_list)):
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.3, random_state=24)
model = linear_model.LinearRegression()
rfe = RFE(model,nof_list[n])
X_train_rfe = rfe.fit_transform(X_train,y_train)
X_test_rfe = rfe.transform(X_test)
model.fit(X_train_rfe,y_train)
score = model.score(X_test_rfe,y_test)
score_list.append(score)
if(score>high_score):
high_score = score
nof = nof_list[n]
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
#Optimum number of features: 35
#Score with 35 features: 0.645497
rf = RandomForestRegressor(max_features = 35, n_estimators=100)
rf.fit(X_train, y_train)
rf_fitted = rf.predict(X_test)
#Generate Feature Importance
rev_importance = {} # a dict to hold feature_name: feature_importance
for feature, importance in zip(X_train.columns, rf.feature_importances_):
rev_importance[feature] = importance #add the name/value pair
rev_importance = pd.DataFrame.from_dict(rev_importance, orient='index').rename(columns={0: 'Revenue_Importance'})
#Calculate R Squared
print("RF R Squared: %s" % round(metrics.r2_score(y_test, rf_fitted),2))
################### Feature Importance ###################
#Plot Feature Importance table
print(rev_importance.sort_values(by='Revenue_Importance', ascending=False))
#Plot as bar chart
rev_importance.sort_values(by='Revenue_Importance', ascending=False).plot(kind='bar', rot=45)
| 38.073333 | 366 | 0.711959 |
a3767371ed8f0cd8ffdd0f52e641dd47e92c68df | 1,287 | py | Python | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 142 - Perfect Square Collection
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from itertools import combinations
import numpy as np
if __name__ == "__main__":
print(run())
| 28.6 | 79 | 0.529915 |
a3775d28ecda7be7aab9864818d4a6bf38e3387c | 6,032 | py | Python | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | garcon/activity.py | mortaliorchard/garcon | 281221c63da0b2ea3ea51174a3d5878d1de8f487 | [
"MIT"
] | null | null | null | """
Activity
========
Activities are self generated classes to which you can pass an identifier,
and a list of tasks to perform. The activities are in between the decider and
the task.
For ease, two types of task runners are available: SyncTasks and AsyncTasks. If
you need something more specific, you should either create your own runner, or
you should create a main task that will then split the work.
"""
from threading import Thread
import boto.swf.layer2 as swf
import json
ACTIVITY_STANDBY = 0
ACTIVITY_SCHEDULED = 1
ACTIVITY_COMPLETED = 2
def worker_runner(worker):
    """Poll the activity worker in a loop until it signals completion.

    Args:
        worker (object): the Activity worker; its ``run()`` method is
            invoked repeatedly until it returns a falsy value.
    """
    keep_going = True
    while keep_going:
        keep_going = worker.run()
def create(domain):
    """Helper method to create Activities.

    The helper method simplifies the creation of an activity by setting the
    domain, the task list, and the activity dependencies (what other
    activities) need to be completed before this one can run.

    Note:
        The task list is generated based on the domain and the name of the
        activity. Always make sure your activity name is unique.
    """
    # NOTE(review): the body that defines `wrapper` is missing from this
    # copy of the file, so calling create() raises NameError as written.
    # Restore the wrapper definition from the upstream garcon sources.
    return wrapper
def find_activities(flow):
    """Retrieves all the activities from a flow.

    Args:
        flow (module): the flow module to scan for Activity instances.

    Return:
        List of all the activities for the flow.
    """
    members = (getattr(flow, attribute) for attribute in dir(flow))
    return [member for member in members if isinstance(member, Activity)]
| 28.587678 | 79 | 0.629145 |
a37b1669512f165099c1e03b767ae6863a2fb2c7 | 7,754 | py | Python | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | [
"MIT"
] | null | null | null | import sys
import csv
import json
import argparse
from collections import namedtuple
# diff info: one immutable record per detected difference
DiffInfo = namedtuple('DiffInfo', [
    'mark',  # diff kind: '!' changed cell, '-' row only in csv1, '+' row only in csv2
    'address',  # row/column addresses of diff
    'keyname',  # row/column key names of diff (primary-key value + column header)
    'value',  # values of diff
])
def main():
    """CLI entry point.

    Parses command-line arguments, reads both CSV files, computes their
    differences and prints them either as plain text or as JSON.
    """
    parser = argparse.ArgumentParser(description='Output the difference between two CSV files.')
    parser.add_argument('csv1', help='1st CSV file.')
    parser.add_argument('csv2', help='2nd CSV file.')
    parser.add_argument('-e', '--encoding', default='utf-8', help='Encoding for CSV files. (default: utf-8)')
    parser.add_argument('-p', '--primary-key', type=int, default=1, help='Column number as primary key. (range: 1-N, default: 1)')
    parser.add_argument('-t', '--has-title', action='store_true', help='Treat the first line as a header.')
    parser.add_argument('-f', '--format', default='normal', help='Set format. (normal, json)')
    parser.add_argument('--excel-style', action='store_true', help='Print addresses excel A1 style.')
    parser.add_argument('--hide-address', action='store_true', help='Do not print row/column addresses.')
    parser.add_argument('--hide-keyname', action='store_true', help='Do not print row/column key names.')
    parser.add_argument('--hide-value', action='store_true', help='Do not print difference values.')
    args = parser.parse_args()
    # read csv
    csv1, header1 = read_csv(args.csv1, args.encoding, args.has_title)
    csv2, header2 = read_csv(args.csv2, args.encoding, args.has_title)
    # check column count
    # (read_csv pads every row to the widest row, so the header length is the
    # column count of the whole file)
    if len(header1) != len(header2):
        print(f'error: different column count in CSV files. (csv1:{len(header1)}, csv2:{len(header2)})', file=sys.stderr)
        return
    # check primary key value (user supplies a 1-based column number)
    if not (0 < args.primary_key <= len(header1)):
        print(f'error: primary key invalid. (primary key:{args.primary_key}, column count:{len(header1)})', file=sys.stderr)
        return
    # correct column number to start with 0
    primary_key = args.primary_key - 1
    # sort by primary key -- diff_csv performs a sorted-merge join and relies
    # on both inputs being ordered by the key column
    csv1.sort(key=lambda x: x[primary_key])
    csv2.sort(key=lambda x: x[primary_key])
    # get diff info
    diffs = diff_csv(csv1, header1, csv2, header2, primary_key, args.excel_style)
    # print result
    if args.format.lower() == 'json':
        print(json.dumps([d._asdict() for d in diffs]))
    else:
        print_diffs(diffs, args.hide_address, args.hide_keyname, args.hide_value)
def read_csv(fname: str, encoding: str, has_header: bool):
    """Read a CSV file and normalize its rows to a uniform column count.

    Args:
        fname (str): path of the CSV file.
        encoding (str): text encoding used to open the file.
        has_header (bool): if True, treat the first row as the header.

    Returns:
        tuple[list[list[str]], list[str]]: the CSV data rows and the header
        row (a row of empty strings when the file carries no header).
    """
    # newline='' lets csv.reader handle embedded newlines correctly, as the
    # csv module documentation recommends.
    with open(fname, 'r', encoding=encoding, newline='') as f:
        csvdata = list(csv.reader(f))
    # Pad every row to the widest row so later column-wise zips line up.
    # default=0 keeps max() from raising on a completely empty file.
    max_columns = max(map(len, csvdata), default=0)
    for row in csvdata:
        row.extend([''] * (max_columns - len(row)))
    # Split off the header row, or synthesize an empty one. The extra
    # `and csvdata` guard avoids an IndexError on an empty file.
    if has_header and csvdata:
        header = csvdata[0]
        csvdata = csvdata[1:]
    else:
        header = [''] * max_columns
    return csvdata, header
def diff_csv(csv1: list[list[str]], header1: list[str],
             csv2: list[list[str]], header2: list[str],
             primary_key: int, excel_style: bool):
    """Diff CSV files.

    Performs a sorted-merge join on the primary-key column: both csv1 and
    csv2 must already be sorted by that column (main() sorts them before
    calling this function).

    Args:
        csv1 (list[list[str]]): 1st CSV data.
        header1 (list[str]): 1st CSV header.
        csv2 (list[list[str]]): 2nd CSV data.
        header2 (list[str]): 2nd CSV header.
        primary_key (int): column number of primary key.
        excel_style (bool): excel A1 style.
    Returns:
        list[DiffInfo]: list of diff infos.
    """
    diffs = []
    # ri1/ri2 are independent cursors into the two (sorted) row lists.
    ri1 = ri2 = 0
    while True:
        # get target row (None once a list is exhausted)
        row1 = csv1[ri1] if len(csv1) > ri1 else None
        row2 = csv2[ri2] if len(csv2) > ri2 else None
        # get primary key of target row
        pkey1 = row1[primary_key] if row1 else None
        pkey2 = row2[primary_key] if row2 else None
        # exit when both CSV data is terminated
        # (pkey2 is None exactly when row2 is None, so this tests both sides)
        if row1 is None and pkey2 is None:
            break
        # remaining lines of csv2, if csv1 is terminated
        # (== the row in csv2 only)
        elif pkey1 is None:
            diffs.append(DiffInfo(
                mark='+',
                address=make_row_address(ri2, excel_style),
                keyname='',
                value=','.join(row2),
            ))
            ri2 += 1
        # remaining lines of csv1, if csv2 is terminated
        # (== the row in csv1 only)
        elif pkey2 is None:
            diffs.append(DiffInfo(
                mark='-',
                address=make_row_address(ri1, excel_style),
                keyname='',
                value=','.join(row1),
            ))
            ri1 += 1
        # the row in csv2 only (keys compare as strings)
        elif pkey1 > pkey2:
            diffs.append(DiffInfo(
                mark='+',
                address=make_row_address(ri2, excel_style),
                keyname='',
                value=','.join(row2),
            ))
            ri2 += 1
        # the row in csv1 only
        elif pkey1 < pkey2:
            diffs.append(DiffInfo(
                mark='-',
                address=make_row_address(ri1, excel_style),
                keyname='',
                value=','.join(row1),
            ))
            ri1 += 1
        # the row in both files: compare cell by cell and record changes
        else: # pkey1 == pkey2
            for ci, (v1, v2) in enumerate(zip(row1, row2)):
                if v1 != v2:
                    diffs.append(DiffInfo(
                        mark='!',
                        address=make_cell_address(ri1, ri2, ci, excel_style),
                        keyname=f'{pkey1},{header1[ci]}',
                        value=f'{v1} | {v2}',
                    ))
            ri1 += 1
            ri2 += 1
    return diffs
def a1_address(ri, ci):
    """Convert 0-based row/column indices to an Excel A1-style address.

    Excel column letters form a bijective base-26 system (A..Z, AA..AZ,
    BA.., with no "zero" digit). The previous two-branch implementation
    was off by one for multi-letter columns: index 26 produced 'BA'
    instead of 'AA' and index 27 produced 'BB' instead of 'AB'. This loop
    is correct for any column index, including three letters and beyond.
    """
    letters = ''
    remaining = ci + 1  # 1-based column number for the bijective system
    while remaining > 0:
        remaining, digit = divmod(remaining - 1, 26)
        letters = chr(65 + digit) + letters  # 65 == ord('A')
    return letters + str(ri + 1)
def make_row_address(ri, excel_style):
    """Format a whole-row address for display (e.g. '3:3' or 'R3')."""
    row_number = ri + 1
    if excel_style:
        return '%d:%d' % (row_number, row_number)
    return 'R%d' % row_number
def make_cell_address(ri1, ri2, ci, excel_style):
    """Format the pair of cell addresses ('left | right') for display."""
    if excel_style:
        left = a1_address(ri1, ci)
        right = a1_address(ri2, ci)
    else:
        left = 'R%d,C%d' % (ri1 + 1, ci + 1)
        right = 'R%d,C%d' % (ri2 + 1, ci + 1)
    return '%s | %s' % (left, right)
def print_diffs(diffs, hide_address, hide_keyname, hide_value):
    """Print diffs.

    Args:
        diffs (list[DiffInfo]): list of diff infos.
        hide_address (bool): if true then do not print addresses.
        hide_keyname (bool): if true then do not print key names.
        hide_value (bool): if true then do not print values.
    """
    for entry in diffs:
        segments = ['%s ' % entry.mark]
        if not hide_address and entry.address:
            segments.append('[%s] ' % entry.address)
        if not hide_keyname and entry.keyname:
            segments.append('[%s] ' % entry.keyname)
        if not hide_value and entry.value:
            segments.append('> %s' % entry.value)
        print(''.join(segments))
    print('(diff count: %d)' % len(diffs))
if __name__ == '__main__':
main()
| 33.5671 | 131 | 0.554037 |
a37e426d249e4fa306b483523b559a9a0ae9cff3 | 2,563 | py | Python | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | null | null | null | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | 11 | 2020-11-13T18:29:37.000Z | 2022-02-10T00:25:15.000Z | map_reduce/core/data_source.py | HuynhThanhQuan/graph-network | e429a641e7baecad9765700cac580cfbdedbe1bd | [
"MIT"
] | null | null | null | import os
from time import time
import pandas as pd
from sqlalchemy import create_engine
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| 35.109589 | 106 | 0.587593 |
a37e9163f756c5b933aa7522cfc07f57edae5c1e | 3,431 | py | Python | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | [
"BSD-3-Clause"
] | null | null | null | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | [
"BSD-3-Clause"
] | 4 | 2020-08-19T08:39:55.000Z | 2021-03-31T08:23:26.000Z | setup.py | radiosilence/django-omnibus | c31337306c601e75fbdac9d6b9b62dcc980e04f5 | [
"BSD-3-Clause"
] | null | null | null | import codecs
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# Dependencies for running the test suite (exposed as the 'tests' extra).
test_requires = [
    'pytest>=2.5.2',
    'pytest-cov>=1.6',
    'pytest-flakes>=0.2',
    'pytest-pep8>=1.0.5',
    'pytest-django>=2.6',
    'mock==1.0.1',
    'pep8==1.4.6'
]
# Runtime dependencies installed with the package.
install_requires = [
    'Django>=1.4',
    'pyzmq==14.1.1',
    'tornado==3.1.1',
    'sockjs-tornado>=1.0.0',
]
# Extra tooling for development and for building the documentation.
dev_requires = [
    'tox',
]
docs_requires = [
    'sphinx',
    'sphinx_rtd_theme'
]
# NOTE(review): `read` (long-description loader) and `PyTest` (the custom
# test command class) are referenced below but their definitions are missing
# from this copy of the file; restore them before running this setup script.
setup(
    name='django-omnibus',
    version='0.1.0',
    description='Django/JavaScript WebSocket Connections',
    long_description=read('README.md'),
    author='Stephan Jaekel, Norman Rusch',
    author_email='info@moccu.com',
    url='https://github.com/moccu/django-omnibus/',
    packages=find_packages(exclude=[
        'testing',
        'testing.pytests',
        'examples',
    ]),
    include_package_data=True,
    extras_require={
        'docs': docs_requires,
        'tests': test_requires,
        'dev': dev_requires,
    },
    test_suite='.',
    install_requires=install_requires,
    # `python setup.py test` is wired to the pytest-based command class.
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: Implementation :: CPython',
        'Framework :: Django',
    ],
    zip_safe=False,
)
| 27.669355 | 86 | 0.575051 |
a37f3e393c9a970f74e1fb50bf59be6bc0954abc | 504 | py | Python | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 105 | 2018-02-07T22:07:47.000Z | 2022-03-31T18:16:47.000Z | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 57 | 2018-02-07T23:07:41.000Z | 2021-11-21T17:14:06.000Z | scripts/tests/snapshots/snap_etc_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | [
"Apache-2.0"
] | 10 | 2018-02-24T23:44:51.000Z | 2022-03-02T07:52:27.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_etc 1'] = '[{"lineno": 2, "value": "tup = (1, 2, 3)"}, {"lineno": 3, "source": ["tup\\n"], "value": "(1, 2, 3)"}, {"lineno": 5, "value": "False"}, {"lineno": 7, "value": "text = happy"}, {"lineno": 9, "source": ["text\\n"], "value": "happy"}, {"lineno": 12, "value": "x = foo\\nfaa"}, {"lineno": 15, "value": "a = 1"}]'
| 45.818182 | 335 | 0.56746 |
a37f43b419e3def4e72bb772a8952c0f709cee66 | 1,823 | py | Python | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | [
"MIT"
] | null | null | null | import cosypose
import os
import yaml
from joblib import Memory
from pathlib import Path
import getpass
import socket
import torch.multiprocessing
# Share tensors between worker processes through the file system instead of
# file descriptors.
torch.multiprocessing.set_sharing_strategy('file_system')
hostname = socket.gethostname()
username = getpass.getuser()
# All project paths are derived from the installed cosypose package location.
PROJECT_ROOT = Path(cosypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
DATA_DIR = PROJECT_DIR / 'data'
LOCAL_DATA_DIR = PROJECT_DIR / 'local_data'
TEST_DATA_DIR = LOCAL_DATA_DIR
DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
EXP_DIR = LOCAL_DATA_DIR / 'experiments'
RESULTS_DIR = LOCAL_DATA_DIR / 'results'
DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
DEPS_DIR = PROJECT_DIR / 'deps'
CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'
# NOTE(review): importing this module has side effects (assert + mkdir).
# The assert below is stripped under `python -O`; consider raising instead.
assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
TEST_DATA_DIR.mkdir(exist_ok=True)
DASK_LOGS_DIR.mkdir(exist_ok=True)
SYNT_DS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)
ASSET_DIR = DATA_DIR / 'assets'
# On-disk joblib memoization cache shared across the project.
MEMORY = Memory(CACHE_DIR, verbose=2)
# Record the active conda environment; CONDA_PREFIX_1 is present when an env
# has been activated on top of the base environment.
CONDA_PREFIX = os.environ['CONDA_PREFIX']
if 'CONDA_PREFIX_1' in os.environ:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
    CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
else:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
    CONDA_ENV = 'base'
# NOTE(review): cluster settings are read from a user-specific file name
# ('config_yann.yaml'); confirm this is intended for other users/machines.
cfg = yaml.load((PROJECT_DIR / 'config_yann.yaml').read_text(), Loader=yaml.FullLoader)
SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
SLURM_QOS = cfg['slurm_qos']
DASK_NETWORK_INTERFACE = cfg['dask_network_interface']
# Kwai path
KWAI_PATH = "/data2/cxt/kwai/IMG_3486" | 30.383333 | 88 | 0.765222 |
a37f76a50ac5297fbe0ae2e72f8f20c6b13bb7e0 | 2,761 | py | Python | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | 2018/16/main.py | zelane/advent-of-code-2016 | 72ebda3ec4eca4d1921dd8d94f2f84bbd0566fc5 | [
"MIT"
] | null | null | null | from copy import copy
import re
opts = [
"addr", "addi", "mulr", "muli", "banr", "bani", "borr", "bori", "setr",
"seti", "gtir", "gtri", "gtrr", "eqir", "eqri", "eqrr"
]
inputs = []
program = []
parse_mem = lambda s: list(map(int, re.findall(r"[0-9]", s)))
parse_params = lambda s: list(map(int, s.split(" ")))
with open('input.txt') as f:
while True:
before, args, after, _ = (f.readline().strip() for x in range(4))
if not before:
break
inputs.append(
(parse_mem(before), parse_params(args), parse_mem(after)))
while True:
line = f.readline()
if not line:
break
program.append(parse_params(line))
answer_1, opt_map = test(opts)
print(answer_1)
while opts:
_, solved = test(opts)
opt_map.update(solved)
for opt in solved.values():
opts.remove(opt)
cpu = Cpu([0, 0, 0, 0])
for args in program:
func_name = opt_map[args[0]]
getattr(cpu, func_name)(*args[1:])
print(cpu.mem[0])
| 23.801724 | 75 | 0.522637 |
a382674b28c095d002534f7e5a89fab99c7987b3 | 2,700 | py | Python | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | null | null | null | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | null | null | null | streamlit/About.py | sarthak815/sage2therescue | b4bc38731a2b43866c545560e850cdddd85a0852 | [
"MIT"
] | 1 | 2022-03-04T09:31:18.000Z | 2022-03-04T09:31:18.000Z | import streamlit as st | 117.391304 | 1,320 | 0.802963 |
a3842c6138c7e752e05c72628b0129a00a3d511f | 1,617 | py | Python | tests/test_reduce_sum.py | gavinuhma/tf-encrypted | 4e18d78a151bbe91489a1773fb839b889ff5b460 | [
"Apache-2.0"
] | 3 | 2018-10-18T19:36:02.000Z | 2020-07-05T19:46:23.000Z | tests/test_reduce_sum.py | dropoutlabs/tf-encrypted | 48c9dc7419163425e736ad05bb19980d134fc851 | [
"Apache-2.0"
] | null | null | null | tests/test_reduce_sum.py | dropoutlabs/tf-encrypted | 48c9dc7419163425e736ad05bb19980d134fc851 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
import unittest
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
if __name__ == '__main__':
unittest.main()
| 24.134328 | 54 | 0.650588 |
a38463fb4d443f7e3aa2457876c06216a04ae227 | 1,010 | py | Python | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | [
"MIT"
] | null | null | null |
import pytest
from GraphModels.models.Sarah.model_agricultural_water import AgriculturalWaterNodes
from GraphModels.models.Sarah.model_freshwater_available import FreshwaterAvailableNodes
from GraphModels.models.Sarah.model_municipal_water import MunicipalWaterNodes
nodes_list = AgriculturalWaterNodes + FreshwaterAvailableNodes + MunicipalWaterNodes
computationnal_nodes = [node for node in nodes_list if 'computation' in node.keys()]
def test_inputs_computation():
    """Every node input must be the id of some node in the model."""
    referenced_inputs = set()
    for node in nodes_list:
        referenced_inputs.update(node.get('in', []))
    declared_ids = {node['id'] for node in nodes_list}
    assert referenced_inputs <= declared_ids
| 37.407407 | 116 | 0.773267 |
a3859a2bc6f5180117d2aa59a1b851252ca8c8a5 | 1,350 | py | Python | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | appheap/social-media-analyzer | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | [
"Apache-2.0"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | from django.db import transaction
from db.scaffold import Scaffold
from typing import List
from telegram import models as tg_models
from pyrogram import types
| 32.926829 | 92 | 0.565926 |
a387303aea958c01ac561cbc1d4a035d79ef4112 | 486 | py | Python | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | app/user_api/user_api.py | Basselbi/hikma-health-backend | 0f891821a04aa103fff62097443bd585bc342dbc | [
"MIT"
] | null | null | null | from flask import Blueprint, request, jsonify
from web_util import assert_data_has_keys
from users.user import User
user_api = Blueprint('users_api', __name__, url_prefix='/api/user')
| 34.714286 | 81 | 0.73251 |