Dataset schema (one row per source file):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 .. 251 |
| max_stars_repo_name | string | length 4 .. 130 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 (nullable) | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 .. 251 |
| max_issues_repo_name | string | length 4 .. 130 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 (nullable) | 1 .. 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 .. 251 |
| max_forks_repo_name | string | length 4 .. 130 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 (nullable) | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 1 .. 1.05M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.04M |
| alphanum_fraction | float64 | 0 .. 1 |
---
hexsha: f536be230ab9f47d327f6fa5a8e54f230ab096d9 | size: 1,745 | ext: py | lang: Python
path: chatServer/server.py | repo: RobbeBryssinck/chatApplication | head: 628ab6acb2b19d26d3e5c064cbea14747041f43e | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import socket
import sys
import os
import optparse
from threading import *
clients = []
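# NOTE: the body of this file is truncated in this dump and main() is never
# defined; a minimal, hypothetical sketch inferred from the imports and the
# `clients` list above (host and port are assumptions).
def main():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('127.0.0.1', 9009))
    server.listen(5)
    while True:
        connection, address = server.accept()
        clients.append(connection)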
if __name__ == '__main__':
main()
avg_line_length: 19.606742 | max_line_length: 91 | alphanum_fraction: 0.676218

---
hexsha: f537b763bb0939c0d65ba5d32dd7d3fcdadbcca3 | size: 1,502 | ext: py | lang: Python
path: tests/test_utils_bytes.py | repo: cwichel/embutils | head: 188d86d84637088bafef188b3312078048934113 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/python
# -*- coding: ascii -*-
"""
Byte utilities testing.
:date: 2021
:author: Christian Wiche
:contact: cwichel@gmail.com
:license: The MIT License (MIT)
"""
import unittest
from embutils.utils import bitmask, reverse_bits, reverse_bytes
# -->> Definitions <<------------------
# -->> Test API <<---------------------
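# NOTE: the actual test cases are truncated in this dump; a minimal,
# hypothetical placeholder so the module still runs under unittest.
class TestBytesUtils(unittest.TestCase):
    def test_helpers_are_callable(self):
        for helper in (bitmask, reverse_bits, reverse_bytes):
            self.assertTrue(callable(helper))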
# -->> Test Execution <<---------------
if __name__ == '__main__':
unittest.main()
avg_line_length: 23.107692 | max_line_length: 63 | alphanum_fraction: 0.581891

---
hexsha: f538ca85acdf301ac647a1ecf10d45b209f9fdd3 | size: 2,419 | ext: py | lang: Python
path: events/migrations/0020_add_event_comments.py | repo: alysivji/GetTogether | head: 403d9945fff019701de41d081ad4452e771e1ce1 | licenses: ["BSD-2-Clause"]
stars: 446 (2018-01-21T09:22:41.000Z to 2022-03-25T17:46:12.000Z) | issues: 272 (2018-01-03T16:55:39.000Z to 2022-03-11T23:12:30.000Z) | forks: 100 (2018-01-27T02:04:15.000Z to 2021-09-09T09:02:21.000Z)
content:
# Generated by Django 2.0 on 2018-03-24 02:55
import datetime
import django.db.models.deletion
from django.db import migrations, models
import mptt.fields
avg_line_length: 32.689189 | max_line_length: 88 | alphanum_fraction: 0.47871

---
hexsha: f53bf8a6756951f510b486992b5a699a1e895570 | size: 13,529 | ext: py | lang: Python
path: ant_algorithm.py | repo: devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat | head: d766c94ab862e2856412ee19cb883033b914bd3f | licenses: ["MIT"]
stars: 1 (2021-11-08T12:53:16.000Z to 2021-11-08T12:53:16.000Z) | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
"""Ant_Algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Zjt1SInhoaFEqSmsPjEfWQE7jhugAvZA
# **ANT ALGORITHM BY GROUP 9**
1. Heri Khariono - 18081010002
2. Devan Cakra Mudra Wijaya - 18081010013
3. Ika Nur Habibah - 18081010033
4. Trisa Pratiwi - 18081010036
5. Rifky Akhmad Fernanda - 18081010126
# **1. Import Libraries**
"""
#**********************************IMPORT LIBRARIES*******************************
# Library for mathematical operations
import math
# Library for building and manipulating graphs and networks
import networkx as nx
# Library for plotting and visualization
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from pylab import *
# Library for numerical computation
import numpy as np
# Library for high-level data analysis and manipulation
import pandas as pn
# Library for measuring execution time
from time import time
from time import time
"""# **2. Read Data**"""
read_jarak_antarkota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/jarak_antarkota.xlsx')
read_kota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/kota.xlsx')
arr_kota = np.array(read_kota)
arr_jarak_antarkota = np.array(read_jarak_antarkota)
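# NOTE: the notebook cells that define Grafik_Map() and config() are truncated
# in this dump; minimal, hypothetical stubs so the remaining cells can execute.
# config() must return a tour length; the value below is a placeholder, not a
# real ant-colony result.
def Grafik_Map(kota, jarak_antarkota):
    """Stub: draw the city graph (original implementation not included)."""
    pass

def config(label, n_ants, n_steps, evaporation):
    """Stub: run one ant-colony configuration and return the best distance."""
    return float(n_ants + n_steps + evaporation)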
#Grafik Map
Grafik_Map(arr_kota,arr_jarak_antarkota)
"""# **3. Implementasi Algoritma Ant**
1. Transisi status, Pembaruan Feromon Lokal, Pembaruan Feromon Global
"""
import random
"""2. Konfigurasi perutean"""
#Mendefinisikan fungsi untuk mengirim konfigurasi yang berbeda secara teratur
#Konfigurasi yang berbeda didefinisikan
txt_config = [] #Teks konfigurasi
jumlah_semut = [] #Ukuran koloni
langkah = [] #Jumlah langkah total
rho = [] #Tingkat penguapan fermones ANTARA 0 dan 1
txt_config.append('Konfigurasi 1'); jumlah_semut.append(50); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 2'); jumlah_semut.append(100); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 3'); jumlah_semut.append(250); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 4'); jumlah_semut.append(50); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 5'); jumlah_semut.append(90); langkah.append(40); rho.append(0.5);
txt_config.append('Konfigurasi 6'); jumlah_semut.append(150); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 7'); jumlah_semut.append(50); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 8'); jumlah_semut.append(200); langkah.append(90); rho.append(0.1);
txt_config.append('Konfigurasi 9'); jumlah_semut.append(150); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 10'); jumlah_semut.append(80); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 11'); jumlah_semut.append(100); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 12'); jumlah_semut.append(150); langkah.append(100); rho.append(0.5);
jarak_ab = [] # Final travelled distance for each configuration
tempo = [] # Algorithm execution time for each configuration
for i in range(len(txt_config)):
start_time = time()
jarak_ab.append(config(txt_config[i], jumlah_semut[i], langkah[i], rho[i]))
tempo.append(time()-start_time)
"""3. Pemilihan Hasil Terbaik"""
#Grafik hasil tiga rute terbaik berdasarkan jarak
index1=jarak_ab.index(sorted(jarak_ab,reverse=False)[0])
index2=jarak_ab.index(sorted(jarak_ab,reverse=False)[1])
index3=jarak_ab.index(sorted(jarak_ab,reverse=False)[2])
if index2==index1:
index2=index2+1
if index2==index3:
index3=index3+1
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),sorted(jarak_ab,reverse=False)[0:3], edgecolor='#93329F', color='#5D87B6')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])-1, max(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jarak")
plt.ylabel('Jarak tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jarak)\n\n')
plt.show()
# Plot of the three best route configurations by time
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(tempo[index1],tempo[index2],tempo[index3]), edgecolor='#282623', color='#138d90')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(tempo[index1],tempo[index2],tempo[index3])-1, max(tempo[index1],tempo[index2],tempo[index3])+10)
plt.title("Hasil konfigurasi terbaik berdasarkan waktu")
plt.ylabel('Waktu tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (waktu)\n\n')
plt.show()
# Plot of the three best route configurations by number of steps
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(langkah[index1],langkah[index2],langkah[index3]), edgecolor='#F387FF', color='#0D3E00')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(langkah[index1],langkah[index2],langkah[index3])-1, max(langkah[index1],langkah[index2],langkah[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jalur")
plt.ylabel('Jalur tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jalur)\n\n')
plt.show()
avg_line_length: 46.332192 | max_line_length: 168 | alphanum_fraction: 0.663612

---
hexsha: f53d0274845ff18a273019ee23bb400432511d7c | size: 588 | ext: py | lang: Python
path: utils/tool.py | repo: yongleex/SBCC | head: 40f8e67e446fc14fc82ea87f82ee841d62520c71 | licenses: ["MIT"]
stars: 4 (2021-09-04T04:02:57.000Z to 2021-12-27T13:27:26.000Z) | issues: 1 (2021-09-10T07:40:36.000Z to 2022-01-02T06:23:12.000Z) | forks: 1 (2021-09-10T07:36:29.000Z to 2021-09-10T07:36:29.000Z)
content:
import numpy as np
from scipy.ndimage import maximum_filter
def signal2noise(r_map):
""" Compute the signal-to-noise ratio of correlation plane.
w*h*c"""
r = r_map.copy()
max_r = maximum_filter(r_map, (5,5,1))
ind = max_r> (r_map+1e-3)
r[ind] = 0.05
r = np.reshape(r, (-1, r.shape[-1]))
r = np.sort(r,axis=0)
ratio = r[-1,:]/r[-2,:]
return ratio
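# NOTE: main() is not included in this truncated dump; a minimal, hypothetical
# demo that exercises signal2noise() on random data.
def main():
    r_map = np.random.rand(32, 32, 3)
    print("peak ratio per channel:", signal2noise(r_map))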
if __name__=='__main__':
main()
avg_line_length: 18.375 | max_line_length: 63 | alphanum_fraction: 0.612245

---
hexsha: f53f1078d0ccf6010a2d5acd1664c6d7881e41c8 | size: 8,584 | ext: py | lang: Python
path: bjtunlp/train.py | repo: bigbosskai/bjtunlp | head: 58d8ca53fa1d99df2f47f10a0780619c4cdba22f | licenses: ["MIT"]
stars: 1 (2020-12-16T07:18:00.000Z to 2020-12-16T07:18:00.000Z) | issues: null | forks: 1 (2022-03-12T16:41:32.000Z to 2022-03-12T16:41:32.000Z)
content:
import os
import time
import argparse
from tqdm import tqdm
import torch
from torch import optim
from torch import nn
from fastNLP import BucketSampler
from fastNLP import logger
from fastNLP import DataSetIter
from fastNLP import Tester
from fastNLP import cache_results
from bjtunlp.models import BertParser
from bjtunlp.models.metrics import SegAppCharParseF1Metric, CWSPOSMetric, ParserMetric
from bjtunlp.modules.trianglelr import TriangleLR
from bjtunlp.modules.chart import save_table
from bjtunlp.modules.pipe import CTBxJointPipe
from bjtunlp.modules.word_batch import BatchSampler
from bjtunlp.modules.embedding import ElectraEmbedding
if __name__ == '__main__':
main()
avg_line_length: 48.224719 | max_line_length: 203 | alphanum_fraction: 0.65028

---
hexsha: f53f3f14419ce7e5f5fb052bfc8906e374ee8971 | size: 7,978 | ext: py | lang: Python
path: archived/functions/sync_elasticache/redis/LR_sync_redis_model_reuse.py | repo: DS3Lab/LambdaML | head: 0afca7819e08632ba116fec8e102084e4040a47a | licenses: ["Apache-2.0"]
stars: 23 (2021-05-17T09:24:24.000Z to 2022-01-29T18:40:44.000Z) | issues: 2 (2021-05-17T16:15:12.000Z to 2021-07-20T09:11:22.000Z) | forks: 3 (2021-05-17T09:31:53.000Z to 2021-12-02T16:29:59.000Z)
content:
import time
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from archived.elasticache import redis_init
from archived.s3.get_object import get_object
from archived.old_model import LogisticRegression
from data_loader.libsvm_dataset import DenseDatasetWithLines
# lambda setting
redis_location = "test.fifamc.ng.0001.euc1.cache.amazonaws.com"
grad_bucket = "tmp-grads"
model_bucket = "tmp-updates"
local_dir = "/tmp"
w_prefix = "w_"
b_prefix = "b_"
w_grad_prefix = "w_grad_"
b_grad_prefix = "b_grad_"
# algorithm setting
learning_rate = 0.1
batch_size = 100
num_epochs = 2
validation_ratio = .2
shuffle_dataset = True
random_seed = 42
endpoint = redis_init(redis_location)
avg_line_length: 44.322222 | max_line_length: 120 | alphanum_fraction: 0.587741

---
hexsha: f53f7ca7e55025431c0eddd3b58db5224cb4211d | size: 177 | ext: py | lang: Python
path: src/params/NeuronTypes.py | repo: thatmariia/grid-ping | head: 3c32e48226adddcffba605573daa80cca02b5a57 | licenses: ["BSD-4-Clause"]
stars: null | issues: null | forks: null
content:
from enum import Enum
avg_line_length: 16.090909 | max_line_length: 66 | alphanum_fraction: 0.649718

---
hexsha: f5401cd673d6e1e3eddd77c34fed0869702ad889 | size: 2,346 | ext: py | lang: Python
path: src/backend/common/manipulators/team_manipulator.py | repo: ofekashery/the-blue-alliance | head: df0e47d054161fe742ac6198a6684247d0713279 | licenses: ["MIT"]
stars: 266 (2015-01-04T00:10:48.000Z to 2022-03-28T18:42:05.000Z) | issues: 2,673 (2015-01-01T20:14:33.000Z to 2022-03-31T18:17:16.000Z) | forks: 230 (2015-01-04T00:10:48.000Z to 2022-03-26T18:12:04.000Z)
content:
from typing import List
from backend.common.cache_clearing import get_affected_queries
from backend.common.manipulators.manipulator_base import ManipulatorBase
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.team import Team
avg_line_length: 37.238095 | max_line_length: 101 | alphanum_fraction: 0.656436

---
hexsha: f5405ca41fa935c1df325e78905e0a54820977fe | size: 179 | ext: py | lang: Python
path: dtf/packages/settings.py | repo: WebPowerLabs/django-trainings | head: 97f7a96c0fbeb85a001201c74713f7944cb77236 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
from django.conf import settings
INFUSIONSOFT_COMPANY = getattr(settings, 'INFUSIONSOFT_COMPANY_ID', None)
INFUSIONSOFT_API_KEY = getattr(settings, 'INFUSIONSOFT_API_KEY', None)
avg_line_length: 35.8 | max_line_length: 73 | alphanum_fraction: 0.832402

---
hexsha: f5450958d50c031030e18504e081e98ce995e8e8 | size: 3,680 | ext: py | lang: Python
path: measures/over_under_exposure_measure/over_under_exposure_measure.py | repo: HensoldtOptronicsCV/ImageQualityAssessment | head: 7bb3af2cd20a32415966304c8fa3acb77c54f85d | licenses: ["MIT"]
stars: 8 (2020-06-12T12:49:19.000Z to 2021-04-27T12:10:49.000Z) | issues: null | forks: 5 (2020-04-18T11:30:47.000Z to 2022-03-04T07:05:21.000Z)
content:
# MIT License
#
# Copyright (c) 2020 HENSOLDT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Python implementation of the under/over-exposure measure. We focus on simplicity and readability rather than efficiency.
#
# This code is related to the paper
# M. Teutsch, S. Sedelmaier, S. Moosbauer, G. Eilertsen, T. Walter,
# "An Evaluation of Objective Image Quality Assessment for Thermal Infrared Video Tone Mapping", IEEE CVPR Workshops, 2020.
#
# Please cite the paper if you use the code for your evaluations.
# This measure was originally proposed here:
# G. Eilertsen, R. Mantiuk, J. Unger, "A comparative review of tone-mapping algorithms for high dynamic range video", Eurographics, 2017.
import numpy as np
import cv2
## Calculate the over- and under-exposure measure (number of over- and under-exposed pixels) for one given tone mapped LDR image.
# @param image_ldr Low Definition Range image (processed image after tone mapping).
## Calculate over- and under-exposure measure for all (already tone mapped) images in given path.
# @param images_ldr_path Directory path that contains the tone mapped images of one sequence.
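# NOTE: both function bodies are truncated in this dump; a minimal,
# hypothetical sketch of the per-image measure described above. The 2% / 95%
# intensity thresholds are assumptions, not values from the paper.
def over_under_exposure(image_ldr):
    img = image_ldr.astype(np.float32) / 255.0
    under_exposed = np.mean(img < 0.02)  # fraction of under-exposed pixels
    over_exposed = np.mean(img > 0.95)   # fraction of over-exposed pixels
    return under_exposed, over_exposed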
avg_line_length: 41.818182 | max_line_length: 137 | alphanum_fraction: 0.741848

---
hexsha: f54590a9d9506eac6f07374f1bb10c88ce804b14 | size: 2,567 | ext: py | lang: Python
path: tests/test_cascade.py | repo: mathDR/jax-pilco | head: c6c75cd8d43ba894d8f1da2cf6b7c0eea5e43527 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
from pilco.models.pilco import PILCO
import jax.numpy as jnp
import numpy as np
import objax
import os
import oct2py
import logging
oc = oct2py.Oct2Py(logger=oct2py.get_log())
oc.logger = oct2py.get_log("new_log")
oc.logger.setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath("__file__")) + "/tests/Matlab Code"
oc.addpath(dir_path)
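# NOTE: test_cascade() is truncated out of this dump; a minimal, hypothetical
# placeholder that only checks the Octave bridge responds.
def test_cascade():
    assert oc.eval("1 + 1") == 2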
if __name__ == "__main__":
test_cascade()
avg_line_length: 27.602151 | max_line_length: 86 | alphanum_fraction: 0.638878

---
hexsha: f545d16ab5e716cdb065a0e70787360e7d612aef | size: 275 | ext: py | lang: Python
path: server/services/main.py | repo: Jordonkopp/Flask-Vue | head: db842f1a31f2ca4cf51ce1b2a927d6d2ad860c00 | licenses: ["MIT"]
stars: 2 (2019-02-27T16:55:01.000Z to 2019-02-27T20:23:29.000Z) | issues: 5 (2020-04-30T00:01:01.000Z to 2021-10-05T19:42:15.000Z) | forks: null
content:
from flask import Blueprint, redirect, url_for
from server.utils.core_utils import logger
# Create Blueprint
main = Blueprint("main", __name__)
# redirect when you visit /
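# NOTE: the route handler is truncated in this dump; a minimal, hypothetical
# handler matching the comment above (the redirect target is an assumption).
@main.route("/")
def index():
    logger.info("Redirecting / to the frontend")
    return redirect("/home")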
avg_line_length: 21.153846 | max_line_length: 46 | alphanum_fraction: 0.727273

---
hexsha: f5465045af39eda12ecdfeb4fa359c70d7f7cca7 | size: 528 | ext: py | lang: Python
path: api/migrations/0003_group_post.py | repo: KolesnikRV/api_final_yatube | head: 23fdd8b6c2a55ff5c70c62b58ecd69ff1dd23e7d | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
# Generated by Django 3.1.7 on 2021-03-07 06:43
from django.db import migrations, models
import django.db.models.deletion
avg_line_length: 25.142857 | max_line_length: 132 | alphanum_fraction: 0.63447

---
hexsha: f546b5a71740ed44a893660c0c2c42d95a14bc63 | size: 111 | ext: py | lang: Python
path: iflow/model/cflows/__init__.py | repo: WeitaoZC/iflow | head: 404ffdbeb27d9fae7d1350de6af84ed7bfdaad99 | licenses: ["MIT"]
stars: 11 (2020-11-01T06:03:57.000Z to 2022-03-10T01:14:03.000Z) | issues: 1 (2022-03-14T21:32:51.000Z to 2022-03-14T21:32:51.000Z) | forks: 2 (2021-02-03T02:41:14.000Z to 2021-06-08T16:31:02.000Z)
content:
from .odefunc import (ODEnet,
ODEfunc)
from .cnf import CNF
from .diffeq_layers import *
avg_line_length: 22.2 | max_line_length: 30 | alphanum_fraction: 0.621622

---
hexsha: f547d48b9bf65696e52de1543f4c4b442a9e0501 | size: 2,042 | ext: py | lang: Python
path: python/general-python/create-replica-and-download/createReplicaAndDownload.py | repo: claudeshyaka-esri/developer-support | head: 016940d74f92a78f362900ab5329aa88c27d0a43 | licenses: ["Apache-2.0"]
stars: 272 (2015-02-11T16:26:39.000Z to 2022-03-31T08:47:33.000Z) | issues: 254 (2015-02-11T01:12:35.000Z to 2021-04-22T22:14:20.000Z) | forks: 211 (2015-02-10T00:09:07.000Z to 2022-02-24T12:27:40.000Z)
content:
import urllib, urllib2, json, time, os
username = "username" #CHANGE
password = "password" #CHANGE
replicaURL = "feature service url/FeatureServer/createReplica" #CHANGE
replicaLayers = [0] #CHANGE
replicaName = "replicaTest" #CHANGE
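# NOTE: sendRequest() is used below but not defined in this truncated dump;
# a minimal, hypothetical helper matching its call sites (Python 2 / urllib2).
def sendRequest(request):
    response = urllib2.urlopen(request)
    return json.loads(response.read())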
print("Generating token")
url = "https://arcgis.com/sharing/rest/generateToken"
data = {'username': username,
'password': password,
'referer': "https://www.arcgis.com",
'f': 'json'}
request = urllib2.Request(url, urllib.urlencode(data))
jsonResponse = sendRequest(request)
token = jsonResponse['token']
print("Creating the replica")
data = {'f' : 'json',
'replicaName' : replicaName,
'layers' : replicaLayers,
'returnAttachments' : 'true',
'returnAttachmentsDatabyURL' : 'false',
'syncModel' : 'none',
'dataFormat' : 'filegdb',
'async' : 'true',
'token': token}
request = urllib2.Request(replicaURL, urllib.urlencode(data))
jsonResponse = sendRequest(request)
print(jsonResponse)
print("Pinging the server")
responseUrl = jsonResponse['statusUrl']
url = "{}?f=json&token={}".format(responseUrl, token)
request = urllib2.Request(url)
jsonResponse = sendRequest(request)
while not jsonResponse.get("status") == "Completed":
time.sleep(5)
request = urllib2.Request(url)
jsonResponse = sendRequest(request)
userDownloads = os.environ['USERPROFILE'] + "\\Downloads"
print("Downloading the replica. In case this fails note that the replica URL is: \n")
jres = jsonResponse['resultUrl']
url = "{0}?token={1}".format(jres, token)
print(url)
f = urllib2.urlopen(url)
with open(userDownloads + "\\" + os.path.basename(jres), "wb") as local_file:
local_file.write(f.read())
print("\n Finished!")
avg_line_length: 34.610169 | max_line_length: 85 | alphanum_fraction: 0.642018

---
hexsha: f54a3cae489a26d054375f7cc639c9b189e844de | size: 10,980 | ext: py | lang: Python
path: tests/test_dsl.py | head: 8a810fccaca0e089cd17d4a1c888da7bcb36063e | licenses: ["Apache-2.0"]
stars: null (repo: os-climate/declarative-trino-access-control) | issues: 4 (2022-01-15T14:37:21.000Z to 2022-03-26T12:42:24.000Z) (repo: os-climate/declarative-trino-access-control) | forks: null (repo: os-climate/osc-trino-acl-dsl)
content:
import re
import textwrap
import yaml
from osc_trino_acl_dsl.dsl2rules import dsl_to_rules
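# NOTE: the Table and User helper types are truncated out of this dump;
# minimal, hypothetical stand-ins matching how the functions below use them.
from dataclasses import dataclass
from typing import Set

@dataclass
class Table:
    catalog: str
    schema: str
    table: str

@dataclass
class User:
    user: str
    groups: Set[str]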
def rule_matches(rule: dict, table: Table, user: User) -> bool:
"""emulates trino rule matching semantics"""
if ("catalog" in rule) and (not re.fullmatch(rule["catalog"], table.catalog)):
return False
if ("schema" in rule) and (not re.fullmatch(rule["schema"], table.schema)):
return False
if ("table" in rule) and (not re.fullmatch(rule["table"], table.table)):
return False
if ("user" in rule) and (not re.fullmatch(rule["user"], user.user)):
return False
if "group" in rule:
x = [e for e in list(user.groups) if re.fullmatch(rule["group"], e)]
if len(x) == 0:
return False
return True
def first_matching_rule(user: User, table: Table, rules: list) -> dict:
for rule in rules:
if rule_matches(rule, table, user):
return rule
return None
def rule_permissions(user: User, table: Table, rules: dict) -> tuple:
assert type(rules) == dict
assert "catalogs" in rules
assert "schemas" in rules
assert "tables" in rules
crule = first_matching_rule(user, table, rules["catalogs"])
assert type(crule) == dict
assert "allow" in crule
allow = crule["allow"]
srule = first_matching_rule(user, table, rules["schemas"])
assert type(srule) == dict
assert "owner" in srule
owner = srule["owner"]
trule = first_matching_rule(user, table, rules["tables"])
assert type(trule) == dict
assert "privileges" in trule
privs = trule["privileges"]
return (allow, owner, privs)
_admin = ["SELECT", "INSERT", "DELETE", "OWNERSHIP"]
_public = ["SELECT"]
avg_line_length: 32.485207 | max_line_length: 105 | alphanum_fraction: 0.543443

---
hexsha: f54e716dfa472cc32b79479172fc0cb1532d563d | size: 1,028 | ext: py | lang: Python
path: setup.py | repo: henryk/byro-cnss | head: 77cc4d34a521879f9f225b473964b7384db306b1 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import os
from distutils.command.build import build
from django.core import management
from setuptools import find_packages, setup
try:
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except:
long_description = ''
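# NOTE: the CustomBuild class referenced below is truncated out of this dump;
# a minimal, hypothetical version that compiles Django translations during
# build, as byro plugins typically do.
class CustomBuild(build):
    def run(self):
        management.call_command('compilemessages', verbosity=1)
        build.run(self)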
cmdclass = {
'build': CustomBuild
}
setup(
name='byro-cnss',
version='0.0.1',
    description='Byro plugin for CNSS (Clausewitz-Netzwerk für Strategische Studien e.V.)',
    long_description=long_description,
    url='https://github.com/henryk/byro-cnss',
    author='Henryk Plötz',
author_email='henryk@ploetzli.ch',
license='Apache Software License',
install_requires=[],
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
cmdclass=cmdclass,
entry_points="""
[byro.plugin]
byro_cnss=byro_cnss:ByroPluginMeta
""",
)
avg_line_length: 23.363636 | max_line_length: 92 | alphanum_fraction: 0.696498

---
hexsha: f54f18f6eb1da6e577537fa0c7b336cc4d1057b5 | size: 2,181 | ext: py | lang: Python
path: utils/tensor_utils_test.py | repo: zhuchen03/federated | head: 6bbcdcb856759aa29daa9a510e7d5f34f6915010 | licenses: ["Apache-2.0"]
stars: 2 (2021-10-19T13:55:11.000Z to 2021-11-11T11:26:05.000Z) | issues: 2 (2021-11-10T20:22:35.000Z to 2022-02-10T04:44:40.000Z) | forks: 1 (2021-03-09T09:48:56.000Z to 2021-03-09T09:48:56.000Z)
content:
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from utils import tensor_utils
if __name__ == '__main__':
tf.test.main()
avg_line_length: 32.552239 | max_line_length: 79 | alphanum_fraction: 0.64099

---
hexsha: f54f50b36cac1b6f41d6778991e01f0570bbafab | size: 3,426 | ext: py | lang: Python
path: autonmap/__main__.py | repo: zeziba/AUTONMAP | head: 50a2ae5f0731bc919ccb8978c619d1432b447286 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python3
import logging.handlers
import sys
from sys import argv, modules
from os.path import join
from autonmap import cron_scheduler
from autonmap import launch_client
from autonmap import launch_server
from autonmap.server import server_config as sconfig
"""
This module allows autonmap to interact with the server and client process to
perform the tasks each is assigned.
"""
LOG_FILE = "/tmp/autonmap.log"
LOGGING_LEVEL = logging.INFO
logger = logging.getLogger(__name__)
logger.setLevel(LOGGING_LEVEL)
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE, when='midnight', backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
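# NOTE: the Log adapter used below is truncated out of this dump; a minimal,
# hypothetical file-like wrapper that forwards writes to the logger.
class Log:
    def __init__(self, log, level):
        self.log = log
        self.level = level

    def write(self, message):
        message = message.strip()
        if message:
            self.log.log(self.level, message)

    def flush(self):
        pass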
def main():
"""
Main routine
:return: None
"""
if len(argv) > 1:
print("Automated nMap Server/Client Manager")
if argv[1] == 'cron':
cron_scheduler.main()
elif argv[1] == "update":
if len(argv) == 3:
file_location = join(sconfig.get_base(), "work.txt")
if str(argv[2]).lower() == "delete":
with open(file_location, "w") as file:
pass # This empties the file of all contents
else:
with open(argv[2], "r") as infile:
with open(file_location, "w+") as outfile:
subnets = set()
for in_line in infile:
subnets.add(in_line)
for out_line in outfile:
subnets.add(out_line)
outfile.seek(0)
outfile.truncate()
for item in subnets:
outfile.write("{}\n".format(item))
elif len(argv) == 3:
if argv[2] in ['start', 'stop', 'update', 'report']:
if argv[1] == 'server':
sys.stdout = Log(log=logger, level=logging.INFO)
sys.stderr = Log(log=logger, level=logging.ERROR)
launch_server.main(argv[2])
elif argv[1] == 'client':
sys.stdout = Log(log=logger, level=logging.INFO)
sys.stderr = Log(log=logger, level=logging.ERROR)
launch_client.main(argv[2])
else:
print("Invalid arguments")
else:
print("Invalid arguments")
else:
print("Usage: {} {} {}".format("python3 -m autonmap",
"client|server|update", "start<client>|stop|report|update|"
"location<update>|delete<update>"))
print("Usage: {} {}".format("python3 -m autonmap", "cron"))
print("\t{} {}".format("python3 -m autonmap", "update ~/workfile.txt"))
print("Client script is located at: \n\t\t{}".format(modules[launch_client.__name__]))
print("The log is located in /tmp/autonmap.log")
if __name__ == "__main__":
main()
avg_line_length: 35.6875 | max_line_length: 98 | alphanum_fraction: 0.537069

---
hexsha: f54ff4d5dcb3a333a55f6c56d21b89f6d29ae597 | size: 6,166 | ext: py | lang: Python
path: src/logic_gradient.py | repo: Einzberg/BattlesnakeFun | head: 4276144c3ccfab66e7c9df4717681e305861f76a | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# import random
# from typing import List, Dict
import numpy as np
# import matplotlib.pyplot as plt
def get_info() -> dict:
"""
This controls your Battlesnake appearance and author permissions.
For customization options, see https://docs.battlesnake.com/references/personalization
TIP: If you open your Battlesnake URL in browser you should see this data.
"""
return {
"apiversion": "1",
"author": "Mex", # TODO: Your Battlesnake Username
"color": "#888889", # TODO: Personalize
"head": "silly", # TODO: Personalize
"tail": "curled", # TODO: Personalize
}
# Globals
food_weight = 9
snake_weight = -9
snake_head_weight = -2
wall_weight = -9
board_centre = 1
board_x = None
board_y = None
def gkern(l=10, scale=4):
"""\
creates gaussian kernel with side length `l` and a sigma of `sig`
"""
sig = (l-1)/3
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
kernel = np.outer(gauss, gauss)
return scale * kernel / np.max(kernel)
data = {
"turn": 14,
"board": {
"height": 11,
"width": 11,
"food": [
{"x": 5, "y": 5},
{"x": 9, "y": 0},
{"x": 2, "y": 6}
],
"hazards": [
{"x": 3, "y": 2}
],
"snakes": [
{
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
},
{
"id": "snake-b67f4906-94ae-11ea-bb37",
"name": "Another Snake",
"health": 16,
"body": [
{"x": 5, "y": 4},
{"x": 5, "y": 3},
{"x": 6, "y": 3},
{"x": 6, "y": 2}
],
"latency": "222",
"head": {"x": 5, "y": 4},
"length": 4,
"shout": "I'm not really sure...",
"squad": "",
"customizations":{
"color":"#26CF04",
"head":"silly",
"tail":"curled"
}
}
]
},
"you": {
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
}
}
if False:
board = centre_grad(data)
board_x, board_y = 11, 11
populate_other_snakes(board, data)
populate_food(board, data)
board = np.pad(board, 1, 'constant', constant_values=snake_weight)
# plt.imshow(np.rot90(np.fliplr(board)), interpolation='none', origin="lower")
# plt.show()
avg_line_length: 28.155251 | max_line_length: 105 | alphanum_fraction: 0.509569

---
hexsha: f55005f6eda0c8aeadd07c4aee7c84c8198766c5 | size: 11 | ext: py | lang: Python
path: src/__init__.py | repo: Peefy/StatisticalLearningMethod.Python | head: 7324d51b58932052bc518b9e82f64b76f0c39bf0 | licenses: ["Apache-2.0"]
stars: 1 (2018-10-05T08:20:50.000Z to 2018-10-05T08:20:50.000Z) | issues: null | forks: null
content:
# pdf.244
avg_line_length: 3.666667 | max_line_length: 9 | alphanum_fraction: 0.545455

---
hexsha: f5538c72ced0bc74b5e82bee2c3ce5f0a35952cd | size: 11,836 | ext: py | lang: Python
path: nuclear/help/help.py | head: 6726ba977a21e58b354a5c97f68639f84184be7a | licenses: ["MIT"]
stars: 4 (2019-07-04T20:41:06.000Z to 2020-04-23T18:17:33.000Z) (repo: igrek51/glue) | issues: null (repo: igrek51/cliglue) | forks: null (repo: igrek51/cliglue)
content:
import os
import sys
from dataclasses import dataclass, field
from typing import List, Set, Optional
from nuclear.builder.rule import PrimaryOptionRule, ParameterRule, FlagRule, CliRule, SubcommandRule, \
PositionalArgumentRule, ManyArgumentsRule, DictionaryRule, ValueRule
from nuclear.parser.context import RunContext
from nuclear.parser.keyword import format_var_names, format_var_name
from nuclear.parser.parser import Parser
from nuclear.parser.transform import filter_rules
from nuclear.parser.value import generate_value_choices
from nuclear.version import __version__
internal_options = {'--autocomplete', '--install-bash', '--install-autocomplete'}
avg_line_length: 35.22619 | max_line_length: 119 | alphanum_fraction: 0.710037

---
hexsha: f553c00e89c0f5a71a1f1863c8dfb6394c78b550 | size: 1,997 | ext: py | lang: Python
path: Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | head: 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | licenses: ["MIT"]
stars: 17 (2017-06-27T04:14:42.000Z to 2022-03-07T03:37:44.000Z) (repo: geoffroygivry/CyclopsVFX-Unity) | issues: 2 (2017-06-14T04:17:51.000Z to 2018-08-23T20:12:44.000Z) (repo: geoffroygivry/Cyclops-VFX) | forks: 2 (2019-03-18T06:18:33.000Z to 2019-08-14T21:07:53.000Z) (repo: geoffroygivry/CyclopsVFX-Unity)
content:
#The MIT License (MIT)
#
#Copyright (c) 2015 Geoffroy Givry
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import nuke
LookAtName = "LookAt"
avg_line_length: 46.44186 | max_line_length: 263 | alphanum_fraction: 0.725588

---
hexsha: f554392a1fb675c44914e72f8067a655af6c342c | size: 464 | ext: py | lang: Python
path: graphql/main.py | repo: py-in-the-sky/appengine-swapi | head: 824d770cd11e5510b2300d1e248a9474e3fde8c2 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
`main` is the top level module where AppEngine gets access
to your Flask application.
"""
from app import create_app
from config import config
from os import environ
if environ.get('SERVER_SOFTWARE', '').startswith('Development'):  # .get avoids a KeyError outside App Engine
app_config = config['development']
else:
app_config = config['production']
app = create_app(app_config)
# Note: We don't need to call run() since our application is
# embedded within the App Engine WSGI application server.
avg_line_length: 22.095238 | max_line_length: 60 | alphanum_fraction: 0.752155

---
hexsha: f5553f600d9e51ffdced6978931c7ede4d5b363d | size: 7,458 | ext: py | lang: Python
path: src/extract_features.py | repo: AymericBebert/MusicLearning | head: 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python3
# -*-coding:utf-8-*-
"""
This module is used to extract features from the data
"""
import numpy as np
from scipy.fftpack import fft
from scipy.fftpack.realtransforms import dct
import python_speech_features
eps = 0.00000001
def file_length(soundParams):
"""Returns the file length, in seconds"""
return soundParams[3] / soundParams[2]
def zcr(frame):
"""Computes zero crossing rate of frame"""
count = len(frame)
countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
return countZ / (count - 1)
def energy(frame):
"""Computes signal energy of frame"""
return np.sum(frame ** 2) / len(frame)
def energy_entropy(frame, numOfShortBlocks=10):
"""Computes entropy of energy"""
tfe = np.sum(frame ** 2) # total frame energy
L = len(frame)
subWinLength = int(np.floor(L / numOfShortBlocks))
if L != subWinLength * numOfShortBlocks:
frame = frame[0:subWinLength * numOfShortBlocks]
# subWindows is of size [numOfShortBlocks x L]
subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
# Compute normalized sub-frame energies:
s = np.sum(subWindows ** 2, axis=0) / (tfe + eps)
# Compute entropy of the normalized sub-frame energies:
entropy = -1 * np.sum(s * np.log2(s + eps))
return entropy
def spectral_centroid_and_spread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
C = (NUM / DEN) # Centroid
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN) # Spread
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def avg_mfcc(sound_obj, avg=True):
"""Extract the MFCC from the sound object"""
soundD = sound_obj["sound"] # raw data
sr = sound_obj["params"][2] # samplerate
# nf = sound_obj["params"][3] # nframes
all_mfcc = python_speech_features.mfcc(soundD, samplerate=sr, winlen=0.025, winstep=1)
if avg:
return np.mean(all_mfcc, axis=0)
return all_mfcc
def mfcc_init_filter_banks(fs, nfft):
"""Computes the triangular filterbank for MFCC computation"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = np.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** np.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = np.zeros((nFiltTotal, nfft))
nfreqs = np.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
        lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=int)  # np.int was removed from NumPy; use the builtin int
lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=int)  # np.int was removed from NumPy; use the builtin int
rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs
def mfcc(X, fbank, nceps=13):
"""Computes the MFCCs of a frame, given the fft mag"""
mspec = np.log10(np.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:nceps]
return ceps
def extract_all_features0(sound_obj):
"""Extract the features from the sound object"""
# fl = file_length(sound_obj["params"])
test_mfcc_avg = avg_mfcc(sound_obj)
# return np.concatenate(([fl], test_mfcc_avg))
return test_mfcc_avg
def features_labels0():
"""Give a name to each feature"""
return ["mfcc{}".format(i) for i in range(13)]
def extract_all_features(sound_obj, wins=None, steps=None):
"""Extract the features from the sound object"""
sr = sound_obj["params"][2] # samplerate
nbs = sound_obj["params"][3] # number of samples
if wins is None:
wins = int(0.050 * sr)
if steps is None:
steps = int(nbs/15 - wins)
# Signal normalization
signal = sound_obj["sound"]
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (np.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
curPos = steps // 2 # skip the very beginning
nFFT = wins // 2
# compute the triangular filter banks used in the mfcc calculation
#[fbank, _] = mfcc_init_filter_banks(sr, nFFT)
totalNumOfFeatures = 5 + 13
stFeatures = []
while curPos + wins - 1 < N: # for each short-term window until the end of signal
x = signal[curPos:curPos+wins] # get current window
curPos = curPos + steps # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
curFV = np.zeros(totalNumOfFeatures)
curFV[0] = zcr(x) # zero crossing rate
curFV[1] = energy(x) # short-term energy
curFV[2] = energy_entropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = spectral_centroid_and_spread(X, sr) # spectral centroid and spread
# curFV[5] = stSpectralEntropy(X) # spectral entropy
# curFV[6] = stSpectralFlux(X, Xprev) # spectral flux
# curFV[7] = stSpectralRollOff(X, 0.90, sr) # spectral rolloff
# curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy() # MFCCs
#
# chromaNames, chromaF = stChromaFeatures(X, sr, nChroma, nFreqsPerChroma)
# curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
# curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
#curFV[5:18] = mfcc(X, fbank, 13)
#curFV[0:13] = mfcc(X, fbank, 13)
curFV[5:18] = python_speech_features.mfcc(x, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# TEMP
#curFV = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins, winstep=steps).T
stFeatures.append(curFV)
# stFeatures = np.array(stFeatures)
stFeatures = np.concatenate(stFeatures, 0).flatten()
#stFeatures = np.mean(stFeatures, axis=0)
# stFeatures = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# stFeatures = np.mean(stFeatures, axis=0)
return stFeatures
# sound_obj2 = sound_obj.copy()
# sound_obj2["sound"] = signal
#
# # fl = file_length(sound_obj["params"])
# test_mfcc_avg = avg_mfcc(sound_obj2)
# # return np.concatenate(([fl], test_mfcc_avg))
# return test_mfcc_avg
def features_labels():
"""Give a name to each feature"""
return ["zrc", "energy", "en_ent", "centr", "spread"] + ["mfcc{}".format(i) for i in range(13)]
avg_line_length: 34.850467 | max_line_length: 121 | alphanum_fraction: 0.616519

---
hexsha: f558d1166458b00b259c7deac962e45e929e8c73 | size: 255 | ext: py | lang: Python
path: src/gme/estimate/__init__.py | repo: USITC-Gravity-Group/GME | head: 640e1cd6a571e6802a62b5fdcb00544f3b8c0b32 | licenses: ["CC0-1.0"]
stars: 10 (2018-10-17T18:50:08.000Z to 2021-11-05T22:27:45.000Z) | issues: 7 (2020-06-03T20:04:10.000Z to 2021-03-31T13:59:01.000Z) | forks: 6 (2020-05-12T12:43:55.000Z to 2022-02-25T08:47:17.000Z)
content:
from .combine_sector_results import *
from .DiagnosticsLog import *
from .EstimationModel import *
from .format_regression_table import *
from .save_and_load import *
from .SlimResults import *
from .Specification import *
from .visualize_results import *
avg_line_length: 31.875 | max_line_length: 38 | alphanum_fraction: 0.815686

---
hexsha: f5592d87345b5a481da2afaed4ea4665c57dc09d | size: 2,435 | ext: py | lang: Python
path: tools/blender/io_export_curve.py | repo: waskosky/patches | head: f80a33eb6fd029b905aca55894ec7a7526b89042 | licenses: ["MIT"]
stars: 187 (2015-09-21T15:08:57.000Z to 2017-07-31T08:01:22.000Z) | issues: 1,533 (2015-09-15T23:49:33.000Z to 2017-08-01T08:52:00.000Z) | forks: 52 (2015-10-11T10:42:50.000Z to 2017-07-16T22:31:42.000Z)
content:
# Part of the Engi-WebGL suite.
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
from mathutils import *
from functools import reduce
import os, sys, os.path, bpy, bmesh, math, struct, base64, itertools
bl_info = {
'name': 'Curve Export (.json)',
'author': 'Lasse Nielsen',
'version': (0, 2),
'blender': (2, 72, 0),
'location': 'File > Export > Curve (.json)',
'description': 'Curve Export (.json)',
'category': 'Import-Export'
}
# Compress number representation to save as much space as possible.
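# NOTE: the exporter operator and its registration are truncated in this dump;
# minimal, hypothetical hooks using the Blender 2.7x add-on API so the entry
# point below does not fail.
def register():
    bpy.utils.register_module(__name__)

def unregister():
    bpy.utils.unregister_module(__name__)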
if __name__ == '__main__':
register()
avg_line_length: 24.59596 | max_line_length: 85 | alphanum_fraction: 0.657906

---
hexsha: f5599fb599f6ac244f63777232a27937cf321454 | size: 2,678 | ext: py | lang: Python
path: organize/filters/mimetype.py | repo: tank0226/organize | head: d5595a52f06ea6c805fe421dcc2429a3ccd03b09 | licenses: ["MIT"]
stars: 1,231 (2018-01-13T17:06:24.000Z to 2022-03-31T22:14:36.000Z) | issues: 170 (2018-03-13T19:15:17.000Z to 2022-03-31T10:14:15.000Z) | forks: 86 (2018-03-14T02:12:49.000Z to 2022-03-27T00:16:07.000Z)
content:
import mimetypes
from pathlib import Path
from organize.utils import DotDict, flatten
from .filter import Filter
avg_line_length: 26.514851 | max_line_length: 125 | alphanum_fraction: 0.5295

---
hexsha: f55a03a501c8713245dc76b3760e3ffdd100d23e | size: 1,857 | ext: py | lang: Python
path: third_party/conan/recipes/libprotobuf-mutator/conanfile.py | repo: tufeigunchu/orbit | head: 407354cf7c9159ff7e3177c603a6850b95509e3a | licenses: ["BSD-2-Clause"]
stars: 1,847 (2020-03-24T19:01:42.000Z to 2022-03-31T13:18:57.000Z) | issues: 1,100 (2020-03-24T19:41:13.000Z to 2022-03-31T14:27:09.000Z) | forks: 228 (2020-03-25T05:32:08.000Z to 2022-03-31T11:27:39.000Z)
content:
from conans import ConanFile, CMake, tools
avg_line_length: 36.411765 | max_line_length: 82 | alphanum_fraction: 0.622509

---
hexsha: f55c8c9f40e1cf4319ff4ee1c9422d7c3883f725 | size: 524 | ext: py | lang: Python
path: animation/common.py | repo: codyly/locomotion-by-mann | head: 89139466829ef7802bf645f865e335d4cda444e4 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import numpy as np
VEC_FORWARD = np.array([0, 0, 1])
VEC_UP = np.array([0, 1, 0])
VEC_RIGHT = np.array([1, 0, 0])
STYLE_NOMOVE = np.array([1, 0, 0, 0, 0, 0])
STYLE_TROT = np.array([0, 1, 0, 0, 0, 0])
STYLE_JUMP = np.array([0, 0, 1, 0, 0, 0])
STYLE_SIT = np.array([0, 0, 0, 1, 0, 0])
STYLE_STAND = np.array([0, 0, 0, 0, 1, 0])
STYLE_LAY = np.array([0, 0, 0, 0, 0, 1])
NUM_STYLES = 6
SYS_FREQ = 60
DURATION = 9
NUM_QUERIES = SYS_FREQ * DURATION
MOCAP_SAMPLE_PATH = "animation/data/mocap-sample.txt"
avg_line_length: 23.818182 | max_line_length: 54 | alphanum_fraction: 0.593511

---
hexsha: f55da49181d53035411252526f6236de7beb9882 | size: 2,222 | ext: py | lang: Python
path: codes3d/build_gene_index.py | repo: Genome3d/codes3d-v1 | head: fe4897cb07bd8b2c10cfc29defe8570d447b69e0 | licenses: ["MIT"]
stars: null | issues: 4 (2018-10-25T02:09:37.000Z to 2019-06-27T20:50:27.000Z) | forks: null
content:
#!/usr/bin/python
import argparse,codes3d,configparser, os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a BED file detailing the locations of genes in the genome, and a database containing additional gene information. Note: If a file in .gtf format is supplied, no other arguments are required.")
parser.add_argument("-i","--gene_files",required=True,nargs='+',help="The gene file/s to be indexed; either in tabular format, or, by default, the .gtf file format, as supplied by the GTEx project.")
parser.add_argument("-g","--symbol_col",type=int,help="The index of the column containing the gene symbol (non-zero based; default: ).")
parser.add_argument("-c","--chr_col",type=int,help="The index of the column containing the chromosome name (non-zero based; default: ).")
parser.add_argument("-s","--start_col",type=int,help="The index of the column containing the gene start site (non-zero based; default: ).")
parser.add_argument("-e","--end_col",type=int,help="The index of the column containing the gene end site (non-zero based; default: ).")
parser.add_argument("-p","--p_threshold_col",type=int,help="The index of the column containing the GTEx p-threshold for this gene (optional; non-zero based; default: ).")
parser.add_argument("-H","--no_header",action="store_true",help="Use this option if the table has no header.")
parser.add_argument("-b","--output_bed_fp",help="The path to which to output the resultant BED file of gene locations (default: the input file name with the extension \".bed\").")
parser.add_argument("-o","--output_db",help="The path to which to output the resultant gene index database (default: the input file name with the extension \".db\").")
parser.add_argument("-C","--config_file",default=os.path.join(os.path.dirname(__file__),"../docs/codes3d.conf"),help="The configuration file specifying the location of the CoDeS3D library (default: docs/codes3d.conf).")
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config_file)
codes3d.build_gene_index(args.gene_files,args.output_bed_fp,args.output_db,config,args.symbol_col,args.chr_col,args.start_col,args.end_col,args.p_threshold_col,args.no_header)
avg_line_length: 96.608696 | max_line_length: 246 | alphanum_fraction: 0.756976

---
hexsha: f55e3e29a41fea6104e2a766525f7a160ac34c13 | size: 5,900 | ext: py | lang: Python
path: Kinematic/forward.py | repo: DDDong2666/tum-adlr-ws20-02 | head: 2e439886e0287777589cd276d614fd03bea4ed0c | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import numpy as np
from Optimizer.path import get_x_substeps
from Kinematic import frames, chain as kc
# General
def frames2spheres(f, robot):
"""
x_spheres (n_samples, n_wp, n_links, n_dim)
"""
return frames2pos(f, frame_idx=robot.spheres_frame_idx, rel_pos=robot.spheres_position)
def frames2spheres_jac(f, j, robot):
"""
x_spheres (n_samples, n_wp, n_spheres, n_dim)
dx_dq (n_samples, n_wp, n_dof, n_spheres, n_dim)
"""
x_spheres = frames2spheres(f=f, robot=robot)
dx_dq = (j[:, :, :, robot.spheres_frame_idx, :, :] @ robot.spheres_position[:, :, np.newaxis])[..., :-1, 0]
return x_spheres, dx_dq
# nfi - next frame index
# iff - influence frames of a frame
# Helper
# Combine functions
def create_frames_dict(f, nfi):
"""
Create a dict to minimize the calculation of unnecessary transformations between the frames
The value to the key 0 holds all transformations form the origin to the whole chain.
Each next field holds the transformation from the current frame to all frames to come.
The calculation happens from back to front, to save some steps
# 0 1 2 3 4
# F01
# F02 F12
# F03 F13 F23
# F04 F14 F24 F34
# F05 F15 F25 F35 F45
"""
n_frames = f.shape[-3]
d = {}
for i in range(n_frames - 1, -1, -1):
nfi_i = nfi[i]
if nfi_i == -1:
d[i] = f[..., i:i + 1, :, :]
elif isinstance(nfi_i, (list, tuple)):
d[i] = np.concatenate([
f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ np.concatenate([d[j] for j in nfi_i], axis=-3)],
axis=-3)
else:
d[i] = np.concatenate([f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ d[nfi_i]], axis=-3)
return d
avg_line_length: 33.908046 | max_line_length: 132 | alphanum_fraction: 0.597119

---
hexsha: f560897ff46b99cf1a7890d1251f2fa26c8a2e3a | size: 977 | ext: py | lang: Python
path: dnslookup.py | repo: r1nzler/dnslookup | head: 74613614b694602244582bfd555ffd8a5dea8bff | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import dns.resolver
import dns.ipv4
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', "--list", help="List of dns names you want IP's for")
parser.add_argument('-o', "--output", help="Output file to save list")
args = parser.parse_args()
ip_list = []  # resolved IP addresses
if args.list:
    subs = open(args.list, 'r', newline='')  # open only once a list was given
    for host in subs:
        host = host.strip('\n')
        # str.strip() removes a set of characters, not a prefix; strip the
        # URL scheme explicitly instead
        if host.startswith('https://'):
            host = host[len('https://'):]
        elif host.startswith('http://'):
            host = host[len('http://'):]
# print(host)
try:
i = dns.resolver.query(host,'A' )
#print(i.rrset.items[0])
for item in i:
if not item in ip_list:
ip_list.append(item)
print(item)
        except Exception:
            pass  # skip hosts that fail to resolve
if args.output:
file = open(args.output, "w")
for p in ip_list:
file.write(str(p))
file.write("\n")
file.close()
avg_line_length: 27.914286 | max_line_length: 79 | alphanum_fraction: 0.518936

---
hexsha: f5609c24bd958aa1dc8093dff8643942d2269130 | size: 8,416 | ext: py | lang: Python
path: eval/report.py | repo: DBCobra/CobraBench | head: d48697248948decc206cfba0a6e40fea8a772ff9 | licenses: ["MIT"]
stars: 1 (2021-03-03T06:52:50.000Z to 2021-03-03T06:52:50.000Z) | issues: 1 (2021-03-05T09:36:50.000Z to 2021-03-08T12:02:53.000Z) | forks: 1 (2021-03-03T06:57:02.000Z to 2021-03-03T06:57:02.000Z)
content:
import pandas
import numpy as np
import math
import os
import sys
import re
from utils import *
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
percentiles = [ 10, 25, 50, 75, 90, 95, 99, 99.9 ]
DATA_FOLDER = DIR_PATH + '/data'
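# NOTE: main() and the report helpers are truncated in this dump; a minimal,
# hypothetical entry point that just lists the data files it would process.
def main():
    for name in sorted(os.listdir(DATA_FOLDER)):
        print(name)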
if __name__ == "__main__":
main()
avg_line_length: 33.52988 | max_line_length: 110 | alphanum_fraction: 0.557153

---
hexsha: f560efe52fd0d8fc1e6638e6bf52578a71fd2927 | size: 1,821 | ext: py | lang: Python
path: platypush/backend/foursquare.py | repo: RichardChiang/platypush | head: 1777ebb0516118cdef20046a92caab496fa7c6cb | licenses: ["MIT"]
stars: 228 (2018-01-30T11:17:09.000Z to 2022-03-24T11:22:26.000Z) | issues: 167 (2017-12-11T19:35:38.000Z to 2022-03-27T14:45:30.000Z) | forks: 16 (2018-05-03T07:31:56.000Z to 2021-12-05T19:27:37.000Z; path: platypush/backend/foursquare/__init__.py, repo: BlackLight/runbullet, head: 8d26c8634d2677b4402f0a21b9ab8244b44640db)
content:
from typing import Optional
from platypush.backend import Backend
from platypush.context import get_plugin
from platypush.message.event.foursquare import FoursquareCheckinEvent
# vim:sw=4:ts=4:et:
| 34.358491
| 115
| 0.697968
|
f560ffe95556ccc11b3d6d39837b76f47f81ba08
| 2,980
|
py
|
Python
|
src/data/make_dataset.py
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null |
src/data/make_dataset.py
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null |
src/data/make_dataset.py
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null |
import os
import time
import pandas as pd
from src.utils import get_project_root
from src.data.item_names_replacement import REPLACE_DICT1, REPLACE_DICT2
YEARS = [str(x) for x in list(range(2013,2021))]
ROOT_DIR = get_project_root()
def load_data(data_abs_path: str) -> pd.DataFrame:
"""Load raw data
Parameters:
-----------
data_abs_path: absolute path of csv data
Returns:
--------
data_df: raw data dataframe
"""
data_df = pd.read_csv(data_abs_path)
data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, format='%Y-%m-%d', utc=True)
data_df.set_index('sales_datetime', inplace=True)
return data_df
| 40.27027
| 125
| 0.632215
|
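A sketch of how load_data above might be called; the CSV location is hypothetical, only the sales_datetime handling comes from the record.
import os
csv_path = os.path.join(ROOT_DIR, "data", "raw", "sales.csv")  # hypothetical path
data_df = load_data(csv_path)
print(data_df.index.min(), data_df.index.max())  # datetime index built from sales_datetime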
f5617dd2284793a4d37b296ffc5aba3ca5a2e5d9
| 1,143
|
py
|
Python
|
aiml_bot/utilities.py
|
hosford42/pyaiml
|
42bb344d5f1d75c136e512bd05a44945d506f490
|
[
"BSD-2-Clause"
] | 9
|
2017-08-17T08:34:44.000Z
|
2021-01-06T16:08:09.000Z
|
aiml_bot/utilities.py
|
hosford42/pyaiml
|
42bb344d5f1d75c136e512bd05a44945d506f490
|
[
"BSD-2-Clause"
] | 2
|
2017-08-17T19:53:41.000Z
|
2020-01-22T23:19:44.000Z
|
aiml_bot/utilities.py
|
hosford42/pyaiml
|
42bb344d5f1d75c136e512bd05a44945d506f490
|
[
"BSD-2-Clause"
] | 1
|
2018-07-29T19:16:14.000Z
|
2018-07-29T19:16:14.000Z
|
"""
This file contains assorted general utility functions used by other
modules in the aiml_bot package.
"""
# TODO: Correctly handle abbreviations.
def split_sentences(text: str) -> list:
"""Split the string s into a list of sentences."""
if not isinstance(text, str):
raise TypeError(text)
position = 0
results = []
length = len(text)
while position < length:
try:
period = text.index('.', position)
except ValueError:
period = length + 1
try:
question = text.index('?', position)
except ValueError:
question = length + 1
try:
exclamation = text.index('!', position)
except ValueError:
exclamation = length + 1
end = min(period, question, exclamation)
sentence = text[position:end].strip()
if sentence:
results.append(sentence)
position = end + 1
# If no sentences were found, return a one-item list containing
# the entire input string.
if not results:
results.append(text.strip())
# print(results)
return results
| 29.307692
| 67
| 0.594051
|
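A quick check of the behaviour split_sentences implements above: '.', '?' and '!' act as terminators and are consumed rather than kept.
sentences = split_sentences("Hello. How are you? Fine!")
print(sentences)  # ['Hello', 'How are you', 'Fine']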
f565e620ce2b4fec57d532c3907bb966211865f1
| 5,858
|
py
|
Python
|
hard-gists/5181631/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/5181631/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/5181631/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
import os, time, random
from collections import defaultdict
from System import Console, ConsoleColor, ConsoleKey
from System.Threading import Thread, ThreadStart
if __name__ == "__main__":
screen = Screen(); logic = GameLogic(); stats = Stastics(); fruit = Fruit(); snake = Snake()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
while logic.update(screen, snake, fruit, stats): time.sleep(0.05)
logic.end()
| 59.171717
| 177
| 0.669
|
f566d3437e302ac56089e454a2ea9560ed781683
| 14,376
|
py
|
Python
|
dttpy/dttdata.py
|
neouniverse/dttpy
|
c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7
|
[
"MIT"
] | null | null | null |
dttpy/dttdata.py
|
neouniverse/dttpy
|
c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7
|
[
"MIT"
] | null | null | null |
dttpy/dttdata.py
|
neouniverse/dttpy
|
c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7
|
[
"MIT"
] | null | null | null |
#
#! coding:utf-8
import xml.etree.ElementTree as ET
from xml.etree import ElementTree
import base64
import binascii
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
SubType = {'1':'ASD','2':'CSD','3':'TF','4':'???','5':'COH'}
average_type = {'0':'Fixed','1':'Exponential','2':'Accumulative'} # not confirmed
window_type = {'0':'Uniform','1':'Hanning','2':'Flat-top',
               '3':'Welch','4':'Bartlet','5':'BMH'} # not confirmed
| 38.438503
| 81
| 0.483097
|
f56710ff85a90ed722496b29dbe8a6afdffc8f9d
| 2,291
|
py
|
Python
|
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Program to build a graph based on dense input features (embeddings).
This is a wrapper around the `nsl.tools.build_graph` API. See its documentation
for more details.
USAGE:
`python graph_builder.py` [*flags*] *input_features.tfr... output_graph.tsv*
For details about this program's flags, run `python graph_builder.py --help`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from neural_structured_learning.tools import graph_builder_lib
import tensorflow as tf
def _main(argv):
"""Main function for running the graph_builder program."""
flag = flags.FLAGS
flag.showprefixforinfo = False
if len(argv) < 3:
raise app.UsageError(
'Invalid number of arguments; expected 2 or more, got %d' %
(len(argv) - 1))
graph_builder_lib.build_graph(argv[1:-1], argv[-1], flag.similarity_threshold,
flag.id_feature_name,
flag.embedding_feature_name)
if __name__ == '__main__':
flags.DEFINE_string(
'id_feature_name', 'id',
"""Name of the singleton bytes_list feature in each input Example
whose value is the Example's ID.""")
flags.DEFINE_string(
'embedding_feature_name', 'embedding',
"""Name of the float_list feature in each input Example
whose value is the Example's (dense) embedding.""")
flags.DEFINE_float(
'similarity_threshold', 0.8,
"""Lower bound on the cosine similarity required for an edge
to be created between two nodes.""")
# Ensure TF 2.0 behavior even if TF 1.X is installed.
tf.compat.v1.enable_v2_behavior()
app.run(_main)
| 34.19403
| 80
| 0.717154
|
f56a3c3291794639e68ab580cfe7cfde7175ba0c
| 11,672
|
py
|
Python
|
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
import torch
from torch.utils import data
import sys
from sklearn.utils import shuffle
import numpy as np
import argparse
import matplotlib.pyplot as plt
def get_PostSet(pcounts_name = "opt_pcounts", pcounts_path = "results/metadata",
pc_split=0.1, seed = 0,
metadata_name = "opt_tags", metadata_path = "results/metadata",
bias_top=1, bias_normal=1):
"""
ONLY VALID FOR METADATA THAT IS A LIST FOR EACH SONG
"""
# LOAD PCOUNTS AND METADATA
pcounts = torch.load(pcounts_path + "/" + pcounts_name) #list
index2 = int(len(pcounts)*(1 - pc_split))
pcounts = shuffle(pcounts, random_state=seed)[index2:] # Test partition
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
# CHANGE METADATA
print("Metadata2num and opt_pcounts to dict...")
idx_metadata = {} # same as metadata but using the index of meta2idx
for i in range(len(metadata)):
if metadata[i] == -1:
idx_metadata[i] = -1
else:
idx_metadata[i] = [meta2idx[m] for m in metadata[i]]
dict_pcounts = {}
for i in range(len(pcounts)):
dict_pcounts[i] = pcounts[i]
# USER META COUNT
print("Before filtering users without metadata,", len(pcounts))
user2class_counts = {}
total = len(dict_pcounts)
for b, user in enumerate(list(dict_pcounts.keys())):
print(" {0:0.3f}% \r".format((b+1.)*100./total), end="")
class_counts = torch.zeros(Nclasses)
for song in dict_pcounts[user]:
if idx_metadata[song] != -1:
class_counts[idx_metadata[song]] += 1
if (class_counts != 0).any():
user2class_counts[user] = class_counts.data.tolist()
else:
del dict_pcounts[user]
# GET TOP CLASS
print("After filtering users without metadata,", len(user2class_counts), len(dict_pcounts))
user2topclass = {}
for user in user2class_counts.keys():
user2topclass[user] = idx2meta[torch.argmax(torch.tensor(user2class_counts[user])).data.tolist()]
# SPLIT INTO [SONGS, TOP CLASS SONGS, TOP TAG]
user2topsongs = {}
user2normalsongs = {}
total = len(dict_pcounts)
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
top = []
normal = []
Ntop = 0
for song in dict_pcounts[user]:
if metadata[song] != -1:
if (user2topclass[user] in metadata[song]) and Ntop<100:
top += [song]
Ntop += 1
else:
normal += [song]
else:
normal += [song]
user2topsongs[user] = top
user2normalsongs[user] = normal
# DELETE USERS (BIAS_TOP, BIAS_NORMAL)
predict_dataset = []
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
if len(user2topsongs[user]) >= bias_top and len(user2normalsongs[user]) >= bias_normal:
predict_dataset += [[user2normalsongs[user], user2topsongs[user], user2topclass[user]]]
print("# Users (after deleting top<{}, inp<{}): ".format(bias_top, bias_normal), len(predict_dataset))
torch.save(predict_dataset, metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_topclass2Ntopclass(bias_top=1, bias_normal=1, metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating topclass2Ntopclass...")
PostSet = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
topclass2Ntopclass = {}
for b, (inp, out, c) in enumerate(PostSet):
if c not in list(topclass2Ntopclass.keys()): topclass2Ntopclass[c] = 0
topclass2Ntopclass[c] += 1
torch.save(topclass2Ntopclass, metadata_path + "/topclass2Ntopclass_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_class2song(metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating class2song...")
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
class2song = {c:[] for c in meta}
total = len(metadata)
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
if metadata[i] == -1: continue
for c in metadata[i]:
class2song[c] += [i]
torch.save(class2song, metadata_path + "/{}2song".format(metadata_name))
return
def get_class2vector(metadata_path="results/metadata", metadata_name="opt_tags", Nsongs=180198):
print("Calculating get_class2vector...")
class2song = torch.load(metadata_path + "/{}2song".format(metadata_name))
_, meta = torch.load(metadata_path + "/" + metadata_name) # for idx2meta
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
total = len(class2song)
class2vector = torch.zeros(total,Nsongs).long()
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
class2vector[i][class2song[idx2meta[i]]] = 1
torch.save(class2vector, metadata_path + "/{}2vector".format(metadata_name))
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bias_top', type=int, default=1, help="Minimum number of songs in user_topsongs to be taken in care")
parser.add_argument('--bias_normal', type=int, default=1, help="Minimum number of songs in user_normalsongs to be taken in care")
parser.add_argument('--Nsongs', type=int, default=180198, help="Number of different songs")
parser.add_argument('--metadata_name', type=str, default="opt_tags", help="Name of the metadata to use")
parser.add_argument('--metadata_path', type=str, default="results/metadata", help="Path of the metadata to use")
parser.add_argument('--pcounts_name', type=str, default="opt_pcounts", help="Name of the pcounts to use")
parser.add_argument('--pcounts_path', type=str, default="results/metadata", help="Path of the pcounts to use")
parser.add_argument('--TODO', nargs='+', type=str, default=["all"], help="Things to calculate")
args = parser.parse_args()
if args.TODO == ["all"]: args.TODO = ["postset", "topclass2Ntopclass", "class2song", "class2vector"]
print("METADATA: {}\nBIAS TOP: {}\nBIAS NORMAL: {}\n".format(args.metadata_name, args.bias_top, args.bias_normal))
if "postset" in args.TODO:
get_PostSet(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path, pcounts_name=args.pcounts_name, pcounts_path=args.pcounts_path)
if "topclass2Ntopclass" in args.TODO:
get_topclass2Ntopclass(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2song" in args.TODO:
get_class2song(metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2vector" in args.TODO:
get_class2vector(metadata_name=args.metadata_name, metadata_path=args.metadata_path, Nsongs=args.Nsongs)
| 36.936709
| 199
| 0.706306
|
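The core inversion that get_class2song performs above, in isolation on a tiny hypothetical metadata list (-1 marks songs without tags, matching the record's convention).
metadata = [["rock"], ["rock", "pop"], -1, ["pop"]]
class2song = {c: [] for c in ["rock", "pop"]}
for i, tags in enumerate(metadata):
    if tags == -1:
        continue  # skip untagged songs
    for c in tags:
        class2song[c].append(i)
print(class2song)  # {'rock': [0, 1], 'pop': [1, 3]}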
f56aef37015ae46f5772b8eb36d680a12e113fe7
| 892
|
py
|
Python
|
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
import json
from typing import List
from LocationObject import LocationObject
# add geocoding for each location
| 27.030303
| 89
| 0.602018
|
f56b8e9802094da8814e591262fd9b96c9698428
| 736
|
py
|
Python
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
django + celery + redis
python manage.py migrate -- looks at the INSTALLED_APPS setting and creates any necessary database tables according to the database settings in your mysite/settings.py file and the database migrations shipped with the app
python manage.py runserver -- starts the lightweight development web server
python manage.py startapp app_name -- creates the directory structure for a new app
python manage.py makemigrations app_name -- creates new migration files from model changes
python manage.py sqlmigrate app_name 0001 -- prints the SQL for the named migration
'''
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "picha.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 33.454545
| 221
| 0.777174
|
f56b9c719e339cbfa0c390fd236dda0208636e27
| 7,786
|
py
|
Python
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | 16
|
2020-09-05T16:03:40.000Z
|
2022-03-19T17:42:05.000Z
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from sqlalchemy.orm import sessionmaker
import nfp.servicos.model as tables
from nfp import CONEXAO
# ---------------- Module functions ------
def selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id)
def get_id_tarefa_remota(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_id_tarefa_remota(tarefa_id)
def get_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_tarefa(tarefa_id)
def reativar_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.reativar_tarefa(tarefa_id)
# ----------------------------------------
if __name__ == "__main__":
pass
| 32.041152
| 96
| 0.595171
|
f56c33ff6b67b94fb127b4ea54ac62ad0efe9506
| 2,458
|
py
|
Python
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 6
|
2017-08-11T23:53:43.000Z
|
2019-10-25T17:34:11.000Z
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 2
|
2019-05-07T01:36:08.000Z
|
2021-03-31T18:40:11.000Z
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 2
|
2018-12-11T09:43:15.000Z
|
2019-10-25T18:19:05.000Z
|
# -*- coding: utf-8 -*-
"""
Artifactory repository endpoint
"""
__copyright__ = "Copyright (C) 2016 Veritas Technologies LLC. All rights reserved."
# project imports
from ..http import HTTP
from .repotype import RepositoryType
from .virtual import Virtual
from .local import Local
from .remote import Remote
# define all repo types
REPO_TYPE = {
"local": Local,
"remote": Remote,
"virtual": Virtual,
}
| 27.311111
| 83
| 0.579333
|
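REPO_TYPE above reads as a small factory table; a sketch of how such a mapping is typically dispatched (the constructor signature is assumed, not taken from the record).
def make_repository(repo_type, *args, **kwargs):
    try:
        cls = REPO_TYPE[repo_type]
    except KeyError:
        raise ValueError("unknown repository type: %s" % repo_type)
    return cls(*args, **kwargs)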
f56e6fbda99325c6509cd93be29f620a11819e74
| 2,887
|
py
|
Python
|
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
"""Adoption application."""
from flask import Flask, request, redirect, render_template
from models import db, connect_db, Pets
from wtforms import StringField, IntegerField, TextAreaField, BooleanField
from wtforms.validators import DataRequired,InputRequired,AnyOf,URL, NumberRange
from flask_wtf import FlaskForm
from petfunctions import get_random_pet
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adopt'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
connect_db(app)
db.create_all()
from flask_debugtoolbar import DebugToolbarExtension
app.config['SECRET_KEY'] = 'SOSECRET'
debug = DebugToolbarExtension(app)
| 29.459184
| 114
| 0.66505
|
f56f1c7317138379cc46e4bc9738fe0615922706
| 17,810
|
py
|
Python
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
"""
Utilities for (weighted) bootstrap resampling applied to geoscientific point-data.
"""
import numpy as np
import pandas as pd
from .meta import subkwargs
from .spatial import great_circle_distance, _get_sqare_grid_segment_indicies
from .log import Handle
logger = Handle(__name__)
try:
import sklearn
HAVE_SKLEARN = True
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
HAVE_SKLEARN = False
def _segmented_univariate_distance_matrix(
A, B, distance_metric, dtype="float32", segs=10
):
"""
A method to generate a point-to-point distance matrix in segments to be softer
on memory requirements yet retain precision (e.g. beyond a few thousand points).
Parameters
-----------
A, B : :class:`numpy.ndarray`
Numpy arrays with positions of points.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
dtype : :class:`str` | :class:`numpy.dtype`
Data type to use for the matrix.
segs : :class:`int`
        Number of segments to split the matrix into (note that this will effectively
        be squared - i.e. 10 -> 100 individual segments).
Returns
-------
dist : :class:`numpy.ndarray`
2D point-to-point distance matrix.
"""
max_size = np.max([a.shape[0] for a in [A, B]])
dist = np.zeros((max_size, max_size), dtype=dtype) # full matrix
    # note that this could be parallelized; the calculations are independent
for ix_s, ix_e, iy_s, iy_e in _get_sqare_grid_segment_indicies(max_size, segs):
dist[ix_s:ix_e, iy_s:iy_e] = distance_metric(
A[ix_s:ix_e][:, np.newaxis], B[iy_s:iy_e][np.newaxis, :],
)
return dist
def univariate_distance_matrix(a, b=None, distance_metric=None):
"""
Get a distance matrix for a single column or array of values (here used for ages).
Parameters
-----------
a, b : :class:`numpy.ndarray`
Points or arrays to calculate distance between. If only one array is
specified, a full distance matrix (i.e. calculate a point-to-point distance
for every combination of points) will be returned.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
Returns
-------
:class:`numpy.ndarray`
2D distance matrix.
"""
if distance_metric is None:
distance_metric = lambda a, b: np.abs(a - b)
    a = np.atleast_1d(np.array(a).astype(float))
full_matrix = False
if b is not None:
# a second set of points is specified; the return result will be 1D
        b = np.atleast_1d(np.array(b).astype(float))
else:
# generate a full point-to-point matrix for a single set of points
full_matrix = True
b = a.copy()
return _segmented_univariate_distance_matrix(a, b, distance_metric)
def get_spatiotemporal_resampling_weights(
df,
spatial_norm=1.8,
temporal_norm=38,
latlong_names=["Latitude", "Longitude"],
age_name="Age",
max_memory_fraction=0.25,
normalized_weights=True,
**kwargs
):
"""
Takes a dataframe with lat, long and age and returns a sampling weight for each
    sample which is essentially the inverse of the mean distance to other samples.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to calculate weights for.
spatial_norm : :class:`float`
Normalising constant for spatial measures (1.8 arc degrees).
temporal_norm : :class:`float`
Normalising constant for temporal measures (38 Mya).
latlong_names : :class:`list`
List of column names referring to latitude and longitude.
age_name : :class:`str`
Column name corresponding to geological age or time.
max_memory_fraction : :class:`float`
Constraint to switch to calculating mean distances where :code:`matrix=True`
and the distance matrix requires greater than a specified fraction of total
        available physical memory. This is passed on to
:func:`~pyrolite.util.spatial.great_circle_distance`.
normalized_weights : :class:`bool`
Whether to renormalise weights to unity.
Returns
--------
weights : :class:`numpy.ndarray`
Sampling weights.
Notes
------
    This function is equivalent to Eq(1) from Keller and Schoene:
.. math::
W_i \\propto 1 \\Big / \\sum_{j=1}^{n} \\Big ( \\frac{1}{((z_i - z_j)/a)^2 + 1} + \\frac{1}{((t_i - t_j)/b)^2 + 1} \\Big )
"""
weights = pd.Series(index=df.index, dtype="float")
z = great_circle_distance(
df[[*latlong_names]],
absolute=False,
max_memory_fraction=max_memory_fraction,
**subkwargs(kwargs, great_circle_distance)
) # angular distances
_invnormdistances = np.zeros_like(z)
# where the distances are zero, these weights will go to inf
# instead we replace with the smallest non-zero distance/largest non-inf
# inverse weight
norm_inverse_distances = 1.0 / ((z / spatial_norm) ** 2 + 1)
norm_inverse_distances[~np.isfinite(norm_inverse_distances)] = 1
_invnormdistances += norm_inverse_distances
# ages - might want to split this out as optional for spatial resampling only?
t = univariate_distance_matrix(df[age_name])
norm_inverse_time = 1.0 / ((t / temporal_norm) ** 2 + 1)
norm_inverse_time[~np.isfinite(norm_inverse_time)] = 1
_invnormdistances += norm_inverse_time
weights = 1.0 / np.sum(_invnormdistances, axis=0)
if normalized_weights:
weights = weights / weights.sum()
return weights
def add_age_noise(
df,
min_sigma=50,
noise_level=1.0,
age_name="Age",
age_uncertainty_name="AgeUncertainty",
min_age_name="MinAge",
max_age_name="MaxAge",
):
"""
Add gaussian noise to a series of geological ages based on specified uncertainties
or age ranges.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe with age data within which to look up the age name and add noise.
min_sigma : :class:`float`
Minimum uncertainty to be considered for adding age noise.
noise_level : :class:`float`
        Scaling of the noise added to the ages. By default the uncertainties are unscaled,
        but where age uncertainties are specified and are the one standard deviation level
this can be used to expand the range of noise added (e.g. to 2SD).
age_name : :class:`str`
Column name for absolute ages.
age_uncertainty_name : :class:`str`
        Name of the column specifying absolute age uncertainties.
min_age_name : :class:`str`
Name of the column specifying minimum absolute ages (used where uncertainties
are otherwise unspecified).
max_age_name : :class:`str`
Name of the column specifying maximum absolute ages (used where uncertainties
are otherwise unspecified).
Returns
--------
df : :class:`pandas.DataFrame`
Dataframe with noise-modified ages.
Notes
------
This modifies the dataframe which is input - be aware of this if using outside
of the bootstrap resampling for which this was designed.
"""
# try and get age uncertainty
try:
age_uncertainty = df[age_uncertainty_name]
except KeyError:
# otherwise get age min age max
# get age uncertainties
age_uncertainty = (
np.abs(df[max_age_name] - df[min_age_name]) / 2
) # half bin width
age_uncertainty[
~np.isfinite(age_uncertainty) | age_uncertainty < min_sigma
] = min_sigma
# generate gaussian age noise
age_noise = np.random.randn(df.index.size) * age_uncertainty.values
age_noise *= noise_level # scale the noise
# add noise to ages
df[age_name] += age_noise
return df
def spatiotemporal_bootstrap_resample(
df,
columns=None,
uncert=None,
weights=None,
niter=100,
categories=None,
transform=None,
boostrap_method="smooth",
add_gaussian_age_noise=True,
metrics=["mean", "var"],
default_uncertainty=0.02,
relative_uncertainties=True,
noise_level=1,
age_name="Age",
latlong_names=["Latitude", "Longitude"],
**kwargs
):
"""
Resample and aggregate metrics from a dataframe, optionally aggregating by a given
set of categories. Formulated specifically for dealing with resampling to address
uneven sampling density in space and particularly geological time.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to resample.
columns : :class:`list`
Columns to provide bootstrap resampled estimates for.
uncert : :class:`float` | :class:`numpy.ndarray` | :class:`pandas.Series` | :class:`pandas.DataFrame`
Fractional uncertainties for the dataset.
weights : :class:`numpy.ndarray` | :class:`pandas.Series`
Array of weights for resampling, if precomputed.
niter : :class:`int`
Number of resampling iterations. This will be the minimum index size of the output
metric dataframes.
categories : :class:`list` | :class:`numpy.ndarray` | :class:`pandas.Series`
        List of sample categories to group the outputs by, which has the same size as the
dataframe index.
transform
Callable function to transform input data prior to aggregation functions. Note
that the outputs will need to be inverse-transformed.
boostrap_method : :class:`str`
Which method to use to add gaussian noise to the input dataset parameters.
add_gaussian_age_noise : :class:`bool`
        Whether to add gaussian noise to the input dataset ages, where present.
metrics : :class:`list`
List of metrics to use for dataframe aggregation.
default_uncertainty : :class:`float`
Default (fractional) uncertainty where uncertainties are not given.
relative_uncertainties : :class:`bool`
Whether uncertainties are relative (:code:`True`, i.e. fractional proportions
of parameter values), or absolute (:code:`False`)
noise_level : :class:`float`
Multiplier for the random gaussian noise added to the dataset and ages.
age_name : :class:`str`
Column name for geological age.
latlong_names : :class:`list`
        Column names for latitude and longitude, or equivalent orthogonal spherical
spatial measures.
Returns
--------
:class:`dict`
Dictionary of aggregated Dataframe(s) indexed by statistical metrics. If
categories are specified, the dataframe(s) will have a hierarchical index of
:code:`categories, iteration`.
"""
    # uncertainty management ###########################################################
uncertainty_type = None
if uncert is not None:
if isinstance(uncert, float):
uncertainty_type = "0D" # e.g. 2%
elif isinstance(uncert, (list, pd.Series)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim < 2
):
uncertainty_type = "1D" # e.g. [0.5%, 1%, 2%]
# shape should be equal to parameter column number
elif isinstance(uncert, (pd.DataFrame)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim >= 2
):
uncertainty_type = "2D" # e.g. [[0.5%, 1%, 2%], [1.5%, 0.6%, 1.7%]]
# shape should be equal to parameter column number by rows
else:
raise NotImplementedError("Unknown format for uncertainties.")
# weighting ########################################################################
# generate some weights for resampling - here addressing specifically spatial
# and temporal resampling
if weights is None:
weights = get_spatiotemporal_resampling_weights(
df,
age_name=age_name,
latlong_names=latlong_names,
**subkwargs(kwargs, get_spatiotemporal_resampling_weights)
)
# to efficiently manage categories we can make sure we have an iterable here
if categories is not None:
if isinstance(categories, (list, tuple, pd.Series, np.ndarray)):
pass
elif isinstance(categories, str) and categories in df.columns:
categories = df[categories]
else:
msg = "Categories unrecognized"
raise NotImplementedError(msg)
# column selection #################################################################
# get the subset of parameters to be resampled, removing spatial and age names
# and only taking numeric data
subset = columns or [
c
for c in df.columns
if c not in [[i for i in df.columns if age_name in i], *latlong_names]
and np.issubdtype(df.dtypes[c], np.number)
]
# resampling #######################################################################
metric_data = {_metric_name(metric): [] for metric in metrics}
# samples are independent, so this could be processed in parallel
for repeat in range(niter):
# take a new sample with replacement equal in size to the original dataframe
smpl = df.sample(weights=weights, frac=1, replace=True)
        # whether to specifically add noise to the geological ages
# note that the metadata around age names are passed through to this function
# TODO: Update to have external disambiguation of ages/min-max ages,
# and just pass an age series to this function.
if add_gaussian_age_noise:
smpl = add_age_noise(
smpl,
min_sigma=50,
age_name=age_name,
noise_level=noise_level,
**subkwargs(kwargs, add_age_noise)
)
# transform the parameters to be estimated before adding parameter noise?
if transform is not None:
smpl[subset] = smpl[subset].apply(transform, axis="index")
# whether to add parameter noise, and if so which method to use?
# TODO: Update the naming of this? this is only one part of the bootstrap process
if boostrap_method is not None:
# try to get uncertainties for the data, otherwise use standard deviations?
if boostrap_method.lower() == "smooth":
# add random noise within uncertainty bounds
# this is essentially smoothing
# consider modulating the noise model using the covariance structure?
# this could be done by individual group to preserve varying covariances
# between groups?
if uncert is None:
noise = (
smpl[subset].values
* default_uncertainty
* np.random.randn(*smpl[subset].shape)
) * noise_level
else:
noise = np.random.randn(*smpl[subset].shape) * noise_level
if uncertainty_type in ["0D", "1D"]:
# this should work if a float or series is passed
noise *= uncert
else:
# need to get indexes of the sample to look up uncertainties
# need to extract indexes for the uncertainties, which might be arrays
arr_idxs = df.index.take(smpl.index).values
noise *= uncert[arr_idxs, :]
if relative_uncertainties:
noise *= smpl[subset].values
smpl[subset] += noise
elif (boostrap_method.upper() == "GP") or (
"process" in bootstrap_method.lower()
):
# gaussian process regression to adapt to covariance matrix
msg = "Gaussian Process boostrapping not yet implemented."
raise NotImplementedError(msg)
else:
msg = "Bootstrap method {} not recognised.".format(boostrap_method)
raise NotImplementedError(msg)
# whether to independently estimate metric values for individual categories?
        # TODO: Should the categories argument be used to generate individual
# bootstrap resampling processes?
if categories is not None:
for metric in metrics:
metric_data[_metric_name(metric)].append(
smpl[subset].groupby(categories).agg(metric)
)
else: # generate the metric summaries for the overall dataset
for metric in metrics:
metric_data[_metric_name(metric)].append(smpl[subset].agg(metric))
    # assemble the per-iteration metric summaries into the output dataframes
    if categories is not None:
        # the dataframe will be indexed by categories and iteration
return {
metric: pd.concat(data, keys=range(niter), names=["Iteration"])
.swaplevel(0, 1)
.sort_index()
for metric, data in metric_data.items()
}
else:
        # the dataframe will be indexed by iteration of the bootstrap
        # TODO: add a named iteration level to this index?
return {metric: pd.DataFrame(data) for metric, data in metric_data.items()}
| 40.022472
| 131
| 0.614935
|
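A numeric sketch of the Eq(1) weighting from the docstring above, using only the temporal term on three synthetic ages; it illustrates the idea rather than reproducing the full function.
import numpy as np
ages = np.array([100.0, 105.0, 500.0])     # two clustered samples, one isolated
t = np.abs(ages[:, None] - ages[None, :])  # pairwise age distances
b = 38.0                                   # temporal norm from the docstring
weights = 1.0 / np.sum(1.0 / ((t / b) ** 2 + 1), axis=0)
print(weights / weights.sum())             # the isolated sample gets the largest weight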
f56fadc40b0f5bac091cf8c15f4a134f11cb883f
| 49
|
py
|
Python
|
sfrmaker/test/test_nhdplus_utils.py
|
mnfienen/sfrmaker
|
f3ee175c67c80df15bff509235d9a6218bfc6b0b
|
[
"CC0-1.0"
] | 17
|
2015-08-15T02:20:04.000Z
|
2020-04-30T17:36:21.000Z
|
sfrmaker/test/test_nhdplus_utils.py
|
rfrederiksen/sfrmaker
|
7e66d67d6cb0ad84fbb9994402f0baaf5b3fcd01
|
[
"CC0-1.0"
] | 15
|
2015-03-04T16:57:13.000Z
|
2020-01-14T16:29:18.000Z
|
sfrmaker/test/test_nhdplus_utils.py
|
rfrederiksen/sfrmaker
|
7e66d67d6cb0ad84fbb9994402f0baaf5b3fcd01
|
[
"CC0-1.0"
] | 9
|
2015-08-18T14:15:07.000Z
|
2020-04-28T18:45:21.000Z
|
# TODO: add unit tests for test_nhdplus_utils.py
| 24.5
| 48
| 0.795918
|
f570043bcd7ec43faf876327124a5a21c6d01798
| 1,809
|
py
|
Python
|
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import SequenceLearning
sns.set(style='white', palette='colorblind', context='poster')
np.random.seed(0)
'''how to use'''
# init
n_param, n_branch = 16, 4
pad_len = 0
n_parts = 2
n_samples = 256
p_rm_ob_enc = 0
p_rm_ob_rcl = 0
n_rm_fixed = False
task = SequenceLearning(
n_param, n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc,
p_rm_ob_rcl=p_rm_ob_rcl,
n_rm_fixed=n_rm_fixed,
)
# take sample
X, Y = task.sample(n_samples, to_torch=False)
print(f'X shape = {np.shape(X)}, n_example x time x x-dim')
print(f'Y shape = {np.shape(Y)}, n_example x time x y-dim')
'''visualize the sample'''
# pick a sample
i = 0
x, y = X[i], Y[i]
cmap = 'bone'
x_split = np.split(x, (n_param, n_param + n_branch), axis=1)
mat_list = x_split + [y]
f, axes = plt.subplots(
2, 4, figsize=(14, 11), sharey=True,
gridspec_kw={
'width_ratios': [n_param, n_branch, n_param, n_branch],
'height_ratios': [n_param, n_param]
},
)
title_list = ['Observed feature', 'Observed value',
'Queried feature', 'Queried value']
ylabel_list = ['Part one', 'Part two']
for i, mat in enumerate(mat_list):
[mat_p1, mat_p2] = np.split(mat, [n_param], axis=0)
axes[0, i].imshow(mat[:n_param, :], cmap=cmap)
axes[1, i].imshow(mat[n_param:, :], cmap=cmap)
axes[0, i].set_title(title_list[i], fontname='Helvetica')
axes[0, i].set_xticks([])
for i in [1, 3]:
axes[1, i].set_xticks(range(n_branch))
    axes[1, i].set_xticklabels([str(j) for j in np.arange(n_branch) + 1])
for i in range(2):
axes[i, 0].set_yticks(np.arange(0, n_param, 5))
axes[i, 0].set_ylabel(ylabel_list[i], fontname='Helvetica')
f.tight_layout()
f.savefig(f'examples/figs/stimulus-rep.png', dpi=100, bbox_inches='tight')
| 28.265625
| 74
| 0.666667
|
f571719391b271f64aa33623e91452b85398b280
| 704
|
py
|
Python
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | 1
|
2021-06-13T18:08:50.000Z
|
2021-06-13T18:08:50.000Z
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Custom exceptions
"""
from __future__ import annotations
__all__ = [
"AlreadyRegistered",
"ConsumerError",
"EventBusError",
"UnknownEvent",
]
| 16.761905
| 79
| 0.661932
|
f572b933b1b5aed70aca3d4ac6ade4a2e8fe1e58
| 9,580
|
py
|
Python
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 11
|
2020-11-01T11:35:30.000Z
|
2022-03-30T02:19:52.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 8
|
2020-12-13T12:17:38.000Z
|
2021-12-21T21:04:27.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | null | null | null |
from sparse_ct.tool import plot_grid
from sparse_ct.data import image_to_sparse_sinogram
from sparse_ct.reconstructor_2d import (
IRadonReconstructor,
SartReconstructor,
SartTVReconstructor,
DgrReconstructor,
SartBM3DReconstructor)
import logging
logging.basicConfig(
filename='dgr_example_32_35.log',
filemode='a',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG
)
if __name__ == "__main__":
#
test("../data/shepp_logan.jpg", "shepp_logan_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct2.jpg", "ct2_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct1.jpg", "ct1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004013_02_01_119.png", "LoDoPaB1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004017_01_01_151.png", "LoDoPaB2_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004028_01_04_109.png", "LoDoPaB3_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004043_01_01_169.png", "LoDoPaB4_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004049_04_01_062.png", "LoDoPaB5_32_35", n_proj=32, noise_pow=35.0)
| 38.167331
| 93
| 0.423173
|
f5735cd931c6cb22e8fa362f8340421fcf372c3d
| 1,340
|
py
|
Python
|
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | 3
|
2021-06-17T00:06:15.000Z
|
2022-01-26T03:53:51.000Z
|
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | null | null | null |
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings
from ..models.pydantic.database import DatabaseURL
p: Path = Path(__file__).parents[2] / ".env"
config: Config = Config(p if p.exists() else None)
DATABASE: str = config("POSTGRES_DB", cast=str)
DB_USER: Optional[str] = config("POSTGRES_USER", cast=str, default=None)
DB_PASSWORD: Optional[str] = config(
"POSTGRES_PASSWORD", cast=str, default=None
)
DB_HOST: str = config("DB_HOST", cast=str, default="postgres_db")
DB_PORT: int = config("DB_PORT", cast=int, default=5432)
DATABASE_CONFIG: DatabaseURL = DatabaseURL(
drivername="asyncpg",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
ALEMBIC_CONFIG: DatabaseURL = DatabaseURL(
drivername="postgresql+psycopg2",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
REDIS_IP: str = config("REDIS_IP", cast=str, default="redis")
REDIS_PORT: int = config("REDIS_PORT", cast=int, default=6379)
REDIS_PASSWORD: Optional[str] = config("REDIS_PASSWORD", cast=str, default=None)
ARQ_BACKGROUND_FUNCTIONS: Optional[CommaSeparatedStrings] = config(
"ARQ_BACKGROUND_FUNCTIONS", cast=CommaSeparatedStrings, default=None
)
| 31.162791
| 72
| 0.750746
|
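Both DatabaseURL configs above follow the usual SQLAlchemy URL shape; a sketch of rendering one by hand, assuming DatabaseURL exposes its fields as attributes (not verified against the project's model).
def render_url(cfg):
    # driver://user:password@host:port/database
    return "{}://{}:{}@{}:{}/{}".format(
        cfg.drivername, cfg.username, cfg.password,
        cfg.host, cfg.port, cfg.database,
    )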
f5738865aace2f3446a95a35c7f51b460031ae67
| 1,607
|
py
|
Python
|
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
fruit = str(input())
day_of_the_week = str(input())
quantity = float(input())
price = 0
if fruit in ('banana', 'apple', 'orange', 'grapefruit', 'kiwi', 'pineapple', 'grapes'):
    if day_of_the_week in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'):
if fruit == 'banana':
price = 2.50
elif fruit == 'apple':
price = 1.20
elif fruit == 'orange':
price = 0.85
elif fruit == 'grapefruit':
price = 1.45
elif fruit == 'kiwi':
price = 2.70
elif fruit == 'pineapple':
price = 5.50
elif fruit == 'grapes':
price = 3.85
total_price = quantity * price
print(f'{total_price:.2f}')
    elif day_of_the_week in ('Saturday', 'Sunday'):
if fruit == 'banana':
price = 2.70
elif fruit == 'apple':
price = 1.25
elif fruit == 'orange':
price = 0.90
elif fruit == 'grapefruit':
price = 1.60
elif fruit == 'kiwi':
price = 3
elif fruit == 'pineapple':
price = 5.60
elif fruit == 'grapes':
price = 4.20
total_price = quantity * price
print(f'{total_price:.2f}')
else:
print('error')
else:
print('error')
| 21.716216
| 71
| 0.47542
|
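The branch ladder above is really two price tables; a sketch of the same logic as dict lookups, with prices copied from the record.
WEEKDAY = {'banana': 2.50, 'apple': 1.20, 'orange': 0.85, 'grapefruit': 1.45,
           'kiwi': 2.70, 'pineapple': 5.50, 'grapes': 3.85}
WEEKEND = {'banana': 2.70, 'apple': 1.25, 'orange': 0.90, 'grapefruit': 1.60,
           'kiwi': 3.00, 'pineapple': 5.60, 'grapes': 4.20}
def price_for(fruit, day, quantity):
    if day in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'):
        table = WEEKDAY
    elif day in ('Saturday', 'Sunday'):
        table = WEEKEND
    else:
        return 'error'
    if fruit not in table:
        return 'error'
    return '{:.2f}'.format(quantity * table[fruit])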
f573e98c3617ee161a5bc2f46171d1b7f2905fc3
| 1,368
|
py
|
Python
|
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
import unittest
from trajectories.dynamic_time_warper import *
from trajectories.trajectory import Trajectory
from trajectories.point import Point
if __name__ == '__main__':
unittest.main()
| 33.365854
| 93
| 0.54386
|
f57687a33470d7205bc87af05ce7973af384b2a0
| 235
|
py
|
Python
|
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | 3
|
2021-09-21T18:50:20.000Z
|
2021-12-14T13:07:31.000Z
|
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
while True:
h1,m1,h2,m2=map(int,input().split())
i=f=0
if m1+m2+h1+h2==0:break
if h1==0:i=(24*60)+m1
else:i=(h1*60)+m1
if h2==0:f=(24*60)+m2
else:f=(h2*60)+m2
print(f-i) if f>i else print((24*60)-(i-f))
| 23.5
| 47
| 0.52766
|
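The record above computes elapsed minutes across midnight with a special case for hour 0; an equivalent using modulo arithmetic (note the original prints 1440 when start equals end, which a bare % 1440 would turn into 0).
def duration_minutes(h1, m1, h2, m2):
    start, end = h1 * 60 + m1, h2 * 60 + m2
    return (end - start) % 1440 or 1440  # equal times count as a full day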
f578b94c9410c26b42768750abeeeeadcdf0cd8f
| 4,820
|
py
|
Python
|
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | null | null | null |
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | 4
|
2020-02-11T23:47:01.000Z
|
2021-06-10T21:12:36.000Z
|
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect
from .models import *
from .forms import *
import os
from django.http import HttpResponse
from portfolio_pj import settings
# Create your views here.
# Private Methods
| 41.551724
| 111
| 0.700415
|
f5792851b55e8b741f344366679574e04969bc93
| 1,022
|
py
|
Python
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | 1
|
2022-02-08T19:35:22.000Z
|
2022-02-08T19:35:22.000Z
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
import datetime
from ..databases.postgresql import session
from ..models.bookmark_model import Bookmark
# Select one
# Insert
# Update
# Count by post id
| 31.9375
| 116
| 0.720157
|
f57b07d03e45e8f7fc9d99adb6fc72590a4d7edd
| 3,326
|
py
|
Python
|
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#
# User management application
#
"""
pythoncgi
1. (get)
2. (post)
3. (post)
4. (post)
1. REQUEST_METHOD getpost
2. QUERY_STRING
3. subprocess.getoutput, os.system shell
4.
grep ^root /etc/passwd
useradd user-name
usermod user-name
userdel user-name
"""
import os
import sys
import subprocess as sub
def read_user_info(name):
""" /etc/passwd """
db = '/etc/passwd'
info = [line.split(':') for line in open(db).read().splitlines()]
user_info = [i for i in info if i[0] == name]
    if not user_info:  # no such user
return
user_info = user_info[0]
colnames = ('name', 'password', 'uid', 'gid', 'comment', 'home', 'shell')
return dict(zip(colnames, user_info))
if __name__ == '__main__':
headers = []
headers.append('Content-Type: text/plain')
if os.getenv('REQUEST_METHOD') == 'GET':
params = os.getenv('QUERY_STRING', '')
get_user_info(params, headers)
elif os.getenv('REQUEST_METHOD') == 'POST':
alter_user(headers)
else:
headers.append('Status: 405 METHOD_NOT_ALLOWED')
response(headers, [])
| 27.262295
| 77
| 0.591401
|
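A usage sketch for read_user_info above; the printed fields assume a typical /etc/passwd root entry.
info = read_user_info('root')
if info:
    print(info['name'], info['uid'], info['shell'])  # e.g. root 0 /bin/bash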
f57b96c36aa134214c43fa41994f0ddf57c913f2
| 1,979
|
py
|
Python
|
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | 2
|
2018-10-11T17:34:51.000Z
|
2021-04-26T20:51:45.000Z
|
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | null | null | null |
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | null | null | null |
bl_info = {
"name": "Universal Exporter",
"category": "Import & Export",
}
import bpy
if __name__ == "__main__":
register()
| 32.983333
| 69
| 0.458312
|
f57c524ea058c9eaac99f335f5d9b80e94762f25
| 2,024
|
py
|
Python
|
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
import random
# Parameters
states_num: int = 900
trans_per_state: int = 3
transitions_num: int = trans_per_state * states_num
num_non_zero_start_probs: int = 2
emit_range: int = 20
file_name: str = "random_" + \
str(states_num) + "_" + str(transitions_num) + "_" + \
str(emit_range) + "_" + str(num_non_zero_start_probs) + ".chmm"
# Implicit parameter for probabilities generation
rng_range: int = 100
# Generation
with open(file_name, 'w') as f:
f.write(str(states_num) + '\n')
# Start probabilities pairs info
start_probs: list = generate_probability_list(num_non_zero_start_probs)
f.write(str(num_non_zero_start_probs) + '\n')
for i in range(num_non_zero_start_probs):
f.write(str(i) + ' ' + start_probs[i] + '\n')
# Emissions probabilities for each state
f.write(str(emit_range) + '\n')
for _ in range(states_num):
emit_probs: list = generate_probability_list(emit_range)
emit_str: str = ' '.join(emit_probs) + '\n'
f.write(emit_str)
# Transitions info
f.write(str(transitions_num) + '\n')
for src in range(states_num):
used_dst: list = []
for _ in range(trans_per_state):
dst: int = random.randrange(states_num)
while (dst in used_dst):
dst = random.randrange(states_num)
used_dst.append(dst)
trans_probs: list = generate_probability_list(trans_per_state)
for i in range(trans_per_state):
f.write(str(src) + ' ' + str(used_dst[i]) +
' ' + trans_probs[i] + '\n')
| 32.126984
| 77
| 0.64081
|
f57e7874385469f7089b57659f20f37fe7da9980
| 1,764
|
py
|
Python
|
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
# Given a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.
#
# This is case sensitive, for example "Aa" is not considered a palindrome here.
#
# Note:
# Assume the length of given string will not exceed 1,010.
#
#
# Example:
#
# Input:
# "abccccdd"
#
# Output:
# 7
#
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
#
#
#
# @lc app=leetcode id=409 lang=python3
#
# [409] Longest Palindrome
#
# https://leetcode.com/problems/longest-palindrome/description/
#
# algorithms
# Easy (48.27%)
# Likes: 547
# Dislikes: 56
# Total Accepted: 100.6K
# Total Submissions: 208.5K
# Testcase Example: '"abccccdd"'
#
# Given a string which consists of lowercase or uppercase letters, find the
# length of the longest palindromes that can be built with those letters.
#
# This is case sensitive, for example "Aa" is not considered a palindrome
# here.
#
# Note:
# Assume the length of given string will not exceed 1,010.
#
#
# Example:
#
# Input:
# "abccccdd"
#
# Output:
# 7
#
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
#
#
#
| 20.275862
| 148
| 0.616213
|
f57f22bf388fb4aa2a7b99663c5c1b62f0a9da4f
| 108
|
py
|
Python
|
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | 6
|
2020-05-26T08:49:10.000Z
|
2022-01-03T17:44:19.000Z
|
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | null | null | null |
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | 1
|
2021-08-23T06:59:05.000Z
|
2021-08-23T06:59:05.000Z
|
from django.apps import AppConfig
| 18
| 35
| 0.796296
|
f580e360a82ba7dad75ab77286f0111cf9d43ab3
| 392
|
py
|
Python
|
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
import socketio
sio = socketio.Client()
sio.connect('http://localhost:5000')
sio.wait()
| 17.818182
| 57
| 0.660714
|
f582dcf3f1bc8baf921c638fcb41729df76ff930
| 2,042
|
py
|
Python
|
ricecooker/utils/libstudio.py
|
elaeon/ricecooker
|
e5ef13478625b6996775ae7690e027140bc63373
|
[
"MIT"
] | null | null | null |
ricecooker/utils/libstudio.py
|
elaeon/ricecooker
|
e5ef13478625b6996775ae7690e027140bc63373
|
[
"MIT"
] | 1
|
2019-04-20T07:14:04.000Z
|
2019-04-20T07:14:04.000Z
|
ricecooker/utils/libstudio.py
|
nucleogenesis/ricecooker
|
7525f842e34f5bbb37d1f2d3c85872faa32724ff
|
[
"MIT"
] | null | null | null |
import requests
import requests_cache
requests_cache.install_cache()
from ricecooker.config import LOGGER
STUDIO_URL = 'https://studio.learningequality.org'
NODES_ENDPOINT = STUDIO_URL + '/api/get_nodes_by_ids_complete/'
LICENSES_LIST_ENDPOINT = STUDIO_URL + '/api/license'
# TODO https://studio.learningequality.org/api/get_node_path/ca8f380/18932/41b2549
# TODO http://develop.studio.learningequality.org/api/channel/094097ce6f395ec0b50aabd04943c6b3
| 37.127273
| 94
| 0.664055
|
f583aafa3eab4133dcbce8cce69eba93bfd77474
| 2,163
|
py
|
Python
|
Python/kruskal.py
|
AtilioA/algoritmos-teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | 2
|
2020-05-14T14:12:45.000Z
|
2020-09-07T20:44:23.000Z
|
Python/kruskal.py
|
AtilioA/teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | null | null | null |
Python/kruskal.py
|
AtilioA/teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | null | null | null |
# Supposedly doesn't work
from aresta import Aresta
from insert import insert_sort
from collections import defaultdict
if __name__ == "__main__":
arestas = list()
# arestas.append(Aresta(1, 'a', 'b'))
# arestas.append(Aresta(8, 'a', 'c'))
# arestas.append(Aresta(3, 'c', 'b'))
# arestas.append(Aresta(4, 'b', 'd'))
# arestas.append(Aresta(2, 'd', 'e'))
# arestas.append(Aresta(3, 'b', 'e'))
# arestas.append(Aresta(-1, 'c', 'd'))
# arestas.append(Aresta(13, '0', '3'))
# arestas.append(Aresta(24, '0', '1'))
# arestas.append(Aresta(13, '0', '2'))
# arestas.append(Aresta(22, '0', '4'))
# arestas.append(Aresta(13, '1', '3'))
# arestas.append(Aresta(22, '1', '2'))
# arestas.append(Aresta(13, '1', '4'))
# arestas.append(Aresta(19, '2', '3'))
# arestas.append(Aresta(14, '2', '4'))
# arestas.append(Aresta(19, '3', '4'))
arestas.append(Aresta(2, "0", "1"))
arestas.append(Aresta(-10, "0", "3"))
arestas.append(Aresta(3, "0", "2"))
arestas.append(Aresta(5, "1", "2"))
arestas.append(Aresta(0, "1", "3"))
arestas.append(Aresta(4, "2", "3"))
grafo = kruskal(arestas)
print("Imprimindo rvore geradora mnima:")
for aresta in grafo:
print(f"Peso {aresta.peso:2}: {aresta.first:1} para {aresta.second:2}")
| 33.796875
| 90
| 0.609801
|
f58515c1c70e7d555b4680ca39bcd04616159789
| 1,983
|
py
|
Python
|
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys
__author__ = "Paulo Sérgio dos Santos Araujo"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "paulo.araujo [at] splab.ufcg.edu.br"
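# Hedged addition: the Msn class used below is not part of this dump. A
# minimal sketch consistent with the calls that follow; eval() on the eq
# string, the grid scan in findRoots and its step count are all assumptions:
class Msn:
    def __init__(self, eq, tol, alg):
        self.f = lambda x: eval(eq)
        self.tol = tol
        self.alg = alg

    def _next(self, a, b):
        if self.alg == "bisection":
            return (a + b) / 2
        # false_position: x-intercept of the secant through (a,f(a)), (b,f(b))
        return (a * self.f(b) - b * self.f(a)) / (self.f(b) - self.f(a))

    def _root(self, a, b):
        m = self._next(a, b)
        while abs(self.f(m)) > self.tol:
            if self.f(a) * self.f(m) <= 0:
                b = m
            else:
                a = m
            m = self._next(a, b)
        return m

    def findRoots(self, a, b, steps=100):
        step = (b - a) / steps
        x = a
        while x < b:
            if self.f(x) * self.f(x + step) <= 0:
                print(self._root(x, x + step))
            x += step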
if __name__ == "__main__":
msn = Msn(eq="-x**2 + 3", tol=0.01, alg="false_position")
    msn.findRoots(-2, 3)  # -1.7320508075688774 and 1.7320508075688776
    msn2 = Msn(eq="-x**2 + 3", tol=0.01, alg="bisection")
    msn2.findRoots(-2, 3)  # -1.736328125 and 1.740234375
| 36.054545
| 122
| 0.534039
|
f585cb72a0db164994f5a14aac9910a31f98b2a9
| 1,096
|
py
|
Python
|
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
"""
Project: Data Types Notes
Author: Mr. Buckley
Last update: 8/25/2018
Description: Goes over comments, int, float, str, and type casting
"""
# *** COMMENTS ***
# This is a comment (with a "#")
# Comments are only for the user's eyes, the program doesn't read them.
# Describe what sections of code do with a comment.
"""
This is a
multiline comment
"""
# *** DATA TYPE: INTEGER ***
# TODO: An integer number (no decimal)
integer = 5
print (integer)
print (type(integer))
# *** DATA TYPE: FLOAT ***
# TODO: A decimal number
decimal = 4.85
print (decimal)
print (type(decimal))
# *** DATA TYPE: STRING ***
# TODO: A string of characters enclosed in quotes
word = "these are my words"
print (word)
print (type(word))
# *** TYPE CASTING ***
# This converts one type to another
# TODO: Cast float to int
decimal = 55.55
dec_to_int = int(decimal)
print(dec_to_int)
# TODO: Cast string to int
number = "8"
print (int(number)+2)
# TODO: Input demo (cast a number string to float)
print ("give me a number and I'll add 1 to it")
number = float (input())
print (number + 1)
| 20.296296
| 71
| 0.671533
|
f5869e041f8cfc604cdaeae8bc529488e18f09e4
| 3,812
|
py
|
Python
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 103
|
2015-03-28T14:32:44.000Z
|
2021-03-31T08:20:24.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 6
|
2016-05-17T13:31:56.000Z
|
2020-11-13T17:19:19.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 106
|
2015-05-10T14:29:06.000Z
|
2021-07-13T08:19:19.000Z
|
import os
import zarr
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm, trange
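# Hedged addition: FaceDataset and Model are referenced below but their
# definitions are not part of this dump. Minimal assumption-labeled sketches;
# the zarr read (despite the .lmdb path in main) and the N x C x H x W array
# layout are assumptions, as is the number of classes:
class FaceDataset(Dataset):
    def __init__(self, path):
        root = zarr.open(path, mode='r')
        self.images = root['images']
        self.labels = root['labels']

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        image = torch.from_numpy(self.images[idx]).to(torch.float32)
        label = int(self.labels[idx])
        return image, label


class Model(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(32, num_classes),
        )
        self.criteria = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.net(x)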
def main(batch_size=64, epochs=50):
data_train = FaceDataset('data/anime_faces/train.lmdb')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loader = DataLoader(data_train, batch_size=batch_size, num_workers=10)
model = Model()
model.to(device)
model.train()
optim = torch.optim.Adam(model.parameters(), lr=0.001)
for epoch in trange(epochs):
t = tqdm(loader)
for i, (images, labels) in enumerate(t):
images = images.to(device)
labels = labels.to(device)
optim.zero_grad()
logits = model(images)
loss = model.criteria(logits, labels)
loss.backward()
optim.step()
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
accuracy = (predicts == labels).to(torch.float32).mean()
t.set_postfix(
epoch=epoch, i=i, loss=loss.item(), accuracy=accuracy.item())
data_val = FaceDataset('data/anime_faces/val.lmdb')
val_loader = DataLoader(data_val, batch_size=batch_size, num_workers=0)
total = len(data_val)
total_correct = 0
model.eval()
for images, labels in val_loader:
images = images.to(device)
labels = labels.to(device)
logits = model(images)
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
correct = (predicts == labels).sum()
total_correct += correct.item()
print('Val accuracy = {}'.format(total_correct / total))
if __name__ == '__main__':
main()
| 33.147826
| 88
| 0.597587
|
f586db857714c3a406cc8d011335a90b361a86d4
| 1,066
|
py
|
Python
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | 2
|
2021-03-31T19:47:28.000Z
|
2021-06-08T20:39:41.000Z
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
import os
import scrapy
from pepper.items import PepperItem
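# Hedged sketch: the Spider subclass is not included in this dump. A minimal
# skeleton only; the spider name and start URL are assumptions, and PepperItem
# field names are unknown here:
class PepperSpider(scrapy.Spider):
    name = 'pepper'
    start_urls = [os.environ.get('PEPPER_START_URL', 'http://example.com')]

    def parse(self, response):
        yield PepperItem()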
| 28.052632
| 93
| 0.541276
|
f5885ba233a8e2203989f8de45355db074bbea32
| 4,334
|
py
|
Python
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 5
|
2015-03-12T00:36:33.000Z
|
2022-02-24T16:41:25.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 133
|
2016-02-03T23:54:45.000Z
|
2022-03-30T21:33:58.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 6
|
2015-01-07T23:21:15.000Z
|
2017-12-07T08:26:33.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
import simplejson as json
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server.org_filters import SearchFilterChain
def spot_with_noise_level(name, noise_level):
"""Create a spot with the given noise level"""
spot = Spot.objects.create(name=name)
spot.spotextendedinfo_set.create(key='noise_level',
value=noise_level)
return spot
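# Hedged sketch: the TestCase classes are not included in this dump; a minimal
# example of how the helper above is typically exercised (class and test names
# are assumptions):
class NoiseLevelSearchTest(TestCase):
    def test_spot_created_with_noise_level(self):
        spot = spot_with_noise_level('Study Room', 'quiet')
        values = spot.spotextendedinfo_set.values_list('value', flat=True)
        self.assertIn('quiet', values)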
| 42.910891
| 79
| 0.682972
|
f58a63fcbad7aec0c720d44005782e265c314a57
| 17
|
py
|
Python
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 7
|
2018-06-17T02:45:33.000Z
|
2021-06-11T09:13:06.000Z
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 1
|
2021-02-07T03:33:22.000Z
|
2021-06-02T21:11:59.000Z
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 7
|
2018-07-14T19:45:43.000Z
|
2021-10-12T09:45:04.000Z
|
__all__ = ["des"]
| 17
| 17
| 0.588235
|
f58b58dad3bb3dc21147ea1fd781e8e5e4ef8b49
| 185
|
py
|
Python
|
spotdl/metadata/providers/__init__.py
|
khjxiaogu/spotify-downloader
|
a8dcb8d998da0769bbe210f2808d16b346453c23
|
[
"MIT"
] | 4,698
|
2017-06-20T22:37:10.000Z
|
2022-03-28T13:38:07.000Z
|
spotdl/metadata/providers/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 690
|
2017-06-20T20:08:42.000Z
|
2022-02-26T23:36:07.000Z
|
spotdl/metadata/providers/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 741
|
2017-06-21T23:32:51.000Z
|
2022-03-07T12:11:54.000Z
|
from spotdl.metadata.providers.spotify import ProviderSpotify
from spotdl.metadata.providers.youtube import ProviderYouTube
from spotdl.metadata.providers.youtube import YouTubeSearch
| 37
| 61
| 0.881081
|
f58db2e3a8108081fdad6ca36c2b07a1f84d614d
| 1,476
|
py
|
Python
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | 1
|
2021-06-30T10:34:38.000Z
|
2021-06-30T10:34:38.000Z
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
from math import floor
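# Hedged addition: the Integer class used below is not part of this dump.
# A minimal sketch consistent with the calls that follow; the error strings
# and validation rules are assumptions:
class Integer:
    ROMAN = {"I": 1, "IV": 4, "V": 5, "IX": 9, "X": 10}

    def __init__(self, value):
        self.value = value

    @classmethod
    def from_float(cls, float_value):
        if not isinstance(float_value, float):
            return "value is not a float"
        return cls(floor(float_value))

    @classmethod
    def from_roman(cls, value):
        return cls(cls.ROMAN.get(value, 0))

    @classmethod
    def from_string(cls, value):
        if not isinstance(value, str):
            return "wrong type"
        return cls(int(value))

    def add(self, other):
        return self.value + other.value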
first_num = Integer(10)
second_num = Integer.from_roman("IV")
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
print(first_num.add(second_num))
| 25.448276
| 89
| 0.554201
|
f58e292660fbb4b40d7f4326ad34ea03b891aa42
| 324
|
py
|
Python
|
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | null | null | null |
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | 13
|
2021-03-04T22:59:54.000Z
|
2021-05-16T23:24:22.000Z
|
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | 1
|
2021-04-20T14:51:43.000Z
|
2021-04-20T14:51:43.000Z
|
import datetime
from typing import Optional
from pydantic import BaseModel
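# Hedged sketch: the schema classes are not included in this dump. Typical
# minimal definitions for a token response and its payload; the field names
# are assumptions:
class Token(BaseModel):
    access_token: str
    token_type: str


class TokenPayload(BaseModel):
    sub: Optional[int] = None
    exp: Optional[datetime.datetime] = None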
| 14.727273
| 35
| 0.753086
|
f58e82435946520f98ad569c02443f0eda8332d6
| 1,988
|
py
|
Python
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 6
|
2017-04-06T02:55:16.000Z
|
2020-01-27T05:14:12.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 13
|
2016-09-12T14:24:22.000Z
|
2021-10-22T01:19:43.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 1
|
2016-09-12T14:01:49.000Z
|
2016-09-12T14:01:49.000Z
|
import json
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
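# Hedged sketch: the scraping helpers are not part of this dump. A minimal
# fetch-and-parse helper in the style the imports suggest; the timeout and
# parser choice are assumptions:
def get_soup(url):
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'html.parser')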
| 33.694915
| 153
| 0.639336
|
f5915199b7c0be4872a450c1503f4eb928f9e20f
| 637
|
py
|
Python
|
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from .common import LifeTime, IServiceProvider, ICallSiteResolver
from .descriptors import CallableDescriptor
| 33.526316
| 98
| 0.726845
|
f5931d77f9a036d1b90d5e9b889749394d2eff5e
| 1,124
|
py
|
Python
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-05-31T09:40:19.000Z
|
2021-05-31T09:40:19.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-06-30T14:35:22.000Z
|
2021-06-30T14:35:22.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | null | null | null |
from joblib import Memory
cachedir = "cache"
memory = Memory(cachedir, verbose=10)
# @memory.cache
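# Hedged illustration of how the Memory object above is normally applied; the
# function below is hypothetical, only the decorator usage is the point:
@memory.cache
def load_stories(path):
    with open(path) as f:
        return f.read().split('\n\n')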
| 25.545455
| 102
| 0.572954
|
f5936c772afa998f17b6206beacedcb9d549bb50
| 2,167
|
py
|
Python
|
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | 6
|
2021-10-03T10:42:59.000Z
|
2022-01-05T05:25:57.000Z
|
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | null | null | null |
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | 1
|
2021-10-13T08:38:58.000Z
|
2021-10-13T08:38:58.000Z
|
import sys
import helpers.printer
import helpers.parser
import helpers.config
import program.obfuscation
import program.bypass
modes = helpers.config.Modes
bypass_methods = helpers.config.BypassMethods
obfuscation_methods = helpers.config.ObfuscationMethods
printer = helpers.printer.Printer()
parser = helpers.parser.Parser(
printer, modes, bypass_methods, obfuscation_methods)
bypass = program.bypass.Bypass()
obfuscation = program.obfuscation.Obfuscation()
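# Hedged addition: main() is called below but not included in this dump. The
# real control flow (parsing args, dispatching to the bypass / obfuscation
# helpers built above) is unknown, so only a stub is given:
def main():
    pass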
if __name__ == "__main__":
main()
| 29.283784
| 103
| 0.616059
|
f594558a69e840af8885fc68a994d40b44b65eaf
| 1,169
|
py
|
Python
|
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
resnet_18_default = 224
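# Hedged sketch: the loader helpers are not included in this dump. A minimal
# CIFAR-10 train loader resized for a ResNet-18 input, using the constant
# above; the function name, split fraction and data root are assumptions:
def get_train_loader(batch_size=64, valid_fraction=0.1):
    transform = transforms.Compose([
        transforms.Resize(resnet_18_default),
        transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.CIFAR10(
        root=os.path.join(dir_path, 'cifar10'), train=True,
        download=True, transform=transform)
    indices = list(range(len(dataset)))
    split = int(valid_fraction * len(dataset))
    train_sampler = SubsetRandomSampler(indices[split:])
    return DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)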
| 33.4
| 107
| 0.718563
|
f5970041908938ed814405d6c8377946dc2070bf
| 3,680
|
py
|
Python
|
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
from scipy import io
import numpy as np
import random
import tensorflow as tf
class_num = 10
image_size = 32
img_channels = 3
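# Hedged sketch: the data-preparation helpers are not part of this dump. A
# minimal loader for the cropped-digit .mat files; the function name is an
# assumption, the array layout follows the standard SVHN distribution:
def load_svhn_mat(path):
    data = io.loadmat(path)
    images = np.transpose(data['X'], (3, 0, 1, 2))  # to N x H x W x C
    labels = data['y'].flatten()
    labels[labels == 10] = 0  # SVHN stores digit 0 as class 10
    return images, labels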
| 32.857143
| 108
| 0.6
|
f59759138fa73fcc525ff95be2e388e6c99396f6
| 530
|
py
|
Python
|
app/models/user.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
app/models/user.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
app/models/user.py
|
johnshumon/fastapi-boilerplate
|
f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc
|
[
"MIT"
] | null | null | null |
"""
User models module
"""
from sqlalchemy import Column, Integer, String
from app.models import Base
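# Hedged sketch: the model class is not included in this dump. A minimal
# assumption-labeled SQLAlchemy user model; table and column names are
# guesses:
class User(Base):
    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True, nullable=False)
    hashed_password = Column(String, nullable=False)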
| 27.894737
| 79
| 0.696226
|
f5987f75714e27b31f8445c59b2b8df50b29383c
| 782
|
py
|
Python
|
oops_fhir/r4/value_set/parent_relationship_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/parent_relationship_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/parent_relationship_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_role_code import v3RoleCode
__all__ = ["ParentRelationshipCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
| 23
| 76
| 0.751918
|
f59c2ee308a2569240f5244e69e497fec60a9ffe
| 253
|
py
|
Python
|
setting.py
|
JamesPerlman/Dain-App
|
f589abdca8309cfdb6dd106da7c7c4613d152c72
|
[
"MIT"
] | 688
|
2020-12-02T18:02:21.000Z
|
2022-03-31T09:56:14.000Z
|
setting.py
|
JamesPerlman/Dain-App
|
f589abdca8309cfdb6dd106da7c7c4613d152c72
|
[
"MIT"
] | 29
|
2020-12-03T00:21:25.000Z
|
2021-12-04T22:32:42.000Z
|
setting.py
|
JamesPerlman/Dain-App
|
f589abdca8309cfdb6dd106da7c7c4613d152c72
|
[
"MIT"
] | 88
|
2020-12-03T00:13:29.000Z
|
2022-03-26T16:03:38.000Z
|
counter = 0
interpolations = None
padding = None
| 15.8125
| 56
| 0.679842
|
f59da655c4bae4aa2a9b07d54d040a1c00439910
| 1,540
|
py
|
Python
|
bgjobs/plugins.py
|
holtgrewe/sodar_core
|
116c5c8abc1dea483a640ba68af6d5cf4d27c8d7
|
[
"MIT"
] | null | null | null |
bgjobs/plugins.py
|
holtgrewe/sodar_core
|
116c5c8abc1dea483a640ba68af6d5cf4d27c8d7
|
[
"MIT"
] | null | null | null |
bgjobs/plugins.py
|
holtgrewe/sodar_core
|
116c5c8abc1dea483a640ba68af6d5cf4d27c8d7
|
[
"MIT"
] | null | null | null |
"""Code related to ``django-plugins``.
First, it creates a ``ProjectAppPluginPoint`` for the ``bgjobs`` app.
Second, it creates a new plugin point for registering ``BackgroundJob``
specializations.
"""
from djangoplugins.point import PluginPoint
from projectroles.plugins import ProjectAppPluginPoint
from .urls import urlpatterns
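# Hedged sketch: the plugin classes are not included in this dump. Minimal
# assumption-labeled versions of the two pieces the docstring describes:
class BackgroundJobsPluginPoint(PluginPoint):
    """Plugin point for registering ``BackgroundJob`` specializations."""


class ProjectAppPlugin(ProjectAppPluginPoint):
    """Project app plugin for the ``bgjobs`` app."""

    name = 'bgjobs'
    title = 'Background Jobs'
    urls = urlpatterns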
| 26.551724
| 77
| 0.718831
|
f5a27b850295f14cce9d9e2cff15b6524fbbecf8
| 4,562
|
py
|
Python
|
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | 1
|
2022-01-18T09:53:34.000Z
|
2022-01-18T09:53:34.000Z
|
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | null | null | null |
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | null | null | null |
import discord as nextcord
import asyncio
from discord.ext import commands
import json
import time
import typing
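# Hedged sketch: the cog class is not included in this dump. A minimal
# skeleton consistent with the imports above; class and listener names are
# assumptions:
class AutoMod(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        pass  # the real filtering logic is not shown in this dump


def setup(bot):
    bot.add_cog(AutoMod(bot))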
| 38.016667
| 153
| 0.622534
|
f5a40afb92b821bdbd1bca8cea58ac0b9702d2e6
| 960
|
py
|
Python
|
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
# Adam
# A program that reads in a text
# file and outputs the number of e's it contains
# The program takes the filename from
# an argument on the command line.
# I found information on this website:
# https://www.sanfoundry.com/python-program-read-file-counts-number/
#fname = input("Enter file name: ")
#l = input("Enter letter to be searched: ")
#e = 0
#with open(fname, "r") as f:
#for line in f:
#words = line.split()
#for i in words:
#for letter in i:
#if(letter == e):
#e = e+1
#print("Occurences of the letter: ")
#print(e)
# Requirement for this assignmnet is to only print
# The occurence of letter E.
fname = input("Enter file name: ")
e = 0
with open(fname, "r") as f:
for line in f:
words = line.split()
for i in words:
for letter in i:
if(letter == "e"):
e = e+1
print(e)
| 20.869565
| 68
| 0.558333
|
f5a59287ceaf7b3b0006e335abd2aae06f9ad302
| 3,936
|
py
|
Python
|
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
""" Tests for tinypages build using sphinx extensions """
from os.path import (join as pjoin, dirname, isdir)
import sphinx
SPHINX_ge_1p5 = sphinx.version_info[:2] >= (1, 5)
from sphinxtesters import PageBuilder
HERE = dirname(__file__)
PAGES = pjoin(HERE, 'tinypages')
from texext.tests.test_plotdirective import format_math_block
| 37.485714
| 71
| 0.54497
|
f5a8efb033fff75dd7f358a028f0ce20386e8ec9
| 3,708
|
py
|
Python
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | 1
|
2021-04-07T14:25:01.000Z
|
2021-04-07T14:25:01.000Z
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | 5
|
2021-03-30T14:08:53.000Z
|
2021-09-22T19:29:42.000Z
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | null | null | null |
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpRequest
from rest_framework.exceptions import NotFound
from rest_framework.test import APIRequestFactory
from rest_framework.views import exception_handler, APIView
from typing import List, TypeVar
logger = logging.getLogger(__name__)
T = TypeVar('T')
NON_CLONEABLE_MODELS: List[str] = [
'User',
]
def exception_logging_handler(exc: Exception, context: dict):
"""
Intercept DRF error handler to log the error message
Update the REST_FRAMEWORK setting in settings.py to use this handler
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'core.exception_logging_handler',
}
"""
logger.warning(exc)
# translate uncaught Django ObjectDoesNotExist exception to NotFound
if isinstance(exc, ObjectDoesNotExist):
logger.error(f'uncaught ObjectDoesNotExist error: {exc} - {context}')
exc = NotFound(str(exc))
# follow DRF default exception handler
response = exception_handler(exc, context)
return response
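# Hedged addition: DummyView is used below but not included in this dump; a
# bare APIView is enough here, since only initialize_request() is needed:
class DummyView(APIView):
    pass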
def make_drf_request(request: HttpRequest = None, headers: dict = None):
"""
The request object made by APIRequestFactory is `WSGIRequest` which
doesn't have `.query_params` or `.data` method as recommended by DRF.
It only gets "upgraded" to DRF `Request` class after passing through
the `APIView`, which invokes `.initialize_request` internally.
This helper method uses a dummy API view to return a DRF `Request`
object for testing purpose.
Ref:
https://stackoverflow.com/questions/28421797/django-rest-framework-apirequestfactory-request-object-has-no-attribute-query-p
https://github.com/encode/django-rest-framework/issues/3608
"""
if request is None:
# use a default request
request = APIRequestFactory().get('/')
drf_request = DummyView().initialize_request(request)
if headers:
drf_request.headers = headers
return drf_request
| 33.107143
| 128
| 0.702805
|
f5aa196ccf6037cd4fcdad669c9f9252c8778f6e
| 436
|
py
|
Python
|
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
S = input()
arr = []
now = []
counter = 0
for s in S:
now.append(s.lower())
if s.isupper():
if counter == 0:
counter += 1
else:
arr.append(''.join(now))
now = []
counter = 0
arr.sort()
for word in arr:
for i, s in enumerate(word):
if i == 0 or i == len(word) - 1:
print(s.upper(), end='')
else:
print(s, end='')
print()
| 19.818182
| 40
| 0.428899
|
f5aa1cc085abe91e5e9d7da0530662e853080933
| 738
|
py
|
Python
|
lesson13n2/states/out.py
|
muzudho/py-state-machine-practice
|
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
|
[
"MIT"
] | null | null | null |
lesson13n2/states/out.py
|
muzudho/py-state-machine-practice
|
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
|
[
"MIT"
] | null | null | null |
lesson13n2/states/out.py
|
muzudho/py-state-machine-practice
|
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
|
[
"MIT"
] | null | null | null |
from lesson12_projects.house3.data.const import E_TURNED_KNOB, MSG_TURN_KNOB, E_FAILED
| 21.085714
| 86
| 0.585366
|
190f3d0f2aa0d41a590c2d4d36fe77e3833762f3
| 2,171
|
py
|
Python
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 1
|
2020-10-14T23:02:04.000Z
|
2020-10-14T23:02:04.000Z
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 9
|
2020-11-07T23:33:28.000Z
|
2021-12-13T09:22:07.000Z
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 1
|
2020-11-07T22:35:40.000Z
|
2020-11-07T22:35:40.000Z
|
# -*- coding: utf-8 -*-
"""setup.py"""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
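# Hedged addition: read_content and Tox are used below but not included in
# this dump. Minimal assumption-labeled versions, following the classic tox
# test-command pattern:
def read_content(path):
    with open(path) as f:
        return f.read()


class Tox(TestCommand):
    def run_tests(self):
        import tox
        tox.cmdline()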
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
long_description = (
read_content("README.rst") +
read_content(os.path.join("docs/source", "CHANGELOG.rst")))
requires = ['setuptools', 'typeguard==2.5.0', 'pyspark==3.0.1', 'findspark']
extras_require = {
'reST': ['Sphinx'],
}
if os.environ.get('READTHEDOCS', None):
extras_require['reST'].append('recommonmark')
setup(name='pysequila',
version=os.getenv('VERSION', '0.1.0'),
description='An SQL-based solution for large-scale genomic analysis',
long_description=long_description,
long_description_content_type='text/x-rst',
author='biodatageeks',
author_email='team@biodatageeks.org',
url='https://pysequila.biodatageeks.org',
classifiers=classifiers,
packages=['pysequila'],
data_files=[],
install_requires=requires,
include_package_data=True,
extras_require=extras_require,
tests_require=['tox'],
cmdclass={'test': Tox},)
| 28.194805
| 76
| 0.64947
|
1910c0a5070edd02fb3f35021e3104c2486a91bb
| 2,266
|
py
|
Python
|
binaryblob.py
|
rikusalminen/trimuncher
|
bdf534fdf382c750e0ec7a6031433de88014e656
|
[
"Zlib"
] | 1
|
2018-11-06T05:11:08.000Z
|
2018-11-06T05:11:08.000Z
|
binaryblob.py
|
rikusalminen/trimuncher
|
bdf534fdf382c750e0ec7a6031433de88014e656
|
[
"Zlib"
] | null | null | null |
binaryblob.py
|
rikusalminen/trimuncher
|
bdf534fdf382c750e0ec7a6031433de88014e656
|
[
"Zlib"
] | null | null | null |
from sys import stdin, stdout
from struct import pack, unpack
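# Hedged addition: blob_vertices() is used below but not included in this
# dump. A minimal generator packing vertex components as 4-byte floats; the
# attrs layout is unknown here and is ignored in this sketch:
def blob_vertices(attrs, verts, little_endian=True):
    fmt = '<f' if little_endian else '>f'
    for vert in verts:
        for component in vert:
            yield pack(fmt, component)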
def blob_vertex_write(attrs, verts, out=stdout, little_endian = True):
for blob in blob_vertices(attrs, verts, little_endian):
out.write(blob)
| 35.40625
| 91
| 0.621359
|
1910e99d0b7143a24de2db38d697e59e51df210d
| 2,234
|
py
|
Python
|
lib/piservices/remote/integration.py
|
creative-workflow/pi-setup
|
d6d28cb8d34ef71b1e8ac95dd94099bfad08837a
|
[
"MIT"
] | 1
|
2020-04-25T00:55:45.000Z
|
2020-04-25T00:55:45.000Z
|
lib/piservices/remote/integration.py
|
creative-workflow/pi-setup
|
d6d28cb8d34ef71b1e8ac95dd94099bfad08837a
|
[
"MIT"
] | 4
|
2015-05-28T23:20:13.000Z
|
2015-05-28T23:24:01.000Z
|
lib/piservices/remote/integration.py
|
creative-workflow/pi-setup
|
d6d28cb8d34ef71b1e8ac95dd94099bfad08837a
|
[
"MIT"
] | null | null | null |
from config import *
from template import *
from dictasobject import DictAsObject
| 39.192982
| 83
| 0.656222
|
191159a3e3b8327371261a5ae76fdabd0024bab8
| 4,400
|
py
|
Python
|
src/architecture/cartpole_target.py
|
ginevracoal/adversarialGAN
|
7a38e037a5ddbbe0bb4daed35fcb0e6fbf9b311e
|
[
"CC-BY-4.0"
] | 1
|
2020-12-15T03:03:47.000Z
|
2020-12-15T03:03:47.000Z
|
src/architecture/cartpole_target.py
|
ginevracoal/adversarialGAN
|
7a38e037a5ddbbe0bb4daed35fcb0e6fbf9b311e
|
[
"CC-BY-4.0"
] | null | null | null |
src/architecture/cartpole_target.py
|
ginevracoal/adversarialGAN
|
7a38e037a5ddbbe0bb4daed35fcb0e6fbf9b311e
|
[
"CC-BY-4.0"
] | 1
|
2020-11-05T09:35:11.000Z
|
2020-11-05T09:35:11.000Z
|
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import matplotlib.pyplot as plt
import architecture.default
from architecture.default import Defender
DEBUG=False
BATCH_SIZE=32
FIXED_POLICY=False
NORMALIZE=False
K=10
PENALTY=10
MAX_TARGET_POS=10
torch.set_default_tensor_type(torch.DoubleTensor)
| 30.769231
| 114
| 0.606136
|
1911d18a99f00abe9dd822c30eace393500445cb
| 7,785
|
py
|
Python
|
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | 1
|
2019-10-21T18:19:12.000Z
|
2019-10-21T18:19:12.000Z
|
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | null | null | null |
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | null | null | null |
import pygame
import random
from time import sleep
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
pygame.init()
largura = 320
altura = 320
fundo = pygame.display.set_mode((largura, altura))
pygame.display.set_caption("TicTacToe")
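# Hedged addition: the helpers cerca, texto, cruz, circulo and endgame are
# called below but are not included in this dump. Minimal sketches so the
# game loop can run; geometry, fonts and the win bookkeeping are assumptions:
def cerca():
    for i in (106, 212):
        pygame.draw.line(fundo, black, (i, 0), (i, altura), 3)
        pygame.draw.line(fundo, black, (0, i), (largura, i), 3)

def texto(msg, cor, tamanho, x, y):
    fonte = pygame.font.SysFont(None, tamanho)
    fundo.blit(fonte.render(msg, True, cor), (x, y))

def cruz(x, y):
    pygame.draw.line(fundo, blue, (x - 20, y - 20), (x + 20, y + 20), 5)
    pygame.draw.line(fundo, blue, (x - 20, y + 20), (x + 20, y - 20), 5)

def circulo(pos):
    centros = [(35, 35), (141, 35), (247, 35), (35, 141), (141, 141),
               (247, 141), (35, 247), (141, 247), (247, 247)]
    pygame.draw.circle(fundo, red, centros[pos], 25, 5)

def endgame():
    global fimdejogo, resultado
    linhas = [(0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7),
              (2, 5, 8), (0, 4, 8), (2, 4, 6)]
    for a, b, c in linhas:
        if matriz[a] == matriz[b] == matriz[c] != 0:
            resultado = 1 if matriz[a] == 1 else 2
            fimdejogo = True
    if 0 not in matriz and not fimdejogo:
        resultado = 3
        fimdejogo = True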
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
while game:
while fimdejogo:
sleep(0.5)
fundo.fill(white)
        texto('Game Over', red, 50, 65, 30)
        if resultado == 1:
            texto('Victory!!!', black, 30, 70, 80)
        if resultado == 3:
            texto('Tie', black, 30, 70, 80)
        if resultado == 2:
            texto('Defeat!!', black, 30, 70, 80)
        pygame.draw.rect(fundo, black, [45, 120, 135, 27])
        texto('Continue(C)', white, 30, 50, 125)
        pygame.draw.rect(fundo, black, [190, 120, 75, 27])
        texto('Quit(S)', white, 30, 195, 125)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
fimdejogo = False
trava = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
if event.key == pygame.K_s:
game = False
fimdejogo = False
evento = False
trava = False
while evento:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
evento = False
trava = False
if event.type == pygame.MOUSEBUTTONDOWN:
mousex = pygame.mouse.get_pos()[0]
mousey = pygame.mouse.get_pos()[1]
evento = False
evento = True
if mousex < 106 and mousey < 106 and mousex != -1 and matriz[0] == 0:
cruz(35, 35)
matriz[0] = 1
if mousex < 212 and mousex > 106 and mousey < 106 and matriz[1] == 0:
cruz(141, 35)
matriz[1] = 1
if mousex < 320 and mousex > 212 and mousey < 106 and matriz[2] == 0:
cruz(247, 35)
matriz[2] = 1
if mousex < 106 and mousey > 106 and mousey < 212 and matriz[3] == 0:
cruz(35, 141)
matriz[3] = 1
if mousex < 212 and mousex > 106 and mousey < 212 and mousey > 106 and matriz[4] == 0:
cruz(141, 141)
matriz[4] = 1
if mousex < 320 and mousex > 212 and mousey < 212 and mousey > 106 and matriz[5] == 0:
cruz(247, 141)
matriz[5] = 1
if mousex < 106 and mousey < 320 and mousey > 212 and matriz[6] == 0:
cruz(35, 247)
matriz[6] = 1
if mousex < 212 and mousex > 106 and mousey < 320 and mousey > 212 and matriz[7] == 0:
cruz(141, 247)
matriz[7] = 1
if mousex < 320 and mousex > 212 and mousey < 320 and mousey > 212 and matriz[8] == 0:
cruz(247, 247)
matriz[8] = 1
endgame()
pygame.display.update()
sleep(0.5)
if trava:
while True:
jogada = random.randint(0, 8)
if matriz[jogada] == 0:
circulo(jogada)
matriz[jogada] = 2
break
else:
if 0 in matriz:
jogada = random.randint(0, 8)
else:
break
endgame()
pygame.display.update()
pygame.display.update()
| 31.26506
| 116
| 0.491715
|
19128f435521b2a41c0130fc202af247adfc091d
| 2,332
|
py
|
Python
|
src/plugins/notice.py
|
napalmpiri/irkotr0id
|
8125c0119038ddccdf6f0a587fa9eb4a0f66821d
|
[
"Beerware"
] | 7
|
2017-10-30T17:12:51.000Z
|
2021-03-03T23:00:35.000Z
|
src/plugins/notice.py
|
napalmpiri/irkotr0id
|
8125c0119038ddccdf6f0a587fa9eb4a0f66821d
|
[
"Beerware"
] | 1
|
2017-09-20T13:34:56.000Z
|
2017-09-20T13:34:56.000Z
|
src/plugins/notice.py
|
napalmpiri/irkotr0id
|
8125c0119038ddccdf6f0a587fa9eb4a0f66821d
|
[
"Beerware"
] | 7
|
2017-09-16T10:39:20.000Z
|
2018-02-28T19:43:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import event
| 33.797101
| 80
| 0.51458
|
191359000d3e32159cc42150dd476b64da855e66
| 5,794
|
py
|
Python
|
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | 2
|
2015-03-16T21:18:27.000Z
|
2017-10-09T19:59:24.000Z
|
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Parse .net format for Bayes nets and return a bayes net
"""
from pyec.config import Config
from pyec.distribution.bayes.net import *
from pyec.distribution.bayes.structure.proposal import StructureProposal
| 43.893939
| 460
| 0.576458
|
1916221a240fca8e366955bf9b55225db064e9c4
| 169
|
py
|
Python
|
Desafio25.py
|
sergioboff/Desafios-Curso-em-Video
|
f876396635b12c00bdd9523758364bbebfd70ae0
|
[
"MIT"
] | null | null | null |
Desafio25.py
|
sergioboff/Desafios-Curso-em-Video
|
f876396635b12c00bdd9523758364bbebfd70ae0
|
[
"MIT"
] | null | null | null |
Desafio25.py
|
sergioboff/Desafios-Curso-em-Video
|
f876396635b12c00bdd9523758364bbebfd70ae0
|
[
"MIT"
] | null | null | null |
nome = str(input('Enter your full name: ')).strip()
if 'silva' in nome.lower():
    print('Yes, your name contains Silva.')
else:
    print('No, your name does not contain Silva.')
| 33.8
| 55
| 0.64497
|
19179562beab192ca5fca3ff7c055101546a8163
| 1,527
|
py
|
Python
|
migrations/versions/8b664608a7c7_.py
|
wangyuan02605/webcloud
|
e57a2713125b751ee8bb8da29b789e2044e789aa
|
[
"MIT"
] | 5
|
2021-12-13T14:52:08.000Z
|
2022-03-15T08:59:32.000Z
|
migrations/versions/8b664608a7c7_.py
|
wangyuan02605/webcloud
|
e57a2713125b751ee8bb8da29b789e2044e789aa
|
[
"MIT"
] | null | null | null |
migrations/versions/8b664608a7c7_.py
|
wangyuan02605/webcloud
|
e57a2713125b751ee8bb8da29b789e2044e789aa
|
[
"MIT"
] | 1
|
2021-04-26T06:08:35.000Z
|
2021-04-26T06:08:35.000Z
|
"""empty message
Revision ID: 8b664608a7c7
Revises: ec21e19825ff
Create Date: 2021-06-01 14:37:20.327189
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8b664608a7c7'
down_revision = 'ec21e19825ff'
branch_labels = None
depends_on = None
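# Hedged addition: the upgrade/downgrade functions are not included in this
# dump; Alembic revisions require them, so minimal stubs follow (the real
# schema operations are unknown):
def upgrade():
    pass


def downgrade():
    pass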
| 35.511628
| 80
| 0.675835
|