Dataset schema (one row per file; ⌀ marks a nullable column):

| column | type | lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
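Each record below is shown as a metadata block followed by the file content. As a minimal sketch of how such a table is typically consumed (the dataset path is a placeholder, not given in this document):

    from datasets import load_dataset  # Hugging Face `datasets` library

    # Stream one record; "org/dataset-name" is hypothetical.
    ds = load_dataset("org/dataset-name", split="train", streaming=True)
    row = next(iter(ds))
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])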
---
hexsha: 573671e14e06512a6056d7ef96ce655d220e4a19 | size: 2,857 | ext: py | lang: Python
path: Run_exphydro_distributed_type1_pso.py | repo: sopanpatil/exp-hydro | head: 7295dddc4df1028f669a223e1b631a4a91669515 | licenses: ["MIT"]
stars: 11 (2016-11-25T13:05:26.000Z → 2022-03-25T03:24:16.000Z) | issues: null | forks: 6 (2017-03-28T12:06:00.000Z → 2021-09-16T17:50:34.000Z)
#!/usr/bin/env python
# Programmer(s): Sopan Patil.
""" MAIN PROGRAM FILE
Run this file to optimise the model parameters of the spatially distributed
version of the EXP-HYDRO model using the Particle Swarm Optimisation (PSO) algorithm.
Type 1 Model:
- This type of distributed model is pixel based (i.e., all sub-components
have the same drainage area).
- All pixels receive the same meteorological inputs.
- Channel routing is ignored and it is assumed that streamflow generated from
each pixel reaches the catchment outlet on the same day.
"""
import numpy
import os
import time
import matplotlib.pyplot as plt
from exphydro.distributed import ExphydroDistrParameters
from exphydro.distributed.type1 import ExphydroDistrModel
from hydroutils import Calibration, ObjectiveFunction
start_time = time.time()
######################################################################
# SET WORKING DIRECTORY
# Getting current directory, i.e., directory containing this file
dir1 = os.path.dirname(os.path.abspath(__file__))
# Setting to current directory
os.chdir(dir1)
######################################################################
# MAIN PROGRAM
# Load meteorological and observed flow data
P = numpy.genfromtxt('SampleData/P_test.txt') # Observed rainfall (mm/day)
T = numpy.genfromtxt('SampleData/T_test.txt') # Observed air temperature (deg C)
PET = numpy.genfromtxt('SampleData/PET_test.txt') # Potential evapotranspiration (mm/day)
Qobs = numpy.genfromtxt('SampleData/Q_test.txt') # Observed streamflow (mm/day)
# Specify the number of pixels in the catchment
npixels = 5
# Specify the no. of parameter sets (particles) in a PSO swarm
npart = 10
# Generate 'npart' initial EXP-HYDRO model parameters
params = [ExphydroDistrParameters(npixels) for j in range(npart)]
# Initialise the model by loading its climate inputs
model = ExphydroDistrModel(P, PET, T, npixels)
# Specify the start and end day numbers of the calibration period.
# This is done separately for the observed and simulated data
# because they might not be of the same length in some cases.
calperiods_obs = [365, 2557]
calperiods_sim = [365, 2557]
# Calibrate the model to identify optimal parameter set
paramsmax = Calibration.pso_maximise(model, params, Qobs, ObjectiveFunction.klinggupta, calperiods_obs, calperiods_sim)
print ('Calibration run KGE value = ', paramsmax.objval)
# Run the optimised model for validation period
Qsim = model.simulate(paramsmax)
kge = ObjectiveFunction.klinggupta(Qobs[calperiods_obs[1]:], Qsim[calperiods_sim[1]:])
print ('Independent run KGE value = ', kge)
print("Total runtime: %s seconds" % (time.time() - start_time))
# Plot the observed and simulated hydrographs
plt.plot(Qobs[calperiods_obs[0]:], 'b-')
plt.plot(Qsim[calperiods_sim[0]:], 'r-')
plt.show()
######################################################################
avg_line_length: 35.7125 | max_line_length: 119 | alphanum_fraction: 0.716136

---
hexsha: 5738d01ad1ed866e8e47c9a1f5dadbf2cfce3611 | size: 11,104 | ext: py | lang: Python
path: multi_input_multi_output/train.py | repo: alt113/CS591-Multimodal-Spring2021 | head: f28bade729818aa51fd131e86f1ba2271cca8947 | licenses: ["MIT"]
stars: null | issues: 1 (2021-05-03T18:59:43.000Z → 2021-05-03T19:04:19.000Z) | forks: null
import os
from multi_input_multi_output.models import MultiNet
from shared_weights.helpers import config, utils
from shared_weights.helpers.siamese_network import create_encoder
from data.data_tf import fat_dataset
import tensorflow as tf
from tensorflow import keras
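# NOTE: RepresentationLearner and flatten_model are used below, but their
# imports/definitions are not included in this snippet.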
# ----------------------
""" Data augmentation"""
augmentation_input = keras.layers.Input(shape=config.IMG_SHAPE)
data_augmentation = keras.layers.experimental.preprocessing.RandomTranslation(
height_factor=(-0.2, 0.2),
width_factor=(-0.2, 0.2),
fill_mode="constant"
)(augmentation_input)
data_augmentation = keras.layers.experimental.preprocessing.RandomFlip(mode="horizontal")(data_augmentation)
data_augmentation = keras.layers.experimental.preprocessing.RandomRotation(factor=0.15,
fill_mode="constant")(data_augmentation)
augmentation_output = keras.layers.experimental.preprocessing.RandomZoom(height_factor=(-0.3, 0.1),
width_factor=(-0.3, 0.1),
fill_mode="constant")(data_augmentation)
data_augmentation = keras.Model(augmentation_input, augmentation_output)
""" Unsupervised contrastive loss"""
""" Train the model"""
network_input = keras.layers.Input(shape=config.IMG_SHAPE)
# Load RGB vision encoder.
r_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(r_encoder)
r_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
r_representation_learner = RepresentationLearner(
r_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
r_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
r_representation_learner.load_weights(config.RGB_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(r_representation_learner.layers[0])
rgb_encoder = functional_model.layers[1]
# Load Depth vision encoder.
d_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(d_encoder)
d_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
d_representation_learner = RepresentationLearner(
d_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
d_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
d_representation_learner.load_weights(config.DEPTH_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(d_representation_learner.layers[0])
depth_encoder = functional_model.layers[1]
# ----------------------
# RGB
rgb_input = keras.layers.Input(shape=config.IMG_SHAPE)
# rgb_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
rgb = rgb_encoder(rgb_input)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(rgb)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Flatten()(rgb)
rgb = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(rgb)
rgb_classifier = keras.models.Model(inputs=rgb_input, outputs=rgb, name='rgb_classifier')
for layer in rgb_classifier.layers:
layer._name += '_rgb'
layer.trainable = True
print('[INFO] built rgb classifier')
print(rgb_classifier.summary())
# Depth
depth_input = keras.layers.Input(shape=config.IMG_SHAPE)
# depth_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
depth = depth_encoder(depth_input)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(depth)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Flatten()(depth)
depth = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(depth)
depth_classifier = keras.models.Model(inputs=depth_input, outputs=depth, name='depth_classifier')
for layer in depth_classifier.layers:
layer._name += '_depth'
layer.trainable = True
print('[INFO] built depth classifier')
print(depth_classifier.summary())
# Build and compile MultiNet
multinet_class = MultiNet(rgb_classifier=rgb_classifier,
rgb_output_branch=rgb,
depth_classifier=depth_classifier,
depth_output_branch=depth)
multinet_class.compile()
multinet_model = multinet_class.model
print('[INFO] built MultiNet classifier')
# train the network to perform multi-output classification
train_ds = fat_dataset(split='train',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
val_ds = fat_dataset(split='validation',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
print("[INFO] training MultiNet...")
counter = 0
history = None
toCSV = []
while counter <= config.EPOCHS:
counter += 1
print(f'* Epoch: {counter}')
data_batch = 0
for imgs, labels in train_ds:
data_batch += 1
history = multinet_model.train_on_batch(x=[imgs[:, 0], imgs[:, 1]],
y={'dense_5_rgb': labels[:], 'dense_7_depth': labels[:]},
reset_metrics=False,
return_dict=True)
print(f'* Data Batch: {data_batch}')
print(f'\t{history}')
break
if counter % 10 == 0:
print("[VALUE] Testing model on batch")
for val_data, val_labels in val_ds:
val_results = multinet_model.test_on_batch(x=[val_data[:, 0], val_data[:, 1]],
y={'dense_5_rgb': val_labels[:], 'dense_7_depth': val_labels[:]})
print(val_results)
toCSV.append(val_results)
print('Saving MultiNet validation results as CSV file')
utils.save_model_history(H=toCSV, path_to_csv=config.FROZEN_SIAMESE_TRAINING_HISTORY_CSV_PATH)
rgb_classifier.save_weights(config.MIMO_RGB_WEIGHTS)
print("Saved RGB model weights to disk")
# serialize weights to HDF5
depth_classifier.save_weights(config.MIMO_DEPTH_WEIGHTS)
print("Saved Depth model weights to disk")
avg_line_length: 40.525547 | max_line_length: 132 | alphanum_fraction: 0.665616

---
hexsha: 573a1fa313f96c01ab6df0ada017abeca301701e | size: 856 | ext: py | lang: Python
path: tools/rebuild_caches.py | repo: newbdoc/lookyloo | head: 53a8952fccaf9ae42fa582d3475283babd55d08a | licenses: ["BSD-3-Clause"]
stars: 148 (2020-06-14T06:55:42.000Z → 2022-03-19T05:37:02.000Z) | issues: 261 (2020-06-16T22:29:27.000Z → 2022-03-31T10:40:52.000Z) | forks: 27 (2020-06-08T12:28:33.000Z → 2022-02-15T18:50:50.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
from lookyloo.lookyloo import Indexing, Lookyloo
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
level=logging.INFO)
if __name__ == '__main__':
main()
avg_line_length: 25.939394 | max_line_length: 168 | alphanum_fraction: 0.684579

---
hexsha: 573ad54818708562c075e93c746dc4448d743b12 | size: 740 | ext: py | lang: Python
path: save_restore_model/tf1/restore1_1.py | repo: zlpmichelle/crackingtensorflow | head: 66c3517b60c3793ef06f904e5d58e4d044628182 | licenses: ["Apache-2.0"]
stars: 3 (2017-10-19T23:41:26.000Z → 2019-10-22T08:59:35.000Z) | issues: null | forks: null
import tensorflow as tf
sess=tf.Session()
#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('/Users/lipingzhang/Downloads/model/my_tf_model-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('/Users/lipingzhang/Downloads/model/'))
# Now, let's access and create placeholders variables and
# create feed-dict to feed new data
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("w1:0")
w2 = graph.get_tensor_by_name("w2:0")
feed_dict ={w1:13.0,w2:17.0}
#Now, access the op that you want to run.
op_to_restore = graph.get_tensor_by_name("op_to_restore:0")
#Add more to the current graph
add_on_op = tf.multiply(op_to_restore,2)
print(sess.run(add_on_op, feed_dict))
#This will print 120.
avg_line_length: 30.833333 | max_line_length: 94 | alphanum_fraction: 0.777027

---
hexsha: 573b50d93fdcd613c5e4eb9cd5d3608413327c07 | size: 633 | ext: py | lang: Python
path: src/game.py | repo: LuisMarques99/Number-Guesser-Terminal | head: 6abfac23268022f7ce3776a20d1d6f550955d6c8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from random import randrange
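# (sketch) main() is not included in this snippet; a minimal guessing loop
# consistent with the import above and the repo name (assumed behavior):
def main():
    secret = randrange(1, 101)
    guess = int(input('Guess a number between 1 and 100: '))
    while guess != secret:
        guess = int(input('Wrong, try again: '))
    print('Correct!')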
if __name__ == "__main__":
main()
avg_line_length: 21.827586 | max_line_length: 90 | alphanum_fraction: 0.624013

---
hexsha: 573b7032640a85abec559a72d8a9edcb24834621 | size: 378 | ext: py | lang: Python
path: Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py | repo: akkik04/Python-DataStructures-and-Algorithms | head: 8db63173218e5a9205dbb325935c71fec93b695c | licenses: ["MIT"]
stars: 1 (2022-01-22T18:19:07.000Z → 2022-01-22T18:19:07.000Z) | issues: null | forks: null
# ARRAYS-DS HACKERRANK SOLUTION:
# creating a function to reverse the array.
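# (sketch) the function body is omitted in this snippet; a minimal version
# consistent with the call below:
def reverseArray(a):
    return a[::-1]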
# receiving input.
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
# printing the output.
print(reverseArray(arr))
avg_line_length: 22.235294 | max_line_length: 47 | alphanum_fraction: 0.653439

---
hexsha: 573eb0d44cfa9120f4cdede91149047e20c421a4 | size: 1,456 | ext: py | lang: Python
path: bmds_server/analysis/admin.py | repo: shapiromatron/bmds-server | head: 0b2b79b521728582fa66100621e9ea03e251f9f1 | licenses: ["MIT"]
stars: 1 (2019-07-09T16:42:15.000Z → 2019-07-09T16:42:15.000Z) | issues: 103 (2016-11-14T15:58:53.000Z → 2022-03-07T21:01:03.000Z) | forks: 2 (2017-03-17T20:43:22.000Z → 2018-01-04T19:15:18.000Z)
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.db.models import TextChoices
from django.utils.html import format_html
from . import models
avg_line_length: 28.54902 | max_line_length: 97 | alphanum_fraction: 0.691621

---
hexsha: 573ec927838cc2f17f74c48d89acf3a9486bfe1d | size: 96 | ext: py | lang: Python
path: venv/lib/python3.8/site-packages/numpy/distutils/fcompiler/pg.py (same for stars, issues, forks)
stars: repo Retraces/UkraineBot | head 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | licenses ["MIT"] | count 2 (2022-03-13T01:58:52.000Z → 2022-03-31T06:07:54.000Z)
issues: repo DesmoSearch/Desmobot | head b70b45df3485351f471080deb5c785c4bc5c4beb | licenses ["MIT"] | count 19 (2021-11-20T04:09:18.000Z → 2022-03-23T15:05:55.000Z)
forks: repo DesmoSearch/Desmobot | head b70b45df3485351f471080deb5c785c4bc5c4beb | licenses ["MIT"] | count null
/home/runner/.cache/pip/pool/34/e0/75/b2dceb8ef40c652edb20f4e059370015eddc8cdbde039f92ced519a83d
avg_line_length: 96 | max_line_length: 96 | alphanum_fraction: 0.895833

---
hexsha: 5742d249cea7cefa19d4a4ea9010a2450f58aa8b | size: 552 | ext: py | lang: Python
path: 03/00/0.py | repo: pylangstudy/201801 | head: eee9cfd2b370153359183d3c8f7fe117f4392142 | licenses: ["CC0-1.0"]
stars: null | issues: null | forks: null
#https://qiita.com/stkdev/items/a44976fb81ae90a66381
#import imaplib, re, email, six, dateutil.parser
import imaplib, re, email
email_default_encoding = 'iso-2022-jp'
if __name__ == '__main__':
main()
avg_line_length: 32.470588 | max_line_length: 161 | alphanum_fraction: 0.71558

---
hexsha: 57452ea96aff7c8f3e31ad97f424bdd254f5bb63 | size: 5,468 | ext: py | lang: Python
path: sql/filewalk.py | repo: kylef-lab41/Redwood | head: c4e1c8284444b91246e52c165ea150eea23b26b9 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import binascii
import datetime
import hashlib
import mimetypes
import os
import re
import struct
import subprocess
import sys
import time
import urllib
import csv
from Queue import Queue
# 8 byte unique ID generator give a path.
# - first five bytes are first five from sha1 of path name
# - last 3 are the first three from the current time
# Returns a long
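# (sketch) the generator itself is omitted in this snippet; one possible
# implementation matching the comment above:
def unique_id(path):
    sha_bytes = hashlib.sha1(path).digest()[:5]            # first five bytes of sha1(path)
    time_bytes = struct.pack('>I', int(time.time()))[:3]   # first three bytes of the packed current time
    return struct.unpack('>Q', sha_bytes + time_bytes)[0]  # 8 bytes -> long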
BUFFER = 4096
omitted_dirs = ['/dev', '/proc', '/sys', '/Volumes', '/mnt', '/net']
if __name__=="__main__":
main(sys.argv)
avg_line_length: 32.939759 | max_line_length: 211 | alphanum_fraction: 0.602597

---
hexsha: 574587d505f7c19dabd0452d40b6544e75b9a682 | size: 10,136 | ext: py | lang: Python
path: processing_scripts/database_update/pokedex_entry.py | repo: CorentG/Pokecube-Issues-and-Wiki | head: 690af5d8499561f65f761fd49fbf5fc2bc85c4c3 | licenses: ["MIT"]
stars: 24 (2019-02-02T20:37:53.000Z → 2022-02-09T13:51:41.000Z) | issues: 671 (2018-08-20T08:46:35.000Z → 2022-03-26T00:11:43.000Z) | forks: 68 (2018-09-25T21:03:40.000Z → 2022-02-25T19:59:51.000Z)
import csv_loader
import moves_names
avg_line_length: 37.128205 | max_line_length: 120 | alphanum_fraction: 0.530979

---
hexsha: 5746c4fc2776ee414b40d5372100f22e8a3258f4 | size: 25,539 | ext: py | lang: Python
path: tests/test_add.py | repo: open-contracting/kingfisher-views | head: 7887610a144493f2ccd0d9a22cf43157dc180479 | licenses: ["BSD-3-Clause"]
stars: 2 (2019-02-19T16:15:19.000Z → 2020-07-25T04:05:45.000Z) | issues: 142 (2019-03-11T15:14:22.000Z → 2020-11-11T19:26:09.000Z) | forks: 5 (2019-04-11T14:11:10.000Z → 2020-07-30T22:45:59.000Z)
import datetime
import decimal
from unittest.mock import patch
import pytest
from click.testing import CliRunner
from psycopg2 import sql
from manage import SUMMARIES, cli, construct_where_fragment
from tests import assert_bad_argument, assert_log_records, assert_log_running, fixture, noop
command = 'add'
TABLES = {
'note',
}
SUMMARY_TABLES = set()
SUMMARY_VIEWS = set()
FIELD_LIST_TABLES = set()
NO_FIELD_LIST_TABLES = set()
NO_FIELD_LIST_VIEWS = set()
for table_name, table in SUMMARIES.items():
FIELD_LIST_TABLES.add(f'{table_name}_field_list')
if table.is_table:
SUMMARY_TABLES.add(table_name)
NO_FIELD_LIST_TABLES.add(f'{table_name}_no_field_list')
else:
SUMMARY_VIEWS.add(table_name)
NO_FIELD_LIST_VIEWS.add(f'{table_name}_no_field_list')
TABLES.add(f'{table_name}_no_data')
avg_line_length: 40.092622 | max_line_length: 118 | alphanum_fraction: 0.539958

---
hexsha: 57476587984e17ece720d64d289aa21890dba64a | size: 3,520 | ext: py | lang: Python
path: ReportGenerator.py | repo: taarruunnnn/VAPT-Report-Generator-Vulnerability | head: 8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38 | licenses: ["MIT"]
stars: 1 (2020-11-30T18:09:40.000Z → 2020-11-30T18:09:40.000Z) | issues: null | forks: 1 (2020-09-16T20:51:18.000Z → 2020-09-16T20:51:18.000Z)
import os
from docx import Document
from docx.shared import Inches
from docx import section
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from docx.shared import Cm
from docx.shared import RGBColor
import docx
avg_line_length: 31.711712 | max_line_length: 92 | alphanum_fraction: 0.609091

---
hexsha: 5750825ae1de9236544f8dff0657979e541dfed6 | size: 764 | ext: py | lang: Python
path: Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py (same for stars, issues, forks)
stars: repo Pythobit/Python-tutorial | head b0743eaa9c237c3578131ead1b3f2c295f11b7ee | licenses ["MIT"] | count 3 (2021-02-19T18:33:00.000Z → 2021-08-03T14:56:50.000Z)
issues: repo barawalojas/Python-tutorial | head 3f4b2b073e421888b3d62ff634658317d9abcb9b | licenses ["MIT"] | count 1 (2021-07-10T14:37:57.000Z → 2021-07-20T09:51:39.000Z)
forks: repo barawalojas/Python-tutorial | head 3f4b2b073e421888b3d62ff634658317d9abcb9b | licenses ["MIT"] | count 1 (2021-08-02T05:39:38.000Z → 2021-08-02T05:39:38.000Z)
# Copying files
# Ask user for a list of 3 friends.
# for each friend, we'll tell user whether they're nearby.
# for each nearby friend, we'll save their name to `nearby_friends.txt`.
friends = input("Enter three friends' names (separated by commas): ").split(',')
people = open('people.txt', 'r')
people_nearby = [line.strip() for line in people.readlines()]
people.close()
# Making set of friends and peoples
friends_set = set(friends)
people_nearby_set = set(people_nearby)
friends_nearby_set = friends_set.intersection(people_nearby_set)
nearby_friends_file = open('nearby_friends.txt', 'w')
for friend in friends_nearby_set:
print(f'{friend} is nearby.! Meet up with them.')
nearby_friends_file.write(f'{friend}\n')
nearby_friends_file.close()
avg_line_length: 27.285714 | max_line_length: 77 | alphanum_fraction: 0.743455

---
hexsha: 5750d5afb4b68c06b08670b53610fc887297a148 | size: 722 | ext: py | lang: Python
path: beginner_contest/167/C.py | repo: FGtatsuro/myatcoder | head: 25a3123be6a6311e7d1c25394987de3e35575ff4 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n, m, x = map(int, input().split())
ca = [0] * n
ca_sum = [0] * (m+1)
for i in range(n):
ca[i] = list(map(int, input().split()))
for j in range(m+1):
ca_sum[j] += ca[i][j]
ans = 10 ** 10
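# (comment added) brute force over all 2**n subsets: a '1' bit removes that
# book's contribution from the running totals, so the '0' bits form the
# purchased set; keep the cheapest subset whose every skill column reaches x.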
for i in range(2 ** n):
tmp = 0
tmp_ca_sum = ca_sum.copy()
for j, v in enumerate(format(i, r'0{}b'.format(n))):
if v == '0':
continue
for k in range(m+1):
tmp_ca_sum[k] -= ca[j][k]
flag = True
for v2 in tmp_ca_sum[1:]:
if v2 < x:
flag = False
break
if flag:
ans = min(ans, tmp_ca_sum[0])
if ans == 10 ** 10:
print(-1)
else:
print(ans)
avg_line_length: 21.235294 | max_line_length: 56 | alphanum_fraction: 0.49723

---
hexsha: 5752dc5277d06864407fc67287bd73391b57e2b0 | size: 923 | ext: py | lang: Python
path: src/ProjectHeart/forms.py | repo: LokotamaTheMastermind/secret-password-saver | head: e97f139b2cad9e1b0e9283079252d9a76764e3c1 | licenses: ["Unlicense"]
stars: null | issues: null | forks: null
from django import forms
from .models import Passwords
avg_line_length: 43.952381 | max_line_length: 89 | alphanum_fraction: 0.67714

---
hexsha: 57550dfdc85fef1e9e1bc0066478d7d691371d64 | size: 184 | ext: py | lang: Python
path: data_relay/src/plugins/AzureBlob.py | repo: phil-d-wilson/connectorV2 | head: 7077aa1c74276e8e334a8046793e942eec8d9975 | licenses: ["Apache-2.0"]
stars: null | issues: 49 (2021-04-09T14:41:50.000Z → 2021-07-28T10:54:48.000Z) | forks: 2 (2021-04-24T10:47:57.000Z → 2021-07-17T07:13:00.000Z)
NAME = "Azure BLOB storage"
TYPE = "remote"
FILE = "AzureBlob.yaml"
VARS = [
"AZURE_BLOB_STORAGE_ACCOUNT",
"AZURE_BLOB_STORAGE_ACCOUNT_KEY",
"AZURE_BLOB_CONTAINER_NAME",
]
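# (sketch, not part of the original file) a plugin loader would typically
# resolve these variables from the environment, e.g.:
#   import os
#   settings = {var: os.environ.get(var) for var in VARS}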
avg_line_length: 20.444444 | max_line_length: 37 | alphanum_fraction: 0.717391

---
hexsha: 5755b24aeb6ff531368ac2aba89c8fd019b3b452 | size: 8,965 | ext: py | lang: Python
path: tests/application/test_helpers.py | repo: alphagov-mirror/performanceplatform-admin | head: b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | licenses: ["MIT"]
stars: 1 (2017-05-14T21:31:33.000Z → 2017-05-14T21:31:33.000Z) | issues: 33 (2015-01-05T12:23:45.000Z → 2021-03-24T10:59:47.000Z) | forks: 4 (2017-03-16T15:52:33.000Z → 2021-04-10T20:14:53.000Z)
import unittest
from application import app
from application.helpers import (
requires_authentication,
requires_feature,
signed_in,
group_by_group,
signed_in_no_access,
no_access,
has_user_with_token,
view_helpers,
user_has_feature,
)
from hamcrest import assert_that, equal_to, is_
from mock import patch
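# NOTE: the test functions below take 'self'; their enclosing TestCase class
# definition is not included in this snippet.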
def test_group_by_group_groups_datasets_by_group(self):
data_sets = [
{
'data_group': "group_1",
'data_type': "type1"
},
{
'data_group': "group_1",
'data_type': "type2"
},
{
'data_group': "group_2",
'data_type': "type3"
}
]
grouped_data_sets = {
"group_1": [
{
'data_group': "group_1",
'data_type': "type1"
},
{
'data_group': "group_1",
'data_type': "type2"
}
],
"group_2": [
{
'data_group': "group_2",
'data_type': "type3"
}
]
}
assert_that(group_by_group(data_sets), equal_to(grouped_data_sets))
def test_admin_user_has_bigedit_feature(self):
user = {'permissions': ['admin']}
assert_that(user_has_feature('big-edit', user), equal_to(True))
def test_dashboard_editor_user_does_not_have_bigedit_feature(self):
user = {'permissions': ['dashboard-editor']}
assert_that(user_has_feature('big-edit', user), equal_to(False))
def test_dashboard_editor_and_admin_user_does_have_bigedit_feature(self):
user = {'permissions': ['dashboard-editor', 'admin']}
assert_that(user_has_feature('big-edit', user), equal_to(True))
def test_user_with_permissions_not_in_list_features(self):
user = {'permissions': ['signin']}
assert_that(user_has_feature('big-edit', user), equal_to(False))
avg_line_length: 37.19917 | max_line_length: 78 | alphanum_fraction: 0.636587

---
hexsha: 575730cc1be427336b55d40ef3a3e2821b465a72 | size: 1,210 | ext: py | lang: Python
path: Unit 7/Ai bot/test bots/SlightlySmartSue.py | repo: KevinBoxuGao/ICS3UI | head: 2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2020-03-09T16:22:33.000Z → 2020-03-09T16:22:33.000Z)
from random import *
#STRATEGY SUMMARY: DON'T DUCK IF THE OPPONENT HAS NO SNOWBALLS. OTHERWISE, PICK RANDOMLY.
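# (sketch) the bot's move function is omitted in this snippet; a minimal
# version of the summarized strategy (function name and move set are assumed):
def get_move(opponent_snowballs):
    if opponent_snowballs == 0:
        return choice(['THROW', 'RELOAD'])  # no need to duck
    return choice(['THROW', 'DUCK', 'RELOAD'])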
avg_line_length: 31.842105 | max_line_length: 99 | alphanum_fraction: 0.565289

---
hexsha: 5757a45f92b96ddd746ba5a5bd686085a734073c | size: 298 | ext: py | lang: Python
path: customers/views/customers.py | repo: chorna/taxi24 | head: 09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c | licenses: ["MIT"]
stars: null | issues: null | forks: null
from rest_framework import viewsets
from customers.models import Customer
from customers.serializers.customers import CustomerSerializer
# Create your views here.
avg_line_length: 24.833333 | max_line_length: 62 | alphanum_fraction: 0.825503

---
hexsha: 57580cabba2c7dce9e5d8666af96b5e694af9738 | size: 5,370 | ext: py | lang: Python
path: pysoa/test/plan/grammar/directives/expects_values.py | repo: zetahernandez/pysoa | head: 006e55ba877196a42c64f2ff453583d366082d55 | licenses: ["Apache-2.0"]
stars: 91 (2017-05-08T22:41:33.000Z → 2022-02-09T11:37:07.000Z) | issues: 63 (2017-06-14T20:08:49.000Z → 2021-06-16T23:08:25.000Z) | forks: 26 (2017-10-13T23:23:13.000Z → 2022-01-11T16:58:17.000Z)
"""
Expect action directives
"""
from __future__ import (
absolute_import,
unicode_literals,
)
from pyparsing import (
CaselessLiteral,
LineEnd,
Literal,
Optional,
Suppress,
)
from pysoa.test.plan.grammar.assertions import (
assert_not_expected,
assert_not_present,
assert_subset_structure,
)
from pysoa.test.plan.grammar.data_types import (
DataTypeGrammar,
get_parsed_data_type_value,
)
from pysoa.test.plan.grammar.directive import (
ActionDirective,
VarNameGrammar,
VarValueGrammar,
register_directive,
)
from pysoa.test.plan.grammar.tools import path_put
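# NOTE: ActionExpectsFieldValueDirective (the base class referenced below) is
# not included in this snippet.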
class ActionExpectsAnyDirective(ActionExpectsFieldValueDirective):
"""
Set expectations for values to be in the service call response where any value for the given data type will be
accepted.
"""
def assert_test_case_action_results(
self,
action_name,
action_case,
test_case,
test_fixture,
action_response,
job_response,
msg=None,
**kwargs
):
if 'expects_not_present' in action_case:
assert_not_present(
action_case['expects_not_present'],
action_response.body,
msg,
)
register_directive(ActionExpectsFieldValueDirective)
register_directive(ActionExpectsAnyDirective)
register_directive(ActionExpectsNoneDirective)
register_directive(ActionExpectsNotPresentDirective)
avg_line_length: 26.716418 | max_line_length: 115 | alphanum_fraction: 0.60298

---
hexsha: 575871e8030b4782c2b2ff33f329031a54131855 | size: 454 | ext: py | lang: Python
path: src/manual/add_uuid_col.py | repo: lshtm-gis/WHO_PHSM_Cleaning | head: 5892673922fc555fb86d6e0be548b48c7dc66814 | licenses: ["MIT"]
stars: null | issues: 123 (2020-10-12T11:06:27.000Z → 2021-04-28T15:32:29.000Z) | forks: null
'''
Script to add uuid to existing records
Also shifts who_code values to original_who_code
'''
import uuid
import pandas as pd
manually_cleaned = pd.read_csv('data/cleansed/mistress_latest_old.csv', low_memory=False)
manually_cleaned['uuid'] = [str(uuid.uuid4()) for x in manually_cleaned.iloc[:, 1]]
manually_cleaned['original_who_code'] = manually_cleaned['who_code']
manually_cleaned.to_csv('data/cleansed/mistress_latest.csv', index = False)
avg_line_length: 25.222222 | max_line_length: 89 | alphanum_fraction: 0.779736

---
hexsha: 575a4a3127b8298acd5fe22aa043d391fe755667 | size: 1,821 | ext: py | lang: Python
path: tests/test_qml.py | repo: phil65/PrettyQt | head: 26327670c46caa039c9bd15cb17a35ef5ad72e6c | licenses: ["MIT"]
stars: 7 (2019-05-01T01:34:36.000Z → 2022-03-08T02:24:14.000Z) | issues: 141 (2019-04-16T11:22:01.000Z → 2021-04-14T15:12:36.000Z) | forks: 5 (2019-04-17T11:48:19.000Z → 2021-11-21T10:30:19.000Z)
"""Tests for `prettyqt` package."""
import pathlib
import pytest
from prettyqt import core, qml
from prettyqt.utils import InvalidParamError
# def test_jsvalue():
# val = qml.JSValue(2)
# val["test"] = 1
# assert val["test"].toInt() == 1
# assert "test" in val
# assert val.get_value() == 2
avg_line_length: 24.945205 | max_line_length: 59 | alphanum_fraction: 0.641406

---
hexsha: 9386838c937de37405273fac5771d31ccf1a0479 | size: 2,550 | ext: py | lang: Python
path: demo.py | repo: HsienYu/tree_demo | head: aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc | licenses: ["Artistic-2.0"]
stars: null | issues: null | forks: null
# Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 30
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False,
pixel_order=ORDER)
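# (sketch) repeat_fun, white_breath and rainbow_cycle are not included in
# this snippet; a minimal repeat_fun consistent with the calls below:
def repeat_fun(times, fn, *args):
    for _ in range(times):
        fn(*args)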
try:
while True:
print("light start")
repeat_fun(5, white_breath)
        # rainbow cycle with 10 ms delay per step
repeat_fun(3, rainbow_cycle, 0.01)
# white_breath()
# for i in range(num_pixels):
# for r in range(255):
# pixels[i] = (r, 0, 0)
# pixels.show()
# time.sleep(0.001)
# j = i - 1
# for y in range(255):
# pixels[j] = (y, y, y)
# pixels.show()
# time.sleep(0.001)
# time.sleep(0.01)
except KeyboardInterrupt:
print("KeyboardInterrupt has been caught.")
avg_line_length: 25.757576 | max_line_length: 92 | alphanum_fraction: 0.533333

---
hexsha: 93880a88a41dae3cf1a05e55925780f80609dbdb | size: 1,774 | ext: py | lang: Python
path: fsm.py | repo: yusun1997/Chatbot | head: ee49d4a64857889ce1d1a8659a1de15cf062bd77 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from transitions.extensions import GraphMachine
| 27.71875
| 89
| 0.638106
|
93881978c162edde4ca5dd970ae7fc5d1d4dfecc
| 1,861
|
py
|
Python
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 15
|
2017-11-30T01:28:11.000Z
|
2021-08-12T09:17:36.000Z
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 71
|
2018-06-22T09:54:50.000Z
|
2020-10-21T07:10:54.000Z
|
rptk/query/__init__.py
|
wolcomm/rptk
|
fe6c1b597741ff14e4c89519458bb0950f0aa955
|
[
"Apache-2.0"
] | 2
|
2019-08-31T20:45:19.000Z
|
2019-10-02T18:26:58.000Z
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query module."""
from __future__ import print_function
from __future__ import unicode_literals
from rptk.base import BaseObject
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
avg_line_length: 27.776119 | max_line_length: 79 | alphanum_fraction: 0.667383

---
hexsha: 93888830e4d4bc95cb50e37baa9660d706afdc8a | size: 1,697 | ext: py | lang: Python
path: test/__main__.py | repo: harisekhon/pylib | head: 1d8fcfc0a26251a832536a5ff6bf0ef618b8508e | licenses: ["MIT"]
stars: 1 (2015-12-17T21:08:22.000Z → 2015-12-17T21:08:22.000Z) | issues: null | forks: null
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2015-11-14 12:21:54 +0000 (Sat, 14 Nov 2015)
#
# https://github.com/HariSekhon/pylib
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve or steer this or other code I publish
#
# http://www.linkedin.com/in/harisekhon
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
#import glob
#import inspect
#import subprocess
#import sys
## using optparse rather than argparse for servers still on Python 2.6
#from optparse import OptionParser
# libdir = os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
libdir = os.path.join(os.path.dirname(__file__), '..')
# sys.path.append(libdir)
# try:
# from harisekhon.utils import *
# except ImportError, e:
# print('module import failed: %s' % e)
# sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
if __name__ == '__main__':
main()
avg_line_length: 28.283333 | max_line_length: 88 | alphanum_fraction: 0.700648

---
hexsha: 9389cb7a39d34434b205d05068e576faba98ddc7 | size: 1,639 | ext: py | lang: Python
path: legacy/tests/test_complete_tdf.py | repo: solar464/TDF_deterministic_encryption | head: ff9dceacb37ce7727a8205cc72a4d928d37cce6f | licenses: ["MIT"]
stars: null | issues: null | forks: null
import unittest
import pickle
from array import array
import complete_tdf
from floodberry.floodberry_ed25519 import GE25519
from tdf_strucs import TDFMatrix, TDFError
from complete_tdf import CTDFCodec as Codec, CTDFCipherText as CipherText
from utils import int_lst_to_bitarr
TEST_DIR = "legacy/tests/"
PACK_TEST_KEY_FILE = TEST_DIR + "ctdf_pack_test_keys.p"
PACK_TEST_CT_FILE = TEST_DIR + "ctdf_pack_test_ct.p"
TDF_KEY_FILE = TEST_DIR + "ctdf_test_keys.p"
TDF_CT_FILE = TEST_DIR + "ctdf_test_ct.p"
"""
x = [0,1,2]
ctdf = CTDFCodec(len(x)*8)
u = ctdf.encode(x)
result = ctdf.decode(u)
"""
TDF = Codec.deserialize(TDF_KEY_FILE)
CT = CipherText.deserialize(TDF_CT_FILE)
X = int_lst_to_bitarr([0,1,2], 3)
avg_line_length: 29.267857 | max_line_length: 73 | alphanum_fraction: 0.700427

---
hexsha: 938c81e0713358c52bf4da54926facac71c9eb0c | size: 431 | ext: py | lang: Python
path: wherethefuck/celery.py | repo: luismasuelli/wherethefuck | head: 6e68543a804c299be4362836c518e34f10029b48 | licenses: ["MIT"]
stars: 1 (2019-11-18T15:02:16.000Z → 2019-11-18T15:02:16.000Z) | issues: null | forks: null
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wherethefuck.settings')
# create and run celery workers.
app = Celery(broker=settings.CELERY_BROKER_URL)
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(settings.INSTALLED_APPS)
if __name__ == '__main__':
app.start()
avg_line_length: 28.733333 | max_line_length: 72 | alphanum_fraction: 0.798144

---
hexsha: 938caebc89dfb3ff5ef96ed29916b89a93439b97 | size: 395 | ext: py | lang: Python
path: plot_main.py | repo: Alexhaoge/MLSR | head: 1397176ea4c71533f3995ff476727217125c9f83 | licenses: ["MIT"]
stars: 1 (2020-12-27T15:45:09.000Z → 2020-12-27T15:45:09.000Z) | issues: null | forks: 1 (2021-04-08T17:03:36.000Z → 2021-04-08T17:03:36.000Z)
from MLSR.data import DataSet
from MLSR.plot import *
x = DataSet('data/rand_select_400_avg.csv')
x.generate_feature()
y = DataSet('data/not_selected_avg.csv')
y.generate_feature()
z = DataSet.static_merge(x, y)
#plot_tsne(z, 'log/tsne.png')
z = z.convert_to_ssl()
z0, z1 = z.split_by_weak_label()
plot_tsne_ssl(z0, 'log/0_tsne.png', n_iter=300)
plot_tsne_ssl(z1, 'log/1_tsne.png', n_iter=500)
avg_line_length: 28.214286 | max_line_length: 47 | alphanum_fraction: 0.749367

---
hexsha: 939056f893dc7a63b3b4c5c9d0f8b92f4cb9205c | size: 7,652 | ext: py | lang: Python
path: utils/utils_convert2hdf5.py | repo: jiyeonkim127/PSI | head: 5c525d5304fb756c9314ea3e225bbb180e521b9a | licenses: ["Xnet", "X11"]
stars: 138 (2020-04-18T19:32:12.000Z → 2022-03-31T06:58:33.000Z) | issues: 19 (2020-04-21T18:24:20.000Z → 2022-03-12T00:25:11.000Z) | forks: 19 (2020-04-22T01:32:25.000Z → 2022-03-24T02:52:01.000Z)
import numpy as np
import scipy.io as sio
import os, glob, sys
import h5py_cache as h5c
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal')
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal/source')
from batch_gen_hdf5 import BatchGeneratorWithSceneMeshMatfile
import torch
'''
In this script, we put all mat files into a hdf5 file, so as to speed up the data loading process.
'''
dataset_path = '/mnt/hdd/PROX/snapshot_realcams_v3'
outfilename = 'realcams.hdf5'
h5file_path = os.path.join('/home/yzhang/Videos/PROXE', outfilename)
batch_gen = BatchGeneratorWithSceneMeshMatfile(dataset_path=dataset_path,
scene_verts_path = '/home/yzhang/Videos/PROXE/scenes_downsampled',
scene_sdf_path = '/home/yzhang/Videos/PROXE/scenes_sdf',
device=torch.device('cuda'))
### create the dataset used in the hdf5 file
with h5c.File(h5file_path, mode='w',chunk_cache_mem_size=1024**2*128) as hdf5_file:
while batch_gen.has_next_batch():
train_data = batch_gen.next_batch(1)
if train_data is None:
continue
train_data_np = [x.detach().cpu().numpy() for x in train_data[:-1]]
break
[depth_batch, seg_batch, body_batch,
cam_ext_batch, cam_int_batch, max_d_batch,
s_verts_batch, s_faces_batch,
s_grid_min_batch, s_grid_max_batch,
s_grid_dim_batch, s_grid_sdf_batch] = train_data_np
n_samples = batch_gen.n_samples
print('-- n_samples={:d}'.format(n_samples))
hdf5_file.create_dataset("sceneid", shape=(1,), chunks=True, dtype=np.float32, maxshape=(None,) )
hdf5_file.create_dataset("depth", shape=(1,)+tuple(depth_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(depth_batch.shape[1:]) )
hdf5_file.create_dataset("seg", shape=(1,)+tuple(seg_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(seg_batch.shape[1:]) )
hdf5_file.create_dataset("body", shape=(1,)+tuple(body_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(body_batch.shape[1:]) )
hdf5_file.create_dataset("cam_ext", shape=(1,)+tuple(cam_ext_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(cam_ext_batch.shape[1:]) )
hdf5_file.create_dataset("cam_int", shape=(1,)+tuple(cam_int_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(cam_int_batch.shape[1:]) )
hdf5_file.create_dataset("max_d", shape=(1,)+tuple(max_d_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(max_d_batch.shape[1:]) )
# hdf5_file.create_dataset("s_verts", shape=(1,)+tuple(s_verts_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_verts_batch.shape[1:]) )
# hdf5_file.create_dataset("s_faces", shape=(1,)+tuple(s_faces_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_faces_batch.shape[1:]) )
# hdf5_file.create_dataset("s_grid_min", shape=(1,)+tuple(s_grid_min_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_min_batch.shape[1:]))
# hdf5_file.create_dataset("s_grid_max", shape=(1,)+tuple(s_grid_max_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_max_batch.shape[1:]))
# hdf5_file.create_dataset("s_grid_dim", shape=(1,)+tuple(s_grid_dim_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_dim_batch.shape[1:]))
# hdf5_file.create_dataset("s_grid_sdf", shape=(1,)+tuple(s_grid_sdf_batch.shape[1:]) ,chunks = True, dtype=np.float32, maxshape=(None,)+tuple(s_grid_sdf_batch.shape[1:]))
batch_gen.reset()
scene_list = ['BasementSittingBooth','MPH1Library', 'MPH8', 'MPH11', 'MPH16',
'MPH112', 'N0SittingBooth', 'N0Sofa', 'N3Library', 'N3Office',
              'N3OpenArea', 'Werkraum'] # !!!! important: sceneid below depends on this ordering
### create the dataset used in the hdf5 file
idx = -1
while batch_gen.has_next_batch():
train_data = batch_gen.next_batch(1)
if train_data is None:
continue
[depth_batch, seg_batch, body_batch,
cam_ext_batch, cam_int_batch, max_d_batch,
s_verts_batch, s_faces_batch,
s_grid_min_batch, s_grid_max_batch,
s_grid_dim_batch, s_grid_sdf_batch, filename_list] = train_data
    ## check for unavailable prox fitting
body_z_batch = body_batch[:,2]
if body_z_batch.abs().max() >= max_d_batch.abs().max():
print('-- encountered bad prox fitting. Skip it')
continue
if body_z_batch.min() <=0:
print('-- encountered bad prox fitting. Skip it')
continue
idx = idx+1
print('-- processing batch idx {:d}'.format(idx))
filename = filename_list[0]
scenename = filename.split('/')[-2].split('_')[0]
sid = [scene_list.index(scenename)]
hdf5_file["sceneid"].resize((hdf5_file["sceneid"].shape[0]+1, ))
hdf5_file["sceneid"][-1,...] = sid[0]
hdf5_file["depth"].resize((hdf5_file["depth"].shape[0]+1, )+hdf5_file["depth"].shape[1:])
hdf5_file["depth"][-1,...] = depth_batch[0].detach().cpu().numpy()
hdf5_file["seg"].resize((hdf5_file["seg"].shape[0]+1, )+hdf5_file["seg"].shape[1:])
hdf5_file["seg"][-1,...] = seg_batch[0].detach().cpu().numpy()
hdf5_file["body"].resize((hdf5_file["body"].shape[0]+1, )+hdf5_file["body"].shape[1:])
hdf5_file["body"][-1,...] = body_batch[0].detach().cpu().numpy()
hdf5_file["cam_ext"].resize((hdf5_file["cam_ext"].shape[0]+1, )+hdf5_file["cam_ext"].shape[1:])
hdf5_file["cam_ext"][-1,...] = cam_ext_batch[0].detach().cpu().numpy()
hdf5_file["cam_int"].resize((hdf5_file["cam_int"].shape[0]+1, )+hdf5_file["cam_int"].shape[1:])
hdf5_file["cam_int"][-1,...] = cam_int_batch[0].detach().cpu().numpy()
hdf5_file["max_d"].resize((hdf5_file["max_d"].shape[0]+1, )+hdf5_file["max_d"].shape[1:])
hdf5_file["max_d"][-1,...] = max_d_batch[0].detach().cpu().numpy()
# hdf5_file["s_verts"].resize((hdf5_file["s_verts"].shape[0]+1, )+hdf5_file["s_verts"].shape[1:])
# hdf5_file["s_verts"][-1,...] = s_verts_batch[0].detach().cpu().numpy()
# hdf5_file["s_faces"].resize((hdf5_file["s_faces"].shape[0]+1, )+hdf5_file["s_faces"].shape[1:])
# hdf5_file["s_faces"][-1,...] = s_faces_batch[0].detach().cpu().numpy()
# hdf5_file["s_grid_min"].resize((hdf5_file["s_grid_min"].shape[0]+1, )+hdf5_file["s_grid_min"].shape[1:])
# hdf5_file["s_grid_min"][-1,...] = s_grid_min_batch[0].detach().cpu().numpy()
# hdf5_file["s_grid_max"].resize((hdf5_file["s_grid_max"].shape[0]+1, )+hdf5_file["s_grid_max"].shape[1:])
# hdf5_file["s_grid_max"][-1,...] = s_grid_max_batch[0].detach().cpu().numpy()
# hdf5_file["s_grid_dim"].resize((hdf5_file["s_grid_dim"].shape[0]+1, )+hdf5_file["s_grid_dim"].shape[1:])
# hdf5_file["s_grid_dim"][-1,...] = s_grid_dim_batch[0].detach().cpu().numpy()
# hdf5_file["s_grid_sdf"].resize((hdf5_file["s_grid_sdf"].shape[0]+1, )+hdf5_file["s_grid_sdf"].shape[1:])
# hdf5_file["s_grid_sdf"][-1,...] = s_grid_sdf_batch[0].detach().cpu().numpy()
print('--file converting finish')
avg_line_length: 49.688312 | max_line_length: 176 | alphanum_fraction: 0.627418

---
hexsha: 939077a570d47a79177487efee3028816d6b91da | size: 421 | ext: py | lang: Python
path: trains/models.py | repo: Seshathri-saravanan/quest | head: 397c92e36167b9554fd72f55bdac0a2cbcdfea6f | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-11-09T15:58:33.000Z → 2021-11-09T15:58:33.000Z)
from django.db import models
avg_line_length: 32.384615 | max_line_length: 49 | alphanum_fraction: 0.724466

---
hexsha: 93938181b040ac3ac5f94151cbff662943eef747 | size: 3,324 | ext: py | lang: Python
path: tests/test_names.py | repo: fabiocaccamo/python-fontbro | head: 2ed7ef0d3d1ed4d91387278cfb5f7fd63324451b | licenses: ["MIT"]
stars: 11 (2021-11-17T23:51:55.000Z → 2022-03-17T20:38:14.000Z) | issues: 4 (2022-02-21T02:16:06.000Z → 2022-03-28T02:18:16.000Z) | forks: null
# -*- coding: utf-8 -*-
from fontbro import Font
from tests import AbstractTestCase
avg_line_length: 38.206897 | max_line_length: 85 | alphanum_fraction: 0.65343

---
hexsha: 93946c005b5692a8ad70e09207171d1d003f400a | size: 3,901 | ext: py | lang: Python
path: parse_ecosim_files.py | repo: lmorillas/ecosim_to_we | head: 2bd158146fa72ec7cbc9bcd1aaa57f3c6715cb56 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from amara.bindery import html
from amara.lib import U
import urllib
import urlparse
import time
'''
This script extracts data from HTML pages and writes it into a .json file
ready for creating the mediawiki / wikieducator pages
'''
BASE = 'http://academics.smcvt.edu/dmccabe/teaching/Community/'
def parse_notes_file(f):
'''
    Parse a file-like stream and return title & content.
Title is the first line of the file
Content is delimited by 'beginnotes' and 'endnotes' words
'''
title = f.readline().strip()
content = []
for line in f:
if line.startswith('beginnotes'):
break
for line in f:
if line.startswith('endnotes'):
break
else:
line = line.strip() or '\n\n'
content.append(line)
content = ' '.join(content)
content = content.decode('utf-8', 'replace')
return {'title': title, 'content': content}
def parse_anchor_ecosim(anchor):
'''
It returns text and href url from an html anchor for ecosim
'''
name = U(anchor).lower().strip()
url = urlparse.urljoin(BASE, anchor.href)
return name, url
def parse_anchor_notes(anchor):
'''
It returns the text and href url from an html anchor for ecosim notes. Removes the
    'Notes and data from:' words from the text.
'''
name = U(anchor).replace('Notes and data from:', '').lower().strip()
url = urlparse.urljoin(BASE, anchor.href)
return name, url
def parse_ecosim_file(url):
'''
Parse the url from an ecosim data file.
It returns the data, species, files
'''
f = urllib.urlopen(url)
lines = [l for l in f.readlines()]
species = len(lines) -1
sites = len(lines[0].split()) -1
return ''.join(lines), species, sites
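# (sketch) parse_all_notes and numbertit are omitted in this snippet; a
# minimal numbertit consistent with its use in change_titles (assumed
# behavior: append a counter to a repeated title):
def numbertit(title, n):
    return '%s %d' % (title, n)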
def change_titles(pages):
'''
    Adds numbers to repeated titles
'''
titles = {}
for p in pages:
title = p.get('title')
n = titles.get(title, 0)
if n == 0:
titles[title] = 1
else:
titles[title] = n + 1
title = numbertit(title, n)
p['title'] = title
if __name__ == '__main__':
import json
# Main index file
f = 'http://academics.smcvt.edu/dmccabe/teaching/Community/NullModelData.html'
doc = html.parse(f)
#ecosim data links
ecosim_files = doc.xml_select(u'//a[contains(@href, "matrices_ecosim")]')
# ecosim notes links
notes_files = doc.xml_select(u'//a[contains(@href, "matrices_notes")]')
# name -> url
ecodict = dict([parse_anchor_ecosim(e) for e in ecosim_files])
notesdict = dict([parse_anchor_notes(e) for e in notes_files])
# names sorted
ecokeys = ecodict.keys()
allnotes = parse_all_notes(notesdict)
# json.dump(allnotes, open('allnotes.json', 'w')) # if you want to create a dump
pages = []
for x in ecokeys:
print 'parsing data', x
k = str(x) # element to process Shelve keys must be Str
eco_url = ecodict.get(k)
data, species, sites = parse_ecosim_file(eco_url)
d = allnotes.get(k)
if not d:
print 'Not found', k
continue
d['data'] = data
#create_page(d, eco_url)
d['species'] = species
d['sites'] = sites
d['source'] = eco_url
d['name'] = k
pages.append(d)
time.sleep(0.2) # no want DOS
change_titles(pages)
json.dump(pages, open('pages_to_create.json', 'w'))
avg_line_length: 25.664474 | max_line_length: 93 | alphanum_fraction: 0.605229

---
hexsha: 9394ac8b332dbc27f6671e32b2abfcd0890092b3 | size: 117 | ext: py | lang: Python
path: web_scraping/ec2files/ec2file78.py (same for stars, issues, forks)
stars: repo nikibhatt/Groa | head fc2d4ae87cb825e6d54a0831c72be16541eebe61 | licenses ["MIT"] | count 1 (2020-04-08T20:11:48.000Z → 2020-04-08T20:11:48.000Z)
issues: repo cmgospod/Groa | head 31b3624bfe61e772b55f8175b4e95d63c9e67966 | licenses ["MIT"] | count null
forks: repo cmgospod/Groa | head 31b3624bfe61e772b55f8175b4e95d63c9e67966 | licenses ["MIT"] | count 1 (2020-09-12T07:07:41.000Z → 2020-09-12T07:07:41.000Z)
from scraper import *
s = Scraper(start=138996, end=140777, max_iter=30, scraper_instance=78)
s.scrape_letterboxd()
avg_line_length: 39 | max_line_length: 72 | alphanum_fraction: 0.777778

---
hexsha: 9396f021d37c0bb0196896103dbb10d80bb60437 | size: 20,826 | ext: py | lang: Python
path: backend/tests/usecases/test_control_resource.py (same for stars, issues, forks)
stars: repo crosspower/naruko | head 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | licenses ["MIT"] | count 17 (2019-01-23T04:37:43.000Z → 2019-10-15T01:42:31.000Z)
issues: repo snickerjp/naruko | head 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | licenses ["MIT"] | count 1 (2019-01-23T08:04:44.000Z → 2019-01-23T08:44:33.000Z)
forks: repo snickerjp/naruko | head 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | licenses ["MIT"] | count 6 (2019-01-23T09:10:59.000Z → 2020-12-02T04:15:41.000Z)
from django.core.exceptions import PermissionDenied
from django.test import TestCase
from backend.models import UserModel, AwsEnvironmentModel
from unittest import mock
# mock
with mock.patch('backend.models.OperationLogModel.operation_log', lambda executor_index=None, target_method=None, target_arg_index_list=None: lambda func: func):
from backend.usecases.control_resource import ControlResourceUseCase
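# NOTE: the test method below takes 'self'; its enclosing TestCase class
# definition is not included in this snippet.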
def test_start_resource_not_belong_to_tenant(self):
mock_user = mock.Mock(spec=UserModel)
mock_user.is_belong_to_tenant.return_value = False
mock_aws = mock.Mock(spec=AwsEnvironmentModel)
mock_resource = mock.Mock()
#
with self.assertRaises(PermissionDenied):
ControlResourceUseCase(mock.Mock()).start_resource(mock_user, mock_aws, mock_resource)
#
mock_user.is_belong_to_tenant.assert_called_once()
mock_user.has_aws_env.assert_not_called()
mock_resource.start.assert_not_called()
# (a block of non-ASCII comments was garbled in extraction; only stray "AWS" fragments remained)
avg_line_length: 39.146617 | max_line_length: 162 | alphanum_fraction: 0.687554

---
hexsha: 93972dbe0dbe487735de9457da88b6e093fc9d7c | size: 371 | ext: py | lang: Python
path: cool.py | repo: divine-coder/CODECHEF-PYTHON | head: a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543 | licenses: ["MIT"]
stars: null | issues: 4 (2020-10-04T07:49:30.000Z → 2021-10-02T05:24:40.000Z) | forks: 7 (2020-10-04T07:46:55.000Z → 2021-11-05T14:30:00.000Z)
a=raw_input()
b=raw_input()
i=0
index=0
co=0
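# (sketch) check() is omitted in this snippet; assumed behavior: return the
# index of ch in s at or after position start, or -1 if absent
def check(s, start, ch):
    for idx in range(start, len(s)):
        if s[idx] == ch:
            return idx
    return -1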
if a[0]==b[0]:
co+=1
i+=1
while i<len(a):
index1=check(b,index+1,a[i])
print a[i],index1
if index1!=-1:
index=index1
co+=1
#print index
i+=1
print co
avg_line_length: 13.740741 | max_line_length: 32 | alphanum_fraction: 0.471698

---
hexsha: 939786f9e786e13e34a09c07c33b9d33a5fb6c2c | size: 1,273 | ext: py | lang: Python
path: core-python/Core_Python/file/RemoveTempDirs.py | repo: theumang100/tutorials-1 | head: 497f54c2adb022c316530319a168fca1c007d4b1 | licenses: ["MIT"]
stars: 9 (2020-04-23T05:24:19.000Z → 2022-02-17T16:37:51.000Z) | issues: 5 (2020-10-01T05:08:37.000Z → 2020-10-12T03:18:10.000Z) | forks: 9 (2020-04-28T14:06:41.000Z → 2021-10-19T18:32:28.000Z)
import os
from pathlib import Path
from shutil import rmtree
# change your parent dir accordingly
try:
directory = "TempDir"
parent_dir = "E:/GitHub/1) Git_Tutorials_Repo_Projects/core-python/Core_Python/"
td1, td2 = "TempA", "TempA"
path = os.path.join(parent_dir, directory)
temp_mul_dirs = os.path.join(path + os.sep + os.sep, td1 + os.sep + os.sep + td2)
    ''' These methods are used to remove a single file; all three can delete a symlink too. '''
os.remove(path +os.sep+os.sep+"TempFile.txt")
os.unlink(path +os.sep+os.sep+td1+os.sep+os.sep+"TempFilea.txt")
''' we can also use this syntax pathlib.Path(path +os.sep+os.sep+"TempFile.txt").unlink() '''
f_path = Path(temp_mul_dirs +os.sep+os.sep+"TempFileb.txt")
    f_path.unlink()
    ''' Both methods below delete empty dirs: use rmdir for a single dir,
    removedirs for nested dirs. '''
# os.remove(path)
# os.removedirs(path+os.sep+os.sep+td1)
print("List of dirs before remove : ",os.listdir(path))
    ''' To remove a non-empty directory use shutil.rmtree; Path(path).rmdir() then removes the emptied parent. '''
rmtree(path+os.sep+os.sep+td1)
Path(path).rmdir()
print("List of dirs after remove : ",os.listdir(parent_dir))
except Exception as e:
print(e)
| 47.148148
| 101
| 0.683425
|
939b63bdfc91f71662536be6efe59324a01bcaa9
| 587
|
py
|
Python
|
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | 1
|
2019-06-27T11:34:13.000Z
|
2019-06-27T11:34:13.000Z
|
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
code/python/echomesh/color/WheelColor_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.color import WheelColor
from echomesh.util.TestCase import TestCase
EXPECTED = [
    [0.0, 1.0, 0.0],
    [0.3, 0.7, 0.0],
    [0.6, 0.4, 0.0],
    [0.9, 0.1, 0.0],
    [0.0, 0.2, 0.8],
    [0.0, 0.5, 0.5],
    [0.0, 0.8, 0.2],
    [0.9, 0.0, 0.1],
    [0.6, 0.0, 0.4],
    [0.3, 0.0, 0.7],
    [0.0, 1.0, 0.0]]
| 25.521739
| 82
| 0.558773
|
939de781cdf5e974811b59296ad87e9307743d04
| 1,000
|
py
|
Python
|
modules/IPlugin.py
|
SRottgardt/data_pipeline
|
8adcc886870f49bf0d544952be689d16825fe38e
|
[
"Apache-2.0"
] | 3
|
2021-02-14T16:28:50.000Z
|
2021-02-16T23:23:49.000Z
|
modules/IPlugin.py
|
SRottgardt/data_pipeline
|
8adcc886870f49bf0d544952be689d16825fe38e
|
[
"Apache-2.0"
] | null | null | null |
modules/IPlugin.py
|
SRottgardt/data_pipeline
|
8adcc886870f49bf0d544952be689d16825fe38e
|
[
"Apache-2.0"
] | null | null | null |
from modules.CommandData import CommandData
| 26.315789
| 90
| 0.586
|
939df3af89d3fd8f0de44f63d3d42fe43872956f
| 4,890
|
py
|
Python
|
PySatImageAnalysis/sample_generator.py
|
danaja/sat_image_building_extraction
|
3d6cc26854666b566af0930a213a6f907733eaf7
|
[
"MIT"
] | 2
|
2017-03-30T16:21:45.000Z
|
2019-01-09T03:01:01.000Z
|
PySatImageAnalysis/sample_generator.py
|
danaja/sat_image_building_extraction
|
3d6cc26854666b566af0930a213a6f907733eaf7
|
[
"MIT"
] | null | null | null |
PySatImageAnalysis/sample_generator.py
|
danaja/sat_image_building_extraction
|
3d6cc26854666b566af0930a213a6f907733eaf7
|
[
"MIT"
] | 1
|
2018-12-18T08:49:55.000Z
|
2018-12-18T08:49:55.000Z
|
# -*- coding: utf-8 -*-
#Used to generate positive building samples from google satellite images
#based on OSM building polygons in geojson format
#
#Note 1: Accuracy of OSM building polygons may vary
#Note 2: Requires downloaded google satellite images(tiles) to
# have the following file name structure
# part_latitude_of_center_longitude_of_center.png
# This code was tested with tiles downloaded using
# https://github.com/tdeo/maps-hd
#Note 3: OSM building data downloaded from
# mapzen.com/data/metro-extracts/
#@Author Danaja Maldeniya
from osgeo import ogr
import os
import geoutils
import image_utils as imu
import cv2
import json
import numpy as np
map_zoom = 19
tile_size = 600
driver = ogr.GetDriverByName('ESRI Shapefile')
shp = driver.Open(r'/home/danaja/Downloads/colombo_sri-lanka.imposm-shapefiles (2)/colombo_sri-lanka_osm_buildings.shp')
layer = shp.GetLayer()
spatialRef = layer.GetSpatialRef()
print spatialRef
#Loop through the image files to get their ref location(center) latitude and longitude
tile_dir="/home/danaja/installations/maps-hd-master/bk3/images-st"
tiles = os.listdir(tile_dir)
#positive sample generation
#==============================================================================
# for tile in tiles:
# tile_name = tile.replace(".png","")
# print(tile)
# center_lat = float(tile_name.split("_")[1])
# center_lon = float(tile_name.split("_")[2])
# extent = geoutils.get_tile_extent(center_lat,center_lon,map_zoom,tile_size)
# layer.SetSpatialFilterRect(extent[2][1],extent[2][0],extent[1][1],extent[1][0])
# print("feature count: "+str(layer.GetFeatureCount()))
# print(tile_dir+"/"+tile)
# image = cv2.imread(tile_dir+"/"+tile)
# b_channel, g_channel, r_channel = cv2.split(image)
# alpha_channel = np.array(np.ones((tile_size,tile_size )) * 255,dtype=np.uint8) #creating a dummy alpha channel image.
# image= cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
# i = 0
# for feature in layer:
# coordinates = []
# geom = feature.GetGeometryRef()
# geom = json.loads(geom.ExportToJson())
#
# for coordinate in geom['coordinates'][0]:
# pixel = geoutils.get_pixel_location_in_tile_for_lat_lon( \
# coordinate[1],coordinate[0],center_lat,center_lon,map_zoom,tile_size)
# if len(coordinates) == 0:
# minx = pixel[0]
# miny = pixel[1]
# maxx = pixel[0]
# maxy = pixel[1]
# minx = min(minx,pixel[0])
# maxx = max(maxx,pixel[0])
# miny = min(miny,pixel[1])
# maxy = max(maxy,pixel[1])
# coordinates.append(tuple(reversed(pixel)))
#
# mask = np.zeros(image.shape, dtype=np.uint8)
# roi_corners = np.array([coordinates], dtype=np.int32)
# channel_count = image.shape[2] # i.e. 3 or 4 depending on your image
# ignore_mask_color = (255,)*channel_count
# cv2.fillPoly(mask, roi_corners, ignore_mask_color)
# masked_image = cv2.bitwise_and(image, mask)
# masked_image = masked_image[minx:maxx,miny:maxy]
# cv2.imwrite("positive/"+tile_name+"_"+str(i)+".png",masked_image)
# i=i+1
# layer.SetSpatialFilter(None)
#
#==============================================================================
#negative sample generation
min_size = 80
max_size = 100
for tile in tiles:
tile_name = tile.replace(".png","")
print(tile)
center_lat = float(tile_name.split("_")[1])
center_lon = float(tile_name.split("_")[2])
extent = geoutils.get_tile_extent(center_lat,center_lon,map_zoom,tile_size)
layer.SetSpatialFilterRect(extent[2][1],extent[2][0],extent[1][1],extent[1][0])
if layer.GetFeatureCount() > 0:
layer.SetSpatialFilter(None)
attempt = 0
success = 0
while (attempt <100 and success <20):
box =imu.generate_random_box(tile_size,min_size,max_size)
nw_corner = geoutils.get_lat_lon_of_point_in_tile(box[0],box[1],center_lat,center_lon,map_zoom,tile_size)
se_corner = geoutils.get_lat_lon_of_point_in_tile(box[2],box[3],center_lat,center_lon,map_zoom,tile_size)
layer.SetSpatialFilterRect(nw_corner[1],se_corner[0],se_corner[1],nw_corner[0])
            fCount = layer.GetFeatureCount()
            # Only boxes that contain no buildings qualify as negative samples
            if fCount == 0:
                image = cv2.imread(tile_dir+"/"+tile)
                bld = image[int(box[1]):int(box[3]),
                            int(box[0]):int(box[2])]
                cv2.imwrite("negative/"+tile_name+"_"+str(success)+".png", bld)
                success = success + 1
            # Reset the filter and count the attempt even when the box was rejected,
            # so the while loop cannot spin forever on a densely built-up tile
            layer.SetSpatialFilter(None)
            attempt = attempt + 1
| 38.203125
| 124
| 0.616769
|
939e7757a3e174c6114642e42e77179f804882a6
| 779
|
py
|
Python
|
notebook/demo/src/multifuns.py
|
marketmodelbrokendown/1
|
587283fd972d0060815dde82a57667e74765c9ae
|
[
"MIT"
] | 2
|
2019-03-13T15:34:42.000Z
|
2019-03-13T15:34:47.000Z
|
notebook/demo/src/multifuns.py
|
hervey-su/home
|
655b9e7b8180592742a132832795170a00debb47
|
[
"MIT"
] | 1
|
2020-11-18T21:55:20.000Z
|
2020-11-18T21:55:20.000Z
|
notebook/demo/src/multifuns.py
|
marketmodelbrokendown/1
|
587283fd972d0060815dde82a57667e74765c9ae
|
[
"MIT"
] | null | null | null |
from ctypes import cdll,c_int,c_double,POINTER
_lib = cdll.LoadLibrary('./demo/bin/libmultifuns.dll')
# double dprod(double *x, int n)
# int factorial(int n)
# int isum(int array[], int size);
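# A minimal binding sketch, assuming libmultifuns exports exactly the three
# symbols listed above (the argument and return types mirror those comments):
_lib.dprod.argtypes = [POINTER(c_double), c_int]
_lib.dprod.restype = c_double
_lib.factorial.argtypes = [c_int]
_lib.factorial.restype = c_int
_lib.isum.argtypes = [POINTER(c_int), c_int]
_lib.isum.restype = c_int
# Example call: pack Python numbers into a C array before passing it in.
# xs = (c_double * 3)(1.0, 2.0, 3.0)
# print(_lib.dprod(xs, 3))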
| 26.862069
| 59
| 0.658537
|
93a10bd2227db590b05aec0efe907cfefee1e40e
| 843
|
py
|
Python
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 2
|
2019-05-07T20:23:59.000Z
|
2020-04-26T11:18:38.000Z
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 89
|
2019-08-06T12:47:50.000Z
|
2022-03-28T04:03:25.000Z
|
api/nivo_api/cli/database.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 1
|
2020-06-23T10:07:38.000Z
|
2020-06-23T10:07:38.000Z
|
from nivo_api.core.db.connection import metadata, create_database_connections
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError
| 32.423077
| 85
| 0.720047
|
93a2c7906ab4851fa8921bb2fef6ee5531e13056
| 357
|
py
|
Python
|
start_spiders.py
|
pluto-junzeng/baiduSpider
|
ea591920cd0994e83e36f033f98c6cc6859141d6
|
[
"Apache-2.0"
] | 13
|
2020-12-07T03:19:12.000Z
|
2022-01-19T13:02:41.000Z
|
start_spiders.py
|
zengjunjun/baiduSpider
|
ea591920cd0994e83e36f033f98c6cc6859141d6
|
[
"Apache-2.0"
] | null | null | null |
start_spiders.py
|
zengjunjun/baiduSpider
|
ea591920cd0994e83e36f033f98c6cc6859141d6
|
[
"Apache-2.0"
] | 3
|
2021-07-10T08:24:55.000Z
|
2022-01-19T13:02:43.000Z
|
"""
@Author:lichunhui
@Time:
@Description:
"""
from scrapy import cmdline
# cmdline.execute("scrapy crawl baidu_spider".split())
# cmdline.execute("scrapy crawl baike_spider".split())
# cmdline.execute("scrapy crawl wiki_zh_spider".split())
# cmdline.execute("scrapy crawl wiki_en_spider".split())
cmdline.execute("scrapy crawlall".split())
| 25.5
| 56
| 0.7507
|
93a32ef6fddce5cbc92f060b72225c59adf371f7
| 515
|
py
|
Python
|
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
sources/pysimplegui/simpleeventloop.py
|
kantel/pythoncuriosa
|
4dfb92b443cbe0acf8d8efa5c54efbf13e834620
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
layout = [
[sg.Text("Wie heit Du?")],
[sg.Input(key = "-INPUT-")],
[sg.Text(size = (40, 1), key = "-OUTPUT-")],
[sg.Button("Okay"), sg.Button("Quit")]
]
window = sg.Window("Hallo PySimpleGUI", layout)
keep_going = True
while keep_going:
    event, values = window.read()
    if event == sg.WINDOW_CLOSED or event == "Quit":
        keep_going = False
    else:
        # Only update the output while the window is open; on WINDOW_CLOSED the
        # values dict may be empty and the string concatenation would fail
        window["-OUTPUT-"].update("Hallöchen " + values["-INPUT-"] + "!")
window.close()
| 24.52381
| 69
| 0.557282
|
93a5a387bf24ca83ae37f5241ea161f3010ef4cf
| 3,247
|
py
|
Python
|
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
datasets/fusiongallery.py
|
weshoke/UV-Net
|
9e833df6868695a2cea5c5b79a0b613b224eacf2
|
[
"MIT"
] | null | null | null |
import numpy as np
import pathlib
from torch.utils.data import Dataset, DataLoader
import dgl
import torch
from dgl.data.utils import load_graphs
import json
from datasets import util
from tqdm import tqdm
| 32.47
| 128
| 0.57838
|
93a73833278709acd49bb46a9f2c8ae73acf367a
| 3,690
|
py
|
Python
|
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
mpa/modules/models/heads/custom_ssd_head.py
|
openvinotoolkit/model_preparation_algorithm
|
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import smooth_l1_loss
from mmdet.models.dense_heads.ssd_head import SSDHead
| 40.108696
| 79
| 0.622493
|
93a84d645ccedf01c50e4963b06e5f5cf6720d08
| 2,918
|
py
|
Python
|
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | 2
|
2021-07-22T07:35:48.000Z
|
2022-03-03T05:48:08.000Z
|
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | null | null | null |
Python/ml_converter.py
|
daduz11/ios-facenet-id
|
0ec634cf7f4f12c2bfa6334a72d5f2ab0a4afde4
|
[
"Apache-2.0"
] | 2
|
2021-03-11T14:50:05.000Z
|
2021-04-18T14:58:24.000Z
|
"""
Copyright 2020 daduz11
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Firstly this script is used for the conversion of the freezed inference graph (pb format) into a CoreML model.
Moreover the same script takes the CoreML model at 32bit precision to carries out the quantization from 16 to 1 bit.
"""
import argparse
import sys
import tfcoreml
import coremltools
from coremltools.models.neural_network import quantization_utils
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 38.906667
| 160
| 0.660384
|
93a90aa96a7060708343be286a46a3cbad16b9b8
| 628
|
py
|
Python
|
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
pizza_utils/stringutils.py
|
ILikePizza555/py-pizza-utils
|
f336fc2c391430f5d901d85dfda50974d9f8aba7
|
[
"MIT"
] | null | null | null |
def find_from(string, subs, start = None, end = None):
"""
Returns a tuple of the lowest index where a substring in the iterable "subs" was found, and the substring.
If multiple substrings are found, it will return the first one.
If nothing is found, it will return (-1, None)
"""
string = string[start:end]
last_index = len(string)
substring = None
for s in subs:
i = string.find(s)
if i != -1 and i < last_index:
last_index = i
substring = s
if last_index == len(string):
return (-1, None)
return (last_index, substring)
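# A quick usage sketch of the contract described in the docstring
# (hypothetical inputs, not part of the original module):
if __name__ == "__main__":
    print(find_from("abcdef", ["cd", "ef"]))     # (2, 'cd'): "cd" occurs first
    print(find_from("abcdef", ["xy"]))           # (-1, None): no candidate found
    print(find_from("abcdef", ["cd"], start=3))  # (-1, None): only "def" is searched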
| 27.304348
| 110
| 0.598726
|
93aa7bc7eef6be2b816f51dac8d5aa561ac4c490
| 4,844
|
py
|
Python
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 16
|
2020-06-22T05:24:20.000Z
|
2022-02-15T11:41:14.000Z
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 24
|
2020-07-07T04:22:03.000Z
|
2021-01-03T07:21:02.000Z
|
lab/experiment_futures.py
|
ajmal017/ta_scanner
|
21f12bfd8b5936d1d1977a32c756715539b0d97c
|
[
"BSD-3-Clause"
] | 3
|
2020-06-21T12:12:14.000Z
|
2021-09-01T04:46:59.000Z
|
# todos
# - [ ] all dates and date deltas are in time, not integers
from loguru import logger
from typing import Dict
import sys
import datetime
from datetime import timedelta
import numpy as np
from ta_scanner.data.data import load_and_cache, db_data_fetch_between, aggregate_bars
from ta_scanner.data.ib import IbDataFetcher
from ta_scanner.experiments.simple_experiment import SimpleExperiment
from ta_scanner.indicators import (
IndicatorSmaCrossover,
IndicatorEmaCrossover,
IndicatorParams,
)
from ta_scanner.signals import Signal
from ta_scanner.filters import FilterCumsum, FilterOptions, FilterNames
from ta_scanner.reports import BasicReport
from ta_scanner.models import gen_engine
ib_data_fetcher = IbDataFetcher()
instrument_symbol = "/NQ"
rth = False
interval = 1
field_name = "ema_cross"
slow_sma = 25
fast_sma_min = 5
fast_sma_max = 20
filter_inverse = True
win_pts = 75
loss_pts = 30
trade_interval = 12
test_total_pnl = 0.0
test_total_count = 0
all_test_results = []
engine = gen_engine()
logger.remove()
logger.add(sys.stderr, level="INFO")
# fetch_data()
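# The helpers fetch_data, query_data, run_cross_range and run_cross are defined
# elsewhere in this project and do not appear in this excerpt. As a rough,
# assumption-laden sketch, query_data presumably fetches raw bars between two
# dates and resamples them to the requested interval, along the lines of:
# def query_data(engine, symbol, sd, ed, interval):
#     df = db_data_fetch_between(engine, symbol, sd, ed)
#     return aggregate_bars(df, interval)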
for i in range(0, 33):
initial = datetime.date(2020, 7, 10) + timedelta(days=i)
test_start, test_end = initial, initial
if initial.weekday() in [5, 6]:
continue
# fetch training data
train_sd = initial - timedelta(days=5)
train_ed = initial - timedelta(days=1)
df_train = query_data(engine, instrument_symbol, train_sd, train_ed, interval)
# for training data, let's find results for a range of SMA
results = run_cross_range(
df_train,
slow_sma=slow_sma,
fast_sma_min=fast_sma_min,
fast_sma_max=fast_sma_max,
)
fast_sma_pnl = []
for resultindex in range(2, len(results) - 3):
fast_sma = results[resultindex][0]
pnl = results[resultindex][1]
result_set = results[resultindex - 2 : resultindex + 3]
total_pnl = sum([x[1] for x in result_set])
fast_sma_pnl.append([fast_sma, total_pnl, pnl])
arr = np.array(fast_sma_pnl, dtype=float)
max_tuple = np.unravel_index(np.argmax(arr, axis=None), arr.shape)
optimal_fast_sma = int(arr[(max_tuple[0], 0)])
optimal_fast_sma_pnl = [x[2] for x in fast_sma_pnl if x[0] == optimal_fast_sma][0]
# logger.info(f"Selected fast_sma={optimal_fast_sma}. PnL={optimal_fast_sma_pnl}")
test_sd = initial
test_ed = initial + timedelta(days=1)
df_test = query_data(engine, instrument_symbol, test_sd, test_ed, interval)
test_results = run_cross(df_test, optimal_fast_sma, slow_sma)
all_test_results.append([initial] + list(test_results))
logger.info(
f"Test Results. pnl={test_results[0]}, count={test_results[1]}, avg={test_results[2]}, median={test_results[3]}"
)
test_total_pnl += test_results[0]
test_total_count += test_results[1]
logger.info(
f"--- CumulativePnL={test_total_pnl}. Trades Count={test_total_count}. After={initial}"
)
import csv
with open("simple_results.csv", "w") as csvfile:
spamwriter = csv.writer(csvfile)
for row in all_test_results:
spamwriter.writerow(row)
| 28
| 120
| 0.706441
|
93ade385d6ee900f8bf10af83edfd79ce2a15da9
| 841
|
py
|
Python
|
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | null | null | null |
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | 1
|
2020-11-15T15:43:03.000Z
|
2020-11-15T15:43:16.000Z
|
01.Hello_tkinter.py
|
amitdev101/learning-tkinter
|
1f7eabe1ac958c83c8bbe70e15682ecd4f7b5de5
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import os
print(tk)
print(dir(tk))
print(tk.TkVersion)
print(os.getcwd())
'''To initialize tkinter, we have to create a Tk root widget, which is a window with a title bar and
other decoration provided by the window manager. The root widget has to be created before any other widgets and
there can only be one root widget.'''
root = tk.Tk()
'''The next line of code contains the Label widget.
The first parameter of the Label call is the name of the parent window, in our case "root".
So our Label widget is a child of the root widget. The keyword parameter "text" specifies the text to be shown: '''
w = tk.Label(root,text='Hello world')
'''The pack method tells Tk to fit the size of the window to the given text. '''
w.pack()
'''The window won't appear until we enter the Tkinter event loop'''
root.mainloop()
| 36.565217
| 115
| 0.737218
|
93aee3614d8d0959902e63d0a0a8aa33c102d4fd
| 14,700
|
py
|
Python
|
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
myscrumy/remiljscrumy/views.py
|
mikkeyiv/Django-App
|
b1114e9e53bd673119a38a1acfefb7a9fd9f172e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect,get_object_or_404
from remiljscrumy.models import ScrumyGoals,GoalStatus,ScrumyHistory,User
from django.http import HttpResponse,Http404,HttpResponseRedirect
from .forms import SignupForm,CreateGoalForm,MoveGoalForm,DevMoveGoalForm,AdminChangeGoalForm,QAChangeGoalForm,QAChangegoal
from django.contrib.auth import authenticate,login
from django.contrib.auth.models import User,Group
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
#from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
# def move_goal(request, goal_id):
# #response = ScrumyGoals.objects.get(goal_id=goal_id)
# # try:
# #goal = ScrumyGoals.objects.get(goal_id=goal_id)
# # except ScrumyGoals.DoesNotExist:
# # raise Http404 ('A record with that goal id does not exist')
# instance = get_object_or_404(ScrumyGoals,goal_id=goal_id)
# form = MoveGoalForm(request.POST or None, instance=instance)
# if form. is_valid():
# instance = form.save(commit=False)
# instance.save()
# return redirect('home')
# context={
# 'goal_id': instance.goal_id,
# 'user': instance.user,
# 'goal_status': instance.goal_status,
# 'form':form,
# }
# return render(request, 'remiljscrumy/exception.html', context)
#move_goal = form.save(commit=False)
# move_goal =
# form.save()
# # goal_name = form.cleaned_data.get('goal_name')
# # ScrumyGoals.objects.get(goal_name)
# return redirect('home')
# def form_valid(self, form):
# form.instance.goal_status = self.request.user
# return super(addgoalForm, self).form_valid(form)
# }
# return render(request, 'remiljscrumy/exception.html', context=gdict)
#return HttpResponse(response)
# return HttpResponse('%s is the response at the record of goal_id %s' % (response, goal_id))'''
from random import randint
def home(request):
'''# all=','.join([eachgoal.goal_name for eachgoal in ScrumyGoals.objects.all()])
# home = ScrumyGoals.objects.filter(goal_name='keep learning django')
# return HttpResponse(all)
#homedict = {'goal_name':ScrumyGoals.objects.get(pk=3).goal_name,'goal_id':ScrumyGoals.objects.get(pk=3).goal_id, 'user': ScrumyGoals.objects.get(pk=3).user,}
user = User.objects.get(email="louisoma@linuxjobber.com")
name = user.scrumygoal.all()
homedict={'goal_name':ScrumyGoals.objects.get(pk=1).goal_name,'goal_id':ScrumyGoals.objects.get(pk=1).goal_id,'user':ScrumyGoals.objects.get(pk=1).user,
'goal_name1':ScrumyGoals.objects.get(pk=2).goal_name,'goal_id1':ScrumyGoals.objects.get(pk=2).goal_id,'user':ScrumyGoals.objects.get(pk=2).user,
'goal_name2':ScrumyGoals.objects.get(pk=3).goal_name,'goal_id2':ScrumyGoals.objects.get(pk=3).goal_id,'user2':ScrumyGoals.objects.get(pk=3).user}'''
# form = CreateGoalForm
# if request.method == 'POST':
# form = CreateGoalForm(request.POST)
# if form .is_valid():
# add_goal = form.save(commit=True)
# add_goal = form.save()
# # #form.save()
# return redirect('home')
    current = request.user
    # Redirect anonymous users to the login page before touching any data
    if not current.is_authenticated:
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
    week = GoalStatus.objects.get(pk=1)
    day = GoalStatus.objects.get(pk=2)
    verify = GoalStatus.objects.get(pk=3)
    done = GoalStatus.objects.get(pk=4)
    user = User.objects.all()
    weeklygoal = ScrumyGoals.objects.filter(goal_status=week)
    dailygoal = ScrumyGoals.objects.filter(goal_status=day)
    verifygoal = ScrumyGoals.objects.filter(goal_status=verify)
    donegoal = ScrumyGoals.objects.filter(goal_status=done)
    groups = current.groups.all()
    dev = Group.objects.get(name='Developer')
    owner = Group.objects.get(name='Owner')
    admin = Group.objects.get(name='Admin')
    qa = Group.objects.get(name='Quality Assurance')
    # Build the context once so it is always defined, whatever the user's group
    context = {'user': user, 'weeklygoal': weeklygoal, 'dailygoal': dailygoal, 'verifygoal': verifygoal,
               'donegoal': donegoal, 'current': current, 'groups': groups, 'dev': dev, 'owner': owner,
               'admin': admin, 'qa': qa}
    if dev in groups or qa in groups or owner in groups:
        # Developers, owners and QA may create goals; admins only get the read view
        context['form'] = CreateGoalForm()
        if request.method == 'POST':
            form = CreateGoalForm(request.POST)
            if form.is_valid():
                post = form.save(commit=False)
                post.goal_status = GoalStatus(id=1)
                post.user = current
                post.save()
    return render(request, 'remiljscrumy/home.html', context)
# else:
# form = WeekOnlyAddGoalForm()
# return HttpResponseRedirect(reverse('ayooluwaoyewoscrumy:homepage'))
# if group == 'Admin':
# context ={
# 'user':User.objects.all(),
# 'weeklygoal':ScrumyGoals.objects.filter(goal_status=week),
# 'dailygoal':ScrumyGoals.objects.filter(goal_status=day),
# 'verifiedgoals':ScrumyGoals.objects.filter(goal_status=verify),
# 'donegoal':ScrumyGoals.objects.filter(goal_status=done),
# 'current':request.user,
# 'groups':request.user.groups.all(),
# 'admin': Group.objects.get(name="Admin"),
# 'owner': Group.objects.get(name='Owner'),
# 'dev': Group.objects.get(name='Developer'),
# 'qa': Group.objects.get(name='Quality Assurance'),}
# return render(request,'remiljscrumy/home.html',context=homedict)
# if request.method == 'GET':
# return render(request, 'remiljscrumy/home.html', context)
#
| 49.328859
| 206
| 0.606531
|
93b17847a4ea4d1f1c0c385ce9727ab17aed5c27
| 3,088
|
py
|
Python
|
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
examples/ex03_oscillator_classes.py
|
icemtel/carpet
|
5905e02ab0e44822829a672955dccad3e09eea07
|
[
"MIT"
] | null | null | null |
'''
Cilia classes are used to compute fixed points faster.
- Assume symmetry like in an m-twist (make a plot to see it)
- Assume that symmetries is not broken in time -> define classes of symmetry and interactions between them.
Done:
- Create a ring of cilia.
- Define symmetry classes
- Use classes to solve ODE
- Map back to cilia
'''
import numpy as np
import carpet
import carpet.lattice.ring1d as lattice
import carpet.physics.friction_pairwise as physics
import carpet.classes as cc
import carpet.visualize as vis
import matplotlib.pyplot as plt
## Parameters
# Physics
set_name = 'machemer_1' # hydrodynamic friction coefficients data set
period = 31.25 # [ms] period
freq = 2 * np.pi / period # [rad/ms] angular frequency
order_g11 = (4, 0) # order of Fourier expansion of friction coefficients
order_g12 = (4, 4)
# Geometry
N = 6 # number of cilia
a = 18 # [um] lattice spacing
e1 = (1, 0) # direction of the chain
## Initialize
# Geometry
L1 = lattice.get_domain_size(N, a)
coords, lattice_ids = lattice.get_nodes_and_ids(N, a, e1) # get cilia (nodes) coordinates
NN, TT = lattice.get_neighbours_list(N, a, e1) # get list of neighbours and relative positions
e1, e2 = lattice.get_basis(e1)
get_k = lattice.define_get_k(N, a, e1)
get_mtwist = lattice.define_get_mtwist(coords, N, a, e1)
# Physics
gmat_glob, q_glob = physics.define_gmat_glob_and_q_glob(set_name, e1, e2, a, NN, TT, order_g11, order_g12, period)
right_side_of_ODE = physics.define_right_side_of_ODE(gmat_glob, q_glob)
solve_cycle = carpet.define_solve_cycle(right_side_of_ODE, 2 * period, phi_global_func=carpet.get_mean_phase)
# k-twist
k1 = 2
phi0 = get_mtwist(k1)
vis.plot_nodes(coords, phi=phi0) # visualize!
plt.ylim([-L1 / 10, L1 / 10])
plt.show()
## Solve regularly
tol = 1e-4
sol = solve_cycle(phi0, tol)
phi1 = sol.y.T[-1] - 2 * np.pi # after one cycle
## Now solve with classes
# Map to classes
ix_to_class, class_to_ix = cc.get_classes(phi0)
nclass = len(class_to_ix)
# Get classes representatives
# Get one oscillator from each of cilia classes
unique_cilia_ids = cc.get_unique_cilia_ix(
class_to_ix) # equivalent to sp.array([class_to_ix[iclass][0] for iclass in range(nclass)], dtype=sp.int64)
# Get connections
N1_class, T1_class = cc.get_neighbours_list_class(unique_cilia_ids, ix_to_class, NN, TT)
# Define physics
gmat_glob_class, q_glob_class = physics.define_gmat_glob_and_q_glob(set_name, e1, e2, a, N1_class, T1_class,
order_g11, order_g12, period)
right_side_of_ODE_class = physics.define_right_side_of_ODE(gmat_glob_class, q_glob_class)
solve_cycle_class = carpet.define_solve_cycle(right_side_of_ODE_class, 2 * period, carpet.get_mean_phase)
# Solve ODE
phi0_class = phi0[unique_cilia_ids]
sol = solve_cycle_class(phi0_class, tol)
phi1_class = sol.y.T[-1] - 2 * np.pi
# Map from classes back to cilia
phi1_mapped_from_class = phi1_class[ix_to_class]
## Print how much phase changed
print(phi1_mapped_from_class - phi1) # difference between two - should be on the order of tolerance or smaller
| 36.761905
| 114
| 0.748381
|
93b1f4ae1de1aaae99760a70f835707158943004
| 749
|
py
|
Python
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | 6
|
2018-03-27T15:46:28.000Z
|
2018-06-23T21:56:15.000Z
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | 3
|
2018-03-30T15:54:34.000Z
|
2018-07-11T19:44:59.000Z
|
cars/donkeycar/sim/Adafruit_PCA9685-pkg/Adafruit_PCA9685/__init__.py
|
kuaikai/kuaikai
|
ca7e7b2d2f6f16b892a21c819ba43201beadf370
|
[
"Apache-2.0"
] | null | null | null |
"""
SCL <scott@rerobots.net>
2018
"""
import json
import os
import tempfile
import time
| 22.69697
| 97
| 0.534045
|
93b2760677f1d106e80a9cb1e7a2b2ab58fbe987
| 2,851
|
py
|
Python
|
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | null | null | null |
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | null | null | null |
bayesian-belief/sequential_bayes.py
|
ichko/interactive
|
6659f81c11c0f180295b758b457343d32323eb35
|
[
"MIT"
] | 1
|
2019-02-05T20:22:08.000Z
|
2019-02-05T20:22:08.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
FIG, (LEFT_AX, MIDDLE_AX, RIGHT_AX) = plt.subplots(1, 3, figsize=(10, 3))
X_RANGE = (-2, 2)
Y_RANGE = (-2, 2)
X_DATA = np.array([])
Y_DATA = np.array([])
BELIEF = SequentialBayes(np.array([0, 0]), np.diag([1, 1]))
set_ax_range()
plot_belief(RIGHT_AX)
plot_belief_sample()
FIG.canvas.mpl_connect('button_press_event', on_click)
plt.show()
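# SequentialBayes and the plotting callbacks (set_ax_range, plot_belief,
# plot_belief_sample, on_click) are not included in this excerpt. A minimal
# sketch of the posterior update such a class typically implements (sequential
# Bayesian linear regression with a Gaussian weight prior and fixed observation
# noise; this is an assumption, not the original code):
#
# class SequentialBayes:
#     def __init__(self, mean, cov, noise_var=1.0):
#         self.mean = np.asarray(mean, dtype=float)  # posterior mean of weights
#         self.cov = np.asarray(cov, dtype=float)    # posterior covariance
#         self.noise_var = noise_var
#
#     def update(self, phi, y):
#         # Conjugate Gaussian update for one observation y = w . phi + noise
#         prior_prec = np.linalg.inv(self.cov)
#         post_prec = prior_prec + np.outer(phi, phi) / self.noise_var
#         self.cov = np.linalg.inv(post_prec)
#         self.mean = self.cov @ (prior_prec @ self.mean + phi * y / self.noise_var)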
| 24.791304
| 88
| 0.618029
|
93b33aae2d1691aa0b0588d3a8ea2f43f4819a38
| 9,255
|
py
|
Python
|
cgc/legacy/kmeans.py
|
cffbots/cgc
|
1ea8b6bb6e4e9e728aff493744d8646b4953eaa4
|
[
"Apache-2.0"
] | 11
|
2020-09-04T10:28:48.000Z
|
2022-03-10T13:56:43.000Z
|
cgc/legacy/kmeans.py
|
cffbots/cgc
|
1ea8b6bb6e4e9e728aff493744d8646b4953eaa4
|
[
"Apache-2.0"
] | 40
|
2020-08-19T09:23:15.000Z
|
2022-03-01T16:16:30.000Z
|
cgc/legacy/kmeans.py
|
phenology/geoclustering
|
9b9b6ab8e64cdb62dbed6bdcfe63612e99665fd1
|
[
"Apache-2.0"
] | 4
|
2020-10-03T21:17:18.000Z
|
2022-03-09T14:32:56.000Z
|
import numpy as np
import logging
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from ..results import Results
logger = logging.getLogger(__name__)
| 38.723849
| 79
| 0.581415
|
93b3d6b31717b3ff24e2cbf4724891aa06fd3451
| 5,175
|
py
|
Python
|
BAF2LOH.py
|
Xiaohuaniu0032/HLALOH
|
24587c75fad08e7f1821866fb72f9b7e756689bb
|
[
"MIT"
] | null | null | null |
BAF2LOH.py
|
Xiaohuaniu0032/HLALOH
|
24587c75fad08e7f1821866fb72f9b7e756689bb
|
[
"MIT"
] | 2
|
2020-10-26T01:39:33.000Z
|
2020-12-04T02:41:11.000Z
|
BAF2LOH.py
|
Xiaohuaniu0032/HLALOH
|
24587c75fad08e7f1821866fb72f9b7e756689bb
|
[
"MIT"
] | null | null | null |
import sys
import os
import configparser
import argparse
import glob
if __name__ == "__main__":
main()
| 33.823529
| 183
| 0.592464
|
93b68bf304e52b47592144b9352709027d4393ab
| 3,221
|
py
|
Python
|
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
src/tests/benchmarks/tools/bench/Vellamo3.py
|
VirtualVFix/AndroidTestFramework
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
[
"MIT"
] | null | null | null |
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code below!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "$Apr 13, 2014 8:47:25 PM$"
import re
from config import CONFIG
from tests.exceptions import ResultsNotFoundError
from tests.benchmarks.tools.base import OnlineBenchmark
| 46.681159
| 118
| 0.533064
|
93b6e5c40e7caecbcb7b62ae060f41d6eac3c44d
| 3,879
|
py
|
Python
|
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | 1
|
2020-05-07T00:24:21.000Z
|
2020-05-07T00:24:21.000Z
|
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | null | null | null |
tests/commands/test_template_image_apply_overlays.py
|
dsoprea/image_template_overlay_apply
|
ce54429e07ac140b33add685d39221b1fb5cadb2
|
[
"MIT"
] | null | null | null |
import sys
import unittest
import os
import tempfile
import shutil
import contextlib
import json
import subprocess
import PIL
import templatelayer.testing_common
_APP_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
_SCRIPT_PATH = os.path.join(_APP_PATH, 'templatelayer', 'resources', 'scripts')
_TOOL_FILEPATH = os.path.join(_SCRIPT_PATH, 'template_image_apply_overlays')
sys.path.insert(0, _APP_PATH)
| 28.733333
| 80
| 0.467389
|
93b7d3ab9113fe2fed663ad41fb0b7d4b95f018e
| 3,993
|
py
|
Python
|
src/gpt2/evaluate_model.py
|
alexgQQ/GPT2
|
b2d78965f7cdcfe7dcf475969f4d4cce2b3ee82a
|
[
"Apache-2.0"
] | 94
|
2020-05-05T04:27:05.000Z
|
2022-03-31T01:08:20.000Z
|
src/gpt2/evaluate_model.py
|
seeodm/GPT2
|
366d8517ac0bdf85e45e46adbef10cbe55740ee1
|
[
"Apache-2.0"
] | 7
|
2020-09-11T02:25:30.000Z
|
2021-11-23T16:03:01.000Z
|
src/gpt2/evaluate_model.py
|
seeodm/GPT2
|
366d8517ac0bdf85e45e46adbef10cbe55740ee1
|
[
"Apache-2.0"
] | 24
|
2020-07-14T19:15:39.000Z
|
2022-02-18T05:57:31.000Z
|
import argparse
import torch
import torch.nn as nn
from gpt2.modeling import Transformer
from gpt2.data import Dataset, Vocab, TokenizedCorpus
from gpt2.evaluation import EvaluationSpec, EvaluateConfig, Evaluator
from typing import Dict
| 42.478723
| 76
| 0.630604
|
93b7f2e32bcec2e7242f5985332622842d33261b
| 571
|
py
|
Python
|
alerter/src/alerter/alert_data/chainlink_contract_alert_data.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 41
|
2019-08-23T12:40:42.000Z
|
2022-03-28T11:06:02.000Z
|
alerter/src/alerter/alert_data/chainlink_contract_alert_data.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 147
|
2019-08-30T22:09:48.000Z
|
2022-03-30T08:46:26.000Z
|
alerter/src/alerter/alert_data/chainlink_contract_alert_data.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 3
|
2019-09-03T21:12:28.000Z
|
2021-08-18T14:27:56.000Z
|
from typing import Dict
from src.alerter.alert_data.alert_data import AlertData
| 24.826087
| 74
| 0.698774
|
93ba2653ba488171fc0c6a50b7e6cee03b9a572c
| 1,332
|
py
|
Python
|
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | 5
|
2019-03-28T03:52:32.000Z
|
2021-02-24T07:09:26.000Z
|
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | null | null | null |
mytrain/my_unpack.py
|
JinkelaCrops/t2t-learning
|
5d9b5a5164af763c24f1cbce9d97561e9f2b772c
|
[
"Apache-2.0"
] | 2
|
2018-08-07T03:43:09.000Z
|
2019-12-09T06:41:40.000Z
|
from processutils.textfilter import Unpack
from utils.simplelog import Logger
import argparse
parser = argparse.ArgumentParser(description="my_unpack")
parser.add_argument('-f', "--file_prefix", required=True)
parser.add_argument('-sep', "--separator", required=True)
# args = parser.parse_args([
# "-f", "../test/medicine.sample.data/data.test",
# "-sep", ' ||| '
# ])
args = parser.parse_args()
args.output_src = args.file_prefix + ".src"
args.output_tgt = args.file_prefix + ".tgt"
log = Logger("my_filter", "my_filter.log").log()
if __name__ == '__main__':
with open(args.file_prefix, "r", encoding="utf8") as f:
data = f.readlines()
src_lines, tgt_lines = main(data)
with open(args.output_src, "w", encoding="utf8") as f:
f.writelines(src_lines)
with open(args.output_tgt, "w", encoding="utf8") as f:
f.writelines(tgt_lines)
| 29.6
| 90
| 0.638889
|
93baf5e4d83867b7e987a8bdfa95d1e350aa7b07
| 10,173
|
py
|
Python
|
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 22
|
2021-11-24T01:23:07.000Z
|
2022-03-26T23:24:46.000Z
|
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | null | null | null |
source/api/dataplane/runtime/chalicelib/common.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 3
|
2021-12-10T09:42:51.000Z
|
2022-02-16T02:22:50.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import json
import urllib.parse
import boto3
import decimal
from decimal import Decimal
from datetime import datetime
from chalice import Chalice
from chalice import IAMAuthorizer
from chalice import ChaliceViewError, BadRequestError, NotFoundError
from botocore.config import Config
from botocore.client import ClientError
from boto3.dynamodb.conditions import Key, Attr, In
from jsonschema import validate, ValidationError
from chalicelib import replace_decimals
s3_client = boto3.client("s3")
ddb_resource = boto3.resource("dynamodb")
PLUGIN_RESULT_TABLE_NAME = os.environ['PLUGIN_RESULT_TABLE_NAME']
def get_event_segment_metadata(name, program, classifier, tracknumber):
"""
Gets the Segment Metadata based on the segments found during Segmentation/Optimization process.
"""
name = urllib.parse.unquote(name)
program = urllib.parse.unquote(program)
classifier = urllib.parse.unquote(classifier)
tracknumber = urllib.parse.unquote(tracknumber)
try:
# Get Event Segment Details
# From the PluginResult Table, get the Clips Info
plugin_table = ddb_resource.Table(PLUGIN_RESULT_TABLE_NAME)
response = plugin_table.query(
KeyConditionExpression=Key("PK").eq(f"{program}#{name}#{classifier}"),
ScanIndexForward=False
)
plugin_responses = response['Items']
while "LastEvaluatedKey" in response:
response = plugin_table.query(
ExclusiveStartKey=response["LastEvaluatedKey"],
KeyConditionExpression=Key("PK").eq(f"{program}#{name}#{classifier}"),
ScanIndexForward=False
)
plugin_responses.extend(response["Items"])
# if "Items" not in plugin_response or len(plugin_response["Items"]) == 0:
# print(f"No Plugin Responses found for event '{name}' in Program '{program}' for Classifier {classifier}")
# raise NotFoundError(f"No Plugin Responses found for event '{name}' in Program '{program}' for Classifier {classifier}")
clip_info = []
for res in plugin_responses:
optoLength = 0
if 'OptoEnd' in res and 'OptoStart' in res:
                # By default OptoEnd and OptoStart are maps and have no keys. Only when they do, we check for TrackNumbers
if len(res['OptoEnd'].keys()) > 0 and len(res['OptoStart'].keys()) > 0:
try:
optoLength = res['OptoEnd'][tracknumber] - res['OptoStart'][tracknumber]
except Exception as e:
pass # Error if the TrackNumber does not exist. Simply Ignore since its a problem with Clip Gen
# Calculate Opto Clip Duration for each Audio Track
optoDurationsPerTrack = []
if 'OptoEnd' in res and 'OptoStart' in res:
for k in res['OptoStart'].keys():
try:
optoDur = {}
optoDur[k] = res['OptoEnd'][k] - res['OptoStart'][k]
optoDurationsPerTrack.append(optoDur)
except Exception as e:
pass # Error if the TrackNumber does not exist. Simply Ignore since its a problem with Clip Gen
optoClipLocation = ''
if 'OptimizedClipLocation' in res:
                # This is not ideal. We need to check if there exists an OptimizedClipLocation with the requested TrackNumber.
                # If not, it is likely a problem with Clip Gen. Instead of failing, we send an empty value for optoClipLocation back.
for trackNo in res['OptimizedClipLocation'].keys():
if str(trackNo) == str(tracknumber):
optoClipLocation = create_signed_url(res['OptimizedClipLocation'][tracknumber])
break
origClipLocation = ''
if 'OriginalClipLocation' in res:
for trackNo in res['OriginalClipLocation'].keys():
if str(trackNo) == str(tracknumber):
origClipLocation = create_signed_url(res['OriginalClipLocation'][tracknumber])
break
label = ''
if 'Label' in res:
label = res['Label']
if str(label) == "":
label = '<no label plugin configured>'
clip_info.append({
'OriginalClipLocation': origClipLocation,
'OriginalThumbnailLocation': create_signed_url(
res['OriginalThumbnailLocation']) if 'OriginalThumbnailLocation' in res else '',
'OptimizedClipLocation': optoClipLocation,
'OptimizedThumbnailLocation': create_signed_url(
res['OptimizedThumbnailLocation']) if 'OptimizedThumbnailLocation' in res else '',
'StartTime': res['Start'],
'Label': label,
'FeatureCount': 'TBD',
'OrigLength': 0 if 'Start' not in res else res['End'] - res['Start'],
'OptoLength': optoLength,
'OptimizedDurationPerTrack': optoDurationsPerTrack,
'OptoStartCode': '' if 'OptoStartCode' not in res else res['OptoStartCode'],
'OptoEndCode': '' if 'OptoEndCode' not in res else res['OptoEndCode']
})
final_response = {}
final_response['Segments'] = clip_info
except NotFoundError as e:
print(e)
print(f"Got chalice NotFoundError: {str(e)}")
raise
except Exception as e:
print(e)
print(f"Unable to get the Event '{name}' in Program '{program}': {str(e)}")
raise ChaliceViewError(f"Unable to get the Event '{name}' in Program '{program}': {str(e)}")
else:
return replace_decimals(final_response)
| 45.013274
| 132
| 0.626954
|
93bc932331d06fe620b9dc241c2d48eeb8fdbdb8
| 9,559
|
py
|
Python
|
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
oec_sync/sync/oec.py
|
SamnnyWong/OECSynchronizer
|
9b28c96988158f5717bacd47f59cbabb1ce072cd
|
[
"Unlicense",
"MIT"
] | null | null | null |
from xml.etree import ElementTree as Etree
from model import *
from astro_unit import *
from io import StringIO
import logging
# Maps field name to tuple of (type, unit)
# Only the following columns will be understood
PLANET_FIELDS = {
"semimajoraxis": FieldMeta("number", 'AU'),
"eccentricity": FieldMeta("number"), # unit not needed
"periastron": FieldMeta("number", 'deg'),
"longitude": FieldMeta("number", 'deg'),
"ascendingnode": FieldMeta("number", 'deg'),
"inclination": FieldMeta("number", 'deg'),
"impactparameter": FieldMeta("number"), # unit not needed
"meananomaly": FieldMeta("number", 'deg'),
"period": FieldMeta("number", 'days'),
"transittime": FieldMeta("number", 'BJD'),
"periastrontime": FieldMeta("number", 'BJD'),
"maximumrvtime": FieldMeta("number", 'BJD'),
"separation": FieldMeta("number", 'arcsec'), # unit on xml element
"mass": FieldMeta("number", 'M_j'),
"radius": FieldMeta("number", 'R_j'),
"temperature": FieldMeta("number", 'K'),
"age": FieldMeta("number", 'Gyr'),
# "discoverymethod": FieldMeta("discoverymethodtype"),
# "istransiting": FieldMeta("boolean"),
# "description": "xs:string",
"discoveryyear": FieldMeta("number", None),
# "lastupdate": FieldMeta("lastupdatedef", None),
# "image",
# "imagedescription",
"spinorbitalignment": FieldMeta("number", 'deg'),
"positionangle": FieldMeta("number", 'deg'),
# "metallicity": FieldMeta("number"), # unit not needed
# "spectraltype": FieldMeta("spectraltypedef"),
# "magB": FieldMeta("number", None),
"magH": FieldMeta("number", None),
"magI": FieldMeta("number", None),
"magJ": FieldMeta("number", None),
"magK": FieldMeta("number", None),
# "magR": FieldMeta("number", None),
# "magU": FieldMeta("number", None),
"magV": FieldMeta("number", None)
}
class Adapter:
    # def validate(self, file: str) -> None:
    #     """
    #     Validates an xml using the schema defined by OEC.
    #     Raises an exception if the file does not follow the schema.
    #     :param file: File name.
    #     """
    #     return  # skip for now, because OEC itself isn't following the schema
    #     # tree = etree.parse(file)
    #     # self._schema.assertValid(tree)
    def update_str(self, xml_string: str, update: PlanetarySysUpdate) \
            -> Tuple[str, bool]:
        """
        Apply a system update to an xml string.
        Also performs a check afterwards to determine if
        the action succeeded.
        :param xml_string: containing the xml representation of a system
        :param update: Update to be applied to the system
        :return: A tuple (content, succeeded) where:
            - content is the file content modified
            - succeeded indicates whether the update was successful.
        """
        tree = Etree.parse(StringIO(xml_string))
        ok = Adapter._write_system_update(tree, update)
        serialized = Etree.tostring(tree.getroot(), 'unicode', 'xml')
        return serialized, ok
    def update_file(self, filename: str, update: PlanetarySysUpdate) -> bool:
        """
        Apply a system update to an xml file.
        :param filename: The system xml file
        :param update: Update to be applied to the system
        :return: Whether the update was successful
        """
        tree = Etree.parse(filename)
        succeeded = Adapter._write_system_update(tree, update)
        tree.write(filename)
        return succeeded
| 37.194553
| 79
| 0.574746
|
93bd3505c0bee8de6a5685c5e02ee9cbc78b0fdd
| 9,072
|
py
|
Python
|
pyAnVIL/anvil/util/ingest_helper.py
|
anvilproject/client-apis
|
cbd892042e092b0a1dede4c561bcfdde15e9a3ad
|
[
"Apache-2.0"
] | 8
|
2019-07-02T20:41:24.000Z
|
2022-01-12T21:50:21.000Z
|
pyAnVIL/anvil/util/ingest_helper.py
|
mmtmn/client-apis
|
215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f
|
[
"Apache-2.0"
] | 37
|
2019-01-16T17:48:02.000Z
|
2021-08-13T21:35:54.000Z
|
pyAnVIL/anvil/util/ingest_helper.py
|
mmtmn/client-apis
|
215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f
|
[
"Apache-2.0"
] | 7
|
2019-05-13T14:59:27.000Z
|
2022-01-12T21:50:22.000Z
|
"""Validate AnVIL workspace(s)."""
import os
from google.cloud.storage import Client
from google.cloud.storage.blob import Blob
from collections import defaultdict
import ipywidgets as widgets
from ipywidgets import interact
from IPython.display import display
import pandas as pd
import firecloud.api as FAPI
from types import SimpleNamespace
import numpy as np
| 53.680473
| 270
| 0.61541
|
93bd854e0f319cd263a25841957dc223b7ca22bf
| 1,513
|
py
|
Python
|
acsl-pydev/acsl/lect03p2/c1_inter_stigid_sol1.py
|
odys-z/hello
|
39ca67cae34eb4bc4cbd848a06b3c0d65a995954
|
[
"MIT"
] | null | null | null |
acsl-pydev/acsl/lect03p2/c1_inter_stigid_sol1.py
|
odys-z/hello
|
39ca67cae34eb4bc4cbd848a06b3c0d65a995954
|
[
"MIT"
] | 3
|
2021-04-17T18:36:24.000Z
|
2022-03-04T20:30:09.000Z
|
acsl-pydev/acsl/lect03p2/c1_inter_stigid_sol1.py
|
odys-z/hello
|
39ca67cae34eb4bc4cbd848a06b3c0d65a995954
|
[
"MIT"
] | null | null | null |
'''
Intermediate C#1, stigid
PROBLEM:
Given a number less than 10^50 and length n, find the sum of all the n -digit
numbers (starting on the left) that are formed such that, after the first n
-digit number is formed all others are formed by deleting the leading digit
and taking the next n -digits.
'''
from unittest import TestCase
# return a list
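# The solution function is missing from this excerpt, but the assertions below
# pin its behavior down exactly: for each [digits, n] pair it returns the sum
# of every n-digit sliding window of the digit string.
def c1_inter_1(cases):
    return [sum(int(digits[i:i + n]) for i in range(len(digits) - n + 1))
            for digits, n in cases]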
t = TestCase()
reslt = c1_inter_1([
['1325678905', 2], ['54981230845791', 5], ['4837261529387456', 3],
['385018427388713440', 4], ['623387770165388734652209', 11]])
t.assertEqual(455, reslt[0])
t.assertEqual(489210, reslt[1])
t.assertEqual(7668, reslt[2])
t.assertEqual(75610, reslt[3])
t.assertEqual(736111971668, reslt[4])
reslt = c1_inter_1([[ '834127903876541', 3 ],
[ '2424424442442420', 1 ], [ '12345678909876543210123456789', 12 ],
[ '349216', 6 ], [ '11235813245590081487340005429', 2 ]])
t = TestCase()
t.assertEqual(6947, reslt[0])
t.assertEqual(48, reslt[1])
t.assertEqual(9886419753191, reslt[2])
t.assertEqual(349216, reslt[3])
t.assertEqual(11 + 12 + 23 + 35 + 58 + 81 + 13 + 32 + 24 + 45 + 55 + 59 + 90 + 00 + 8 + 81 + 14 + 48 + 87 + 73 + 34 + 40 + 00 + 00 + 5 + 54 + 42 + 29,
reslt[4])
print('OK!')
| 29.096154
| 150
| 0.643754
|
93be43a153e5c582c6fc97ea9f7ab11f70cb7196
| 1,857
|
py
|
Python
|
otcextensions/tests/functional/sdk/vlb/v3/test_certificate.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | 10
|
2018-03-03T17:59:59.000Z
|
2020-01-08T10:03:00.000Z
|
otcextensions/tests/functional/sdk/vlb/v3/test_certificate.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | 208
|
2020-02-10T08:27:46.000Z
|
2022-03-29T15:24:21.000Z
|
otcextensions/tests/functional/sdk/vlb/v3/test_certificate.py
|
artem-lifshits/python-otcextensions
|
2021da124f393e0429dd5913a3bc635e6143ba1e
|
[
"Apache-2.0"
] | 15
|
2020-04-01T20:45:54.000Z
|
2022-03-23T12:45:43.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from otcextensions.tests.functional.sdk.vlb import TestVlb
| 41.266667
| 76
| 0.736672
|
93c35e82e3070b5dcaa7b5ce0646c0a3d9c9b51e
| 5,760
|
py
|
Python
|
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | 1
|
2017-03-11T14:14:12.000Z
|
2017-03-11T14:14:12.000Z
|
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | null | null | null |
hasy.py
|
MartinThoma/cv-datasets
|
f0566839bc2e625274bd2d439114c6665ba1b37e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Utility file for the HASYv2 dataset.
See https://arxiv.org/abs/1701.08380 for details.
"""
from __future__ import absolute_import
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
import scipy.ndimage
import os
import tarfile
import shutil
import csv
from six.moves import cPickle as pickle
n_classes = 369
labels = []
def _load_csv(filepath, delimiter=',', quotechar="'"):
"""
Load a CSV file.
Parameters
----------
filepath : str
Path to a CSV file
delimiter : str, optional
quotechar : str, optional
Returns
-------
list of dicts : Each line of the CSV file is one element of the list.
"""
data = []
csv_dir = os.path.dirname(filepath)
with open(filepath, 'rb') as csvfile:
reader = csv.DictReader(csvfile,
delimiter=delimiter,
quotechar=quotechar)
for row in reader:
for el in ['path', 'path1', 'path2']:
if el in row:
row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))
data.append(row)
return data
def _generate_index(csv_filepath):
"""
Generate an index 0...k for the k labels.
Parameters
----------
csv_filepath : str
Path to 'test.csv' or 'train.csv'
Returns
-------
dict : Maps a symbol_id as in test.csv and
train.csv to an integer in 0...k, where k is the total
number of unique labels.
"""
symbol_id2index = {}
data = _load_csv(csv_filepath)
i = 0
labels = []
for item in data:
if item['symbol_id'] not in symbol_id2index:
symbol_id2index[item['symbol_id']] = i
labels.append(item['latex'])
i += 1
return symbol_id2index, labels
def load_data():
"""
Load HASYv2 dataset.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
# Download if not already done
fname = 'HASYv2.tar.bz2'
origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'
fpath = get_file(fname, origin=origin, untar=False,
md5_hash='fddf23f36e24b5236f6b3a0880c778e3')
path = os.path.dirname(fpath)
# Extract content if not already done
untar_fpath = os.path.join(path, "HASYv2")
if not os.path.exists(untar_fpath):
print('Untaring file...')
tfile = tarfile.open(fpath, 'r:bz2')
try:
tfile.extractall(path=untar_fpath)
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
# Create pickle if not already done
pickle_fpath = os.path.join(untar_fpath, "fold1.pickle")
if not os.path.exists(pickle_fpath):
# Load mapping from symbol names to indices
symbol_csv_fpath = os.path.join(untar_fpath, "symbols.csv")
symbol_id2index, labels = _generate_index(symbol_csv_fpath)
globals()["labels"] = labels
# Load first fold
fold_dir = os.path.join(untar_fpath, "classification-task/fold-1")
train_csv_fpath = os.path.join(fold_dir, "train.csv")
test_csv_fpath = os.path.join(fold_dir, "test.csv")
train_csv = _load_csv(train_csv_fpath)
test_csv = _load_csv(test_csv_fpath)
WIDTH = 32
HEIGHT = 32
x_train = np.zeros((len(train_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
x_test = np.zeros((len(test_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
y_train, s_train = [], []
y_test, s_test = [], []
# Load training data
for i, data_item in enumerate(train_csv):
fname = os.path.join(untar_fpath, data_item['path'])
s_train.append(fname)
x_train[i, 0, :, :] = scipy.ndimage.imread(fname,
flatten=False,
mode='L')
label = symbol_id2index[data_item['symbol_id']]
y_train.append(label)
y_train = np.array(y_train, dtype=np.int64)
# Load test data
for i, data_item in enumerate(test_csv):
fname = os.path.join(untar_fpath, data_item['path'])
s_test.append(fname)
            x_test[i, 0, :, :] = scipy.ndimage.imread(fname,
                                                      flatten=False,
                                                      mode='L')
label = symbol_id2index[data_item['symbol_id']]
y_test.append(label)
y_test = np.array(y_test, dtype=np.int64)
data = {'x_train': x_train,
'y_train': y_train,
'x_test': x_test,
'y_test': y_test,
'labels': labels
}
# Store data as pickle to speed up later calls
with open(pickle_fpath, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(pickle_fpath, 'rb') as f:
data = pickle.load(f)
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
globals()["labels"] = data['labels']
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_dim_ordering() == 'tf':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
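# Usage sketch (the first call downloads and unpacks the HASYv2 tarball):
# (x_train, y_train), (x_test, y_test) = load_data()
# print(x_train.shape, len(labels))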
| 31.823204
| 78
| 0.562326
|
93c6042870f0e48cc7e28c2ead79abb162bef666
| 1,201
|
py
|
Python
|
pyvisdk/do/quiesce_datastore_io_for_ha_failed.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/quiesce_datastore_io_for_ha_failed.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/quiesce_datastore_io_for_ha_failed.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def QuiesceDatastoreIOForHAFailed(vim, *args, **kwargs):
    '''A QuiesceDatastoreIOForHAFailed fault occurs when the HA agent on a host cannot
    quiesce file activity on a datastore that is to be unmounted or removed.'''
obj = vim.client.factory.create('{urn:vim25}QuiesceDatastoreIOForHAFailed')
# do some validation checking...
if (len(args) + len(kwargs)) < 10:
        raise IndexError('Expected at least 10 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ 'ds', 'dsName', 'host', 'hostName', 'name', 'type', 'dynamicProperty',
'dynamicType', 'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| 34.314286
| 124
| 0.621982
|
93c65b7f60b1d4ed3df0c1dfda29fa877d20e341
| 8,071
|
py
|
Python
|
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
control/tracking.py
|
oholsen/hagedag
|
4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a
|
[
"Apache-2.0"
] | null | null | null |
"""
Based on Extended kalman filter (EKF) localization sample in PythonRobotics by Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
# Simulation parameter
INPUT_NOISE = np.diag([0.1, np.deg2rad(30.0)]) ** 2
GPS_NOISE = np.diag([0.1, 0.1]) ** 2
# Covariance for EKF simulation
Q = np.diag([
0.02, # variance of location on x-axis
0.02, # variance of location on y-axis
np.deg2rad(10.0), # variance of yaw angle
0.1 # variance of velocity
]) ** 2 # predict state covariance
# Observation x,y position covariance, now dynamic from receiver (input stream)
# R = np.diag([0.02, 0.02]) ** 2
def jacob_f(x, u, DT: float):
"""
Jacobian of Motion Model
motion model
x_{t+1} = x_t+v*dt*cos(yaw)
y_{t+1} = y_t+v*dt*sin(yaw)
yaw_{t+1} = yaw_t+omega*dt
v_{t+1} = v{t}
so
dx/dyaw = -v*dt*sin(yaw)
dx/dv = dt*cos(yaw)
dy/dyaw = v*dt*cos(yaw)
dy/dv = dt*sin(yaw)
"""
yaw = x[2, 0]
v = u[0, 0]
jF = np.array([
[1.0, 0.0, -DT * v * math.sin(yaw), DT * math.cos(yaw)],
[0.0, 1.0, DT * v * math.cos(yaw), DT * math.sin(yaw)],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
return jF
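
# Hedged usage sketch (not in the original file): one EKF prediction step
# built from jacob_f and the process covariance Q defined above. The motion
# model mirrors the one described in jacob_f's docstring; the name
# `ekf_predict` is an illustrative assumption.
# e.g. x = np.zeros((4, 1)); P = np.eye(4); u = np.array([[1.0], [0.1]])
def ekf_predict(x, P, u, DT: float):
    yaw = x[2, 0]
    v = u[0, 0]
    # Propagate the state through the nonlinear motion model.
    x_pred = x.copy()
    x_pred[0, 0] += v * DT * math.cos(yaw)
    x_pred[1, 0] += v * DT * math.sin(yaw)
    x_pred[2, 0] += u[1, 0] * DT
    x_pred[3, 0] = v
    # Propagate the covariance through the linearised model.
    jF = jacob_f(x, u, DT)
    P_pred = jF @ P @ jF.T + Q
    return x_pred, P_pred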
if __name__ == '__main__':
import asyncio
asyncio.run(main())
| 25.143302
| 107
| 0.525585
|
93c6c2347f6844b6a0ab7634a4e1c68474fa2859
| 337
|
py
|
Python
|
tests/test_plugin_setup.py
|
ldb385/q2-winnowing
|
f9c1dc7ecedbd3d204b3a26974f29a164de3eaf1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-24T21:58:38.000Z
|
2020-07-24T21:58:38.000Z
|
tests/test_plugin_setup.py
|
ldb385/q2-winnowing
|
f9c1dc7ecedbd3d204b3a26974f29a164de3eaf1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-21T16:45:03.000Z
|
2020-07-21T16:45:03.000Z
|
tests/test_plugin_setup.py
|
ldb385/q2-winnowing
|
f9c1dc7ecedbd3d204b3a26974f29a164de3eaf1
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import TestCase, main as unittest_main
from q2_winnowing.plugin_setup import plugin as winnowing_plugin
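
# The test body was not included in this excerpt; a minimal, hedged sanity
# check is sketched below. The asserted plugin name is an assumption taken
# from the package name.
class TestPluginSetup(TestCase):

    def test_plugin_is_registered(self):
        self.assertEqual(winnowing_plugin.name, 'winnowing')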
if __name__ == '__main__':
unittest_main()
| 22.466667
| 64
| 0.756677
|
93c70f97a9fcc20d868e2f05ea3a698a7c994530
| 974
|
py
|
Python
|
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | 4
|
2018-02-19T13:57:38.000Z
|
2022-01-08T04:10:54.000Z
|
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | null | null | null |
Lab11/BacktrackingIterative.py
|
alexnaiman/Fundamentals-Of-Programming---Lab-assignments
|
ef066e6036e20b9c686799f507f10e15e50e3285
|
[
"MIT"
] | null | null | null |
l = [0, "-", "+"]
n = int(input("Give number: "))
list2 = []
for i in range(n):
list2.append(int(input(str(i) + ":")))
backIter()
print("test")
| 21.173913
| 73
| 0.464066
|
93c86f77e89802184faaf894ae457e773562fb59
| 31,674
|
py
|
Python
|
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
dreadlord_counter_strike.py
|
lorenypsum/dreadlord_counter_strike
|
5f63c97ab28d84f8d7d9ff2f481c5111f0bc2ef1
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from enum import Enum, auto
from random import randint
from time import sleep
from typing import Optional, Tuple
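
# GameItem and GameStatus are referenced throughout this excerpt but their
# definitions were missing; they are reconstructed below from usage (the
# member names are taken from the references in the functions that follow).
class GameItem(Enum):
    DEATH = auto()
    WOODEN_SWORD = auto()
    VIOLIN = auto()
    SIMPLE_BOW = auto()
    ORDINARY_SWORD = auto()
    STRAHD_SLAYER_SWORD = auto()
    STRAHD_SLAYER_BOW = auto()


class GameStatus(Enum):
    ALIVE = auto()
    DEAD = auto()
    ARREGAO = auto()
    WINNER = auto()
    HAHA = auto()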
def ask_if_yes(input_text: str) -> bool:
"""
This function asks the player a question,
and returns True if they typed yes,
or False if they typed anything else.
"""
return input(input_text).lower() in ["y", "yes", "s", "sim"]
def ask_if_wanna_continue(player_name: str) -> bool:
"""
This function asks the player if they want to continue the game,
and returns the answer.
"""
print("You reached one possible end!!!")
if ask_if_yes("Wanna change your fate? "):
sleep(2)
print("Very well then...")
sleep(2)
return True
else:
if ask_if_yes(f"{player_name} did you find the treasure I prepared for you? "):
print("I hope you are not lying, you may leave now!!!")
sleep(1)
else:
print("What a shame! you broke my heart :'(")
sleep(1)
return False
def roll_for_item(player_name: str) -> Tuple[Optional[GameItem], GameStatus]:
"""
This function rolls the dice for the player.
It returns the item that the player gained (if any),
and the status of the player after the roll.
"""
roll = randint(1, 20)
if player_name.lower() == "lurin":
print(f"You rolled {roll}!")
sleep(2)
if ask_if_yes("Since you are inspired... wanna roll again? "):
sleep(2)
roll = randint(1, 20)
print(f"Now your roll was {roll}")
if roll == 1:
print(f"HAHAHAHAHA, tragic! You got {roll}")
sleep(2)
if player_name.lower() != "lurin":
print(
f"Unfortunalety {player_name}, you are not Lurin, so you do not have another chance!!!"
)
sleep(4)
else:
print(
f"Unfortunalety fake {player_name}, even inspired you got it? You are a joke!!!"
)
sleep(4)
return None, GameStatus.DEAD
if player_name.lower() == "snow":
print(f"... you may have this *WONDERFUL DEATH* to help you kill STRAHD...")
sleep(3)
print("...the perfect item for you, huh?")
sleep(2)
print("...no, it is not a typo or some faulty logic!")
sleep(2)
print(
"It is indeed the perfect item for you... you will play dead (you are used to it)... STRAHD flew away..."
)
sleep(4)
return GameItem.DEATH, GameStatus.ALIVE
else:
print(
f"Well {player_name}, you may have this *DEATH* to help you kill STRAHD..."
)
sleep(3)
print("...since you are not SNOW....")
sleep(2)
print("...no, it is not a typo or some faulty logic!")
sleep(2)
print("...you are DEAD!")
sleep(2)
print("***Bad end!***")
sleep(1)
return None, GameStatus.DEAD
elif roll <= 5:
print(f"You got {roll}")
if player_name.lower() != "kaede":
print(
f"Well {player_name}, you may have this *VIOLIN* to help you kill STRAHD..."
)
sleep(3)
print("...since you are not KAEDE.... gooood luck!")
sleep(2)
return GameItem.VIOLIN, GameStatus.ALIVE
else:
print(f"Well {player_name}, you may have this ***WONDERFUL VIOLIN***")
sleep(3)
print("the perfect item for you, huh?")
sleep(2)
return GameItem.VIOLIN, GameStatus.ALIVE
elif roll <= 10:
print(f"You got {roll}")
if player_name.lower() != "soren":
print(
f"Well {player_name}, you may have this *SIMPLE BOW* to help you kill STRAHD..."
)
sleep(3)
print("...since you are not Soren... gooood luck!")
sleep(2)
return GameItem.SIMPLE_BOW, GameStatus.ALIVE
else:
print(f"Well {player_name}, you may have this ***WONDERFUl SIMPLE BOW***")
sleep(3)
print("the perfect item for you, huh?")
sleep(2)
print("just.. do not kill any cats with this, moron!!!")
sleep(2)
return GameItem.SIMPLE_BOW, GameStatus.ALIVE
elif roll <= 15:
print(f"You got {roll}")
if player_name.lower() != "vis":
print(
f"Well {player_name}, you may have this *ORDINARY SWORD* to help you kill STRAHD..."
)
sleep(3)
print("...since you are not Vis... gooood luck!")
sleep(2)
print("and pray it won't fly...")
sleep(2)
return GameItem.ORDINARY_SWORD, GameStatus.ALIVE
else:
print(
f"Well {player_name}, you may have this ***FANTASTIC ORDINARY SWORD*** to help you kill STRAHD"
)
sleep(3)
print("the perfect item for you, huh?")
sleep(2)
print("if it doesn't fly...")
sleep(2)
return GameItem.ORDINARY_SWORD, GameStatus.ALIVE
elif roll < 20:
print(f"You got {roll}")
sleep(2)
print(
f"Well {player_name}, you may have ****STRAHD SLAYER SWORD***, go kill STRAHD, "
)
sleep(3)
print("...the legendary item!!!")
sleep(2)
print("...but hope it won't fly!!!")
sleep(2)
return GameItem.STRAHD_SLAYER_SWORD, GameStatus.ALIVE
elif roll == 20:
if player_name.lower() != "snow":
print(
f"Well {player_name}, you may have **** STRAHD SLAYER BOW***, go kill STRAHD, special treasures awaits you!!!"
)
sleep(3)
print("...the legendary perfect item!!!")
sleep(2)
print("...it doesn't even matter if it will fly!!!")
sleep(2)
return GameItem.STRAHD_SLAYER_BOW, GameStatus.ALIVE
else:
print(
f"Well {player_name}, you seduced STRAHD, now you can claim your treasures"
)
sleep(2)
print(f"STRAHD licks you!!!")
sleep(4)
return GameItem.STRAHD_SLAYER_BOW, GameStatus.ALIVE
return None, GameStatus.ALIVE
def flee(player_name: str) -> GameStatus:
"""
This function asks the player if they want to flee.
It returns the status of the player after their decision to flee.
"""
if ask_if_yes("Wanna flee now? "):
sleep(2)
print("...")
sleep(1)
print("We will see if flee you can... *** MUST ROLL THE DICE ***: ")
sleep(2)
print("Careful!!!")
sleep(1)
roll_the_dice = input(
"*** Roll stealth *** (if you type it wrong it means you were not stealth) type: 'roll stealth' "
)
sleep(4)
if roll_the_dice == "roll stealth":
roll = randint(1, 20)
if roll <= 10:
print(f"you rolled {roll}!")
sleep(2)
print("It means STRAHD noticed you!")
sleep(2)
print("...")
sleep(2)
print(" You are dead!!! ")
sleep(2)
print(" ***Bad end...*** ")
sleep(1)
return GameStatus.DEAD
else:
print(f"you rolled {roll}!!!")
sleep(2)
print("Congratulations, you managed to be stealth!!!")
sleep(2)
print("...")
sleep(2)
print("You may flee but you will continue being poor and weak...")
sleep(2)
print("...")
sleep(2)
print(
"And remember there are real treasures waiting for you over there..."
)
sleep(4)
print("***Bad end...***")
sleep(1)
return GameStatus.ARREGAO
else:
if player_name.lower() in ["soren", "kaede", "leandro", "snow", "lurin"]:
print("...")
sleep(1)
print("......")
sleep(2)
print("...........")
sleep(2)
print("I told you to be careful!")
sleep(2)
print(f"...{player_name} you are such a DOJI!!!")
sleep(2)
print("It means the STRAHD noticed you!")
sleep(2)
print("...")
sleep(2)
print(" You are dead!!! ")
sleep(2)
print(" ***Bad end...*** ")
sleep(1)
else:
print("I told you to be careful!")
sleep(2)
print("...........")
sleep(2)
print(f"...{player_name} you are such a klutz!!!")
sleep(2)
print("It means STRAHD noticed you!")
sleep(2)
print("...")
sleep(2)
print(" You are dead!!! ")
sleep(2)
print(" ***Bad end...*** ")
sleep(1)
return GameStatus.DEAD
else:
return GameStatus.ALIVE
def attack(player_name: str) -> Tuple[Optional[GameItem], GameStatus]:
"""
This function asks the player if they want to attack STRAHD.
If the player answers yes, the player rolls for an item.
This function returns the item obtained by a roll (if any),
and the status of the player.
"""
print("You shall not pass!!!")
if ask_if_yes(f"{player_name}, will you attack STRAHD? "):
sleep(1)
print("I honor your courage!")
sleep(2)
print("therefore...")
sleep(1)
print("I will help you...")
sleep(1)
print("I am giving you a chance to kill STRAHD and reclaim your treasures...")
sleep(2)
print(
"Roll the dice and have a chance to win the perfect item for you... or even some STRAHD Slayer Shit!!!"
)
sleep(3)
print("It will increase your chances...")
sleep(2)
print(
"....or kill you right away if you are as unlucky as Soren using his Sharp Shooting!!!"
)
sleep(2)
if ask_if_yes("Wanna roll the dice? "):
return roll_for_item(player_name)
else:
if ask_if_yes("Are you sure? "):
sleep(2)
print("So you have chosen... Death!")
sleep(2)
return GameItem.DEATH, GameStatus.DEAD
else:
sleep(2)
print("Glad you changed your mind...")
sleep(2)
print("Good... very good indeed...")
sleep(2)
return roll_for_item(player_name)
else:
print("If you won't attack STRAHD... then...")
sleep(2)
return None, flee(player_name)
def decide_if_strahd_flies(player_name: str) -> bool:
"""
This function asks if the player wants to roll for stealth,
which can give a chance for STRAHD not to fly.
It returns whether STRAHD flies.
"""
print(
"This is your chance... STRAHD has his attention captived by his 'vampirish's business'..."
)
sleep(3)
print("You are approaching him...")
sleep(2)
print("Careful...")
sleep(2)
print("Because vampires... can fly...")
sleep(2)
print("Roll stealth (if you type it wrong it means you were not stealth)...")
roll_the_dice = input("type: 'roll stealth' ")
sleep(2)
if roll_the_dice == "roll stealth":
roll = randint(1, 20)
if roll <= 10:
print("...")
sleep(1)
print("Unlucky")
sleep(2)
print(f"You rolled {roll}")
sleep(2)
print("STRAHD...")
sleep(2)
print("...flew up")
sleep(2)
print("Now, you have a huge disavantage")
sleep(2)
return True
else:
print(f"You rolled {roll}")
sleep(2)
print("Congratulations, you managed to be in stealth!")
sleep(2)
return False
else:
if player_name.lower() in ["soren", "kaede", "leandro", "snow"]:
print("...")
sleep(1)
print("......")
sleep(2)
print("...........")
sleep(2)
print("I told you to be careful!")
sleep(2)
print(f"...{player_name} you are such a DOJI, STRAHD flew up...")
sleep(2)
print("Now, you have a huge disavantage")
sleep(2)
else:
print("...")
sleep(1)
print("......")
sleep(2)
print("...........")
sleep(2)
print("I told you to be careful!")
sleep(2)
print(f"...{player_name} you are such a KLUTZ, STRAHD flew...")
sleep(2)
print("...STRAHD flew up...")
sleep(2)
print("Now, you have a huge disavantage")
sleep(2)
return True
def calculate_win_probability(
    player_race: str, player_name: str, item: Optional[GameItem], strahd_flying: bool
) -> int:
"""
This function returns the probability
that the player defeats STRAHD.
The probability depends on the item the player is holding,
and whether STRAHD is flying.
"""
if item == GameItem.DEATH:
if player_name.lower() == "snow" and player_race.lower() == "kalashatar":
return 90
else:
return 0
elif item == GameItem.WOODEN_SWORD:
if strahd_flying:
return 5
else:
return 10
elif item == GameItem.SIMPLE_BOW:
if player_name.lower() == "soren" and player_race.lower() in [
"human",
"humano",
"elf",
"elfo",
]:
return 70
else:
return 30
elif item == GameItem.VIOLIN:
if player_name.lower() == "kaede" and player_race.lower() == "tiefling":
return 70
else:
return 30
elif item == GameItem.ORDINARY_SWORD:
if strahd_flying:
return 10
elif player_name.lower() == "vis" and player_race.lower() == "draconato":
return 80
else:
return 40
elif item == GameItem.STRAHD_SLAYER_SWORD:
if strahd_flying:
return 20
else:
return 100
elif item == GameItem.STRAHD_SLAYER_BOW:
return 100
else:
return -1
def roll_for_win(probability: int) -> bool:
"""
This function returns whether the player defeats STRAHD,
given a probability.
"""
return randint(1, 100) <= probability
def after_battle(player_race: str, player_name: str, did_win: bool) -> GameStatus:
"""
This function conducts the scenario
after the player has defeated, or not, STRAHD.
It returns the status depending on whether the player won.
"""
if did_win:
now = datetime.now()
print("A day may come when the courage of men fails")
sleep(2)
print("but it is not THIS day, SATAN...")
sleep(2)
print("Because... you approached STRAHD...")
sleep(2)
print("Almost invisible to his senses...")
sleep(2)
print(
"Somehow your weapon hit the weak point of STRAHD's... revealing his true identity"
)
sleep(4)
print(
"He was just a bat... who looked like a DREADLORD..."
)
sleep(4)
print("It was a huge battle...")
sleep(2)
print(
f"And it was the most awkward {now.strftime('%A')} you will ever remember."
)
sleep(2)
if (
player_race.lower() in ["master", "mestre"]
and player_name.lower() == "zordnael"
):
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
print(
f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive 5000 dullas in Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("link")
sleep(5)
print("***CHEATER GOOD END***")
sleep(2)
return GameStatus.WINNER
elif player_race.lower() == "racist" and player_name.lower() == "lili":
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
print(
f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive the prizes specially prepared for everybody in dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("https://drive.google.com/drive/folders/1Jn8YYdixNNRqCQgIClBmGLiFFxuSCQdc?usp=sharing")
sleep(5)
print("***BEST END***")
sleep(2)
return GameStatus.WINNER
if did_win:
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
if player_name.lower() == "soren":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1FerRt3mmaOm0ohSUXTkO-CmGIAluavXi?usp=sharing")
sleep(5)
print("...Your motherfuger cat killer !!!")
sleep(2)
print("***SOREN'S GOOD END***")
sleep(2)
elif player_name.lower() == "snow":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/16STFQ-_0N_54oNNsVQnMjwjcBgubxgk7?usp=sharing")
sleep(5)
print("...Your motherfuger snow flake !!!")
sleep(2)
print("***SNOW'S GOOD END***")
sleep(2)
elif player_name.lower() == "kaede":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1XN9sItRxYR4Si4gWFeJtI0HGF39zC29a?usp=sharing")
sleep(5)
print("...Your motherfuger idol !!!")
sleep(2)
print("***KAEDE'S GOOD END***")
sleep(2)
elif player_name.lower() == "leandro":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1eP552hYwUXImmJ-DIX5o-wlp5VA96Sa0?usp=sharing")
sleep(5)
print("...Your motherfuger only roll 20 !!!")
sleep(2)
print("***LEANDRO'S GOOD END***")
sleep(2)
elif player_name.lower() == "vis":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/19GRJJdlB8NbNl3QDXQM1-0ctXSX3mbwS?usp=sharing")
sleep(5)
print("...Your motherfuger iron wall !!!")
sleep(2)
print("***VIS'S GOOD END***")
sleep(2)
elif player_name.lower() == "lurin":
print("CONGRATULATIONS!!!!! ")
sleep(2)
print("Bitch! ... ")
sleep(2)
print(" ... you stole my name...")
sleep(2)
print("You are arrested for identity theft!!!")
sleep(2)
print("...")
sleep(1)
print("del C://LeagueOfLegends")
sleep(2)
print("...")
sleep(0.5)
print(".....")
sleep(0.5)
print("......")
sleep(0.5)
print(".............")
sleep(2)
print("deletion completed")
sleep(2)
print("***PHONY'S GOOD END***")
sleep(2)
else:
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you shall receive this link from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("https://drive.google.com/drive/folders/0B_sxkSE6-TfETlZoOHF1bTRGTXM?usp=sharing")
sleep(5)
print("***GOOD END***")
sleep(2)
sleep(1)
return GameStatus.WINNER
if not did_win:
print("You tried to approach the devil carefully...")
sleep(2)
print("... but your hands were trembling...")
sleep(2)
print("...your weapon was not what you expected...")
sleep(2)
print("... It was a shit battle... but")
sleep(2)
print("The journey doesn't end here...")
sleep(2)
print("Death is just another way we have to choose...")
sleep(2)
print("...")
sleep(1)
if player_name.lower() == "vis":
print("I really believed in you...")
sleep(2)
print("...but I guess...")
sleep(1)
print("you shoud have stayed in your bathroom...")
sleep(2)
print("eating lemon pies...")
sleep(2)
print("...")
sleep(1)
print(f"YOU DIED {player_name}")
sleep(2)
print("***VIS'S BAD END***")
sleep(2)
elif player_name.lower() == "soren":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("Did you think it was a cat? ")
sleep(2)
print("Not today Satan!!!")
sleep(2)
print("...")
sleep(1)
print(f"You died! {player_name}")
sleep(2)
print("***SOREN'S BAD END***")
sleep(2)
elif player_name.lower() == "kaede":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("")
sleep(2)
print("")
sleep(2)
print("")
sleep(2)
print("go play you Violin in Hell...")
sleep(2)
print("...")
sleep(1)
print(f"You died! {player_name}")
sleep(2)
print("***KAEDES'S BAD END***")
sleep(2)
elif player_name.lower() == "snow":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("HAHAHAAHHAHAHA")
sleep(2)
print("It is cute you even tried!")
sleep(2)
print("but I will call you Nori!")
sleep(2)
print("...")
sleep(1)
print("You died! Nori!!!")
sleep(2)
print("***SNOW'S BAD END***")
sleep(2)
elif player_name.lower() == "lurin":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(2)
print("Bitch! ... ")
sleep(2)
print(" ... you stole my name...")
sleep(2)
print("You are arrested for identity theft!!!")
sleep(2)
print("...")
sleep(1)
print("del C://LeagueOfLegends")
sleep(2)
print("...")
sleep(0.5)
print(".....")
sleep(0.5)
print("......")
sleep(0.5)
print(".............")
sleep(2)
print("deletion completed")
sleep(2)
print("***PHONY'S GOOD END***")
sleep(2)
elif player_name.lower() == "leandro":
print("nice try")
sleep(2)
print("...but I guess...")
sleep(2)
print("Try harder next time...")
sleep(2)
print("...Nicolas Cage Face...")
sleep(2)
print("***LEANDRO'S BAD END***")
sleep(2)
elif player_name.lower() == "buiu":
print("nice try")
sleep(2)
print("...but I guess...")
sleep(2)
print("Try harder next time...")
sleep(2)
print(f"Did you really think this would work? Clown!")
sleep(2)
print("***RIDICULOUS BUIU'S END***")
sleep(2)
return GameStatus.HAHA
elif player_name.lower() in ["strahd", "dreadlord"]:
print("good try")
sleep(2)
print("...but I guess...")
sleep(2)
print("I never said you were in a cave...")
sleep(2)
print("There is sunlight now...")
sleep(2)
print("You are burning...")
sleep(2)
print("Till Death...")
sleep(2)
print("***RIDICULOUS STRAHD'S END***")
sleep(2)
else:
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(2)
print("This is a shit meta game...")
sleep(2)
print(
"Designed for players from a certain 16:20 tabletop Ravenloft campaign"
)
sleep(2)
print(f"Sorry, {player_name}...")
sleep(2)
print("You are dead!!!")
sleep(2)
print("***BAD END***")
sleep(2)
sleep(1)
return GameStatus.DEAD
def main():
"""
This function conducts the entire game.
"""
wanna_continue = True
while wanna_continue:
player_race = input("Your race? ")
player_name = input("Your name? ")
status = flee(player_name)
if status == GameStatus.ALIVE:
item, status = attack(player_name)
if status == GameStatus.ALIVE:
strahd_flight = decide_if_strahd_flies(player_name)
probability = calculate_win_probability(
player_race, player_name, item, strahd_flight
)
did_win = roll_for_win(probability)
status = after_battle(player_race, player_name, did_win)
if status == GameStatus.WINNER:
sleep(5)
print(
"You are a winner, baby. But there are other possibilities over there..."
)
wanna_continue = ask_if_wanna_continue(player_name)
elif status == GameStatus.HAHA:
wanna_continue = False
else:
wanna_continue = ask_if_wanna_continue(player_name)
else:
wanna_continue = ask_if_wanna_continue(player_name)
elif status == GameStatus.DEAD:
wanna_continue = ask_if_wanna_continue(player_name)
else:
print("...")
wanna_continue = ask_if_wanna_continue(player_name)
input()
main()
| 36.281787
| 210
| 0.46224
|
93c8ba0b9839234f94247033001b32b0fa66bf75
| 193
|
py
|
Python
|
redacoes/models/vesibulares.py
|
VictorGM01/controle_de_questoes
|
658e81b2e2fe78fb1e6bb7ff3f537c8a28e7c9e8
|
[
"MIT"
] | 1
|
2022-03-23T12:32:20.000Z
|
2022-03-23T12:32:20.000Z
|
redacoes/models/vesibulares.py
|
VictorGM01/controle_de_questoes
|
658e81b2e2fe78fb1e6bb7ff3f537c8a28e7c9e8
|
[
"MIT"
] | null | null | null |
redacoes/models/vesibulares.py
|
VictorGM01/controle_de_questoes
|
658e81b2e2fe78fb1e6bb7ff3f537c8a28e7c9e8
|
[
"MIT"
] | null | null | null |
from django.db import models
| 17.545455
| 37
| 0.621762
|
93c9a643270a43403d7d70db7f672d353ef62da2
| 635
|
py
|
Python
|
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
backend/helper/mds.py
|
marinaevers/regional-correlations
|
8ca91a5283a92e75f3d99f870c295ca580edb949
|
[
"MIT"
] | null | null | null |
import numpy as np
def mds(d, dimensions=3):
"""
Multidimensional Scaling - Given a matrix of interpoint distances,
find a set of low dimensional points that have similar interpoint
distances.
"""
(n, n) = d.shape
E = (-0.5 * d ** 2)
    # Row and column means of E, kept two-dimensional so they broadcast
    # below (avoids the deprecated np.mat).
    Er = np.mean(E, axis=1, keepdims=True)
    Es = np.mean(E, axis=0, keepdims=True)
    # Double-centre E; from Principles of Multivariate Analysis: A User's Perspective (page 107).
    F = np.array(E - Er - Es + np.mean(E))
[U, S, V] = np.linalg.svd(F)
Y = U * np.sqrt(S)
return (Y[:, 0:dimensions], S)
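
# Hedged usage sketch (not in the original module): embed four points from
# their pairwise Euclidean distance matrix. The point set is an illustrative
# assumption.
if __name__ == '__main__':
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    # Pairwise Euclidean distances between the points.
    d = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    coords, eigvals = mds(d, dimensions=2)
    # coords reproduces the interpoint distances up to rotation/reflection.
    print(coords)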
| 24.423077
| 80
| 0.601575
|
93c9ac724fdd806412549f0dec59d52778127c89
| 492
|
py
|
Python
|
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | 2
|
2018-09-19T00:59:09.000Z
|
2022-01-09T18:38:01.000Z
|
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | null | null | null |
sm3.py
|
matthewmuccio/InterviewPrepKit
|
13dabeddc3c83866c88bef1c80498c313e4c233e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from collections import Counter
# Complete the isValid function below.
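# The stub above leaves isValid unimplemented; the sketch below assumes the
# HackerRank "Sherlock and the Valid String" task: a string is valid if every
# character occurs equally often, or becomes so after removing one character.
def isValid(s):
    # Map each distinct occurrence-count to how many characters have it.
    freq = Counter(Counter(s).values())
    if len(freq) == 1:
        return "YES"
    if len(freq) == 2:
        (f1, n1), (f2, n2) = sorted(freq.items())
        # One character appears exactly once more than the rest ("aabbccc"),
        # or a single character appears exactly once ("aabbccX").
        if (f2 == f1 + 1 and n2 == 1) or (f1 == 1 and n1 == 1):
            return "YES"
    return "NO"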
if __name__ == "__main__":
s = input()
result = isValid(s)
print(result)
| 25.894737
| 217
| 0.630081
|
93caeae8160e612312e97b73a71f33bfdd865b27
| 10,238
|
py
|
Python
|
etiquette/constants.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | 20
|
2018-03-20T01:40:13.000Z
|
2022-02-11T20:23:41.000Z
|
etiquette/constants.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | null | null | null |
etiquette/constants.py
|
voussoir/etiquette
|
e982858c28335b11528c52af181abd1bbc71673f
|
[
"BSD-3-Clause"
] | 1
|
2018-03-20T13:10:31.000Z
|
2018-03-20T13:10:31.000Z
|
'''
This file provides data and objects that do not change throughout the runtime.
'''
import converter
import string
import traceback
import warnings
from voussoirkit import sqlhelpers
from voussoirkit import winwhich
# FFmpeg ###########################################################################################
FFMPEG_NOT_FOUND = '''
ffmpeg or ffprobe not found.
Add them to your PATH or use symlinks such that they appear in:
Linux: which ffmpeg ; which ffprobe
Windows: where ffmpeg & where ffprobe
'''
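
# _load_ffmpeg's definition was not included in this excerpt; the sketch below
# is an assumption reconstructed from the imports and the message above:
# locate ffmpeg/ffprobe on PATH and build a converter, warning when missing.
def _load_ffmpeg():
    ffmpeg_path = winwhich.which('ffmpeg')
    ffprobe_path = winwhich.which('ffprobe')

    if (not ffmpeg_path) or (not ffprobe_path):
        warnings.warn(FFMPEG_NOT_FOUND)
        return None

    try:
        return converter.Converter(ffmpeg_path=ffmpeg_path, ffprobe_path=ffprobe_path)
    except Exception:
        traceback.print_exc()
        return None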
ffmpeg = _load_ffmpeg()
# Database #########################################################################################
DATABASE_VERSION = 20
DB_VERSION_PRAGMA = f'''
PRAGMA user_version = {DATABASE_VERSION};
'''
DB_PRAGMAS = f'''
PRAGMA cache_size = 10000;
PRAGMA count_changes = OFF;
PRAGMA foreign_keys = ON;
'''
DB_INIT = f'''
BEGIN;
{DB_PRAGMAS}
{DB_VERSION_PRAGMA}
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS albums(
id TEXT PRIMARY KEY NOT NULL,
title TEXT,
description TEXT,
created INT,
thumbnail_photo TEXT,
author_id TEXT,
FOREIGN KEY(author_id) REFERENCES users(id),
FOREIGN KEY(thumbnail_photo) REFERENCES photos(id)
);
CREATE INDEX IF NOT EXISTS index_albums_id on albums(id);
CREATE INDEX IF NOT EXISTS index_albums_author_id on albums(author_id);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS bookmarks(
id TEXT PRIMARY KEY NOT NULL,
title TEXT,
url TEXT,
created INT,
author_id TEXT,
FOREIGN KEY(author_id) REFERENCES users(id)
);
CREATE INDEX IF NOT EXISTS index_bookmarks_id on bookmarks(id);
CREATE INDEX IF NOT EXISTS index_bookmarks_author_id on bookmarks(author_id);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS photos(
id TEXT PRIMARY KEY NOT NULL,
filepath TEXT COLLATE NOCASE,
basename TEXT COLLATE NOCASE,
override_filename TEXT COLLATE NOCASE,
extension TEXT COLLATE NOCASE,
mtime INT,
sha256 TEXT,
width INT,
height INT,
ratio REAL,
area INT,
duration INT,
bytes INT,
created INT,
thumbnail TEXT,
tagged_at INT,
author_id TEXT,
searchhidden INT,
FOREIGN KEY(author_id) REFERENCES users(id)
);
CREATE INDEX IF NOT EXISTS index_photos_id on photos(id);
CREATE INDEX IF NOT EXISTS index_photos_filepath on photos(filepath COLLATE NOCASE);
CREATE INDEX IF NOT EXISTS index_photos_override_filename on
photos(override_filename COLLATE NOCASE);
CREATE INDEX IF NOT EXISTS index_photos_created on photos(created);
CREATE INDEX IF NOT EXISTS index_photos_extension on photos(extension);
CREATE INDEX IF NOT EXISTS index_photos_author_id on photos(author_id);
CREATE INDEX IF NOT EXISTS index_photos_searchhidden on photos(searchhidden);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS tags(
id TEXT PRIMARY KEY NOT NULL,
name TEXT NOT NULL,
description TEXT,
created INT,
author_id TEXT,
FOREIGN KEY(author_id) REFERENCES users(id)
);
CREATE INDEX IF NOT EXISTS index_tags_id on tags(id);
CREATE INDEX IF NOT EXISTS index_tags_name on tags(name);
CREATE INDEX IF NOT EXISTS index_tags_author_id on tags(author_id);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS users(
id TEXT PRIMARY KEY NOT NULL,
username TEXT NOT NULL COLLATE NOCASE,
password BLOB NOT NULL,
display_name TEXT,
created INT
);
CREATE INDEX IF NOT EXISTS index_users_id on users(id);
CREATE INDEX IF NOT EXISTS index_users_username on users(username COLLATE NOCASE);
----------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS album_associated_directories(
albumid TEXT NOT NULL,
directory TEXT NOT NULL COLLATE NOCASE,
FOREIGN KEY(albumid) REFERENCES albums(id)
);
CREATE INDEX IF NOT EXISTS index_album_associated_directories_albumid on
album_associated_directories(albumid);
CREATE INDEX IF NOT EXISTS index_album_associated_directories_directory on
album_associated_directories(directory);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS album_group_rel(
parentid TEXT NOT NULL,
memberid TEXT NOT NULL,
FOREIGN KEY(parentid) REFERENCES albums(id),
FOREIGN KEY(memberid) REFERENCES albums(id)
);
CREATE INDEX IF NOT EXISTS index_album_group_rel_parentid on album_group_rel(parentid);
CREATE INDEX IF NOT EXISTS index_album_group_rel_memberid on album_group_rel(memberid);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS album_photo_rel(
albumid TEXT NOT NULL,
photoid TEXT NOT NULL,
FOREIGN KEY(albumid) REFERENCES albums(id),
FOREIGN KEY(photoid) REFERENCES photos(id)
);
CREATE INDEX IF NOT EXISTS index_album_photo_rel_albumid on album_photo_rel(albumid);
CREATE INDEX IF NOT EXISTS index_album_photo_rel_photoid on album_photo_rel(photoid);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS id_numbers(
tab TEXT NOT NULL,
last_id TEXT NOT NULL
);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS photo_tag_rel(
photoid TEXT NOT NULL,
tagid TEXT NOT NULL,
FOREIGN KEY(photoid) REFERENCES photos(id),
FOREIGN KEY(tagid) REFERENCES tags(id)
);
CREATE INDEX IF NOT EXISTS index_photo_tag_rel_photoid on photo_tag_rel(photoid);
CREATE INDEX IF NOT EXISTS index_photo_tag_rel_tagid on photo_tag_rel(tagid);
CREATE INDEX IF NOT EXISTS index_photo_tag_rel_photoid_tagid on photo_tag_rel(photoid, tagid);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS tag_group_rel(
parentid TEXT NOT NULL,
memberid TEXT NOT NULL,
FOREIGN KEY(parentid) REFERENCES tags(id),
FOREIGN KEY(memberid) REFERENCES tags(id)
);
CREATE INDEX IF NOT EXISTS index_tag_group_rel_parentid on tag_group_rel(parentid);
CREATE INDEX IF NOT EXISTS index_tag_group_rel_memberid on tag_group_rel(memberid);
----------------------------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS tag_synonyms(
name TEXT NOT NULL,
mastername TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS index_tag_synonyms_name on tag_synonyms(name);
----------------------------------------------------------------------------------------------------
COMMIT;
'''
SQL_COLUMNS = sqlhelpers.extract_table_column_map(DB_INIT)
SQL_INDEX = sqlhelpers.reverse_table_column_map(SQL_COLUMNS)
ALLOWED_ORDERBY_COLUMNS = {
'area',
'basename',
'bitrate',
'bytes',
'created',
'duration',
'extension',
'height',
'random',
'ratio',
'tagged_at',
'width',
}
# Janitorial stuff #################################################################################
FILENAME_BADCHARS = '\\/:*?<>|"'
USER_ID_CHARACTERS = string.digits + string.ascii_uppercase
ADDITIONAL_MIMETYPES = {
'7z': 'archive',
'gz': 'archive',
'rar': 'archive',
'aac': 'audio/aac',
'ac3': 'audio/ac3',
'dts': 'audio/dts',
'm4a': 'audio/mp4',
'opus': 'audio/ogg',
'mkv': 'video/x-matroska',
'ass': 'text/plain',
'md': 'text/plain',
'nfo': 'text/plain',
'rst': 'text/plain',
'srt': 'text/plain',
}
# Photodb ##########################################################################################
DEFAULT_DATADIR = '_etiquette'
DEFAULT_DBNAME = 'phototagger.db'
DEFAULT_CONFIGNAME = 'config.json'
DEFAULT_THUMBDIR = 'thumbnails'
DEFAULT_CONFIGURATION = {
'cache_size': {
'album': 1000,
'bookmark': 100,
'photo': 100000,
'tag': 10000,
'user': 200,
},
'enable_feature': {
'album': {
'edit': True,
'new': True,
},
'bookmark': {
'edit': True,
'new': True,
},
'photo': {
'add_remove_tag': True,
'new': True,
'edit': True,
'generate_thumbnail': True,
'reload_metadata': True,
},
'tag': {
'edit': True,
'new': True,
},
'user': {
'edit': True,
'login': True,
'new': True,
},
},
'tag': {
'min_length': 1,
'max_length': 32,
# 'valid_chars': string.ascii_lowercase + string.digits + '_()',
},
'user': {
'min_username_length': 2,
'min_password_length': 6,
'max_display_name_length': 24,
'max_username_length': 24,
'valid_chars': string.ascii_letters + string.digits + '_-',
},
'digest_exclude_files': [
'phototagger.db',
'desktop.ini',
'thumbs.db',
],
'digest_exclude_dirs': [
'_etiquette',
'_site_thumbnails',
'site_thumbnails',
'thumbnails',
],
'file_read_chunk': 2 ** 20,
'id_length': 12,
'thumbnail_width': 400,
'thumbnail_height': 400,
'recycle_instead_of_delete': True,
'motd_strings': [
'Good morning, Paul. What will your first sequence of the day be?',
],
}
| 31.696594
| 100
| 0.567298
|
93cb4419d9691b2ed3418c709e86de6b48657ce2
| 122
|
py
|
Python
|
Day_2_Software_engineering_best_practices/solutions/06_07_08_full_package/spectra_analysis/__init__.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | null | null | null |
Day_2_Software_engineering_best_practices/solutions/06_07_08_full_package/spectra_analysis/__init__.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | null | null | null |
Day_2_Software_engineering_best_practices/solutions/06_07_08_full_package/spectra_analysis/__init__.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Spectra analysis utilities
"""
from ._version import __version__
__all__ = ['__version__']
| 12.2
| 33
| 0.663934
|
93cc27f8724fb44128386ebba57195949fa0feb9
| 88,309
|
py
|
Python
|
tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | null | null | null |
tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 1
|
2020-11-16T02:14:35.000Z
|
2020-11-16T02:14:35.000Z
|
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import os
from kubernetes import client
from tacker.common import exceptions
from tacker import context
from tacker.db.db_sqlalchemy import models
from tacker.extensions import vnfm
from tacker import objects
from tacker.objects import fields
from tacker.objects.vnf_instance import VnfInstance
from tacker.objects import vnf_package
from tacker.objects import vnf_package_vnfd
from tacker.objects import vnf_resources as vnf_resource_obj
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \
fixture_data_utils as fd_utils
from tacker.vnfm.infra_drivers.kubernetes import kubernetes_driver
from unittest import mock
| 51.372309
| 79
| 0.691651
|
93cd3692a60479202468f2712c8bb24c8cc1672a
| 841
|
py
|
Python
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 14
|
2015-04-27T20:40:46.000Z
|
2019-02-01T09:22:02.000Z
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 10
|
2015-01-05T18:11:28.000Z
|
2018-09-03T08:42:50.000Z
|
src/codplayer/__init__.py
|
petli/codplayer
|
172187b91662affd8e89f572c0db9be1c4257627
|
[
"MIT"
] | 4
|
2017-03-03T16:59:39.000Z
|
2019-11-08T11:15:06.000Z
|
# codplayer supporting package
#
# Copyright 2013-2014 Peter Liljenberg <peter.liljenberg@gmail.com>
#
# Distributed under an MIT license, please see LICENSE in the top dir.
# Don't include the audio device modules in the list of modules,
# as they may not be available on all systems
from pkg_resources import get_distribution
import os
import time
version = get_distribution('codplayer').version
# Check what file we are loaded from
try:
date = time.ctime(os.stat(__file__).st_mtime)
except OSError as e:
date = 'unknown ({})'.format(e)
__all__ = [
'audio',
'command',
'config',
'db',
'model',
'player',
'rest',
'rip',
'serialize',
'sink',
'source',
'state',
'toc',
'version'
]
| 19.55814
| 70
| 0.65874
|
93cf143d7b69f8a96f36f23910ce3b0b601f20d1
| 436
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/bigtableclusteradmin/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/bigtableclusteradmin/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/bigtableclusteradmin/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
"""Common imports for generated bigtableclusteradmin client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudsdk.third_party.apitools.base.py import *
from googlecloudsdk.third_party.apis.bigtableclusteradmin.v1.bigtableclusteradmin_v1_client import *
from googlecloudsdk.third_party.apis.bigtableclusteradmin.v1.bigtableclusteradmin_v1_messages import *
__path__ = pkgutil.extend_path(__path__, __name__)
| 39.636364
| 102
| 0.855505
|
93d04402f33cb3c06a7016fef8b0328a457f038a
| 4,229
|
py
|
Python
|
elementalcms/management/pages.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | 3
|
2022-01-12T09:11:54.000Z
|
2022-02-24T22:39:11.000Z
|
elementalcms/management/pages.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | null | null | null |
elementalcms/management/pages.py
|
paranoid-software/elemental-cms
|
7f09f9cd5498577d23fa70d1a51497b9de232598
|
[
"MIT"
] | 1
|
2022-01-12T09:11:56.000Z
|
2022-01-12T09:11:56.000Z
|
from typing import Tuple, Optional
import click
from cloup import constraint, option, command, pass_context
from cloup.constraints import RequireExactly
from .pagescommands import Create, Remove, Push, Pull, List, Publish, Unpublish
| 34.663934
| 119
| 0.581698
|
93d13c525fccba1c9782ed2b28a9ab8aac0b37da
| 339
|
py
|
Python
|
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
shapesimage.py
|
riddhigupta1318/menu_driven
|
1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import cv2

# Load the image from disk
img = cv2.imread("dog.jpeg")
# Draw a line from (0, 0) to (200, 114) in BGR colour (110, 176, 123), 2 px thick
img1 = cv2.line(img, (0, 0), (200, 114), (110, 176, 123), 2)
# Print the image dimensions: (height, width, channels)
print(img.shape)
# Display the image in a named window
cv2.imshow("dogg", img1)
# Wait for any key press, then close all windows
cv2.waitKey(0)
cv2.destroyAllWindows()
| 16.142857
| 50
| 0.719764
|
93d37d046fccd50496fe96e2714742d3c5e3222c
| 2,139
|
py
|
Python
|
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
RNNS/utils/wrdembdGen.py
|
CenIII/Text-style-transfer-DeleteRetrieve
|
2b7aa017765dcae65b42fc94d3ccaddc57ac8661
|
[
"MIT"
] | null | null | null |
import gensim
import fnmatch
import os
import pickle
import numpy as np
# from symspellpy.symspellpy import SymSpell, Verbosity # import the module
# initial_capacity = 83000
# # maximum edit distance per dictionary precalculation
# max_edit_distance_dictionary = 2
# prefix_length = 7
# sym_spell = SymSpell(initial_capacity, max_edit_distance_dictionary,
# prefix_length)
# # load dictionary
# dictionary_path = os.path.join(os.path.dirname(__file__),
# "frequency_dictionary_en_82_765.txt")
# term_index = 0 # column of the term in the dictionary text file
# count_index = 1 # column of the term frequency in the dictionary text file
# if not sym_spell.load_dictionary(dictionary_path, term_index, count_index):
# print("Dictionary file not found")
# max_edit_distance_lookup = 2
model = gensim.models.KeyedVectors.load_word2vec_format(os.path.expanduser('~/Downloads/GoogleNews-vectors-negative300.bin'), binary=True)
wordlist = []
for dataset in ['yelp/']:
filelist = os.listdir('../../Data/'+dataset)
for file in filelist:
with open('../../Data/'+dataset+file,'r') as f:
line = f.readline()
while line:
# suggestions = sym_spell.lookup_compound(line, max_edit_distance_lookup)
wordlist += line.split(' ')
line = f.readline()
wordlist.append('<unk>')
wordlist.append('<m_end>')
wordlist.append('@@START@@')
wordlist.append('@@END@@')
vocabs = set(wordlist)
print(len(vocabs))
wordDict = {}
word2vec = []
wastewords = []
word2vec.append(np.zeros(300))
wordDict['<PAD>']=0
cnt=1
for word in vocabs:
    if word in model:
        word2vec.append(model[word])
wordDict[word] = cnt
cnt += 1
else:
# wastewords.append(word)
word2vec.append(np.random.uniform(-1,1,300))
wordDict[word] = cnt
cnt += 1
word2vec = np.array(word2vec)
# with open('./word2vec', "wb") as fp: #Pickling
np.save('word2vec.npy',word2vec)
with open('./wordDict', "wb") as fp: #Pickling
pickle.dump(wordDict, fp)
# with open('./word2vec', "rb") as fp: #Pickling
# word2vec = pickle.load(fp)
# with open('./wordDict', "rb") as fp: #Pickling
# wordDict = pickle.load(fp)
# pass
| 27.423077
| 118
| 0.694717
|
93d43839068d5fe40ab642bf29baf0d261531656
| 8,611
|
py
|
Python
|
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
cls_utils/job.py
|
prmurali1leo/Engineering_challenge
|
d73dcba265587c22f0869880bf372cfaa045bfa6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from hashlib import md5
import datetime
import pyarrow.parquet as pq
import pyarrow as pa
from src.dimension_surrogate_resolver import DimensionSurrogateResolver
| 42.004878
| 120
| 0.648124
|
93d52227fd91adf6e2131607d2e901a6c4913898
| 3,294
|
py
|
Python
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 462
|
2017-10-14T16:58:36.000Z
|
2022-03-24T01:40:23.000Z
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 371
|
2017-11-28T14:00:02.000Z
|
2022-03-31T21:44:07.000Z
|
busy_home.py
|
jerr0328/HAP-python
|
87199a1fb7ffc451961948c634e46439cbace370
|
[
"Apache-2.0"
] | 129
|
2017-11-23T20:50:28.000Z
|
2022-03-17T01:26:53.000Z
|
"""Starts a fake fan, lightbulb, garage door and a TemperatureSensor
"""
import logging
import signal
import random
from pyhap.accessory import Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import (CATEGORY_FAN,
CATEGORY_LIGHTBULB,
CATEGORY_GARAGE_DOOR_OPENER,
CATEGORY_SENSOR)
logging.basicConfig(level=logging.INFO, format="[%(module)s] %(message)s")
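
# The four accessory classes were not included in this excerpt; a minimal
# TemperatureSensor in the style of the HAP-python examples is sketched below
# as an assumption (LightBulb, FakeFan and GarageDoor follow the same pattern).
class TemperatureSensor(Accessory):
    """Fake sensor that reports a random temperature every few seconds."""

    category = CATEGORY_SENSOR

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        serv_temp = self.add_preload_service('TemperatureSensor')
        self.char_temp = serv_temp.configure_char('CurrentTemperature')

    @Accessory.run_at_interval(3)
    async def run(self):
        self.char_temp.set_value(random.randint(18, 26))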
def get_bridge(driver):
bridge = Bridge(driver, 'Bridge')
bridge.add_accessory(LightBulb(driver, 'Lightbulb'))
bridge.add_accessory(FakeFan(driver, 'Big Fan'))
bridge.add_accessory(GarageDoor(driver, 'Garage'))
bridge.add_accessory(TemperatureSensor(driver, 'Sensor'))
return bridge
driver = AccessoryDriver(port=51826, persist_file='busy_home.state')
driver.add_accessory(accessory=get_bridge(driver))
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
| 31.371429
| 77
| 0.683667
|
93d680ecf48e6dbb1495bab46f68ebdbe3aea08b
| 574
|
py
|
Python
|
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | 1
|
2020-12-31T00:07:40.000Z
|
2020-12-31T00:07:40.000Z
|
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | null | null | null |
Backend/src/commercial/urls.py
|
ChristianTaborda/Energycorp
|
2447b5af211501450177b0b60852dcb31d6ca12d
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import (
    # CRUDS
    CommercialList,
    CommercialDetail,
    CommercialCreate,
    CommercialUpdate,
    CommercialDelete,
    CommercialInactivate,
    # QUERY
)
urlpatterns = [
#CRUD
path('', CommercialList.as_view()),
path('create/', CommercialCreate.as_view()),
path('<pk>/', CommercialDetail.as_view()),
path('update/<pk>/', CommercialUpdate.as_view()),
path('inactivate/<pk>/', CommercialInactivate.as_view()),
path('delete/<pk>', CommercialDelete.as_view())
#QUERY
]
| 22.076923
| 61
| 0.667247
|
93d7075c75f515ae6f7dbc9fddf988695545df0c
| 2,715
|
py
|
Python
|
src/traquitanas/geo/layers.py
|
traquitanas/traquitanas
|
788a536de4c762b050e9d09c55b15e4d0bee3434
|
[
"MIT"
] | null | null | null |
src/traquitanas/geo/layers.py
|
traquitanas/traquitanas
|
788a536de4c762b050e9d09c55b15e4d0bee3434
|
[
"MIT"
] | null | null | null |
src/traquitanas/geo/layers.py
|
traquitanas/traquitanas
|
788a536de4c762b050e9d09c55b15e4d0bee3434
|
[
"MIT"
] | 1
|
2021-10-07T20:58:56.000Z
|
2021-10-07T20:58:56.000Z
|
import folium
if __name__ == '__main__':
pass
| 27.15
| 92
| 0.537385
|
93d7e71a979233c8c73b2a4018aacf592bc1a08e
| 1,277
|
py
|
Python
|
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
migrations/versions/6e5e2b4c2433_add_hometasks_for_students.py
|
AnvarGaliullin/LSP
|
ed1f00ddc6346c5c141b421c7a3305e4c9e1b0d1
|
[
"MIT"
] | null | null | null |
"""Add Hometasks for Students
Revision ID: 6e5e2b4c2433
Revises: b9acba47fd53
Create Date: 2020-01-10 20:52:40.063133
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e5e2b4c2433'
down_revision = 'b9acba47fd53'
branch_labels = None
depends_on = None
| 31.925
| 103
| 0.702428
|
93d903dc4a4d4fc536ec37d420b4604d14554d90
| 1,759
|
py
|
Python
|
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/plotting.py
|
intelligent-soft-robots/o80_roboball2d
|
094d36f870b9c20ef5e05baf92ed8ed5b9a5277c
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import math
import fyplot
import o80_roboball2d
from functools import partial
if __name__ == "__main__":
run()
| 30.327586
| 84
| 0.651507
|
93d96a3758d5ca27cf2434f779255814b61dd0c7
| 10,099
|
py
|
Python
|
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | 6
|
2020-12-15T04:26:43.000Z
|
2020-12-15T13:26:09.000Z
|
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | null | null | null |
kvm_pirate/elf/structs.py
|
Mic92/kvm-pirate
|
26626db320b385f51ccb88dad76209a812c40ca6
|
[
"MIT"
] | null | null | null |
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This file contains ELF C structs and data types."""
import ctypes
from typing import Any
from . import consts
# ELF data types.
Elf32_Addr = ctypes.c_uint32
Elf32_Off = ctypes.c_uint32
Elf32_Half = ctypes.c_uint16
Elf32_Word = ctypes.c_uint32
Elf32_Sword = ctypes.c_int32
Elf64_Addr = ctypes.c_uint64
Elf64_Off = ctypes.c_uint64
Elf64_Half = ctypes.c_uint16
Elf64_Word = ctypes.c_uint32
Elf64_Sword = ctypes.c_int32
Elf64_Xword = ctypes.c_uint64
Elf64_Sxword = ctypes.c_int64
# ELF C structs.
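# The struct definitions themselves were truncated from this excerpt; as an
# illustration of the pattern, the 32-bit ELF file header is sketched below
# following the standard layout (the remaining structs are analogous).
class Elf32_Ehdr(ctypes.Structure):
    """32-bit ELF file header (standard layout; EI_NIDENT is 16)."""
    _fields_ = [
        ('e_ident', ctypes.c_char * 16),  # magic number and identification bytes
        ('e_type', Elf32_Half),           # object file type
        ('e_machine', Elf32_Half),        # target architecture
        ('e_version', Elf32_Word),        # object file version
        ('e_entry', Elf32_Addr),          # entry point virtual address
        ('e_phoff', Elf32_Off),           # program header table file offset
        ('e_shoff', Elf32_Off),           # section header table file offset
        ('e_flags', Elf32_Word),          # processor-specific flags
        ('e_ehsize', Elf32_Half),         # ELF header size in bytes
        ('e_phentsize', Elf32_Half),      # program header table entry size
        ('e_phnum', Elf32_Half),          # number of program header entries
        ('e_shentsize', Elf32_Half),      # section header table entry size
        ('e_shnum', Elf32_Half),          # number of section header entries
        ('e_shstrndx', Elf32_Half),       # section name string table index
    ]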
| 26.231169
| 77
| 0.600951
|
93db2131f51a021bb76ace2f9993a86a1d6b0e6b
| 469
|
py
|
Python
|
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | 5
|
2019-06-03T21:02:32.000Z
|
2020-12-01T08:59:50.000Z
|
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect-2018
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | null | null | null |
connect-2018/exercises/2018/lr-automation/cbr.py
|
cbcommunity/cb-connect-2018
|
3ccfd1ed51e808f567f9f0fc4e8fe2688ef9ee76
|
[
"MIT"
] | 1
|
2019-07-09T20:09:14.000Z
|
2019-07-09T20:09:14.000Z
|
from cbapi.response import *
from lrjob import run_liveresponse
from cbapi.example_helpers import get_cb_response_object, build_cli_parser
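
# The body of main() was not included in this excerpt; a hedged sketch of the
# usual cbapi example pattern follows. run_liveresponse's exact signature is
# an assumption -- it lives in the local lrjob module.
def main():
    parser = build_cli_parser("Run the live response job against a sensor")
    parser.add_argument("--sensor-id", type=int, required=True,
                        help="sensor id to run the job on")
    args = parser.parse_args()

    cb = get_cb_response_object(args)
    sensor = cb.select(Sensor, args.sensor_id)
    run_liveresponse(sensor.lr_session())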
if __name__ == '__main__':
main()
| 26.055556
| 74
| 0.742004
|
93db88634b9a24a07909d849964c5b879194e57a
| 6,655
|
py
|
Python
|
sqlite_to_stasis.py
|
mrozekma/Sprint
|
0bf531d2f16a7bc5b56dbc8c6eae5dc9e251b2f1
|
[
"MIT"
] | 2
|
2015-03-18T13:58:46.000Z
|
2020-04-10T14:54:56.000Z
|
sqlite_to_stasis.py
|
mrozekma/Sprint
|
0bf531d2f16a7bc5b56dbc8c6eae5dc9e251b2f1
|
[
"MIT"
] | 20
|
2015-01-16T18:46:53.000Z
|
2016-02-18T22:01:00.000Z
|
sqlite_to_stasis.py
|
mrozekma/Sprint
|
0bf531d2f16a7bc5b56dbc8c6eae5dc9e251b2f1
|
[
"MIT"
] | 2
|
2015-08-24T15:39:20.000Z
|
2016-01-03T06:03:13.000Z
|
from os import rename
from os.path import isfile
import pickle
import sqlite3
from stasis.DiskMap import DiskMap
from utils import tsToDate, dateToTs
from datetime import timedelta
source = sqlite3.connect('db')
source.row_factory = sqlite3.Row
dest = DiskMap('db-new', create=True, cache=False)

# Some cleanup, because sqlite apparently doesn't cascade deletes.
# This probably isn't comprehensive, but most databases shouldn't need it anyway.
queries = [
    "DELETE FROM availability WHERE NOT EXISTS (SELECT * FROM users WHERE availability.userid = users.id)",
    "DELETE FROM availability WHERE NOT EXISTS (SELECT * FROM sprints WHERE availability.sprintid = sprints.id)",
    "DELETE FROM grants WHERE NOT EXISTS (SELECT * FROM users WHERE grants.userid = users.id)",
    "DELETE FROM members WHERE NOT EXISTS (SELECT * FROM sprints WHERE members.sprintid = sprints.id)",
    "DELETE FROM tasks WHERE NOT EXISTS (SELECT * FROM sprints WHERE tasks.sprintid = sprints.id)",
    "DELETE FROM assigned WHERE NOT EXISTS (SELECT * FROM tasks WHERE assigned.taskid = tasks.id AND assigned.revision = tasks.revision)",
]
for query in queries:
    cur = source.cursor()
    cur.execute(query)
    cur.close()
# These deletes are never committed: they stay visible to later reads on this
# connection, but are rolled back when source.close() runs, so the database
# preserved below as db-old.sqlite keeps its original rows.

# Some tables get converted directly:
for table in ['users', 'sprints', 'groups', 'goals', 'log', 'projects', 'notes', 'messages', 'searches', 'retrospective_categories', 'retrospective_entries', 'changelog_views']:
    cur = source.cursor()
    # %-interpolation of the table name is safe here: the names come from the
    # fixed list above, and placeholders can't be used for identifiers anyway.
    cur.execute("SELECT * FROM %s" % table)
    for row in cur:
        data = {k: row[k] for k in row.keys()}
        print "%-20s %d" % (table, data['id'])
        dest[table][data['id']] = data
    cur.close()

# Settings are converted to a straight key/value store; no IDs
cur = source.cursor()
cur.execute("SELECT * FROM settings WHERE name != 'gitURL'")
for row in cur:
    print "%-20s %s" % ('settings', row['name'])
    dest['settings'][row['name']] = row['value']
cur.close()

# Tasks have multiple revisions; they're stored as a list
cur = source.cursor()
cur.execute("SELECT * FROM tasks ORDER BY id, revision")
for row in cur:
    rev = {k: row[k] for k in row.keys()}
    print "%-20s %d (revision %d)" % ('tasks', row['id'], row['revision'])
    if int(rev['revision']) == 1:
        dest['tasks'][rev['id']] = [rev]
    else:
        with dest['tasks'].change(rev['id']) as data:
            assert len(data) + 1 == rev['revision']
            data.append(rev)
cur.close()

# Linking tables no longer exist.
# Instead, add the lists directly to the appropriate parent record.
# grants -> users.privileges
# (the privileges table is gone entirely)
for userid in dest['users']:
    with dest['users'].change(userid) as data:
        data['privileges'] = set()
cur = source.cursor()
cur.execute("SELECT g.userid, p.name FROM grants AS g, privileges AS p WHERE g.privid = p.id")
for row in cur:
    print "%-20s %d (%s)" % ('grants', row['userid'], row['name'])
    with dest['users'].change(int(row['userid'])) as data:
        data['privileges'].add(row['name'])
cur.close()

# members -> sprints.members
if 'sprints' in dest:
    for sprintid in dest['sprints']:
        with dest['sprints'].change(sprintid) as data:
            data['memberids'] = set()
    cur = source.cursor()
    cur.execute("SELECT * FROM members")
    for row in cur:
        print "%-20s %d (%d)" % ('members', row['sprintid'], row['userid'])
        with dest['sprints'].change(int(row['sprintid'])) as data:
            data['memberids'].add(row['userid'])
    cur.close()

# assigned -> tasks.assigned
if 'tasks' in dest:
    for taskid in dest['tasks']:
        with dest['tasks'].change(taskid) as data:
            for rev in data:
                rev['assignedids'] = set()
    cur = source.cursor()
    cur.execute("SELECT * FROM assigned")
    for row in cur:
        print "%-20s %d (revision %d) %s" % ('assigned', row['taskid'], row['revision'], row['userid'])
        with dest['tasks'].change(int(row['taskid'])) as data:
            data[int(row['revision']) - 1]['assignedids'].add(row['userid'])
    cur.close()

# search_uses -> searches.followers
if 'searches' in dest:
    for searchid in dest['searches']:
        with dest['searches'].change(searchid) as data:
            data['followerids'] = set()
    cur = source.cursor()
    cur.execute("SELECT * FROM search_uses")
    for row in cur:
        print "%-20s %d (%d)" % ('search_uses', row['searchid'], row['userid'])
        with dest['searches'].change(int(row['searchid'])) as data:
            data['followerids'].add(row['userid'])
    cur.close()

# prefs is converted normally, except the id is now set to the userid
# prefs_backlog_styles -> prefs.backlogStyles
# prefs_messages -> prefs.messages
cur = source.cursor()
cur.execute("SELECT * FROM prefs")
for row in cur:
    print "%-20s %d" % ('prefs', row['userid'])
    dest['prefs'][int(row['userid'])] = {}
    with dest['prefs'].change(int(row['userid'])) as data:
        data['id'] = int(row['userid'])
        data['defaultSprintTab'] = row['defaultSprintTab']
        data['backlogStyles'] = {}
        cur2 = source.cursor()
        cur2.execute("SELECT * FROM prefs_backlog_styles WHERE userid = %d" % int(row['userid']))
        for row2 in cur2:
            data['backlogStyles'][row2['status']] = row2['style']
        cur2.close()
        data['messages'] = {}
        cur2 = source.cursor()
        cur2.execute("SELECT * FROM prefs_messages WHERE userid = %d" % int(row['userid']))
        for row2 in cur2:
            data['messages'][row2['type']] = bool(row2['enabled'])
        cur2.close()
cur.close()

# Anyone who doesn't have prefs gets a default record
for userid in dest['users']:
    if userid not in dest['prefs']:
        dest['prefs'][userid] = {
            'id': userid,
            'defaultSprintTab': 'backlog',
            'backlogStyles': {status: 'show' for status in [
                'not started', 'in progress', 'complete', 'blocked',
                'deferred', 'canceled', 'split']},
            'messages': {'sprintMembership': False, 'taskAssigned': False,
                         'noteRelated': True, 'noteMention': True, 'priv': True},
        }

# Availability is now stored by sprint id
# The contents are {user_id: {timestamp: hours}}
if 'sprints' in dest:
    for sprintid, data in dest['sprints'].iteritems():
        m = {}
        for userid in data['memberids']:
            m[userid] = {}
            print "%-20s %d %d" % ('availability', sprintid, userid)
            cur = source.cursor()
            cur.execute("SELECT hours, timestamp FROM availability WHERE sprintid = %d AND userid = %d AND timestamp != 0" % (sprintid, userid))
            for row in cur:
                m[userid][int(row['timestamp'])] = int(row['hours'])
            cur.close()
        dest['availability'][sprintid] = m

# Make search.public a bool instead of an int
if 'searches' in dest:
    for searchid in dest['searches']:
        with dest['searches'].change(searchid) as data:
            data['public'] = bool(data['public'])

# Bump the DB version
dest['settings']['dbVersion'] = 20
source.close()

# Swap the new store into place, keeping the old database as a backup
rename('db', 'db-old.sqlite')
rename('db-new', 'db')
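
# A possible post-migration spot check (not part of the original script). It
# assumes DiskMap can re-open an existing store with the constructor shown
# above when create=False; everything else uses only operations already seen
# in this script.
check = DiskMap('db', create=False, cache=False)
# The key/value settings store should carry the bumped schema version
assert check['settings']['dbVersion'] == 20
# Task revisions were appended in order, so list position matches revision - 1
for taskid in check['tasks']:
    revisions = check['tasks'][taskid]
    assert [int(r['revision']) for r in revisions] == range(1, len(revisions) + 1)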
| 37.59887
| 327
| 0.677536
|
93db9daeaca176a0d9639c9a8adf4162b78f5785
| 52
|
py
|
Python
|
list_ebs.py
|
willfong/aws-helper
|
21708044fbf95b76393e9b5f0e86c5e74ff11c77
|
[
"MIT"
] | null | null | null |
list_ebs.py
|
willfong/aws-helper
|
21708044fbf95b76393e9b5f0e86c5e74ff11c77
|
[
"MIT"
] | null | null | null |
list_ebs.py
|
willfong/aws-helper
|
21708044fbf95b76393e9b5f0e86c5e74ff11c77
|
[
"MIT"
] | null | null | null |
import boto3
aws_ebs_client = boto3.client('ebs')
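
# Note: boto3.client('ebs') is the EBS direct API (block-level snapshot
# access) and has no operation for listing volumes. Since the rest of this
# file is not shown, here is a sketch of what a list_ebs.py typically needs,
# using the EC2 API instead -- an assumption, not the repo's actual code:
ec2 = boto3.client('ec2')
paginator = ec2.get_paginator('describe_volumes')
for page in paginator.paginate():
    for vol in page['Volumes']:
        # VolumeId, Size (GiB), State, and AvailabilityZone are standard
        # fields of each describe_volumes record
        print('%-24s %4d GiB %-10s %s' % (
            vol['VolumeId'], vol['Size'], vol['State'], vol['AvailabilityZone']))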
| 10.4
| 36
| 0.75
|
93dfe7ab2f36df70ba6de51ccd1196139a54d7d0
| 1,211
|
py
|
Python
|
MakeSlides/MakeSlides.py
|
bobm123/BeeWareTalk
|
d6df32320f59bcd0f71a181c3d67ce4cbe5eb1b3
|
[
"MIT"
] | null | null | null |
MakeSlides/MakeSlides.py
|
bobm123/BeeWareTalk
|
d6df32320f59bcd0f71a181c3d67ce4cbe5eb1b3
|
[
"MIT"
] | null | null | null |
MakeSlides/MakeSlides.py
|
bobm123/BeeWareTalk
|
d6df32320f59bcd0f71a181c3d67ce4cbe5eb1b3
|
[
"MIT"
] | null | null | null |
'''
Generate slideshows from markdown using the remark.js script.
Details here:
https://github.com/gnab/remark

Run it like this:
python MakeSlides.py <source_text.md> "<Slidestack Title>" index.html
'''
import sys
import os
template = '''
<!DOCTYPE html>
<html>
<head>
<title>{title_string}</title>
<meta charset="utf-8">
<style>{css_string}</style>
</head>
<body>
<textarea id="source">{markdown_string}</textarea>
<script src="https://remarkjs.com/downloads/remark-latest.min.js">
</script>
<script>
var slideshow = remark.create();
</script>
</body>
</html>
'''
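
# The rest of the script is not part of this record; a minimal sketch of how
# the template might be filled, matching the usage line in the docstring.
# The function name and the empty css default are assumptions.
def make_slides(md_path, title, out_path, css=''):
    # Read the markdown source and drop it into the remark.js page template
    with open(md_path) as f:
        markdown = f.read()
    with open(out_path, 'w') as f:
        f.write(template.format(title_string=title,
                                css_string=css,
                                markdown_string=markdown))

if __name__ == '__main__':
    make_slides(sys.argv[1], sys.argv[2], sys.argv[3])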
| 20.525424
| 71
| 0.625929
|
93e030c92ac6f8fce1b888c7a5422a8bac82faba
| 144
|
py
|
Python
|
makesolid/__init__.py
|
aarchiba/makesolid
|
121fca121a838fa4d62ae96ce1fc81dba64c2198
|
[
"MIT"
] | null | null | null |
makesolid/__init__.py
|
aarchiba/makesolid
|
121fca121a838fa4d62ae96ce1fc81dba64c2198
|
[
"MIT"
] | null | null | null |
makesolid/__init__.py
|
aarchiba/makesolid
|
121fca121a838fa4d62ae96ce1fc81dba64c2198
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from ._mesh import *
from ._openscad import *
from ._threads import *
| 18
| 47
| 0.722222
|
93e13a546c607eee62ff4605caebeeafa51bfb7a
| 6,805
|
py
|
Python
|
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | null | null | null |
pricePrediction/preprocessData/prepareDataMol2Price.py
|
rsanchezgarc/CoPriNet
|
33708a82746278270fd1aa600d4b562ea0f62c1c
|
[
"MIT"
] | 1
|
2022-03-02T16:21:16.000Z
|
2022-03-02T16:21:16.000Z
|
import gzip
import os
import re
import sys
import time
from functools import reduce
from itertools import chain
from multiprocessing import cpu_count
import lmdb
import psutil
import joblib
from joblib import Parallel, delayed
import numpy as np
from pricePrediction import config
from pricePrediction.config import USE_MMOL_INSTEAD_GRAM
from pricePrediction.preprocessData.serializeDatapoints import getExampleId, serializeExample
from pricePrediction.utils import tryMakedir, getBucketRanges, search_buckedId, EncodedDirNamesAndTemplates
from .smilesToGraph import smiles_to_graph, compute_nodes_degree, fromPerGramToPerMMolPrice
PER_WORKER_MEMORY_GB = 2
if __name__ == "__main__":
    print(" ".join(sys.argv))

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputDir", type=str, default=config.DATASET_DIRNAME,
                        help="Directory where smiles-price pairs are located")
    parser.add_argument("-o", "--encodedDir", type=str, default=config.ENCODED_DIR)
    parser.add_argument("-n", "--ncpus", type=int, default=config.N_CPUS)
    args = vars(parser.parse_args())

    config.N_CPUS = args.get("ncpus", config.N_CPUS)
    # Every CLI flag (inputDir, encodedDir, ncpus) is forwarded to
    # prepareDataset as a keyword argument, so its signature must accept them.
    dataBuilder = DataBuilder(n_cpus=config.N_CPUS)
    dataBuilder.prepareDataset(datasetSplit="train", **args)
    dataBuilder.prepareDataset(datasetSplit="val", **args)
    dataBuilder.prepareDataset(datasetSplit="test", **args)
'''
python -m pricePrediction.preprocessData.prepareDataMol2Price
'''
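
# Hypothetical helper (not in the original file): if prepareDataset turned out
# not to accept every CLI flag, one could forward only the keyword arguments
# its signature declares. filtered_kwargs is an illustrative name.
import inspect

def filtered_kwargs(func, kwargs):
    """Keep only the entries of kwargs that func names as parameters."""
    accepted = set(inspect.signature(func).parameters)
    return {k: v for k, v in kwargs.items() if k in accepted}

# usage: dataBuilder.prepareDataset(datasetSplit="train",
#            **filtered_kwargs(dataBuilder.prepareDataset, args))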
| 42.006173
| 140
| 0.627039
|
93e251e9378f58f91368189ca0f98d7e9d184630
| 173
|
py
|
Python
|
Demos/Demo-4.2 Modules/script_3.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | 4
|
2018-04-28T13:43:20.000Z
|
2021-03-11T16:10:35.000Z
|
Demos/Demo-4.2 Modules/script_3.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | null | null | null |
Demos/Demo-4.2 Modules/script_3.py
|
Josverl/MicroPython-Bootcamp
|
29f5ccc9768fbea621029dcf6eea9c91ff84c1d5
|
[
"MIT"
] | null | null | null |
# Import just one function from a module
# to save memory
from module import dowork

# Now we can call the imported function directly, without the module prefix
# (see the alias form below for binding a different name)
dowork(13, 45)
dir()
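
# If the intent was to bind a different name, the alias form of the import
# does that (same module and function as above):
from module import dowork as dw
dw(13, 45)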
| 19.222222
| 63
| 0.745665
|
93e2b831da7ddd82cdee3f6c7c6866a56f385beb
| 2,894
|
py
|
Python
|
lanzou/gui/workers/more.py
|
WaterLemons2k/lanzou-gui
|
f5c57f980ee9a6d47164a39b90d82eb0391ede8b
|
[
"MIT"
] | 1,093
|
2019-12-25T10:42:34.000Z
|
2022-03-28T22:35:32.000Z
|
lanzou/gui/workers/more.py
|
Enrontime/lanzou-gui
|
8e89438d938ee4994a4118502c3f14d467b55acc
|
[
"MIT"
] | 116
|
2019-12-24T04:01:43.000Z
|
2022-03-26T16:12:41.000Z
|
lanzou/gui/workers/more.py
|
Enrontime/lanzou-gui
|
8e89438d938ee4994a4118502c3f14d467b55acc
|
[
"MIT"
] | 188
|
2020-01-11T14:17:13.000Z
|
2022-03-29T09:18:34.000Z
|
from PyQt5.QtCore import QThread, pyqtSignal, QMutex
from lanzou.api import LanZouCloud
from lanzou.gui.models import Infos
from lanzou.debug import logger
| 34.86747
| 106
| 0.516586
|
93e32bae11b86f9998eb40958c5a33c52acf9800
| 2,728
|
py
|
Python
|
src/ipu/source/folder_monitor.py
|
feagi/feagi-core
|
d83c51480fcbe153fa14b2360b4d61f6ae4e2811
|
[
"Apache-2.0"
] | 11
|
2020-02-18T16:03:10.000Z
|
2021-12-06T19:53:06.000Z
|
src/ipu/source/folder_monitor.py
|
feagi/feagi-core
|
d83c51480fcbe153fa14b2360b4d61f6ae4e2811
|
[
"Apache-2.0"
] | 34
|
2019-12-17T04:59:42.000Z
|
2022-01-18T20:58:46.000Z
|
src/ipu/source/folder_monitor.py
|
feagi/feagi-core
|
d83c51480fcbe153fa14b2360b4d61f6ae4e2811
|
[
"Apache-2.0"
] | 3
|
2019-12-16T06:09:56.000Z
|
2020-10-18T12:01:31.000Z
|
"""
Source: https://camcairns.github.io/python/2017/09/06/python_watchdog_jobs_queue.html
This class inherits from the Watchdog PatternMatchingEventHandler class. In this code our watchdog will only be
triggered if a file is moved to have a .trigger extension. Once triggered the watchdog places the event object on the
queue, ready to be picked up by the worker thread
"""
import string
import time
from queue import Queue
from threading import Thread
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from inf import runtime_data
from ipu.processor import utf
# todo: combine all of this module into a single class
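
# The handler class itself is not part of this record; a minimal sketch of the
# pattern the docstring describes, reusing the imports above. The class name
# and queue wiring are placeholders; '*.trigger' mirrors the docstring.
class TriggerHandler(PatternMatchingEventHandler):
    """Put matching filesystem events on a queue for a worker thread."""

    def __init__(self, event_queue, patterns=("*.trigger",)):
        super().__init__(patterns=list(patterns))
        self.event_queue = event_queue

    def on_moved(self, event):
        # Fires when a file is renamed so that it matches '*.trigger'
        self.event_queue.put(event)

def worker(event_queue):
    # Drain the queue; each item is one watchdog event for a trigger file
    while True:
        event = event_queue.get()
        # ... process the file referenced by event.dest_path here ...
        event_queue.task_done()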
| 36.864865
| 117
| 0.66129
|