hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23944dce2a5292a5f4dccddbeb6c459b0db88881 | 821 | py | Python | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
] | 2 | 2017-08-23T16:58:01.000Z | 2020-07-03T01:53:34.000Z | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
] | 11 | 2017-08-18T17:19:21.000Z | 2022-03-18T15:54:40.000Z | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
] | 2 | 2017-09-11T19:38:04.000Z | 2020-05-28T00:58:05.000Z | import pandas as pd
from typing import List, NamedTuple
from .timeseries import agg_by_category_by_date
from primitive_interfaces.base import PrimitiveBase
| 28.310345 | 94 | 0.65408 |
2395f7861779117a4a46333dc411993e7c87448d | 2,723 | py | Python | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
] | null | null | null | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
] | null | null | null | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
] | null | null | null | # 804. Unique Morse Code Words
""" https://leetcode.com/problems/unique-morse-code-words/discuss/120675/\
Easy-and-Concise-Solution-C++JavaPython
def uniqueMorseRepresentations(self, words):
d = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---",
"-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-",
"..-", "...-", ".--", "-..-", "-.--", "--.."]
return len({''.join(d[ord(i) - ord('a')] for i in w) for w in words})
"""
# 771. Jewels and Stones, 98.33%
# https://leetcode.com/problems/jewels-and-stones/description/
def numJewelsInStones(self, J, S):
    """
    :type J: str
    :type S: str
    :rtype: int
    """
    # Count, for every jewel character in J, how many stones in S match it.
    # str.count keeps the original nested-loop semantics: a jewel that
    # appears twice in J is counted once per occurrence.
    return sum(S.count(jewel) for jewel in J)
""" https://leetcode.com/problems/jewels-and-stones/discuss/113553/\
Easy-and-Concise-Solution-using-hash-set-C++JavaPython
def numJewelsInStones(self, J, S):
setJ = set(J)
return sum(s in setJ for s in S)
"""
# 806. Number of Lines To Write String
# https://leetcode.com/problems/number-of-lines-to-write-string/
def numberOfLines(self, widths, S):
    """
    :type widths: List[int]
    :type S: str
    :rtype: List[int]
    """
    # Greedily pack letters onto lines of at most 100 units; widths[k] is
    # the width of the k-th lowercase letter.
    line_count, used = 1, 0
    for letter in S:
        w = widths[ord(letter) - ord('a')]
        if used + w > 100:
            # Letter does not fit: start a new line holding just it.
            line_count += 1
            used = w
        else:
            used += w
    return [line_count, used]
""" https://leetcode.com/problems/number-of-lines-to-write-string/discuss/\
120666/Easy-Solution-6-lines-C++JavaPython
def numberOfcurs(self, widths, S):
res, cur = 1, 0
for i in S:
width = widths[ord(i) - ord('a')]
res += 1 if cur + width > 100 else 0
cur = width if cur + width > 100 else cur + width
return [res, cur]
"""
| 27.785714 | 79 | 0.480353 |
2396d1a675b3960ca8025853ba1b4a50d69159c9 | 19,779 | py | Python | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 32 | 2019-07-11T22:58:14.000Z | 2022-03-04T19:34:55.000Z | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 3 | 2021-05-24T14:03:13.000Z | 2022-01-07T03:47:32.000Z | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
] | 5 | 2019-07-24T09:38:07.000Z | 2021-12-30T09:20:20.000Z | import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import logging
from pygna import output
from pygna.utils import YamlConfig
import pandas as pd
import random
import string
import seaborn as sns
import pygna.output as output
def generate_graph_from_sm(n_nodes: int, block_model: pd.DataFrame, nodes_in_block: list = False,
                           node_names: list = None, nodes_percentage: list = None) -> nx.Graph:
    """
    Create a graph with n_nodes vertices whose edges are sampled from the
    block-model matrix describing intra- and inter-block connectivity.

    Cluster assignment precedence: nodes_percentage (random draw with the
    given probabilities) > nodes_in_block (explicit per-block counts) >
    uniform random assignment.

    :param n_nodes: the number of nodes in the block model
    :param block_model: block connection-probability matrix.
        NOTE(review): annotated as a DataFrame, but it is indexed below as
        ``block_model[i, j]`` and via ``.shape`` -- a numpy array is what
        actually works here; confirm callers.
    :param nodes_in_block: number of nodes per block (NOTE(review): default
        is ``False`` rather than ``None`` -- only its truthiness is used)
    :param node_names: optional node labels; defaults to ``range(n_nodes)``
    :param nodes_percentage: per-block probabilities used to draw each
        node's cluster, e.g. [0.5, 0.5]
    :return: an undirected networkx Graph whose nodes carry a ``cluster``
        attribute

    Example
    _______
    >>> bm = pd.DataFrame(mydata_matrix)
    >>> nodes = list("A","B","C")
    >>> graph = generate_graph_from_sm(n_nodes, bm, nodes_in_block, nodes, nodes_percentage)
    """
    if not node_names:
        node_names = range(n_nodes)
    edges = []
    G = nx.Graph()
    if nodes_percentage:
        # Draw each node's cluster from the given block probabilities.
        cluster = np.random.choice(block_model.shape[0], size=n_nodes, p=nodes_percentage)
        np.random.shuffle(cluster)
    elif nodes_in_block:
        # Expand explicit counts into one cluster label per node, then shuffle.
        list_temp = [nodes_in_block[i] * [i] for i in range(len(nodes_in_block))]
        cluster = np.array([val for sublist in list_temp for val in sublist])
        np.random.shuffle(cluster)
    else:
        # cluster is an array of random numbers corresponding to the cluster of each node
        cluster = np.random.randint(block_model.shape[0], size=n_nodes)
    for i in range(n_nodes):
        G.add_node(node_names[i], cluster=cluster[i])
    # Bernoulli trial per unordered node pair with the block probability.
    for i in range(n_nodes):
        for j in range(i + 1, n_nodes):
            if np.random.rand() < block_model[cluster[i], cluster[j]]:
                edges.append((node_names[i], node_names[j]))
    G.add_edges_from(edges)
    return G
def plot_bm_graph(graph: nx.Graph, block_model: pd.DataFrame, output_folder: str = None) -> None:
    """
    Save a three-panel figure to <output_folder>block_model.pdf: the observed
    network, the stochastic block matrix, and the network coloured by cluster.

    :param graph: the graph; each node must carry a ``cluster`` attribute
    :param block_model: the block probability matrix
    :param output_folder: the folder where to save the file.
        NOTE(review): the default ``None`` would make the final string
        concatenation fail -- callers appear expected to always pass a path.

    Example
    _______
    >>> bm = pd.DataFrame(mydata_matrix)
    >>> graph = nx.complete_graph(100)
    >>> plot_bm_graph(graph, bm, output_folder="./results/")
    """
    nodes = graph.nodes()
    # Fixed palette: supports at most 5 clusters (indices 0-4).
    colors = ['#b15928', '#1f78b4', '#6a3d9a', '#33a02c', '#ff7f00']
    cluster = nx.get_node_attributes(graph, 'cluster')
    labels = [colors[cluster[n]] for n in nodes]
    # Single layout reused by panels 1 and 3 so they are directly comparable.
    layout = nx.spring_layout(graph)
    plt.figure(figsize=(13.5, 5))
    # Panel 1: raw network without cluster information.
    plt.subplot(1, 3, 1)
    nx.draw(graph, nodelist=nodes, pos=layout, node_color='#636363', node_size=50, edge_color='#bdbdbd')
    plt.title("Observed network")
    # Panel 2: the block probability matrix as a heatmap.
    plt.subplot(1, 3, 2)
    plt.imshow(block_model, cmap='OrRd', interpolation='nearest')
    plt.title("Stochastic block matrix")
    # Panel 3: same layout, nodes coloured by cluster membership.
    plt.subplot(1, 3, 3)
    legend = []
    for ix, c in enumerate(colors):
        legend.append(mpatches.Patch(color=c, label='C%d' % ix))
    nx.draw(graph, nodelist=nodes, pos=layout, node_color=labels, node_size=50, edge_color='#bdbdbd')
    plt.legend(handles=legend, ncol=len(colors), mode="expand", borderaxespad=0)
    plt.title("SB clustering")
    plt.savefig(output_folder + 'block_model.pdf', bbox_inches='tight')
def generate_sbm_network(input_file: "yaml configuration file") -> None:
    """
    This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes.
    All parameters must be specified in a yaml file.
    This function allows to create network and geneset for any type of SBM

    Expected YAML keys (as read below): BlockModel.matrix, BlockModel.n_nodes,
    BlockModel.nodes_percentage, Simulations.output_folder, Simulations.suffix,
    Simulations.n_simulated.
    """
    ym = YamlConfig()
    config = ym.load_config(input_file)
    print(config)
    # Build the stochastic block model from the configured probability matrix.
    bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"],
                    nodes_percentage=config["BlockModel"]["nodes_percentage"])
    outpath = config["Simulations"]["output_folder"]
    suffix = config["Simulations"]["suffix"]
    # One (network, geneset) file pair per requested simulation.
    for i in range(config["Simulations"]["n_simulated"]):
        bm.create_graph()
        bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv")
        bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt")
        # bm.plot_graph(outpath+suffix+"_s_"+str(i))
def generate_sbm2_network(output_folder: 'folder where the simulations are saved',
                          prefix: 'prefix for the simulations' = 'sbm',
                          n_nodes: 'nodes in the network' = 1000,
                          theta0: 'probability of connection in the cluster' = '0.9,0.7,0.5,0.2',
                          percentage: 'percentage of nodes in cluster 0, use ratio 0.1 = 10 percent' = '0.1',
                          density: 'multiplicative parameter used to define network density' = '0.06,0.1,0.2',
                          n_simulations: 'number of simulated networks for each configuration' = 3
                          ):
    """
    This function generates the simulated networks and genesets using the stochastic block model with 2 BLOCKS as described in the paper. The output names are going to be prefix_t_<theta0>_p_<percentage>_d_<density>_s_<n_simulation>_network.tsv or _genes.gmt
    One connected cluster while the rest of the network has the same probability of connection. SBM = d *[theta0, 1-theta0 1-theta0, 1-theta0]
    The simulator checks for connectedness of the generated network, if the generated net is not connected, a new simulation is generated.

    Note: theta0, percentage and density accept comma-separated strings; one
    batch of n_simulations networks is generated for every
    (percentage, theta0, density) combination.
    """
    # Parse the comma-separated CLI-style strings into numeric lists.
    teta_ii = [float(i) for i in theta0.replace(' ', '').split(',')]
    percentages = [float(i) for i in percentage.replace(' ', '').split(',')]
    density = [float(i) for i in density.replace(' ', '').split(',')]
    n_simulated = int(n_simulations)
    n_nodes = int(n_nodes)
    # Sweep every parameter combination.
    for p in percentages:
        for t in teta_ii:
            for d in density:
                # 2x2 block matrix: block 0 connects internally with prob d*t;
                # every other node pair connects with prob d*(1-t).
                matrix = np.array([[d * t, d * (1 - t)], [d * (1 - t), d * (1 - t)]])
                bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
                for i in range(n_simulated):
                    name = output_folder + prefix + "_t_" + str(t) + "_p_" + str(p) + "_d_" + str(d) + "_s_" + str(i)
                    bm.create_graph()
                    bm.write_network(name + "_network.tsv")
                    bm.write_cluster_genelist(name + "_genes.gmt")
#########################################################################
####### COMMAND LINE FUNCTIONS ##########################################
#########################################################################
def generate_gna_sbm( output_tsv: 'output_network',
                      output_gmt: 'output geneset filename, this contains only the blocks',
                      output_gmt2: 'mixture output geneset filename, this contains the mixture blocks'=None,
                      N:'number of nodes in the network' = 1000,
                      block_size:'size of the first 8 blocks' = 50,
                      d:'baseline probability of connection, p0 in the paper' = 0.06,
                      fc_cis:'positive within-block scaling factor for the probability of connection, Mii = fc_cis * d (alpha parameter in the paper)' = 2.,
                      fc_trans:'positive between-block scaling factor for the probability of connection, (beta parameter in the paper)' = .5,
                      pi : 'percentage of block-i nodes for the genesets made of block-i and block-j. Use symmetrical values (5,95),use string comma separated' = '4,6,10,12,88,90,94,96',
                      descriptor='crosstalk_sbm',
                      sbm_matrix_figure: 'shows the blockmodel matrix' = None):
    """
    Generate a benchmark network and genesets to test the crosstalk between
    two blocks.

    Four blocks are wired with probability ``A = fc_cis * d`` within the
    block, four more keep the baseline ``d``; crosstalk between the paired
    blocks (0,1), (2,3), (4,5), (6,7) is ``B = d + fc_trans * d * (fc_cis-1)``.
    All remaining ``N - 8 * block_size`` nodes form a background block with
    baseline probability ``d``.

    Make sure that 8 * block_size < N.

    :raises ValueError: if N is too small to host the 8 blocks
    :raises RuntimeError: if no fully connected network is obtained after 20
        attempts (the density d is too low)
    """
    clusters = 8
    lc = N - (block_size * clusters)   # size of the background block
    if lc < 1:
        # Bug fix: previously this only logged an error and let networkx
        # fail later with a confusing message; fail fast instead.
        raise ValueError('nodes are less than cluster groups')
    d = float(d)
    sizes = clusters * [block_size]
    sizes.append(lc)
    print(sizes)
    # Baseline connection probability everywhere; keep the matrix symmetric.
    probs = d * np.ones((9, 9))
    A = fc_cis * d                          # within-block (cis) probability
    B = d + fc_trans * (d * (fc_cis - 1))   # between paired blocks (trans)
    # Crosstalk between the four block pairs.
    for i, j in ((0, 1), (2, 3), (4, 5), (6, 7)):
        probs[i, j] = B
        probs[j, i] = B
    # Enriched within-block probability for the first four blocks only.
    for i in range(4):
        probs[i, i] = A
    if type(sbm_matrix_figure) == str:
        f, ax = plt.subplots(1)
        sns.heatmap(probs, ax=ax, cmap='YlOrRd', annot=True)
        f.savefig(sbm_matrix_figure)
    # Resample until the generated network is connected (its largest
    # connected component spans all N nodes); abort instead of looping
    # forever when the density is too low.
    ncycle = 0
    k = 0
    while k < N:
        if ncycle > 20:
            # Bug fix: the original only logged here and kept looping forever.
            raise RuntimeError('density is too low')
        g = nx.stochastic_block_model(sizes, probs)
        # NOTE(review): connected_component_subgraphs was removed in
        # networkx >= 2.4; this code assumes an older networkx version.
        g = max(nx.connected_component_subgraphs(g), key=len)
        k = len(g)
        ncycle += 1
    H = nx.relabel_nodes(g, lambda x: 'n' + str(x))
    # One geneset per block; the background block (last partition) is skipped.
    gmt_diz = {}
    nodes = list(H.nodes)
    for p, l in enumerate(H.graph['partition'][:-1]):
        if p < 4:
            name = 'positive_' + str(p)
        else:
            name = 'null_' + str(p)
        ll = [nodes[i] for i in l]
        gmt_diz[name] = {}
        gmt_diz[name]['genes'] = ll
        gmt_diz[name]['descriptor'] = descriptor
    if type(output_gmt2) == str:
        perc = [float(i) for i in pi.split(',')]
        # Bug fix: the original format string had no argument for %s.
        logging.info('Generating mixes with perc = %s', perc)
        gmt_diz2 = {}
        mix_dix = get_mix_genesets(gmt_diz, perc=perc)
        for name, i in mix_dix.items():
            gmt_diz2[name] = {}
            gmt_diz2[name]['genes'] = i
            gmt_diz2[name]['descriptor'] = descriptor
        output.print_GMT(gmt_diz2, output_gmt2)
    write_network(H, output_tsv)
    output.print_GMT(gmt_diz, output_gmt)
    print('Generated' + output_tsv)
def generate_gnt_sbm( output_tsv: 'output network filename',
                      output_gmt: 'output geneset filename, this contains only the blocks',
                      N:'number of nodes in the network' = 1000,
                      block_size: 'size of the first 6 blocks'= 50,
                      d: 'baseline probability of connection, p0 in the paper' = 0.06,
                      fold_change:'positive within-block scaling factor for the probability of connection, Mii = fold_change * d (alpha parameter in the paper)' = 2.,
                      descriptor:'descriptor for the gmt file'='mixed_sbm'):
    """
    Generate a GNT benchmark network and geneset with the stochastic block
    model: 3 blocks are wired with probability fold_change * d within the
    block, 3 more keep the baseline d, and the remaining N - 6 * block_size
    nodes form a background block with baseline probability d.

    Make sure that 6 * block_size < N.

    :raises ValueError: if N is too small to host the 6 blocks
    :raises RuntimeError: if no fully connected network is obtained after 20
        attempts (the density d is too low)
    """
    lc = N - (block_size * 6)   # size of the background block
    if lc < 1:
        # Bug fix: previously this only logged an error and let networkx
        # fail later with a confusing message; fail fast instead.
        raise ValueError('nodes are less than cluster groups')
    d = float(d)
    sizes = 6 * [block_size]
    sizes.append(lc)
    print(sizes)
    # Baseline probability everywhere; enrich the first three blocks only.
    probs = d * np.ones((7, 7))
    for i in range(3):
        probs[i, i] = fold_change * d
    # Resample until the generated network is connected (its largest
    # connected component spans all N nodes); abort instead of looping
    # forever when the density is too low.
    ncycle = 0
    k = 0
    while k < N:
        if ncycle > 20:
            # Bug fix: the original only logged here and kept looping forever.
            raise RuntimeError('density is too low')
        g = nx.stochastic_block_model(sizes, probs)
        # NOTE(review): connected_component_subgraphs was removed in
        # networkx >= 2.4; this code assumes an older networkx version.
        g = max(nx.connected_component_subgraphs(g), key=len)
        k = len(g)
        ncycle += 1
    H = nx.relabel_nodes(g, lambda x: 'n' + str(x))
    # One geneset per block; the background block (last partition) is skipped.
    gmt_diz = {}
    nodes = list(H.nodes)
    for p, l in enumerate(H.graph['partition'][:-1]):
        if p < 3:
            name = 'positive_' + str(p)
        else:
            name = 'null_' + str(p)
        ll = [nodes[i] for i in l]
        gmt_diz[name] = {}
        gmt_diz[name]['genes'] = ll
        gmt_diz[name]['descriptor'] = descriptor
    write_network(H, output_tsv)
    output.print_GMT(gmt_diz, output_gmt)
| 36.027322 | 258 | 0.590323 |
23971993e9893cd5f385730b84276166fd285f88 | 184 | py | Python | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 18.4 | 34 | 0.684783 |
2398b8c755adf06d3f7f1e5cae4d4aedb1f1899b | 443 | py | Python | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | 5 | 2021-09-09T21:08:14.000Z | 2021-12-14T02:30:52.000Z | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | null | null | null | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | 8 | 2021-09-09T17:46:07.000Z | 2022-02-08T22:41:35.000Z | import pandas as pd
# Build a small demo table: each column is a Series keyed by person name.
# pandas aligns rows on the union of the indices; cells missing from a
# Series (e.g. joan's "children") become NaN.
_weight = pd.Series([145, 182, 191], index=["joan", "bob", "mike"])
_birthyear = pd.Series([2002, 2000, 1999], index=["bob", "joan", "mike"], name="year")
_children = pd.Series([1, 2], index=["mike", "bob"])
_hobby = pd.Series(["Rock Climbing", "Scuba Diving", "Sailing"], index=["joan", "bob", "mike"])

people_dict = {
    "weight": _weight,
    "birthyear": _birthyear,
    "children": _children,
    "hobby": _hobby,
}

people = pd.DataFrame(people_dict)
print(people)
| 31.642857 | 104 | 0.557562 |
239de3aa205a8c68e33dedf541996817e27acfa5 | 3,440 | py | Python | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
] | 1 | 2021-11-09T12:01:56.000Z | 2021-11-09T12:01:56.000Z | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
] | null | null | null | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
] | null | null | null | #
# Copyright (C) 2014 Dominik Oepen
#
# This file is part of virtualsmartcard.
#
# virtualsmartcard is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# virtualsmartcard is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# virtualsmartcard. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
from virtualsmartcard.SmartcardSAM import *
if __name__ == "__main__":
unittest.main()
# CF = CryptoflexSE(None)
# print CF.generate_public_key_pair(0x00, 0x80, "\x01\x00\x01\x00")
# print MyCard._get_referenced_key(0x01)
| 40 | 79 | 0.670058 |
239ed9095bc55c203b6c4b8328d5c14492d59001 | 6,762 | py | Python | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
] | null | null | null | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
] | 1 | 2020-09-25T23:31:21.000Z | 2020-09-25T23:31:21.000Z | test/phagesExperiment/runTableCases.py | edsaac/VirusTransport_RxSandbox | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
] | 1 | 2021-09-30T05:00:58.000Z | 2021-09-30T05:00:58.000Z | ###############################################################
# _ _ _ _ _
# | |__ (_) ___ _ __ __ _ _ __| |_(_) ___| | ___
# | '_ \| |/ _ \| '_ \ / _` | '__| __| |/ __| |/ _ \
# | |_) | | (_) | |_) | (_| | | | |_| | (__| | __/
# |_.__/|_|\___/| .__/ \__,_|_| \__|_|\___|_|\___|
# |_|
#
###############################################################
#
# $ python3 runTableCases.py [CASES.CSV] [TEMPLATE.IN] -run
#
# Where:
# - [CASES.CSV] path to csv file with the list of
# parameters and the corresponding tags
# - [TEMPLATE.IN] input file template for PFLOTRAN and
# the corresponding tags
# - [shouldRunPFLOTRAN = "-run"]
#
###############################################################
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from os import system
import sys
## Global variables
ColumnLenght = 50.0
ConcentrationAtInlet = 1.66E-16
## Non-dimensional numbers
## Tags dictionary for variables in input file
tagsReplaceable = {
"Porosity" : "<porosity>",
"DarcyVel" : "<darcyVel>", # q = u*porosity
"CleanTime" : "<elutionTime>", # t @ C0 = 0
"FinalTime" : "<endTime>", # @ 10 pore volumes
"AttachRate": "<katt>",
"DetachRate": "<kdet>",
"DecayAq" : "<decayAq>",
"DecayIm" : "<decayIm>",
"LongDisp" : "<longDisp>"
}
## Tags dictionary for other parameters
tagsAccesory = {
"FlowVel" : "poreWaterVel",
"PoreVol" : "poreVolume",
"pH" : "pH",
"IonicStr" : "IS"
}
## Path to PFLOTRAN executable
PFLOTRAN_path = "$PFLOTRAN_DIR/src/pflotran/pflotran "
## Table with the set of parameters
# CLI argument 1: path to the CSV table of case parameters (required).
try:
    parameters_file = str(sys.argv[1])
except IndexError:
    sys.exit("Parameters file not defined :(")
setParameters = read_csv(parameters_file)
total_rows = setParameters.shape[0]
## Template for the PFLOTRAN input file
# CLI argument 2: path to the PFLOTRAN input template (required).
try:
    template_file = str(sys.argv[2])
except IndexError:
    sys.exit("Template file not found :(")
## Run cases?
# CLI argument 3: optional "-run" flag; absent means build cases only.
try:
    shouldRunPFLOTRAN = "-run" in str(sys.argv[3])
except IndexError:
    shouldRunPFLOTRAN = False
## Delete previous cases
system("rm -rf CASE*")
## Row in the set of parameters table = case to be run
for i in range(total_rows):
#for i in range(1):
    ## Create a folder for the case
    current_folder = "./CASE_" + "{0:03}".format(i+1)
    system("mkdir " + current_folder)
    ## Copy template input file to folder
    system("cp " + template_file + " " + current_folder+"/pflotran.in")
    current_file = current_folder + "/pflotran.in"
    ## Replace tags for values in case
    # Substitute each template tag (e.g. <porosity>) in-place via sed with
    # the case's value formatted in scientific notation.
    for current_tag in tagsReplaceable:
        COMM = "sed -i 's/" + tagsReplaceable[current_tag] + "/"\
            +'{:.3E}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])\
            + "/g' " + current_file
        system(COMM)
    ## Run PFLOTRAN in that case
    if shouldRunPFLOTRAN:
        #print(PFLOTRAN_path + "-pflotranin " + current_file)
        system(PFLOTRAN_path + "-pflotranin " + current_file)
    #system("python3 ./miscellaneous/organizeResults.py " + current_folder + "/pflotran-obs-0.tec -clean")
    # Accessory (non-template) columns used only for plotting.
    current_U = setParameters.loc[i,tagsAccesory["FlowVel"]]
    current_pH = setParameters.loc[i,tagsAccesory["pH"]]
    current_IS = setParameters.loc[i,tagsAccesory["IonicStr"]]
    current_PV = setParameters.loc[i,tagsAccesory["PoreVol"]]
    #Porosity = setParameters.loc[i,tagsReplaceable["Porosity"]]
    #input("Press Enter to continue...")
    # NOTE(review): plotResults is not defined in this chunk -- presumably
    # defined elsewhere in the original script; confirm before running.
    plotResults(current_U,current_pH,current_IS,current_PV,\
        setParameters.loc[i,tagsReplaceable["AttachRate"]],\
        setParameters.loc[i,tagsReplaceable["DetachRate"]],\
        setParameters.loc[i,tagsReplaceable["DecayAq"]],\
        setParameters.loc[i,tagsReplaceable["DecayIm"]],\
        setParameters.loc[i,tagsReplaceable["LongDisp"]])
    #input("Press Enter to continue...")
## Gather every generated figure into a fresh ./pictures folder.
system("rm -r pictures ; mkdir pictures")
system("cp CASE**/*.png ./pictures/")
239f83a7c0d314a200223629c25572a463600e23 | 593 | py | Python | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
] | null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
] | null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
#
# This software is covered by The Unlicense license
#
import os, pymongo, sys
# Entry point.  NOTE(review): main() is not defined in this chunk --
# presumably defined elsewhere in the original file; confirm.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit cleanly; fall back to os._exit if sys.exit is
        # intercepted (it raises SystemExit, which could itself be caught).
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| 17.969697 | 64 | 0.60371 |
23a0582a156a5116f9a3e62beef47135533e30c9 | 203 | py | Python | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
] | null | null | null | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
] | null | null | null | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
| 22.555556 | 49 | 0.73399 |
23a2a97bb6db12d817c114dd0b13665cae319c12 | 2,185 | py | Python | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
] | 2 | 2022-01-05T08:41:38.000Z | 2022-02-14T01:30:08.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
] | 1 | 2022-03-28T03:23:36.000Z | 2022-03-28T03:23:36.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
] | 2 | 2022-01-07T05:56:43.000Z | 2022-02-16T13:26:13.000Z | import time
import torch
from torch import nn
from torch.nn import functional as F
#import spconv
import torchplus
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
import sys
# Remove ROS Kinetic's Python 2 dist-packages from the import path if
# present; presumably it shadows the Python 3 packages used by this module
# -- TODO confirm (a common culprit is cv2).
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
| 33.106061 | 92 | 0.556522 |
23a456677b9384e5a17f6de8dcdc1e93e2a745f9 | 3,001 | py | Python | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
] | 1 | 2021-04-16T13:05:20.000Z | 2021-04-16T13:05:20.000Z | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
] | null | null | null | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
] | 2 | 2019-06-24T06:45:46.000Z | 2019-06-28T19:43:20.000Z | import string
from typing import List, Dict
# inject code here #
# Set of Unicode code points treated as hyphen characters; presumably used
# by the line-gluing / de-hyphenation logic elsewhere in this module (not
# visible in this chunk -- confirm).
_HYPHEN_CHARS = {
    '\u002D', # HYPHEN-MINUS
    '\u00AD', # SOFT HYPHEN
    '\u2010', # HYPHEN
    '\u2011', # NON-BREAKING HYPHEN
}
| 26.557522 | 97 | 0.55115 |
23a5398ab784fc5aa194816a75732cc159a8849f | 1,241 | py | Python | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
] | null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
] | null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
] | null | null | null | from django.conf.urls import include
from django.urls import path
from django.contrib import admin
from users.views import FacebookLogin
import django_js_reverse.views
from rest_framework.routers import DefaultRouter
from common.routes import routes as common_routes
# DRF router: registers every viewset declared in common.routes under /api/.
router = DefaultRouter()
routes = common_routes
for route in routes:
    router.register(route['regex'], route['viewset'],
                    basename=route['basename'])
# URL map: app includes, DRF/rest-auth endpoints (incl. Facebook social
# login), admin, the JS-reverse helper, and the API routes from the router.
# NOTE(review): "api/" appears twice; Django tries patterns in order, so
# requests unmatched by the router fall through to users.urls.
urlpatterns = [
    path("", include("common.urls"), name="common"),
    path("assignments/", include("assignments.urls"), name='assignments'),
    path('api-auth/', include('rest_framework.urls')),
    path('rest-auth/', include('rest_auth.urls')),
    path('rest-auth/registration/', include('rest_auth.registration.urls')),
    path('rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'),
    path("admin/", admin.site.urls, name="admin"),
    path("jsreverse/", django_js_reverse.views.urls_js, name="js_reverse"),
    path("api/", include(router.urls), name="api"),
    path("api/assignments/", include("assignments.api.assignment.urls")),
    path("api/grade-assignment/", include("assignments.api.graded-assignment.urls")),
    path("api/", include("users.urls"), name="user"),
]
| 37.606061 | 85 | 0.706688 |
23a5e45f9981098530b74e9239812e4a0d27fb21 | 7,302 | py | Python | core/dataset/data_loader.py | thuzhaowang/idn-solver | 7da29ce0b0bd7e76023e1cae56e3d186b324a394 | [
"MIT"
] | 22 | 2021-10-11T02:31:52.000Z | 2022-02-23T08:06:14.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | [
"MIT"
] | 4 | 2021-12-02T02:36:30.000Z | 2022-03-16T01:04:47.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | [
"MIT"
] | 4 | 2022-01-20T03:12:23.000Z | 2022-03-16T00:08:54.000Z | import numpy as np
from path import Path
import random
import pickle
import torch
import os
import cv2
def load_as_float(path):
    """Load the image at *path* as an RGB float32 array.

    cv2.imread returns the image in BGR order, or None when the file is
    missing or unreadable; we fail fast on None instead of letting
    cv2.cvtColor raise a cryptic assertion error.

    :param path: filesystem path to the image file
    :return: HxWx3 float32 numpy array in RGB channel order
    :raises FileNotFoundError: if the image cannot be read
    """
    im = cv2.imread(path)
    if im is None:
        # imread signals failure by returning None rather than raising.
        raise FileNotFoundError("Unable to read image: {}".format(path))
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB).astype(np.float32)
    return im
| 38.840426 | 161 | 0.65037 |
23a6372b0029d78dd5def2146734771fbbe2bd48 | 1,632 | py | Python | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
] | null | null | null | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
] | null | null | null | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
] | null | null | null | import Queue
import select
import socket
from conf import ADDRESS, BACKLOG, SIZE
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
print 'starting up on %s port %s' % ADDRESS
server.bind(ADDRESS)
server.listen(BACKLOG)
inputs = [server]
outputs = []
message_queues = {}
while inputs:
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s is server:
connection, client_address = s.accept()
print 'new connection from', client_address
connection.setblocking(0)
inputs.append(connection)
message_queues[connection] = Queue.Queue()
else:
data = s.recv(SIZE)
if data:
print 'received from %s' % str(s.getpeername())
message_queues[s].put(data)
if s not in outputs:
outputs.append(s)
else:
print 'closing socket after reading no data'
inputs.remove(s)
s.close()
del message_queues[s]
for s in writable:
try:
next_msg = message_queues[s].get_nowait()
print 'sending to %s' % str(s.getpeername())
s.send(next_msg)
except Queue.Empty:
print 'output queue for', s.getpeername(), 'is empty'
outputs.remove(s)
for s in exceptional:
print 'handling exceptional condition for', s.getpeername()
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
del message_queues[s]
| 27.661017 | 76 | 0.578431 |
23a7e7e53ed3f920173ee73d17e3e8afad1d765f | 3,813 | py | Python | glue.py | mkechagia/android-survey | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
] | 1 | 2022-01-26T08:14:24.000Z | 2022-01-26T08:14:24.000Z | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
] | null | null | null | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
] | null | null | null | import re
import copy
from collections import defaultdict
from string import Template
# initialize the dictionary for the methods with checked exceptions such as {fake method: real method}
# Keys are the "fake" method names shown to survey participants; values are
# the real Android API methods.  NOTE(review): the "_new" suffix on several
# values appears to mark the variants used for the checked-exceptions
# survey -- confirm against the survey material.
method_dict_checked = {'deleteRecord' : 'delete', \
'editText' : 'setText_new', \
'insertData' : 'insert_new', \
'setLayout' : 'setContentView_new', \
'findViewId' : 'findViewById_new', \
'changeTextColor' : 'setTextColor_new', \
'getCursorString' : 'getString', \
'queryData' : 'query_new', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText_new'}
# initialize the dictionary for the methods with unchecked exceptions such as {fake method: real method}
method_dict_unchecked = {'deleteRecord' : 'delete', \
'editText' : 'setText', \
'insertData' : 'insert', \
'setLayout' : 'setContentView', \
'findViewId' : 'findViewById', \
'changeTextColor' : 'setTextColor', \
'getCursorString' : 'getString', \
'queryData' : 'query', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText'}
# answer_block is a dict of user's answers,
# i.e. answer_block = {'answer_1' : fake_answer}
# survey type refers to the different surveys
# (methods with checked exceptions Vs. methods with unchecked exceptions--documented and undocumented)
# Bind the answers' methods to the real Android's API methods
# answers is a dict, i.e. answers = {'answer_1' : fake_answer}
# This function returns a dict of answers with real Android's
# API methods, i.e. real_answers = {'answer_1' : real_answer}
# dict depending on the survey type
# replace line numbers with spaces
# vim: tabstop=8 noexpandtab shiftwidth=8 softtabstop=0
| 34.981651 | 104 | 0.710464 |
23a85b9835619dae1db6fad9d342a22f09ccf61a | 272 | py | Python | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
] | null | null | null | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
] | null | null | null | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
] | null | null | null | # a = 1
# b = 1
# while (not ((a==0) and (b==0))):
# a, b = map(int, input().split())
# print(a+b)
# Echo the sum of each input pair until the "0 0" terminator line.
while True:
    first, second = (int(tok) for tok in input().split())
    if first == second == 0:
        break
    print(first + second)
23a8d2f8440fc0f4ab166887414f385e16797422 | 381 | py | Python | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
] | 3 | 2015-10-12T21:32:37.000Z | 2021-09-16T16:51:03.000Z | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
] | null | null | null | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
] | null | null | null | from builtins import str
from .helpers import run
import logging
import subprocess
import functools
import types
logger = logging.getLogger("commander")
def maestro(scriptId):
    """Run a Keyboard Maestro script by ID (more robust) or name."""
    # Assemble the one-line osascript invocation first, then hand it to the
    # shared shell helper; %s stringifies the ID exactly as before.
    command = (
        """osascript -e 'tell application "Keyboard Maestro Engine" to """
        """do script "%s"'\n""" % scriptId
    )
    run(command)
| 22.411765 | 74 | 0.67979 |
23a8d8b1cd48f9fd55d3941e62fe86313bca756e | 764 | py | Python | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
] | null | null | null | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
] | null | null | null | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
] | null | null | null | from planning_system.db.schema.views import _get_set_cols
def definition(session):
    """
    Return UI view.

    Complex view, which requires a dynamic pivot: the pivoted column list
    depends on the finance sets present, so the SQL text is assembled at
    call time instead of being stored statically.
    """
    # Comma-separated pivot column list derived from the current f_set rows.
    set_cols = _get_set_cols(session)
    return f"""
    SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level, {set_cols}
    FROM (SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level,
    CAST(f_Set.acad_year as CHAR(4)) + ' ' + f_set.set_cat_id as finance_summary, amount as amount
    FROM [v_mri_finance_grouped_subtotal] f INNER JOIN f_set ON f_set.set_id = f.set_id) p
    PIVOT
    (SUM(amount) FOR finance_summary in ({set_cols})) as pvt
    """
| 38.2 | 120 | 0.700262 |
23abc12980cb0a7128b692a9097ad4b745fb655b | 756 | py | Python | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T11:49:35.000Z | 2021-03-24T11:49:35.000Z | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
] | null | null | null | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torchvision.models as models
'''
Description:
convert torch module to JIT TracedModule.
torch JIT TracedModule
'''
# Entry point: trace a torchvision resnet18 into a TorchScript TracedModule
# and hand it to TracedModelFactory for saving as '<model_name>.pth'.
if __name__ == "__main__":
    # Example (batch, channels, height, width) input tensor driving the trace.
    dummy_input = torch.randn(1, 3, 224, 224) # dummy_input is customized by user
    model = models.resnet18(pretrained=True) # model is customized by user
    # Move to CPU and switch to eval mode before tracing.
    model = model.cpu().eval()
    traced_model = torch.jit.trace(model, dummy_input)
    model_name = 'model_name' # model_name is customized by user
    # NOTE(review): TracedModelFactory is neither imported nor defined in
    # this file as shown -- confirm where it comes from before running.
    TracedModelFactory(model_name + '.pth', traced_model)
| 28 | 81 | 0.718254 |
23ad1135866d4f8277494a12a0ed3be2f1311aa3 | 9,739 | py | Python | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | 1 | 2021-05-30T13:27:33.000Z | 2021-05-30T13:27:33.000Z | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | null | null | null | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | null | null | null | # cppsimdata.py
# written by Michael H. Perrott
# with minor modifications by Doug Pastorello to work with both Python 2.7 and Python 3.4
# available at www.cppsim.com as part of the CppSim package
# Copyright (c) 2013-2017 by Michael H. Perrott
# This file is disributed under the MIT license (see Copying file)
import ctypes as ct
import numpy as np
import sys
import os
import platform
import subprocess as sp
import contextlib
from scipy.signal import lfilter,welch
def cppsim_unbuffer_for_print(status, stream='stdout'):
    """Yield lines from a subprocess stream one at a time, unbuffered.

    Reads the named stream (default ``stdout``) of *status* a character at
    a time so tool output can be echoed as it is produced; stops once the
    stream is exhausted and ``status.poll()`` reports the process has ended.
    """
    terminators = ('\r', '\n', '\r\n')
    pipe = getattr(status, stream)
    with contextlib.closing(pipe):
        while True:
            chars = []
            ch = pipe.read(1)
            # Empty read plus a non-None poll() means the process is done.
            if ch == '' and status.poll() is not None:
                break
            while ch not in terminators:
                if ch == '' and status.poll() is not None:
                    break
                chars.append(ch)
                ch = pipe.read(1)
            yield ''.join(chars)
def cppsim(sim_file="test.par"):
    """Netlist the current Sue2 module and build its CppSim simulation.

    Runs three external tools in sequence -- sue_cppsim_netlister,
    net2code, and make -- echoing their output line by line.  Must be run
    from a .../SimRuns/<Library_name>/<Module_name> directory; any tool
    failure terminates the interpreter via sys.exit().

    :param sim_file: simulation parameter file name; '.par' is appended
        when missing.
    """
    # Normalize the parameter file name so it always carries a .par suffix.
    if sim_file.find('.par') < 0:
        sim_file = sim_file + '.par'
    # Resolve the CppSim install directory: CppSimHome, then CPPSIMHOME,
    # then the conventional ~/CppSim fallback.
    cppsim_home = os.getenv('CppSimHome')
    if cppsim_home == None:
        cppsim_home = os.getenv('CPPSIMHOME')
    if cppsim_home == None:
        home = os.getenv('HOME')
        if sys.platform == 'win32':
            default_cppsim_home = "%s\\CppSim" % (home)
        else:
            default_cppsim_home = "%s/CppSim" % (home)
        if os.path.isdir(default_cppsim_home):
            cppsim_home = default_cppsim_home
        else:
            # NOTE(review): this error path only prints; cppsim_home stays
            # None and the path joins below would then fail -- confirm
            # whether a sys.exit() was intended here.
            print('Error running cppsim from Python: environment variable')
            print('    CPPSIMHOME is undefined')
    # Resolve the shared tools directory the same way, defaulting to
    # <cppsim_home>/CppSimShared.
    cppsimshared_home = os.getenv('CppSimSharedHome')
    if cppsimshared_home == None:
        cppsimshared_home = os.getenv('CPPSIMSHAREDHOME')
    if cppsimshared_home == None:
        if sys.platform == 'win32':
            default_cppsimshared_home = "%s\\CppSimShared" % (cppsim_home)
        else:
            default_cppsimshared_home = "%s/CppSimShared" % (cppsim_home)
        if os.path.isdir(default_cppsimshared_home):
            cppsimshared_home = default_cppsimshared_home
        else:
            print('Error running cppsim: environment variable')
            print('    CPPSIMSHAREDHOME is undefined')
    # print('cppsimhome: %s' % cppsim_home)
    # print('cppsimsharedhome: %s' % cppsimshared_home)
    # The working directory encodes what to simulate:
    # .../SimRuns/<library_name>/<cell_name>.
    cur_dir = os.getcwd()
    if sys.platform == 'win32':
        i = cur_dir.lower().find('\\simruns\\')
    else:
        i = cur_dir.lower().find('/simruns/')
    if i < 0:
        print('Error running cppsim: you need to run this Python script')
        print('    in a directory of form:')
        if sys.platform == 'win32':
            print('    .....\\SimRuns\\Library_name\\Module_name')
        else:
            print('    ...../SimRuns/Library_name/Module_name')
        print('    -> in this case, you ran in directory:')
        print('    %s' % cur_dir)
        sys.exit()
    # Drop everything through 'SimRuns/' (9 characters including slashes).
    library_cell = cur_dir[i+9:1000]
    if sys.platform == 'win32':
        i = library_cell.find('\\')
    else:
        i = library_cell.find('/')
    if i < 0:
        print('Error running cppsim: you need to run this Python script')
        print('    in a directory of form:')
        print('    ...../SimRuns/Library_name/Module_name')
        print('    -> in this case, you ran in directory:')
        print('    %s' % cur_dir)
        sys.exit()
    library_name = library_cell[0:i]
    cell_name = library_cell[i+1:1000]
    print("Running CppSim on module '%s' (Lib:'%s'):" % (cell_name, library_name))
    # Step 1: netlist the schematic with the Sue2 netlister.
    print("\n... netlisting ...\n")
    if sys.platform == 'win32':
        rp_base = '%s/Sue2/bin/win32/sue_cppsim_netlister' % (cppsimshared_home)
    else:
        rp_base = '%s/Sue2/bin/sue_cppsim_netlister' % (cppsimshared_home)
    rp_arg1 = cell_name
    rp_arg2 = '%s/Sue2/sue.lib' % (cppsim_home)
    rp_arg3 = '%s/Netlist/netlist.cppsim' % (cppsim_home)
    rp = [rp_base, rp_arg1, rp_arg2, rp_arg3]
    status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
    # Echo tool output as it arrives; the generator drives poll(), so
    # returncode is populated by the time the stream ends.
    for line in cppsim_unbuffer_for_print(status):
        print(line)
    if status.returncode != 0:
        print('************** ERROR: exited CppSim run prematurely! ****************')
        sys.exit()
    # Step 2: generate C++ simulation code from the netlist.
    print('\n... running net2code ...\n')
    if sys.platform == 'win32':
        rp_base = '%s/bin/win32/net2code' % (cppsimshared_home)
    else:
        rp_base = '%s/bin/net2code' % (cppsimshared_home)
    rp_arg1 = '-cpp'
    rp_arg2 = sim_file
    rp = [rp_base, rp_arg1, rp_arg2]
    status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
    for line in cppsim_unbuffer_for_print(status):
        print(line)
    if status.returncode != 0:
        print('************** ERROR: exited CppSim run prematurely! ****************')
        sys.exit()
    # Step 3: compile the generated code (bundled msys make on Windows).
    print('... compiling ...\n')
    if sys.platform == 'win32':
        rp_base = '%s/msys/bin/make' % (cppsimshared_home)
    else:
        rp_base = 'make'
    rp = [rp_base]
    status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
    for line in cppsim_unbuffer_for_print(status):
        print(line)
    if status.returncode != 0:
        print('************** ERROR: exited CppSim run prematurely! ****************')
        sys.exit()
# calculate phase noise: returns frequency (Hz) and specral density (dBc/Hz)
| 38.800797 | 149 | 0.629736 |
23aebda5722243d52ce15ff9c4cb52dbd5434d9f | 1,217 | py | Python | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 1 | 2021-07-30T19:06:07.000Z | 2021-07-30T19:06:07.000Z | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 8 | 2021-04-22T20:47:48.000Z | 2021-07-30T19:06:01.000Z | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | null | null | null |
# Lambda processing
| 30.425 | 101 | 0.758422 |
23b3590bb9d68aac5032da0773011d5e1741a6b6 | 5,977 | py | Python | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
] | 28 | 2019-12-01T21:45:28.000Z | 2021-05-05T17:46:09.000Z | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
] | null | null | null | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
] | 2 | 2020-08-04T12:55:04.000Z | 2020-12-20T22:23:47.000Z | import logging
from datetime import datetime
from typing import List
from notify.backends import BackendFactory
from notify.commands import Command
from notify.config import Config, Stack
from notify.notifications import Factory, Notification
from notify.strategies import StrategyFactory
| 42.091549 | 115 | 0.736657 |
23b5a122ef2746145b44e7be72e1b2d49508e86c | 254 | py | Python | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
] | null | null | null | technology = {
'kb': '''
Oculus(rift)
HTC(vive)
VR(Zuck, rift)
VR(Gabe, vive)
(Oculus(O) & HTC(H)) ==> Dominates(H, O)
(VR(V)) ==> Technology(T)
''',
'queries':'''
VR(x)
Dominates(x, y)
''',
}
# Registry of example knowledge bases, keyed by topic name; consumed by the
# course's logic test harness.
Examples = {
    'technology': technology,
} | 14.111111 | 40 | 0.527559 |
23ba2ecb3b446799d3bd04447ada1a6c88421c82 | 8,113 | py | Python | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
] | 1 | 2019-12-12T13:21:56.000Z | 2019-12-12T13:21:56.000Z | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
] | null | null | null | import logging
import multiprocessing
import os
import time
from functools import partial
from multiprocessing import Process, Queue, Pool
from typing import Iterable
import pandas as pd
import pyarrow as pa
from feast.feature_set import FeatureSet
from feast.type_map import convert_dict_to_proto_values
from feast.types.FeatureRow_pb2 import FeatureRow
from kafka import KafkaProducer
from tqdm import tqdm
from feast.constants import DATETIME_COLUMN
_logger = logging.getLogger(__name__)
GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
CPU_COUNT = os.cpu_count() # type: int
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
def _kafka_feature_row_producer(
    feature_row_queue: Queue, row_count: int, brokers, topic, ctx: dict, pbar: tqdm
):
    """
    Pushes Feature Rows to Kafka. Reads rows from a queue. Function will run
    until total row_count is reached.

    Args:
        feature_row_queue: Queue containing feature rows.
        row_count: Total row count to process
        brokers: Broker to push to
        topic: Topic to push to
        ctx: Context dict used to communicate with primary process
        pbar: Progress bar object
    """
    # Callback for failed production to Kafka
    # Callback for succeeded production to Kafka
    # NOTE(review): on_success / on_error are referenced below but not
    # defined in this file as shown -- the callback defs the two comments
    # above describe were presumably lost; restore them before running.
    producer = KafkaProducer(bootstrap_servers=brokers)
    processed_rows = 0
    # Loop through feature rows until all rows are processed
    while processed_rows < row_count:
        # Wait if queue is empty
        if feature_row_queue.empty():
            time.sleep(1)
            producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
        else:
            # Drain everything currently queued before re-checking progress.
            while not feature_row_queue.empty():
                row = feature_row_queue.get()
                if row is not None:
                    # Push row to Kafka
                    producer.send(topic, row.SerializeToString()).add_callback(
                        on_success
                    ).add_errback(on_error)
                    processed_rows += 1
                    # Force an occasional flush
                    if processed_rows % 10000 == 0:
                        producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
                    del row
            pbar.refresh()
    # Ensure that all rows are pushed
    producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
    # Using progress bar as counter is much faster than incrementing dict
    ctx["success_count"] = pbar.n
    pbar.close()
def ingest_table_to_kafka(
    feature_set: FeatureSet,
    table: pa.lib.Table,
    max_workers: int,
    chunk_size: int = 5000,
    disable_pbar: bool = False,
    timeout: int = None,
) -> None:
    """
    Ingest a PyArrow Table to a Kafka topic based for a Feature Set

    Args:
        feature_set: FeatureSet describing PyArrow table.
        table: PyArrow table to be processed.
        max_workers: Maximum number of workers.
        chunk_size: Maximum size of each chunk when PyArrow table is batched.
        disable_pbar: Flag to indicate if tqdm progress bar should be disabled.
        timeout: Maximum time before method times out
    """
    pbar = tqdm(unit="rows", total=table.num_rows, disable=disable_pbar)
    # Use a small DataFrame to validate feature set schema
    ref_df = table.to_batches(max_chunksize=100)[0].to_pandas()
    df_datetime_dtype = ref_df[DATETIME_COLUMN].dtype
    # Validate feature set schema
    _validate_dataframe(ref_df, feature_set)
    # Create queue through which encoding and production will coordinate
    row_queue = Queue()
    # Create a context object to send and receive information across processes
    ctx = multiprocessing.Manager().dict(
        {"success_count": 0, "error_count": 0, "last_exception": ""}
    )
    # Create producer to push feature rows to Kafka
    ingestion_process = Process(
        target=_kafka_feature_row_producer,
        args=(
            row_queue,
            table.num_rows,
            feature_set.get_kafka_source_brokers(),
            feature_set.get_kafka_source_topic(),
            ctx,
            pbar,
        ),
    )
    try:
        # Start ingestion process
        print(
            f"\n(ingest table to kafka) Ingestion started for {feature_set.name}:{feature_set.version}"
        )
        ingestion_process.start()
        # Iterate over chunks in the table and return feature rows
        # (_encode_pa_chunks is defined elsewhere in this module).
        for row in _encode_pa_chunks(
            tbl=table,
            fs=feature_set,
            max_workers=max_workers,
            chunk_size=chunk_size,
            df_datetime_dtype=df_datetime_dtype,
        ):
            # Push rows onto a queue for the production process to pick up
            row_queue.put(row)
            # Throttle encoding so the queue never grows past chunk_size.
            while row_queue.qsize() > chunk_size:
                time.sleep(0.1)
        # Sentinel marking the end of the encoded rows.
        row_queue.put(None)
    except Exception as ex:
        # NOTE(review): errors are logged and swallowed here; the finally
        # block still reports statistics, so callers see no exception.
        _logger.error(f"Exception occurred: {ex}")
    finally:
        # Wait for the Kafka production to complete
        ingestion_process.join(timeout=timeout)
        failed_message = (
            ""
            if ctx["error_count"] == 0
            else f"\nFail: {ctx['error_count']}/{table.num_rows}"
        )
        last_exception_message = (
            ""
            if ctx["last_exception"] == ""
            else f"\nLast exception:\n{ctx['last_exception']}"
        )
        print(
            f"\nIngestion statistics:"
            f"\nSuccess: {ctx['success_count']}/{table.num_rows}"
            f"{failed_message}"
            f"{last_exception_message}"
        )
def _validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
    """Check that *dataframe* carries every column the feature set needs.

    The mandatory ``datetime`` column, every entity column, and every
    feature column must all be present; the first absent column raises
    ``ValueError`` with a message naming it.
    """
    def _require(column_name, role):
        # Raise with exactly the wording callers and logs already rely on.
        if column_name not in dataframe.columns:
            raise ValueError(
                f"Dataframe does not contain {role} {column_name} in columns {dataframe.columns}"
            )

    if "datetime" not in dataframe.columns:
        raise ValueError(
            f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}'
        )
    for entity in feature_set.entities:
        _require(entity.name, "entity")
    for feature in feature_set.features:
        _require(feature.name, "feature")
| 31.815686 | 103 | 0.646986 |
23bb7ae2de638bcc64e1ae2469bf78db888b942c | 389 | py | Python | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | 2 | 2020-04-24T18:36:52.000Z | 2020-04-25T00:15:57.000Z | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | import collections
# Sample inputs for LeetCode 389 "Find the Difference".
s = "abcd"
t = "abcde"
# NOTE(review): the Solution class is not defined in this file as shown --
# presumably stripped during extraction; confirm against the full source.
p = Solution()
print(p.findTheDifference(s,t)) | 24.3125 | 71 | 0.539846 |
23bbe8dfe70d77ea6c966fa54a0f12dbc414a437 | 16,580 | py | Python | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BackendAddressPoolAddressArgs', 'BackendAddressPoolAddress']
class BackendAddressPoolAddress(pulumi.CustomResource):
    """An address entry of an Azure Load Balancer backend address pool.

    Auto-generated by the Pulumi Terraform Bridge (tfgen); see the file
    header -- do not edit by hand.
    """
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Support both calling conventions (a single Args object, or plain
        # keyword properties): normalize, then delegate to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(BackendAddressPoolAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 backend_address_pool_id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 virtual_network_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Fill in default resource options and validate their type.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # A set opts.id means "get an existing resource"; only then may
        # __props__ be supplied, and input properties are not required.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BackendAddressPoolAddressArgs.__new__(BackendAddressPoolAddressArgs)
            # backend_address_pool_id, ip_address and virtual_network_id are
            # required when creating (opts.urn unset); name is optional.
            if backend_address_pool_id is None and not opts.urn:
                raise TypeError("Missing required property 'backend_address_pool_id'")
            __props__.__dict__["backend_address_pool_id"] = backend_address_pool_id
            if ip_address is None and not opts.urn:
                raise TypeError("Missing required property 'ip_address'")
            __props__.__dict__["ip_address"] = ip_address
            __props__.__dict__["name"] = name
            if virtual_network_id is None and not opts.urn:
                raise TypeError("Missing required property 'virtual_network_id'")
            __props__.__dict__["virtual_network_id"] = virtual_network_id
        super(BackendAddressPoolAddress, __self__).__init__(
            'azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress',
            resource_name,
            __props__,
            opts)
| 47.643678 | 282 | 0.68076 |
23bd05e550888fff887e56ad22915b9704444c37 | 4,136 | py | Python | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
] | 60 | 2015-12-04T20:11:23.000Z | 2019-03-17T20:00:56.000Z | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
] | 68 | 2019-03-22T01:07:32.000Z | 2021-07-02T04:48:57.000Z | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
] | 19 | 2015-09-15T17:30:29.000Z | 2019-03-17T18:05:30.000Z | # -*- coding: utf-8 -*-
import pickle
import os
# third-party imports
import jsonpickle
def writeOutSubmissionsAsJson(redditList, file):
    """Stream the submissions to *file* (binary mode) as a JSON-style object.

    Each submission contributes its getJson() text followed by a trailing
    comma and newline, all wrapped in braces.
    """
    file.write('{\n'.encode('utf8'))
    for entry in redditList:
        # Encode each record as it is produced instead of buffering them all.
        file.write((entry.getJson() + u',\n').encode('utf8'))
    file.write('}'.encode('utf8'))
def saveSubmissionsAsJson(submissions, fileName):
    """Write *submissions* to *fileName* via writeOutSubmissionsAsJson.

    Uses a context manager so the file is closed even if writing raises,
    fixing the leak in the bare open()/close() pair.
    """
    with open(fileName, 'wb') as outputFile:
        writeOutSubmissionsAsJson(submissions, outputFile)
def writeOutSubmissionsAsHtml(redditList, file):
    """Write *redditList* as a standalone HTML page to *file* (binary mode).

    Each submission contributes its getHtml() markup, one per line, inside
    the <body> of a fixed page template.
    """
    # join() builds the body in one pass instead of quadratic str +=.
    submissionsStr = "".join(submission.getHtml() + u'\n' for submission in redditList)

    htmlStructure = u"""<!doctype html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <title>Reddit Saved Comments</title>
</head>
<body>
{0}
</body>
</html>
""".format(submissionsStr)
    file.write(htmlStructure.encode('utf8'))
def saveSubmissionsAsHtml(submissions, fileName):
    """Write *submissions* to *fileName* via writeOutSubmissionsAsHtml.

    Uses a context manager so the file is closed even if writing raises,
    fixing the leak in the bare open()/close() pair.
    """
    with open(fileName, 'wb') as outputFile:
        writeOutSubmissionsAsHtml(submissions, outputFile)
def writeOutSubmissionsAsXML(redditList, file):
    """Write each submission as a <submission> XML element to *file* (binary)."""
    for entry in redditList:
        # Wrap the submission's own XML payload in a <submission> envelope.
        wrapped = u'<submission>\n' + entry.getXML() + u'</submission>\n'
        file.write(wrapped.encode('utf8'))
| 30.637037 | 100 | 0.604691 |
23bd7796ce5dbbe94cd644365987adb6f71698db | 191 | py | Python | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
] | null | null | null | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
] | null | null | null | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.7
from decimal import Decimal
from collections import namedtuple
# Prize bundle granted at one prize level of an MTGA event; currencies not
# spelled out by the caller default to zero.
EventPrizeLevel = namedtuple(
    "EventPrizeLevel", ("packs", "gems", "gold"), defaults=(0, 0, 0)
)
| 23.875 | 69 | 0.712042 |
23be2b4e5eb31b3f80e1bec885f51c83e38a6703 | 621 | py | Python | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
] | null | null | null | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
] | null | null | null | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
] | null | null | null | """
Escreva um programa que faça o computador 'Pensar' em um número inteiro entre 0 e 5
e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador.
"""
from random import randint
# Computer "thinks" of an integer in [0, 5] (randint is inclusive).
numero_gerado_aleatoriamente = randint(0,5)
# Read the player's guess (user-facing text is intentionally Portuguese).
numero_digitado_pelo_usuario = int(input('Adivinhe qual nmero estou pensando, uma dica: entre 0 e 5! '))
if numero_digitado_pelo_usuario == numero_gerado_aleatoriamente:
    print(f'VOC ACERTOU! O nmero que estava pensando era mesmo o {numero_gerado_aleatoriamente}!')
else:
    print(f'Voc errou! O nmero que pensei era {numero_gerado_aleatoriamente}')
| 38.8125 | 107 | 0.772947 |
23be77dcebe4a2a83f67827319e9327e25df75de | 1,699 | py | Python | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
] | 38 | 2020-06-07T14:44:11.000Z | 2022-03-08T06:19:49.000Z | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
] | 9 | 2020-10-22T02:38:01.000Z | 2022-03-15T09:53:30.000Z | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
] | 6 | 2021-03-04T21:32:34.000Z | 2021-12-24T05:58:35.000Z | import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
| 33.313725 | 116 | 0.575633 |
23c09d19f8336af168a12e16ec8d400bf72a904d | 7,740 | py | Python | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
] | null | null | null | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
] | null | null | null | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : scene_graph.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 07/19/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Scene Graph generation.
"""
import os
import torch
import torch.nn as nn
import jactorch
import jactorch.nn as jacnn
from . import functional
# Extra scene-graph debugging is enabled when the DEBUG_SCENE_GRAPH
# environment variable parses to a non-zero integer (default off).
DEBUG = bool(int(os.getenv('DEBUG_SCENE_GRAPH', 0)))
# Explicit public API of this module.
__all__ = ['SceneGraph']
| 47.484663 | 120 | 0.588889 |
23c22b89349457ba83481e99d719de420d9ae033 | 645 | py | Python | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
] | 1 | 2021-02-11T10:03:42.000Z | 2021-02-11T10:03:42.000Z | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
] | 2 | 2020-02-24T11:52:51.000Z | 2021-07-05T19:38:55.000Z | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
] | 4 | 2020-02-10T14:49:41.000Z | 2021-05-07T14:41:32.000Z | import re
from abc import ABCMeta
from dateutil import parser
| 22.241379 | 77 | 0.615504 |
23c2c0ad760da305cb104343e55a702bf05d28ce | 630 | py | Python | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
] | 2 | 2019-02-06T19:47:48.000Z | 2019-10-30T07:30:14.000Z | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
] | 6 | 2019-03-21T18:29:04.000Z | 2019-04-11T18:31:34.000Z | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
] | null | null | null | import unittest
import networkx as nx
from core.placement.spsolver import DPShortestPathSolver
# Run this module's unittest suite when executed directly.
if __name__ == '__main__':
unittest.main() | 30 | 113 | 0.680952 |
23c38e57ef816e8a8c15f2598a7fb8639340906e | 1,285 | py | Python | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 6 | 2021-07-29T03:26:20.000Z | 2022-01-28T15:11:45.000Z | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 2 | 2021-09-30T09:47:23.000Z | 2022-01-31T03:08:24.000Z | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
] | 5 | 2021-08-10T06:41:11.000Z | 2022-01-29T17:50:20.000Z | """
# INTEGER BREAK
Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get.
Example 1:
Input: 2
Output: 1
Explanation: 2 = 1 + 1, 1 × 1 = 1.
Example 2:
Input: 10
Output: 36
Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
Note: You may assume that n is not less than 2 and not larger than 58.
"""
| 26.770833 | 167 | 0.525292 |
23c54e92e07439e85887d70b0a443815fb516d17 | 1,112 | py | Python | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
] | 27 | 2015-03-06T05:50:35.000Z | 2021-03-01T07:54:03.000Z | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
] | 2 | 2015-02-05T14:59:07.000Z | 2016-02-19T00:18:52.000Z | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
] | 13 | 2015-01-25T12:43:42.000Z | 2019-11-25T17:46:42.000Z | #!/usr/bin/env python
from distutils.core import setup
# One-line summary reused as the PyPI description and as a fallback
# long description.
SHORT_DESCR = "CAmera MOtion COMPensation using image stiching techniques to generate stabilized videos"
# Prefer the README for the long description; fall back to the summary when
# the file is missing (e.g. installing outside the source checkout).
try:
    LONG_DESCR = open('README.rst').read()
except IOError:
    LONG_DESCR = SHORT_DESCR
# Distutils metadata for the camocomp package.
setup(
    name='camocomp',
    version='0.1',
    author='Adrien Gaidon',
    author_email='easy_to_guess@googleme.com',
    keywords='camera motion compensation, video stabilization, stitching, opencv, hugin',
    packages=['camocomp'],
    url='http://pypi.python.org/pypi/camocomp/',
    license='New BSD License',
    description=SHORT_DESCR,
    long_description=LONG_DESCR,
    platforms=["Linux"],
    # Informational only: distutils records but does not install these.
    requires=['numpy', 'ffmpeg', 'cv2', 'hsi'],
    scripts=['scripts/camocomp_video'],
    classifiers=[
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved',
        'Programming Language :: Python',
        'Topic :: Software Development',
        'Topic :: Scientific/Engineering',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Unix',
    ]
)
| 30.054054 | 104 | 0.660072 |
23c622f1dbca6b4b9f0f05bf93b50ad3b73a9109 | 408 | py | Python | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
] | null | null | null | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
] | null | null | null | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
# app.add_template_filter(li_reverse, 'li_rv') #
# Start Flask's built-in development server when run as a script.
if __name__ == "__main__":
    # debug=True enables the reloader and interactive debugger -- dev only.
    app.run(debug=True)
| 17 | 59 | 0.647059 |
23c6a65b4e2832bc68e0d04d1fcc2bd1ed8f0280 | 801 | py | Python | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
] | 16 | 2017-02-22T02:26:41.000Z | 2021-04-05T10:28:02.000Z | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
] | 22 | 2017-02-27T21:50:45.000Z | 2021-05-21T02:31:35.000Z | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
] | 8 | 2017-09-30T09:50:44.000Z | 2021-05-20T22:29:54.000Z | """"""
import matplotlib as mpl
__all__ = ["set"]
def set(tick_scale=1, rc=None):
    """
    Control plot style and scaling using seaborn and the
    matplotlib rcParams interface.

    :param tick_scale: A scaler number controling the spacing
        on tick marks, defaults to 1.
    :type tick_scale: float
    :param rc: Additional settings to pass to rcParams.  ``None`` (the
        default) means no extra settings; entries in *rc* override the
        tick defaults below on key collisions.
    :type rc: dict or None
    """
    # Use None instead of a mutable default: a dict() default would be
    # created once at definition time and shared between all calls.
    if rc is None:
        rc = {}
    # Tick sizes scale linearly with tick_scale; the remaining entries are
    # fixed cosmetic choices.
    rc_log_defaults = {
        'xtick.major.size': 10. * tick_scale,
        'xtick.minor.size': 6. * tick_scale,
        'ytick.major.size': 10. * tick_scale,
        'ytick.minor.size': 6. * tick_scale,
        'xtick.color': '0.0',
        'ytick.color': '0.0',
        'axes.linewidth': 1.75,
        'mathtext.default': 'regular'
    }
    # Caller-supplied rc entries win over the defaults.
    mpl.rcParams.update(dict(rc_log_defaults, **rc))
| 26.7 | 62 | 0.601748 |
23c7aeb8b7efffbb30d83d454153984dd31f2ff4 | 169 | py | Python | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | # M1
# NOTE(review): mul1 is not defined in this file as shown -- presumably a
# curried-multiplier function stripped during extraction; confirm source.
myresult = mul1(3)
print(myresult(7))
#M-2
# Lambda with a default argument returning an inner multiplier closure;
# calling mul() with no argument binds a=3.
mul = lambda a = 3: (lambda b: a*b)
myres = mul()
print(myres)
print(myres(7))
| 14.083333 | 35 | 0.633136 |
23c87c4cfb4e5c6fd8c9ed42a1f9fee075d07137 | 414 | py | Python | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
] | 1 | 2020-08-08T16:01:55.000Z | 2020-08-08T16:01:55.000Z | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
] | 4 | 2022-02-15T02:42:33.000Z | 2022-02-28T01:30:07.000Z | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
] | 1 | 2020-11-22T12:08:51.000Z | 2020-11-22T12:08:51.000Z | from enum import Enum, auto
from typing import NamedTuple, Optional
| 21.789474 | 49 | 0.611111 |
23c9d0fc017e203c468d9f46add866be9898f0bd | 2,961 | py | Python | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
] | 2 | 2022-03-16T13:50:21.000Z | 2022-03-27T15:14:09.000Z | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
] | null | null | null | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
] | 2 | 2021-07-18T03:10:12.000Z | 2022-03-27T15:14:11.000Z | # -*- coding: utf-8 -*-
import numpy as np
from odbAccess import *
from abaqusConstants import *
# Base name (no extension) shared by the .npz results and the .odb database.
filename = 'Job-4e-SS-Pulse'
"""
LOAD DATA
===============================================================================
"""
# Load the signed von Mises results previously exported to a NumPy archive.
results = np.load(filename + '.npz')
vonMisesMax = results['vonMisesMax'].transpose()
vonMisesMin = results['vonMisesMin'].transpose()
vonMisesStatic = results['vonMisesStatic'].transpose()
nodeNum = results['nodeNum'].transpose()
nodeCoord = results['nodeCoord']
# Sort nodeCoord on nodal values
nodeCoord = nodeCoord[nodeCoord[:,0].argsort()]
# Calculate Mean and Amplitude
vonMisesAmp = (vonMisesMax - vonMisesMin)/2
vonMisesMean = (vonMisesMax + vonMisesMin)/2
"""
LOAD ODB
===============================================================================
"""
# Open the output database writable so new field outputs can be added.
odb = openOdb(filename+'.odb',readOnly=False)
# Get Instance
allInstances = (odb.rootAssembly.instances.keys())
odbInstance = odb.rootAssembly.instances[allInstances[-1]]
"""
FORMAT AND SAVE DATA TO ODB
===============================================================================
"""
# Abaqus' addData API requires contiguous arrays: int32 node labels and
# float32 column vectors of values.
vMNodes = np.ascontiguousarray(nodeNum, dtype=np.int32)
vMMax = np.ascontiguousarray(np.reshape(vonMisesMax,(-1,1)), dtype=np.float32)
vMMin = np.ascontiguousarray(np.reshape(vonMisesMin,(-1,1)), dtype=np.float32)
vMStatic = np.ascontiguousarray(np.reshape(vonMisesStatic,(-1,1)), dtype=np.float32)
vMMean = np.ascontiguousarray(np.reshape(vonMisesMean,(-1,1)), dtype=np.float32)
vMAmp = np.ascontiguousarray(np.reshape(vonMisesAmp,(-1,1)), dtype=np.float32)
# Attach each quantity as a nodal SCALAR field on the last frame of
# 'Step-6-Response' so it can be contour-plotted in Abaqus/Viewer.
newFieldOutputMax = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMax', description = 'Max Signed von Mises', type = SCALAR)
newFieldOutputMax.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMax.tolist())
newFieldOutputMin = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMin', description = 'Min Signed von Mises', type = SCALAR)
newFieldOutputMin.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMin.tolist())
newFieldOutputMStatic = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMStatic', description = 'Static Signed von Mises', type = SCALAR)
newFieldOutputMStatic.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMStatic.tolist())
newFieldOutputMean = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMean', description = 'Signed von Mises Mean', type = SCALAR)
newFieldOutputMean.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMean.tolist())
newFieldOutputAmp = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMAmp', description = 'Signed von Mises Amplitude', type = SCALAR)
newFieldOutputAmp.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMAmp.tolist())
"""
SAVE AND CLOSE
===============================================================================
"""
odb.save()
odb.close()
| 37.961538 | 150 | 0.660588 |
23cbfc7fdcdcf980a0e3a9a727e48fece2483a0e | 7,014 | py | Python | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
] | null | null | null | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
] | null | null | null | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
] | null | null | null | """
SSH reimplementation in Python, made by Unazed Spectaculum under the MIT license
"""
import socket
import struct
| 44.675159 | 120 | 0.681637 |
23ce177acd70b69372b2d3dd196d4ee81ee251d0 | 1,140 | py | Python | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 104 | 2015-11-02T00:08:32.000Z | 2022-02-17T23:17:14.000Z | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 68 | 2015-11-09T05:33:24.000Z | 2020-04-10T06:46:54.000Z | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
] | 25 | 2015-11-19T05:34:09.000Z | 2021-07-20T13:54:03.000Z | import random
def find_spelling(n):
    """Factor n - 1 as 2**r * d with d odd and return (r, d).

    This is the decomposition required by the Miller-Rabin witness loop.
    Note: expects n >= 2 (for n == 1 the loop below would never terminate,
    matching the original behaviour).
    """
    d = n - 1
    r = 0
    # Peel factors of two off n - 1 until only the odd part remains.
    while d % 2 == 0:
        d //= 2
        r += 1
    return r, d
def probably_prime(n, k=10):
    """
    Miller-Rabin probabilistic primality test.

    Input: n: integer to test (values below 2 return False)
           k: number of rounds; a composite passes with probability <= 4**-k
    Output: True if n is "probably prime", False if it is definitely composite

    From pseudocode at https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
    """
    # BUG FIX: guard small inputs.  Previously n == 1 hung forever inside
    # find_spelling (it tried to factor n - 1 = 0), and 0/negatives were
    # not handled at all.
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Express n - 1 as 2**r * d with d odd (inlined equivalent of
    # find_spelling(n), so this block is self-contained).
    r, d = 0, n - 1
    while d % 2 == 0:
        d //= 2
        r += 1
    for _check in range(k):
        a = random.randint(2, n - 1)
        x = pow(a, d, n)  # a**d % n via fast modular exponentiation
        if x == 1 or x == n - 1:
            continue  # this base is not a witness; try another round
        for _ in range(r):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            # The squaring chain never reached n - 1: a proves n composite.
            return False
    return True
23ce1db523427cb59d90dd66571f9536a6eda982 | 4,859 | py | Python | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | #MARTY I2C PI
#SCRIPT BASED ON MATS WORK
#SCRIPT PUSHED INSIDE inmoovCustom : https://github.com/MyRobotLab/inmoov/tree/master/InmoovScript
# MyRobotLab script: drives Marty's leg/eye/arm servos through an Adafruit
# 16-channel servo driver on Raspberry Pi I2C bus "1" at address 0x40.
raspi = Runtime.createAndStart("RasPi","RasPi")
adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver")
adaFruit16c.setController("RasPi","1","0x40")
#
# This part is common for both devices and creates two servo instances
# on port 3 and 8 on the Adafruit16CServoDriver
# Change the names of the servos and the pin numbers to your usage
cuisseDroite = Runtime.createAndStart("cuisseDroite", "Servo")
genouDroite = Runtime.createAndStart("genouDroite", "Servo")
chevilleDroite = Runtime.createAndStart("chevilleDroite", "Servo")
cuisseGauche = Runtime.createAndStart("cuisseGauche", "Servo")
genouGauche = Runtime.createAndStart("genouGauche", "Servo")
chevilleGauche = Runtime.createAndStart("chevilleGauche", "Servo")
eyes = Runtime.createAndStart("eyes", "Servo")
armLeft = Runtime.createAndStart("armLeft", "Servo")
armRight = Runtime.createAndStart("armRight", "Servo")
sleep(1)
# LED channel numbers on the servo driver, plus a default speed constant.
ledBlue=14
ledRed=13
ledGreen=12
vitesse=80
# Rest (neutral) position for every joint, in degrees.
cuisseDroiteRest=90
genouDroiteRest=90
chevilleDroiteRest=80
cuisseGaucheRest=97
genouGaucheRest=95
chevilleGaucheRest=90
armLeftRest=90
armRightRest=120
eyesRest=90
cuisseDroite.setRest(cuisseDroiteRest)
genouDroite.setRest(genouDroiteRest)
chevilleDroite.setRest(chevilleDroiteRest)
cuisseGauche.setRest(cuisseGaucheRest)
genouGauche.setRest(genouGaucheRest)
chevilleGauche.setRest(chevilleGaucheRest)
eyes.setRest(eyesRest)
# Map the 0-180 logical input range onto the 66-100 physical travel of the eyes.
eyes.map(0,180,66,100)
armLeft.setRest(armLeftRest)
armRight.setRest(armRightRest)
# Attach each servo to its output channel on the Adafruit driver.
cuisseDroite.attach(adaFruit16c,0)
genouDroite.attach(adaFruit16c,1)
chevilleDroite.attach(adaFruit16c,2)
cuisseGauche.attach(adaFruit16c,4)
genouGauche.attach(adaFruit16c,5)
chevilleGauche.attach(adaFruit16c,15)
eyes.attach(adaFruit16c,8)
armLeft.attach(adaFruit16c,9)
armRight.attach(adaFruit16c,10)
# -1 velocity means "move at maximum speed".
eyes.setVelocity(-1)
armLeft.setVelocity(-1)
armRight.setVelocity(-1)
# Move everything to the rest pose, give it time to settle, then release.
cuisseDroite.rest()
genouDroite.rest()
chevilleDroite.rest()
cuisseGauche.rest()
genouGauche.rest()
chevilleGauche.rest()
eyes.rest()
armLeft.rest()
armRight.rest()
sleep(2)
cuisseDroite.detach()
genouDroite.detach()
chevilleDroite.detach()
cuisseGauche.detach()
genouGauche.detach()
chevilleGauche.detach()
armLeft.detach()
armRight.detach()
# Switch all LEDs off (pin 7 plus the three colour channels).
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,0)
# Startup LED sequence. NOTE(review): red()/green()/blue()/noLed() are not
# defined in this chunk -- presumably provided by the InMoov scripts; confirm
# they are loaded before this one runs.
red()
sleep(1)
green()
sleep(1)
blue()
sleep(1)
noLed()
# Start a 100 ms clock whose "pulse" events invoke ledFunc (defined elsewhere).
led = Runtime.start("led","Clock")
led.setInterval(100)
global i
i=0
led.addListener("pulse", python.name, "ledFunc")
| 22.919811 | 98 | 0.787611 |
23ce6753d608fd795d0aebbaec8257e2469df9e3 | 7,214 | py | Python | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
] | null | null | null | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
] | null | null | null | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
] | null | null | null | from sklearn import tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import numpy as np
from generate_dataset import generate_dataset, preparing_dataset
from storeExperimentalInformations import store_experimental_informations, prepare_legends
import baseGraph
import ape_tabular
import warnings
import pickle
#from keras.models import Sequential
#from keras.layers import Dense
if __name__ == "__main__":
    # Filter the warning from matplotlib
    warnings.filterwarnings("ignore")
    # Datasets used for the experiments
    dataset_names = ["generate_circles", "generate_moons", "blood", "diabete", "generate_blobs"]# "compas", "adult", "titanic"
    # array of the models used for the experiments
    models = [GradientBoostingClassifier(n_estimators=20, learning_rate=1.0),
                RandomForestClassifier(n_estimators=20),
                #MLPClassifier(random_state=1, activation="logistic"),
                VotingClassifier(estimators=[('lr', LogisticRegression()), ('gnb', GaussianNB()), ('rc', LogisticRegression())], voting="soft"),
                MLPClassifier(random_state=1),
                RidgeClassifier()]#,
                #LogisticRegression(),
                #tree.DecisionTreeClassifier(),
                #Sequential(),
    #models=[RidgeClassifier(), MLPClassifier(random_state=1)]
    # Number of instances explained by each model on each dataset
    max_instance_to_explain = 10
    # Print explanation result
    illustrative_example = False
    """ All the variable necessaries for generating the graph results """
    # Store results inside graph if set to True
    graph = True
    verbose = False
    growing_sphere = False
    # Counterfactual search strategy: GS = growing spheres, GF = growing fields.
    if growing_sphere:
        label_graph = "growing spheres "
        growing_method = "GS"
    else:
        label_graph = ""
        growing_method = "GF"
    # Threshold for explanation method precision
    threshold_interpretability = 0.99
    linear_separability_index = 1
    interpretability_name = ['ls', 'ls regression', 'ls raw data', 'ls extend']
    #interpretability_name = ['ls log reg', 'ls raw data']
    # Initialize all the variable needed to store the result in graph
    for dataset_name in dataset_names:
        if graph: experimental_informations = store_experimental_informations(len(models), len(interpretability_name), interpretability_name, len(models))
        models_name = []
        # Store dataset inside x and y (x data and y labels), with aditional information
        x, y, class_names, regression, multiclass, continuous_features, categorical_features, \
                    categorical_values, categorical_names, transformations = generate_dataset(dataset_name)
        for nb_model, model in enumerate(models):
            model_name = type(model).__name__
            # Distinguish the logistic-activation MLP variants by name.
            if "MLP" in model_name and nb_model <=2 :
                model_name += "logistic"
            # Result paths differ depending on the counterfactual strategy used.
            if growing_sphere:
                filename = "./results/"+dataset_name+"/"+model_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
                filename_all = "./results/"+dataset_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
            else:
                filename="./results/"+dataset_name+"/"+model_name+"/"+str(threshold_interpretability)+"/sup_mat_"
                filename_all="./results/"+dataset_name+"/"+str(threshold_interpretability)+"/sup_mat_"
            if graph: experimental_informations.initialize_per_models(filename)
            models_name.append(model_name)
            # Split the dataset inside train and test set (50% each set)
            dataset, black_box, x_train, x_test, y_train, y_test = preparing_dataset(x, y, dataset_name, model)
            print("###", model_name, "training on", dataset_name, "dataset.")
            if 'Sequential' in model_name:
                # Train a neural network classifier with 2 relu and a sigmoid activation function
                black_box.add(Dense(12, input_dim=len(x_train[0]), activation='relu'))
                black_box.add(Dense(8, activation='relu'))
                black_box.add(Dense(1, activation='sigmoid'))
                black_box.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
                black_box.fit(x_train, y_train, epochs=50, batch_size=10)
            else:
                black_box = black_box.fit(x_train, y_train)
            predict = black_box.predict
            score = black_box.score
            print('### Accuracy:', score(x_test, y_test))
            cnt = 0
            explainer = ape_tabular.ApeTabularExplainer(x_train, class_names, predict, black_box.predict_proba,
                                                        continuous_features=continuous_features,
                                                        categorical_features=categorical_features, categorical_values=categorical_values,
                                                        feature_names=dataset.feature_names, categorical_names=categorical_names,
                                                        verbose=verbose, threshold_precision=threshold_interpretability,
                                                        linear_separability_index=linear_separability_index,
                                                        transformations=transformations)
            for instance_to_explain in x_test:
                if cnt == max_instance_to_explain:
                    break
                print("### Instance number:", cnt + 1, "over", max_instance_to_explain)
                print("### Models ", nb_model + 1, "over", len(models))
                print("instance to explain:", instance_to_explain)
                try:
                    precision, coverage, f2 = explainer.explain_instance(instance_to_explain,
                                                growing_method=growing_method,
                                                local_surrogate_experiment=True)
                    print("precision", precision)
                    print("coverage", coverage)
                    print("f2", f2)
                    if graph: experimental_informations.store_experiments_information_instance(precision, 'precision.csv', coverage, 'coverage.csv', f2, 'f2.csv')
                    cnt += 1
                except Exception as inst:
                    # Best-effort: skip instances the explainer cannot handle.
                    print(inst)
            if graph: experimental_informations.store_experiments_information(max_instance_to_explain, nb_model, 'precision.csv', 'coverage.csv', 'f2.csv', filename_all=filename_all)
23ceb4be40ab14b96763eb535badca57463b0253 | 8,099 | py | Python | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
] | 5 | 2021-03-17T00:33:53.000Z | 2022-03-07T18:16:25.000Z | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
] | null | null | null | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
] | 2 | 2022-01-14T03:50:10.000Z | 2022-02-14T00:45:56.000Z | import pandas as pd
import numpy as np
import data_inputs, evaluate_EWRs
#--------------------------------------------------------------------------------------------------
def sum_events(events):
    '''Return the series total, rounded to the nearest whole number.'''
    total = events.sum()
    return int(round(total, 0))
def get_frequency(events):
    '''Return the percentage of recorded (non-null) years that were event years.'''
    total_years = events.count()
    if total_years == 0:
        return 0
    percentage = int(events.sum()) / int(total_years) * 100
    return int(round(percentage, 0))
def get_average(input_events):
    '''Return the mean of the non-null entries, rounded to one decimal (0 when empty).'''
    events = input_events.dropna()
    if len(events) == 0:
        return 0
    return round(sum(events) / len(events), 1)
def initialise_summary_df_columns(input_dict):
    '''Build the summary dataframe's column index.

    Pairs every scenario key of *input_dict* with each statistical category
    returned by data_inputs.analysis(), and returns the resulting two-level
    (scenario, type) pandas MultiIndex.
    '''
    analysis = data_inputs.analysis()
    heading_pairs = tuple(
        (scenario, category)
        for scenario in input_dict
        for category in analysis
    )
    return pd.MultiIndex.from_tuples(heading_pairs, names=['scenario', 'type'])
def initialise_summary_df_rows(input_dict):
    '''Build the summary dataframe's row index.

    Walks every scenario's results, derives the EWR code from each column
    name (the part before the final underscore-separated statistic suffix),
    and returns a (gauge, planning unit, EWR) pandas MultiIndex with
    duplicates removed while preserving first-seen order.
    '''
    combined_index = []
    for scenario_results in input_dict.values():
        for site, site_results in scenario_results.items():
            for PU in site_results:
                for col in site_results[PU]:
                    if '_' in col:
                        # Drop the trailing statistic suffix (e.g. '_eventYears')
                        # and rejoin the remaining pieces as the EWR code.
                        parts = col.split('_')[:-1]
                        EWR_code = '_'.join(parts) if len(parts) > 1 else parts[0]
                    else:
                        EWR_code = col
                    row = (site, PU, EWR_code)
                    if row not in combined_index:
                        combined_index.append(row)
    return pd.MultiIndex.from_tuples(tuple(combined_index),
                                     names=['gauge', 'planning unit', 'EWR'])
def allocate(df, add_this, idx, site, PU, EWR, scenario, category):
    '''Write a single value into the summary dataframe and return it.

    Arguments:
    * df: summary DataFrame with a (gauge, planning unit, EWR) row
      MultiIndex and a (scenario, type) column MultiIndex.
    * add_this: the value to store.
    * idx: a pandas IndexSlice used to build the row/column selectors.
    * site, PU, EWR: row coordinates (gauge, planning unit, EWR code).
    * scenario, category: column coordinates (scenario, statistic type).
    Note: mutates *df* in place and returns the same object for chaining.
    '''
    df.loc[idx[[site], [PU], [EWR]], idx[scenario, category]] = add_this
    return df
def summarise(input_dict):
    '''Summarise yearly EWR results into a single dataframe.

    Ingests a dictionary of EWR pass/fail results keyed as
    {scenario: {gauge: {planning unit: {"<EWR>_<statistic>": values}}}},
    computes the summary statistic for each column, and returns one
    dataframe with a (gauge, planning unit, EWR) row index and a
    (scenario, type) column index.
    '''
    PU_items = data_inputs.get_planning_unit_info()
    EWR_table, see_notes_ewrs, undefined_ewrs, noThresh_df, no_duration, DSF_ewrs = data_inputs.get_EWR_table()
    # Initialise dataframe with multi level column heading and multi-index:
    multi_col_df = initialise_summary_df_columns(input_dict)
    index = initialise_summary_df_rows(input_dict)
    df = pd.DataFrame(index = index, columns=multi_col_df)
    # Run the analysis and add the results to the dataframe created above:
    for scenario, scenario_results in input_dict.items():
        for site, site_results in scenario_results.items():
            for PU in site_results:
                for col in site_results[PU]:
                    # Strip the trailing statistic suffix to recover the EWR code.
                    all_parts = col.split('_')
                    remove_end = all_parts[:-1]
                    if len(remove_end) > 1:
                        EWR = '_'.join(remove_end)
                    else:
                        EWR = remove_end[0]
                    idx = pd.IndexSlice
                    if ('_eventYears' in col):
                        S = sum_events(site_results[PU][col])
                        df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event years')
                        F = get_frequency(site_results[PU][col])
                        df = allocate(df, F, idx, site, PU, EWR, scenario, 'Frequency')
                        # Look up the planning unit ID, then the EWR's target frequency.
                        PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
                        EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['TF'])
                        TF = EWR_info['frequency']
                        df = allocate(df, TF, idx, site, PU, EWR, scenario, 'Target frequency')
                    elif ('_numAchieved' in col):
                        S = sum_events(site_results[PU][col])
                        df = allocate(df, S, idx, site, PU, EWR, scenario, 'Achievement count')
                        ME = get_average(site_results[PU][col])
                        df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Achievements per year')
                    elif ('_numEvents' in col):
                        S = sum_events(site_results[PU][col])
                        df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event count')
                        ME = get_average(site_results[PU][col])
                        df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Events per year')
                    elif ('_eventLength' in col):
                        # NOTE(review): S here is whatever the most recently
                        # processed '_eventYears'/'_numAchieved'/'_numEvents'
                        # column assigned, so this relies on column ordering
                        # within site_results[PU] -- confirm that ordering.
                        # get_event_length is expected to be defined elsewhere
                        # in this module.
                        EL = get_event_length(site_results[PU][col], S)
                        df = allocate(df, EL, idx, site, PU, EWR, scenario, 'Event length')
                    elif ('_totalEventDays' in col):
                        AD = get_average(site_results[PU][col])
                        df = allocate(df, AD, idx, site, PU, EWR, scenario, 'Threshold days')
                    elif ('daysBetweenEvents' in col):
                        PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
                        EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE'])
                        # count_exceedence is expected to be defined elsewhere
                        # in this module.
                        DB = count_exceedence(site_results[PU][col], EWR_info)
                        df = allocate(df, DB, idx, site, PU, EWR, scenario, 'Inter-event exceedence count')
                        # Also save the max inter-event period to the data summary for reference
                        EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE'])
                        MIE = EWR_info['max_inter-event']
                        df = allocate(df, MIE, idx, site, PU, EWR, scenario, 'Max inter event period (years)')
                    elif ('_missingDays' in col):
                        MD = sum_events(site_results[PU][col])
                        df = allocate(df, MD, idx, site, PU, EWR, scenario, 'No data days')
                    elif ('_totalPossibleDays' in col):
                        TD = sum_events(site_results[PU][col])
                        df = allocate(df, TD, idx, site, PU, EWR, scenario, 'Total days')
    return df
23cf8e518be1c460ad577e7a202dfb564e60b6c9 | 247 | py | Python | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | [
"MIT"
] | 3 | 2021-04-23T08:04:14.000Z | 2021-05-08T01:24:08.000Z | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | [
"MIT"
] | null | null | null | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | [
"MIT"
] | 1 | 2021-05-08T01:24:46.000Z | 2021-05-08T01:24:46.000Z | import pandas as pd
# NOTE(review): `df` is assumed to be a pandas DataFrame created earlier in
# the original script -- it is not defined in this snippet.
writer = pd.ExcelWriter("data.xlsx", engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', index=False)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
| 24.7 | 57 | 0.765182 |
23cf95b3c49a497e9b4fcecf5c43de957206031c | 1,564 | py | Python | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | [
"MIT"
] | 1 | 2016-05-16T23:13:47.000Z | 2016-05-16T23:13:47.000Z | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | [
"MIT"
] | 41 | 2016-01-22T00:56:14.000Z | 2016-05-12T14:38:37.000Z | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | [
"MIT"
] | null | null | null | from setuptools import setup
# Read the long description for PyPI from the project README.
with open('README.rst') as f:
    readme = f.read()
setup(
    name="dem",
    version="0.0.8",
    author="Ian Macaulay, Jeremy Opalach",
    author_email="ismacaul@gmail.com",
    url="http://www.github.com/nitehawck/dem",
    description="An agnostic library/package manager for setting up a development project environment",
    long_description=readme,
    license="MIT License",
    classifiers=[
        'Development Status :: 3 - Alpha',
        #'Development Status :: 4 - Beta',
        #'Development Status :: 5 - Production / Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Build Tools',
    ],
    packages=['dem', 'dem.dependency', 'dem.project'],
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'virtualenv',
        'PyYaml',
        'wget',
        'gitpython'
    ],
    tests_require=[
        'pyfakefs',
        'mock'
    ],
    # Installs a `dem` console command pointing at dem/__main__.py:main.
    entry_points={
        'console_scripts': [
            'dem = dem.__main__:main'
        ]
    },
)
23d1de5c4b1de87a253332547b768f99517edb24 | 326 | py | Python | lfs/core/admin.py | restless/django-lfs | 4058f9d45b416ef2e8c28a87856ea0f1550b523d | [
"BSD-3-Clause"
] | 1 | 2020-02-26T03:07:39.000Z | 2020-02-26T03:07:39.000Z | lfs/core/admin.py | mxins/django-lfs | bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8 | [
"BSD-3-Clause"
] | null | null | null | lfs/core/admin.py | mxins/django-lfs | bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8 | [
"BSD-3-Clause"
] | null | null | null | # django imports
from django.contrib import admin
# lfs imports
from lfs.core.models import Action
from lfs.core.models import ActionGroup
from lfs.core.models import Shop
from lfs.core.models import Country
# Expose the core LFS models in Django's admin interface (default ModelAdmin).
admin.site.register(Shop)
admin.site.register(Action)
admin.site.register(ActionGroup)
admin.site.register(Country)
| 23.285714 | 39 | 0.819018 |
23d1f2c4f4ea5639727ded8d5757f9d66fc0cc39 | 13,959 | py | Python | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
] | 1 | 2022-03-20T12:25:34.000Z | 2022-03-20T12:25:34.000Z | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
] | null | null | null | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""TarSync.py: Synchronize .fcstd and .tar files.
Usage: TarSync.py [OPTIONS] [DIR] ...
Recursively scans directories searching for `.fcstd`/`.FCstd` files
and synchronizes them with associated `.tar` files. The current
directory is used if no explicit directory or files are listed.
Options:
* [-n] Visit all files without doing anything. Use with [-v] option.
* [-v] Verbose mode.
Rationale:
A FreeCAD `.fcstd` file is basically a bunch of text files compressed with gzip.
For fun, the `unzip -l XYZ.fcstd` command lists the files contained in `XYZ.fcstd`.
Due to the repetitive nature of the text files contained therein, the gzip algorithm
can achieve significant overall file compression.
A `git` repository basically consists of a bunch files called blob's, where the
term "blob" stands for Binary Large Object. Each blob represents some version
of a file stored the repository. Being binary files, `.fcstd` files can be
stored inside of a git repository. However, the compressed (i.e. binary)
nature of `.fcstd` files can make the git repository storage requirements
grow at a pretty rapid rate as multiple versions of the `.fcstd` files get stored
into a git repository.
To combat the storage growth requirements, `git` uses a compression algorithm that
is applied to the repository as a whole. These compressed files are called Pack files.
Pack files are generated and updated whenever git decides to do so. Over time,
the overall git storage requirements associated with uncompressed files grows at a
slower rate than gzip compressed files. In addition, each time a git repositories
are synchronized, the over the wire protocol is via Pack file.
This program will convert a file from compressed in gzip format into simpler
uncompressed format call a `.tar` file. (`tar` stands for Tape ARchive for
back in the days of magnetic tapes.) Basically, what this program does is
manage two files in tandem, `XYZ.fcstd` and `XYZ.tar`. It does this by
comparing the modification times between the two files translates the content
of the newer file on top of the older file. When done, both files will have
the same modification time. This program works recursively over an entire
directory tree.
To use this program with a git repository, configure your `.gitignore` to
ignore `.fcstd` files in your repository by adding `*.fcstd` to your
`.gitignore` file. Run this program before doing a `git commit`
Whenever you update your git repository from a remote one, run this program
to again, to keep the `.fcstd` files in sync with any updated `.tar` files.
"""
# [Basic Git Concepts]
# (https://www.oreilly.com/library/view/version-control-with/9781449345037/ch04.html)
#
# FreeCAD forum topics:
# [https://forum.freecadweb.org/viewtopic.php?t=38353&start=30](1)
# [https://forum.freecadweb.org/viewtopic.php?f=8&t=36844a](2)
# [https://forum.freecadweb.org/viewtopic.php?t=40029&start=10](3)
# [https://forum.freecadweb.org/viewtopic.php?p=1727](4)
# [https://forum.freecadweb.org/viewtopic.php?t=8688](5)
# [https://forum.freecadweb.org/viewtopic.php?t=32521](6)
# [https://forum.freecadweb.org/viewtopic.php?t=57737)(7)
# [https://blog.lambda.cx/posts/freecad-and-git/](8)
# [https://tante.cc/2010/06/23/managing-zip-based-file-formats-in-git/](9)
from argparse import ArgumentParser
from io import BytesIO
import os
from pathlib import Path
from tarfile import TarFile, TarInfo
from tempfile import TemporaryDirectory
from typing import List, IO, Optional, Tuple
import time
from zipfile import ZIP_DEFLATED, ZipFile
# main():
def main() -> None:
    """Parse command-line arguments and run the synchronization.

    Handles the optional --unit-test flag (run the built-in self test and
    synchronize nothing) before delegating to synchronize_directories().
    """
    # Create an *argument_parser*:
    parser: ArgumentParser = ArgumentParser(
        description="Synchronize .fcstd/.tar files."
    )
    parser.add_argument("directories", metavar="DIR", type=str, nargs="*",
                        help="Directory to recursively scan")
    # BUG FIX: this option's help text previously read "verbose mode".
    parser.add_argument("-n", "--dry-run", action="store_true",
                        help="dry run mode (report without modifying files)")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    parser.add_argument("--unit-test", action="store_true",
                        help="run unit tests")
    # Parse arguments:
    arguments = parser.parse_args()
    directories: Tuple[str, ...] = tuple(arguments.directories)
    if not directories:
        # BUG FIX: the module docstring promises "The current directory is
        # used if no explicit directory or files are listed"; honour that.
        directories = (".",)
    if arguments.unit_test:
        # Run the unit test; no directories are synchronized afterwards.
        unit_test()
        directories = ()
    synchronize_directories(directories, arguments.dry_run, arguments.verbose)
# synchronize_directories():
def synchronize_directories(directory_names: Tuple[str, ...],
                            dry_run: bool, verbose: bool) -> Tuple[str, ...]:
    """Synchronize some directories.

    * Arguments:
      * directory_names (Tuple[str, ...]): directories to scan recursively.
      * dry_run (bool): when True the files are scanned but not synchronized;
        when False they are both scanned and synchronized.
      * verbose (bool): when True a summary line is printed per file.
    * Returns:
      * (Tuple[str, ...]) one summary string per .fcstd file found.
    """
    # Collect every matching file beneath the requested directories.
    # NOTE(review): FreeCAD's usual extension casing is ".FCStd"; the
    # "fcSTD" spelling here mirrors the original behaviour -- confirm.
    fcstd_paths: List[Path] = [
        found_path
        for directory_name in directory_names
        for suffix in ("fcstd", "fcSTD")
        for found_path in Path(directory_name).glob(f"**/*.{suffix}")
    ]
    summaries: List[str] = []
    for fcstd_path in fcstd_paths:
        summary: str = synchronize(fcstd_path, dry_run)
        summaries.append(summary)
        if verbose:
            print(summary)  # pragma: no unit cover
    return tuple(summaries)
# Synchronize():
def synchronize(fcstd_path: Path, dry_run: bool = False) -> str:
    """Synchronize an .fcstd file with associated .tar file.

    * Arguments:
      * fcstd_path (Path): the `.fcstd` file to synchronize.
      * dry_run (bool): when True only the summary string is produced and
        no file is written. (Default: False)
    * Returns:
      * (str) a summary of what was (or would be) done.

    Whichever of the two files is newer is unpacked on top of the older
    one; afterwards both carry the same modification time.
    """
    tar_path: Path = fcstd_path.with_suffix(".tar")
    zip_name: str = str(fcstd_path)
    tar_name: str = str(tar_path)

    def mtime(path: Path) -> int:
        """Whole-second modification time, or 0 when *path* is absent."""
        return int(path.stat().st_mtime) if path.exists() else 0

    zip_stamp: int = mtime(fcstd_path)
    tar_stamp: int = mtime(tar_path)

    if zip_stamp == tar_stamp:
        # Same (whole-second) timestamps: nothing to do.
        return f"{zip_name} in sync with {tar_name}"

    if zip_stamp > tar_stamp:
        # The .fcstd is newer: copy each zip member into a fresh .tar archive.
        summary = f"{zip_name} => {tar_name}"
        if not dry_run:
            with ZipFile(fcstd_path, "r") as zip_file, TarFile(tar_path, "w") as tar_file:
                for member_name in zip_file.namelist():
                    content: bytes = zip_file.read(member_name)
                    member = TarInfo(member_name)
                    member.size = len(content)
                    tar_file.addfile(member, BytesIO(content))
            os.utime(tar_path, (zip_stamp, zip_stamp))  # Force modification time.
    else:
        # The .tar is newer: copy each tar member into a fresh .fcstd zip.
        summary = f"{tar_name} => {zip_name}"
        if not dry_run:
            with TarFile(tar_path, "r") as tar_file:
                members: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
                with ZipFile(fcstd_path, "w", ZIP_DEFLATED) as zip_file:
                    for member in members:
                        reader: Optional[IO[bytes]] = tar_file.extractfile(member)
                        assert reader
                        zip_file.writestr(member.name, reader.read())
            os.utime(fcstd_path, (tar_stamp, tar_stamp))  # Force modification time.
    return summary
# unit_test():
def unit_test() -> None:
    """Run the unit test.

    Round-trips a ``.fcstd`` (zip) file and its associated ``.tar`` file
    through the synchronization code inside a temporary directory:

    1. Create the zip file and verify it is copied into a new tar file.
    2. Synchronize again and verify the two files are reported in sync.
    3. Rewrite the tar file (as a `git pull` would) and verify the zip
       file is rebuilt from it with matching timestamps and contents.
    """
    directory_name: str
    # Use create a temporary *directory_path* to run the tests in:
    with TemporaryDirectory() as directory_name:
        a_content: str = "a contents"
        b_content: str = "b contents"
        buffered_reader: Optional[IO[bytes]]
        c_content: str = "c contents"
        directory_path: Path = Path(directory_name)
        tar_file: TarFile
        tar_path: Path = directory_path / "test.tar"
        tar_path_name: str = str(tar_path)
        zip_file: ZipFile
        zip_name: str
        zip_path: Path = directory_path / "test.fcstd"
        zip_path_name: str = str(zip_path)
        # Create *zip_file* with a suffix of `.fcstd`:
        with ZipFile(zip_path, "w", ZIP_DEFLATED) as zip_file:
            zip_file.writestr("a", a_content)
            zip_file.writestr("b", b_content)
        assert zip_path.exists(), f"{zip_path_name=} not created"
        zip_timestamp: int = int(zip_path.stat().st_mtime)
        assert zip_timestamp > 0, f"{zip_path=} had bad timestamp."
        # Perform synchronize with a slight delay to force a different modification time:
        time.sleep(1.1)
        summaries = synchronize_directories((directory_name, ), False, False)
        assert len(summaries) == 1, "Only 1 summary expected"
        summary: str = summaries[0]
        desired_summary: str = f"{zip_path_name} => {tar_path_name}"
        assert summary == desired_summary, f"{summary} != {desired_summary}"
        assert tar_path.exists(), f"{tar_path_name=} not created"
        tar_timestamp: int = int(tar_path.stat().st_mtime)
        assert tar_timestamp == zip_timestamp, f"{zip_timestamp=} != {tar_timestamp=}"
        # Now read *tar_file* and verify that it has the correct content:
        with TarFile(tar_path, "r") as tar_file:
            tar_infos: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
            for tar_info in tar_infos:
                buffered_reader = tar_file.extractfile(tar_info)
                assert buffered_reader, f"Unable to read {tar_file=}"
                content: str = buffered_reader.read().decode("latin-1")
                found: bool = False
                if tar_info.name == "a":
                    assert content == a_content, f"'{content}' != '{a_content}'"
                    found = True
                elif tar_info.name == "b":
                    assert content == b_content, f"'{content}' != '{b_content}'"
                    found = True
                assert found, f"Unexpected tar file name {tar_info.name}"
        # Now run synchronize again and verify that nothing changed:
        summaries = synchronize_directories((directory_name, ), False, False)
        assert len(summaries) == 1, "Only one summary expected"
        summary = summaries[0]
        desired_summary = f"{str(zip_path)} in sync with {str(tar_path)}"
        assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
        zip_timestamp = int(zip_path.stat().st_mtime)
        tar_timestamp = int(tar_path.stat().st_mtime)
        assert tar_timestamp == zip_timestamp, f"timestamps {zip_timestamp=} != {tar_timestamp=}"
        # Now update *tar_file* with new content (i.e. `git pull`).:
        time.sleep(1.1)  # Use delay to force a different timestamp.
        with TarFile(tar_path, "w") as tar_file:
            tar_info = TarInfo("c")
            tar_info.size = len(c_content)
            tar_file.addfile(tar_info, BytesIO(bytes(c_content, "latin-1")))
            tar_info = TarInfo("a")
            tar_info.size = len(a_content)
            tar_file.addfile(tar_info, BytesIO(bytes(a_content, "latin-1")))
        # Verify that the timestamp changed and force a synchronize().
        new_tar_timestamp: int = int(tar_path.stat().st_mtime)
        assert new_tar_timestamp > tar_timestamp, f"{new_tar_timestamp=} <= {tar_timestamp=}"
        summary = synchronize(zip_path)
        desired_summary = f"{tar_path_name} => {zip_path_name}"
        assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
        # Verify that the *zip_path* got updated verify that the content changed:
        new_zip_timestamp: int = int(zip_path.stat().st_mtime)
        assert new_zip_timestamp == new_tar_timestamp, (
            f"{new_zip_timestamp=} != {new_tar_timestamp=}")
        with ZipFile(zip_path, "r") as zip_file:
            zip_names: Tuple[str, ...] = tuple(zip_file.namelist())
            for zip_name in zip_names:
                zip_content: str = zip_file.read(zip_name).decode("latin-1")
                # Bug fix: the original asserted the stale *buffered_reader*
                # left over from the tar loop above, which proved nothing.
                found = False
                if zip_name == "a":
                    assert zip_content == a_content, "Content mismatch"
                    found = True
                elif zip_name == "c":
                    assert zip_content == c_content, "Content mismatch"
                    found = True
                # Bug fix: this message was missing its f-prefix, so
                # {zip_name} was never interpolated into the message.
                assert found, f"Unexpected file '{zip_name}'"
# Script entry point: run the module's top-level driver when executed directly.
if __name__ == "__main__":
    main()
| 45.617647 | 98 | 0.646321 |
23d1f9c2f299c304c7761f6ac8842a0f28c28618 | 20,325 | py | Python | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | 2 | 2017-12-19T15:32:46.000Z | 2018-01-12T11:24:24.000Z | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
#from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.data.hallmark_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from scipy import stats
if __name__ == "__main__":
    # Command-line entry point; expects two positional arguments:
    # the data directory and the results directory.
    # NOTE(review): *sys* and *main* are presumably supplied by the wildcard
    # imports above -- confirm before running this script standalone.
    data_location = sys.argv[1]
    results_location = sys.argv[2]
main( data_location, results_location ) | 42.080745 | 222 | 0.667847 |
23d49ee738e43aa66d515d38988b95d1c1f66917 | 102 | py | Python | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
] | 541 | 2015-05-27T04:34:38.000Z | 2022-03-23T18:00:16.000Z | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
] | 85 | 2015-05-27T14:27:27.000Z | 2022-02-27T18:51:08.000Z | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
] | 129 | 2015-05-27T20:55:43.000Z | 2022-03-23T14:18:07.000Z |
from XXX_PROJECT_NAME_XXX.settings import * # noqa
# Override any settings required for tests here
| 20.4 | 51 | 0.794118 |
23d6f93dd725259d766c98af0f0522d89793519e | 3,808 | py | Python | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
] | null | null | null | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
] | null | null | null | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
] | 1 | 2018-06-27T14:05:43.000Z | 2018-06-27T14:05:43.000Z | from django.db import models
#from djangosphinx import SphinxSearch, SphinxRelation, SphinxQuerySet
#import djangosphinx.apis.current as sphinxapi
from advancedsearch.models import Movie, Episode, Song
from browseNet.models import Host, Path
# Create your models here.
| 39.666667 | 110 | 0.665966 |
23d7aa18934d135f4447648b4a864fe8e8b4a99c | 1,790 | py | Python | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
] | 27 | 2017-01-31T03:28:26.000Z | 2021-09-05T21:02:36.000Z | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
] | 18 | 2018-02-03T16:44:18.000Z | 2021-06-26T04:12:17.000Z | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
] | 5 | 2017-09-23T15:53:08.000Z | 2020-07-26T06:19:13.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2020, henry232323
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
| 42.619048 | 76 | 0.701117 |
23d7e7b0e05f376311c1a1430b049eda79a5c69d | 4,465 | py | Python | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
] | 101 | 2015-01-09T14:59:57.000Z | 2021-11-06T23:33:50.000Z | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
] | 48 | 2015-01-30T05:53:47.000Z | 2019-03-21T23:17:40.000Z | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
] | 50 | 2015-01-30T08:56:07.000Z | 2020-12-25T02:34:08.000Z | #
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright 200714 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
from reclass.utils.refvalue import RefValue
from reclass.defaults import PARAMETER_INTERPOLATION_SENTINELS, \
PARAMETER_INTERPOLATION_DELIMITER
from reclass.errors import UndefinedVariableError, \
IncompleteInterpolationError
import unittest
# Interpolation context shared by the RefValue tests: a mix of scalar,
# container, and nested values, including one reference ('${favcolour}')
# that resolution is expected to replace with the top-level 'favcolour'.
CONTEXT = {
    'favcolour': 'yellow',
    'motd': {
        'greeting': 'Servus!',
        'colour': '${favcolour}',
    },
    'int': 1,
    'list': [1, 2, 3],
    'dict': {1: 2, 3: 4},
    'bool': True,
}
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 34.882813 | 76 | 0.600224 |
23d88124e0abeec9041b9f813d746d7445479956 | 1,506 | py | Python | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
] | null | null | null | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
] | null | null | null | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
] | null | null | null | """
Author: Isamu Isozaki (isamu.website@gmail.com)
Description: description
Created: 2021-12-01T16:32:53.089Z
Modified: !date!
Modified By: modifier
"""
from flask import Blueprint, redirect, jsonify, url_for, request
from neuroflow.repository import create_mood, get_authorized, load_moods_from_user
from functools import wraps
from flask_cors import cross_origin
# Flask blueprint grouping the mood endpoints; every route registered on it
# is served under the '/mood' URL prefix.
blueprint = Blueprint('mood', __name__,
                      url_prefix='/mood')
| 30.12 | 82 | 0.616866 |
23d8fd0ae625c1772c3f3bb0a2d8ee76180f8da6 | 2,684 | py | Python | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
] | 2 | 2020-03-07T23:32:41.000Z | 2020-05-22T15:35:16.000Z | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
] | 1 | 2020-05-25T11:17:15.000Z | 2020-05-26T06:58:37.000Z | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
] | 2 | 2020-03-31T13:00:01.000Z | 2021-07-14T14:34:37.000Z | import logging
import time
from pathlib import Path
from configparser import ConfigParser
import boto3
from botocore.exceptions import ClientError
def create_bucket(bucket_name: str, region: str = 'us-west-2'):
    """
    Create S3 bucket
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-creating-buckets.html
    :param bucket_name: Name of S3 bucket
    :param region: AWS region where bucket is created
    :return: True if bucket is created or already exists, False if ClientError occurs
    """
    try:
        # Bug fix: boto3.client() takes *region_name*, not *region*; the old
        # keyword raised a TypeError that the ClientError handler never caught.
        s3_client = boto3.client('s3', region_name=region)
        # list buckets
        response = s3_client.list_buckets()
        # Bug fix: response['Buckets'] is a list of dicts, so testing the bare
        # bucket name against it was always False and the bucket was always
        # (re)created.  Compare against each entry's 'Name' field instead.
        existing_names = {bucket['Name'] for bucket in response['Buckets']}
        if bucket_name not in existing_names:
            if region == 'us-east-1':
                # us-east-1 rejects an explicit LocationConstraint.
                s3_client.create_bucket(Bucket=bucket_name)
            else:
                # Outside us-east-1 the target region must be given explicitly.
                s3_client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={'LocationConstraint': region})
        else:
            logging.warning(f"{bucket_name} already exist in AWS region {region}")
    except ClientError as e:
        logging.exception(e)
        return False
    return True
def upload_file(file_name: str, bucket: str, object_name: str = None, region: str = 'us-west-2'):
    """
    Upload file to S3 bucket
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
    :param file_name: Path to file including filename
    :param bucket: Bucket where file is uploaded to
    :param object_name: Name of file inside S3 bucket (defaults to *file_name*)
    :param region: AWS region where bucket is located
    :return: True if upload succeeds, False if ClientError occurs
    """
    # Default the S3 object key to the local file name.
    if object_name is None:
        object_name = file_name
    try:
        # Bug fix: boto3.client() takes *region_name*, not *region*; the old
        # keyword raised a TypeError instead of creating the client.
        s3_client = boto3.client('s3', region_name=region)
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.exception(e)
        return False
    return True
if __name__ == '__main__':
    # load config (logging level is read from app.cfg's [logging] section)
    config = ConfigParser()
    config.read('app.cfg')
    # start logging
    logging.basicConfig(level=config.get("logging", "level"), format="%(asctime)s - %(levelname)s - %(message)s")
    logging.info("Started")
    # start timer
    start_time = time.perf_counter()
    # define the local data directory bundled next to this script
    data_path = Path(__file__).parent.joinpath('data')
    # check if bucket exists (uses create_bucket's default region)
    create_bucket(bucket_name='fff-streams')
    # upload files to S3
    # NOTE(review): upload_file is annotated to take a str but receives a
    # Path here -- confirm the boto3 version in use accepts path-like input.
    upload_file(data_path.joinpath('world_happiness_2017.csv'), bucket='fff-streams', object_name='world_happiness.csv')
    upload_file(data_path.joinpath('temp_by_city_clean.csv'), bucket='fff-streams', object_name='temp_by_city.csv')
    # stop timer
    stop_time = time.perf_counter()
    logging.info(f"Uploaded files in {(stop_time - start_time):.2f} seconds")
    logging.info("Finished")
| 31.209302 | 120 | 0.688897 |
23da034ad35f31e90c8e53d6592ca43cf2dabf3f | 4,734 | py | Python | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | null | null | null | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | null | null | null | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
help_msg = '''------ aMCR f------
b!!time help f- c
b!!time ct f- c
b!!time timer [] f- c
b!!time stopwatch start f- c
b!!time stopwatch stop f- c
--------------------------------'''
no_input = '''------ a f------
c !!time help
--------------------------------'''
stop_T = False
| 41.526316 | 114 | 0.444022 |
23dbf2b9d9cefc92e0075e49e75f8a00b52cb7f9 | 4,174 | py | Python | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
] | 3 | 2020-12-04T07:29:31.000Z | 2022-01-30T10:14:41.000Z | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
] | null | null | null | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
] | 1 | 2021-03-27T06:14:43.000Z | 2021-03-27T06:14:43.000Z | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import threading
import os
from core.badges import badges
from core.helper import helper | 43.030928 | 115 | 0.598946 |
23dc4f684d9d5300357e5bf6d8fabca6e13f5585 | 8,556 | py | Python | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
] | null | null | null | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
] | null | null | null | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 5
@author: Melisa Maidana
This script runs different cropping parameters, motion correct the cropped images using reasonable motion correction parameters that were previously selected
by using the parameters_setting_motion_correction scripts, and then run source extraction (with multiple parameters) and creates figures of the cropped
image and the extracted cells from that image. The idea is to compare the resulting source extraction neural footprint for different cropping selections.
Ideally the extracted sources should be similar. If that is the case, then all the parameter setting for every step can be run in small pieces of the image,
select the best ones, and implemented lated in the complete image.
"""
import os
import sys
import psutil
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pylab as pl
# This should be in another file. Let's leave it here for now
sys.path.append('/home/sebastian/Documents/Melisa/calcium_imaging_analysis/src/')
sys.path.remove('/home/sebastian/Documents/calcium_imaging_analysis')
import src.configuration
import caiman as cm
import src.data_base_manipulation as db
from src.steps.cropping import run_cropper as main_cropping
from src.steps.motion_correction import run_motion_correction as main_motion_correction
from src.steps.source_extraction import run_source_extraction as main_source_extraction
import src.analysis.metrics as metrics
from caiman.source_extraction.cnmf.cnmf import load_CNMF
# Paths to the analysis-state spreadsheet and its backup directory.
analysis_states_database_path = 'references/analysis/analysis_states_database.xlsx'
backup_path = 'references/analysis/backup/'
#parameters_path = 'references/analysis/parameters_database.xlsx'
## Open the data base with all data
states_df = db.open_analysis_states_database()
# Recording to analyse.
# NOTE(review): *mouse* is 51565 here, but the db.select calls further down
# use the literal 56165 -- one of the two is likely a typo; confirm.
mouse = 51565
session = 1
trial = 1
is_rest = 1
# CROPPING
# Select the rows for cropping: four progressively larger crop windows,
# indexed pairwise (x1/x2 and y1/y2 shrink/grow together).
x1_crops = np.arange(200,0,-50)
x2_crops = np.arange(350,550,50)
y1_crops = np.arange(200,0,-50)
y2_crops = np.arange(350,550,50)
n_processes = psutil.cpu_count()
cm.cluster.stop_server()
# Start a new cluster
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=n_processes, # number of process to use, if you go out of memory try to reduce this one
                                                 single_thread=False)
logging.info(f'Starting cluster. n_processes = {n_processes}.')
# parameters for motion correction (previously selected; see module docstring)
parameters_motion_correction = {'motion_correct': True, 'pw_rigid': True, 'save_movie_rig': False,
                                'gSig_filt': (5, 5), 'max_shifts': (25, 25), 'niter_rig': 1,
                                'strides': (48, 48),
                                'overlaps': (96, 96), 'upsample_factor_grid': 2, 'num_frames_split': 80,
                                'max_deviation_rigid': 15,
                                'shifts_opencv': True, 'use_cuda': False, 'nonneg_movie': True, 'border_nan': 'copy'}
# parameters for source extraction: a 5x5 grid of (min_corr, min_pnr) values
gSig = 5
gSiz = 4 * gSig + 1
corr_limits = np.linspace(0.4, 0.6, 5)
pnr_limits = np.linspace(3, 7, 5)
# Version numbers produced per crop window (only the first 4 slots are used).
cropping_v = np.zeros(5)
motion_correction_v = np.zeros(5)
selected_rows = db.select(states_df,'cropping', mouse = mouse, session = session, trial = trial , is_rest = is_rest)
mouse_row = selected_rows.iloc[0]
# Run the cropper once per crop window and record the cropping version
# assigned to each run so later steps can select the matching rows.
for kk in range(4):
    cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
    parameters_cropping = {'crop_spatial': True, 'cropping_points_spatial': cropping_interval,
                           'crop_temporal': False, 'cropping_points_temporal': []}
    mouse_row = main_cropping(mouse_row, parameters_cropping)
    # NOTE(review): index 5 of the row name is presumably the cropping
    # version component of the state key -- confirm against the db schema.
    cropping_v[kk] = mouse_row.name[5]
    states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row)
    db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
# Reload the database so the freshly saved cropping rows are visible.
states_df = db.open_analysis_states_database()
# Motion-correct each cropped version and record its motion-correction version.
for kk in range(4):
    # NOTE(review): hard-coded mouse id 56165 disagrees with *mouse* = 51565
    # defined above -- confirm which id is intended.
    selected_rows = db.select(states_df, 'motion_correction', 56165, cropping_v = cropping_v[kk])
    mouse_row = selected_rows.iloc[0]
    mouse_row_new = main_motion_correction(mouse_row, parameters_motion_correction, dview)
    mouse_row_new = metrics.get_metrics_motion_correction(mouse_row_new, crispness=True)
    states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
    db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
    motion_correction_v[kk]=mouse_row_new.name[6]
# Reload the database so the motion-correction rows are visible.
states_df = db.open_analysis_states_database()
# Run source extraction for every (min_corr, min_pnr) combination on each
# of the four cropped/motion-corrected versions.
for ii in range(corr_limits.shape[0]):
    for jj in range(pnr_limits.shape[0]):
        parameters_source_extraction = {'session_wise': False, 'fr': 10, 'decay_time': 0.1,
                                        'min_corr': corr_limits[ii],
                                        'min_pnr': pnr_limits[jj], 'p': 1, 'K': None, 'gSig': (gSig, gSig),
                                        'gSiz': (gSiz, gSiz),
                                        'merge_thr': 0.7, 'rf': 60, 'stride': 30, 'tsub': 1, 'ssub': 2, 'p_tsub': 1,
                                        'p_ssub': 2, 'low_rank_background': None, 'nb': 0, 'nb_patch': 0,
                                        'ssub_B': 2,
                                        'init_iter': 2, 'ring_size_factor': 1.4, 'method_init': 'corr_pnr',
                                        'method_deconvolution': 'oasis', 'update_background_components': True,
                                        'center_psf': True, 'border_pix': 0, 'normalize_init': False,
                                        'del_duplicates': True, 'only_init': True}
        for kk in range(4):
            # NOTE(review): hard-coded mouse id 56165 again; see note above.
            selected_rows = db.select(states_df, 'source_extraction', 56165, cropping_v = cropping_v[kk])
            mouse_row = selected_rows.iloc[0]
            mouse_row_new = main_source_extraction(mouse_row, parameters_source_extraction, dview)
            states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
            db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path=backup_path)
# Reload the database so the source-extraction rows are visible.
states_df = db.open_analysis_states_database()
# For every parameter combination, build a 4x3 figure: one row per crop
# window with (decoded frame + crop rectangle, cropped frame, correlation
# image with extracted contours), then save it to disk.
for ii in range(corr_limits.shape[0]):
    for jj in range(pnr_limits.shape[0]):
        figure, axes = plt.subplots(4, 3, figsize=(50, 30))
        # Source-extraction versions were assigned sequentially above,
        # starting at 1, in the same (ii, jj) iteration order.
        version = ii * pnr_limits.shape[0] + jj +1
        for kk in range(4):
            selected_rows = db.select(states_df, 'component_evaluation', 56165, cropping_v=cropping_v[kk], motion_correction_v = 1, source_extraction_v= version)
            mouse_row = selected_rows.iloc[0]
            # NOTE(review): eval() on database-stored strings is fragile and
            # unsafe if the spreadsheet is not fully trusted.
            decoding_output = mouse_row['decoding_output']
            decoded_file = eval(decoding_output)['main']
            m = cm.load(decoded_file)
            # Column 0: first decoded frame with the crop rectangle overlaid.
            axes[kk,0].imshow(m[0, :, :], cmap='gray')
            cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
            [x_, _x, y_, _y] = cropping_interval
            rect = Rectangle((y_, x_), _y - y_, _x - x_, fill=False, color='r', linestyle='--', linewidth = 3)
            axes[kk,0].add_patch(rect)
            # Column 1: first frame of the cropped movie.
            output_cropping = mouse_row['cropping_output']
            cropped_file = eval(output_cropping)['main']
            m = cm.load(cropped_file)
            axes[kk,1].imshow(m[0, :, :], cmap='gray')
            # Column 2: correlation image with the extracted spatial contours.
            output_source_extraction = eval(mouse_row['source_extraction_output'])
            cnm_file_path = output_source_extraction['main']
            cnm = load_CNMF(db.get_file(cnm_file_path))
            corr_path = output_source_extraction['meta']['corr']['main']
            cn_filter = np.load(db.get_file(corr_path))
            axes[kk, 2].imshow(cn_filter)
            coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
            for c in coordinates:
                v = c['coordinates']
                c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                             np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
                axes[kk, 2].plot(*v.T, c='w',linewidth=3)
        # NOTE(review): figures are saved but never closed (plt.close), so
        # memory grows across the 25 parameter combinations.
        fig_dir ='/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/cropping/meta/figures/cropping_inicialization/'
        fig_name = fig_dir + db.create_file_name(2,mouse_row.name) + '_corr_' + f'{round(corr_limits[ii],1)}' + '_pnr_' + f'{round(pnr_limits[jj])}' + '.png'
        figure.savefig(fig_name)
| 50.329412 | 161 | 0.661524 |
23dd6ab36e5a83840094cc404aedad771f6f9076 | 1,676 | py | Python | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
] | 1 | 2021-04-15T13:05:03.000Z | 2021-04-15T13:05:03.000Z | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
] | 1 | 2018-12-11T13:41:45.000Z | 2018-12-11T14:15:15.000Z | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
] | 1 | 2020-01-01T21:03:02.000Z | 2020-01-01T21:03:02.000Z |
import pandas as pd
import json
import urllib2
def download_nordpool(limit, output_file):
    '''
    Download the available nordpool data from www.energidataservice.dk and
    save it locally as a CSV file.
    limit: Int, the number of maximum rows of data to download
    output_file: Str, the name of the output file
    '''
    base_url = 'https://api.energidataservice.dk/datastore_search?resource_id=8bd7a37f-1098-4643-865a-01eb55c62d21&limit='
    request_url = base_url + str(limit)
    print("downloading nordpool data ...")
    response = urllib2.urlopen(request_url)
    payload = json.loads(response.read())
    # The records sit two dictionaries deep in the API response.
    frame = pd.DataFrame.from_dict(payload['result']['records'])
    frame.to_csv(output_file)
    print("nordpool data has been downloaded and saved")
def download_dayforward(limit, output_file):
    '''
    Download the available day ahead spotprices in DK and neighboring
    countries from www.energidataservice.dk and save them as a CSV file.
    limit: Int, the number of maximum rows of data to download
    output_file: Str, the name of the output file
    '''
    base_url = 'https://api.energidataservice.dk/datastore_search?resource_id=c86859d2-942e-4029-aec1-32d56f1a2e5d&limit='
    request_url = base_url + str(limit)
    print("downloading day forward data ...")
    response = urllib2.urlopen(request_url)
    payload = json.loads(response.read())
    # The records sit two dictionaries deep in the API response.
    dayforward_df = pd.DataFrame.from_dict(payload['result']['records'])
    dayforward_df.to_csv(output_file)
    print("day forward data has been downloaded and saved")
if __name__ == '__main__':
    # Command-line entry point: fetch both datasets (up to 10M rows each)
    # and write them to CSV files in the working directory.
    print("connecting with the API")
    download_nordpool(10000000, 'nordpool_data.csv')
download_dayforward(10000000, 'dayforward_data.csv') | 37.244444 | 127 | 0.7679 |
23df352466c71a2286ba6b66bb76f8b89e0ba1ff | 1,873 | py | Python | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
] | 1 | 2022-03-16T22:20:12.000Z | 2022-03-16T22:20:12.000Z | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
] | 2 | 2022-03-22T23:34:38.000Z | 2022-03-24T17:35:53.000Z | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
] | null | null | null | from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
| 31.745763 | 75 | 0.538708 |
23df5a83027200920168a92b6eedd813725d6db4 | 2,608 | py | Python | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import *
| 29.636364 | 107 | 0.71434 |
23e0261a193fa6f445356c45a1780f878354e500 | 157 | py | Python | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
] | null | null | null | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
] | null | null | null | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
] | null | null | null | # -*- encoding:utf-8 -*- | 11.214286 | 24 | 0.687898 |
23e0459ade4fcfb40deaedb8969b8ab2785c8442 | 1,801 | py | Python | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | null | null | null | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | 18 | 2016-03-30T08:43:45.000Z | 2017-03-27T11:14:17.000Z | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | 2 | 2016-03-06T20:38:06.000Z | 2019-09-10T14:46:35.000Z | '''
Created on 19 de ene. de 2016
@author: david
'''
import time
| 20.465909 | 110 | 0.494725 |
23e397535cfd73ea5daf63a3a67cc1be6978c490 | 29,136 | py | Python | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
] | 6 | 2019-12-31T17:25:14.000Z | 2021-12-15T14:30:05.000Z | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
] | 17 | 2020-01-03T00:03:30.000Z | 2022-03-14T19:17:50.000Z | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
] | 6 | 2020-06-24T03:23:37.000Z | 2021-12-17T14:20:46.000Z | import asyncio
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import Union
try:
import simplejson as json
except ImportError:
import json
import websockets
from valr_python.enum import AccountEvent
from valr_python.enum import CurrencyPair
from valr_python.enum import MessageFeedType
from valr_python.enum import TradeEvent
from valr_python.enum import WebSocketType
from valr_python.exceptions import HookNotFoundError
from valr_python.exceptions import WebSocketAPIException
from valr_python.utils import JSONType
from valr_python.utils import _get_valr_headers
__all__ = ('WebSocketClient',)
| 30.864407 | 120 | 0.512699 |
23e4cf7747f358650ecc3229b90396e47c6f5137 | 110 | py | Python | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
] | 1 | 2021-06-23T08:13:15.000Z | 2021-06-23T08:13:15.000Z | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
] | null | null | null | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
] | null | null | null | from enum import Enum
| 15.714286 | 35 | 0.736364 |
23e64fd0f143ca1fd055ab9e432dcd782eb331eb | 2,215 | py | Python | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
] | null | null | null | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
] | null | null | null | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from database import Database
from rafflecollector import RaffleCollector
import os
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import schedule
import time
if __name__ == "__main__":
    # Run the raffle check once at startup.
    e = Emailer()
    # Re-run the Emailer constructor every day at 22:00.
    # NOTE(review): scheduling __init__ suggests the class does its work in
    # the constructor -- confirm; a dedicated run() method would be clearer.
    schedule.every().day.at("22:00").do(e.__init__)
    # Poll once per second so `schedule` can fire any pending jobs.
    while True:
        schedule.run_pending()
        time.sleep(1)
| 31.642857 | 91 | 0.621219 |
23e79af618c8a287421e1a5d39cd45ed069fab6f | 4,391 | py | Python | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | null | null | null | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | 3 | 2021-04-29T22:57:09.000Z | 2021-05-03T15:32:39.000Z | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | 1 | 2021-08-29T09:53:09.000Z | 2021-08-29T09:53:09.000Z | import os
import json
import shutil
import time
from pathlib import Path
from sys import platform
# TODO: (stackoverflow.com/question/17136514/how-to-get-3rd-party-cookies)
# stackoverflow.com/questions/22200134/make-selenium-grab-all-cookies, add the selenium, phantomjs part to catch ALL cookies
# TODO: Maybe save cookies to global variable to compare them in another function without saving them?
'''
loading more than one addon for firefox to use with selenium:
extensions = [
'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi',
'',
''
]
for extension in extensions:
driver.install_addon(extension_dir + extension, temporary=True)
'''
def load_with_addon(driver, websites):
"""This method will load all websites with 'i don't care about cookies' preinstalled.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies with addon...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/with_addon/')) != 0:
shutil.rmtree('data/save/with_addon/')
os.mkdir('data/save/with_addon/')
print('saving cookies in firefox with addons ...')
# the extension directory needs to be the one of your local machine
# linux
if platform == "linux":
extension_dir = os.getenv("HOME") + "/.mozilla/firefox/7ppp44j6.default-release/extensions/"
driver.install_addon(extension_dir + 'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi', temporary=True)
# windows
if platform == "win32":
extension_dir = str(
Path.home()) + "/AppData/Roaming/Mozilla/Firefox/Profiles/shdzeteb.default-release/extensions/"
print(extension_dir)
driver.install_addon(extension_dir + 'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi', temporary=True)
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
cookies_addons = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_addons:
cookies_dict = cookie
print('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/with_addon/%s/' % name):
os.mkdir('data/save/with_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def load_without_addon(driver, websites):
"""This method will load all websites on a vanilla firefox version.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies in vanilla...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/without_addon/')) != 0:
shutil.rmtree('data/save/without_addon/')
os.mkdir('data/save/without_addon')
print('saving cookies in firefox without addons ...')
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
time.sleep(5)
cookies_vanilla = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_vanilla:
cookies_dict = cookie
print('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/without_addon/%s/' % name):
os.mkdir('data/save/without_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def close_driver_session(driver):
"""This method will end the driver session and close all windows. Driver needs to be initialized again afterwards"""
driver.quit()
| 35.128 | 125 | 0.662491 |
23e9be3b6c2cc45718ae9d2bebea994634002d02 | 925 | py | Python | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
] | 3 | 2021-11-08T05:23:08.000Z | 2021-11-08T09:46:51.000Z | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
] | null | null | null | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
] | 2 | 2021-11-08T05:23:12.000Z | 2021-11-16T01:16:35.000Z | from flask import current_app
from ..internals.database.database import get_cursor
| 35.576923 | 144 | 0.68973 |
23ecadb81a5ec6b2f9e0c728e946a750d6f1f36e | 93 | py | Python | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-11-21T04:50:57.000Z | 2018-11-21T04:50:57.000Z | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | """ Tank shapes package for Guns.
This init file marks the package as a usable module.
"""
| 15.5 | 52 | 0.709677 |
23ece7de650d89db697b4f1ccb8b587a85d078b4 | 99 | py | Python | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
] | 5 | 2021-12-01T21:44:22.000Z | 2021-12-09T19:11:21.000Z | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
] | null | null | null | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
] | 3 | 2021-12-01T21:41:20.000Z | 2021-12-03T14:17:24.000Z |
if __name__ == '__main__':
main()
| 14.142857 | 46 | 0.565657 |
23ed67548a141b4172f60911a628a2325339dc44 | 4,468 | py | Python | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
] | 1 | 2017-06-08T11:41:00.000Z | 2017-06-08T11:41:00.000Z | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
] | null | null | null | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
] | null | null | null | import feedparser
import vlc
import argparse
import sys
import time
import curses
import wget
if __name__ == '__main__':
main()
| 30.813793 | 75 | 0.57744 |
23edadd6c1315ae3bef9cd266a3d92857c911930 | 229 | py | Python | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
] | null | null | null | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
] | null | null | null | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience wrapper for running tfbs_footprinter directly from source tree."""
from tfbs_footprinter.tfbs_footprinter import main
if __name__ == '__main__':
main()
| 17.615385 | 81 | 0.694323 |
23ee7f3b59a96672f837686dde3019287c34f061 | 2,573 | py | Python | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 2 | 2019-12-05T07:57:14.000Z | 2019-12-05T13:02:08.000Z | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 31 | 2019-12-05T15:14:47.000Z | 2020-12-04T14:37:46.000Z | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
] | 1 | 2020-12-04T13:40:11.000Z | 2020-12-04T13:40:11.000Z | import shap
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
from metalfi.src.data.meta.importance.featureimportance import FeatureImportance
| 34.306667 | 93 | 0.629227 |
23f06c21c858b67e6817ed29322c8b3b1f30395d | 2,281 | py | Python | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
] | null | null | null | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
] | null | null | null | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
] | null | null | null | """
Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
Changed by Mohammad Tayseer to add CSS classes to table
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.extensions.tables import TableProcessor
from markdown.util import etree
def makeExtension(*args, **kwargs):
return BootstrapTableExtension(*args, **kwargs)
| 35.092308 | 88 | 0.621657 |
23f14aa8cb681028e47a2e9707262f0b7d8d18f4 | 6,320 | py | Python | NAS/single-path-one-shot/src/MNIST/test.py | naviocean/SimpleCVReproduction | 61b43e3583977f42e6f91ef176ec5e1701e98d33 | [
"Apache-2.0"
] | 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | NAS/single-path-one-shot/src/MNIST/test.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | [
"Apache-2.0"
] | 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | NAS/single-path-one-shot/src/MNIST/test.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | [
"Apache-2.0"
] | 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z | import argparse
import json
import logging
import os
import sys
import time
import cv2
import numpy as np
import PIL
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from angle import generate_angle
# from cifar100_dataset import get_dataset
from slimmable_resnet20 import mutableResNet20
from utils import (ArchLoader, AvgrageMeter, CrossEntropyLabelSmooth, accuracy,
get_lastest_model, get_parameters, save_checkpoint, bn_calibration_init)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if __name__ == "__main__":
main()
| 31.287129 | 91 | 0.612025 |
23f14e1f84f7c3d2bff9dca3e337c8e7cd4c2c5e | 3,231 | py | Python | examples/pixel/plot_0_image.py | DeepanshS/csdmpy | ae8d20dd09f217bb462af67a3145bb6fcb025def | [
"BSD-3-Clause"
] | 7 | 2020-01-04T20:46:08.000Z | 2021-05-26T21:09:25.000Z | examples/pixel/plot_0_image.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
] | 16 | 2021-06-09T06:28:27.000Z | 2022-03-01T18:12:33.000Z | examples/pixel/plot_0_image.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
] | 1 | 2020-01-03T17:04:16.000Z | 2020-01-03T17:04:16.000Z | # -*- coding: utf-8 -*-
"""
Image, 2D{3} datasets
^^^^^^^^^^^^^^^^^^^^^
"""
# %%
# The 2D{3} dataset is two dimensional, :math:`d=2`, with
# a single three-component dependent variable, :math:`p=3`.
# A common example from this subset is perhaps the RGB image dataset.
# An RGB image dataset has two spatial dimensions and one dependent
# variable with three components corresponding to the red, green, and blue color
# intensities.
#
# The following is an example of an RGB image dataset.
import csdmpy as cp
filename = "https://osu.box.com/shared/static/vdxdaitsa9dq45x8nk7l7h25qrw2baxt.csdf"
ImageData = cp.load(filename)
print(ImageData.data_structure)
# %%
# The tuple of the dimension and dependent variable instances from
# ``ImageData`` instance are
x = ImageData.dimensions
y = ImageData.dependent_variables
# %%
# respectively. There are two dimensions, and the coordinates along each
# dimension are
print("x0 =", x[0].coordinates[:10])
# %%
print("x1 =", x[1].coordinates[:10])
# %%
# respectively, where only first ten coordinates along each dimension is displayed.
# %%
# The dependent variable is the image data, as also seen from the
# :attr:`~csdmpy.DependentVariable.quantity_type` attribute
# of the corresponding :ref:`dv_api` instance.
print(y[0].quantity_type)
# %%
# From the value `pixel_3`, `pixel` indicates a pixel data, while `3`
# indicates the number of pixel components.
# %%
# As usual, the components of the dependent variable are accessed through
# the :attr:`~csdmpy.DependentVariable.components` attribute.
# To access the individual components, use the appropriate array indexing.
# For example,
print(y[0].components[0])
# %%
# will return an array with the first component of all data values. In this case,
# the components correspond to the red color intensity, also indicated by the
# corresponding component label. The label corresponding to
# the component array is accessed through the
# :attr:`~csdmpy.DependentVariable.component_labels`
# attribute with appropriate indexing, that is
print(y[0].component_labels[0])
# %%
# To avoid displaying larger output, as an example, we print the shape of
# each component array (using Numpy array's `shape` attribute) for the three
# components along with their respective labels.
# %%
print(y[0].component_labels[0], y[0].components[0].shape)
# %%
print(y[0].component_labels[1], y[0].components[1].shape)
# %%
print(y[0].component_labels[2], y[0].components[2].shape)
# %%
# The shape (768, 1024) corresponds to the number of points from the each
# dimension instances.
# %%
# .. note::
# In this example, since there is only one dependent variable, the index
# of `y` is set to zero, which is ``y[0]``. The indices for the
# :attr:`~csdmpy.DependentVariable.components` and the
# :attr:`~csdmpy.DependentVariable.component_labels`,
# on the other hand, spans through the number of components.
# %%
# Now, to visualize the dataset as an RGB image,
import matplotlib.pyplot as plt
ax = plt.subplot(projection="csdm")
ax.imshow(ImageData, origin="upper")
plt.tight_layout()
plt.show()
| 31.990099 | 85 | 0.701021 |
23f1798fb64ee4b5169a0bf90b985ef75feb7390 | 76 | py | Python | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | from .procedure import (
CrossCouplingBlueprint,
GenericBlueprint
)
| 15.2 | 27 | 0.75 |
23f2b2f6f97b3acdf979b2b92b12fa1475acc97b | 141 | py | Python | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
] | null | null | null | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
] | null | null | null | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
] | null | null | null | s = float(input('Digite o valor do salrio: R$ '))
p = s + (s * 15 / 100)
print('o salrio de R$ {} com mais 15% ficar {:.2f}'.format(s, p)) | 47 | 67 | 0.58156 |
23f63778d171661ca3379def8f64e54d84bf8d22 | 2,868 | py | Python | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
] | 1 | 2022-03-21T01:51:44.000Z | 2022-03-21T01:51:44.000Z | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
] | null | null | null | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
] | null | null | null | import codecs
import sqlite3
import json
from fnmatch import fnmatch
from abc import ABC, abstractmethod
| 32.224719 | 132 | 0.709902 |
23f755b41ceb13c51fd1941958609398bf18c29d | 3,615 | py | Python | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
] | null | null | null | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
] | 3 | 2020-02-11T23:47:00.000Z | 2021-06-10T21:13:10.000Z | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
] | null | null | null | # Create your models here.
import datetime
from django.db import models
from rest_framework.compat import MinValueValidator
| 41.079545 | 93 | 0.458645 |
23faddb427ccf2b4a51011515cdd3a2b5edefbe2 | 1,211 | py | Python | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakotathon | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 8 | 2019-09-11T12:59:57.000Z | 2021-08-11T16:31:58.000Z | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 66 | 2015-04-06T17:11:21.000Z | 2019-02-03T18:09:52.000Z | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 5 | 2015-03-24T22:39:34.000Z | 2018-04-21T12:14:05.000Z | """An example of using Dakota as a component with PyMT.
This example requires a WMT executor with PyMT installed, as well as
the CSDMS Dakota interface and FrostNumberModel installed as
components.
"""
import os
from pymt.components import MultidimParameterStudy, FrostNumberModel
from dakotathon.utils import configure_parameters
c, d = FrostNumberModel(), MultidimParameterStudy()
parameters = {
"component": type(c).__name__,
"descriptors": ["T_air_min", "T_air_max"],
"partitions": [3, 3],
"lower_bounds": [-20.0, 5.0],
"upper_bounds": [-5.0, 20.0],
"response_descriptors": [
"frostnumber__air",
"frostnumber__surface",
"frostnumber__stefan",
],
"response_statistics": ["median", "median", "median"],
}
parameters, substitutes = configure_parameters(parameters)
parameters["run_directory"] = c.setup(os.getcwd(), **substitutes)
cfg_file = "frostnumber_model.cfg" # get from pymt eventually
parameters["initialize_args"] = cfg_file
dtmpl_file = cfg_file + ".dtmpl"
os.rename(cfg_file, dtmpl_file)
parameters["template_file"] = dtmpl_file
d.setup(parameters["run_directory"], **parameters)
d.initialize("dakota.yaml")
d.update()
d.finalize()
| 27.522727 | 68 | 0.721718 |
23fdbc64ade39f6aaca5e42eb2790bc7ac6b2823 | 4,427 | py | Python | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
] | 1 | 2020-01-17T07:54:02.000Z | 2020-01-17T07:54:02.000Z | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
] | null | null | null | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
] | null | null | null | from callback import ValidationHistory
from dataloader import Dataloader
from normalizer import Normalizer
import tensorflow as tf
import numpy as np
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument(
"-f",
"--folder",
required=True,
help="Path to directory containing images")
# Optional arguments.
parser.add_argument(
"-s",
"--input_size",
type=int,
default=224,
help="Input image size.")
parser.add_argument(
"-b",
"--batch_size",
type=int,
default=2,
help="Number of images in a training batch.")
parser.add_argument(
"-e",
"--epochs",
type=int,
default=100,
help="Number of training epochs.")
parser.add_argument(
"-seed",
"--seed",
type=int,
default=42,
help="Seed for data reproducing.")
parser.add_argument(
"-n",
"--n_folds",
type=int,
default=5,
help="Number of folds for CV Training")
args = parser.parse_args()
for fold_idx in range(args.n_folds):
train(args, fold_idx)
| 33.793893 | 87 | 0.5733 |
23fe13301d5fe663179594a9c1c64fdce727026b | 1,354 | py | Python | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
] | 52 | 2016-08-04T02:15:52.000Z | 2021-12-20T20:33:07.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
] | 3 | 2019-11-15T15:13:51.000Z | 2020-11-25T10:42:34.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
] | 9 | 2019-03-06T04:21:29.000Z | 2021-08-16T02:28:33.000Z | # coding=utf-8
from orgmode_entry import OrgmodeEntry
entry = u'#A Etwas machen:: DL: Morgen S: Heute Ausstellung am 23.09.2014 12:00 oder am Montag bzw. am 22.10 13:00 sollte man anschauen. '
org = OrgmodeEntry()
# Use an absolute path
org.inbox_file = '/Users/Alex/Documents/Planung/Planning/Inbox.org'
org.delimiter = ':: ' # tag to separate the head from the body of the entry
org.heading_suffix = "\n* " # depth of entry
org.use_priority_tags = True # use priority tags: #b => [#B]
org.priority_tag = '#' # tag that marks a priority value
org.add_creation_date = True # add a creation date
org.replace_absolute_dates = True # convert absolute dates like 01.10 15:00 into orgmode dates => <2016-10-01 Sun 15:00>
org.replace_relative_dates = True # convert relative dates like monday or tomorrow into orgmode dates
# Convert a schedule pattern into an org scheduled date
org.convert_scheduled = True # convert sche
org.scheduled_pattern = "S: "
# Convert a deadline pattern into an org deadline
org.convert_deadlines = True
org.deadline_pattern = "DL: "
org.smart_line_break = True # convert a pattern into a linebreak
org.line_break_pattern = "\s\s" # two spaces
# Cleanup spaces (double, leading, and trailing)
org.cleanup_spaces = True
entry = 'TODO ' + entry
message = org.add_entry(entry).encode('utf-8')
print(message)
| 33.02439 | 140 | 0.739291 |
23fead2b5260640c347d0b505721cb2630c98560 | 407 | py | Python | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | null | null | null | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | 70 | 2017-06-01T11:02:51.000Z | 2017-06-30T00:35:32.000Z | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | null | null | null | import gzip
import bz2
import lzma
s = b'witch which has which witches wrist watch'
with open('2.txt', 'wb') as f: f.write(s)
with gzip.open('2.txt.gz', 'wb') as f: f.write(s)
with bz2.open('2.txt.bz2', 'wb') as f: f.write(s)
with lzma.open('2.txt.xz', 'wb') as f: f.write(s)
print('txt', len(s))
print('gz ', len(gzip.compress(s)))
print('bz2', len(bz2.compress(s)))
print('xz ', len(lzma.compress(s)))
| 25.4375 | 49 | 0.641278 |
23ff90db58dc31d3acc655b347ff8c32734fce8f | 751 | py | Python | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | import pytz
from datetime import datetime
MEETING_HOURS = range(6, 23) # meet from 6 - 22 max
TIMEZONES = set(pytz.all_timezones)
def within_schedule(utc, *timezones):
"""Receive a utc datetime and one or more timezones and check if
they are all within schedule (MEETING_HOURS)"""
times = []
timezone_list = list(timezones)
for zone in timezone_list:
if zone not in TIMEZONES:
raise ValueError
tz = pytz.timezone(zone)
times.append(pytz.utc.localize(utc).astimezone(tz))
boolean = []
for time in times:
if time.hour in MEETING_HOURS:
boolean.append(True)
else:
boolean.append(False)
return all(boolean)
pass | 25.033333 | 68 | 0.624501 |
9b000540f0f753d3e1bc63731ed866572a4a795c | 450 | py | Python | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
] | null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
] | null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
] | null | null | null | from os import sys, environ
from tracker.__main__ import args
# Name of the file to save kernel versions json
DB_FILE_NAME = "data.json"
# By default looks up in env for api and chat id or just put your stuff in here
# directly if you prefer it that way
BOT_API = environ.get("BOT_API")
CHAT_ID = environ.get("CHAT_ID")
if args.notify:
if (BOT_API and CHAT_ID) is None:
print("Either BOT_API or CHAT_ID is empty!")
sys.exit(1)
| 28.125 | 79 | 0.717778 |
9b019d69f7dc7afa332c3b317d1c035ebf327b40 | 94 | py | Python | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
] | null | null | null | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
] | 7 | 2020-06-05T21:03:39.000Z | 2021-09-22T18:33:33.000Z | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.666667 | 33 | 0.765957 |
9b02acdde4f64a083c7db9498cddd0e187f2c1df | 615 | py | Python | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
] | 189 | 2019-11-15T11:33:50.000Z | 2022-03-27T08:23:35.000Z | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
] | 3 | 2020-05-29T03:26:32.000Z | 2021-07-11T15:46:07.000Z | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
] | 39 | 2019-11-16T04:02:06.000Z | 2022-03-21T04:18:14.000Z | # test_utils.py overwrite(-a !)
import pytest
import pandas as pd
import datetime
from utils import is_working_day, load_data
| 21.964286 | 59 | 0.747967 |
9b02d42862a5d0797afc71d43094512a70c96510 | 3,302 | py | Python | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import json
import subprocess
from CommonServerPython import *
TWIST_EXE = '/dnstwist/dnstwist.py'
if demisto.command() == 'dnstwist-domain-variations':
KEYS_TO_MD = ["whois_updated", "whois_created", "dns_a", "dns_mx", "dns_ns"]
DOMAIN = demisto.args()['domain']
LIMIT = int(demisto.args()['limit'])
WHOIS = demisto.args().get('whois')
dnstwist_result = get_dnstwist_result(DOMAIN, WHOIS == 'yes')
new_result = get_domain_to_info_map(dnstwist_result)
md = tableToMarkdown('dnstwist for domain - ' + DOMAIN, new_result,
headers=["domain-name", "IP Address", "dns_mx", "dns_ns", "whois_updated", "whois_created"])
domain_context = new_result[0] # The requested domain for variations
domains_context_list = new_result[1:LIMIT + 1] # The variations domains
domains = []
for item in domains_context_list:
temp = {"Name": item["domain-name"]}
if "IP Address" in item:
temp["IP"] = item["IP Address"]
if "dns_mx" in item:
temp["DNS-MX"] = item["dns_mx"]
if "dns_ns" in item:
temp["DNS-NS"] = item["dns_ns"]
if "whois_updated" in item:
temp["WhoisUpdated"] = item["whois_updated"]
if "whois_created" in item:
temp["WhoisCreated"] = item["whois_created"]
domains.append(temp)
ec = {"Domains": domains}
if "domain-name" in domain_context:
ec["Name"] = domain_context["domain-name"]
if "IP Address" in domain_context:
ec["IP"] = domain_context["IP Address"]
if "dns_mx" in domain_context:
ec["DNS-MX"] = domain_context["dns_mx"]
if "dns_ns" in domain_context:
ec["DNS-NS"] = domain_context["dns_ns"]
if "whois_updated" in domain_context:
ec["WhoisUpdated"] = domain_context["whois_updated"]
if "whois_created" in domain_context:
ec["WhoisCreated"] = domain_context["whois_created"]
entry_result = {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': dnstwist_result,
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'dnstwist.Domain(val.Name == obj.Name)': ec}
}
demisto.results(entry_result)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
subprocess.check_output([TWIST_EXE, '-h'], stderr=subprocess.STDOUT)
demisto.results('ok')
sys.exit(0)
| 35.891304 | 117 | 0.58934 |
9b036ad8294f9db8fecca4b31663a18176793718 | 595 | py | Python | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import inspect
def caller(frame=2):
"""
Returns the object that called the object that called this function.
e.g. A calls B. B calls calling_object. calling object returns A.
:param frame: 0 represents this function
1 represents the caller of this function (e.g. B)
2 (default) represents the caller of B
:return: object reference
"""
stack = inspect.stack()
try:
obj = stack[frame][0].f_locals[u'self']
except KeyError:
pass # Not called from an object
else:
return obj
| 24.791667 | 72 | 0.616807 |
9b049ff801a11852ac7c1f7e34a2e069aca68527 | 3,395 | py | Python | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 6 | 2018-08-10T17:11:10.000Z | 2020-04-29T07:05:36.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 58 | 2018-08-13T08:36:08.000Z | 2021-07-07T08:32:52.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 7 | 2018-08-10T12:53:18.000Z | 2021-11-08T05:15:42.000Z | # pylint: disable=missing-docstring,protected-access
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from icetea_lib.ResourceProvider.ResourceRequirements import ResourceRequirements
if __name__ == '__main__':
unittest.main()
| 38.146067 | 90 | 0.648895 |
9b04ad53449f706663e52db825a5918226304aab | 321 | py | Python | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
] | 1 | 2016-05-26T08:18:36.000Z | 2016-05-26T08:18:36.000Z | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
] | null | null | null | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
] | null | null | null | #!/bin/python3
import sys
prev = ''
cnt = 0
for x in sys.stdin.readlines():
q, w = x.split('\t')[0], int(x.split('\t')[1])
if (prev == q):
cnt += 1
else:
if (cnt > 0):
print(prev + '\t' + str(cnt))
prev = q
cnt = w
if (cnt > 0):
print(prev + '\t' + str(cnt))
| 17.833333 | 50 | 0.433022 |
9b076c62dfd81be9905f0f82e953e93e7d7c02e5 | 313 | py | Python | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
] | null | null | null | import attr
from covid19_id.utils import ValueInt
| 20.866667 | 43 | 0.782748 |
9b0792a063a2b49e22d50a2e57caac25388b1b3e | 511 | py | Python | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
] | null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
] | null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
] | null | null | null | import json
import hashlib
from .test_case.blockchain import BlockchainTestCase
| 26.894737 | 61 | 0.702544 |
9b0816140cf40f94ed1ecf980a99d990c62d409b | 14,495 | py | Python | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
] | null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
] | null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as st
from sklearn.neighbors import BallTree
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
from xgbse.non_parametric import (
calculate_kaplan_vectorized,
get_time_bins,
calculate_interval_failures,
)
# at which percentiles will the KM predict
# (11 evenly spaced quantiles from 0.0 to 1.0 in steps of 0.1)
KM_PERCENTILES = np.linspace(0, 1, 11)
# Default XGBoost parameters: AFT (accelerated failure time) survival
# objective with a normal error distribution, dart booster, hist tree
# method, and 50% row/feature subsampling with min_child_weight=50.
DEFAULT_PARAMS = {
    "objective": "survival:aft",
    "eval_metric": "aft-nloglik",
    "aft_loss_distribution": "normal",
    "aft_loss_distribution_scale": 1,
    "tree_method": "hist",
    "learning_rate": 5e-2,
    "max_depth": 8,
    "booster": "dart",
    "subsample": 0.5,
    "min_child_weight": 50,
    "colsample_bynode": 0.5,
}
# Alternative defaults: Cox proportional-hazards objective with the exact
# tree method, very deep trees (max_depth=100), and no subsampling.
DEFAULT_PARAMS_TREE = {
    "objective": "survival:cox",
    "eval_metric": "cox-nloglik",
    "tree_method": "exact",
    "max_depth": 100,
    "booster": "dart",
    "subsample": 1.0,
    "min_child_weight": 30,
    "colsample_bynode": 1.0,
}
# class to turn XGB into a kNN with a kaplan meier in the NNs
# class to turn XGB into a kNN with a kaplan meier in the NNs
| 34.186321 | 135 | 0.635598 |
9b086dcb5153716593628ec1966115cfb5eef668 | 3,932 | py | Python | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
] | null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
] | null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
] | 1 | 2021-05-30T15:26:52.000Z | 2021-05-30T15:26:52.000Z | import math
# S&P 500 closing-price prediction with a single-layer LSTM (TensorFlow 1.x,
# tf.contrib API). Builds normalized sliding windows of daily closes, trains
# the LSTM to predict the next relative price change, reports directional
# accuracy, and plots actual vs. predicted prices for the held-out period.
import numpy as np
import pandas as pd
import tensorflow as tf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import warnings
warnings.filterwarnings('ignore')
# Hyperparameters
epoch_max = 10      # number of training epochs
alpha_max = 0.025   # initial learning rate
alpha_min = 0.001   # learning-rate floor for the linear decay below
batch_size = 32
window_size = 14    # prices per sliding window
test_ratio = 0.1    # fraction of samples held out for testing
max_time = 16       # BPTT horizon: consecutive windows fed per sample
lstm_size = 64      # LSTM hidden-state size
# Load the data
csv = pd.read_csv('data/sp500.csv')
dates, data = csv['Date'].values, csv['Close'].values
# Parse the date strings and keep only dates that have max_time windows
# of history before them (aligning dates with the samples built below)
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]
dates = [dates[i + max_time] for i in range(len(dates) - max_time)]
# Group prices with a sliding window of length window_size
data = [data[i : i + window_size] for i in range(len(data) - window_size)]
# Normalize each window by the last price of the preceding window.
# NOTE(review): for i == 0, data[i-1][-1] wraps around to data[-1][-1]
# (the LAST window); the preceding window of data[1:][0] is data[0], so
# this looks like an off-by-one — confirm the intended indexing.
norm = [data[0][0]] + [data[i-1][-1] for i, _ in enumerate(data[1:])]
data = [curr / norm[i] - 1.0 for i, curr in enumerate(data)]
nb_samples = len(data) - max_time
nb_train = int(nb_samples * (1.0 - test_ratio))
nb_test = nb_samples - nb_train
nb_batches = math.ceil(nb_train / batch_size)
# Group data for backpropagation through time: each sample x[i] is
# max_time consecutive windows; the target y[i] is the last (normalized)
# value of the window that follows them
x = [data[i : i + max_time] for i in range(nb_samples)]
y = [data[i + max_time][-1] for i in range(nb_samples)]
# Training set, split into mini-batches
train_x = [x[i : i + batch_size] for i in range(0, nb_train, batch_size)]
train_y = [y[i : i + batch_size] for i in range(0, nb_train, batch_size)]
# Test set: the chronologically last nb_test samples
test_x, test_y = x[-nb_test:], y[-nb_test:]
# Normalization factors aligned with y, kept to denormalize predictions
norm_y = [norm[i + max_time] for i in range(nb_samples)]
norm_test_y = norm_y[-nb_test:]
tf.reset_default_graph()
# Prices over the preceding days
X = tf.placeholder(tf.float32, [None, max_time, window_size])
# Price on the current day
Y = tf.placeholder(tf.float32, [None])
# Learning rate
L = tf.placeholder(tf.float32)
# LSTM layer
rnn = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(lstm_size)])
# LSTM layer outputs, transposed to time-major [max_time, batch, lstm_size]
val, _ = tf.nn.dynamic_rnn(rnn, X, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
# Output of the last time step
last = tf.gather(val, val.get_shape()[0] - 1)
# Trainable output-projection parameters
weight = tf.Variable(tf.random_normal([lstm_size, 1]))
bias = tf.Variable(tf.constant(0.0, shape=[1]))
# Predicted price (linear projection of the final LSTM state)
prediction = tf.add(tf.matmul(last, weight), bias)
# MSE loss of the prediction
loss = tf.reduce_mean(tf.square(tf.subtract(prediction, Y)))
# Gradient descent with Adam optimization
optimizer = tf.train.AdamOptimizer(L).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Train the model
    for epoch in range(epoch_max):
        # Linearly decay the learning rate, clipped at alpha_min
        epoch_loss, alpha = 0, max(alpha_min, alpha_max * (1 - epoch / epoch_max))
        # Mini-batch gradient descent over shuffled batch indices
        for b in np.random.permutation(nb_batches):
            loss_val, _ = sess.run([loss, optimizer], {X: train_x[b], Y: train_y[b], L: alpha})
            epoch_loss += loss_val
        print('Epoch: {}/{}\tLoss: {}'.format(epoch+1, epoch_max, epoch_loss))
    # Evaluate the model on the held-out period
    test_pred = sess.run(prediction, {X: test_x, Y: test_y, L: alpha})
    # Directional accuracy: prediction and target share the same sign
    acc = sum(1 for i in range(nb_test) if test_pred[i] * test_y[i] > 0) / nb_test
    print('Accuracy: {}'.format(acc))
    # Denormalize back to absolute prices
    denorm_y = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_y)]
    denorm_pred = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_pred)]
    # Plot actual vs. predicted prices over the test period
    plt.figure(figsize=(16,4))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=7))
    plt.plot(dates[-nb_test:], denorm_y, '-b', label='Actual')
    plt.plot(dates[-nb_test:], denorm_pred, '--r', label='Predicted')
    plt.gcf().autofmt_xdate()
    plt.legend()
    plt.show()
| 31.206349 | 95 | 0.694557 |
9b091fad5fab76f79772a42218911d8db0cd0709 | 420 | py | Python | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 418 | 2017-10-05T05:52:49.000Z | 2022-03-24T09:50:06.000Z | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 1,049 | 2017-09-16T09:34:55.000Z | 2022-03-23T16:13:04.000Z | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 155 | 2017-10-16T18:32:01.000Z | 2022-03-15T12:48:33.000Z | # Generated by Django 3.1 on 2020-10-10 14:31
from django.db import migrations, models
| 22.105263 | 67 | 0.621429 |
9b09888d30cc7622a264796e061dbd4cba10dd9a | 440 | py | Python | zzzeeksphinx/theme.py | aidos/zzzeeksphinx | c0fa4be4d40752632e879ec109850caa316ec8af | [
"MIT"
] | 3 | 2017-08-10T22:26:25.000Z | 2017-09-10T16:07:23.000Z | zzzeeksphinx/theme.py | zzzeek/zzzeeksphinx | 663f5c353e9c3ef3f9676384d429f504feaf20d3 | [
"MIT"
] | 9 | 2020-07-18T12:31:49.000Z | 2021-10-08T15:19:43.000Z | zzzeeksphinx/theme.py | zzzeek/zzzeeksphinx | 663f5c353e9c3ef3f9676384d429f504feaf20d3 | [
"MIT"
] | 1 | 2021-02-20T20:57:00.000Z | 2021-02-20T20:57:00.000Z | from os import path
# Absolute filesystem path of the directory containing this module.
package_dir = path.abspath(path.dirname(__file__))
| 25.882353 | 76 | 0.665909 |
9b0a82ae7938b94fafa2d863a1f8c7ee8913dbbc | 2,674 | py | Python | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
] | 2 | 2019-08-15T11:51:17.000Z | 2019-08-15T12:59:37.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
] | 1 | 2020-08-11T14:25:45.000Z | 2020-08-11T14:25:45.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
] | 1 | 2021-03-16T12:30:31.000Z | 2021-03-16T12:30:31.000Z | # -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'
import torch
from torch.autograd import Variable
import numpy as np
# Toy gradient check: build a two-stage "compositional" linear model in
# which each weight matrix is scaled element-wise by a relu-gated function
# of extra parameters (p_e, p_d), then compare autograd's gradients for
# p_e and p_d against hand-derived closed-form expressions.
dtype = torch.DoubleTensor  # double-precision tensors throughout
# Fixed seeds so the comparison is reproducible
np.random.seed(2183)
torch.manual_seed(2183)
# D is the "batch size"; N is input dimension;
# H is hidden dimension; N_out is output dimension.
D, N, H, N_out = 1, 20, 20, 20
# Create random Tensors to hold input and outputs, and wrap them in Variables.
# Setting requires_grad=False indicates that we do not need to compute gradients
# with respect to these Variables during the backward pass.
x = Variable(torch.randn(N, D).type(dtype), requires_grad=True)
y = Variable(torch.randn(N_out, D).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Variables during the backward pass.
layers = []  # weight matrices, in application order (encoder-like, decoder-like)
biases = []  # matching bias terms
w_e = Variable(torch.randn(N, H).type(dtype), requires_grad=True)
b_e = Variable(torch.randn(H,).type(dtype), requires_grad=True)
w_d = Variable(torch.randn(H, N_out).type(dtype), requires_grad=True)
b_d = Variable(torch.randn(N_out,).type(dtype), requires_grad=True)
layers.append(w_e)
layers.append(w_d)
biases.append(b_e)
biases.append(b_d)
# Matrices we need the gradients wrt
parameters = torch.nn.ParameterList()
p_e = torch.nn.Parameter(torch.randn(N, H).type(dtype), requires_grad=True)
p_d = torch.nn.Parameter(torch.randn(H, N_out).type(dtype), requires_grad=True)
parameters.append(p_e)
parameters.append(p_d)
# Non-linearity
relu = torch.nn.ReLU()
# Forward pass: start from the identity and, for each stage, scale the
# stage's weights element-wise by relu(p @ (w + b)^T), then left-multiply
# the running composition matrix by the scaled weights.
comb_matrix = torch.autograd.Variable(torch.eye(N), requires_grad=True).double()
for index in range(2):
    # relu-gated scaling mask derived from the stage's p, w and b
    b_sc_m = relu(parameters[index].mm((layers[index] + biases[index]).t()))
    # element-wise scaled weight matrix actually used in the composition
    b_scaled = layers[index] * b_sc_m
    comb_matrix = torch.matmul(b_scaled, comb_matrix)
y_pred = torch.matmul(comb_matrix, x)
# L1 loss; backward() populates parameters[i].grad via autograd
loss = (y - y_pred).norm(1)
loss.backward()
# dL/d(y_pred) of the L1 norm is sign(y_pred - y); outer product with x
delta_term = (torch.sign(y_pred - y)).mm(x.t())
# With relu
# Effective (scaled) weights, recomputed as in the forward pass
w_tilde_d = relu(parameters[1].mm((layers[1] + biases[1]).t())) * w_d
w_tilde_e = w_e * relu(parameters[0].mm((layers[0] + biases[0]).t()))
# ReLU sub-gradient masks: 1 where the pre-activation is positive, else 0
relu_grad_dec = p_d.mm((w_d + b_d).t()).gt(0).double()
relu_grad_enc = p_e.mm((w_e + b_e).t()).gt(0).double()
# Hand-derived gradients of the loss w.r.t. p_d and p_e
p_d_grad_hat = (delta_term.mm(w_tilde_e.t()) * w_d * relu_grad_dec).mm((w_d + b_d))
p_e_grad_hat = (w_tilde_d.t().mm(delta_term) * w_e * relu_grad_enc).mm((w_e + b_e))
# Both max absolute differences should be numerically zero if the
# closed-form derivation matches autograd
print('Error between autograd computation and calculated:'+str((parameters[1].grad - p_d_grad_hat).abs().max()))
print('Error between autograd computation and calculated:'+str((parameters[0].grad - p_e_grad_hat).abs().max()))
# EOF
| 33.012346 | 112 | 0.726253 |