Dataset schema (column, dtype, observed range; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 9a2736448f820e4e81087e8a5353235f998513f8 | size: 55,584 | ext: py | lang: Python
max_stars: fhir/resources/tests/test_claim.py @ cstoltze/fhir.resources (52f99738935b7313089d89daf94d73ce7d167c9d), licenses ["BSD-3-Clause"], count 144, 2019-05-08T14:24:43.000Z → 2022-03-30T02:37:11.000Z
max_issues: same path, repo, head, and licenses; count 82, 2019-05-13T17:43:13.000Z → 2022-03-30T16:45:17.000Z
max_forks: same path, repo, head, and licenses; count 48, 2019-04-04T14:14:53.000Z → 2022-03-30T06:07:31.000Z
content:
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Claim
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import claim
def test_claim_1(base_settings):
"""No. 1 tests collection for Claim.
Test File: claim-example-institutional-rich.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-institutional-rich.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_1(inst2)
def test_claim_2(base_settings):
"""No. 2 tests collection for Claim.
Test File: claim-example-professional.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-professional.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_2(inst2)
def test_claim_3(base_settings):
"""No. 3 tests collection for Claim.
Test File: claim-example.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_3(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_3(inst2)
def test_claim_4(base_settings):
"""No. 4 tests collection for Claim.
Test File: claim-example-vision.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-vision.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_4(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_4(inst2)
def test_claim_5(base_settings):
"""No. 5 tests collection for Claim.
Test File: claim-example-vision-glasses-3tier.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-vision-glasses-3tier.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_5(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_5(inst2)
def test_claim_6(base_settings):
"""No. 6 tests collection for Claim.
Test File: claim-example-institutional.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-institutional.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_6(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_6(inst2)
def test_claim_7(base_settings):
"""No. 7 tests collection for Claim.
Test File: claim-example-oral-contained.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-oral-contained.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_7(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_7(inst2)
def test_claim_8(base_settings):
"""No. 8 tests collection for Claim.
Test File: claim-example-pharmacy-medication.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-pharmacy-medication.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_8(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_8(inst2)
def test_claim_9(base_settings):
"""No. 9 tests collection for Claim.
Test File: claim-example-oral-orthoplan.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-oral-orthoplan.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_9(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_9(inst2)
def test_claim_10(base_settings):
"""No. 10 tests collection for Claim.
Test File: claim-example-cms1500-medical.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-cms1500-medical.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_10(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_10(inst2)
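# The ten tests above repeat one parse/round-trip pattern. A parametrized
# equivalent (a sketch, not part of the original file; the impl_claim_*
# helpers are the ones already referenced above) could collapse them:
import pytest

_CLAIM_CASES = [
    ("claim-example-institutional-rich.json", impl_claim_1),
    ("claim-example-professional.json", impl_claim_2),
    ("claim-example.json", impl_claim_3),
    ("claim-example-vision.json", impl_claim_4),
    ("claim-example-vision-glasses-3tier.json", impl_claim_5),
    ("claim-example-institutional.json", impl_claim_6),
    ("claim-example-oral-contained.json", impl_claim_7),
    ("claim-example-pharmacy-medication.json", impl_claim_8),
    ("claim-example-oral-orthoplan.json", impl_claim_9),
    ("claim-example-cms1500-medical.json", impl_claim_10),
]

@pytest.mark.parametrize("fname,impl", _CLAIM_CASES)
def test_claim_roundtrip(base_settings, fname, impl):
    inst = claim.Claim.parse_file(
        base_settings["unittest_data_dir"] / fname,
        content_type="application/json",
        encoding="utf-8",
    )
    assert inst.resource_type == "Claim"
    impl(inst)
    # round-trip: serialize to a dict and rebuild the model from it
    data = inst.dict()
    assert data["resourceType"] == "Claim"
    impl(claim.Claim(**data))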
avg_line_length: 43.527016 | max_line_length: 88 | alphanum_fraction: 0.664904

hexsha: 9a27eb495106ade83e880e4a8a449d48c322f96d | size: 2,708 | ext: py | lang: Python
max_stars: bin/main.py @ ubern-mia/point-cloud-segmentation-miccai2019 (b131b62dc5016de53611f3a743c56cc0061e050f), licenses ["MIT"], count 20, 2019-10-14T06:03:10.000Z → 2022-02-04T04:44:38.000Z
max_issues: same path, repo, head, and licenses; count 11, 2019-06-10T12:31:23.000Z → 2022-03-12T00:04:28.000Z
max_forks: bin/main.py @ fabianbalsiger/point-cloud-segmentation-miccai2019 (same head), licenses ["MIT"], count 3, 2019-11-06T14:06:44.000Z → 2021-08-11T18:46:25.000Z
content:
import argparse
import os.path
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import pymia.deeplearning.logging as log
import tensorflow as tf
import pc.configuration.config as cfg
import pc.data.handler as hdlr
import pc.data.split as split
import pc.model.point_cnn as net
import pc.utilities.filesystem as fs
import pc.utilities.seeding as seed
import pc.utilities.training as train
if __name__ == '__main__':
"""The program's entry point.
Parse the arguments and run the program.
"""
parser = argparse.ArgumentParser(description='Deep learning for shape learning on point clouds')
parser.add_argument(
'--config_file',
type=str,
default='./bin/config.json',
help='Path to the configuration file.'
)
args = parser.parse_args()
main(args.config_file)
avg_line_length: 33.85 | max_line_length: 113 | alphanum_fraction: 0.679838

hexsha: 9a285d7173b98f84f370605c57bfb8c26d5b2158 | size: 1,586 | ext: py | lang: Python
max_stars: spynoza/unwarping/topup/nodes.py @ spinoza-centre/spynoza (d71d69e3ea60c9544f4e63940f053a2d1b3ac65f), licenses ["MIT"], count 7, 2016-06-21T11:51:07.000Z → 2018-08-10T15:41:37.000Z
max_issues: same path, repo, head, and licenses; count 12, 2017-07-05T09:14:31.000Z → 2018-09-13T12:19:14.000Z
max_forks: same path, repo, head, and licenses; count 8, 2016-09-26T12:35:59.000Z → 2021-06-05T05:50:23.000Z
content:
from nipype.interfaces.utility import Function
Topup_scan_params = Function(function=topup_scan_params,
input_names=['pe_direction', 'te', 'epi_factor'],
output_names=['fn'])
Apply_scan_params = Function(function=apply_scan_params,
input_names=['pe_direction', 'te', 'epi_factor',
'nr_trs'],
output_names=['fn'])
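# Example wiring (a sketch assuming standard nipype workflow usage; the node
# name and input values are illustrative, not taken from the original file):
from nipype.pipeline.engine import Node

scan_params_node = Node(Topup_scan_params, name='topup_scan_params')
scan_params_node.inputs.pe_direction = 'y'  # hypothetical value
scan_params_node.inputs.te = 0.025          # hypothetical value
scan_params_node.inputs.epi_factor = 37     # hypothetical value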
avg_line_length: 33.041667 | max_line_length: 78 | alphanum_fraction: 0.592686

hexsha: 9a287484855658cc91349375e1c4b8e475ab1fe0 | size: 1,317 | ext: py | lang: Python
max_stars / max_issues / max_forks: manage_env.py @ sandeep-gh/OpenBSDRemoteIT (1690e67b6e2eb106c5350c75915065457fb1b9b2), licenses ["MIT"]; all counts and event datetimes null
content:
import os
import pickle
from deployConfig import workDir
import sys
env_fp = f"{workDir}/env.pickle"
if not os.path.exists(env_fp):
env = {}
with open(env_fp, "wb") as fh:
pickle.dump(env, fh)
# add_to_env("LD_LIBRARY_PATH", "/usr/local/lib/eopenssl11/")
# add_to_env("LD_LIBRARY_PATH", f"{project_root}/Builds/Python-3.10.0/")
# add_to_env("PATH", f"{project_root}/Builds/Python-3.10.0/bin")
# add_to_env("PATH", f"{project_root}/Builds/postgresql-14.0/bin")
avg_line_length: 28.630435 | max_line_length: 90 | alphanum_fraction: 0.59757

hexsha: 9a2921aafee477055d03e47abb30d023e2f9b7df | size: 2,645 | ext: py | lang: Python
max_stars / max_issues / max_forks: 2017/day06/redistribution.py @ kmcginn/advent-of-code (96a8d7d723f6f222d431fd9ede88d0a303d86761), licenses ["MIT"]; all counts and event datetimes null
content:
"""
from: http://adventofcode.com/2017/day/6
--- Day 6: Memory Reallocation ---
A debugger program here is having an issue: it is trying to repair a memory reallocation routine,
but it keeps getting stuck in an infinite loop.
In this area, there are sixteen memory banks; each memory bank can hold any number of blocks. The
goal of the reallocation routine is to balance the blocks between the memory banks.
The reallocation routine operates in cycles. In each cycle, it finds the memory bank with the most
blocks (ties won by the lowest-numbered memory bank) and redistributes those blocks among the banks.
To do this, it removes all of the blocks from the selected bank, then moves to the next (by index)
memory bank and inserts one of the blocks. It continues doing this until it runs out of blocks; if
it reaches the last memory bank, it wraps around to the first one.
The debugger would like to know how many redistributions can be done before a blocks-in-banks
configuration is produced that has been seen before.
For example, imagine a scenario with only four memory banks:
The banks start with 0, 2, 7, and 0 blocks. The third bank has the most blocks, so it is chosen for
redistribution.
Starting with the next bank (the fourth bank) and then continuing to the first bank, the second
bank, and so on, the 7 blocks are spread out over the memory banks. The fourth, first, and second
banks get two blocks each, and the third bank gets one back. The final result looks like this:
2 4 1 2.
Next, the second bank is chosen because it contains the most blocks (four). Because there are four
memory banks, each gets one block. The result is: 3 1 2 3.
Now, there is a tie between the first and fourth memory banks, both of which have three blocks. The
first bank wins the tie, and its three blocks are distributed evenly over the other three banks,
leaving it with none: 0 2 3 4.
The fourth bank is chosen, and its four blocks are distributed such that each of the four banks
receives one: 1 3 4 1.
The third bank is chosen, and the same thing happens: 2 4 1 2.
At this point, we've reached a state we've seen before: 2 4 1 2 was already seen. The infinite loop
is detected after the fifth block redistribution cycle, and so the answer in this example is 5.
Given the initial block counts in your puzzle input, how many redistribution cycles must be
completed before a configuration is produced that has been seen before?
"""
def main():
"""Solve the problem!"""
with open('input.txt') as input_file:
data = input_file.read()
banks = [int(x) for x in data.split()]
print(banks)
if __name__ == "__main__":
main()
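# A minimal sketch (not part of the original file) of the redistribution
# cycle described in the docstring, so main() could report the answer:
def count_redistribution_cycles(banks):
    """Redistribute blocks until a configuration repeats; return the count."""
    seen = set()
    cycles = 0
    banks = list(banks)
    while tuple(banks) not in seen:
        seen.add(tuple(banks))
        # pick the fullest bank; ties are won by the lowest index
        i = banks.index(max(banks))
        blocks, banks[i] = banks[i], 0
        # hand the blocks out one at a time, wrapping around the end
        while blocks:
            i = (i + 1) % len(banks)
            banks[i] += 1
            blocks -= 1
        cycles += 1
    return cycles

# Sanity check against the worked example in the docstring above:
assert count_redistribution_cycles([0, 2, 7, 0]) == 5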
avg_line_length: 56.276596 | max_line_length: 100 | alphanum_fraction: 0.761815

hexsha: 9a29485e3ae58c67b4c0c486240c276c76016ab2 | size: 3,328 | ext: py | lang: Python
max_stars: redress/tests/test_geometries.py @ maximlamare/REDRESS (a6caa9924d0f6df7ed49f188b35a7743fde1486e), licenses ["MIT"], count 1, 2021-09-16T08:03:31.000Z → 2021-09-16T08:03:31.000Z
max_issues / max_forks: same path, repo, head, and licenses; counts and event datetimes null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unittests for the GDAl tools.
This file is part of the REDRESS algorithm
M. Lamare, M. Dumont, G. Picard (IGE, CEN).
"""
import pytest
from geojson import Polygon, Feature, FeatureCollection, dump
from redress.geospatial.gdal_ops import (build_poly_from_coords,
build_poly_from_geojson,
geom_contains)
avg_line_length: 36.571429 | max_line_length: 73 | alphanum_fraction: 0.60607

hexsha: 9a2995b77fe8a7759abd5fe12be41e28897fa1b0 | size: 112 | ext: py | lang: Python
max_stars: output/models/ms_data/regex/letterlike_symbols_xsd/__init__.py @ tefra/xsdata-w3c-tests (b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f), licenses ["MIT"], count 1, 2021-08-14T17:59:21.000Z → 2021-08-14T17:59:21.000Z
max_issues: same path, repo, head, and licenses; count 4, 2020-02-12T21:30:44.000Z → 2020-04-15T20:06:46.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
from output.models.ms_data.regex.letterlike_symbols_xsd.letterlike_symbols import Doc
__all__ = [
"Doc",
]
avg_line_length: 18.666667 | max_line_length: 85 | alphanum_fraction: 0.776786

hexsha: 9a2ad5d8f34b4182942a86d8ef3f197c1b06c12e | size: 1,296 | ext: py | lang: Python
max_stars / max_issues / max_forks: test.py @ MarkMurillo/python_ctype_structure_example (9e889cc4cbdeab8433c396262f086071bb961e13), licenses ["MIT"]; all counts and event datetimes null
content:
"""test.py
Python3
Test script that demonstrates the passing of an
initialized python structure to C and retrieving
the structure back.
"""
import testMod
from ctypes import *
# The class statement is missing from this excerpt; a forward declaration
# (the standard ctypes pattern for self-referential structures) is assumed
# here so that _fields_ can reference TESTSTRUCT itself.
class TESTSTRUCT(Structure):
    pass

TESTSTRUCT._fields_ = [
    ("name", c_char_p),
    ("next", POINTER(TESTSTRUCT)),  # We can use a structure pointer for a linked list.
    ("next2", c_void_p)  # We can use void pointers for structures as well!
]
struct1 = TESTSTRUCT(c_char_p("Hello!".encode()), None, None)
struct2 = TESTSTRUCT(c_char_p("Goodbye!".encode()), None, None)
struct22 = TESTSTRUCT(c_char_p("My Love!".encode()), None, None)
struct1.next = pointer(struct2)
#Must cast lp to void p before assigning it or it will complain...
struct1.next2 = cast(pointer(struct22), c_void_p)
outbytes = testMod.py_returnMe(struct1)
#Must cast outbytes back into pointer for a struct and retrieve contents.
struct3 = cast(outbytes, POINTER(TESTSTRUCT)).contents
#Next is already a pointer so all we need are just the contents.
nextStruct = struct3.next.contents
#Next2 is a void p so we need to cast it back to TESTSTRUCT pointer and get
#the contents.
next2Struct = cast(struct3.next2, POINTER(TESTSTRUCT)).contents
print ("Result: {}, {}, {}".format(struct3.name, nextStrut.name, next2Struct.name)
avg_line_length: 31.609756 | max_line_length: 88 | alphanum_fraction: 0.73534

hexsha: 9a2cec396ceac73b9f9e17a3fefcecf0959ae15d | size: 33,258 | ext: py | lang: Python
max_stars: utility/visualize.py @ richban/behavioral.neuroevolution (bb850bda919a772538dc86a9624a6e86623f9b80), licenses ["Apache-2.0"]; count and event datetimes null
max_issues: same path, repo, head, and licenses; count 2, 2020-03-31T01:45:13.000Z → 2020-09-25T23:39:43.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
from __future__ import print_function
import os
import csv
import graphviz
import numpy as np
import plotly.graph_objs as go
import plotly
import plotly.plotly as py
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import copy
import warnings
import matplotlib as mpl
from plotly.offline import download_plotlyjs, plot, iplot
mpl.use('TkAgg')
plotly.tools.set_credentials_file(username=os.environ['PLOTLY_USERNAME'],
api_key=os.environ['PLOTLY_API_KEY'])
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn(
"This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
median_fitness = np.array(statistics.get_fitness_median())
plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.plot(generation, median_fitness, 'y-', label="median")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
plt.close()
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn(
"This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
plt.figure(figsize=(12, 9))
_, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn(
"This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box'}
input_attrs['fillcolor'] = node_colors.get(k, 'lightgray')
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled'}
node_attrs['fillcolor'] = node_colors.get(k, 'lightblue')
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
# if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={
'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
def plot_single_run_scatter(scatter, dt, title):
"""Plots a single run with MAX, AVG, MEDIAN, All individuals"""
l = []
y = []
N = len(scatter.gen.unique())
c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for i in range(int(N)):
subset = scatter.loc[scatter['gen'] == i]
trace0 = go.Scatter(
x=subset.loc[:, 'gen'],
y=subset.loc[:, 'fitness'],
mode='markers',
marker=dict(size=7,
line=dict(width=1),
color=c[i],
opacity=0.5
),
name='gen {}'.format(i),
text=subset.loc[:, 'genome']
)
l.append(trace0)
trace0 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'max'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=1.0,
width=2
),
)
trace1 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'median'],
mode='lines',
name='Median',
line=dict(
color="rgb(173, 181, 97)",
shape="spline",
dash="solid",
smoothing=1.0,
width=2
)
)
trace2 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'avg'],
mode='lines',
name='Average',
line=dict(
color="rgb(62, 173, 212)",
shape="spline",
dash="solid",
smoothing=1.0,
width=2
)
)
data = [trace0, trace1, trace2]
layout = go.Layout(
title='Fitness of Population Individuals - {}'.format(title),
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=2,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=False
)
fig = go.Figure(data=data+l, layout=layout)
return py.iplot(fig, filename='single-run-scater-line-plot', layout=layout)
def plot_runs(dt, title, offline=True):
"""Plots the Max/Average/Median"""
trace0 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'max'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
)
trace1 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'median'],
mode='lines',
name='Median',
line=dict(
color="rgb(173, 181, 97)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
trace2 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'avg'],
mode='lines',
name='Average',
line=dict(
color="rgb(62, 173, 212)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
layout = go.Layout(
showlegend=True,
hovermode='closest',
title=title,
xaxis=dict(
autorange=False,
range=[0, 20],
showspikes=False,
title="Generations",
ticklen=5,
gridwidth=1,
),
yaxis=dict(
autorange=True,
title="Fitness",
ticklen=5,
gridwidth=1,
),
)
data = [trace0, trace1, trace2]
fig = go.Figure(data, layout=layout)
return py.iplot(fig, filename=title)
def plot_avg_runs(scatter, dt, title):
    """Plots every individual plus the Max/Median/Average lines across runs.

    (The def line for this block is missing from the excerpt; the name and
    signature are an assumption inferred from the body and the output
    filename below.)
    """
    l = []
    y = []
    N = len(scatter.gen.unique())
    c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
    for i in range(int(N)):
        subset = scatter.loc[scatter['gen'] == i]
        trace0 = go.Scatter(
            x=subset.loc[:, 'gen'],
            y=subset.loc[:, 'fitness'],
            mode='markers',
            marker=dict(size=7,
                        line=dict(width=1),
                        color=c[i],
                        opacity=0.5
                        ),
            name='gen {}'.format(i),
            text=subset.loc[:, 'genome']
        )
        l.append(trace0)
    trace0 = go.Scatter(
        x=dt.loc[:, 'gen'],
        y=dt.loc[:, 'max'],
        mode='lines',
        name='Max',
        line=dict(
            color="rgb(204, 51, 51)",
            dash="solid",
            shape="spline",
            smoothing=0.0,
            width=2
        ),
    )
    trace1 = go.Scatter(
        x=dt.loc[:, 'gen'],
        y=dt.loc[:, 'median'],
        mode='lines',
        name='Median',
        line=dict(
            color="rgb(173, 181, 97)",
            shape="spline",
            dash="solid",
            smoothing=0.0,
            width=2
        )
    )
    trace2 = go.Scatter(
        x=dt.loc[:, 'gen'],
        y=dt.loc[:, 'avg'],
        mode='lines',
        name='Average',
        line=dict(
            color="rgb(62, 173, 212)",
            shape="spline",
            dash="solid",
            smoothing=0.0,
            width=2
        )
    )
    data = [trace0, trace1, trace2]
    layout = go.Layout(
        title='Fitness of Population Individuals - {}'.format(title),
        hovermode='closest',
        xaxis=dict(
            title='Generations',
            ticklen=5,
            zeroline=False,
            gridwidth=2,
        ),
        yaxis=dict(
            title='Fitness',
            ticklen=5,
            gridwidth=1,
        ),
        showlegend=False
    )
    fig = go.Figure(data=data+l, layout=layout)
    return py.iplot(fig, filename='fitness-average-n-runs', layout=layout)
def plot_scatter(dt, title):
"""Plots a Scatter plot of each individual in the population"""
l = []
y = []
N = len(dt.gen.unique())
c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for i in range(int(N)):
subset = dt.loc[dt['gen'] == i]
trace0 = go.Scatter(
x=subset.loc[:, 'gen'],
y=subset.loc[:, 'fitness'],
mode='markers',
marker=dict(size=14,
line=dict(width=1),
color=c[i],
opacity=0.3
),
name='gen {}'.format(i),
text=subset.loc[:, 'genome'],
)
l.append(trace0)
layout = go.Layout(
title='Fitness of Population Individuals - {}'.format(title),
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=2,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=False
)
fig = go.Figure(data=l, layout=layout)
return py.iplot(fig, filename='population-scatter')
avg_line_length: 25.7017 | max_line_length: 115 | alphanum_fraction: 0.500992

hexsha: 9a2d4e4783b1e8d97223132070735cfa9ed1e2ca | size: 1,683 | ext: py | lang: Python
max_stars: CUMCM2014/Problem-A/2014-A-Python_SC/梯度图.py @ Amoiensis/Mathmatic_Modeling_CUMCM (c64ec097d764ec3ae14e26e840bf5642be372d7c), licenses ["Apache-2.0"], count 27, 2019-08-30T07:09:53.000Z → 2021-08-29T07:37:24.000Z
max_issues: same path, repo, head, and licenses; count 2, 2020-08-10T03:11:32.000Z → 2020-08-24T13:39:24.000Z
max_forks: same path, repo, head, and licenses; count 28, 2019-12-14T03:54:42.000Z → 2022-03-12T14:38:22.000Z
content:
# -*- coding: utf-8 -*-
"""
---------------------------------------------
File Name:
Description:
Author: fanzhiwei
date: 2019/9/5 9:58
---------------------------------------------
Change Activity: 2019/9/5 9:58
---------------------------------------------
"""
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import ndimage
from PIL import Image
LongRangeScanRaw = plt.imread("./1.tif")
ShortRangeScanRaw = plt.imread("./2.tif")
ShortRangeScanMean = ndimage.median_filter(ShortRangeScanRaw, 10)
LongRangeScanMean = ndimage.median_filter(LongRangeScanRaw, 10)
SizeLong = math.sqrt(LongRangeScanRaw.size)
SizeShort = math.sqrt(ShortRangeScanRaw.size)
if __name__ == "__main__":
Longimage = Image.fromarray(ToBinary(LongRangeScanMean))
Shortimage = Image.fromarray(ToBinary(ShortRangeScanMean))
Longimage.save("new_1.bmp")
Shortimage.save("new_2.bmp")
avg_line_length: 29.017241 | max_line_length: 65 | alphanum_fraction: 0.633393

hexsha: 9a2d7ee04fd9497228365f3b015187758913933a | size: 965 | ext: py | lang: Python
max_stars: models.py @ curieos/Django-Blog-TDD (ba40b285d87c88aa33b1e2eb3d4bda014a88a319), licenses ["MIT"]; count and event datetimes null
max_issues: same path, repo, head, and licenses; count 8, 2019-04-14T13:53:55.000Z → 2019-07-11T18:06:57.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
from django.utils.text import slugify
from django_extensions.db.fields import AutoSlugField
from django.db import models
from datetime import datetime
# Create your models here.
avg_line_length: 29.242424 | max_line_length: 101 | alphanum_fraction: 0.78342

hexsha: 9a2e437ae8b03063acc62700c14efeca6658092a | size: 145 | ext: py | lang: Python
max_stars: brl_gym/estimators/learnable_bf/__init__.py @ gilwoolee/brl_gym (9c0784e9928f12d2ee0528c79a533202d3afb640), licenses ["BSD-3-Clause"], count 2, 2020-08-07T05:50:44.000Z → 2022-03-03T08:46:10.000Z
max_issues / max_forks: same path, repo, head, and licenses; counts and event datetimes null
content:
from brl_gym.estimators.learnable_bf.learnable_bf import LearnableBF
#from brl_gym.estimators.learnable_bf.bf_dataset import BayesFilterDataset
avg_line_length: 36.25 | max_line_length: 74 | alphanum_fraction: 0.889655

hexsha: 9a337713256137d5fcba2e7758391c4a3d42f204 | size: 4,156 | ext: py | lang: Python
max_stars: scripts/figures/kernels.py @ qbhan/sample_based_MCdenoising (92f5220802ef0668105cdee5fd7e2af8a66201db), licenses ["Apache-2.0"], count 78, 2019-10-02T01:34:46.000Z → 2022-03-21T11:18:04.000Z
max_issues: same path, repo, head, and licenses; count 17, 2019-10-04T17:04:00.000Z → 2021-05-17T19:02:12.000Z
max_forks: same path, repo, head, and licenses; count 18, 2019-10-03T05:02:21.000Z → 2021-06-22T15:54:15.000Z
content:
import os
import argparse
import logging
import numpy as np
import torch as th
from torch.utils.data import DataLoader
from torchvision import transforms
import ttools
from ttools.modules.image_operators import crop_like
import rendernet.dataset as dset
import rendernet.modules.preprocessors as pre
import rendernet.modules.models as models
import rendernet.interfaces as interfaces
import rendernet.callbacks as cb
import rendernet.viz as viz
from sbmc.utils import make_variable
import skimage.io as skio
log = logging.getLogger("rendernet")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", required=True)
parser.add_argument("--data", required=True)
parser.add_argument("--output", required=True)
args = parser.parse_args()
ttools.set_logger(True)
main(args)
avg_line_length: 31.24812 | max_line_length: 113 | alphanum_fraction: 0.677334

hexsha: 9a33a34b59f215b243d9da922749fa4b6ad17b64 | size: 1,002 | ext: py | lang: Python
max_stars / max_issues / max_forks: code/analytics/models.py @ harryface/url-condenser (800b573a82f41dd4900c8264007c1a0260a1a8b4), licenses ["MIT"]; all counts and event datetimes null
content:
from django.db import models
# Create your models here.
from shortener.models import CondenseURL
avg_line_length: 31.3125 | max_line_length: 69 | alphanum_fraction: 0.653693

hexsha: 9a3726435cdad9b9e21619560262a26d9cbff99c | size: 299 | ext: py | lang: Python
max_stars: scripts/alan/clean_pycache.py @ Pix-00/olea (98bee1fd8866a3929f685a139255afb7b6813f31), licenses ["Apache-2.0"], count 2, 2020-06-18T03:25:52.000Z → 2020-06-18T07:33:45.000Z
max_issues: same path, repo, head, and licenses; count 15, 2021-01-28T07:11:04.000Z → 2021-05-24T07:11:37.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
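# The clean_pycache definition is truncated out of this excerpt; a plausible
# sketch (an assumption, not the original code) that the block below can call:
import shutil

def clean_pycache(root):
    """Remove every __pycache__ directory found under `root`."""
    for cache_dir in root.rglob("__pycache__"):
        shutil.rmtree(cache_dir)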
if __name__ == "__main__":
from pathlib import Path
clean_pycache(Path(__file__).parents[2])
avg_line_length: 19.933333 | max_line_length: 44 | alphanum_fraction: 0.638796

hexsha: 9a3a8f8810da891a7c03436b0f8a519f17f8d1e7 | size: 212 | ext: py | lang: Python
max_stars: orb_simulator/orbsim_language/orbsim_ast/tuple_creation_node.py @ dmguezjaviersnet/IA-Sim-Comp-Project (8165b9546efc45f98091a3774e2dae4f45942048), licenses ["MIT"], count 1, 2022-01-19T22:49:09.000Z → 2022-01-19T22:49:09.000Z
max_issues: same path, repo, head, and licenses; count 15, 2021-11-10T14:25:02.000Z → 2022-02-12T19:17:11.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
from dataclasses import dataclass
from typing import List
from orbsim_language.orbsim_ast.expression_node import ExpressionNode
avg_line_length: 30.285714 | max_line_length: 69 | alphanum_fraction: 0.858491

hexsha: 9a4004b98dc117b5e58a273f30a560e340d87721 | size: 1,345 | ext: py | lang: Python
max_stars / max_issues / max_forks: csv_merge_col.py @ adrianpope/VelocityCompression (eb35f586b18890da93a7ad2e287437118c0327a2), licenses ["BSD-3-Clause"]; all counts and event datetimes null
content:
import sys
import numpy as np
import pandas as pd
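# df_add_keys and df_merge are not shown in this excerpt; the definitions
# below are hypothetical sketches of what the __main__ block expects:
def df_add_keys(df):
    # assumed behaviour: expose the row index as an explicit merge key
    df['key'] = df.index

def df_merge(df1, suffix1, df2, suffix2):
    # assumed behaviour: outer-join on the shared key, tagging the
    # overlapping columns of each input with its suffix
    return pd.merge(df1, df2, on='key', how='outer',
                    suffixes=('_' + suffix1, '_' + suffix2))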
if __name__ == '__main__':
argv = sys.argv
if len(argv) < 7:
print('USAGE: %s <in1_name> <in1_suffix> <in2_name> <in2_suffix> <out_name> <add_keys>'%argv[0])
sys.exit(-1)
in1_name = argv[1]
in1_suffix = argv[2]
in2_name = argv[3]
in2_suffix = argv[4]
out_name = argv[5]
add_keys = int(argv[6])
in1 = pd.read_csv(in1_name)
in2 = pd.read_csv(in2_name)
if add_keys:
df_add_keys(in1)
df_add_keys(in2)
merged = df_merge(in1, in1_suffix, in2, in2_suffix)
merged.to_csv(out_name)
avg_line_length: 24.907407 | max_line_length: 104 | alphanum_fraction: 0.594052

hexsha: 9a409844ea8ff87b62a343aba1bddbe1b4acc686 | size: 649 | ext: py | lang: Python
max_stars / max_issues / max_forks: Toolkits/VCS/mygulamali__repo-mine/mine/helpers.py @ roscopecoltran/SniperKit-Core (4600dffe1cddff438b948b6c22f586d052971e04), licenses ["MIT"]; all counts and event datetimes null
content:
from sys import stdout
avg_line_length: 27.041667 | max_line_length: 52 | alphanum_fraction: 0.628659

hexsha: 9a4099a116dd4efb8f2b5619fb34ffe71a578a58 | size: 1,845 | ext: py | lang: Python
max_stars: scripts/check-silknow-urls.py @ silknow/crawler (d2632cea9b98ab64a8bca56bc70b34edd3c2de31), licenses ["Apache-2.0"], count 1, 2019-04-21T07:09:52.000Z → 2019-04-21T07:09:52.000Z
max_issues: same path, repo, head, and licenses; count 35, 2019-01-21T23:53:52.000Z → 2022-02-12T04:28:17.000Z
max_forks: same path, repo, head, and licenses; count and event datetimes null
content:
import argparse
import csv
import os
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help="Input path of the missing urls CSV file")
parser.add_argument('-o', '--output', help="Output directory where the new CSV files will be stored")
parser.add_argument('-q', '--quiet', action='store_true', help="Do not print the list of missing files")
args = parser.parse_args()
with open(args.input) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
missing_urls_output = os.path.join(args.output, 'silknow-missing-urls.csv')
missing_files_output = os.path.join(args.output, 'silknow-missing-files.csv')
with open(missing_urls_output, mode='w') as missing_url:
missing_url_writer = csv.writer(missing_url, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
with open(missing_files_output, mode='w') as missing_file:
missing_file_writer = csv.writer(missing_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
header = next(csv_reader)
missing_file_writer.writerow(header)
filepath_cache = []
for row in csv_reader:
museum = row[3].split('/')[5]
filename = os.path.basename(row[3])
filepath = os.path.normpath(os.path.join(museum, filename))
filepath_cache.append(filepath)
if not os.path.exists(filepath):
missing_file_writer.writerow(row)
if not args.quiet:
print(filepath + ' does not exist in files')
for root, dirs, files in os.walk('./'):
for file in files:
if file.endswith('.jpg'):
filepath = os.path.normpath(os.path.join(root, file))
if filepath not in filepath_cache:
missing_url_writer.writerow([filepath])
if not args.quiet:
print(filepath + ' does not exist in query result')
avg_line_length: 38.4375 | max_line_length: 105 | alphanum_fraction: 0.666667

hexsha: 9a40c18aa2fcf755b162532d605ac1593ac74650 | size: 2,302 | ext: py | lang: Python
max_stars: Trabajo 3/auxFunc.py @ francaracuel/UGR-GII-CCIA-4-VC-Vision_por_computador-17-18-Practicas (cb801eb5dfc4a8ea0300eae66a3b9bb2943fe8ab), licenses ["Apache-2.0"], count 1, 2019-01-28T09:43:41.000Z → 2019-01-28T09:43:41.000Z
max_issues / max_forks: same path, repo, head, and licenses; counts and event datetimes null
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 11:20:06 2017
@author: NPB
"""
import cv2
import pickle
| 26.45977
| 69
| 0.613814
|
9a417a0a839c157704c0bb9c7d9a86e16b358f3e
| 22,087
|
py
|
Python
|
pdb_profiling/processors/uniprot/api.py
|
NatureGeorge/pdb-profiling
|
b29f93f90fccf03869a7a294932f61d8e0b3470c
|
[
"MIT"
] | 5
|
2020-10-27T12:02:00.000Z
|
2021-11-05T06:51:59.000Z
|
pdb_profiling/processors/uniprot/api.py
|
NatureGeorge/pdb-profiling
|
b29f93f90fccf03869a7a294932f61d8e0b3470c
|
[
"MIT"
] | 9
|
2021-01-07T04:47:58.000Z
|
2021-09-22T13:20:35.000Z
|
pdb_profiling/processors/uniprot/api.py
|
NatureGeorge/pdb-profiling
|
b29f93f90fccf03869a7a294932f61d8e0b3470c
|
[
"MIT"
] | null | null | null |
# @Created Date: 2019-12-08 06:46:49 pm
# @Filename: api.py
# @Email: 1730416009@stu.suda.edu.cn
# @Author: ZeFeng Zhu
# @Last Modified: 2020-02-16 10:54:32 am
# @Copyright (c) 2020 MinghuiGroup, Soochow University
from typing import Iterable, Iterator, Optional, Union, Generator, Dict, List
from time import perf_counter
from numpy import nan, array
from pathlib import Path
from unsync import unsync, Unfuture
from copy import deepcopy
from pdb_profiling.log import Abclog
from pdb_profiling.utils import init_semaphore, init_folder_from_suffix, init_folder_from_suffixes, a_read_csv
from pdb_profiling.fetcher.webfetch import UnsyncFetch
from uuid import uuid4
from pdb_profiling.cif_gz_stream import iter_index
from aiohttp import ClientSession
from aiofiles import open as aiofiles_open
from pdb_profiling.ensure import EnsureBase
from tenacity import wait_random, stop_after_attempt
ensure = EnsureBase()
rt_kw = dict(wait=wait_random(max=20), stop=stop_after_attempt(6))
"""QUERY_COLUMNS: List[str] = [
'id', 'length', 'reviewed',
'comment(ALTERNATIVE%20PRODUCTS)',
'feature(ALTERNATIVE%20SEQUENCE)',
'genes', 'organism', 'protein%20names']
RESULT_COLUMNS: List[str] = [
'Entry', 'Length', 'Status',
'Alternative products (isoforms)',
'Alternative sequence',
'Gene names', 'Organism', 'Protein names']
COLUMNS_DICT: Dict = dict(zip(QUERY_COLUMNS, RESULT_COLUMNS))
RESULT_NEW_COLUMN: List[str] = ['yourlist', 'isomap']"""
BASE_URL: str = 'https://www.uniprot.org'
"""PARAMS: Dict = {
# 'fil': 'organism%3A"Homo+sapiens+(Human)+[9606]"+AND+reviewed%3Ayes',
# reviewed:yes+AND+organism:9606
'columns': None,
'query': None,
'from': None,
'to': 'ACC',
'format': 'tab'}"""
"""
class MapUniProtID(Abclog):
'''
Implement UniProt Retrieve/ID Mapping API
'''
def __init__(self, id_col: str, id_type: str,
dfrm: Optional[DataFrame],
ids: Optional[Iterable] = None,
sites: Optional[Iterable] = None,
genes: Optional[Iterable] = None,
usecols: Optional[Iterable] = QUERY_COLUMNS,
site_col: Optional[str] = None,
gene_col: Optional[str] = None,
logger: Optional[logging.Logger] = None,
loggingPath: Optional[str] = None):
self.init_logger(self.__class__.__name__, logger)
if dfrm is not None:
self.dfrm = dfrm.drop_duplicates().reset_index(drop=True)
else:
'''
the length of dataframe is based on:
* the num of `ids` if there is more than one id
* the num of `sites` if there is just one id with specified `sites`
'''
if isinstance(ids, str):
if sites is not None and not isinstance(sites, str):
index_len = len(sites)
else:
index_len = 1
else:
index_len = len(ids)
self.dfrm = DataFrame(dict(zip(
(col for col in (id_col, site_col, gene_col) if col is not None),
(value for value in (ids, sites, genes) if value is not None))),
index=list(range(index_len)))
self.index = dfrm.index
self.id_col = id_col
self.id_type = id_type
self.site_col = site_col
self.gene_col = gene_col
self.loggingPath = loggingPath
if isinstance(usecols, str):
PARAMS['columns'] = usecols
usecols = usecols.split(',')
elif isinstance(usecols, (Iterable, Iterator)):
PARAMS['columns'] = ','.join(usecols)
else:
raise ValueError('Invalid usecols')
self.usecols = usecols
PARAMS['from'] = id_type
if isinstance(loggingPath, (str, Path)):
self.set_logging_fileHandler(loggingPath)
@property
def sites(self) -> Generator:
if self.site_col is not None:
for name, group in self.dfrm.groupby(by=self.id_col, sort=False):
yield name, group[self.site_col]
else:
yield None
@staticmethod
def split_df(dfrm, colName, sep):
'''Split DataFrame'''
df = dfrm.copy()
return df.drop([colName], axis=1).join(df[colName].str.split(sep, expand=True).stack().reset_index(level=1, drop=True).rename(colName))
def yieldTasks(self, lyst: Iterable, chunksize: int = 100, sep: str = ',') -> Generator:
fileName = self.outputPath.stem
for i in range(0, len(lyst), chunksize):
cur_fileName = f'{fileName}+{i}'
cur_params = deepcopy(PARAMS)
cur_params['query'] = sep.join(lyst[i:i+chunksize]) # self.outputPath.suffix
yield ('get', {'url': f'{BASE_URL}/uploadlists/', 'params': cur_params}, str(Path(self.outputPath.parent, cur_fileName+'.tsv')))
def retrieve(self, outputPath: Union[str, Path],
finishedPath: Optional[str] = None,
sep: str = '\t',
chunksize: int = 100,
concur_req: int = 20,
rate: float = 1.5,
ret_res: bool = True,
semaphore = None):
finish_id = list()
self.outputPath = Path(outputPath)
self.result_cols = [COLUMNS_DICT.get(
i, i) for i in self.usecols] + RESULT_NEW_COLUMN
if finishedPath is not None:
try:
target_col = RESULT_NEW_COLUMN[0]
finish: Series = read_csv(
finishedPath,
sep=sep,
usecols=[target_col],
names=self.result_cols,
skiprows=1,
header=None)[target_col]
except Exception as e:
col_to_add = RESULT_NEW_COLUMN[1]
self.logger.warning(
f"{e}\nSomething wrong with finished raw file, probably without '{col_to_add}' column.")
finish_df = read_csv(
finishedPath, sep=sep, names=self.result_cols[:-1], skiprows=1, header=None)
finish_df[col_to_add] = nan
finish_df.to_csv(finishedPath, sep=sep, index=False)
finish: Series = finish_df[target_col]
for query_id in finish:
if ',' in query_id:
finish_id.extend(query_id.split(','))
else:
finish_id.append(query_id)
query_id: Series = self.dfrm[self.id_col]
if finish_id:
rest_id = list(set(query_id) - set(finish_id))
else:
rest_id = query_id.unique()
self.logger.info(
f"Have finished {len(finish_id)} ids, {len(rest_id)} ids left.")
res = UnsyncFetch.multi_tasks(
tasks=self.yieldTasks(rest_id, chunksize),
to_do_func=self.process,
concur_req=concur_req,
rate=rate,
ret_res=ret_res,
semaphore=semaphore)
return res
def getCanonicalInfo(self, dfrm: DataFrame):
'''
Will Change the dfrm
* Add new column (canonical_isoform)
* Change the content of column (UniProt)
'''
# Get info from Alt Product file
if self.altProPath is None:
dfrm['canonical_isoform'] = nan
return dfrm
else:
usecols = ["IsoId", "Sequence", "Entry", "UniProt"]
altPro_df = read_csv(self.altProPath, sep="\t", usecols=usecols)
altPro_df = altPro_df[altPro_df["Sequence"]
== "Displayed"].reset_index(drop=True)
altPro_df.rename(
columns={"IsoId": "canonical_isoform"}, inplace=True)
# Modify dfrm
dfrm = merge(
dfrm, altPro_df[["canonical_isoform", "Entry"]], how="left")
return dfrm
def getGeneStatus(self, handled_df: DataFrame, colName: str = 'GENE_status'):
'''
Will Change the dfrm, add Gene Status
* Add new column (GENE) # if id_col != gene_col
* Add new column (GENE_status)
**About GENE_status**
* ``False`` : First element of Gene names is not correspond with refSeq's GENE (e.g)
* others(corresponding GENE)
'''
self.gene_status_col = colName
if self.id_type != 'GENENAME':
if self.gene_col is None:
handled_df[colName] = True
return None
gene_map = self.dfrm[[self.id_col,
self.gene_col]].drop_duplicates()
gene_map = gene_map.groupby(self.id_col)[self.gene_col].apply(
lambda x: array(x) if len(x) > 1 else list(x)[0])
handled_df['GENE'] = handled_df.apply(
lambda z: gene_map[z['yourlist']], axis=1)
handled_df[colName] = handled_df.apply(lambda x: x['GENE'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
handled_df['GENE'] = handled_df['GENE'].apply(
lambda x: ','.join(x) if not isinstance(x, str) else x)
else:
handled_df[colName] = handled_df.apply(lambda x: x['yourlist'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
def label_mapping_status(self, dfrm: DataFrame, colName: str = 'Mapping_status'):
self.mapping_status_col = colName
gene_status_col = self.gene_status_col
dfrm[colName] = 'No'
dfrm[gene_status_col] = dfrm[gene_status_col].apply(
lambda x: x.any() if isinstance(x, Iterable) else x)
if self.id_col == 'GENENAME':
pass_df = dfrm[
(dfrm[gene_status_col] == True) &
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
else:
pass_df = dfrm[
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
pass_index = pass_df.index
dfrm.loc[pass_index, colName] = 'Yes'
# Deal with 'one to many' situation
multipleCounter = Counter(dfrm.loc[pass_index, 'yourlist'])
err_li = [i for i, j in multipleCounter.items() if j > 1]
err_index = pass_df[pass_df['yourlist'].isin(err_li)].index
dfrm.loc[err_index, colName] = 'Error'
@unsync
async def process(self, path: Union[str, Path, Unfuture], sep: str = '\t'):
self.logger.debug("Start to handle id mapping result")
if not isinstance(path, (Path, str)):
path = await path # .result()
if not Path(path).stat().st_size:
return None
self.altSeqPath, self.altProPath = ExtractIsoAlt.main(path=path)
try:
df = read_csv(
path, sep='\t', names=self.result_cols, skiprows=1, header=None)
except ValueError:
df = read_csv(
path, sep='\t', names=self.result_cols[:-1], skiprows=1, header=None)
# Add New Column: canonical_isoform
df = self.getCanonicalInfo(df)
# Add New Column: unp_map_tage
df['unp_map_tage'] = nan
# Classification
df_with_no_isomap = df[df['isomap'].isnull()] # Class A
df_with_isomap = df[df['isomap'].notnull()] # Class B
# ----------------------------------------------------------------------
# In Class A
# ----------------------------------------------------------------------
if len(df_with_no_isomap) > 0:
df_wni_split = self.split_df(df_with_no_isomap, 'yourlist', ',')
df_wni_split.drop(columns=['isomap'], inplace=True)
# [yourlist <-> UniProt]
df_wni_split['UniProt'] = df_wni_split['Entry']
df_wni_split['unp_map_tage'] = 'Trusted & No Isoform'
# Find out special cases 1
df_wni_split_warn = df_wni_split[df_wni_split['Alternative products (isoforms)'].notnull(
)].index
df_wni_split.loc[df_wni_split_warn,
'unp_map_tage'] = 'Untrusted & No Isoform'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# ----------------------------------------------------------------------
# In Class B
# ----------------------------------------------------------------------
if len(df_with_isomap) > 0:
wi_yourlist_count = df_with_isomap.apply(
lambda x: x['yourlist'].count(','), axis=1)
wi_isomap_count = df_with_isomap.apply(
lambda x: x['isomap'].count(','), axis=1)
# In subClass 1
df_wi_eq = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count ==
wi_isomap_count].index]
if len(df_wi_eq) > 0:
df_wi_eq_split = self.split_df(
df_wi_eq.drop(columns=['yourlist']), 'isomap', ',')
df_wi_eq_split[['yourlist', 'UniProt']] = df_wi_eq_split['isomap'].str.split(
' -> ', expand=True)
# [yourlist <-> UniProt]
df_wi_eq_split.drop(columns=['isomap'], inplace=True)
df_wi_eq_split['unp_map_tage'] = 'Trusted & Isoform'
# # 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# In subClass 2
df_wi_ne = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count !=
wi_isomap_count].index]
if len(df_wi_ne) > 0:
df_wi_ne_split = self.split_df(df_wi_ne, 'isomap', ',')
df_wi_ne_split.rename(
columns={'yourlist': 'checkinglist'}, inplace=True)
df_wi_ne_split[['yourlist', 'UniProt']] = df_wi_ne_split['isomap'].str.split(
' -> ', expand=True)
df_wi_ne_split.drop(columns=['isomap'], inplace=True)
df_wi_ne_split['unp_map_tage'] = 'Trusted & Isoform & Contain Warnings'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt', 'checkinglist'
# Find out special cases 2
usecols = Index(set(df_wi_ne_split.columns) -
{'yourlist', 'UniProt'})
df_wi_ne_warn = self.split_df(
df_wi_ne_split[usecols].drop_duplicates(), 'checkinglist', ',')
df_wi_ne_warn = df_wi_ne_warn[~df_wi_ne_warn['checkinglist'].isin(
df_wi_ne_split['yourlist'])].rename(columns={'checkinglist': 'yourlist'})
df_wi_ne_warn['UniProt'] = df_wi_ne_warn['Entry']
# sequence conflict
df_wi_ne_warn['unp_map_tage'] = 'Untrusted & No Isoform'
df_wi_ne_split.drop(columns=['checkinglist'], inplace=True)
# Concat Dfrm
variables = ["df_wni_split", "df_wi_eq_split",
"df_wi_ne_split", "df_wi_ne_warn"]
lvs = locals()
varLyst = [lvs[variable] for variable in variables if variable in lvs]
final_df = concat(varLyst, sort=False).reset_index(drop=True)
cano_index = final_df[final_df["canonical_isoform"].notnull()].index
if len(cano_index) > 0:
final_df.loc[cano_index, "UniProt"] = final_df.loc[cano_index, ].apply(
lambda x: x["Entry"] if x["UniProt"] in x["canonical_isoform"] else x["UniProt"], axis=1)
# Add Gene Status
self.getGeneStatus(final_df)
# Label Mapping Status
self.label_mapping_status(final_df)
pathOb = Path(path)
edPath = str(Path(pathOb.parent, f'{pathOb.stem}_ed.tsv')) # {pathOb.suffix}
final_df.to_csv(edPath, sep=sep, index=False)
self.logger.debug(f"Handled id mapping result saved in {edPath}")
return edPath
"""
# The methods below belong to a retrieval class whose class statement is
# truncated out of this excerpt (hence the bare `self` parameters).
def task_unit(self, unp: str):
cur_fileName = f'{unp}.{self.suffix}'
return ('get', {'url': f'{BASE_URL}/uniprot/{cur_fileName}', 'params': self.params}, self.get_cur_folder()/cur_fileName)
def single_retrieve(self, identifier: str, rate: float = 1.5):
return UnsyncFetch.single_task(
task=self.task_unit(identifier),
semaphore=self.web_semaphore,
rate=rate)
def stream_retrieve_txt(self, identifier, name_suffix='VAR_SEQ', **kwargs):
assert self.suffix == 'txt'
return self.txt_writer(handle=self.txt_reader(f'{BASE_URL}/uniprot/{identifier}.{self.suffix}'), path=self.get_cur_folder()/f'{identifier}+{name_suffix}.{self.suffix}', **kwargs)
avg_line_length: 42.55684 | max_line_length: 186 | alphanum_fraction: 0.55467

hexsha: 9a41e415317ae7c881f36ab4cbf51cbe613df940 | size: 9,409 | ext: py | lang: Python
max_stars / max_issues: hep_spt/stats/poisson.py @ mramospe/hepspt (11f74978a582ebc20e0a7765dafc78f0d1f1d5d5), licenses ["MIT"]; counts and event datetimes null
max_forks: same path, repo, head, and licenses; count 1, 2021-11-03T03:36:15.000Z → 2021-11-03T03:36:15.000Z
content:
'''
Function and classes representing statistical tools.
'''
__author__ = ['Miguel Ramos Pernas']
__email__ = ['miguel.ramos.pernas@cern.ch']
from hep_spt.stats.core import chi2_one_dof, one_sigma
from hep_spt.core import decorate, taking_ndarray
from hep_spt import PACKAGE_PATH
import numpy as np
import os
from scipy.stats import poisson
from scipy.optimize import fsolve
import warnings
__all__ = ['calc_poisson_fu',
'calc_poisson_llu',
'gauss_unc',
'poisson_fu',
'poisson_llu',
'sw2_unc'
]
# Number after which the poisson uncertainty is considered to
# be the same as that of a gaussian with "std = sqrt(lambda)".
__poisson_to_gauss__ = 200
def _access_db(name):
'''
Access a database table under 'data/'.
:param name: name of the file holding the data.
:type name: str
:returns: Array holding the data.
:rtype: numpy.ndarray
'''
ifile = os.path.join(PACKAGE_PATH, 'data', name)
table = np.loadtxt(ifile)
return table
def gauss_unc(s, cl=one_sigma):
'''
Calculate the gaussian uncertainty for a given confidence level.
:param s: standard deviation of the gaussian.
:type s: float or numpy.ndarray(float)
:param cl: confidence level.
:type cl: float
:returns: Gaussian uncertainty.
:rtype: float or numpy.ndarray(float)
.. seealso:: :func:`poisson_fu`, :func:`poisson_llu`, :func:`sw2_unc`
'''
n = np.sqrt(chi2_one_dof.ppf(cl))
return n*s
def poisson_fu(m):
'''
Return the poisson frequentist uncertainty at one standard
deviation of confidence level.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:returns: Lower and upper frequentist uncertainties.
:rtype: numpy.ndarray(float, float)
.. seealso:: :func:`gauss_unc`, :func:`poisson_llu`, :func:`sw2_unc`
'''
return _poisson_unc_from_db(m, 'poisson_fu.dat')
def poisson_llu(m):
'''
Return the poisson uncertainty at one standard deviation of
confidence level. The lower and upper uncertainties are defined
by those two points with a variation of one in the value of the
negative logarithm of the likelihood multiplied by two:
.. math::
\\sigma_\\text{low} = n_\\text{obs} - \\lambda_\\text{low}
.. math::
\\alpha - 2\\log P(n_\\text{obs}|\\lambda_\\text{low}) = 1
.. math::
\\sigma_\\text{up} = \\lambda_\\text{up} - n_\\text{obs}
.. math::
\\alpha - 2\\log P(n_\\text{obs}|\\lambda_\\text{up}) = 1
where :math:`\\alpha = 2\\log P(n_\\text{obs}|n_\\text{obs})`.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:returns: Lower and upper frequentist uncertainties.
:rtype: numpy.ndarray(float, float)
.. seealso:: :func:`gauss_unc`, :func:`poisson_fu`, :func:`sw2_unc`
'''
return _poisson_unc_from_db(m, 'poisson_llu.dat')
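# A minimal numerical sketch (an assumption, not the packaged implementation,
# which reads pre-tabulated values via _poisson_unc_from_db below) of the
# log-likelihood bounds defined in the docstring above:
def _poisson_llu_sketch(n):
    alpha = 2. * poisson.logpmf(n, n)  # 2 log P(n_obs | n_obs)
    def eq(mu):
        return alpha - 2. * poisson.logpmf(n, mu) - 1.
    lw = 0. if n == 0 else fsolve(eq, 0.5 * n)[0]
    up = fsolve(eq, n + np.sqrt(n) + 1.)[0]
    return n - lw, up - n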
def _poisson_unc_from_db(m, database):
'''
Used in functions to calculate poissonian uncertainties,
which are partially stored on databases. If "m" is above the
maximum number stored in the database, the gaussian approximation
is taken instead.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:param database: name of the database.
:type database: str
:returns: Lower and upper frequentist uncertainties.
:rtype: (float, float) or numpy.ndarray(float, float)
:raises TypeError: if the input is a (has) non-integer value(s).
:raises ValueError: if the input value(s) is(are) not positive.
'''
m = np.array(m)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError('Calling function with a non-integer value')
if np.any(m < 0):
raise ValueError('Values must be positive')
scalar_input = False
if m.ndim == 0:
m = m[None]
scalar_input = True
no_app = (m < __poisson_to_gauss__)
if np.count_nonzero(no_app) == 0:
# We can use the gaussian approximation in all cases
out = np.array(2*[np.sqrt(m)]).T
else:
# Non-approximated uncertainties
table = _access_db(database)
out = np.zeros((len(m), 2), dtype=np.float64)
out[no_app] = table[m[no_app]]
mk_app = np.logical_not(no_app)
if mk_app.any():
# Use the gaussian approximation for the rest
out[mk_app] = np.array(2*[np.sqrt(m[mk_app])]).T
if scalar_input:
return np.squeeze(out)
return out
def _process_poisson_unc(m, lw, up):
'''
Calculate the uncertainties and issue a warning if they
have been incorrectly calculated.
:param m: mean value.
:type m: float
:param lw: lower bound.
:type lw: float
:param up: upper bound.
:type up: float
:returns: Lower and upper uncertainties.
:rtype: (float, float)
'''
s_lw = m - lw
s_up = up - m
if any(s < 0 for s in (s_lw, s_up)):
warnings.warn('Poisson uncertainties have been '
'incorrectly calculated')
# numpy.vectorize needs to know the exact type of the output
return float(s_lw), float(s_up)
def sw2_unc(arr, bins=20, range=None, weights=None):
'''
Calculate the errors using the sum of squares of weights.
The uncertainty is calculated as follows:
.. math::
\\sigma_i = \\sqrt{\\sum_{j = 0}^{n - 1} \\omega_{i,j}^2}
where *i* refers to the i-th bin and :math:`j \\in [0, n)` refers to
each entry in that bin with weight :math:`\\omega_{i,j}`. If "weights" is
None, then this coincides with the square root of the number of entries
in each bin.
:param arr: input array of data to process.
:param bins: see :func:`numpy.histogram`.
:type bins: int, sequence of scalars or str
:param range: range to process in the input array.
:type range: None or tuple(float, float)
:param weights: possible weights for the histogram.
:type weights: None or numpy.ndarray(value-type)
:returns: Symmetric uncertainty.
:rtype: numpy.ndarray
.. seealso:: :func:`gauss_unc`, :func:`poisson_fu`, :func:`poisson_llu`
'''
if weights is not None:
values = np.histogram(arr, bins, range, weights=weights*weights)[0]
else:
values = np.histogram(arr, bins, range)[0]
return np.sqrt(values)
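# --- Illustrative usage (not part of the original module) ---
# A minimal sketch showing how the public helpers above combine; it
# assumes the pre-computed 'data/poisson_fu.dat' table shipped with the
# package is available.
def _example_usage():
    data = np.random.exponential(2., 1000)
    sym = sw2_unc(data, bins=10)  # symmetric sqrt(sum of w^2) error per bin
    low, up = poisson_fu(7)       # frequentist poisson errors for a count of 7
    n = gauss_unc(1.)             # ~1 for a unit standard deviation at one sigma
    return sym, low, up, n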
if __name__ == '__main__':
'''
Generate the tables to store the pre-calculated values of
some uncertainties.
'''
m = np.arange(__poisson_to_gauss__)
print('Creating databases:')
for func in (calc_poisson_fu, calc_poisson_llu):
ucts = np.array(func(m, one_sigma)).T
name = func.__name__.replace('calc_', r'') + '.dat'
fpath = os.path.join('data', name)
print('- {}'.format(fpath))
np.savetxt(fpath, ucts)
| 27.755162
| 103
| 0.636199
|
9a43ea16514e92431028e9e426f7d3c0a8b72e9b
| 3,088
|
py
|
Python
|
src/octopus/core/framework/__init__.py
|
smaragden/OpenRenderManagement
|
cf3ab356f96969d7952b60417b48e941955e435c
|
[
"BSD-3-Clause"
] | 35
|
2015-02-23T23:13:13.000Z
|
2021-01-03T05:56:39.000Z
|
src/octopus/core/framework/__init__.py
|
smaragden/OpenRenderManagement
|
cf3ab356f96969d7952b60417b48e941955e435c
|
[
"BSD-3-Clause"
] | 15
|
2015-01-12T12:58:29.000Z
|
2016-03-30T13:10:19.000Z
|
src/octopus/core/framework/__init__.py
|
mikrosimage/OpenRenderManagement
|
6f9237a86cb8e4b206313f9c22424c8002fd5e4d
|
[
"BSD-3-Clause"
] | 20
|
2015-03-18T06:57:13.000Z
|
2020-07-01T15:09:36.000Z
|
import tornado
import logging
import httplib
try:
import simplejson as json
except ImportError:
import json
from octopus.core.framework.wsappframework import WSAppFramework, MainLoopApplication
from octopus.core.framework.webservice import MappingSet
from octopus.core.communication.http import Http400
from octopus.core.tools import Workload
__all__ = ['WSAppFramework', 'MainLoopApplication']
__all__ += ['Controller', 'ControllerError', 'ResourceNotFoundError', 'BaseResource']
logger = logging.getLogger('main.dispatcher.webservice')
| 29.409524
| 85
| 0.663536
|
9a4542a7758b9c15cb5e2c79c2e2a38319b81b96
| 127
|
py
|
Python
|
provstore/__init__.py
|
vinisalazar/provstore-api
|
0dd506b4f0e00623b95a52caa70debe758817179
|
[
"MIT"
] | 5
|
2015-03-09T20:07:08.000Z
|
2018-07-26T19:59:11.000Z
|
provstore/__init__.py
|
vinisalazar/provstore-api
|
0dd506b4f0e00623b95a52caa70debe758817179
|
[
"MIT"
] | 2
|
2016-03-16T06:13:59.000Z
|
2020-11-06T20:53:28.000Z
|
provstore/__init__.py
|
vinisalazar/provstore-api
|
0dd506b4f0e00623b95a52caa70debe758817179
|
[
"MIT"
] | 2
|
2016-09-01T09:09:05.000Z
|
2020-11-06T22:13:58.000Z
|
from provstore.document import Document
from provstore.bundle_manager import BundleManager
from provstore.bundle import Bundle
| 31.75
| 50
| 0.88189
|
9a45c1430c4ad59b5117e98f3291087d7df4a619
| 834
|
py
|
Python
|
print-server/src/auth/Singleton.py
|
Multi-Agent-io/feecc-io-consolidated
|
9ba60176346ca9e15b22c09c2d5f1e1a5ac3ced6
|
[
"Apache-2.0"
] | null | null | null |
print-server/src/auth/Singleton.py
|
Multi-Agent-io/feecc-io-consolidated
|
9ba60176346ca9e15b22c09c2d5f1e1a5ac3ced6
|
[
"Apache-2.0"
] | 2
|
2021-11-27T09:31:12.000Z
|
2022-03-23T13:15:57.000Z
|
print-server/src/auth/Singleton.py
|
Multi-Agent-io/feecc-io-consolidated
|
9ba60176346ca9e15b22c09c2d5f1e1a5ac3ced6
|
[
"Apache-2.0"
] | 2
|
2021-12-09T13:23:17.000Z
|
2022-03-23T13:04:41.000Z
|
from __future__ import annotations
import typing as tp
from loguru import logger
| 33.36
| 113
| 0.655875
|
9a467e6fc069bf386281b9a110e435f9e100a70b
| 139
|
py
|
Python
|
exercises/spotify/auth_data.py
|
introprogramming/exercises
|
8e52f3fa87d29a14ddcf00e8d87598d0721a41f6
|
[
"MIT"
] | 2
|
2018-08-20T22:44:40.000Z
|
2018-09-14T17:03:35.000Z
|
exercises/spotify/auth_data.py
|
introprogramming/exercises
|
8e52f3fa87d29a14ddcf00e8d87598d0721a41f6
|
[
"MIT"
] | 31
|
2015-08-06T16:25:57.000Z
|
2019-06-11T12:22:35.000Z
|
exercises/spotify/auth_data.py
|
introprogramming/exercises
|
8e52f3fa87d29a14ddcf00e8d87598d0721a41f6
|
[
"MIT"
] | 1
|
2016-08-15T15:06:40.000Z
|
2016-08-15T15:06:40.000Z
|
# Login to https://developer.spotify.com/dashboard/, create an application and fill these out before use!
client_id = ""
client_secret = ""
| 46.333333
| 105
| 0.755396
|
9a47729e5dc9d9a2649d73a1b1f6d29309683f2b
| 7,871
|
py
|
Python
|
augmentation.py
|
Harlequln/C1M18X-Behavioural_Cloning
|
0c49ad2432b2694848a7b83fddeea04c3306aa80
|
[
"MIT"
] | null | null | null |
augmentation.py
|
Harlequln/C1M18X-Behavioural_Cloning
|
0c49ad2432b2694848a7b83fddeea04c3306aa80
|
[
"MIT"
] | null | null | null |
augmentation.py
|
Harlequln/C1M18X-Behavioural_Cloning
|
0c49ad2432b2694848a7b83fddeea04c3306aa80
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.image as mpimg
from pathlib import Path
from model import *
CAMERA_STEERING_CORRECTION = 0.2
def image_path(sample, camera="center"):
""" Transform the sample path to the repository structure.
Args:
sample: a sample (row) of the data dataframe, usually drawn from a batch
by the generator
camera: the camera to extract the path for
Returns:
the converted image path string
"""
return str(Path(f"./data/{sample[camera].split('data')[-1]}"))
def crop_image(image, top=60, bot=25):
""" Crop the upper and lower borders of the given image.
Args:
image: the image to crop
top: the pixels to crop from the upper part
bot: the pixels to crop from the bottom part
Returns:
the cropped image
"""
return image[top:-bot, :, :]
def resize_image(image, shape=NVIDIA_SHAPE[0:2]):
""" Resize the image to shape.
Args:
image: input image
shape: (height, width) tuple, defaults to Nvidia input shape (66, 200)
Returns:
the resized image
"""
h, w = shape
return cv2.resize(image, dsize=(w, h), interpolation=cv2.INTER_AREA)
def rgb2yuv(rgb_image):
""" Convert the RGB image to YUV space. """
return cv2.cvtColor(rgb_image, cv2.COLOR_RGB2YUV)
def rgb2hsv(rgb_image):
""" Convert the RGB image to HSV space. """
return cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
def hsv2rgb(hsv_image):
""" Convert the HSV image to RGB space. """
return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)
def choose_camera(sample, camera='random', probs=None):
"""
Choose an image from a specific camera and, if needed, adjust the steering.
The steering of the left and right cameras is adjusted according to the
constant CAMERA_STEERING_CORRECTION.
Args:
sample: a sample (row) of the data dataframe, usually drawn from a batch
by the generator
camera: 'random', 'left', 'center' or 'right'. If 'random' choose the
camera with the given probabilities.
probs: the probabilities to choose the left, center or right cameras. If
None, the probabilities are uniform.
Returns:
a (image, steering) tuple
"""
if camera == 'random':
camera = np.random.choice(["left", "center", "right"], p=probs)
image = mpimg.imread(image_path(sample, camera=camera))
steering = sample["steer"]
if camera == "left":
steering += CAMERA_STEERING_CORRECTION
elif camera == "right":
steering -= CAMERA_STEERING_CORRECTION
return image, steering
def flip(image, steering, prob=0.5):
""" Flip the image and steering with the given probability.
Args:
image: the image to flip
steering: the steering corresponding to the image
prob: the flip probability
Returns:
the augmented image
"""
if np.random.random() < prob:
image = cv2.flip(image, 1)
steering *= -1
return image, steering
def shadow(rgb_image, prob=0.5):
""" Add a shadow to the rgb image with the given probability.
The shadow is created by converting the RGB image into HSV space and
modifying the value channel in a random range. The area in which the value
is modified is defined by a convex hull created for 6 randomly chosen points
in the lower half of the image.
Args:
rgb_image: the image to add the shadow to. Has to be in RGB space.
prob: the probability to add the shadow
Returns:
the augmented image
"""
if np.random.random() < prob:
width, height = rgb_image.shape[1], rgb_image.shape[0]
# Get 6 random vertices in the lower half of the image
x = np.random.randint(-0.1 * width, 1.1 * width, 6)
y = np.random.randint(height * 0.5, 1.1 * height, 6)
vertices = np.column_stack((x, y)).astype(np.int32)
vertices = cv2.convexHull(vertices).squeeze()
# Initialize mask
mask = np.zeros((height, width), dtype=np.int32)
# Create the polygon mask
cv2.fillPoly(mask, [vertices], 1)
# Adjust value
hsv = rgb2hsv(rgb_image)
v = hsv[:, :, 2]
hsv[:, :, 2] = np.where(mask, v * np.random.uniform(0.5, 0.8), v)
rgb_image = hsv2rgb(hsv)
return rgb_image
def brightness(rgb_image, low=0.6, high=1.4, prob=0.5):
""" Modify the brighntess of the rgb image with the given probability.
The brightness is modified by converting the RGB image into HSV space and
adusting the value channel in a random range between the low and high
bounds.
Args:
rgb_image: the image to modify the brightness. Has to be in RGB space.
low: lower value bound
high: upper value bound
prob: the probability to modify the brightness
Returns:
the augmented image
"""
if np.random.random() < prob:
hsv = rgb2hsv(rgb_image)
value = hsv[:, :, 2]
hsv[:, :, 2] = np.clip(value * np.random.uniform(low, high), 0, 255)
rgb_image = hsv2rgb(hsv)
return rgb_image
def shift(image, steering, shiftx=60, shifty=20, prob=0.5):
""" Shift the image and adjust the steering with the given probability.
The steering of the shifted image is adjusted depending on the amount of
pixels shifted in the width direction.
Args:
image: the image to shift.
steering: the corresponding steering.
shiftx: the upper bound of pixels to shift in the width direction
shifty: the upper bound of pixels to shift in the height direction
prob: the probability to shift the image
Returns:
the augmented image
"""
if np.random.random() < prob:
# The angle correction per pixel is derived from the angle correction
# specified for the side cameras. It is estimated that the images of two
# adjacent cameras are shifted by 80 pixels (at the bottom of the image)
angle_correction_per_pixel = CAMERA_STEERING_CORRECTION / 80
# Draw translations in x and y directions from a uniform distribution
tx = int(np.random.uniform(-shiftx, shiftx))
ty = int(np.random.uniform(-shifty, shifty))
# Transformation matrix
mat = np.float32([[1, 0, tx],
[0, 1, ty]])
# Transform image and correct steering angle
height, width, _ = image.shape
image = cv2.warpAffine(image, mat, (width, height),
borderMode=cv2.BORDER_REPLICATE)
steering += tx * angle_correction_per_pixel
return image, steering
def augment(sample, camera_probs=None, flip_prob=0.5, shadow_prob=0.5,
bright_prob=0.5, shift_prob=0.5, ):
""" Augment the sample with the given probabilities.
Args:
sample: a sample (row) of the data dataframe, usually drawn from a batch
by the generator
camera_probs: the probabilities to draw left, center or right camera
images
flip_prob: probability for an image to be flipped
shadow_prob: probability of shadow addition to the image
bright_prob: probability to modify the brightness of the image
shift_prob: probability for an image to be shifted
"""
image, steering = choose_camera(sample, probs=camera_probs)
image, steering = flip(image, steering, prob=flip_prob)
image = shadow(image, prob=shadow_prob)
image = brightness(image, prob=bright_prob)
image, steering = shift(image, steering, prob=shift_prob)
return image, steering
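# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming a dataframe row (mocked here as a dict) with
# 'left'/'center'/'right' image paths under ./data and a 'steer' column,
# which is the layout image_path() expects; the paths are hypothetical.
def _example_augment():
    sample = {
        "left": "data/IMG/left_0001.jpg",
        "center": "data/IMG/center_0001.jpg",
        "right": "data/IMG/right_0001.jpg",
        "steer": 0.05,
    }
    # Draw the center camera twice as often as either side camera
    image, steering = augment(sample, camera_probs=[0.25, 0.5, 0.25])
    return image, steering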
| 35.138393
| 81
| 0.632575
|
9a483acc0e1727f56a550dc2b790cfba50c01c45
| 4,848
|
py
|
Python
|
test_zeroshot.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 17
|
2021-07-30T14:08:24.000Z
|
2022-03-30T13:57:02.000Z
|
test_zeroshot.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 4
|
2021-09-09T03:02:18.000Z
|
2022-03-24T13:55:55.000Z
|
test_zeroshot.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 2
|
2021-08-30T11:51:16.000Z
|
2021-09-03T09:18:50.000Z
|
import json
import logging
from typing import List
import os
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, BertTokenizer
from vilbert.vilbert import BertConfig
from utils.cli import get_parser
from utils.dataset.common import pad_packed, load_json_data
from utils.dataset.zero_shot_dataset import ZeroShotDataset
from utils.dataset import PanoFeaturesReader
from airbert import Airbert
from train import get_model_input, get_mask_options
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
# ------------- #
# batch parsing #
# ------------- #
if __name__ == "__main__":
main()
| 27.545455
| 88
| 0.65821
|
9a49459be97466ed19cf1a661276df8eb41c082e
| 3,184
|
py
|
Python
|
refp.py
|
jon2718/ipycool_2.0
|
34cf74ee99f4a725b997c50a7742ba788ac2dacd
|
[
"MIT"
] | null | null | null |
refp.py
|
jon2718/ipycool_2.0
|
34cf74ee99f4a725b997c50a7742ba788ac2dacd
|
[
"MIT"
] | null | null | null |
refp.py
|
jon2718/ipycool_2.0
|
34cf74ee99f4a725b997c50a7742ba788ac2dacd
|
[
"MIT"
] | null | null | null |
from modeledcommandparameter import *
from pseudoregion import *
| 38.829268
| 139
| 0.451005
|
9a4a243b2c4f9a84354c254f16486d8c603e8178
| 10,620
|
py
|
Python
|
utils/dataloaders.py
|
sinahmr/parted-vae
|
261f0654de605c6a260784e47e9a17a737a1a985
|
[
"MIT"
] | 5
|
2021-06-26T07:45:50.000Z
|
2022-03-31T11:41:29.000Z
|
utils/dataloaders.py
|
sinahmr/parted-vae
|
261f0654de605c6a260784e47e9a17a737a1a985
|
[
"MIT"
] | null | null | null |
utils/dataloaders.py
|
sinahmr/parted-vae
|
261f0654de605c6a260784e47e9a17a737a1a985
|
[
"MIT"
] | 1
|
2021-11-26T09:14:03.000Z
|
2021-11-26T09:14:03.000Z
|
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image
from utils.fast_tensor_dataloader import FastTensorDataLoader
| 44.06639
| 177
| 0.645104
|
9a4a26f9a634d7ab72a8a79970898804d2a1b1c4
| 1,780
|
py
|
Python
|
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
from flask import (
    Blueprint, session, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from anonymail.auth import login_required
from anonymail.db import get_db
import datetime
now = datetime.datetime.now()
current_year = now.year
bp = Blueprint('posts', __name__)
| 28.253968
| 78
| 0.580337
|
9a4a94c02a87e8e977bec5709e692ef62684b7c3
| 959
|
py
|
Python
|
app.py
|
pic-metric/data-science
|
89bf6e3733a3595220c945269b66befcaf82a3be
|
[
"MIT"
] | null | null | null |
app.py
|
pic-metric/data-science
|
89bf6e3733a3595220c945269b66befcaf82a3be
|
[
"MIT"
] | null | null | null |
app.py
|
pic-metric/data-science
|
89bf6e3733a3595220c945269b66befcaf82a3be
|
[
"MIT"
] | 3
|
2020-01-31T22:34:00.000Z
|
2020-03-06T01:56:06.000Z
|
# from python-decouple import config
from flask import Flask, request, jsonify
from .obj_detector import object_detection
# from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
load_dotenv()
| 28.205882
| 78
| 0.607925
|
9a4bcff10fc3fa7d7e56bb3812a166c957678a62
| 2,579
|
py
|
Python
|
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | 4
|
2020-08-30T08:56:57.000Z
|
2020-08-31T21:32:03.000Z
|
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | null | null | null |
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | 1
|
2020-10-01T22:15:33.000Z
|
2020-10-01T22:15:33.000Z
|
from .subroutine import subroutine
from parameters.string_parameter import string_parameter as String
from parameters.numeric_parameter import numeric_parameter as Numeric
from parameters.array_parameter import array_parameter as Array
from ast import literal_eval
| 47.759259
| 154
| 0.606437
|
9a4cab617527bcae29b76af4b2c39e67572e4127
| 1,164
|
py
|
Python
|
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | null | null | null |
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | null | null | null |
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | 1
|
2020-06-24T16:52:59.000Z
|
2020-06-24T16:52:59.000Z
|
import requests
import json
from config import config
from logbook import Logger, StreamHandler
import sys
StreamHandler(sys.stdout).push_application()
log = Logger('auth')
| 31.459459
| 83
| 0.629725
|
9a4d61b4c436761ff6069be2e39ac836e18b0130
| 1,540
|
py
|
Python
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 83
|
2017-08-27T15:09:13.000Z
|
2022-01-18T17:03:41.000Z
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 808
|
2017-08-27T15:35:01.000Z
|
2021-12-14T17:30:50.000Z
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 55
|
2017-08-27T15:09:22.000Z
|
2022-03-25T12:07:34.000Z
|
# Copyright (c) 2019 Bita Hasheminezhad
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# #942: `fold_left`, `fold_right` and `fmap` do not work with a lazy function
import numpy as np
from phylanx import Phylanx, PhylanxSession, execution_tree
PhylanxSession.init(1)
sum = Phylanx.lazy(sum_eager)
result = test_map(np.array([[1, 2, 3]]))
assert(np.all(result == [6])), result
result = test_map(np.array([1, 2, 3]))
assert(np.all(result == [1, 2, 3])), result
| 24.0625
| 79
| 0.670779
|
9a4f44e640692a4adea1bc6d6ea01c4fe9188da3
| 644
|
py
|
Python
|
main.py
|
DanTheBow/Fibonacci
|
6b2b694174041c59c1cc151f775772056d88749b
|
[
"Unlicense"
] | 1
|
2022-01-02T19:50:55.000Z
|
2022-01-02T19:50:55.000Z
|
main.py
|
DanTheBow/Fibonacci
|
6b2b694174041c59c1cc151f775772056d88749b
|
[
"Unlicense"
] | null | null | null |
main.py
|
DanTheBow/Fibonacci
|
6b2b694174041c59c1cc151f775772056d88749b
|
[
"Unlicense"
] | null | null | null |
# The Fibonacci sequence is the infinite sequence of natural numbers that (originally) begins with the number 1 twice,
# or (frequently, in modern notation) is additionally given a leading 0.
# After that, the sum of two consecutive numbers always yields the number that immediately follows them:
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55 -> Here we start counting with 0.
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55 -> Here we start counting with 1.
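# A minimal implementation matching the description above (the original
# file's code is not shown here); the flag switches between the two
# conventions listed in the comments.
def fibonacci(n, start_with_zero=True):
    a, b = (0, 1) if start_with_zero else (1, 1)
    numbers = []
    for _ in range(n):
        numbers.append(a)
        a, b = b, a + b
    return numbers

print(fibonacci(11))         # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
print(fibonacci(10, False))  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]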
| 58.545455
| 116
| 0.706522
|
9a51a2dfb9ee0eb5c3e19b169561bb01b5b7ae90
| 4,063
|
py
|
Python
|
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
import numpy as np
import dnnlib.tflib as tflib
from training import dataset
tflib.init_tf()
| 35.330435
| 99
| 0.502092
|
9a51f5406e8b8b4afa3d8bc309049e92a8011b92
| 3,333
|
py
|
Python
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 69
|
2015-10-03T20:27:53.000Z
|
2021-04-06T05:26:18.000Z
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 73
|
2015-10-03T17:53:47.000Z
|
2020-10-01T03:08:01.000Z
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 29
|
2015-10-23T22:00:13.000Z
|
2021-11-30T04:48:06.000Z
|
from collections import namedtuple
import pytest
from rest_framework.authtoken.models import Token
from tests.conftest import twilio_vcr
from apostello import models
StatusCode = namedtuple("StatusCode", "anon, user, staff")
| 40.646341
| 105
| 0.615362
|
9a52f446636c4417f93211b5960e9ec09c902310
| 2,491
|
py
|
Python
|
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | 1
|
2018-01-10T17:54:18.000Z
|
2018-01-10T17:54:18.000Z
|
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | null | null | null |
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | null | null | null |
"""
A *really* simple guestbook flask app. Data is stored in a SQLite database that
looks something like the following:
+------------+------------------+------------+
| Name | Email | signed_on |
+============+==================+============+
| John Doe | jdoe@example.com | 2012-05-28 |
+------------+------------------+------------+
| Jane Doe | jane@example.com | 2012-05-28 |
+------------+------------------+------------+
This can be created with the following SQL (see bottom of this file):
create table guestbook (name text, email text, signed_on date);
Related Docs:
* `sqlite3 <http://docs.python.org/library/sqlite3.html>`_
* `datetime <http://docs.python.org/library/datetime.html>`_
* `Flask <http://flask.pocoo.org/docs/>`_
"""
from datetime import date
from flask import Flask, redirect, request, url_for, render_template
import sqlite3
app = Flask(__name__) # our Flask app
DB_FILE = 'guestbook.db' # file for our Database
def _select():
"""
just pull all the results from the database
"""
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
cursor.execute("SELECT * FROM guestbook")
return cursor.fetchall()
def _insert(name, email):
"""
put a new entry in the database
"""
params = {'name':name, 'email':email, 'date':date.today()}
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
cursor.execute("insert into guestbook (name, email, signed_on) VALUES (:name, :email, :date)", params)
connection.commit()
cursor.close()
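# --- Hypothetical view (the original file's routes are not shown) ---
# A minimal sketch of how _select()/_insert() would back a guestbook
# page, assuming a templates/index.html exists.
@app.route('/', methods=['GET', 'POST'])
def index_example():
    if request.method == 'POST':
        _insert(request.form['name'], request.form['email'])
        return redirect(url_for('index_example'))
    return render_template('index.html', entries=_select())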
if __name__ == '__main__':
# Make sure our database exists
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
try:
cursor.execute("select count(rowid) from guestbook")
except sqlite3.OperationalError:
cursor.execute("create table guestbook (name text, email text, signed_on date)")
cursor.close()
app.run(host='0.0.0.0', debug=True)
| 29.654762
| 106
| 0.609394
|
9a555159031db4d7f16f4b7224046ffb7dcc0810
| 25,673
|
py
|
Python
|
lingvodoc/scripts/lingvodoc_converter.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 5
|
2017-03-30T18:02:11.000Z
|
2021-07-20T16:02:34.000Z
|
lingvodoc/scripts/lingvodoc_converter.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 15
|
2016-02-24T13:16:59.000Z
|
2021-09-03T11:47:15.000Z
|
lingvodoc/scripts/lingvodoc_converter.py
|
Winking-maniac/lingvodoc
|
f037bf0e91ccdf020469037220a43e63849aa24a
|
[
"Apache-2.0"
] | 22
|
2015-09-25T07:13:40.000Z
|
2021-08-04T18:08:26.000Z
|
import sqlite3
import base64
import requests
import json
import hashlib
import logging
from lingvodoc.queue.client import QueueClient
#def change_dict_status(session, converting_status_url, status, task_id, progress):
# def change_dict_status(task_id, progress):
# #session.put(converting_status_url, json={'status': status})
# QueueClient.update_progress(task_id, progress)
if __name__ == "__main__":
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s\t%(levelname)s\t[%(name)s]\t%(message)s')
log.debug("!!!!!!!!!! YOU SHOULD NOT SEE IT !!!!!!!!")
convert_one(filename="/home/student/dicts-current/nenets_kaninski.sqlite", login="Test",
password_hash="$2a$12$zBMnhV9oUfKehlHJCHnsPuGM98Wwq/g9hlWWNqg8ZGDuLNyUSfxza",
language_client_id=1, language_object_id=1,
dictionary_client_id=None, dictionary_object_id=None,
perspective_client_id=None, perspective_object_id=None,
server_url="http://lingvodoc.ispras.ru/")
| 51.346
| 159
| 0.569158
|
9a56a9cb8a9973d77c62dc8bff13ecc6a5a858c1
| 1,550
|
py
|
Python
|
tests/test_all.py
|
euranova/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | 6
|
2021-09-17T02:09:29.000Z
|
2022-03-20T04:15:15.000Z
|
tests/test_all.py
|
Jason-Xu-Ncepu/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
Jason-Xu-Ncepu/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | 4
|
2021-06-29T22:57:18.000Z
|
2022-03-09T09:19:17.000Z
|
""" Tests the code. """
from torch.utils.data import DataLoader
from models import MODELS
from pipeline import argument_parser
from pipeline.datasets import DATASETS, get_dataset
from run import main
def test_datasets():
""" Tests all the datasets defined in pipeline.datasets.DATASETS. """
for ds_name in DATASETS:
train_set, test_set, _ = get_dataset(ds_name, seed=42)
for set_ in (train_set, test_set):
dl = DataLoader(list(zip(*set_)), batch_size=5)
for data, missing_data, mask in dl:
assert len(data) == 5, f"The {ds_name} dataset has fewer than 5 samples."
assert data.shape[1] > 1, f"The {ds_name} dataset has at most one column."
print("data:", data, "missing_data:", missing_data, "mask:", mask, sep="\n")
break
def test_general(capsys):
""" Tests most of the code by checking it produces the expected result. """
main(argument_parser.get_args(["--metric_steps", "50", "--datasets", "Boston", "--seeds", "0", "1"]))
captured = capsys.readouterr()
with open("tests/current_output.txt", "w") as f:
assert f.write(captured.out)
with open("tests/gold_output.txt", "r") as f:
assert captured.out == f.read()
def test_models():
""" Tests all the models (only checks if these run). """
for model in MODELS:
main(argument_parser.get_args(["--model", model, "--metric_steps", "0", "1", "5", "--datasets", "Boston",
"--seeds", "0"]))
| 38.75
| 113
| 0.614839
|
9a586ac04d9d83458edb9f23d9cb90fb787462de
| 2,185
|
py
|
Python
|
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | 3
|
2021-10-15T00:57:05.000Z
|
2021-12-16T13:00:05.000Z
|
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | null | null | null |
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import random
import os
import sys
import torch
from src.agent import (
EpsilonGreedyAgent,
MaxAgent,
RandomAgent,
RandomCreateBVAgent,
ProbabilityAgent,
QAgent,
QAndUtilityAgent,
MultiEpsilonGreedyAgent,
MultiMaxAgent,
MultiProbabilityAgent,
MultiQAgent,
MultiQAndUtilityAgent,
)
| 27.3125
| 74
| 0.644851
|
9a599c01b7e7a6eb5de9e8bf5a694c44420b04db
| 101
|
py
|
Python
|
python/testData/editing/spaceDocStringStubInFunction.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/editing/spaceDocStringStubInFunction.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/editing/spaceDocStringStubInFunction.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def func(x, y, z):
"""
:param x: <caret>
:param y:
:param z:
:return:
"""
| 14.428571
| 21
| 0.386139
|
9a5ad370a80119a4cd36243d371bcf4ccf37a3ae
| 1,439
|
py
|
Python
|
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
from hashlib import sha3_256
import magic
from enums import Dep, MangoType
MIME_MTYPE = {
'text/plain': MangoType.text,
'audio/flac': MangoType.audio_flac,
'audio/wav': MangoType.audio_wav,
'image/png': MangoType.picture_png,
'image/jpeg': MangoType.picture_jpg,
'video/x-matroska': MangoType.video_mkv,
'video/mp4': MangoType.video_mp4
}
TYPE_ALLOWED = {
Dep.d51: (MangoType.audio_flac, ),
Dep.d59: (MangoType.audio_flac, ),
Dep.d60: (MangoType.picture_png, ),
Dep.d71: (MangoType.audio_flac, ),
Dep.d72: (MangoType.text, ),
Dep.d73: (MangoType.video_mkv, MangoType.video_mp4)
}
EXTS = {
MangoType.audio_flac: 'flac',
MangoType.picture_png: 'png',
MangoType.text: 'txt',
MangoType.video_mkv: 'mkv',
MangoType.video_mp4: 'mp4'
}
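# --- Illustrative helper (not part of the original file) ---
# A minimal sketch, assuming python-magic's from_buffer(..., mime=True)
# for MIME sniffing: look up the mango type, check it against the
# department whitelist, and derive a content-addressed file name. Every
# whitelisted type above has an entry in EXTS, so the final lookup is safe.
def _classify(data, dep):
    mime = magic.from_buffer(data, mime=True)
    mtype = MIME_MTYPE[mime]  # KeyError signals an unsupported MIME type
    if mtype not in TYPE_ALLOWED[dep]:
        raise ValueError('file type not allowed for this dep')
    return '%s.%s' % (sha3_256(data).hexdigest(), EXTS[mtype])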
| 24.810345
| 73
| 0.635858
|
9a5cc32eb8d423266537616c2fd2072b4114deb3
| 2,258
|
py
|
Python
|
fabric_cm/credmgr/swagger_server/__main__.py
|
fabric-testbed/CredentialManager
|
da8ce54ab78544ff907af81d8cd7723ff48f6652
|
[
"MIT"
] | 1
|
2021-05-24T17:20:07.000Z
|
2021-05-24T17:20:07.000Z
|
fabric_cm/credmgr/swagger_server/__main__.py
|
fabric-testbed/CredentialManager
|
da8ce54ab78544ff907af81d8cd7723ff48f6652
|
[
"MIT"
] | 4
|
2021-06-07T16:18:45.000Z
|
2021-06-29T20:13:21.000Z
|
fabric_cm/credmgr/swagger_server/__main__.py
|
fabric-testbed/CredentialManager
|
da8ce54ab78544ff907af81d8cd7723ff48f6652
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author Komal Thareja (kthare10@renci.org)
"""
Main Entry Point
"""
import os
import signal
import connexion
import prometheus_client
import waitress
from flask import jsonify
from fabric_cm.credmgr.swagger_server import encoder
from fabric_cm.credmgr.config import CONFIG_OBJ
from fabric_cm.credmgr.logging import LOG
def main():
"""
Main Entry Point
"""
log = LOG
try:
app = connexion.App(__name__, specification_dir='swagger/')
app.app.json_encoder = encoder.JSONEncoder
app.add_api('swagger.yaml',
arguments={'title': 'Fabric Credential Manager API'},
pythonic_params=True)
port = CONFIG_OBJ.get_rest_port()
# prometheus server
prometheus_port = CONFIG_OBJ.get_prometheus_port()
prometheus_client.start_http_server(prometheus_port)
# Start up the server to expose the metrics.
waitress.serve(app, port=port)
except Exception as ex:
log.error("Exception occurred while starting Flask app")
log.error(ex)
raise ex
if __name__ == '__main__':
main()
| 32.724638
| 80
| 0.724978
|
9a5d1a5d6e04e787d275225f739fe6d7102b20fa
| 1,529
|
py
|
Python
|
backendapi/icon/migrations/0001_initial.py
|
fredblade/Pictogram
|
d5cc4a25f28b6d80facf51fa9528e8ff969f7c46
|
[
"MIT"
] | null | null | null |
backendapi/icon/migrations/0001_initial.py
|
fredblade/Pictogram
|
d5cc4a25f28b6d80facf51fa9528e8ff969f7c46
|
[
"MIT"
] | null | null | null |
backendapi/icon/migrations/0001_initial.py
|
fredblade/Pictogram
|
d5cc4a25f28b6d80facf51fa9528e8ff969f7c46
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2022-02-27 17:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import versatileimagefield.fields
| 41.324324
| 177
| 0.646828
|
9a5f6f4fdf92f5d8e97feaed00a42aa430e9c51a
| 424,971
|
py
|
Python
|
src/fmiprot.py
|
tanisc/FMIPROT
|
9035b5f89768e1028edd08dc7568b3208552f164
|
[
"Apache-2.0"
] | 4
|
2019-02-25T11:53:55.000Z
|
2021-03-16T20:16:56.000Z
|
src/fmiprot.py
|
tanisc/FMIPROT
|
9035b5f89768e1028edd08dc7568b3208552f164
|
[
"Apache-2.0"
] | 2
|
2021-09-14T09:54:42.000Z
|
2021-11-12T13:30:10.000Z
|
src/fmiprot.py
|
tanisc/FMIPROT
|
9035b5f89768e1028edd08dc7568b3208552f164
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# python version 2.7
# Cemal Melih Tanis (C)
###############################################################################
import os
import shutil
import datetime
from pytz import timezone
from uuid import uuid4
from definitions import *
import fetchers
import calculations
from calculations import calcnames, calccommands, paramnames, paramdefs, paramopts, calcids, calcdescs,paramhelps, calcnames_en
import maskers
import parsers
import sources
from data import *
import readers
import calcfuncs
import matplotlib, sys
import numpy as np
if sysargv['gui']:
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import Tkinter, Tkconstants, tkFileDialog, tkMessageBox, tkSimpleDialog, tkFont
import Tkinter as tk
import ttk
import matplotlib.dates as mdate
import PIL
from PIL import Image,ImageDraw, ImageFont
if sysargv['gui']:
from PIL import ImageTk
if os.path.sep == '/':
from PIL import _tkinter_finder
import mahotas
from copy import deepcopy
import subprocess
from auxdata import auxlist, auxnamelist
import auxdata
import comparators
import webbrowser
import h5py
import textwrap
import gc
if sysargv['gui']:
import FileDialog
if not sysargv['gui']:
Tkinter = None
import noTk as Tkinter
import noTk as tk
import noTk as tkMessageBox
import noTk as tkSimpleDialog
import noTk as webbrowser
import noTk as tkFont
if __name__ == "__main__":
app = monimet_gui(None)
app.title('FMIPROT ' + sysargv['version'])
if os.path.sep != "/":
app.iconbitmap(os.path.join(ResourcesDir,'monimet.ico'))
app.mainloop()
| 64.15625
| 917
| 0.714124
|
9a61264c94a41a473e6cc008dcf849ae78b0596c
| 898
|
py
|
Python
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 11
|
2019-06-25T17:01:12.000Z
|
2022-01-21T18:53:13.000Z
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 253
|
2019-05-24T12:48:32.000Z
|
2022-03-29T11:00:25.000Z
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 93
|
2019-04-17T09:22:43.000Z
|
2022-03-21T18:53:28.000Z
|
import sys
import subprocess
if __name__ == "__main__":
main()
| 30.965517
| 105
| 0.615813
|
9a61c54ca6366d9eef60d2491aa686f033543efd
| 3,261
|
py
|
Python
|
GAparsimony/util/config.py
|
misantam/GAparsimony
|
0241092dc5d7741b5546151ff829167588e4f703
|
[
"MIT"
] | null | null | null |
GAparsimony/util/config.py
|
misantam/GAparsimony
|
0241092dc5d7741b5546151ff829167588e4f703
|
[
"MIT"
] | 1
|
2021-12-05T10:24:55.000Z
|
2021-12-05T11:01:25.000Z
|
GAparsimony/util/config.py
|
misantam/GAparsimony
|
0241092dc5d7741b5546151ff829167588e4f703
|
[
"MIT"
] | null | null | null |
#################################################
#****************LINEAR MODELS******************#
#################################################
CLASSIF_LOGISTIC_REGRESSION = {"C":{"range": (1., 100.), "type": 1},
"tol":{"range": (0.0001,0.9999), "type": 1}}
CLASSIF_PERCEPTRON = {"tol":{"range": (0.0001,0.9999), "type": 1},
"alpha":{"range": (0.0001,0.9999), "type": 1}}
REG_LASSO = {"tol":{"range": (0.0001,0.9999), "type": 1},
"alpha":{"range": (1., 100.), "type": 1}}
REG_RIDGE = {"tol":{"range": (0.0001,0.9999), "type": 1},
"alpha":{"range": (1., 100.), "type": 1}}
################################################
#*****************SVM MODELS*******************#
################################################
CLASSIF_SVC = {"C":{"range": (1.,100.), "type": 1},
"alpha":{"range": (0.0001,0.9999), "type": 1}}
REG_SVR = {"C":{"range": (1.,100.), "type": 1},
"alpha":{"range": (0.0001,0.9999), "type": 1}}
##################################################
#******************KNN MODELS********************#
##################################################
CLASSIF_KNEIGHBORSCLASSIFIER = {"n_neighbors":{"range": (2,11), "type": 0},
"p":{"range": (1, 3), "type": 0}}
REG_KNEIGHBORSREGRESSOR = {"n_neighbors":{"range": (2,11), "type": 0},
"p":{"range": (1, 3), "type": 0}}
##################################################
#******************MLP MODELS********************#
##################################################
CLASSIF_MLPCLASSIFIER = {"tol":{"range": (0.0001,0.9999), "type": 1},
"alpha":{"range": (0.0001, 0.999), "type": 1}}
REG_MLPREGRESSOR = {"tol":{"range": (0.0001,0.9999), "type": 1},
"alpha":{"range": (0.0001, 0.999), "type": 1}}
##################################################
#*************Random Forest MODELS***************#
##################################################
CLASSIF_RANDOMFORESTCLASSIFIER = {"n_estimators":{"range": (100,250), "type": 0},
"max_depth":{"range": (4, 20), "type": 0},
"min_samples_split":{"range": (2,25), "type": 0}}
REG_RANDOMFORESTREGRESSOR = {"n_estimators":{"range": (100,250), "type": 0},
"max_depth":{"range": (4, 20), "type": 0},
"min_samples_split":{"range": (2,25), "type": 0}}
##################################################
#*************Decision trees MODELS**************#
##################################################
CLASSIF_DECISIONTREECLASSIFIER = {"min_weight_fraction_leaf":{"range": (0,20), "type": 0},
"max_depth":{"range": (4, 20), "type": 0},
"min_samples_split":{"range": (2,25), "type": 0}}
REG_DECISIONTREEREGRESSOR = {"min_weight_fraction_leaf":{"range": (0,20), "type": 0},
"max_depth":{"range": (4, 20), "type": 0},
"min_samples_split":{"range": (2,25), "type": 0}}
| 40.259259
| 90
| 0.340693
|
9a620af02d14a583cea144484597abc9077f8497
| 6,300
|
py
|
Python
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 1,109
|
2019-06-20T19:23:27.000Z
|
2022-03-20T14:03:43.000Z
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 63
|
2019-06-21T05:36:17.000Z
|
2021-05-26T21:08:15.000Z
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 181
|
2019-06-20T19:42:05.000Z
|
2022-03-21T13:05:13.000Z
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import logging
from delorean import Delorean
import tornado.web
from gryphon.dashboards.handlers.admin_base import AdminBaseHandler
from gryphon.lib.exchange import exchange_factory
from gryphon.lib.models.order import Order
from gryphon.lib.models.exchange import Exchange as ExchangeData
from gryphon.lib.models.exchange import Balance
from gryphon.lib.models.transaction import Transaction
from gryphon.lib.money import Money
logger = logging.getLogger(__name__)
BANK_ACCOUNT_HIGHLIGHT_THRESHOLD = 30000
| 33.157895
| 87
| 0.623968
|
9a63239cdeadf5547e515d79f10a494c6c3288e7
| 4,897
|
py
|
Python
|
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
import yfinance as yf
import numpy as np
import pandas as pd
if __name__ == "__main__":
stock = StockSetup('SPY', 3)
print(stock.data.tail())
print(stock.data.isna().sum())
| 44.926606
| 195
| 0.596488
|
9a636c8c285701e4e227ff48aaa2926973c39b10
| 1,893
|
py
|
Python
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 47
|
2019-08-15T21:36:36.000Z
|
2022-03-18T23:44:59.000Z
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 52
|
2019-06-17T09:43:04.000Z
|
2022-03-22T05:00:53.000Z
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 55
|
2019-06-02T22:18:01.000Z
|
2022-03-29T07:20:31.000Z
|
from collections import OrderedDict
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
| 25.581081
| 77
| 0.59588
|
9a64215513cbe7b2b8f68643b42ce0ea2da19bba
| 147
|
py
|
Python
|
api/schema/__init__.py
|
wepickheroes/wepickheroes.github.io
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | 3
|
2018-02-15T20:04:23.000Z
|
2018-09-29T18:13:55.000Z
|
api/schema/__init__.py
|
wepickheroes/wepickheroes.github.io
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | 5
|
2018-01-31T02:01:15.000Z
|
2018-05-11T04:07:32.000Z
|
api/schema/__init__.py
|
prattl/wepickheroes
|
032c2a75ef058aaceb795ce552c52fbcc4cdbba3
|
[
"MIT"
] | null | null | null |
import graphene
from schema.queries import Query
from schema.mutations import Mutations
schema = graphene.Schema(query=Query, mutation=Mutations)
| 24.5
| 57
| 0.836735
|
9a6446896e65dc764ddad3e136039fc438fa2758
| 1,710
|
py
|
Python
|
airbox/commands/__init__.py
|
lewisjared/airbox
|
56bfdeb3e81bac47c80fbf249d9ead31c94a2139
|
[
"MIT"
] | null | null | null |
airbox/commands/__init__.py
|
lewisjared/airbox
|
56bfdeb3e81bac47c80fbf249d9ead31c94a2139
|
[
"MIT"
] | null | null | null |
airbox/commands/__init__.py
|
lewisjared/airbox
|
56bfdeb3e81bac47c80fbf249d9ead31c94a2139
|
[
"MIT"
] | null | null | null |
"""
This module contains a number of other commands that can be run via the cli.
All classes in this submodule which inherit the baseclass `airbox.commands.base.Command` are automatically included in
the possible commands to execute via the commandline. The commands can be called using their `name` property.
"""
from logging import getLogger
from .backup import BackupCommand
from .backup_sync import BackupSyncCommand
from .basic_plot import BasicPlotCommand
from .create_mounts import CreateMountsCommand
from .install import InstallCommand
from .print_fstab import PrintFstabCommand
from .run_schedule import RunScheduleCommand
from .spectronus_subset import SpectronusSubsetCommand
from .subset import SubsetCommand
logger = getLogger(__name__)
# Commands are registered below
_commands = [
BackupCommand(),
BackupSyncCommand(),
BasicPlotCommand(),
CreateMountsCommand(),
InstallCommand(),
PrintFstabCommand(),
RunScheduleCommand(),
SpectronusSubsetCommand(),
SubsetCommand()
]
def find_commands():
"""
Find all the Commands in this package (see the sketch below).
:return: list of Command classes found
"""
# TODO: Make this actually do that. For now commands are manually registered
pass
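# --- Illustrative sketch (not part of the original module) ---
# A minimal discovery routine matching the module docstring, assuming the
# base class lives at airbox.commands.base.Command; the module currently
# registers its commands manually instead.
def _find_commands_sketch():
    import importlib
    import inspect
    import pkgutil
    from .base import Command  # assumed location of the base class
    found = []
    for info in pkgutil.iter_modules(__path__):
        module = importlib.import_module('.' + info.name, __package__)
        for _, obj in inspect.getmembers(module, inspect.isclass):
            if issubclass(obj, Command) and obj is not Command:
                found.append(obj())
    return found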
def initialise_commands(parser):
"""
Initialise the parser with the commandline arguments for each parser
:param parser:
:return:
"""
find_commands()
for c in _commands:
p = parser.add_parser(c.name, help=c.help)
c.initialise_parser(p)
def run_command(cmd_name):
"""
Attempts to run a command
:param cmd_name: name of the command to run
"""
for c in _commands:
if cmd_name == c.name:
return c.run()
| 26.307692
| 118
| 0.729825
|
9a67bbeeb8843ddedf058092d195c66fcbe342a3
| 1,881
|
py
|
Python
|
waveguide/waveguide_test.py
|
DentonGentry/gfiber-platform
|
2ba5266103aad0b7b676555eebd3c2061ddb8333
|
[
"Apache-2.0"
] | 8
|
2017-09-24T03:11:46.000Z
|
2021-08-24T04:29:14.000Z
|
waveguide/waveguide_test.py
|
DentonGentry/gfiber-platform
|
2ba5266103aad0b7b676555eebd3c2061ddb8333
|
[
"Apache-2.0"
] | null | null | null |
waveguide/waveguide_test.py
|
DentonGentry/gfiber-platform
|
2ba5266103aad0b7b676555eebd3c2061ddb8333
|
[
"Apache-2.0"
] | 1
|
2017-10-05T23:04:10.000Z
|
2017-10-05T23:04:10.000Z
|
#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import waveguide
from wvtest import wvtest
if __name__ == '__main__':
wvtest.wvtest_main()
| 28.938462
| 74
| 0.696438
|
9a67d0c9f6bb396b9d590ca653e1ee83e64bff97
| 3,421
|
py
|
Python
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | 2
|
2019-03-26T15:37:48.000Z
|
2020-01-03T03:47:30.000Z
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | 2
|
2021-03-25T21:27:09.000Z
|
2021-06-01T21:20:04.000Z
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | null | null | null |
import re
from ava.common.check import _ValueCheck, _TimingCheck
from ava.common.exception import InvalidFormatException
# metadata
name = __name__
description = "checks for shell injection"
| 31.385321
| 117
| 0.501315
|
7bd4127115e5637b5b3d7a956f2d5a45c70e9ad5
| 5,536
|
py
|
Python
|
matlab/FRCNN/For_LOC/python/Generate_Trecvid_Data.py
|
xyt2008/frcnn
|
32a559e881cceeba09a90ff45ad4aae1dabf92a1
|
[
"BSD-2-Clause"
] | 198
|
2018-01-07T13:44:29.000Z
|
2022-03-21T12:06:16.000Z
|
matlab/FRCNN/For_LOC/python/Generate_Trecvid_Data.py
|
xyt2008/frcnn
|
32a559e881cceeba09a90ff45ad4aae1dabf92a1
|
[
"BSD-2-Clause"
] | 18
|
2018-02-01T13:24:53.000Z
|
2021-04-26T10:51:47.000Z
|
matlab/FRCNN/For_LOC/python/Generate_Trecvid_Data.py
|
xyt2008/frcnn
|
32a559e881cceeba09a90ff45ad4aae1dabf92a1
|
[
"BSD-2-Clause"
] | 82
|
2018-01-06T14:21:43.000Z
|
2022-02-16T09:39:58.000Z
|
import os
import sys  # needed for sys.exit() below
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import cPickle
import subprocess
import uuid
if __name__ == '__main__':
#Save_Name = './dataset/8.train_val'
ImageSets = ['../LOC/LOC_Split/trecvid_val_8.txt', '../LOC/LOC_Split/trecvid_train_8.txt']
ImageSets = ['../LOC/LOC_Split/trecvid_train_Animal_Music.txt', '../LOC/LOC_Split/trecvid_val_Animal_Music.txt']
ImageSets = ['../LOC/LOC_Split/trecvid_5_manual_train.txt']
ImageSets = ['../LOC/LOC_Split/trecvid_train_8.txt', '../LOC/LOC_Split/trecvid_val_8.txt', '../LOC/LOC_Split/trecvid_train_Animal_Music.txt', '../LOC/LOC_Split/trecvid_val_Animal_Music.txt']
num_cls = 10
Save_Name = '../dataset/{}.train_val'.format(num_cls)
_wind_to_ind, _class_to_ind = Get_Class_Ind(num_cls)
for ImageSet in ImageSets:
if not os.path.isfile(ImageSet):
print 'File({}) does not exist'.format(ImageSet)
sys.exit(1)
else:
print 'Open File : {} '.format(ImageSet)
print 'Save into : {} '.format(Save_Name)
out_file = open(Save_Name, 'w')
ids = 0
count_cls = np.zeros((num_cls+1), dtype=np.int32)
assert count_cls.shape[0]-1 == len(_class_to_ind)
for ImageSet in ImageSets:
file = open(ImageSet, 'r')
while True:
line = file.readline()
if line == '':
break
line = line.strip('\n')
xml_path = '../LOC/BBOX/{}.xml'.format(line)
rec = load_annotation(xml_path, _wind_to_ind)
out_file.write('# {}\n'.format(ids))
ids = ids + 1
out_file.write('{}.JPEG\n'.format(line))
boxes = rec['boxes']
gt_classes = rec['gt_classes']
assert boxes.shape[0] == gt_classes.shape[0]
out_file.write('{}\n'.format(boxes.shape[0]))
for j in range(boxes.shape[0]):
out_file.write('{} {} {} {} {} 0\n'.format(int(gt_classes[j]),int(boxes[j,0]),int(boxes[j,1]),int(boxes[j,2]),int(boxes[j,3])))
count_cls[ int(gt_classes[j]) ] = count_cls[ int(gt_classes[j]) ] + 1
if ids % 2000 == 0:
print 'print {} image with recs into {}'.format(ids, Save_Name)
file.close()
for i in range(count_cls.shape[0]):
print ('%2d th : %4d' % (i, count_cls[i]))
i = i + 1
out_file.close()
| 37.659864
| 194
| 0.588873
|
7bd4c7d5599bd575e062c27d1c3e19928097f821
| 5,967
|
py
|
Python
|
train.py
|
ProfessorHuang/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 11
|
2020-12-09T10:38:47.000Z
|
2022-03-07T13:12:48.000Z
|
train.py
|
lllllllllllll-llll/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 3
|
2020-11-24T02:23:02.000Z
|
2021-04-18T15:31:51.000Z
|
train.py
|
ProfessorHuang/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 2
|
2021-04-07T06:17:46.000Z
|
2021-11-11T07:41:46.000Z
|
import argparse
import logging
import os
import sys
import numpy as np
from tqdm import tqdm
import time
import torch
import torch.nn as nn
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from models.unet import UNet
from models.nested_unet import NestedUNet
from datasets.promise12 import Promise12
from datasets.chaos import Chaos
from dice_loss import DiceBCELoss, dice_coeff
from eval import eval_net
torch.manual_seed(2020)
if __name__ == '__main__':
args = get_args()
args.save = 'logs_train/{}-{}-{}'.format(args.model, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(args.save):
os.makedirs(args.save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(f'''
Model: {args.model}
Dataset: {args.dataset}
Total Epochs: {args.epochs}
Batch size: {args.batch_size}
Learning rate: {args.lr}
Weight decay: {args.weight_decay}
Device: GPU{args.gpu}
Log name: {args.save}
''')
torch.cuda.set_device(args.gpu)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# choose a model
if args.model == 'unet':
net = UNet()
elif args.model == 'nestedunet':
net = NestedUNet()
net.to(device=device)
# choose a dataset
if args.dataset == 'promise12':
dir_data = '../data/promise12'
trainset = Promise12(dir_data, mode='train')
valset = Promise12(dir_data, mode='val')
elif args.dataset == 'chaos':
dir_data = '../data/chaos'
trainset = Chaos(dir_data, mode='train')
valset = Chaos(dir_data, mode='val')
try:
train_net(net=net,
trainset=trainset,
valset=valset,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
weight_decay=args.weight_decay,
device=device,
log_save_path=args.save)
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
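# --- Illustrative sketch (get_args itself is not shown above) ---
# A hypothetical parser providing the attributes the __main__ block
# reads; every default below is an assumption, not the original
# configuration.
def _get_args_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', choices=['unet', 'nestedunet'], default='unet')
    parser.add_argument('--dataset', choices=['promise12', 'chaos'], default='promise12')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-8)
    parser.add_argument('--gpu', type=int, default=0)
    return parser.parse_args()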
| 37.062112
| 121
| 0.622256
|
7bd5134da373e6ab71f1575fcac61884fd8fa7f9
| 41
|
py
|
Python
|
bot/run.py
|
anhhanuman/python-selenium
|
6dbb169282c44c50189447a1c9a303ae1a790a8b
|
[
"Apache-2.0"
] | null | null | null |
bot/run.py
|
anhhanuman/python-selenium
|
6dbb169282c44c50189447a1c9a303ae1a790a8b
|
[
"Apache-2.0"
] | 5
|
2021-09-02T13:02:25.000Z
|
2021-09-20T04:58:37.000Z
|
bot/run.py
|
anhhanuman/python-selenium
|
6dbb169282c44c50189447a1c9a303ae1a790a8b
|
[
"Apache-2.0"
] | null | null | null |
from booking.constants import myConstant
| 20.5
| 40
| 0.878049
|
7bd7021be4efb1d2b67a9ea0b8c76a83b68b38ed
| 411
|
py
|
Python
|
geoxml.py
|
ssubramanian90/UMich-Python-coursera
|
35aa6b7d939852e7e9f1751d6a7b369910c5a572
|
[
"bzip2-1.0.6"
] | null | null | null |
geoxml.py
|
ssubramanian90/UMich-Python-coursera
|
35aa6b7d939852e7e9f1751d6a7b369910c5a572
|
[
"bzip2-1.0.6"
] | null | null | null |
geoxml.py
|
ssubramanian90/UMich-Python-coursera
|
35aa6b7d939852e7e9f1751d6a7b369910c5a572
|
[
"bzip2-1.0.6"
] | null | null | null |
import urllib
import xml.etree.ElementTree as ET
address = raw_input('Enter location: ')
url = address
print 'Retrieving', url
uh = urllib.urlopen(url)
data = uh.read()
print 'Retrieved',len(data),'characters'
tree = ET.fromstring(data)
sumcount=count=0
counts = tree.findall('.//count')
for i in counts:
count+=1
sumcount+= int(i.text)
print 'Count: '+str(count)
print 'Sum: '+str(sumcount)
| 17.125
| 40
| 0.690998
|
7bd7513f32c35775cd41faee3dba10cf9bfca50a
| 882
|
py
|
Python
|
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | 3
|
2016-01-28T20:35:46.000Z
|
2020-03-08T08:49:07.000Z
|
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | null | null | null |
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
from flask.ext.tweepy import Tweepy
app = Flask(__name__)
app.config.setdefault('TWEEPY_CONSUMER_KEY', 'sve32G2LtUhvgyj64J0aaEPNk')
app.config.setdefault('TWEEPY_CONSUMER_SECRET', '0z4NmfjET4BrLiOGsspTkVKxzDK1Qv6Yb2oiHpZC9Vi0T9cY2X')
app.config.setdefault('TWEEPY_ACCESS_TOKEN_KEY', '1425531373-dvjiA55ApSFEnTAWPzzZAZLRoGDo3OTTtt4ER1W')
app.config.setdefault('TWEEPY_ACCESS_TOKEN_SECRET', '357nVGYtynDtDBmqAZw2vxeXE3F8GbqBSqWInwStDluDX')
tweepy = Tweepy(app)
| 38.347826
| 102
| 0.794785
|
7bd7c0bcead87f462866473027496b7fc3302170
| 128
|
py
|
Python
|
sftp_sync/__init__.py
|
bluec0re/python-sftpsync
|
f68a8cb47ff38cdf883d93c448cf1bcc9df7f532
|
[
"MIT"
] | 3
|
2017-06-09T09:23:03.000Z
|
2021-12-10T00:52:27.000Z
|
sftp_sync/__init__.py
|
bluec0re/python-sftpsync
|
f68a8cb47ff38cdf883d93c448cf1bcc9df7f532
|
[
"MIT"
] | null | null | null |
sftp_sync/__init__.py
|
bluec0re/python-sftpsync
|
f68a8cb47ff38cdf883d93c448cf1bcc9df7f532
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .__main__ import main
from .sftp import *
from .sync import *
__version__ = '0.6'
| 16
| 38
| 0.765625
|
7bd8ac16582450f85a23c7ef200dbfd91aa09837
| 2,636
|
py
|
Python
|
core/predictor/RF/rf_predict.py
|
LouisYZK/dds-avec2019
|
9a0ee86bddf6c23460a689bde8d75302f1d5aa45
|
[
"BSD-2-Clause"
] | 8
|
2020-02-28T04:04:30.000Z
|
2021-12-28T07:06:06.000Z
|
core/predictor/RF/rf_predict.py
|
LouisYZK/dds-avec2019
|
9a0ee86bddf6c23460a689bde8d75302f1d5aa45
|
[
"BSD-2-Clause"
] | 1
|
2021-04-18T09:35:13.000Z
|
2021-04-18T09:35:13.000Z
|
core/predictor/RF/rf_predict.py
|
LouisYZK/dds-avec2019
|
9a0ee86bddf6c23460a689bde8d75302f1d5aa45
|
[
"BSD-2-Clause"
] | 2
|
2020-03-26T21:42:15.000Z
|
2021-09-09T12:50:41.000Z
|
"""Simple predictor using random forest
"""
import pandas as pd
import numpy as np
import math
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from core.predictor.predictor import Predictor
from common.sql_handler import SqlHandler
from common.metric import ccc_score
import config
from global_values import *
from common.log_handler import get_logger
logger = get_logger()
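The content stops at the imports; as a hedged illustration of how the scikit-learn pieces above fit together, here is a minimal sketch on synthetic data (X_demo, y_demo, and rf are hypothetical names, not the project's Predictor subclass):
# minimal usage sketch with synthetic data; not the project's actual predictor
X_demo = np.random.rand(100, 5)
y_demo = np.random.rand(100)
rf = RandomForestRegressor(n_estimators=100, random_state=0)
scores = cross_val_score(rf, X_demo, y_demo, scoring='neg_mean_absolute_error', cv=5)
print('cross-validated MAE:', -scores.mean())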
| 30.651163
| 90
| 0.638088
|
7bd8f52d214214860defef756924562c2d718956
| 2,135
|
py
|
Python
|
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | 1
|
2022-02-12T18:43:43.000Z
|
2022-02-12T18:43:43.000Z
|
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | null | null | null |
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | null | null | null |
def showSpeed(func, r, *args):
'''Usage: showSpeed(function, runs)
You can also pass arguments into <function> like so:
showSpeed(function, runs, <other>, <args>, <here> ...)
showSpeed() prints the average execution time of <function> over <runs> runs
'''
import os, sys, gc
from time import perf_counter as pf
garbage = gc.isenabled()
gc.disable()
start = pf()
with noPrint():
for _ in range(r):
func(*args)
end = pf()
if garbage:
gc.enable()
print(f'{formatted((end-start)/r)}')
def getSpeed(func, r, *args):
'''Usage: getSpeed(function, runs)
You can also pass arguments into <function> like so:
getSpeed(function, runs, <other>, <args>, <here> ...)
getSpeed() returns the average execution time of <function> over <runs> runs, as a float
'''
import os, sys, gc
from time import perf_counter as pf
garbage = gc.isenabled()
gc.disable()
start = pf()
with noPrint():
for _ in range(r):
func(*args)
end = pf()
if garbage:
gc.enable()
return (end-start)/r
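Both functions above depend on a noPrint() context manager and a formatted() helper that this excerpt does not include; the following is a minimal sketch of plausible implementations (the names come from the calls above, the bodies are assumptions):
import os
import sys
from contextlib import contextmanager
@contextmanager
def noPrint():
    # silence stdout while the timed function runs
    old_stdout, sys.stdout = sys.stdout, open(os.devnull, 'w')
    try:
        yield
    finally:
        sys.stdout.close()
        sys.stdout = old_stdout
def formatted(seconds):
    # render a duration with a readable unit
    for unit, scale in (('s', 1.0), ('ms', 1e-3), ('us', 1e-6)):
        if seconds >= scale:
            return f'{seconds / scale:.3f} {unit}'
    return f'{seconds / 1e-9:.3f} ns'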
| 31.865672
| 92
| 0.562061
|
7bd9a84e5c6f84dbd90d1bc72cc33fccf0f2c06c
| 9,106
|
py
|
Python
|
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | 7
|
2021-03-04T15:43:21.000Z
|
2021-07-08T08:42:23.000Z
|
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | null | null | null |
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | 2
|
2021-03-11T12:04:42.000Z
|
2021-04-20T16:33:31.000Z
|
#!/usr/bin/env python
u"""
polygonize.py
Yara Mohajerani (Last update 09/2020)
Read output predictions and convert to shapefile lines
"""
import os
import sys
import rasterio
import numpy as np
import getopt
import shapefile
from skimage.measure import find_contours
from shapely.geometry import Polygon,LineString,Point
#-- main function
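# The original main() is truncated from this record; a minimal sketch under
# assumed I/O conventions (input prediction GeoTIFF, output shapefile path):
def main():
    infile, outfile = sys.argv[1], sys.argv[2]
    with rasterio.open(infile) as src:
        pred = src.read(1)
        transform = src.transform
    # contour the prediction map at 0.5 and save each contour as a polyline
    w = shapefile.Writer(outfile, shapeType=shapefile.POLYLINE)
    w.field('ID', 'N')
    for i, contour in enumerate(find_contours(pred, 0.5)):
        # find_contours yields (row, col); the affine transform maps to (x, y)
        coords = [transform * (c[1], c[0]) for c in contour]
        w.line([coords])
        w.record(i)
    w.close()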
#-- run main program
if __name__ == '__main__':
main()
| 32.992754
| 121
| 0.647595
|
7bdb2f5c5a190e7161ceacb56d31dd8753fd3925
| 4,573
|
py
|
Python
|
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from autofit.graphical import (
EPMeanField,
LaplaceOptimiser,
EPOptimiser,
Factor,
)
from autofit.messages import FixedMessage, NormalMessage
np.random.seed(1)
prior_std = 10.
error_std = 1.
a = np.array([[-1.3], [0.7]])
b = np.array([-0.5])
n_obs = 100
n_features, n_dims = a.shape
x = 5 * np.random.randn(n_obs, n_features)
y = x.dot(a) + b + np.random.randn(n_obs, n_dims)
def check_model_approx(mean_field, a_, b_, z_, x_, y_):
    X = np.c_[x, np.ones(len(x))]
    # analytic Bayesian posterior for linear regression: ridge-regularized
    # normal equations with regularization strength (error_std / prior_std)^2
    XTX = X.T.dot(X) + np.eye(3) * (error_std / prior_std)**2
    cov = np.linalg.inv(XTX) * error_std**2
cov_a = cov[:2, :]
cov_b = cov[2, :]
# Analytic results
mean_a = cov_a.dot(X.T.dot(y))
mean_b = cov_b.dot(X.T.dot(y))
a_std = cov_a.diagonal()[:, None] ** 0.5
b_std = cov_b[[-1]] ** 0.5
assert mean_field[a_].mean == pytest.approx(mean_a, rel=1e-2)
assert mean_field[b_].mean == pytest.approx(mean_b, rel=1e-2)
assert mean_field[a_].sigma == pytest.approx(a_std, rel=0.5)
assert mean_field[b_].sigma == pytest.approx(b_std, rel=0.5)
| 26.9
| 84
| 0.659086
|
7bdbfbdb118df696ee04cd30b0904cea6a77354a
| 1,716
|
py
|
Python
|
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
import math
from torch import nn
import torch
import torch.nn.functional as F
import linear_cpu as linear
| 29.586207
| 79
| 0.666084
|
7bdf6ec04e7754ae150125e027e057b6d43b24d9
| 11,907
|
py
|
Python
|
object_files_api/files_api.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | 1
|
2021-06-27T15:16:13.000Z
|
2021-06-27T15:16:13.000Z
|
object_files_api/files_api.py
|
ndlib/marble-manifest-pipeline
|
abc036e4c81a8a5e938373a43153e2492a17cbf8
|
[
"Apache-2.0"
] | 8
|
2019-11-05T18:58:23.000Z
|
2021-09-03T14:54:42.000Z
|
object_files_api/files_api.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | null | null | null |
""" Files API """
import boto3
import os
import io
from datetime import datetime, timedelta
import json
import time
from s3_helpers import write_s3_json, read_s3_json, delete_s3_key
from api_helpers import json_serial
from search_files import crawl_available_files, update_pdf_fields
from dynamo_helpers import add_file_to_process_keys, add_file_group_keys, get_iso_date_as_string, add_image_group_keys, add_media_group_keys, add_media_keys, add_image_keys
from dynamo_save_functions import save_file_system_record
from add_files_to_json_object import change_file_extensions_to_tif
from pipelineutilities.dynamo_query_functions import get_all_file_to_process_records_by_storage_system
| 62.340314
| 259
| 0.646342
|
7be095f1c9c4b3f5f33d92d1c96cc497d62846c5
| 40,240
|
py
|
Python
|
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
"""
import flask
import flask_login
import json
from flask_babel import _
from . import frontend
from .. import logic
from ..logic.object_permissions import Permissions
from ..logic.security_tokens import verify_token
from ..logic.languages import get_languages, get_language, get_language_by_lang_code
from ..models.languages import Language
from .projects_forms import CreateProjectForm, EditProjectForm, LeaveProjectForm, InviteUserToProjectForm, InviteGroupToProjectForm, AddSubprojectForm, RemoveSubprojectForm, DeleteProjectForm, RemoveProjectMemberForm, RemoveProjectGroupForm, ObjectLinkForm
from .permission_forms import PermissionsForm
from .utils import check_current_user_is_not_readonly
from ..logic.utils import get_translated_text
| 56.437588
| 256
| 0.675149
|
7be58215b629ccdaed1b12b4ee8ac016d5bf374b
| 1,474
|
py
|
Python
|
setup.py
|
caalle/caaalle
|
3653155338fefde73579508ee83905a8ad8e3924
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
caalle/caaalle
|
3653155338fefde73579508ee83905a8ad8e3924
|
[
"Apache-2.0"
] | 4
|
2021-04-26T18:42:38.000Z
|
2021-04-26T18:42:41.000Z
|
setup.py
|
caalle/caaalle
|
3653155338fefde73579508ee83905a8ad8e3924
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import codecs
import os
import re
from setuptools import setup
with open('README.md', 'r') as f:
readme = f.read()
here = os.path.abspath(os.path.dirname(__file__))
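# The original file defines the standard single-source-version helpers that
# the codecs/os/re imports above imply; a minimal sketch of that recipe:
def read(*parts):
    with codecs.open(os.path.join(here, *parts), 'r') as fp:
        return fp.read()
def find_version(*file_paths):
    version_file = read(*file_paths)
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')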
_title = 'caaalle'
_description = 'caaalle'
_author = 'Carl Larsson'
_author_email = 'example@gmail.com'
_license = 'Apache 2.0'
_url = 'https://github.com/caalle/caaalle'
setup(
name=_title,
description=_description,
long_description=readme,
long_description_content_type='text/markdown',
version=find_version("caaalle", "__init__.py"),
author=_author,
author_email=_author_email,
url=_url,
packages=['caaalle'],
include_package_data=True,
python_requires=">=3.5.*",
install_requires=[],
license=_license,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.5'
],
keywords='caaalle'
)
| 26.321429
| 68
| 0.643148
|
7be827f0693117abffb3e3ef853dcd8e6d5807a0
| 10,522
|
py
|
Python
|
kevlar/tests/test_novel.py
|
johnsmith2077/kevlar
|
3ed06dae62479e89ccd200391728c416d4df8052
|
[
"MIT"
] | 24
|
2016-12-07T07:59:09.000Z
|
2019-03-11T02:05:36.000Z
|
kevlar/tests/test_novel.py
|
johnsmith2077/kevlar
|
3ed06dae62479e89ccd200391728c416d4df8052
|
[
"MIT"
] | 325
|
2016-12-07T07:37:17.000Z
|
2019-03-12T19:01:40.000Z
|
kevlar/tests/test_novel.py
|
standage/kevlar
|
622d1869266550422e91a60119ddc7261eea434a
|
[
"MIT"
] | 8
|
2017-08-17T01:37:39.000Z
|
2019-03-01T16:17:44.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import filecmp
import glob
import json
import pytest
import re
from tempfile import NamedTemporaryFile, mkdtemp
import screed
from shutil import rmtree
import sys
import kevlar
from kevlar.tests import data_file, data_glob
from khmer import Counttable
def test_novel_two_cases(capsys):
cases = kevlar.tests.data_glob('trio1/case6*.fq')
controls = kevlar.tests.data_glob('trio1/ctrl[5,6].fq')
with NamedTemporaryFile(suffix='.ct') as case1ct, \
NamedTemporaryFile(suffix='.ct') as case2ct, \
NamedTemporaryFile(suffix='.ct') as ctrl1ct, \
NamedTemporaryFile(suffix='.ct') as ctrl2ct:
counttables = [case1ct, case2ct, ctrl1ct, ctrl2ct]
seqfiles = cases + controls
for ct, seqfile in zip(counttables, seqfiles):
arglist = ['count', '--ksize', '19', '--memory', '1e7', ct.name,
seqfile]
print(arglist)
args = kevlar.cli.parser().parse_args(arglist)
kevlar.count.main(args)
arglist = ['novel', '--ksize', '19', '--memory', '1e7',
'--ctrl-max', '1', '--case-min', '7',
'--case', cases[0], '--case', cases[1],
'--case-counts', case1ct.name, case2ct.name,
'--control-counts', ctrl1ct.name, ctrl2ct.name]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
assert out.strip() != ''
for line in out.split('\n'):
if not line.endswith('#') or line.startswith('#mateseq'):
continue
abundmatch = re.search(r'(\d+) (\d+) (\d+) (\d+)#$', line)
assert abundmatch, line
case1 = int(abundmatch.group(1))
case2 = int(abundmatch.group(2))
ctl1 = int(abundmatch.group(3))
ctl2 = int(abundmatch.group(4))
assert case1 >= 7 and case2 >= 7
assert ctl1 <= 1 and ctl2 <= 1
| 37.180212
| 79
| 0.585535
|
7be972ac4586def48187bfcf50e95c9e16542c4d
| 361
|
py
|
Python
|
Python Advanced Retake Exam - 16 Dec 2020/Problem 3- Magic triangle - Pascal.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
Python Advanced Retake Exam - 16 Dec 2020/Problem 3- Magic triangle - Pascal.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
Python Advanced Retake Exam - 16 Dec 2020/Problem 3- Magic triangle - Pascal.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
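The record's content reduces to a single call; here is a hedged sketch of the get_magic_triangle function the exercise name implies (build and print n rows of Pascal's triangle):
def get_magic_triangle(n):
    # each inner row is the pairwise sums of the previous row, framed by 1s
    triangle = [[1]]
    for _ in range(1, n):
        prev = triangle[-1]
        triangle.append([1] + [prev[i] + prev[i + 1] for i in range(len(prev) - 1)] + [1])
    for row in triangle:
        print(*row)
    return triangle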
get_magic_triangle(5)
| 21.235294
| 46
| 0.509695
|
7bea7db6a9ed79dea66853c2fd9ed8df8241cc8b
| 1,353
|
py
|
Python
|
bot.py
|
egor5q/pvp-combat
|
42d0f9df14e35c408deb7a360a9f7544ceae7dd7
|
[
"MIT"
] | null | null | null |
bot.py
|
egor5q/pvp-combat
|
42d0f9df14e35c408deb7a360a9f7544ceae7dd7
|
[
"MIT"
] | null | null | null |
bot.py
|
egor5q/pvp-combat
|
42d0f9df14e35c408deb7a360a9f7544ceae7dd7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
#client=MongoClient(os.environ['database'])
#db=client.
#users=db.users
print('7777')
bot.polling(none_stop=True,timeout=600)
| 22.932203
| 115
| 0.625277
|
7beab3658ca8052cfa8c2cfea3b8cd3bd3c9a157
| 262
|
py
|
Python
|
py4mc/__init__.py
|
capslock321/py4mc
|
aad43d33f2ab1d264f0b86a84c80823309677994
|
[
"MIT"
] | null | null | null |
py4mc/__init__.py
|
capslock321/py4mc
|
aad43d33f2ab1d264f0b86a84c80823309677994
|
[
"MIT"
] | null | null | null |
py4mc/__init__.py
|
capslock321/py4mc
|
aad43d33f2ab1d264f0b86a84c80823309677994
|
[
"MIT"
] | null | null | null |
from .api import MojangApi
from .dispatcher import Dispatch
from .exceptions import (
ApiException,
ResourceNotFound,
InternalServerException,
UserNotFound,
)
__version__ = "0.0.1a"
__license__ = "MIT"
__author__ = "capslock321"
| 17.466667
| 33
| 0.698473
|
7bed1d2243d33ac3902ca09a4b56c1ae1c77465e
| 553
|
py
|
Python
|
server/players/query.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | 1
|
2021-10-03T05:44:32.000Z
|
2021-10-03T05:44:32.000Z
|
server/players/query.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
server/players/query.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
from loguru import logger
from channels.db import database_sync_to_async
from schema.base import query
from .models import Player
from .schemata import PlayerConnection
| 24.043478
| 74
| 0.755877
|
7bee6b98a8502317f53e2986edd1dc16f78c2ac7
| 50,039
|
py
|
Python
|
simleague/simleague.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | 38
|
2021-03-07T17:13:10.000Z
|
2022-02-28T19:50:00.000Z
|
simleague/simleague.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | 44
|
2021-03-12T19:13:32.000Z
|
2022-03-18T10:20:52.000Z
|
simleague/simleague.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | 33
|
2021-03-08T18:59:59.000Z
|
2022-03-23T10:57:46.000Z
|
import asyncio
import logging
import random
import time
from abc import ABC
from typing import Literal, Optional
import aiohttp
import discord
from redbot.core import Config, bank, checks, commands
from redbot.core.utils.chat_formatting import box
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from tabulate import tabulate
from .core import SimHelper
from .functions import WEATHER
from .simset import SimsetMixin
from .stats import StatsMixin
from .teamset import TeamsetMixin
# THANKS TO https://code.sololearn.com/ci42wd5h0UQX/#py FOR THE SIMULATION AND FIXATOR/AIKATERNA/STEVY FOR THE PILLOW HELP/LEVELER
| 43.85539
| 142
| 0.428846
|
7befce5f0d88c105c0447661c3338248d03f3ae9
| 2,118
|
py
|
Python
|
7_neural_networks/4_DeepLearning2.py
|
edrmonteiro/DataSciencePython
|
0a35fb085bc0b98b33e083d0e1b113a04caa3aac
|
[
"MIT"
] | null | null | null |
7_neural_networks/4_DeepLearning2.py
|
edrmonteiro/DataSciencePython
|
0a35fb085bc0b98b33e083d0e1b113a04caa3aac
|
[
"MIT"
] | null | null | null |
7_neural_networks/4_DeepLearning2.py
|
edrmonteiro/DataSciencePython
|
0a35fb085bc0b98b33e083d0e1b113a04caa3aac
|
[
"MIT"
] | null | null | null |
"""
Deep Learning
"""
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.compose import make_column_transformer
import os
path = os.path.abspath(os.getcwd()) + r"/0_dataset/"
dataset = pd.read_csv(path + "Credit2.csv", sep=";")
dataset
# separate out the variables; skip the first column since it has no semantic value
X = dataset.iloc[:,1:10].values
y = dataset.iloc[:, 10].values
# we now have an array rather than a DataFrame
X
# label-encode the checking_status column
# assigns integer codes from 0 to 3
labelencoder = LabelEncoder()
X[:,0] = labelencoder.fit_transform(X[:,0])
X
# one-hot encode the credit_history column
# this should add 5 columns
onehotencoder = make_column_transformer((OneHotEncoder(categories='auto', sparse=False), [1]), remainder="passthrough")
X = onehotencoder.fit_transform(X)
X
# drop one dummy column to avoid the dummy variable trap: X = X[:, 1:]
# label-encode the target class
labelencoder_Y = LabelEncoder()
y = labelencoder_Y.fit_transform(y)
y
# split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print(len(X_train),len(X_test),len(y_train),len(y_test))
# feature scaling: z-score standardization
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_test
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 12))
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
y_pred
# confusion matrix
cm = confusion_matrix(y_test, y_pred)
cm
| 29.830986
| 119
| 0.767705
|
7bf26d67b6d552692974b4958df2a46110802ae6
| 1,529
|
py
|
Python
|
src/python_settings/python_settings.py
|
tomatze/opendihu-webapp
|
0f08bdeb82348a1e30fa44db1ac3b9b1606f1da1
|
[
"MIT"
] | 17
|
2018-11-25T19:29:34.000Z
|
2021-09-20T04:46:22.000Z
|
src/python_settings/python_settings.py
|
tomatze/opendihu-webapp
|
0f08bdeb82348a1e30fa44db1ac3b9b1606f1da1
|
[
"MIT"
] | 1
|
2020-11-12T15:15:58.000Z
|
2020-12-29T15:29:24.000Z
|
src/python_settings/python_settings.py
|
tomatze/opendihu-webapp
|
0f08bdeb82348a1e30fa44db1ac3b9b1606f1da1
|
[
"MIT"
] | 4
|
2018-10-17T12:18:10.000Z
|
2021-05-28T13:24:20.000Z
|
import re
# import all settings-modules here, so we can only import this module to get them all
from python_settings.settings_activatable import *
from python_settings.settings_child_placeholder import *
from python_settings.settings_choice import *
from python_settings.settings_comment import *
from python_settings.settings_conditional import *
from python_settings.settings_container import *
from python_settings.settings_dict_entry import *
from python_settings.settings_empty_line import *
from python_settings.settings_list_entry import *
# this holds a complete settings.py by parsing its config-dict and storing the rest of the file in prefix and postfix
| 39.205128
| 120
| 0.695226
|
7bf3d0583faad7a302993fc30d577771cb1e654a
| 460
|
py
|
Python
|
titan/abstracts/decorator.py
|
DeSireFire/titans
|
9194950694084a7cbc6434dfec0ecb2e755f0cdf
|
[
"Apache-2.0"
] | 17
|
2020-03-14T01:08:07.000Z
|
2020-12-26T08:20:14.000Z
|
titan/abstracts/decorator.py
|
DeSireFire/titans
|
9194950694084a7cbc6434dfec0ecb2e755f0cdf
|
[
"Apache-2.0"
] | 4
|
2020-12-05T08:50:55.000Z
|
2022-02-27T06:48:21.000Z
|
titan/abstracts/decorator.py
|
DeSireFire/titans
|
9194950694084a7cbc6434dfec0ecb2e755f0cdf
|
[
"Apache-2.0"
] | 1
|
2020-05-24T06:57:03.000Z
|
2020-05-24T06:57:03.000Z
|
# -*- coding: utf-8 -*-
import timeit
from functools import wraps
from titan.manages.global_manager import GlobalManager
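The excerpt ends at the imports; a hedged sketch of the kind of timing decorator that timeit and wraps suggest (the name timed and the report format are assumptions, not the project's actual decorator):
def timed(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # measure wall-clock time of each call and report it
        start = timeit.default_timer()
        result = func(*args, **kwargs)
        print('{} took {:.6f}s'.format(func.__name__, timeit.default_timer() - start))
        return result
    return wrapper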
| 25.555556
| 70
| 0.630435
|
7bf5036dc7b11f3015385fa7ebed58f2c40e9c71
| 262
|
py
|
Python
|
src/cs2mako/patterns.py
|
eventbrite/cs2mako
|
163affcc764a574b4af543c3520b7f345992973a
|
[
"MIT"
] | null | null | null |
src/cs2mako/patterns.py
|
eventbrite/cs2mako
|
163affcc764a574b4af543c3520b7f345992973a
|
[
"MIT"
] | null | null | null |
src/cs2mako/patterns.py
|
eventbrite/cs2mako
|
163affcc764a574b4af543c3520b7f345992973a
|
[
"MIT"
] | 2
|
2015-04-03T05:35:36.000Z
|
2021-09-08T11:48:27.000Z
|
# Copyright (c) 2014 Eventbrite, Inc. All rights reserved.
# See "LICENSE" file for license.
import re
open_r_str = r'\<\?cs\s*([a-zA-Z]+)([:]|\s)'
close_r_str = r'\<\?cs\s*/([a-zA-Z]+)\s*\?\>'
open_r = re.compile(open_r_str)
close_r = re.compile(close_r_str)
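A quick illustration of what the two patterns capture (the ClearSilver template snippets are hypothetical):
m = open_r.search('<?cs if:user.is_admin ?>')
assert m and m.group(1) == 'if'  # group 1 is the ClearSilver command name
m = close_r.search('<?cs /if ?>')
assert m and m.group(1) == 'if'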
| 26.2
| 58
| 0.637405
|
7bf5401a73cd65b2b3dab4a303b9fc867d22f877
| 3,142
|
py
|
Python
|
presta_connect.py
|
subteno-it/presta_connect
|
7cc8f2f915b28ada40a03573651a3558e6503004
|
[
"MIT"
] | null | null | null |
presta_connect.py
|
subteno-it/presta_connect
|
7cc8f2f915b28ada40a03573651a3558e6503004
|
[
"MIT"
] | null | null | null |
presta_connect.py
|
subteno-it/presta_connect
|
7cc8f2f915b28ada40a03573651a3558e6503004
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Subteno IT
# License MIT License
import requests
import xmltodict
import string
import random
import io
| 34.911111
| 131
| 0.579885
|
7bf8224c1d14572f51a3d9141d24b9fbd1be25c1
| 2,884
|
py
|
Python
|
blender/SCAFFOLDER_settings.py
|
nodtem66/Scaffolder
|
c2b89e981192f61b028e1e8780a01894b1e34494
|
[
"MIT"
] | 8
|
2019-12-24T17:28:03.000Z
|
2022-03-23T02:49:28.000Z
|
blender/SCAFFOLDER_settings.py
|
nodtem66/Scaffolder
|
c2b89e981192f61b028e1e8780a01894b1e34494
|
[
"MIT"
] | 9
|
2019-12-27T18:10:05.000Z
|
2021-08-04T15:18:47.000Z
|
blender/SCAFFOLDER_settings.py
|
nodtem66/Scaffolder
|
c2b89e981192f61b028e1e8780a01894b1e34494
|
[
"MIT"
] | null | null | null |
import bpy
from bpy.types import Panel
from bpy.props import *
import math
default_surface_names = [
("bcc", "bcc", "", 1),
("schwarzp", "schwarzp", "", 2),
("schwarzd", "schwarzd", "", 3),
("gyroid", "gyroid", "", 4),
("double-p", "double-p", "", 5),
("double-d", "double-d", "", 6),
("double-gyroid", "double-gyroid", "", 7),
("lidinoid", "lidinoid", "", 8),
("schoen_iwp", "schoen_iwp", "", 9),
("neovius", "neovius", "", 10),
("tubular_g_ab", "tubular_g_ab", "", 11),
("tubular_g_c", "tubular_g_c", "", 12)
]
default_direction = [
("X", "X", "", 0),
("Y", "Y", "", 1),
("Z", "Z", "", 2),
("All", "All", "", 3)
]
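These 4-tuples follow Blender's (identifier, name, description, number) enum-item convention; a hedged one-liner showing how they would typically be consumed (the property name is hypothetical):
surface_items = EnumProperty(name="Surface", items=default_surface_names)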
| 40.055556
| 106
| 0.645631
|
7bf8ba88150b609b31fa7978009e2b6cda410d96
| 1,702
|
py
|
Python
|
examples/run_burgers.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | 4
|
2022-02-16T14:52:55.000Z
|
2022-03-17T13:31:42.000Z
|
examples/run_burgers.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | 3
|
2022-02-17T08:57:42.000Z
|
2022-03-28T08:41:53.000Z
|
examples/run_burgers.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | 7
|
2022-02-13T14:35:00.000Z
|
2022-03-28T08:51:11.000Z
|
import argparse
import torch
from torch.nn import Softplus
from pina import PINN, Plotter
from pina.model import FeedForward
from problems.burgers import Burgers1D
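# myFeature is used below but truncated from this record; a minimal sketch
# of a PINA-style extra-feature module (the exact feature the script used
# is an assumption):
class myFeature(torch.nn.Module):
    """Extra input feature, e.g. sin(pi * x) of the idx-th variable."""
    def __init__(self, idx):
        super().__init__()
        self.idx = idx
    def forward(self, x):
        return torch.sin(torch.pi * x[:, self.idx])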
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run PINA")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "-save", action="store_true")
group.add_argument("-l", "-load", action="store_true")
parser.add_argument("id_run", help="number of run", type=int)
parser.add_argument("features", help="extra features", type=int)
args = parser.parse_args()
feat = [myFeature(0)] if args.features else []
burgers_problem = Burgers1D()
model = FeedForward(
layers=[30, 20, 10, 5],
output_variables=burgers_problem.output_variables,
input_variables=burgers_problem.input_variables,
func=Softplus,
extra_features=feat,
)
pinn = PINN(
burgers_problem,
model,
lr=0.006,
error_norm='mse',
regularizer=0,
lr_accelerate=None)
if args.s:
pinn.span_pts(2000, 'latin', ['D'])
pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 't0'])
pinn.train(5000, 100)
pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features))
else:
pinn.load_state('pina.burger.{}.{}'.format(args.id_run, args.features))
plotter = Plotter()
plotter.plot(pinn)
| 28.366667
| 79
| 0.636898
|
7bf92b8ac984ff1d4af8bc11028ce720f6dccb7d
| 2,072
|
py
|
Python
|
questions/cousins-in-binary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 141
|
2017-12-12T21:45:53.000Z
|
2022-03-25T07:03:39.000Z
|
questions/cousins-in-binary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 32
|
2015-10-05T14:09:52.000Z
|
2021-05-30T10:28:41.000Z
|
questions/cousins-in-binary-tree/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 56
|
2015-09-30T05:23:28.000Z
|
2022-03-08T07:57:11.000Z
|
"""
In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.
Return true if and only if the nodes corresponding to the values x and y are cousins.
Example 1:
Input: root = [1,2,3,4], x = 4, y = 3
Output: false
Example 2:
Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
Output: true
Example 3:
Input: root = [1,2,3,null,4], x = 2, y = 3
Output: false
Constraints:
The number of nodes in the tree will be between 2 and 100.
Each node has a unique integer value from 1 to 100.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
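The solution itself is truncated from this record; a minimal level-order (BFS) sketch that satisfies the statement above, returning True only when x and y sit at the same depth under different parents:
from collections import deque
class Solution:
    def isCousins(self, root, x, y):
        queue = deque([(root, None)])
        while queue:
            found = {}  # value -> parent, for the current level only
            for _ in range(len(queue)):
                node, parent = queue.popleft()
                if node.val in (x, y):
                    found[node.val] = parent
                if node.left:
                    queue.append((node.left, node))
                if node.right:
                    queue.append((node.right, node))
            if len(found) == 2:
                return found[x] is not found[y]  # same level: cousins iff parents differ
            if len(found) == 1:
                return False  # the two values are at different depths
        return False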
| 28
| 117
| 0.531853
|
7bfad01ae563f31b06389bcaffa8bf4fb786658a
| 456
|
py
|
Python
|
utility_ai/models/action.py
|
TomasMaciulis/Utility-AI-API
|
29144e4b5dc038854335bd11ed3b072ba1231ebc
|
[
"MIT"
] | null | null | null |
utility_ai/models/action.py
|
TomasMaciulis/Utility-AI-API
|
29144e4b5dc038854335bd11ed3b072ba1231ebc
|
[
"MIT"
] | null | null | null |
utility_ai/models/action.py
|
TomasMaciulis/Utility-AI-API
|
29144e4b5dc038854335bd11ed3b072ba1231ebc
|
[
"MIT"
] | null | null | null |
from .configuration_entry import ConfigurationEntry
from utility_ai.traits.utility_score_trait import UtilityScoreTrait
| 30.4
| 67
| 0.699561
|
7bfb0d85a9d2727156196fca82066ec05a53a3a0
| 1,119
|
py
|
Python
|
widdy/styles.py
|
ubunatic/widdy
|
1e5923d90010f27e352ad3eebb670c09752dd86b
|
[
"MIT"
] | 2
|
2018-05-30T17:23:46.000Z
|
2019-08-29T20:32:27.000Z
|
widdy/styles.py
|
ubunatic/widdy
|
1e5923d90010f27e352ad3eebb670c09752dd86b
|
[
"MIT"
] | null | null | null |
widdy/styles.py
|
ubunatic/widdy
|
1e5923d90010f27e352ad3eebb670c09752dd86b
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
Style = namedtuple('Style', 'name fg bg')
default_pal = {
Style('inv-black', 'black', 'light gray'),
Style('green-bold', 'dark green,bold', ''),
Style('red-bold', 'dark red,bold', ''),
Style('blue-bold', 'dark blue,bold', ''),
Style('yellow-bold', 'yellow,bold', ''),
Style('magenta-bold', 'dark magenta,bold', ''),
Style('cyan-bold', 'dark cyan,bold', ''),
Style('green', 'dark green', ''),
Style('red', 'dark red', ''),
Style('blue', 'dark blue', ''),
Style('cyan', 'dark cyan', ''),
Style('magenta', 'dark magenta', ''),
Style('yellow', 'yellow', ''),
}
INV_BLACK = 'inv-black'
RED_BOLD = 'red-bold'
GREEN_BOLD = 'green-bold'
BLUE_BOLD = 'blue-bold'
MAGENTA_BOLD = 'magenta-bold'
CYAN_BOLD = 'cyan-bold'
YELLOW_BOLD = 'yellow-bold'
BLUE = 'blue'
GREEN = 'green'
RED = 'red'
MAGENTA = 'magenta'
CYAN = 'cyan'
YELLOW = 'yellow'
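Since Style is a namedtuple, each entry unpacks directly into the (name, foreground, background) triple that text-UI palettes such as urwid's expect; for illustration:
palette = [(s.name, s.fg, s.bg) for s in default_pal]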
| 29.447368
| 61
| 0.489723
|
7bfb8c398b66afff9f9537190851684dffe009d8
| 189
|
py
|
Python
|
basics.py
|
c25l/longmont_data_science_tensorflow
|
78302ab5b76a1e4632deda164615b4861c21f534
|
[
"MIT"
] | null | null | null |
basics.py
|
c25l/longmont_data_science_tensorflow
|
78302ab5b76a1e4632deda164615b4861c21f534
|
[
"MIT"
] | null | null | null |
basics.py
|
c25l/longmont_data_science_tensorflow
|
78302ab5b76a1e4632deda164615b4861c21f534
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# TF1-style graph execution: ops are defined first, then evaluated in a Session
import tensorflow as tf
x=tf.Variable(0.5)
y = x*x
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("x =",sess.run(x))
print("y =",sess.run(y))
| 18.9
| 43
| 0.687831
|
7bfc0a90c6e361e602b8b4fb5d3bb23952ab70e8
| 3,468
|
py
|
Python
|
nist_tools/combine_images.py
|
Nepherhotep/roboarchive-broom
|
a60c6038a5506c19edc6b74dbb47de525b246d2a
|
[
"MIT"
] | null | null | null |
nist_tools/combine_images.py
|
Nepherhotep/roboarchive-broom
|
a60c6038a5506c19edc6b74dbb47de525b246d2a
|
[
"MIT"
] | null | null | null |
nist_tools/combine_images.py
|
Nepherhotep/roboarchive-broom
|
a60c6038a5506c19edc6b74dbb47de525b246d2a
|
[
"MIT"
] | null | null | null |
import os
import random
import cv2
import numpy as np
from gen_textures import add_noise, texture, blank_image
from nist_tools.extract_nist_text import BaseMain, parse_args, display
if __name__ == '__main__':
random.seed(123)
args = parse_args()
CombineMain().main(args)
print('done')
| 31.527273
| 94
| 0.625144
|
7bfe07fff56233f17c17498061812fd747efa684
| 1,205
|
py
|
Python
|
auto_funcs/look_for_date.py
|
rhysrushton/testauto
|
9c32f40640f58703a0d063afbb647855fb680a61
|
[
"MIT"
] | null | null | null |
auto_funcs/look_for_date.py
|
rhysrushton/testauto
|
9c32f40640f58703a0d063afbb647855fb680a61
|
[
"MIT"
] | null | null | null |
auto_funcs/look_for_date.py
|
rhysrushton/testauto
|
9c32f40640f58703a0d063afbb647855fb680a61
|
[
"MIT"
] | null | null | null |
# this function looks for either the encounter date or the patient's date of birth
# so that we can avoid duplicate encounters.
import time
# this will select the element in the relevant div.
| 30.125
| 99
| 0.637344
|
7bfefe9a585dfb51817f970316b20305a606310a
| 1,047
|
py
|
Python
|
app/api/apis/token_api.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/apis/token_api.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/apis/token_api.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import g
from flask_restplus import Resource, marshal
from app import db
from app.api.namespaces.token_namespace import token_ns, token
from app.api.security.authentication import basic_auth, token_auth
| 32.71875
| 67
| 0.700096
|
7bff9b4a9c838befc20c601a3d326698664e8b5d
| 1,025
|
py
|
Python
|
quickSort.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
quickSort.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
quickSort.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# low --> Starting index, high --> Ending index
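# The Solution class is truncated from this record; a minimal Lomuto-style
# sketch consistent with the call below (sorts arr[low..high] in place and
# returns the list):
class Solution(object):
    def quickSort(self, arr, low, high):
        if low < high:
            p = self.partition(arr, low, high)
            self.quickSort(arr, low, p - 1)
            self.quickSort(arr, p + 1, high)
        return arr
    def partition(self, arr, low, high):
        # Lomuto partition: put the pivot (last element) in its final place
        pivot = arr[high]
        i = low - 1
        for j in range(low, high):
            if arr[j] <= pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[high] = arr[high], arr[i + 1]
        return i + 1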
test = Solution()
print(test.quickSort([10, 80, 30, 90, 40, 50, 70], 0, 6))
| 29.285714
| 66
| 0.520976
|
d0003ec058228de9777e23294e4fbffc93d7d212
| 4,816
|
py
|
Python
|
docker_multiarch/tool.py
|
CynthiaProtector/helo
|
ad9e22363a92389b3fa519ecae9061c6ead28b05
|
[
"Apache-2.0"
] | 399
|
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
docker_multiarch/tool.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 58
|
2017-05-30T23:25:32.000Z
|
2019-11-18T09:30:54.000Z
|
docker_multiarch/tool.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 107
|
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multi arch dockerized build tool.
"""
__author__ = 'Pedro Larroy'
__version__ = '0.1'
import os
import sys
import subprocess
import logging
import argparse
from subprocess import check_call
import glob
import re
def get_arches():
"""Get a list of architectures given our dockerfiles"""
dockerfiles = glob.glob("Dockerfile.build.*")
dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles))
arches = list(map(lambda x: re.sub(r"Dockerfile.build.(.*)", r"\1", x), dockerfiles))
arches.sort()
return arches
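# Minimal sketches of the helpers used below, which this excerpt omits
# (names are taken from the calls; path and tag formats are assumptions):
def get_dockerfile(arch):
    return "Dockerfile.build.{}".format(arch)
def get_docker_tag(arch):
    # tag format is an assumption
    return "build.{}".format(arch)
def artifact_path(arch):
    return os.path.join(os.getcwd(), "build", arch)
def mkdir_p(d):
    os.makedirs(d, exist_ok=True)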
def build(arch):
"""Build the given architecture in the container"""
assert arch in get_arches(), "No such architecture {0}, Dockerfile.build.{0} not found".format(arch)
logging.info("Building for target platform {0}".format(arch))
check_call(["docker", "build",
"-f", get_dockerfile(arch),
"-t", get_docker_tag(arch),
"."])
def collect_artifacts(arch):
"""Collects the artifacts built inside the docker container to the local fs"""
logging.info("Collect artifacts from build in {0}".format(artifact_path(arch)))
mkdir_p("build/{}".format(arch))
# Mount artifact_path on /$arch inside the container and copy the build output so we can access
# locally from the host fs
check_call(["docker","run",
"-v", "{}:/{}".format(artifact_path(arch), arch),
get_docker_tag(arch),
"bash", "-c", "cp -r /work/build/* /{}".format(arch)])
if __name__ == '__main__':
sys.exit(main())
| 30.871795
| 108
| 0.65054
|
d001b6743e397b1ed7c3a5a49549452902031c2c
| 150
|
py
|
Python
|
integrate/test/test_samples/sample_norun.py
|
Requirement-Engineers/default-coding-Bo2
|
f51e4e17af4fff077aebe2f3611c363da9ed9871
|
[
"Unlicense"
] | null | null | null |
integrate/test/test_samples/sample_norun.py
|
Requirement-Engineers/default-coding-Bo2
|
f51e4e17af4fff077aebe2f3611c363da9ed9871
|
[
"Unlicense"
] | null | null | null |
integrate/test/test_samples/sample_norun.py
|
Requirement-Engineers/default-coding-Bo2
|
f51e4e17af4fff077aebe2f3611c363da9ed9871
|
[
"Unlicense"
] | null | null | null |
import json
if __name__ == '__main__':
test_norun()
| 11.538462
| 27
| 0.593333
|
d003fb1f6605d874e72c3a666281e62431d7b2a8
| 3,283
|
py
|
Python
|
02module/module_containers.py
|
mayi140611/szzy_pytorch
|
81978d75513bc9a1b85aec05023d14fa6f748674
|
[
"Apache-2.0"
] | null | null | null |
02module/module_containers.py
|
mayi140611/szzy_pytorch
|
81978d75513bc9a1b85aec05023d14fa6f748674
|
[
"Apache-2.0"
] | null | null | null |
02module/module_containers.py
|
mayi140611/szzy_pytorch
|
81978d75513bc9a1b85aec05023d14fa6f748674
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
# @file name : module_containers.py
# @author : tingsongyu
# @date : 2019-09-20 10:08:00
# @brief : Sequential, ModuleList, ModuleDict
"""
import torch
import torchvision
import torch.nn as nn
from collections import OrderedDict
# ============================ Sequential
# net = LeNetSequential(classes=2)
# net = LeNetSequentialOrderDict(classes=2)
#
# fake_img = torch.randn((4, 3, 32, 32), dtype=torch.float32)
#
# output = net(fake_img)
#
# print(net)
# print(output)
# ============================ ModuleList
# net = ModuleList()
#
# print(net)
#
# fake_data = torch.ones((10, 10))
#
# output = net(fake_data)
#
# print(output)
# ============================ ModuleDict
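# The ModuleDict class itself is truncated from this record; a minimal
# sketch consistent with the calls below (a 10-channel conv stored under
# 'conv' and a ReLU under 'relu'):
class ModuleDict(nn.Module):
    def __init__(self):
        super(ModuleDict, self).__init__()
        self.choices = nn.ModuleDict({
            'conv': nn.Conv2d(10, 10, 3),
            'pool': nn.MaxPool2d(3)
        })
        self.activations = nn.ModuleDict({
            'relu': nn.ReLU(),
            'prelu': nn.PReLU()
        })
    def forward(self, x, choice, act):
        # pick a layer and an activation by name at call time
        x = self.choices[choice](x)
        x = self.activations[act](x)
        return x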
net = ModuleDict()
fake_img = torch.randn((4, 10, 32, 32))
output = net(fake_img, 'conv', 'relu')
print(output)
# 4 AlexNet
alexnet = torchvision.models.AlexNet()
| 22.486301
| 76
| 0.540664
|
d00408e74248e82eceb28ea83155d9b67a8bad9f
| 2,124
|
py
|
Python
|
tests/test_sample_images.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | null | null | null |
tests/test_sample_images.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | 20
|
2019-07-15T21:49:29.000Z
|
2020-01-09T14:35:03.000Z
|
tests/test_sample_images.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | null | null | null |
import pytest
import shutil as sh
import pandas as pd
from pathlib import Path
from glob import glob
import libs.dirs as dirs
from libs.iteration_manager import SampleImages
from libs.utils import copy_files, replace_symbols
| 34.819672
| 91
| 0.677966
|
d0056587271ff8ce0d2628ab99ab1c7bc8e2f7e9
| 558
|
py
|
Python
|
data/Carp.py
|
shebang-sh/npb-ouenka-bot
|
6fc6f7c1717632c3845496c309560233a9c73d8e
|
[
"MIT"
] | null | null | null |
data/Carp.py
|
shebang-sh/npb-ouenka-bot
|
6fc6f7c1717632c3845496c309560233a9c73d8e
|
[
"MIT"
] | 14
|
2022-03-29T09:07:31.000Z
|
2022-03-30T02:37:07.000Z
|
data/Carp.py
|
shebang-sh/npb-ouenka-bot
|
6fc6f7c1717632c3845496c309560233a9c73d8e
|
[
"MIT"
] | null | null | null |
data={
"":" ",
"":" ",
"":" ",
"":" SHOW TIME!",
"":" ",
"":" \n ",
"":" ",
"":"! \n ",
"":" ",
"":" ",
"":" ",
}
| 42.923077
| 77
| 0.691756
|
d0057db4b4f167cbdeebfbc062e049713a913fcb
| 42
|
py
|
Python
|
source/constants.py
|
sideround/predict-revenue-new-releases
|
b6b597cfed328d6b7981917477ceb6f0630aee49
|
[
"MIT"
] | null | null | null |
source/constants.py
|
sideround/predict-revenue-new-releases
|
b6b597cfed328d6b7981917477ceb6f0630aee49
|
[
"MIT"
] | 11
|
2020-05-21T17:52:04.000Z
|
2020-06-08T03:33:28.000Z
|
source/constants.py
|
sideround/predict-revenue-new-releases
|
b6b597cfed328d6b7981917477ceb6f0630aee49
|
[
"MIT"
] | 2
|
2020-06-02T13:14:16.000Z
|
2020-06-11T17:46:05.000Z
|
BASE_URL = 'https://api.themoviedb.org/3'
| 21
| 41
| 0.714286
|
d00676794b322b39517d8082c8b83c61f4836359
| 284
|
py
|
Python
|
Unit 2/2.16/2.16.5 Black and White Squares.py
|
shashwat73/cse
|
60e49307e57105cf9916c7329f53f891c5e81fdb
|
[
"MIT"
] | 1
|
2021-04-08T14:02:49.000Z
|
2021-04-08T14:02:49.000Z
|
Unit 2/2.16/2.16.5 Black and White Squares.py
|
shashwat73/cse
|
60e49307e57105cf9916c7329f53f891c5e81fdb
|
[
"MIT"
] | null | null | null |
Unit 2/2.16/2.16.5 Black and White Squares.py
|
shashwat73/cse
|
60e49307e57105cf9916c7329f53f891c5e81fdb
|
[
"MIT"
] | null | null | null |
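This CodeHS-style exercise relies on turtle built-ins and a make_square helper defined earlier in the (truncated) file; a hedged sketch of that helper follows (the alternating black/white fill and side length are assumptions based on the exercise title):
from turtle import *  # in the CodeHS environment these functions are built in
def make_square(i):
    # fill alternates between black and white with the loop index
    color('black' if i % 2 == 0 else 'white')
    begin_fill()
    for _ in range(4):
        forward(25)
        left(90)
    end_fill()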
speed(0)
penup()
setposition(-100, 0)
pendown()
for i in range (6):
pendown()
make_square(i)
penup()
forward(35)
| 14.947368
| 23
| 0.503521
|
d0075df444476cd69e92bd3d5f61f5eff5a35b08
| 771
|
py
|
Python
|
Q1/read.py
|
arpanmangal/Regression
|
06969286d7db65a537e89ac37905310592542ca9
|
[
"MIT"
] | null | null | null |
Q1/read.py
|
arpanmangal/Regression
|
06969286d7db65a537e89ac37905310592542ca9
|
[
"MIT"
] | null | null | null |
Q1/read.py
|
arpanmangal/Regression
|
06969286d7db65a537e89ac37905310592542ca9
|
[
"MIT"
] | null | null | null |
"""
Module for reading data from 'linearX.csv' and 'linearY.csv'
"""
import numpy as np
def loadData (x_file="ass1_data/linearX.csv", y_file="ass1_data/linearY.csv"):
"""
Loads the X, Y matrices.
Splits into training, validation and test sets
"""
X = np.genfromtxt(x_file)
Y = np.genfromtxt(y_file)
    # stack X and Y as columns of one matrix so corresponding rows shuffle together
    Z = np.c_[X.reshape(len(X), -1), Y.reshape(len(Y), -1)]
np.random.shuffle(Z)
# Partition the data into three sets
size = len(Z)
training_size = int(0.8 * size)
validation_size = int(0.1 * size)
test_size = int(0.1 * size)
training_Z = Z[0:training_size]
validation_Z = Z[training_size:training_size+validation_size]
test_Z = Z[training_size+validation_size:]
    # NOTE: the splits above are computed but unused; the full shuffled data is returned
    return (Z[:, 0], Z[:, 1])
| 25.7
| 78
| 0.639429
|
d00814276e589d5ea8bb86b5cdc709673c74e2be
| 331
|
py
|
Python
|
apps/experiments/forms.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | 1
|
2022-01-26T18:07:22.000Z
|
2022-01-26T18:07:22.000Z
|
apps/experiments/forms.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | null | null | null |
apps/experiments/forms.py
|
Intellia-SME/OptiPLANT
|
1d40b62f00b3fff940499fa27d0c2d59e7e6dd4c
|
[
"Apache-2.0"
] | 1
|
2022-01-26T18:07:26.000Z
|
2022-01-26T18:07:26.000Z
|
from django import forms
from .models import Experiment
| 23.642857
| 54
| 0.679758
|