| hexsha (stringlengths 40–40) | size (int64 5–2.06M) | ext (stringclasses 11 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–251) | max_stars_repo_name (stringlengths 4–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–251) | max_issues_repo_name (stringlengths 4–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–251) | max_forks_repo_name (stringlengths 4–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 1–1.05M) | avg_line_length (float64 1–1.02M) | max_line_length (int64 3–1.04M) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb20be04422ba85fc708db252613db55adc1f7a9
| 359
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
#!/bin/env python
# Vulture often detects false positives when analyzing a code
# base. If there are particular things you wish to ignore,
# add them below. This file is consumed by
# scripts/dead_code/find-dead-code.sh
from vulture.whitelist_utils import Whitelist
view_whitelist = Whitelist()
# Example:
# view_whitelist.name_of_function_to_whitelist
| 23.933333
| 61
| 0.793872
|
eb212bcaed139e5c9db595186ee8e16677921512
| 8,088
|
py
|
Python
|
mmdet/utils/memory.py
|
Youth-Got/mmdetection
|
2e0a02599804da6e07650dde37b9df538e15d646
|
[
"Apache-2.0"
] | 1
|
2021-12-10T15:08:22.000Z
|
2021-12-10T15:08:22.000Z
|
mmdet/utils/memory.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
mmdet/utils/memory.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps
import torch
from mmdet.utils import get_root_logger
def cast_tensor_type(inputs, src_type=None, dst_type=None):
"""Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.
Args:
        inputs: Inputs to be cast.
        src_type (torch.dtype | torch.device): Source type.
        dst_type (torch.dtype | torch.device): Destination type.
Returns:
The same type with inputs, but all contained Tensors have been cast.
"""
assert dst_type is not None
if isinstance(inputs, torch.Tensor):
if isinstance(dst_type, torch.device):
# convert Tensor to dst_device
if hasattr(inputs, 'to') and \
hasattr(inputs, 'device') and \
(inputs.device == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
else:
# convert Tensor to dst_dtype
if hasattr(inputs, 'to') and \
hasattr(inputs, 'dtype') and \
(inputs.dtype == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
# we need to ensure that the type of inputs to be casted are the same
# as the argument `src_type`.
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
for item in inputs)
# TODO: Currently not supported
# elif isinstance(inputs, InstanceData):
# for key, value in inputs.items():
# inputs[key] = cast_tensor_type(
# value, src_type=src_type, dst_type=dst_type)
# return inputs
else:
return inputs
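# A minimal usage sketch of the recursive cast above (hypothetical values):
# nested containers keep their structure and only matching tensors convert.
def _demo_cast_tensor_type():
    batch = {'img': torch.rand(2, 3), 'scales': [torch.tensor([1.0]), 2]}
    half = cast_tensor_type(batch, dst_type=torch.half)
    assert half['img'].dtype == torch.half  # tensors are converted
    assert half['scales'][1] == 2  # non-tensor leaves pass through unchanged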
# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
| 37.794393
| 103
| 0.574679
|
eb213849d6f5cbf00a64871c3293e7fb777f9ff4
| 2,278
|
py
|
Python
|
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | null | null | null |
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | 1
|
2020-11-28T16:29:28.000Z
|
2020-11-28T16:29:28.000Z
|
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | null | null | null |
# import packages to extend python (just like we extend sublime, or Atom, or VSCode)
from random import randint
from gameComponents import gameVars, chooseWinner
while gameVars.player is False:
print("=======================*/ RPS CONTEST /*=======================")
print("Computer Lives: ", gameVars.ai_lives, "/", gameVars.total_lives)
print("Player Lives: ", gameVars.player_lives, "/", gameVars.total_lives)
print("==============================================")
print("Choose your weapon! or type quit to leave\n")
gameVars.player = input("Choose rock, paper or scissors: \n")
# if the player chose to quit then exit the game
if gameVars.player == "quit":
print("You chose to quit")
exit()
#player = True -> it has a value (rock, paper, or scissors)
# this will be the AI choice -> a random pick from the choices array
computer = gameVars.choices[randint(0, 2)]
# check to see what the user input
# print outputs whatever is in the round brackets -> in this case it outputs player to the command prompt window
print("user chose: " + gameVars.player)
# validate that the random choice worked for the AI
print("AI chose: " + computer)
#--------------------------- MOVE THIS CHUNK OF CODE TO A PACKAGE - START HERE --------------------
if (computer == gameVars.player):
print("tie")
# always check for negative conditions first (the losing case)
elif (computer == "rock"):
if (gameVars.player == "scissors"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "paper"):
if (gameVars.player == "rock"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "scissors"):
if (gameVars.player == "paper"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
#--------------------------- stop here - all of the above needs to move -----------------------
    if gameVars.player_lives == 0:
chooseWinner.winorlose("lost")
    if gameVars.ai_lives == 0:
chooseWinner.winorlose("won")
print("Player has", gameVars.player_lives, "lives left")
print("AI has", gameVars.ai_lives, "lives left")
gameVars.player = False
| 26.183908
| 113
| 0.6295
|
eb21b87b5bc6c350c9c4db10e19ca1430b1bd7c2
| 1,227
|
py
|
Python
|
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
| 36.088235
| 109
| 0.597392
|
eb2259b4263e5697783bf6849627924369449a0f
| 1,222
|
py
|
Python
|
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
import time
from threading import Timer
i = 0
if __name__ == "__main__":
print("Starting...")
    rt = RepeatedTimer(0.05, timeTest)  # it auto-starts, so rt.start() is not needed
try:
ST = time.time()
time.sleep(5)
except Exception as e:
raise e
finally:
rt.stop()
print(time.time() - ST)
| 24.44
| 85
| 0.531097
|
eb22d571bce236b4e4b07269afd4c1273f92107f
| 721
|
py
|
Python
|
src/main/PyCodes/deep_versions.py
|
panditu2015/DL-Lab-7th-Semester
|
59a64d9c219cbed8cc4a75517f46c7f551a95a5a
|
[
"MIT"
] | null | null | null |
src/main/PyCodes/deep_versions.py
|
panditu2015/DL-Lab-7th-Semester
|
59a64d9c219cbed8cc4a75517f46c7f551a95a5a
|
[
"MIT"
] | null | null | null |
src/main/PyCodes/deep_versions.py
|
panditu2015/DL-Lab-7th-Semester
|
59a64d9c219cbed8cc4a75517f46c7f551a95a5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import keras
# In[2]:
# scipy
import scipy
print( ' scipy: %s ' % scipy.__version__)
# numpy
import numpy
print( ' numpy: %s ' % numpy.__version__)
# matplotlib
import matplotlib
print( ' matplotlib: %s ' % matplotlib.__version__)
# pandas
import pandas
print( ' pandas: %s ' % pandas.__version__)
# statsmodels
import statsmodels
print( ' statsmodels: %s ' % statsmodels.__version__)
# scikit-learn
import sklearn
print( ' sklearn: %s ' % sklearn.__version__)
# In[3]:
# theano
import theano
print( ' theano: %s ' % theano.__version__)
# tensorflow
import tensorflow
print( ' tensorflow: %s ' % tensorflow.__version__)
# keras
import keras
print( ' keras: %s ' % keras.__version__)
| 15.673913
| 53
| 0.694868
|
eb2601a12ac399bfb0e416993c3a1b51cb79ad73
| 577
|
py
|
Python
|
graph_help/colorschemes/DarkColorScheme.py
|
jgurhem/Graph_Generator
|
d60f4451feef0c530389bfc4bc6978bda3d4c0cb
|
[
"MIT"
] | null | null | null |
graph_help/colorschemes/DarkColorScheme.py
|
jgurhem/Graph_Generator
|
d60f4451feef0c530389bfc4bc6978bda3d4c0cb
|
[
"MIT"
] | null | null | null |
graph_help/colorschemes/DarkColorScheme.py
|
jgurhem/Graph_Generator
|
d60f4451feef0c530389bfc4bc6978bda3d4c0cb
|
[
"MIT"
] | null | null | null |
from .DefaultColorScheme import DefaultColorScheme
| 30.368421
| 50
| 0.636049
|
eb266bf3b2f0517ce3d9501b3cfc011f8ded2d3e
| 3,817
|
bzl
|
Python
|
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | 2
|
2021-09-02T18:59:09.000Z
|
2021-09-20T23:13:17.000Z
|
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | null | null | null |
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_skylib//lib:shell.bzl", "shell")
json_extract = rule(
implementation = _json_extract_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = "List of inputs. Must all be valid JSON files.",
),
"suffix": attr.string(
default = "",
doc = ("Output file extensions. Each input file will be renamed " +
"from basename.json to basename+suffix."),
),
"raw": attr.bool(
default = False,
doc = ("Whether or not to pass -r to jq. Passing -r will result " +
"in raw data being extracted, i.e. non-JSQN output."),
),
"query": attr.string(
default = ".",
doc = ("Query to pass to the jq binary. The default is '.', " +
"meaning just copy the validated input."),
),
"flags": attr.string_list(
allow_empty = True,
doc = "List of flags to pass to the jq binary.",
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
)
json_test = rule(
implementation = _json_test_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = ("List of inputs. The test will verify that they are " +
"valid JSON files."),
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
outputs = {"test": "%{name}.sh"},
test = True,
)
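# A minimal usage sketch from a BUILD file; the target and file names here
# are hypothetical:
#
# json_extract(
#     name = "dependency_names",
#     srcs = ["config.json"],
#     query = ".dependencies | keys",
#     raw = True,
# )
#
# json_test(
#     name = "config_json_test",
#     srcs = ["config.json"],
# )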
| 31.545455
| 79
| 0.556196
|
eb26e6350d60cf3d97e04c6da4b6ad1b56768020
| 554
|
py
|
Python
|
Psi_Phi/plot.py
|
Twinstar2/Phython_scripts
|
19f88420bca64014585e87747d01737afe074400
|
[
"MIT"
] | null | null | null |
Psi_Phi/plot.py
|
Twinstar2/Phython_scripts
|
19f88420bca64014585e87747d01737afe074400
|
[
"MIT"
] | 1
|
2018-02-14T15:19:07.000Z
|
2018-02-14T15:19:07.000Z
|
Psi_Phi/plot.py
|
TobiasJu/Python_Master_scripts
|
19f88420bca64014585e87747d01737afe074400
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
sns_plot = \
(sns.jointplot(psi, phi, size=12, space=0, xlim=(-190, 190), ylim=(-190, 190)).plot_joint(sns.kdeplot, zorder=0,
n_levels=6))
# sns_plot = sns.jointplot(psi_list_numpy, phi_list_numpy, kind="hex", color="#4CB391") # stat_func=kendalltau
# sns_plot.ylim(-180, 180)
print "plotting: ", pfam
sns_plot.savefig("Ramachandranplot_scatter/ramachandranplot_" + pfam + ".png")
| 39.571429
| 112
| 0.617329
|
eb289039ceb1e6cb9ff0bbb176aa1f763781e163
| 692
|
py
|
Python
|
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | 3
|
2021-06-25T20:52:50.000Z
|
2021-11-30T16:22:30.000Z
|
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
from unittest import mock
import pytest
get_tracer = pytest.importorskip('opentelemetry.trace.get_tracer')
| 40.705882
| 99
| 0.789017
|
eb2a05506a2d5dac21a3a7230d334f572006e5b5
| 42
|
py
|
Python
|
logic/start_game.py
|
sparkingdark/Project
|
fdd521407d788d1945275148337992a795ebdf0c
|
[
"MIT"
] | null | null | null |
logic/start_game.py
|
sparkingdark/Project
|
fdd521407d788d1945275148337992a795ebdf0c
|
[
"MIT"
] | null | null | null |
logic/start_game.py
|
sparkingdark/Project
|
fdd521407d788d1945275148337992a795ebdf0c
|
[
"MIT"
] | 5
|
2020-11-28T13:13:15.000Z
|
2020-12-07T16:32:36.000Z
|
from logic import *
| 8.4
| 19
| 0.619048
|
eb2a6dfadfc03cbe4b08fd33a47e0c0b3e370224
| 1,184
|
py
|
Python
|
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | null | null | null |
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | 1
|
2019-09-24T22:04:03.000Z
|
2019-09-24T22:04:03.000Z
|
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
def initlist(listnum):
head = ListNode(listnum[0])
tail = head
for num in listnum[1:]:
tail.next = ListNode(num)
tail = tail.next
return head
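# Worked example (hypothetical values): initlist([1, 2, 3]) builds the
# chain 1 -> 2 -> 3 and returns the head node.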
if __name__ == "__main__":
sol = Solution()
sol.swapPairs(initlist([1,2,3,4]))
| 24.163265
| 44
| 0.47973
|
eb2b0a445ecc0e541307b4aff935b22d4cc3183d
| 939
|
py
|
Python
|
hello.py
|
ookcode/CodingSpider
|
eac57ef8b41be841a8366f3cc376ff259d01e27f
|
[
"MIT"
] | null | null | null |
hello.py
|
ookcode/CodingSpider
|
eac57ef8b41be841a8366f3cc376ff259d01e27f
|
[
"MIT"
] | null | null | null |
hello.py
|
ookcode/CodingSpider
|
eac57ef8b41be841a8366f3cc376ff259d01e27f
|
[
"MIT"
] | 1
|
2022-02-23T07:12:23.000Z
|
2022-02-23T07:12:23.000Z
|
#!/usr/bin/python
#coding=utf-8
import os
from flask import Flask
from flask import Response
from flask import request
app = Flask(__name__)
if __name__ == "__main__":
app.run()
| 30.290323
| 97
| 0.652822
|
eb2c8b8b8d777e9a0438515ac0aea6cd01f5301b
| 2,696
|
py
|
Python
|
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
"""
Ahira Justice, ADEFOKUN
justiceahira@gmail.com
"""
import os
import pygame
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE_DIR = os.path.join(BASE_DIR, "images")
BLACK = "BLACK"
WHITE = "WHITE"
BISHOP = "BISHOP"
KING = "KING"
KNGHT = "KNIGHT"
PAWN = "PAWN"
QUEEN = "QUEEN"
ROOK = "ROOK"
| 29.304348
| 66
| 0.582715
|
eb2cab16d3d0736d863c283be6817d00ab5e890d
| 3,993
|
py
|
Python
|
stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 3
|
2019-08-13T01:44:16.000Z
|
2019-12-10T04:05:56.000Z
|
stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | null | null | null |
stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 7
|
2019-05-29T17:35:25.000Z
|
2021-12-04T07:55:10.000Z
|
from resource_management.core.resources.system import Execute
from resource_management.libraries.script import Script
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate
from resource_management.libraries.functions.check_process_status import check_process_status
import os
if __name__ == "__main__":
Rocketmq().execute()
| 35.651786
| 146
| 0.643877
|
eb361ceecffd166eeb0b6b3ee13b8be48e6f4d86
| 819
|
py
|
Python
|
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
"""Cue: Script Orchestration for Data Analysis
Cue lets you package your data analysis into simple actions which can be connected
into a dynamic data analysis pipeline with coverage over even complex data sets.
"""
DOCLINES = (__doc__ or '').split('\n')
from setuptools import find_packages, setup
setup(
name='py-cue',
package_dir={'cue/cue': 'cue'},
packages=find_packages(include=['cue']),
version='0.1.0',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
project_urls={
"Source Code": "https://github.com/ktvng/cue"
},
author='ktvng',
license='MIT',
python_requires='>=3.8',
install_requires=['pyyaml>=5.2'],
entry_points={
'console_scripts': {
'cue=cue.cli:run'
}
}
)
| 26.419355
| 85
| 0.616606
|
eb3657629d59fdcbd7874c2822fc0707cfc70c45
| 1,689
|
py
|
Python
|
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | 1
|
2021-06-02T19:51:26.000Z
|
2021-06-02T19:51:26.000Z
|
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | null | null | null |
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | null | null | null |
#!/usr/bin/python3.2
#
# Zabbix API Python usage example
# Christoph Haas <email@christoph-haas.de>
#
username=''
password='1'
hostgroup=''
item_name='system.cpu.load[,avg1]'
zabbix_url=''
import zabbix_api
import sys
# Connect to Zabbix server
z=zabbix_api.ZabbixAPI(server=zabbix_url)
z.login(user=username, password=password)
# Get hosts in the hostgroup
hostgroup = z.hostgroup.get(
{
'filter': { 'name':hostgroup },
'sortfield': 'name',
'sortorder': 'ASC',
'limit':2,
'select_hosts':'extend'
})
print(hostgroup[0])
print("\n")
for host in hostgroup[0]['hosts']:
hostname = host['host']
print("Host:", hostname)
print("Host-ID:", host['hostid'])
item = z.item.get({
'output':'extend',
'hostids':host['hostid'],
'filter':{'key_':item_name}})
if item:
print(item[0]['lastvalue'])
print("Item-ID:", item[0]['itemid'])
# Get history
lastvalue = z.history.get({
'history': item[0]['value_type'],
'itemids': item[0]['itemid'],
'output': 'extend',
# Sort by timestamp from new to old
'sortfield':'clock',
'sortorder':'DESC',
# Get only the first (=newest) entry
'limit': 1,
})
# CAVEAT! The history.get function must be told which type the
# values are (float, text, etc.). The item.value_type contains
# the number that needs to be passed to history.get.
if lastvalue:
lastvalue = lastvalue[0]['value']
print("Last value:", lastvalue)
else:
print("No item....")
print("---------------------------")
| 23.788732
| 70
| 0.562463
|
eb3b035d6a2b960bc0d338d7dd3785c2208f99f5
| 11,813
|
py
|
Python
|
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
import copy
import math
import os
import random
import cherrypy
"""
This is a simple Battlesnake server written in Python.
For instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md
"""
if __name__ == "__main__":
server = Battlesnake()
cherrypy.config.update({"server.socket_host": "0.0.0.0"})
cherrypy.config.update({
"server.socket_port":
int(os.environ.get("PORT", "8080")),
})
print("Starting Battlesnake Server...")
cherrypy.quickstart(server)
| 31.501333
| 108
| 0.632439
|
eb3bba063d98bf83051c3973141cbbea653626d3
| 342
|
py
|
Python
|
EventIntegrityLib.py
|
fermi-lat/EventIntegrity
|
600c64c7b9be57e1008d12b7bd28ef0d260d7973
|
[
"BSD-3-Clause"
] | null | null | null |
EventIntegrityLib.py
|
fermi-lat/EventIntegrity
|
600c64c7b9be57e1008d12b7bd28ef0d260d7973
|
[
"BSD-3-Clause"
] | null | null | null |
EventIntegrityLib.py
|
fermi-lat/EventIntegrity
|
600c64c7b9be57e1008d12b7bd28ef0d260d7973
|
[
"BSD-3-Clause"
] | null | null | null |
# $Header: /nfs/slac/g/glast/ground/cvs/GlastRelease-scons/EventIntegrity/EventIntegrityLib.py,v 1.2 2008/08/28 21:50:54 ecephas Exp $
| 38
| 134
| 0.681287
|
eb3c0fe9fe75281912b7403d1e9af8679184f59d
| 107
|
py
|
Python
|
mr4mp/__init__.py
|
lapets/mr4mp
|
3f3d6ec01272d4b450eda536b37bcd76851a57d2
|
[
"MIT"
] | 5
|
2019-06-28T17:36:37.000Z
|
2022-03-08T18:59:01.000Z
|
mr4mp/__init__.py
|
lapets/mr4mp
|
3f3d6ec01272d4b450eda536b37bcd76851a57d2
|
[
"MIT"
] | null | null | null |
mr4mp/__init__.py
|
lapets/mr4mp
|
3f3d6ec01272d4b450eda536b37bcd76851a57d2
|
[
"MIT"
] | null | null | null |
"""Gives users direct access to class and functions."""
from mr4mp.mr4mp import pool, mapreduce, mapconcat
| 35.666667
| 55
| 0.775701
|
eb3c1435400a880f8b3833ff6b37ef02c5237e11
| 59,098
|
py
|
Python
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.devtools.testing.v1',
manifest={
'OrchestratorOption',
'RoboActionType',
'InvalidMatrixDetails',
'TestState',
'OutcomeSummary',
'TestMatrix',
'TestExecution',
'TestSpecification',
'SystraceSetup',
'TestSetup',
'IosTestSetup',
'EnvironmentVariable',
'Account',
'GoogleAuto',
'Apk',
'AppBundle',
'DeviceFile',
'ObbFile',
'RegularFile',
'IosDeviceFile',
'AndroidTestLoop',
'IosXcTest',
'IosTestLoop',
'AndroidInstrumentationTest',
'AndroidRoboTest',
'RoboDirective',
'RoboStartingIntent',
'LauncherActivityIntent',
'StartActivityIntent',
'EnvironmentMatrix',
'AndroidDeviceList',
'IosDeviceList',
'AndroidMatrix',
'ClientInfo',
'ClientInfoDetail',
'ResultStorage',
'ToolResultsHistory',
'ToolResultsExecution',
'ToolResultsStep',
'GoogleCloudStorage',
'FileReference',
'Environment',
'AndroidDevice',
'IosDevice',
'TestDetails',
'InvalidRequestDetail',
'ShardingOption',
'UniformSharding',
'ManualSharding',
'TestTargetsForShard',
'Shard',
'CreateTestMatrixRequest',
'GetTestMatrixRequest',
'CancelTestMatrixRequest',
'CancelTestMatrixResponse',
},
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 30.029472
| 108
| 0.619953
|
eb3c4ae70f222dd8a499b8678c9508db3922f5b5
| 1,457
|
py
|
Python
|
CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 12
|
2021-04-26T19:43:01.000Z
|
2022-01-31T08:36:29.000Z
|
from collections import defaultdict
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2")
| 29.734694
| 67
| 0.539465
|
eb41c235a81322c2905a0154804ac4a18d5c346c
| 1,060
|
py
|
Python
|
src/sentimentClassification.py
|
MaxPowerScience/EnglishSentiment
|
119eeb6e1ee9f24805fbad6650d1a9c3e305f952
|
[
"Apache-2.0"
] | null | null | null |
src/sentimentClassification.py
|
MaxPowerScience/EnglishSentiment
|
119eeb6e1ee9f24805fbad6650d1a9c3e305f952
|
[
"Apache-2.0"
] | null | null | null |
src/sentimentClassification.py
|
MaxPowerScience/EnglishSentiment
|
119eeb6e1ee9f24805fbad6650d1a9c3e305f952
|
[
"Apache-2.0"
] | null | null | null |
from perceptron import train_network, create_perceptron, test_network
from preprocessingData import get_ids_matrix, separate_test_and_training_data, read_word_list
from extractRawData import get_raw_data
from lstm import create_lstm, create_lstm_with_tensorflow
if __name__ == "__main__":
main()
| 37.857143
| 93
| 0.766038
|
eb41c51ce9970b54d5b685bba4f5e3319c3b6398
| 33,225
|
py
|
Python
|
Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py
|
databricks-academy/developer-essentials-capstone
|
77e70b1eb5b49b5f6779495fac7d14f5fadded9d
|
[
"CC0-1.0"
] | 1
|
2022-02-08T03:56:32.000Z
|
2022-02-08T03:56:32.000Z
|
Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py
|
databricks-academy/developer-essentials-capstone
|
77e70b1eb5b49b5f6779495fac7d14f5fadded9d
|
[
"CC0-1.0"
] | null | null | null |
Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py
|
databricks-academy/developer-essentials-capstone
|
77e70b1eb5b49b5f6779495fac7d14f5fadded9d
|
[
"CC0-1.0"
] | 4
|
2022-01-01T09:41:31.000Z
|
2022-02-17T09:48:05.000Z
|
# Databricks notebook source
import builtins as BI
# Setup the capstone
import re, uuid
from pyspark.sql.types import StructType, StringType, IntegerType, TimestampType, DoubleType
from pyspark.sql.functions import col, to_date, weekofyear
from pyspark.sql import DataFrame
static_tests = None
bronze_tests = None
silver_tests = None
gold_tests = None
registration_id = None
final_passed = False
course_name = "Core Partner Enablement"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
user_db = f"dbacademy_{clean_username}_dev_ess_cap"
working_dir = f"dbfs:/user/{username}/dbacademy/dev-ess-cap"
outputPathBronzeTest = f"{working_dir}/bronze_test"
outputPathSilverTest = f"{working_dir}/silver_test"
outputPathGoldTest = f"{working_dir}/gold_test"
source_path = f"wasbs://courseware@dbacademy.blob.core.windows.net/developer-essentials-capstone/v01"
eventSchema = ( StructType()
.add('eventName', StringType())
.add('eventParams', StructType()
.add('game_keyword', StringType())
.add('app_name', StringType())
.add('scoreAdjustment', IntegerType())
.add('platform', StringType())
.add('app_version', StringType())
.add('device_id', StringType())
.add('client_event_time', TimestampType())
.add('amount', DoubleType())
)
)
print(f"Declared the following variables:")
print(f" * user_db: {user_db}")
print(f" * working_dir: {working_dir}")
print()
print(f"Declared the following function:")
print(f" * realityCheckBronze(..)")
print(f" * realityCheckStatic(..)")
print(f" * realityCheckSilver(..)")
print(f" * realityCheckGold(..)")
print(f" * realityCheckFinal()")
# COMMAND ----------
try: reinstall = dbutils.widgets.get("reinstall").lower() == "true"
except: reinstall = False
install_exercise_datasets(reinstall)
print(f"\nYour Registration ID is {registration_id}")
# COMMAND ----------
# Setup Bronze
from pyspark.sql import DataFrame
import time
None
# COMMAND ----------
# Setup Static
None
# COMMAND ----------
# Setup Silver
None
# COMMAND ----------
# Setup Gold
None
# COMMAND ----------
html_passed = f"""
<html>
<body>
<h2>Congratulations! You're all done!</h2>
While the preliminary evaluation of your project indicates that you have passed, we have a few more validation steps to run on the back-end:<br/>
<ul style="margin:0">
<li> Code & statistical analysis of your capstone project</li>
<li> Correlation of your account in our LMS via your email address, <b>{username}</b></li>
  <li> Final preparation of your badge</li>
</ul>
<p>Assuming there are no issues with our last few steps, you will receive your <b>Databricks Developer Essentials Badge</b> within 2 weeks. Notification will be made by email to <b>{username}</b> regarding the availability of your digital badge via <b>Accredible</b>.
Should we have any issues, such as not finding your email address in our LMS, we will do our best to resolve the issue using the email address provided here.
</p>
<p>Your digital badge will be available in a secure, verifiable, and digital format that you can easily retrieve via <b>Accredible</b>. You can then share your achievement via any number of different social media platforms.</p>
<p>If you have questions about the status of your badge after the initial two-week window, or if the email address listed above is incorrect, please <a href="https://help.databricks.com/s/contact-us?ReqType=training" target="_blank">submit a ticket</a> with the subject "Core Capstone" and your Registration ID (<b>{registration_id}</b>) in the message body. Please allow us 3-5 business days to respond.</p>
One final note: In order to comply with <a href="https://oag.ca.gov/privacy/ccpa" target="_blank">CCPA</a> and <a href="https://gdpr.eu/" target="_blank">GDPR</a>, which regulate the collection of your personal information, the status of this capstone and its correlation to your email address will be deleted within 30 days of its submission.
</body>
</html>
"""
html_failed = f"""
<html>
<body>
<h2>Almost There!</h2>
<p>Our preliminary evaluation of your project indicates that you have not passed.</p>
<p>In order for your project to be submitted <b>all</b> reality checks must pass.</p>
<p>In some cases this problem can be resolved by simply clearing the notebook's state (<b>Clear State & Results</b>) and then selecting <b>Run All</b> from the toolbar above.</p>
<p>If your project continues to fail validation, please review each step above to ensure that you have properly addressed all the corresponding requirements.</p>
</body>
</html>
"""
# Setup Final
None
# COMMAND ----------
daLogger = CapstoneLogger()
None
# COMMAND ----------
# These imports are OK to provide for students
import pyspark
from typing import Callable, Any, Iterable, List, Set, Tuple
import uuid
#############################################
# Test Suite classes
#############################################
# Test case
# Test result
# Decorator to lazy evaluate - used by TestSuite
def lazy_property(fn):
    '''Decorator that makes a property lazy-evaluated.'''
    attr_name = '_lazy_' + fn.__name__
    @property
    def _lazy_property(self):
        # compute once on first access, then cache on the instance
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))
        return getattr(self, attr_name)
    return _lazy_property
testResultsStyle = """
<style>
table { text-align: left; border-collapse: collapse; margin: 1em; caption-side: bottom; font-family: Sans-Serif; font-size: 16px}
caption { text-align: left; padding: 5px }
th, td { border: 1px solid #ddd; padding: 5px }
th { background-color: #ddd }
.passed { background-color: #97d897 }
.failed { background-color: #e2716c }
.skipped { background-color: #f9d275 }
.results .points { display: none }
.results .message { display: none }
.results .passed::before { content: "Passed" }
.results .failed::before { content: "Failed" }
.results .skipped::before { content: "Skipped" }
.grade .passed .message:empty::before { content:"Passed" }
.grade .failed .message:empty::before { content:"Failed" }
.grade .skipped .message:empty::before { content:"Skipped" }
</style>
""".strip()
# Test suite class
class __TestResultsAggregator(object):
testResults = dict()
def displayResults(self):
displayHTML(testResultsStyle + f"""
<table class='results'>
<tr><th colspan="2">Test Summary</th></tr>
<tr><td>Number of Passing Tests</td><td style="text-align:right">{self.score}</td></tr>
<tr><td>Number of Failing Tests</td><td style="text-align:right">{self.maxScore-self.score}</td></tr>
<tr><td>Percentage Passed</td><td style="text-align:right">{self.percentage}%</td></tr>
</table>
""")
# Lazy-man's singleton
TestResultsAggregator = __TestResultsAggregator()
None
# COMMAND ----------
from pyspark.sql import Row, DataFrame
None
# COMMAND ----------
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, sum
import os
print("Finished setting up the capstone environment.")
| 38.544084
| 408
| 0.669586
|
eb424108a96bf604264def77319d83c190ad7040
| 12,658
|
py
|
Python
|
scraper/Scraper.py
|
tiskutis/Capstone24Scraper
|
3182463e129f37f0f895a440d2285a51e0cfb9a2
|
[
"MIT"
] | null | null | null |
scraper/Scraper.py
|
tiskutis/Capstone24Scraper
|
3182463e129f37f0f895a440d2285a51e0cfb9a2
|
[
"MIT"
] | null | null | null |
scraper/Scraper.py
|
tiskutis/Capstone24Scraper
|
3182463e129f37f0f895a440d2285a51e0cfb9a2
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup as bs, BeautifulSoup
import pandas as pd
import numpy as np
import re
import logging
def get_houses_in_location(
self,
location_url_: str,
houses_in_location: set = set(),
page_limit: int = 1,
page_number: int = 1,
) -> list:
"""
Accepts location url and goes through pages in that location scraping every house
until page limit is reached. Returns list of dicts with scraped information about every house in that location.
:param location_url_: string with link to specific location in California state
:param houses_in_location: set with already scraped links. Since retrieved links can be repetitive, there is
no need to go to the same link which has already been scraped. Set is used for faster search
        :param page_limit: how many pages to scrape. If not passed by the user, default is 1
:param page_number: Current page to scrape. Starting number is 1
:return: list of dictionaries
"""
houses_information = []
try:
new_url = self.basic_url + location_url_ + f"?page={page_number}"
page_ = self.get_page(new_url)
if page_.find_all("li", class_="lslide"):
for elem in page_.find_all("li", class_="lslide"):
link = elem.find("a")["href"]
if link.startswith("/US") and link not in houses_in_location:
houses_information.append(
self.scrape_info_one_house(
self.get_page(self.basic_url + link)
)
)
houses_in_location.add(link)
            if page_number < page_limit:
                page_number += 1
                # accumulate results from the following pages as well
                houses_information.extend(
                    self.get_houses_in_location(
                        location_url_,
                        houses_in_location,
                        page_limit,
                        page_number=page_number,
                    )
                )
except Exception as err:
logging.error(f"Error occurred while scraping locations. Message: {err}")
return houses_information
def scrape_platform(self, page_limit: int = 1) -> None:
"""
Main scraping function. Accepts page limit - how many pages to scrape, default is 1.
The flow:
- First, all California areas (locations) are extracted and put into a list.
- Area list is iterated over. Each area has a number of pages with real estate descriptions. User can select how
many pages he wants to go through.
- Scraper visits every real estate link in the page and scrapes required information. After all houses are scraped,
scraper moves to the next page. When no more pages are left or user denoted page limit is reached, scraper
moves to the next category.
:param page_limit: how many pages to scrape per area
:return: None.
"""
starting_url = "https://www.point2homes.com/US/Real-Estate-Listings/CA.html"
houses = []
starting_page = self.get_page(starting_url)
locations = self.get_location_urls(starting_page)
for location in locations:
houses.extend(
self.get_houses_in_location(location, set(), page_limit=page_limit)
)
self.to_dataframe(houses).to_csv("California Housing.csv")
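# A minimal driver sketch, assuming the truncated enclosing class is named
# Scraper (the class header is not shown in this excerpt):
# scraper = Scraper()
# scraper.scrape_platform(page_limit=2)  # scrape two pages per area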
| 38.241692
| 123
| 0.586902
|
eb42e8c815ef79c9ee2b0e9d574f89c917610639
| 693
|
py
|
Python
|
ArticleSpider/ArticleSpider/utils/selenium_spider.py
|
ms-wu/Scrapy_projects
|
376eb5e1c6eca54bcfb781170513c8e9d3476fec
|
[
"MIT"
] | null | null | null |
ArticleSpider/ArticleSpider/utils/selenium_spider.py
|
ms-wu/Scrapy_projects
|
376eb5e1c6eca54bcfb781170513c8e9d3476fec
|
[
"MIT"
] | null | null | null |
ArticleSpider/ArticleSpider/utils/selenium_spider.py
|
ms-wu/Scrapy_projects
|
376eb5e1c6eca54bcfb781170513c8e9d3476fec
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from scrapy.selector import Selector
import time
chrome_opt = webdriver.ChromeOptions()
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_opt.add_experimental_option("prefs", prefs)
browser = webdriver.Chrome(executable_path=r"H:\chromedriver.exe", chrome_options=chrome_opt)
browser.get("https://www.taobao.com")
# time.sleep(5)
# browser.find_element_by_css_selector()
# t_selector = Selector(text=browser.page_source)
# t_selector.css()
# for i in range(3):
# browser.execute_script("window.scrollTo(0, document.body.scrollHeight); var lenOfPage=document.body.scrollHeight; return lenOfPage;")
# time.sleep(3)
# browser.quit()
| 31.5
| 139
| 0.780664
|
eb4407cbcc3f00735c03c065582c4a89413734d8
| 1,678
|
py
|
Python
|
launcher.py
|
dlario/PyFlow
|
b53b9d14b37aa586426d85842c6cd9a9c35443f2
|
[
"MIT"
] | null | null | null |
launcher.py
|
dlario/PyFlow
|
b53b9d14b37aa586426d85842c6cd9a9c35443f2
|
[
"MIT"
] | null | null | null |
launcher.py
|
dlario/PyFlow
|
b53b9d14b37aa586426d85842c6cd9a9c35443f2
|
[
"MIT"
] | null | null | null |
from nine import str
from Qt.QtWidgets import QApplication, QStyleFactory
from Qt import QtGui
from Qt import QtCore
import sys
import os
from PyFlow.App import PyFlow
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
SETTINGS_PATH = os.path.join(FILE_DIR, "PyFlow", "appConfig.ini")
STYLE_PATH = os.path.join(FILE_DIR, "PyFlow", "style.css")
app = QApplication(sys.argv)
app.setStyle(QStyleFactory.create("plastique"))
dark_palette = app.palette()
dark_palette.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.WindowText, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.Base, QtGui.QColor(25, 25, 25))
dark_palette.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.ToolTipBase, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.ToolTipText, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.Text, QtCore.Qt.black)
dark_palette.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.ButtonText, QtCore.Qt.black)
dark_palette.setColor(QtGui.QPalette.BrightText, QtCore.Qt.red)
dark_palette.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))
dark_palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))
dark_palette.setColor(QtGui.QPalette.HighlightedText, QtCore.Qt.black)
app.setPalette(dark_palette)
try:
with open(STYLE_PATH, 'r') as f:
styleString = f.read()
app.setStyleSheet(styleString)
except Exception as e:
print(e)
instance = PyFlow.instance()
app.setActiveWindow(instance)
instance.show()
try:
sys.exit(app.exec_())
except Exception as e:
print(e)
| 33.56
| 77
| 0.781883
|
eb444f1d2f4c6079bc153578e3e68294eef319a0
| 4,344
|
py
|
Python
|
src/gapminder_challenge/dashboard/dash_app2.py
|
UBC-MDS/gapminder_challenge
|
bbc8132a475d483e7c6c46572c8efca40b506afc
|
[
"MIT"
] | 1
|
2022-03-19T03:31:49.000Z
|
2022-03-19T03:31:49.000Z
|
src/gapminder_challenge/dashboard/dash_app2.py
|
imtvwy/gapminder_challenge
|
0f7d9816b0c5baf6422baff24e0413c800d6e62a
|
[
"MIT"
] | 39
|
2022-02-17T05:04:48.000Z
|
2022-03-19T21:37:20.000Z
|
src/gapminder_challenge/dashboard/dash_app2.py
|
imtvwy/gapminder_challenge
|
0f7d9816b0c5baf6422baff24e0413c800d6e62a
|
[
"MIT"
] | 1
|
2022-03-19T03:30:08.000Z
|
2022-03-19T03:30:08.000Z
|
import pandas as pd
from dash import Dash, html, dcc, Input, Output
import altair as alt
df = pd.read_csv('../../data/raw/world-data-gapminder_raw.csv') # local run
# df = pd.read_csv('data/raw/world-data-gapminder_raw.csv') # heroku deployment
url = '/dash_app2/'
def add_dash(server):
"""
It creates a Dash app that plots a line chart of children per woman from gapminder dataset
with 2 widgets : rangeslider for years and dropdown for filter
:param server: The Flask app object
:return: A Dash server
"""
app = Dash(server=server, url_base_pathname=url)
app.layout = html.Div([
html.Iframe(
id='line_children',
style={'border-width': '0', 'width': '600px', 'height': '400px', 'display': 'block',
'margin-left': 'auto', 'margin-right': 'auto'}),
html.Label([
'Zoom in years: ',
dcc.RangeSlider(1918, 2018, 10, value=[1918, 2018], id='year_range_slider',
marks={str(year): str(year) for year in range(1918, 2028, 10)}),
]),
html.Label([
'See breakdown number by: ',
dcc.Dropdown(options=[
{'label': 'All', 'value': 'all'},
{'label': 'Income Group', 'value': 'income_group'},
{'label': 'Region', 'value': 'region'}
],
value='', id='filter_dropdown')
]),
html.Div(id="data_card_2", **{'data-card_2_data': []})
])
# Set up callbacks/backend
return app.server
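# A minimal wiring sketch (hypothetical Flask app): add_dash mounts the
# Dash app onto an existing Flask server at /dash_app2/.
# from flask import Flask
# flask_app = Flask(__name__)
# flask_app = add_dash(flask_app)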
| 42.174757
| 113
| 0.575506
|
eb448a448b8928b4d93cd021756f058d5d672505
| 4,595
|
py
|
Python
|
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | 2
|
2021-05-10T16:59:34.000Z
|
2021-05-19T16:10:24.000Z
|
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | null | null | null |
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | 2
|
2021-04-16T23:55:16.000Z
|
2021-09-09T12:48:41.000Z
|
# Author: Arrykrishna Mootoovaloo
# Collaborators: Alan Heavens, Andrew Jaffe, Florent Leclercq
# Email : a.mootoovaloo17@imperial.ac.uk
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Perform all additional operations such as interpolations
'''
import os
import logging
import numpy as np
import scipy.interpolate as itp
from typing import Tuple
def indices(nzmax: int) -> Tuple[list, tuple]:
'''
Generates indices for double sum power spectra
:param: nzmax (int) - the maximum number of redshifts (assuming first redshift is zero)
:return: di_ee (list), idx_gi (tuple) - double indices for EE and indices for GI
'''
    # create empty lists to record all indices
# for EE power spectrum
di_ee = []
# for GI power spectrum
# ab means alpha, beta
Lab_1 = []
Lab_2 = []
Lba_1 = []
Lba_2 = []
for i in range(1, nzmax + 1):
for j in range(1, nzmax + 1):
di_ee.append(np.min([i, j]))
if i > j:
Lab_1.append(i)
Lab_2.append(j)
elif j > i:
Lba_1.append(i)
Lba_2.append(j)
Lab_1 = np.asarray(Lab_1)
Lab_2 = np.asarray(Lab_2)
Lba_1 = np.asarray(Lba_1)
Lba_2 = np.asarray(Lba_2)
di_ee = np.asarray(di_ee)
idx_gi = (Lab_1, Lab_2, Lba_1, Lba_2)
return di_ee, idx_gi
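# Worked example: for nzmax = 2 the loop visits (1,1), (1,2), (2,1), (2,2),
# so di_ee = [1, 1, 1, 2] and idx_gi = ([2], [1], [1], [2]),
# i.e. one (alpha, beta) pair (2, 1) and one (beta, alpha) pair (1, 2).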
def dvalues(d: dict) -> np.ndarray:
'''
Returns an array of values instead of dictionary format
:param: d (dict) - a dictionary with keys and values
:return: v (np.ndarray) - array of values
'''
v = np.array(list(d.values()))
return v
def like_interp_2d(inputs: list, int_type: str = 'cubic') -> object:
'''
We want to predict the function for any new point of k and z (example)
:param: inputs (list) - a list containing x, y, f(x,y)
:param: int_type (str) - interpolation type (default: 'cubic')
:return: f (object) - the interpolator
'''
k, z, f_kz = np.log(inputs[0]), inputs[1], inputs[2]
inputs_trans = [k, z, f_kz]
f = itp.interp2d(*inputs_trans)
return f
def two_dims_interpolate(inputs: list, grid: list) -> np.ndarray:
'''
Function to perform 2D interpolation using interpolate.interp2d
:param: inputs (list) : inputs to the interpolation module, that is, we need to specify the following:
- x
- y
- f(x,y)
- 'linear', 'cubic', 'quintic'
:param: grid (list) : a list containing xnew and ynew
:return: pred_new (np.ndarray) : the predicted values on the 2D grid
'''
# check that all elements are greater than 0 for log-transformation to be used
condition = np.all(inputs[2] > 0)
if condition:
# transform k and f to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], np.log(inputs[2]), inputs[3]
else:
# transform in k to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], inputs[2], inputs[3]
inputs_trans = [k, z, f_kz, int_type]
    # transform the grid to log
knew, znew = np.log(grid[0]), grid[1]
grid_trans = [knew, znew]
f = itp.interp2d(*inputs_trans)
if condition:
pred_new = np.exp(f(*grid_trans))
else:
pred_new = f(*grid_trans)
return pred_new
def interpolate(inputs: list) -> np.ndarray:
'''
Function to interpolate the power spectrum along the redshift axis
:param: inputs (list or tuple) : x values, y values and new values of x
:return: ynew (np.ndarray) : an array of the interpolated power spectra
'''
x, y, xnew = inputs[0], inputs[1], inputs[2]
spline = itp.splrep(x, y)
ynew = itp.splev(xnew, spline)
return ynew
def get_logger(name: str, log_name: str, folder_name: str = 'logs'):
'''
    Create a log file for each Python script
:param: name (str) - name of the Python script
:param: log_name (str) - name of the output log file
'''
# create the folder if it does not exist
if not os.path.exists(folder_name):
os.makedirs(folder_name)
log_format = '%(asctime)s %(name)8s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.DEBUG,
format=log_format,
filename=folder_name + '/' + log_name + '.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(name).addHandler(console)
return logging.getLogger(name)
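# A minimal usage sketch (hypothetical log name): one log file per script
# under ./logs, with output mirrored to the console.
# logger = get_logger(__name__, 'emulator_run')
# logger.info('interpolation finished')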
| 24.972826
| 106
| 0.618498
|
eb458b4c5c0f75854528fff96d2061d078c5cbe7
| 2,984
|
py
|
Python
|
pypy/translator/microbench/pybench/Imports.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/microbench/pybench/Imports.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/translator/microbench/pybench/Imports.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
from pybench import Test
# First imports:
import os
import package.submodule
| 21.314286
| 37
| 0.515416
|
de16d40373757db432c5c7a3e7d57eeddc1025cc
| 1,745
|
py
|
Python
|
tests/test_generators_rst.py
|
dbaty/soho
|
3fe67d3dc52919751217d6e73be436c3e291ab04
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_generators_rst.py
|
dbaty/soho
|
3fe67d3dc52919751217d6e73be436c3e291ab04
|
[
"BSD-3-Clause"
] | 1
|
2015-10-11T10:34:08.000Z
|
2015-10-11T10:34:08.000Z
|
tests/test_generators_rst.py
|
dbaty/soho
|
3fe67d3dc52919751217d6e73be436c3e291ab04
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import TestCase
| 37.12766
| 79
| 0.581089
|
de170bec53f0702af41038f426ab0305ba516d45
| 206
|
py
|
Python
|
wagtail_ab_testing/test/apps.py
|
alxbridge/wagtail-ab-testing
|
1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807
|
[
"BSD-3-Clause"
] | 14
|
2021-02-19T08:52:37.000Z
|
2022-03-16T05:16:38.000Z
|
wagtail_ab_testing/test/apps.py
|
alxbridge/wagtail-ab-testing
|
1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807
|
[
"BSD-3-Clause"
] | 10
|
2021-04-09T16:16:17.000Z
|
2022-03-31T17:30:18.000Z
|
wagtail_ab_testing/test/apps.py
|
alxbridge/wagtail-ab-testing
|
1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807
|
[
"BSD-3-Clause"
] | 11
|
2021-04-23T15:19:06.000Z
|
2022-03-28T16:15:14.000Z
|
from django.apps import AppConfig
| 25.75
| 47
| 0.771845
|
de188ec6c9675e889154db140be0ba41e013c1c2
| 835
|
py
|
Python
|
shc/__init__.py
|
fabaff/smarthomeconnect
|
611cd0f372d03b5fc5798a2a9a5f962d1da72799
|
[
"Apache-2.0"
] | 5
|
2021-07-02T21:48:45.000Z
|
2021-12-12T21:55:42.000Z
|
shc/__init__.py
|
fabaff/smarthomeconnect
|
611cd0f372d03b5fc5798a2a9a5f962d1da72799
|
[
"Apache-2.0"
] | 49
|
2020-09-18T20:05:55.000Z
|
2022-03-05T19:51:33.000Z
|
shc/__init__.py
|
fabaff/smarthomeconnect
|
611cd0f372d03b5fc5798a2a9a5f962d1da72799
|
[
"Apache-2.0"
] | 1
|
2021-12-10T14:50:43.000Z
|
2021-12-10T14:50:43.000Z
|
# Copyright 2020 Michael Thies <mail@mhthies.de>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from . import base
from . import supervisor
from . import variables
from . import datatypes
from . import conversion
from . import timer
from .base import handler, blocking_handler
from .variables import Variable
from .supervisor import main
| 34.791667
| 120
| 0.777246
|
de1a03c3bf2d4b4418706f4fb2057bc7977a7251
| 777
|
py
|
Python
|
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Simple Http Client, to request html files
Modification: 11/09/2017
Author: J. Júnior
'''
import httplib
import sys
#get http server ip - pass in the command line
http_server = sys.argv[1]
#create a connection with the server
conn = httplib.HTTPConnection(http_server)
while 1:
cmd = raw_input('input command (ex. GET index.html): ')
cmd = cmd.split()
if cmd[0] == 'exit': #type exit to end it
break
#request command to server
conn.request(cmd[0], cmd[1])
#get response from server
rsp = conn.getresponse()
#print server response and data
print(rsp.status, rsp.reason)
print(rsp.getheaders())
data_received = rsp.read()
print(data_received)
#close connection
conn.close()
| 22.852941
| 58
| 0.679537
|
de1d5ad5042762573fde2a3a38799da995504ae1
| 6,881
|
py
|
Python
|
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
"""Implement asymmetric cryptography.
"""
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa, utils, padding
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.backends import default_backend
from collections import OrderedDict
import io
from builtins import int #pylint: disable=redefined-builtin
from pyssh.constants import ENC_SSH_RSA, ENC_SSH_DSS
from pyssh.base_types import String, MPInt
# pylint:disable=invalid-name
#TODO: ECDSA (RFC 5656)
def pack_pubkey(self):
"""Pack a public key into bytes."""
raise NotImplementedError('not implemented')
def verify_signature(self, signature, data):
"""Verify the signature against the given data. Pubkey must be set."""
raise NotImplementedError('not implemented')
def sign(self, data):
"""Sign some data. Privkey must be set."""
raise NotImplementedError('not implemented')
def read_pubkey(self, data):
"""Read a public key from data in the ssh public key format.
:param bytes data: the data to read.
Sets self.pubkey.
"""
pubkey = serialization.load_ssh_public_key(data, default_backend())
assert isinstance(pubkey.public_numbers(), self.PUBKEY_CLASS)
self.pubkey = pubkey
def read_privkey(self, data, password=None):
"""Read a PEM-encoded private key from data. If a password is set, it
will be used to decode the key.
:param bytes data: the data to read
:param bytes password: The password.
Sets self.privkey.
"""
privkey = serialization.load_pem_private_key(data, password,
default_backend())
assert isinstance(privkey.private_numbers(), self.PRIVKEY_CLASS)
self.privkey = privkey
class RSAAlgorithm(BaseAlgorithm):
"""Support for the RSA algorithm."""
FORMAT_STR = String(ENC_SSH_RSA)
PRIVKEY_CLASS = rsa.RSAPrivateNumbers
PUBKEY_CLASS = rsa.RSAPublicNumbers
class DSAAlgorithm(BaseAlgorithm):
"""Support for the DSA."""
FORMAT_STR = String(ENC_SSH_DSS)
PRIVKEY_CLASS = dsa.DSAPrivateNumbers
PUBKEY_CLASS = dsa.DSAPublicNumbers
PUBLIC_KEY_PROTOCOLS = OrderedDict((
(ENC_SSH_RSA, RSAAlgorithm),
(ENC_SSH_DSS, DSAAlgorithm)
))
def get_asymmetric_algorithm(keytype):
"""Get the referenced public key type. If a signature_blob blob is included,
validate it.
"""
try:
handler = PUBLIC_KEY_PROTOCOLS[keytype]
except KeyError:
raise UnsupportedKeyProtocol(keytype)
return handler()
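# A minimal usage sketch (hypothetical key and signature bytes):
# algo = get_asymmetric_algorithm(ENC_SSH_RSA)
# algo.read_pubkey(pubkey_bytes)  # ssh public key format
# algo.verify_signature(signature_blob, signed_data)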
| 31.277273
| 83
| 0.636826
|
de1dfa963d73dc87e79e92fa3fe653f6462539c8
| 1,230
|
py
|
Python
|
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
haohonglin/DeepLearning-1
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | 1
|
2020-12-01T06:13:21.000Z
|
2020-12-01T06:13:21.000Z
|
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
idonashino/DeepLearning
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | null | null | null |
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
idonashino/DeepLearning
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | 1
|
2021-01-01T15:28:36.000Z
|
2021-01-01T15:28:36.000Z
|
"""
@ jetou
@ cart decision_tree
@ date 2017 10 31
"""
import numpy as np
| 28.604651
| 89
| 0.585366
|
de1e40b74da53919bbdc4c6c8dda38d5aba2c247
| 27
|
py
|
Python
|
src/__init__.py
|
natrodrigues/face-recognition
|
00c78bea55d2738913cf5475056c2faf05fe960e
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
natrodrigues/face-recognition
|
00c78bea55d2738913cf5475056c2faf05fe960e
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
natrodrigues/face-recognition
|
00c78bea55d2738913cf5475056c2faf05fe960e
|
[
"MIT"
] | null | null | null |
from . import frame_manager
| 27
| 27
| 0.851852
|
de1e4247762eb410a1475e5659c71d8d5fb3aa3a
| 276
|
py
|
Python
|
sparweltbitool/config.py
|
checkout-charlie/bitool
|
e41ce66ab2b88992dbfc08d79372bf3965724f3e
|
[
"MIT"
] | null | null | null |
sparweltbitool/config.py
|
checkout-charlie/bitool
|
e41ce66ab2b88992dbfc08d79372bf3965724f3e
|
[
"MIT"
] | null | null | null |
sparweltbitool/config.py
|
checkout-charlie/bitool
|
e41ce66ab2b88992dbfc08d79372bf3965724f3e
|
[
"MIT"
] | 1
|
2015-07-22T16:53:42.000Z
|
2015-07-22T16:53:42.000Z
|
import os
import sys
if sys.version_info[:2] >= (3, 4):
import configparser
config = configparser.ConfigParser()
else:
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open('app/config/config_%s.cfg' % os.environ.get('APP_ENV', 'dev')))
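# Usage sketch: with APP_ENV=prod this reads app/config/config_prod.cfg;
# values are then available via config.get('<section>', '<option>').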
| 25.090909
| 82
| 0.706522
|
de2067c1459291384093f5c6102e9ab0301ade68
| 3,164
|
py
|
Python
|
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
import tkinter
from tkinter import *
from rsa_decryption_125 import decryptor
if __name__ == '__main__':
main()
| 34.391304
| 102
| 0.631163
|
de207e25aa9bca185c57928c53cd749f04d47818
| 2,031
|
py
|
Python
|
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
import paddle.fluid as fluid
import parl
from parl import layers
| 27.445946
| 68
| 0.573609
|
de20802d519423344cda6384cb09a94946775ee1
| 724
|
py
|
Python
|
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
from PyQt4.QtGui import QPalette, QColor
__author__ = 'pawel'
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
| 25.857143
| 57
| 0.672652
|
de269b1d0a4fe87a69767fba8b3e00ccf68b4d65
| 6,543
|
py
|
Python
|
admin.py
|
ericholscher/pypi
|
4c7c13bd2061d99bbf11a803ac7a7afe3740e365
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T11:31:07.000Z
|
2015-11-08T11:31:07.000Z
|
admin.py
|
ericholscher/pypi
|
4c7c13bd2061d99bbf11a803ac7a7afe3740e365
|
[
"BSD-3-Clause"
] | null | null | null |
admin.py
|
ericholscher/pypi
|
4c7c13bd2061d99bbf11a803ac7a7afe3740e365
|
[
"BSD-3-Clause"
] | null | null | null |
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt, shutil
import zipfile, gzip, tarfile
#sys.path.append('/usr/local/pypi/lib')
import store, config
def set_password(store, name, pw):
""" Reset the user's password and send an email to the address given.
"""
user = store.get_user(name.strip())
if user is None:
raise ValueError, 'user name unknown to me'
store.store_user(user['name'], pw.strip(), user['email'], None)
print 'done'
def remove_package(store, name):
''' Remove a package from the database
'''
store.remove_package(name)
print 'done'
def add_classifier(st, classifier):
''' Add a classifier to the trove_classifiers list
'''
cursor = st.get_cursor()
cursor.execute("select max(id) from trove_classifiers")
id = cursor.fetchone()[0]
if id:
id = int(id) + 1
else:
id = 1
fields = [f.strip() for f in classifier.split('::')]
for f in fields:
assert ':' not in f
levels = []
for l in range(2, len(fields)):
c2 = ' :: '.join(fields[:l])
store.safe_execute(cursor, 'select id from trove_classifiers where classifier=%s', (c2,))
l = cursor.fetchone()
if not l:
raise ValueError, c2 + " is not a known classifier"
levels.append(l[0])
levels += [id] + [0]*(3-len(levels))
store.safe_execute(cursor, 'insert into trove_classifiers (id, classifier, l2, l3, l4, l5) '
'values (%s,%s,%s,%s,%s,%s)', [id, classifier]+levels)
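# Worked example: for 'Topic :: Internet :: WWW/HTTP' the loop above resolves
# the id of the parent phrase 'Topic :: Internet' (raising ValueError if it
# is unknown), so the new row is inserted with
# (l2, l3, l4, l5) = (parent_id, new_id, 0, 0).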
def rename_package(store, old, new):
''' Rename a package. '''
if not store.has_package(old):
raise ValueError, 'no such package'
if store.has_package(new):
raise ValueError, new+' exists'
store.rename_package(old, new)
print "Please give www-data permissions to all files of", new
def add_mirror(store, root, user):
''' Add a mirror to the mirrors list
'''
store.add_mirror(root, user)
print 'done'
def delete_mirror(store, root):
''' Delete a mirror
'''
store.delete_mirror(root)
print 'done'
def delete_old_docs(config, store):
'''Delete documentation directories for packages that have been deleted'''
for i in os.listdir(config.database_docs_dir):
if not store.has_package(i):
path = os.path.join(config.database_docs_dir, i)
print "Deleting", path
shutil.rmtree(path)
if __name__ == '__main__':
config = config.Config('/data/pypi/config.ini')
st = store.Store(config)
st.open()
command = sys.argv[1]
args = (st, ) + tuple(sys.argv[2:])
try:
if command == 'password':
set_password(*args)
elif command == 'rmpackage':
remove_package(*args)
elif command == 'addclass':
add_classifier(*args)
print 'done'
elif command == 'addowner':
add_owner(*args)
elif command == 'delowner':
delete_owner(*args)
elif command == 'rename':
rename_package(*args)
elif command == 'addmirror':
add_mirror(*args)
elif command == 'delmirror':
delete_mirror(*args)
elif command == 'delolddocs':
delete_old_docs(config, *args)
elif command == 'send_comments':
send_comments(*args)
elif command == 'mergeuser':
merge_user(*args)
elif command == 'nuke_nested_lists':
nuke_nested_lists(*args)
else:
print "unknown command '%s'!"%command
st.changed()
finally:
st.close()
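# Usage sketch (run from the shell; commands map to the dispatch above):
#
#     python admin.py password <username> <new_password>
#     python admin.py rmpackage <package>
#     python admin.py addclass "Topic :: Internet :: WWW/HTTP"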
| 35.367568
| 97
| 0.599419
|
de26d7fc8c223d9eef08edc2aa50933adc8cafe1
| 1,777
|
py
|
Python
|
scripts/geodata/address_expansions/equivalence.py
|
Fillr/libpostal
|
bce153188aff9fbe65aef12c3c639d8069e707fc
|
[
"MIT"
] | 3,489
|
2015-03-03T00:21:38.000Z
|
2022-03-29T09:03:05.000Z
|
scripts/geodata/address_expansions/equivalence.py
|
StephenHildebrand/libpostal
|
d8c9847c5686a1b66056e65128e1774f060ff36f
|
[
"MIT"
] | 488
|
2015-05-29T23:04:28.000Z
|
2022-03-29T11:20:24.000Z
|
scripts/geodata/address_expansions/equivalence.py
|
StephenHildebrand/libpostal
|
d8c9847c5686a1b66056e65128e1774f060ff36f
|
[
"MIT"
] | 419
|
2015-11-24T16:53:07.000Z
|
2022-03-27T06:51:28.000Z
|
import random
import re
import six
from itertools import izip
from geodata.address_expansions.gazetteers import *
from geodata.encoding import safe_decode, safe_encode
from geodata.text.normalize import normalized_tokens
from geodata.text.tokenize import tokenize_raw, token_types
from geodata.text.utils import non_breaking_dash_regex
def equivalent(s1, s2, gazetteer, language):
'''
Address/place equivalence
-------------------------
OSM discourages abbreviations, but to make our training data map better
to real-world input, we can safely replace the canonical phrase with an
abbreviated version and retain the meaning of the words
'''
tokens_s1 = normalized_tokens(s1)
tokens_s2 = normalized_tokens(s2)
abbreviated_s1 = list(abbreviations_gazetteer.filter(tokens_s1))
abbreviated_s2 = list(abbreviations_gazetteer.filter(tokens_s2))
if len(abbreviated_s1) != len(abbreviated_s2):
return False
for ((t1, c1, l1, d1), (t2, c2, l2, d2)) in izip(abbreviated_s1, abbreviated_s2):
if c1 != token_types.PHRASE and c2 != token_types.PHRASE:
if t1 != t2:
return False
        elif c1 == token_types.PHRASE and c2 == token_types.PHRASE:
canonicals_s1 = canonicals_for_language(d1, language)
canonicals_s2 = canonicals_for_language(d2, language)
if not canonicals_s1 & canonicals_s2:
return False
else:
return False
return True
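# Usage sketch (hypothetical strings): both inputs abbreviate to the same
# phrase, so they compare as equivalent.
#
#     equivalent(u'Main Street', u'Main St', abbreviations_gazetteer, 'en')
#
# Note that the phrase filtering above uses the module-level
# abbreviations_gazetteer rather than the gazetteer argument.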
| 31.175439
| 85
| 0.68655
|
de27afb959d2cb13e74aaad06b80a65da178a7e6
| 170
|
py
|
Python
|
Language Skills/Python/Unit 08 Loops/01 Loops/Step Up 'For's/While Loops/3-While You're at it.py
|
rhyep/Python_tutorials
|
f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3
|
[
"MIT"
] | 346
|
2016-02-22T20:21:10.000Z
|
2022-01-27T20:55:53.000Z
|
Language Skills/Python/Unit 8/1-Loops/While Loops/3-While You're at it.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 55
|
2016-04-07T13:58:44.000Z
|
2020-06-25T12:20:24.000Z
|
Language Skills/Python/Unit 8/1-Loops/While Loops/3-While You're at it.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 477
|
2016-02-21T06:17:02.000Z
|
2021-12-22T10:08:01.000Z
|
num = 1
while num <= 10:  # Fill in the condition
    x = num ** 2  # Print num squared
    num = num + 1  # Increment num (make sure to do this!)
    print x
print num
| 21.25
| 56
| 0.594118
|
de2838f69cfe04090e0142bb22b24b01a4243cd5
| 948
|
py
|
Python
|
setup.py
|
povilasb/udptest
|
3d16d2e6509e008b37775e7784af54b6edb6633e
|
[
"MIT"
] | 2
|
2017-11-17T09:10:41.000Z
|
2019-09-20T21:50:08.000Z
|
setup.py
|
povilasb/udptest
|
3d16d2e6509e008b37775e7784af54b6edb6633e
|
[
"MIT"
] | null | null | null |
setup.py
|
povilasb/udptest
|
3d16d2e6509e008b37775e7784af54b6edb6633e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='udptest',
version='0.1.0',
description='UDP benchmarking/testing tool.',
long_description=open('README.rst').read(),
url='https://github.com/povilasb/httpmeter',
author='Povilas Balciunas',
author_email='balciunas90@gmail.com',
license='MIT',
packages=find_packages(exclude=('tests')),
entry_points={
'console_scripts': [
'udptestd = udptest.server:main',
'udptest = udptest.client:main',
]
},
classifiers=[
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Topic :: System :: Networking',
'Topic :: Internet :: UDP',
],
install_requires=requirements(),
)
| 25.621622
| 49
| 0.582278
|
de28f51f7fb4db9f4c4cfed3b53384caa7188918
| 3,200
|
py
|
Python
|
ssanchors/utilities.py
|
IoSR-Surrey/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 4
|
2018-07-06T14:35:29.000Z
|
2019-08-28T17:13:11.000Z
|
ssanchors/utilities.py
|
nd1511/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 1
|
2018-06-18T17:08:28.000Z
|
2018-06-19T10:45:58.000Z
|
ssanchors/utilities.py
|
nd1511/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 1
|
2018-11-05T19:56:17.000Z
|
2018-11-05T19:56:17.000Z
|
from __future__ import division
import numpy as np
from untwist import data
from untwist import transforms
def target_accompaniment(target, others, sample_rate=None):
"""
Given a target source and list of 'other' sources, this function returns
the target and accompaniment as untwist.data.audio.Wave objects. The
accompaniment is defined as the sum of the other sources.
Parameters
----------
target : np.ndarray or Wave, shape=(num_samples, num_channels)
The true target source.
others : List or single np.ndarray or Wave object
Each object should have the shape=(num_samples, num_channels)
If a single array is given, this should correspond to the
accompaniment.
sample_rate : int, optional
Only needed if Wave objects not provided.
Returns
-------
target : Wave, shape=(num_samples, num_channels)
accompaniment : Wave, shape=(num_samples, num_channels)
"""
if isinstance(others, list):
if not isinstance(others[0], data.audio.Wave):
others = [data.audio.Wave(_, sample_rate) for _ in others]
accompaniment = sum(other for other in others)
else:
if not isinstance(others, data.audio.Wave):
others = data.audio.Wave(others, sample_rate)
accompaniment = others
if not isinstance(target, data.audio.Wave):
target = data.audio.Wave(target, sample_rate)
return target, accompaniment
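# Usage sketch (hypothetical arrays): vocals as the target, the remaining
# stems as the others; both are returned as Wave objects.
#
#     target, acc = target_accompaniment(vocals, [drums, bass],
#                                        sample_rate=44100)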
def stft_istft(num_points=2048, window='hann'):
"""
Returns an STFT and an ISTFT Processor object, both configured with the
same window and transform length. These objects are to be used as follows:
>>> stft, istft = stft_istft()
>>> x = untwist.data.audio.Wave.tone() # Or some Wave
>>> y = stft.process(x)
>>> x = istft.process(y)
Parameters
----------
num_points : int
The number of points to use for the window and the fft transform.
window : str
The type of window to use.
Returns
-------
stft : untwist.transforms.stft.STFT
An STFT processor.
    istft : untwist.transforms.stft.ISTFT
        An ISTFT processor.
"""
stft = transforms.STFT(window, num_points, num_points // 2)
istft = transforms.ISTFT(window, num_points, num_points // 2)
return stft, istft
def ensure_audio_doesnt_clip(list_of_arrays):
"""
Takes a list of arrays and scales them by the same factor such that
none clip.
Parameters
----------
list_of_arrays : list
A list of array_like objects
Returns
-------
new_list_of_arrays : list
A list of scaled array_like objects.
"""
    max_peak = 0
for audio in list_of_arrays:
audio_peak = np.max(np.abs(audio))
if audio_peak > max_peak:
max_peak = audio_peak
if max_peak >= 1:
print('Warning: Audio has been attenuated to prevent clipping')
gain = 0.999 / max_peak
new_list_of_arrays = []
for audio in list_of_arrays:
new_list_of_arrays.append(audio * gain)
else:
new_list_of_arrays = list_of_arrays
return new_list_of_arrays
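# Worked example: peaks of 1.5 and 0.5 give max_peak = 1.5, so both arrays
# are scaled by gain = 0.999 / 1.5 (= 0.666), leaving the louder one peaking
# at 0.999 instead of clipping.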
| 25.806452
| 78
| 0.64625
|
de296667231d2bd75b621d94c889fd2ea3b5afb5
| 812
|
py
|
Python
|
bids_events/Events.py
|
InstitutoDOr/bids_events
|
c00d76e1f62e5b647f94609acbc9e173a356aef7
|
[
"MIT"
] | null | null | null |
bids_events/Events.py
|
InstitutoDOr/bids_events
|
c00d76e1f62e5b647f94609acbc9e173a356aef7
|
[
"MIT"
] | null | null | null |
bids_events/Events.py
|
InstitutoDOr/bids_events
|
c00d76e1f62e5b647f94609acbc9e173a356aef7
|
[
"MIT"
] | null | null | null |
import os
import re
| 31.230769
| 62
| 0.571429
|
de2bfdafb52bf7f86a472b4af4f49451d709be07
| 87
|
py
|
Python
|
tests/fixtures/abcd_package/test_a.py
|
venmo/nose-randomly
|
39db5db71a226ffdb6572d5785638e0a16379cfb
|
[
"BSD-3-Clause"
] | 19
|
2015-07-30T17:27:56.000Z
|
2021-08-10T07:19:43.000Z
|
tests/fixtures/abcd_package/test_a.py
|
venmo/nose-randomly
|
39db5db71a226ffdb6572d5785638e0a16379cfb
|
[
"BSD-3-Clause"
] | 11
|
2016-02-14T10:33:44.000Z
|
2016-10-28T12:38:35.000Z
|
tests/fixtures/abcd_package/test_a.py
|
adamchainz/nose-randomly
|
8a3fbeaf7cc5452c44da8c7e7573fe89391c8260
|
[
"BSD-3-Clause"
] | 4
|
2016-06-01T06:04:46.000Z
|
2016-10-26T11:41:53.000Z
|
from unittest import TestCase
| 12.428571
| 29
| 0.666667
|
de2d96eb9081272f5172b90d540db88b204c04b4
| 427
|
py
|
Python
|
Python_Challenge_115/6/F.py
|
LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 28
|
2019-10-15T13:15:26.000Z
|
2021-11-08T08:23:45.000Z
|
Python_Challenge_115/6/F.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | null | null | null |
Python_Challenge_115/6/F.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 17
|
2019-09-09T00:15:36.000Z
|
2021-01-28T13:08:51.000Z
|
'''
Statement
Fibonacci numbers are the numbers in the integer sequence starting with 1, 1 where every number after the first two is the sum of the two preceding ones:
1, 1, 2, 3, 5, 8, 13, 21, 34, ...
Given a positive integer n, print the nth Fibonacci number.
Example input
6
Example output
8
'''
num = int(input())
before, curr, i = 0, 1, 1
while num > i:
before, curr = curr, curr + before
i += 1
print(curr)
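# Trace for the sample input 6: (before, curr) evolves 0,1 -> 1,1 -> 1,2 ->
# 2,3 -> 3,5 -> 5,8 while i counts 1..6, so the loop exits with curr == 8.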
| 18.565217
| 153
| 0.676815
|
de2edc2bbe1eee14e878fa5bd6b3104c3a6af8ad
| 144
|
py
|
Python
|
test/test_sum_up.py
|
marco-a-wagner/nirvana
|
325756ec5f208994767b4909ed217ce716f5fcfb
|
[
"CC0-1.0"
] | null | null | null |
test/test_sum_up.py
|
marco-a-wagner/nirvana
|
325756ec5f208994767b4909ed217ce716f5fcfb
|
[
"CC0-1.0"
] | null | null | null |
test/test_sum_up.py
|
marco-a-wagner/nirvana
|
325756ec5f208994767b4909ed217ce716f5fcfb
|
[
"CC0-1.0"
] | null | null | null |
from src.sum_up import *
| 16
| 30
| 0.583333
|
de2ffb901bbfbc3af2061583ab91b8842066be1f
| 1,376
|
py
|
Python
|
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | 1
|
2018-07-10T23:37:47.000Z
|
2018-07-10T23:37:47.000Z
|
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | null | null | null |
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 15:38:11 2018
@author: Yekta
"""
import csv
import numpy as np
from sklearn.cluster import KMeans
clon = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/MoS2BP Binding Characterization_07-11-17_DY.csv")))
for k in range(1,15):
fin=[]
for m in range(1,13):
dataFromCSV = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/573x96/recon/location"+str(m)+"/PCA"+str(k)+".csv")))
dataFromCSV=np.asarray(dataFromCSV)
dataFromCSV=dataFromCSV.T
temp=dataFromCSV[1:,1:]
temp=temp.astype(np.float)
#clusters according to properties
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(temp)
fin.append(y_kmeans)
fin=np.asarray(fin)
fin=fin.T
matrix = [[0 for x in range(13)] for y in range(97)]
matrix[0][0]="Index"
for z in range(1,97):
matrix[z][0]=clon[z+1][11]
for x in range(1,13):
matrix[0][x]=x
for y in range(1,97):
matrix[y][x]=fin[y-1,x-1]
matrix=np.asarray(matrix)
with open("C:/Users/Yekta/Desktop/stajvol3/573x96/cluster/clusteredPCA"+str(k)+".csv", 'w', newline='') as myfile:
wr = csv.writer(myfile)
wr.writerows(matrix)
| 32.761905
| 130
| 0.588663
|
de319a3d0a027f8b448c09d0528c44c359822d8e
| 1,440
|
py
|
Python
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | 2
|
2015-09-16T15:24:39.000Z
|
2015-11-18T11:53:51.000Z
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | 1
|
2018-04-04T15:33:20.000Z
|
2018-04-04T15:33:20.000Z
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_collision.test_discretedynamicsworld
"""
from __future__ import unicode_literals, print_function, absolute_import
import unittest
import bullet
from .test_worlds import WorldTestDataMixin
| 28.8
| 72
| 0.634028
|
de31e808778594864eecf61a23f3d4e16b0f2a4b
| 820
|
py
|
Python
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 1
|
2019-08-19T16:02:20.000Z
|
2019-08-19T16:02:20.000Z
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 396
|
2017-07-18T15:19:55.000Z
|
2021-05-03T06:23:06.000Z
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 2
|
2019-03-05T16:23:10.000Z
|
2020-04-16T08:59:11.000Z
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
from force_wfmanager.notifications.ui_notification_hooks_manager \
import \
UINotificationHooksManager
from force_wfmanager.notifications.ui_notification_plugin import \
UINotificationPlugin
| 31.538462
| 68
| 0.74878
|
de31ea78bbeb185adcdcced18fcb297d6af4dc71
| 447
|
py
|
Python
|
phrasebook/middleware.py
|
DanCatchpole/phrasebook-django
|
4f85ec40626cbb97c659448ee06f2291c8f2918b
|
[
"MIT"
] | 1
|
2020-11-10T17:31:56.000Z
|
2020-11-10T17:31:56.000Z
|
phrasebook/middleware.py
|
DanCatchpole/phrasebook-django
|
4f85ec40626cbb97c659448ee06f2291c8f2918b
|
[
"MIT"
] | null | null | null |
phrasebook/middleware.py
|
DanCatchpole/phrasebook-django
|
4f85ec40626cbb97c659448ee06f2291c8f2918b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import redirect
from .models import UserLanguage
| 27.9375
| 66
| 0.680089
|
de346180214f310ac4c427bc250a7eb3f75732e4
| 113
|
py
|
Python
|
PROGATE/PYTHON_I_page07.py
|
vox256/Codes
|
c408ef0fbc25af46dacef93b3496985feb98dd5c
|
[
"MIT"
] | null | null | null |
PROGATE/PYTHON_I_page07.py
|
vox256/Codes
|
c408ef0fbc25af46dacef93b3496985feb98dd5c
|
[
"MIT"
] | null | null | null |
PROGATE/PYTHON_I_page07.py
|
vox256/Codes
|
c408ef0fbc25af46dacef93b3496985feb98dd5c
|
[
"MIT"
] | null | null | null |
money = 2000
print(money)
# Add 5000 to money
money += 5000
# Print money
print(money)
| 14.125
| 36
| 0.787611
|
de3486ad1b0724a14e6330a44ee92a956bf5ee2e
| 380
|
py
|
Python
|
quokka/modules/accounts/views.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | null | null | null |
quokka/modules/accounts/views.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | null | null | null |
quokka/modules/accounts/views.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import redirect, request, url_for
from flask.views import MethodView
from flask.ext.security import current_user
| 22.352941
| 59
| 0.692105
|
de34fea664d85474bd07e69ca7917ce3402fb32e
| 142
|
py
|
Python
|
nolina/__init__.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
nolina/__init__.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
nolina/__init__.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
"""Randomised linear algebra."""
import numpy.linalg as la
| 15.777778
| 39
| 0.640845
|
de35289eea69e5ceb7febfc7fa32b43c5609a79c
| 887
|
py
|
Python
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 6
|
2019-08-15T20:19:38.000Z
|
2021-02-28T21:33:19.000Z
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 31
|
2019-08-14T08:42:08.000Z
|
2020-05-07T13:43:43.000Z
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 5
|
2019-08-17T13:39:53.000Z
|
2020-04-01T07:25:51.000Z
|
from bt_utils.console import Console
from bt_utils.config import cfg
from bt_utils.embed_templates import SuccessEmbed, WarningEmbed
from bt_utils.handle_sqlite import DatabaseHandler
SHL = Console('BundestagsBot Reload')
DB = DatabaseHandler()
settings = {
'name': 'reload',
'channels': ['team'],
'mod_cmd': True
}
| 27.71875
| 92
| 0.713641
|
de3555aacf51f612d0e7cb4e5d614fc7db59f6c9
| 4,022
|
py
|
Python
|
scanner.py
|
Darchiv/scambus
|
0a81a67b76a5ec5117d56a4c05c4392696eb3f06
|
[
"MIT"
] | 22
|
2015-08-21T11:58:20.000Z
|
2021-12-28T04:50:05.000Z
|
scanner.py
|
Darchiv/scambus
|
0a81a67b76a5ec5117d56a4c05c4392696eb3f06
|
[
"MIT"
] | 5
|
2017-02-26T14:22:53.000Z
|
2021-02-11T00:47:48.000Z
|
scanner.py
|
Darchiv/scambus
|
0a81a67b76a5ec5117d56a4c05c4392696eb3f06
|
[
"MIT"
] | 14
|
2015-04-13T08:02:18.000Z
|
2021-12-16T14:08:54.000Z
|
#! /usr/bin/env python2.7
import getopt, sys, time, util
from wmbus import WMBusFrame
from Crypto.Cipher import AES
if __name__ == "__main__":
main(sys.argv[1:])
'''
class Scanner(threading.Thread):
    def __init__(self, dev):
        # something here that initializes the serial port

    def run(self):
        while True:
            # something

    def pack(self):
        # something

    def checksum(self):
        # something

    def write(self):
        # something
'''
| 31.421875
| 92
| 0.458478
|
de35b41f521bfe20dfbbf60f134cdbe2d7425715
| 2,080
|
py
|
Python
|
pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py
|
pyy1988/pyy_test1
|
6bea878409e658aa87441384419be51aaab061e7
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# module gi._gi
# from /usr/lib/python3/dist-packages/gi/_gi.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
# no doc
# imports
import _gobject as _gobject # <module '_gobject'>
import _glib as _glib # <module '_glib'>
import gi as __gi
import gobject as __gobject
from .object import object
| 25.679012
| 76
| 0.610577
|
de3618687057494d918d8f6f783dfd78edbb7ce5
| 828
|
py
|
Python
|
setup.py
|
ntamas/python-selecta
|
bc9a11f288df427ceb126aa994ac3810685e2d94
|
[
"MIT"
] | 1
|
2019-02-21T14:47:40.000Z
|
2019-02-21T14:47:40.000Z
|
setup.py
|
ntamas/python-selecta
|
bc9a11f288df427ceb126aa994ac3810685e2d94
|
[
"MIT"
] | 2
|
2015-07-11T03:32:35.000Z
|
2015-08-26T09:29:40.000Z
|
setup.py
|
ntamas/python-selecta
|
bc9a11f288df427ceb126aa994ac3810685e2d94
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from selecta import __version__
from setuptools import setup
options = dict(
name='python-selecta',
version=__version__,
url='http://github.com/ntamas/python-selecta',
description='Python port of @garybernhardt/selecta',
license='MIT',
author='Tamas Nepusz',
author_email='ntamas@gmail.com',
package_dir={'selecta': 'selecta'},
packages=['selecta'],
entry_points={
"console_scripts": [
'selecta = selecta.__main__:main'
]
},
test_suite="tests",
platforms='ALL',
classifiers=[
# TODO
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python'
]
)
setup(**options)
| 20.195122
| 56
| 0.607488
|
de37ff05a0046e06ac61cbc292e777a426c175fb
| 525
|
py
|
Python
|
graphsaint/setup.py
|
alexs131/GraphSAINT
|
20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8
|
[
"MIT"
] | null | null | null |
graphsaint/setup.py
|
alexs131/GraphSAINT
|
20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8
|
[
"MIT"
] | null | null | null |
graphsaint/setup.py
|
alexs131/GraphSAINT
|
20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8
|
[
"MIT"
] | null | null | null |
# cython: language_level=3
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
# import cython_utils
import os
os.environ["CC"] = "/opt/homebrew/Cellar/gcc/11.2.0_3/bin/g++-11"
os.environ["CXX"] = "/opt/homebrew/Cellar/gcc/11.2.0_3/bin/g++-11"
setup(ext_modules=cythonize(["graphsaint/cython_sampler.pyx", "graphsaint/cython_utils.pyx",
"graphsaint/norm_aggr.pyx"]), include_dirs=[numpy.get_include()])
# to compile: python graphsaint/setup.py build_ext --inplace
| 37.5
| 93
| 0.737143
|
de3854551e9e60f025c395d03bedb3f5b3cb6f38
| 4,958
|
py
|
Python
|
models/get_networks.py
|
kingqiuol/pytorch-template
|
8bc78f996fbbc15ae54a3055cd3d33199b4a96d8
|
[
"MIT"
] | null | null | null |
models/get_networks.py
|
kingqiuol/pytorch-template
|
8bc78f996fbbc15ae54a3055cd3d33199b4a96d8
|
[
"MIT"
] | null | null | null |
models/get_networks.py
|
kingqiuol/pytorch-template
|
8bc78f996fbbc15ae54a3055cd3d33199b4a96d8
|
[
"MIT"
] | null | null | null |
import sys
def get_network(args):
""" return given network
"""
if args.MODEL.NAME == 'vgg16':
from models.vgg import vgg16_bn
net = vgg16_bn()
elif args.MODEL.NAME == 'vgg13':
from models.vgg import vgg13_bn
net = vgg13_bn()
elif args.MODEL.NAME == 'vgg11':
from models.vgg import vgg11_bn
net = vgg11_bn()
elif args.MODEL.NAME == 'vgg19':
from models.vgg import vgg19_bn
net = vgg19_bn()
elif args.MODEL.NAME == 'densenet121':
from models.densenet import densenet121
net = densenet121()
elif args.MODEL.NAME == 'densenet161':
from models.densenet import densenet161
net = densenet161()
elif args.MODEL.NAME == 'densenet169':
from models.densenet import densenet169
net = densenet169()
elif args.MODEL.NAME == 'densenet201':
from models.densenet import densenet201
net = densenet201()
elif args.MODEL.NAME == 'googlenet':
from models.googlenet import googlenet
net = googlenet()
elif args.MODEL.NAME == 'inceptionv3':
from models.inceptionv3 import inceptionv3
net = inceptionv3()
elif args.MODEL.NAME == 'inceptionv4':
from models.inceptionv4 import inceptionv4
net = inceptionv4()
elif args.MODEL.NAME == 'inceptionresnetv2':
from models.inceptionv4 import inception_resnet_v2
net = inception_resnet_v2()
elif args.MODEL.NAME == 'xception':
from models.xception import xception
net = xception()
elif args.MODEL.NAME == 'resnet18':
from models.resnet import resnet18
net = resnet18()
elif args.MODEL.NAME == 'resnet34':
from models.resnet import resnet34
net = resnet34()
elif args.MODEL.NAME == 'resnet50':
from models.resnet import resnet50
net = resnet50()
elif args.MODEL.NAME == 'resnet101':
from models.resnet import resnet101
net = resnet101()
elif args.MODEL.NAME == 'resnet152':
from models.resnet import resnet152
net = resnet152()
elif args.MODEL.NAME == 'preactresnet18':
from models.preactresnet import preactresnet18
net = preactresnet18()
elif args.MODEL.NAME == 'preactresnet34':
from models.preactresnet import preactresnet34
net = preactresnet34()
elif args.MODEL.NAME == 'preactresnet50':
from models.preactresnet import preactresnet50
net = preactresnet50()
elif args.MODEL.NAME == 'preactresnet101':
from models.preactresnet import preactresnet101
net = preactresnet101()
elif args.MODEL.NAME == 'preactresnet152':
from models.preactresnet import preactresnet152
net = preactresnet152()
elif args.MODEL.NAME == 'resnext50':
from models.resnext import resnext50
net = resnext50()
elif args.MODEL.NAME == 'resnext101':
from models.resnext import resnext101
net = resnext101()
elif args.MODEL.NAME == 'resnext152':
from models.resnext import resnext152
net = resnext152()
elif args.MODEL.NAME == 'shufflenet':
from models.shufflenet import shufflenet
net = shufflenet()
elif args.MODEL.NAME == 'shufflenetv2':
from models.shufflenetv2 import shufflenetv2
net = shufflenetv2()
elif args.MODEL.NAME == 'squeezenet':
from models.squeezenet import squeezenet
net = squeezenet()
elif args.MODEL.NAME == 'mobilenet':
from models.mobilenet import mobilenet
net = mobilenet()
elif args.MODEL.NAME == 'mobilenetv2':
from models.mobilenetv2 import mobilenetv2
net = mobilenetv2()
elif args.MODEL.NAME == 'nasnet':
from models.nasnet import nasnet
net = nasnet()
elif args.MODEL.NAME == 'attention56':
from models.attention import attention56
net = attention56()
elif args.MODEL.NAME == 'attention92':
from models.attention import attention92
net = attention92()
elif args.MODEL.NAME == 'seresnet18':
from models.senet import seresnet18
net = seresnet18()
elif args.MODEL.NAME == 'seresnet34':
from models.senet import seresnet34
net = seresnet34()
elif args.MODEL.NAME == 'seresnet50':
from models.senet import seresnet50
net = seresnet50()
elif args.MODEL.NAME == 'seresnet101':
from models.senet import seresnet101
net = seresnet101()
elif args.MODEL.NAME == 'seresnet152':
from models.senet import seresnet152
net = seresnet152()
elif args.MODEL.NAME == 'wideresnet':
from models.wideresidual import wideresnet
net = wideresnet()
elif args.MODEL.NAME == 'stochasticdepth18':
from models.stochasticdepth import stochastic_depth_resnet18
net = stochastic_depth_resnet18()
elif args.MODEL.NAME == 'stochasticdepth34':
from models.stochasticdepth import stochastic_depth_resnet34
net = stochastic_depth_resnet34()
elif args.MODEL.NAME == 'stochasticdepth50':
from models.stochasticdepth import stochastic_depth_resnet50
net = stochastic_depth_resnet50()
elif args.MODEL.NAME == 'stochasticdepth101':
from models.stochasticdepth import stochastic_depth_resnet101
net = stochastic_depth_resnet101()
elif args.MODEL.NAME == 'vit':
from models.vit import vit
        net = vit()
else:
print('the network name you have entered is not supported yet')
sys.exit()
if args.MODEL.USE_GPU: # use_gpu
net = net.cuda()
return net
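# Design note: the chain above could be collapsed into a registry sketch
# (behaviour-preserving only for the names it covers; paths as imported in
# the corresponding branches):
#
#     import importlib
#     _NETS = {'vgg16': ('models.vgg', 'vgg16_bn'),
#              'resnet50': ('models.resnet', 'resnet50')}
#     module, attr = _NETS[args.MODEL.NAME]
#     net = getattr(importlib.import_module(module), attr)()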
| 32.834437
| 65
| 0.740621
|
de38b348a7c3f728ca43e602a33e53edfd8f033d
| 10,812
|
py
|
Python
|
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | 2
|
2020-01-30T21:51:00.000Z
|
2020-07-22T14:51:05.000Z
|
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | null | null | null |
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | null | null | null |
import pytest
from hypothesis import (
given,
settings,
strategies as st,
)
from eth_utils import (
ValidationError,
)
from eth.constants import (
ZERO_HASH32,
)
from eth2.beacon.committee_helpers import (
get_crosslink_committees_at_slot,
)
from eth2.beacon.state_machines.forks.serenity.block_validation import (
validate_attestation_aggregate_signature,
validate_attestation_latest_crosslink_root,
validate_attestation_justified_block_root,
validate_attestation_justified_epoch,
validate_attestation_crosslink_data_root,
validate_attestation_slot,
)
from eth2.beacon.tools.builder.validator import (
create_mock_signed_attestation,
)
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.crosslink_records import CrosslinkRecord
| 31.068966
| 117
| 0.561321
|
de3966c1044750e98c8968c82831f55e24112044
| 13,679
|
py
|
Python
|
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | null | null | null |
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | 1
|
2021-05-21T04:52:28.000Z
|
2021-05-21T05:00:10.000Z
|
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | 1
|
2021-04-07T13:50:43.000Z
|
2021-04-07T13:50:43.000Z
|
###########################################################################################################
###########################################################################################################
## SeqtaToSDS ##
## Jacob Curulli ##
## This code is shared as is, under Creative Commons Attribution Non-Commercial 4.0 License ##
## Permissions beyond the scope of this license may be available at http://creativecommons.org/ns ##
###########################################################################################################
# Read Me
# This script will likely not work out of the box and will need to be customised
# 1. The approvedClassesCSV is a list of classes in Seqta that will be exported,
# the list is checked against the 'name' column in the public.classunit table.
# 2. A directory called 'sds' will need to be created in the root of where the script is run.
# 3. This script allows for an admin user to be added to every class (section)
# import required modules
# psycopg2 isn't usually included with python and may need to be installed separately
# see www.psycopg.org for instructions
import psycopg2
import csv
import os.path
import configparser
from datetime import datetime
# Get the date
dateNow = datetime.now()
# Read the config.ini file
config = configparser.ConfigParser()
config.read('config.ini')
# read config file for seqta database connection details
db_user=config['db']['user']
db_port=config['db']['port']
db_password=config['db']['password']
db_database=config['db']['database']
db_host=config['db']['host']
db_sslmode=config['db']['sslmode']
# read config file for school details
teamsAdminUsername=config['school']['teamsAdminUsername']
teamsAdminFirstName=config['school']['teamsAdminFirstName']
teamsAdminLastName=config['school']['teamsAdminLastName']
teamsAdminID=config['school']['teamsAdminID']
schoolName =config['school']['schoolName']
schoolSISId=config['school']['schoolSISId']
classTermName=config['school']['classTermName']
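# Sketch of the expected config.ini layout (section and key names exactly as
# read above; every value below is a placeholder, not a real credential):
#
#     [db]
#     user = seqta_read
#     port = 5432
#     password = <secret>
#     database = seqta
#     host = db.example.school
#     sslmode = require
#
#     [school]
#     teamsAdminUsername = teams.admin@example.school
#     teamsAdminFirstName = Teams
#     teamsAdminLastName = Admin
#     teamsAdminID = 999999
#     schoolName = Example School
#     schoolSISId = 1001
#     classTermName = Term 1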
# declare some variables here so we can make sure they are present
staffList = set()
studentList = set()
classArray = tuple()
currentYear = dateNow.strftime("%Y")
print("current year is:", currentYear)
# file locations, this can be changed to suit your environment
csvApprovedClasses = "approved_classes.csv"
csvSchoolFilename = "sds/School.csv"
csvSectionFileName = "sds/Section.csv"
csvStudentFileName = "sds/Student.csv"
csvTeacherFileName = "sds/Teacher.csv"
csvTeacherRosterFileName = "sds/TeacherRoster.csv"
csvStudentEnrollmentFileName = "sds/StudentEnrollment.csv"
# remove the csv files if they already exist. This is a messy way of doing it but I learnt python 2 days ago so whatever
if os.path.exists(csvSchoolFilename):
os.remove(csvSchoolFilename)
if os.path.exists(csvSectionFileName):
os.remove(csvSectionFileName)
if os.path.exists(csvStudentFileName):
os.remove(csvStudentFileName)
if os.path.exists(csvTeacherFileName):
os.remove(csvTeacherFileName)
if os.path.exists(csvTeacherRosterFileName):
os.remove(csvTeacherRosterFileName)
if os.path.exists(csvStudentEnrollmentFileName):
os.remove(csvStudentEnrollmentFileName)
try:
# Import CSV file for approved class lists
with open(csvApprovedClasses, newline='', encoding='utf-8-sig') as csvfile:
classList = list(csv.reader(csvfile))
print (type(classList))
print (classList)
print ("Number of classes imported from csv list: ",len(classList))
except:
print("***************************")
print("Error importing csv file")
# Open connection to Seqta
try:
connection = psycopg2.connect(user=db_user,
port=db_port,
password=db_password,
database=db_database,
host = db_host,
sslmode = db_sslmode)
cursor = connection.cursor()
print(connection.get_dsn_parameters(), "\n")
except (Exception, psycopg2.Error) as error:
print("Error while connecting to PostgreSQL", error)
# Fetch data for classlists
try:
for i in classList:
className = str(('[%s]' % ', '.join(map(str, (i))))[1:-1])
print ("**")
print (className)
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
# Lookup classID from Class name in Seqta
sq_classUnitQuery = "SELECT * FROM public.classunit WHERE name = (%s);"
cursor.execute(sq_classUnitQuery,(className,))
classUnitPull = cursor.fetchall()
print("Getting class information for:", (className))
for row in classUnitPull:
classUnitID = row[0]
classSubjectID = row[4]
classTermID = row[7]
print("Class unit ID (classUnitID) is:", classUnitID)
print("Class subject ID (classSubjectID) is:", classSubjectID)
print("Class term ID (classTermID) is:", classTermID)
# Check if class has a staff member or students
# If they don't we need to stop processing the class and drop it gracefully
# Get subject description for Class
sq_classSubjectQuery = "SELECT * FROM subject WHERE id = (%s);"
cursor.execute(sq_classSubjectQuery, (classSubjectID,))
classSubjectPull = cursor.fetchall()
for row in classSubjectPull:
classSubjectDescription = row[3]
classSubjectName = row[2]
classTeamName = (className + " - " + classSubjectDescription)
print("Class subject Description (classSubjectDescription) is:", classSubjectDescription)
print("Class team name (classTeamName) is:", classTeamName)
print("Class subject Name (classSubjectName) is:", classSubjectName)
# Get StaffID in this classUnit
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) and date <= current_date ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
if staffID_pre is None:
print("Couldn't find a class today or previously for classunit:", classUnitID)
print("Checking for a class up to 14 days in the future and selecting the closest date to today")
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) date = current_date + interval '14 day' ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
else:
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
# Get Student ID's for this classUnit
sq_studentIDListQuery = "SELECT student from \"classunitStudent\" WHERE classunit = (%s) and removed is NULL;"
cursor.execute(sq_studentIDListQuery, (classUnitID,))
studentIDArray = tuple([r[0] for r in cursor.fetchall()])
print("List of students in class name:", className)
print(studentIDArray)
for row in studentIDArray:
studentList.add(row)
# Check if the csv section file exists
csvSectionFileExists = os.path.isfile(csvSectionFileName)
# Write to the section csv file
with open(csvSectionFileName, 'a', newline='') as csvSection:
writer = csv.writer(csvSection)
# If the csv doesn't exist already we'll need to put in the headers
if not csvSectionFileExists:
writer.writerow(["SIS ID", "School SIS ID", "Section Name", "Section Number", "Term SIS ID", "Term Name", "Course SIS ID", "Course Name", "Course Description"])
writer.writerow([(classUnitID), (schoolSISId), (classTeamName), (classUnitID), (classTermID), (classTermName), (classUnitID), (classSubjectName), (classSubjectDescription)])
print ("Writing class section row")
# Check if the csv teacher roster file exists
csvTeacherRosterFileExists = os.path.isfile(csvTeacherRosterFileName)
# Write to the teacher roster csv file
with open(csvTeacherRosterFileName, 'a', newline='') as csvTeacherRoster:
writer = csv.writer(csvTeacherRoster)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherRosterFileExists:
writer.writerow(["Section SIS ID", "SIS ID"])
writer.writerow([(classUnitID), (staffID)])
# Also include the Teams Admin account as a teacher
writer.writerow([(classUnitID), (teamsAdminID)])
print("Written staff to roster")
# Check if the csv student enrollment file exists
csvStudentEnrollmentFileNameExists = os.path.isfile(csvStudentEnrollmentFileName)
# Write to the student enrollment csv file
with open(csvStudentEnrollmentFileName, 'a', newline='') as csvStudentEnrollment:
writer = csv.writer(csvStudentEnrollment)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentEnrollmentFileNameExists:
writer.writerow(["Section SIS ID", "SIS ID"])
for studentInArray in studentIDArray:
writer.writerow([(classUnitID), (studentInArray)])
except:
print("")
print("***************************")
print("Error fetching class list data")
print("")
# Now we will fetch the staff information
try:
print("Print the staff lists now")
print(staffList)
for staff in staffList:
# Now get the staff information
sq_staffQuery = "SELECT * from public.staff WHERE id = (%s);"
cursor.execute(sq_staffQuery, (staff,))
staffPull = cursor.fetchall()
for row in staffPull:
staffFirstName = row[4]
staffLastName = row[7]
staffUsername = row[21]
print("Staff First Name (staffFirstName) is:", staffFirstName)
print("Staff Last Name (staffLastName) is:", staffLastName)
print("Staff username (staffUsername) is:", staffUsername)
print("Staff ID is (staff) is:", staff)
# Now we write this information to the Teacher.csv file
# Check if the csv teacher file exists
csvTeacherFileNameExists = os.path.isfile(csvTeacherFileName)
# Write to the teacher csv file
with open(csvTeacherFileName, 'a', newline='') as csvTeacher:
writer = csv.writer(csvTeacher)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Teacher Number"])
# Also include the Teams Admin user as a teacher
writer.writerow(
[(teamsAdminID), (schoolSISId), (teamsAdminFirstName), (teamsAdminLastName), (teamsAdminUsername),
(teamsAdminID)])
writer.writerow([(staff), (schoolSISId), (staffFirstName), (staffLastName), (staffUsername), (staff)])
except:
print("something went wrong getting the staff data")
# Now we will fetch the student information
try:
print("Print the student lists now")
print(studentList)
for student in studentList:
# Now get the student information
sq_studentQuery = "SELECT * from student WHERE id = (%s) AND status = 'FULL';"
cursor.execute(sq_studentQuery, (student,))
studentPull = cursor.fetchall()
for row in studentPull:
studentFirstName = row[3]
studentLastName = row[6]
studentUsername = row[47]
print("Student First Name (studentFirstName) is:", studentFirstName)
print("Student Last Name (studentLastName) is:", studentLastName)
print("Student username (studentUsername) is:", studentUsername)
print("Student ID is (student) is:", student)
# Now we write this information to the Student.csv file
# Check if the csv Student file exists
csvStudentFileNameExists = os.path.isfile(csvStudentFileName)
# Write to the student enrollment csv file
with open(csvStudentFileName, 'a', newline='') as csvStudent:
writer = csv.writer(csvStudent)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Student Number"])
writer.writerow([(student), (schoolSISId), (studentFirstName), (studentLastName), (studentUsername), (student)])
except:
print("something went wrong getting the student data")
# write the School.csv file
try:
with open('sds/School.csv', 'a', newline='') as csvSchool:
writer = csv.writer(csvSchool)
writer.writerow(["SIS ID","Name"])
writer.writerow([(schoolSISId),(schoolName)])
except:
print("something went wrong writing the school csv file")
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
| 45.445183
| 185
| 0.635865
|
de3b514aae1619036f4e6044f0e8e9c86052e8a3
| 457
|
py
|
Python
|
Chapter 1/imtools.py
|
ai-distill/PythonVisionProgramming
|
15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e
|
[
"Apache-2.0"
] | null | null | null |
Chapter 1/imtools.py
|
ai-distill/PythonVisionProgramming
|
15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e
|
[
"Apache-2.0"
] | null | null | null |
Chapter 1/imtools.py
|
ai-distill/PythonVisionProgramming
|
15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e
|
[
"Apache-2.0"
] | null | null | null |
"""
"""
import os
from PIL import Image
from numpy import *
def get_imlist(path):
"""
JPG
:param path:
:return:
"""
return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
def imresize(im, sz):
"""
:param im:
:param sz:
:return:
"""
pil_im = Image.fromarray(uint8(im))
return array(pil_im.resize(sz))
print(get_imlist('.'))
| 16.925926
| 82
| 0.610503
|
de3ba9c03d6171d2fbdd34396181dfc69aedd8a7
| 5,190
|
py
|
Python
|
cart/views.py
|
lbacon17/lb-fitness
|
16f78841c834ca0e45317285b6c3b05ad97501f6
|
[
"W3C"
] | null | null | null |
cart/views.py
|
lbacon17/lb-fitness
|
16f78841c834ca0e45317285b6c3b05ad97501f6
|
[
"W3C"
] | null | null | null |
cart/views.py
|
lbacon17/lb-fitness
|
16f78841c834ca0e45317285b6c3b05ad97501f6
|
[
"W3C"
] | 1
|
2021-03-31T10:55:51.000Z
|
2021-03-31T10:55:51.000Z
|
from django.shortcuts import (
render, redirect, reverse, get_object_or_404, HttpResponse)
from django.contrib import messages
from shop.models import Product
from members.models import Member
def load_cart(request):
"""This view render's the user's cart contents"""
return render(request, 'cart/cart.html')
def add_item_to_cart(request, item_id):
"""This view lets the user add an item to their shopping cart"""
item = get_object_or_404(Product, pk=item_id)
quantity = int(request.POST.get('quantity'))
redirect_url = request.POST.get('redirect_url')
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
if item_id in list(cart.keys()):
if size in cart[item_id]['items_by_size'].keys():
cart[item_id]['items_by_size'][size] += quantity
messages.success(request, f'Updated size {size.upper()} '
f'of {item.friendly_name} to '
f'{cart[item_id]["items_by_size"][size]}')
else:
cart[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Added {quantity}x '
f'{item.friendly_name} in {size.upper()}')
else:
cart[item_id] = {'items_by_size': {size: quantity}}
messages.success(request, f'Added {quantity}x {item.friendly_name}'
f' in size {size.upper()}')
else:
if item_id in list(cart.keys()):
cart[item_id] += quantity
messages.success(request, f'Added {quantity}x {item.friendly_name}'
f' to your cart. You now have {cart[item_id]} of'
f' {item.friendly_name} in your cart')
else:
cart[item_id] = quantity
messages.success(request, f'{cart[item_id]}x {item.friendly_name} '
f'was added to your cart')
request.session['cart'] = cart
return redirect(redirect_url)
def update_cart(request, item_id):
"""This view lets the user update the quantity of an item in their cart"""
item = get_object_or_404(Product, pk=item_id)
quantity = int(request.POST.get('quantity'))
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
if quantity > 99:
messages.error(request, 'You cannot add this many units of a '
'product. The maximum possible quantity is 99. '
'Please enter a quantity within the accepted '
'range.')
elif quantity > 0:
cart[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Updated quantity of '
f'{item.friendly_name} in size {size.upper()} '
                             f'to {cart[item_id]["items_by_size"][size]}.')
else:
del cart[item_id]['items_by_size'][size]
if not cart[item_id]['items_by_size']:
cart.pop(item_id)
messages.success(request, f'Removed {item.friendly_name} in size '
f'{size.upper()} from your cart.')
else:
if quantity > 99:
messages.error(request, 'You cannot add this many units of a '
'product. The maximum possible quantity is 99. '
'Please enter a quantity within the accepted '
'range.')
elif quantity > 0:
cart[item_id] = quantity
messages.success(request, f'Successfully updated quantity of '
f'{item.friendly_name} to {cart[item_id]}.')
else:
cart.pop(item_id)
messages.success(request, f'{item.friendly_name} was removed from '
'your cart.')
request.session['cart'] = cart
return redirect(reverse('load_cart'))
def remove_item_from_cart(request, item_id):
"""This view lets the user delete an item from their shopping cart"""
try:
item = get_object_or_404(Product, pk=item_id)
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
del cart[item_id]['items_by_size'][size]
if not cart[item_id]['items_by_size']:
cart.pop(item_id)
messages.success(request, f'Removed {item.friendly_name} in size '
f'{size.upper()} from your cart.')
else:
cart.pop(item_id)
messages.success(request, f'{item.friendly_name} was deleted from '
'your cart.')
request.session['cart'] = cart
return HttpResponse(status=200)
except Exception as e:
        messages.error(request, f'There was a problem removing the item: '
                       f'{e}')
return HttpResponse(status=500)
| 41.854839
| 79
| 0.559152
|
de3d6c63aa40e3dc9ff43cbc7c4deca001d8d40e
| 172
|
py
|
Python
|
runserver.py
|
revalo/hush.mit.edu
|
e47c28c934dcfb94c52f6e12367869389e8ed7a8
|
[
"MIT"
] | 21
|
2017-10-30T20:55:48.000Z
|
2021-09-03T14:06:58.000Z
|
runserver.py
|
revalo/hush.mit.edu
|
e47c28c934dcfb94c52f6e12367869389e8ed7a8
|
[
"MIT"
] | 1
|
2021-11-08T02:05:34.000Z
|
2021-11-08T06:54:41.000Z
|
runserver.py
|
revalo/hush.mit.edu
|
e47c28c934dcfb94c52f6e12367869389e8ed7a8
|
[
"MIT"
] | 3
|
2017-11-15T23:18:00.000Z
|
2018-01-01T06:44:03.000Z
|
from confess import app
from confess.config import PORT, DEBUG
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=PORT,
debug=DEBUG
)
| 19.111111
| 38
| 0.593023
|
de3daa1f9c197f223b8adf05ac9c7b5634367d5c
| 5,945
|
py
|
Python
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-03-18T18:27:49.000Z
|
2019-03-18T18:27:49.000Z
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2020-12-17T21:33:15.000Z
|
2020-12-17T21:35:41.000Z
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-01-05T08:23:20.000Z
|
2021-01-05T08:23:20.000Z
|
""" Plots analysis on the workflow variables for experiments with different
workflow types and different %of workflow core hours in the workload.
Resuls are plotted as barchars that show how much the vas deviate in
single and multi from aware.
"""
import matplotlib
from orchestration import get_central_db
from orchestration.definition import ExperimentDefinition
from plot import (plot_multi_bars, produce_plot_config, extract_results,
gen_trace_ids_exps, calculate_diffs, get_args, join_rows,
replace)
from stats.trace import ResultTrace
# remote use no Display
matplotlib.use('Agg')
base_trace_id_percent, lim = get_args(2459, True)
print("Base Exp", base_trace_id_percent)
print("Using analysis of limited workflows:", lim)
db_obj = get_central_db()
edge_keys= {0: "[0,48] core.h", 48*3600:"(48, 960] core.h",
960*3600:"(960, inf.) core.h"}
trace_id_rows = []
base_exp=170
exp=ExperimentDefinition()
exp.load(db_obj, base_exp)
core_seconds_edges=exp.get_machine().get_core_seconds_edges()
# trace_id_rows = [
# [ 4166, 4167, 4168, 4184, 4185, 4186, 4202, 4203, 4204,
# 4220, 4221, 4222, 4238, 4239, 4240 ],
# [ 4169, 4170, 4171, 4187, 4188, 4189, 4205, 4206, 4207,
# 4223, 4224, 4225, 4241, 4242, 4243 ],
# [ 4172, 4173, 4174, 4190, 4191, 4192, 4208, 4209, 4210,
# 4226, 4227, 4228, 4244, 4245, 4246 ],
# [ 4175, 4176, 4177, 4193, 4194, 4195, 4211, 4212, 4213,
# 4229, 4230, 4231, 4247, 4248, 4249],
# [ 4178, 4179, 4180, 4196, 4197, 4198, 4214, 4215, 4216,
# 4232, 4233, 4234, 4250, 4251, 4252],
# [ 4181, 4182, 4183, 4199, 4200, 4201, 4217, 4218, 4219,
# 4235, 4236, 4237, 4253, 4254, 4255],
# ]
pre_base_trace_id_percent = 2549+18
trace_id_rows= join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1),
gen_trace_ids_exps(base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5)
)
trace_id_colors=join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent+1,
inverse=False, skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1,
group_size=2),
gen_trace_ids_exps(base_trace_id_percent+1,
inverse=False,skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5,
group_size=2)
)
print("IDS", trace_id_rows)
trace_id_rows=replace(trace_id_rows,
[2489, 2490, 2491,
2507, 2508, 2509,
2525, 2526, 2527],
[2801, 2802, 2803,
2804, 2805, 2806,
2807, 2808, 2809])
print("IDS", trace_id_rows)
print("COLORS", trace_id_colors)
time_labels = ["", "5%", "", "10%", "", "25%",
"", "50%", "", "75%",
"", "100%"]
manifest_label=["floodP", "longW", "wideL",
"cybers", "sipht", "montage"]
y_limits_dic={"[0,48] core.h": (1, 1000),
"(48, 960] core.h":(1,100),
"(960, inf.) core.h":(1,20)}
target_dir="percent"
grouping_types = [["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"]]
colors, hatches, legend = produce_plot_config(db_obj, trace_id_colors)
#head_file_name="percent"
head_file_name="wf_percent-b{0}".format(base_trace_id_percent)
for (name, result_type) in zip(["Turnaround speedup", "wait time(h.)",
"runtime (h.)", "stretch factor"],
["wf_turnaround", "wf_waittime",
"wf_runtime", "wf_stretch_factor"]):
if lim:
result_type="lim_{0}".format(result_type)
print("Loading: {0}".format(name))
factor=1.0/3600.0
if result_type in ("wf_stretch_factor", "lim_wf_stretch_factor"):
factor=None
edge_plot_results = extract_results(db_obj, trace_id_rows,
result_type, factor=factor,
second_pass=lim)
diffs_results = calculate_diffs(edge_plot_results, base_index=0,
group_count=3, speedup=True)
# for res_row in edge_plot_results:
# print [ x._get("median") for x in res_row]
title="{0}".format(name)
y_limits=(0,4)
print("Plotting figure")
ref_level=1.0
plot_multi_bars(
name=title,
file_name=target_dir+"/{0}-{1}-bars.png".format(head_file_name,
result_type),
title=title,
exp_rows=diffs_results,
y_axis_labels=manifest_label,
x_axis_labels=time_labels,
y_axis_general_label=name,
type_rows=grouping_types,
colors=colors,
hatches=hatches,
y_limits=y_limits,
y_log_scale=False,
legend=legend,
y_tick_count=3,
subtitle="% workflow workload",
ncols=2,
ref_line=ref_level
)
| 36.030303 | 75 | 0.518923 |
hexsha: de3df638310dcbe32c189284547dca83d1fe51a7 | size: 410 | ext: py | lang: Python
path: devpotato_bot/commands/daily_titles/models/inevitable_title.py
max_stars: cl0ne/cryptopotato-bot @ af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | ["MIT"] | count: 1 | 2021-05-15T23:41:29.000Z .. 2021-05-15T23:41:29.000Z
max_issues: cl0ne/cryptopotato-bot @ af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | ["MIT"] | count: 1 | 2022-02-19T20:38:33.000Z .. 2022-02-19T23:53:39.000Z
max_forks: cl0ne/cryptopotato-bot @ af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | ["MIT"] | count: 1 | 2021-05-15T23:42:21.000Z .. 2021-05-15T23:42:21.000Z
from __future__ import annotations
from .title import TitleFromGroupChat, Base
| 27.333333 | 63 | 0.660976 |
hexsha: de3e64921cbcc4e464aa3d32a70cc4b3179f2705 | size: 1,034 | ext: py | lang: Python
path: matplotlib/gas_price_overtime.py
max_stars: MatveiAleksandrovich/Artificial-Intelligence @ d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | ["MIT"] | count: null | dates: null
max_issues: MatveiAleksandrovich/Artificial-Intelligence @ d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | ["MIT"] | count: null | dates: null
max_forks: MatveiAleksandrovich/Artificial-Intelligence @ d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | ["MIT"] | count: null | dates: null
import requests
import pandas as pd
import matplotlib.pyplot as plt
url_gas_data = 'https://raw.githubusercontent.com/KeithGalli/matplotlib_tutorial/master/gas_prices.csv'
res1 = requests.get(url_gas_data, allow_redirects=True)
with open('gas_prices.csv', 'wb') as file:
    file.write(res1.content)
plt.figure(figsize=(12, 5))
gas = pd.read_csv('gas_prices.csv')
plt.title('Gas prices overtime (in USD)', fontdict={
    'fontweight': 'bold', 'fontsize': 16
})
countries_to_look_at = ['USA', 'Australia', 'South Korea', 'Canada']
for country in gas:
    if country in countries_to_look_at:
        plt.plot(gas.Year, gas[country], label=country, marker='.')
"""
Other way to pass data:
plt.plot(gas.Year, gas.USA, 'b.-', label='United States')
plt.plot(gas.Year, gas.Canada, 'r.-', label='Canada')
plt.plot(gas.Year, gas['South Korea'], 'g.-', label='South Korea')
plt.plot(gas.Year, gas.Australia, 'y.-', label='Australia')
"""
plt.xticks(gas.Year[::3])
plt.xlabel('Year')
plt.ylabel('US Dollars')
plt.legend()
plt.show()
| 23.5 | 103 | 0.698259 |
hexsha: de40955063f239619674a2b5ecbf4dbaa910621e | size: 2,305 | ext: py | lang: Python
path: integration_tests/test_surveys.py
max_stars: ONSdigital/sdx-tester @ df193867c0d5e9dbf39790c85c41b07a9efed756 | ["MIT"] | count: null | dates: null
max_issues: ONSdigital/sdx-tester @ df193867c0d5e9dbf39790c85c41b07a9efed756 | ["MIT"] | count: null | dates: null
max_forks: ONSdigital/sdx-tester @ df193867c0d5e9dbf39790c85c41b07a9efed756 | ["MIT"] | count: null | dates: null
import unittest
import uuid
from app import survey_loader
from app import message_manager
from app.tester import run_survey
| 37.786885 | 109 | 0.572668 |
hexsha: de42aa506b54f4487685cb532dc908e5f790e4a5 | size: 509 | ext: py | lang: Python
path: shared/app_business_logic.py
max_stars: c-w/python-loadtests @ 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | ["Apache-2.0"] | count: 2 | 2020-02-12T23:03:09.000Z .. 2020-02-12T23:09:42.000Z
max_issues: c-w/python-loadtests @ 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | ["Apache-2.0"] | count: null | dates: null
max_forks: c-w/python-loadtests @ 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | ["Apache-2.0"] | count: null | dates: null
from os import environ
from azure.storage.table import TableService
azure_account_name = environ['AZURE_ACCOUNT_NAME']
azure_account_key = environ['AZURE_ACCOUNT_KEY']
azure_table_name = environ['AZURE_TABLE_NAME']
table = TableService(azure_account_name, azure_account_key)
get_entity = table.get_entity
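# Hedged usage sketch (not part of the original file): with the legacy
# azure.storage.table SDK, the aliased get_entity takes the table name plus
# partition and row keys; the key values below are hypothetical.
#
# entity = get_entity(azure_table_name, 'my-partition', 'my-row-key')
# print(entity)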
| 28.277778 | 65 | 0.776031 |
hexsha: de44446f8526c9f2e48dd37b76b2ac71ae33e71b | size: 3,424 | ext: py | lang: Python
path: csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py
max_stars: hytsang/cs-ranking @ 241626a6a100a27b96990b4f199087a6dc50dcc0 | ["Apache-2.0"] | count: null | dates: null
max_issues: hytsang/cs-ranking @ 241626a6a100a27b96990b4f199087a6dc50dcc0 | ["Apache-2.0"] | count: null | dates: null
max_forks: hytsang/cs-ranking @ 241626a6a100a27b96990b4f199087a6dc50dcc0 | ["Apache-2.0"] | count: 1 | 2018-10-30T08:57:14.000Z .. 2018-10-30T08:57:14.000Z
import logging
import h5py
import numpy as np
from sklearn.utils import check_random_state
from csrank.constants import OBJECT_RANKING
from csrank.dataset_reader.letor_dataset_reader import LetorDatasetReader
from csrank.dataset_reader.objectranking.util import sub_sampling
NAME = "LetorObjectRankingDatasetReader"
# if __name__ == '__main__':
# import sys
# import os
# import inspect
# dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# logging.basicConfig(filename=os.path.join(dirname, 'log.log'), level=logging.DEBUG,
# format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
# logger = logging.getLogger(name='letor')
# sys.path.append("..")
# for n in [2008, 2007]:
# ds = LetorObjectRankingDatasetReader(year=n)
# logger.info(ds.X_train.shape)
# logger.info(np.array(ds.X_test.keys).shape)
| 39.356322 | 104 | 0.629965 |
hexsha: de44c06366bdb1cf83f5f3bb8ad925cefb959cf0 | size: 1,222 | ext: py | lang: Python
path: app/wqFull/dev/trans.py
max_stars: fkwai/geolearn @ 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | count: null | dates: null
max_issues: fkwai/geolearn @ 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | count: null | dates: null
max_forks: fkwai/geolearn @ 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | count: 2 | 2021-04-04T02:45:59.000Z .. 2022-03-19T09:41:39.000Z
from sklearn.preprocessing import QuantileTransformer, PowerTransformer
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath
import json
import os
import importlib
importlib.reload(axplot)
importlib.reload(figplot)
dm = dbBasin.DataFrameBasin('weathering')
# subset
dm.saveSubset('B10', ed='2009-12-31')
dm.saveSubset('A10', sd='2010-01-01')
yrIn = np.arange(1985, 2020, 5).tolist()
t1 = dbBasin.func.pickByYear(dm.t, yrIn, pick=False)
t2 = dbBasin.func.pickByYear(dm.t, yrIn)
dm.createSubset('rmYr5', dateLst=t1)
dm.createSubset('pkYr5', dateLst=t2)
codeSel = ['00915', '00925', '00930', '00935', '00940', '00945', '00955']
d1 = dbBasin.DataModelBasin(dm, varY=codeSel, subset='rmYr5')
d2 = dbBasin.DataModelBasin(dm, varY=codeSel, subset='pkYr5')
mtdY = ['QT' for var in codeSel]
d1.trans(mtdY=mtdY)
d1.saveStat('temp')
# d2.borrowStat(d1)
d2.loadStat('temp')
yy = d2.y
yP = d2.transOutY(yy)
yO = d2.Y
# TS
indS = 1
fig, axes = figplot.multiTS(d1.t, [yO[:, indS, :], yP[:, indS, :]])
fig.show()
indS = 1
fig, axes = figplot.multiTS(d1.t, [yy[:, indS, :]])
fig.show()
| 25.458333 | 78 | 0.714403 |
hexsha: de463062073e4c38b0ef746845b5c9b821ed145e | size: 659 | ext: py | lang: Python
path: pysad/statistics/__init__.py
max_stars: selimfirat/pysad @ dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | ["BSD-3-Clause"] | count: 155 | 2020-08-17T12:52:38.000Z .. 2022-03-19T02:59:26.000Z
max_issues: shubhsoni/pysad @ dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | ["BSD-3-Clause"] | count: 2 | 2020-10-22T09:50:28.000Z .. 2021-02-15T02:01:44.000Z
max_forks: shubhsoni/pysad @ dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | ["BSD-3-Clause"] | count: 14 | 2020-10-09T17:08:23.000Z .. 2022-03-25T11:30:12.000Z
"""
The :mod:`pysad.statistics` module contains methods to keep track of statistics on streaming data.
"""
from .abs_statistic import AbsStatistic
from .average_meter import AverageMeter
from .count_meter import CountMeter
from .max_meter import MaxMeter
from .median_meter import MedianMeter
from .min_meter import MinMeter
from .running_statistic import RunningStatistic
from .sum_meter import SumMeter
from .sum_squares_meter import SumSquaresMeter
from .variance_meter import VarianceMeter
__all__ = ["AbsStatistic", "AverageMeter", "CountMeter", "MaxMeter", "MedianMeter", "MinMeter", "RunningStatistic", "SumMeter", "SumSquaresMeter", "VarianceMeter"]
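# Hedged usage sketch (not part of the original module): the meters consume a
# stream one value at a time; the update()/get() method names are assumed
# from the pysad API.
#
# avg, mx = AverageMeter(), MaxMeter()
# for x in (1.0, 4.0, 2.0):
#     avg.update(x)
#     mx.update(x)
# print(avg.get(), mx.get())  # running mean and running max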
| 41.1875 | 163 | 0.814871 |
hexsha: de481c317eb312cc809e4b8eb2f8383abd96ba97 | size: 324 | ext: py | lang: Python
path: src/elrados/views.py
max_stars: IamShobe/elrados @ dd2523e1523591c7a3213dfd062b376f41bb9f18 | ["MIT"] | count: 2 | 2018-07-20T11:03:42.000Z .. 2019-06-06T06:00:12.000Z
max_issues: IamShobe/elrados @ dd2523e1523591c7a3213dfd062b376f41bb9f18 | ["MIT"] | count: null | dates: null
max_forks: IamShobe/elrados @ dd2523e1523591c7a3213dfd062b376f41bb9f18 | ["MIT"] | count: 2 | 2018-12-18T16:00:34.000Z .. 2019-04-08T14:29:02.000Z
"""Global index view."""
import pkg_resources
from django.shortcuts import render
def index(request):
    """Basic view."""
    plugins = \
        [plugin.load() for plugin in
         pkg_resources.iter_entry_points(group='elrados.plugins')]
    return render(request, "index.html", {
        "plugins": plugins
    })
| 21.6 | 66 | 0.641975 |
hexsha: de48207667680d4095ac834e7b25417f0ab4f83a | size: 2,274 | ext: py | lang: Python
path: examples/old/zipline_momentun.py
max_stars: sherrytp/TradingEvolved @ 4bc9cc18244954bff37a80f67cce658bd0802b5d | ["Apache-2.0"] | count: null | dates: null
max_issues: sherrytp/TradingEvolved @ 4bc9cc18244954bff37a80f67cce658bd0802b5d | ["Apache-2.0"] | count: null | dates: null
max_forks: sherrytp/TradingEvolved @ 4bc9cc18244954bff37a80f67cce658bd0802b5d | ["Apache-2.0"] | count: 1 | 2022-03-26T07:11:18.000Z .. 2022-03-26T07:11:18.000Z
import pandas as pd
import matplotlib.pyplot as plt
from zipline.finance.commission import PerShare
from zipline.api import set_commission, symbol, order_target_percent
import zipline
from models.live_momentum import LiveMomentum
with open('/Users/landey/Desktop/Eonum/live_model/eouniverse/stock_list.txt', 'r') as f:
    data = f.read().split()
tickers = data[:20]
etf_list = tickers[15:]
start = pd.Timestamp('2020-3-22', tz='utc')
end = pd.Timestamp('2020-4-28', tz='utc')
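# Hypothetical stubs (missing from this snippet): run_algorithm below expects
# an initialize/handle_data pair; this equal-weight sketch only illustrates
# the expected zipline signatures, not the repo's actual strategy.
def initialize(context):
    set_commission(PerShare(cost=0.001))  # assumed commission model
    context.assets = [symbol(t) for t in tickers[:5]]

def handle_data(context, data):
    # naive equal-weight rebalance on every bar; placeholder logic only
    for asset in context.assets:
        order_target_percent(asset, 1.0 / len(context.assets))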
perf = zipline.run_algorithm(start=start,
end=end,
initialize=initialize,
capital_base=100000,
handle_data=handle_data,
bundle='sep')
perf.portfolio_value.plot()
plt.show()
| 30.72973 | 95 | 0.647757 |
hexsha: de4860345de948d81c21b1062677ea640e28f033 | size: 10,120 | ext: py | lang: Python
path: packages/robotControl/scripts/intercept.py
max_stars: Falcons-Robocup/code @ 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | ["Apache-2.0"] | count: 2 | 2021-01-15T13:27:19.000Z .. 2021-08-04T08:40:52.000Z
max_issues: Falcons-Robocup/code @ 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | ["Apache-2.0"] | count: null | dates: null
max_forks: Falcons-Robocup/code @ 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | ["Apache-2.0"] | count: 5 | 2018-05-01T10:39:31.000Z .. 2022-03-25T03:02:35.000Z
#!/usr/bin/env python3
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
# Jan Feitsma, March 2020
# Robot will continuously intercept around current position.
#
# For description and usage hints, execute with '-h'
import sys, os
import time
import logging, signal
logging.basicConfig(level=logging.INFO)
import math, random
import argparse
import falconspy
import rtdb2tools
from robotLibrary import RobotLibrary
from worldState import WorldState
from FalconsCoordinates import *
def calcCirclePos(robotIdx, numRobots, radius=3, center=(0, 0)):
    """
    Helper function to distribute robot positions on a circle.
    """
    gamma = 2 * math.pi / numRobots
    x = radius * math.cos(gamma * robotIdx) + center[0]
    y = radius * math.sin(gamma * robotIdx) + center[1]
    phi = gamma * robotIdx - math.pi
    return (x, y, phi)
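# Hypothetical reconstructions (the original definitions are not part of this
# snippet): parse_arguments() and main() are referenced in the guard below;
# the option names are assumptions.
def parse_arguments():
    parser = argparse.ArgumentParser(description='Continuously intercept around the current position.')
    parser.add_argument('-r', '--robot', type=int, default=0,
                        help='robot ID (0 means undetermined)')
    return parser.parse_args()

def main(args):
    # placeholder: the real intercept loop (worldState polling, RobotLibrary
    # motion commands) is not included in this excerpt
    raise NotImplementedError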
if __name__ == '__main__':
    args = parse_arguments()
    if args.robot == 0 or args.robot is None:
        raise RuntimeError("Error: could not determine robot ID, this script should run on a robot")
    main(args)
| 42.700422 | 305 | 0.619368 |
hexsha: de4f135b4907a9ad1ee036150f5775fba0b81256 | size: 4,859 | ext: py | lang: Python
path: arpym/tools/plc.py
max_stars: dpopadic/arpmRes @ ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | ["MIT"] | count: 6 | 2021-04-10T13:24:30.000Z .. 2022-03-26T08:20:42.000Z
max_issues: dpopadic/arpmRes @ ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | ["MIT"] | count: null | dates: null
max_forks: dpopadic/arpmRes @ ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | ["MIT"] | count: 6 | 2019-08-13T22:02:17.000Z .. 2022-02-09T17:49:12.000Z
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib.ticker import FuncFormatter
def plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky, h_t_risky,
                        num, j_sel):
    """For details, see here.

    Parameters
    ----------
    t : array, shape (t_,)
    v_t_strat : array, shape (j_, t_)
    v_t_risky : array, shape (j_, t_)
    w_t_risky : array, shape (j_, t_)
    h_t_risky : array, shape (j_, t_)
    num : int
    j_sel : int
    """
    # adjust v_t_risky so that it has the same initial value as v_t_strat
    v_t_risky = v_t_risky * v_t_strat[0, 0] / v_t_risky[0, 0]
    mu_risky = np.mean(v_t_risky, axis=0, keepdims=True).reshape(-1)
    sig_risky = np.std(v_t_risky, axis=0, keepdims=True).reshape(-1)
    mu_strat = np.mean(v_t_strat, axis=0, keepdims=True).reshape(-1)
    sig_strat = np.std(v_t_strat, axis=0, keepdims=True).reshape(-1)
    plt.style.use('arpm')
    fig = plt.figure()
    gs = GridSpec(1, 2)
    gs1 = GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[0])
    num_bins = int(round(100 * np.log(v_t_strat.shape[1])))
    lgrey = [0.8, 0.8, 0.8]  # light grey
    dgrey = [0.4, 0.4, 0.4]  # dark grey
    j_ = v_t_risky.shape[0]
    x_min = t[0]
    x_max = 1.25 * t[-1]
    y_min = v_t_strat[0, 0] / 4
    y_max = v_t_strat[0, 0] * 2.25
    # scatter plot
    ax4 = plt.subplot(gs[1])
    plt.scatter(v_t_risky[:, -1], v_t_strat[:, -1], marker='.', s=2)
    so = np.sort(v_t_risky[:, -1])
    plt.plot(so, so, label='100% risky instrument', color='r')
    plt.plot([y_min, v_t_risky[j_sel, -1], v_t_risky[j_sel, -1]],
             [v_t_strat[j_sel, -1], v_t_strat[j_sel, -1], y_min], 'b--')
    plt.plot(v_t_risky[j_sel, -1], v_t_strat[j_sel, -1], 'bo')
    ax4.set_xlim(y_min, y_max)
    ax4.set_ylim(y_min, y_max)
    ax4.xaxis.set_major_formatter(FuncFormatter(tick_label_func))
    ax4.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
    plt.xlabel('Strategy')
    plt.ylabel('Risky instrument')
    plt.legend()
    # weights and holdings
    ax3 = plt.subplot(gs1[2])
    y_min_3 = np.min(h_t_risky[j_sel, :-1])
    y_max_3 = np.max(h_t_risky[j_sel, :-1])
    plt.sca(ax3)
    plt.plot(t, w_t_risky[j_sel, :], color='b')
    plt.axis([x_min, x_max, 0, 1])
    plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
    plt.yticks(np.linspace(0, 1, 3), color='b')
    plt.ylabel('Weights', color='b')
    plt.xlabel('Time')
    ax3_2 = ax3.twinx()
    plt.plot(t, h_t_risky[j_sel, :], color='black')
    plt.ylabel('Holdings', color='black')
    plt.axis([x_min, x_max, y_min_3 - 1, y_max_3 + 1])
    plt.yticks(np.linspace(y_min_3, y_max_3, 3))
    ax3_2.yaxis.set_major_formatter(FuncFormatter(tick_label_func_1))
    ax1 = plt.subplot(gs1[0], sharex=ax3, sharey=ax4)
    # simulated path, standard deviation of strategy
    for j in range(j_ - num, j_):
        plt.plot(t, v_t_strat[j, :], color=lgrey)
    plt.plot(t, v_t_strat[j_sel, :], color='b')
    plt.plot(t, mu_strat + sig_strat, color='orange')
    plt.plot(t, mu_strat - sig_strat, color='orange')
    plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
    # histogram
    y_hist, x_hist = np.histogram(v_t_strat[:, -1], num_bins)
    scale = 0.25 * t[-1] / np.max(y_hist)
    y_hist = y_hist * scale
    plt.barh(x_hist[:-1], y_hist, height=(max(x_hist) - min(x_hist)) /
             (len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.ylabel('Strategy')
    ax1.set_ylim(y_min, y_max)
    ax1.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
    # risky instrument
    ax2 = plt.subplot(gs1[1], sharex=ax3, sharey=ax4)
    # simulated path, standard deviation of risky instrument
    for j in range(j_ - num, j_):
        plt.plot(t, v_t_risky[j, :], color=lgrey)
    plt.plot(t, v_t_risky[j_sel, :], color='b')
    plt.plot(t, mu_risky + sig_risky, color='orange')
    plt.plot(t, mu_risky - sig_risky, color='orange')
    plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
    # histogram
    y_hist, x_hist = np.histogram(v_t_risky[:, -1], num_bins)
    scale = 0.25 * t[-1] / np.max(y_hist)
    y_hist = y_hist * scale
    plt.barh(x_hist[:-1], y_hist, height=(max(x_hist) - min(x_hist)) /
             (len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
    plt.setp(ax2.get_xticklabels(), visible=False)
    plt.ylabel('Risky instrument')
    ax2.set_ylim(y_min, y_max)
    ax2.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
    plt.grid(True)
    plt.tight_layout()
    return fig, gs
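# Hedged usage sketch (not part of the original module): synthetic inputs with
# the shapes documented above; note tick_label_func/tick_label_func_1 are
# module helpers defined outside this excerpt.
#
# t_, j_ = 50, 200
# t = np.linspace(0.0, 1.0, t_)
# v_t_strat = 100.0 * np.cumprod(1 + 0.01 * np.random.randn(j_, t_), axis=1)
# v_t_risky = 100.0 * np.cumprod(1 + 0.02 * np.random.randn(j_, t_), axis=1)
# w_t_risky = np.random.rand(j_, t_)
# h_t_risky = w_t_risky * v_t_strat / v_t_risky
# fig, gs = plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky, h_t_risky,
#                               num=20, j_sel=0)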
| 35.210145 | 106 | 0.61844 |
hexsha: de4f23bfb5a827684724b1fa6940e53745dbb142 | size: 1,166 | ext: py | lang: Python
path: krpc_client.py
max_stars: janismac/ksp_rtls_launch_to_rendezvous @ 195ebfb5aacf1a857aaaf0a69bf071d93d887efd | ["Apache-2.0"] | count: 1 | 2020-11-07T15:53:19.000Z .. 2020-11-07T15:53:19.000Z
max_issues: janismac/ksp_rtls_launch_to_rendezvous @ 195ebfb5aacf1a857aaaf0a69bf071d93d887efd | ["Apache-2.0"] | count: null | dates: null
max_forks: janismac/ksp_rtls_launch_to_rendezvous @ 195ebfb5aacf1a857aaaf0a69bf071d93d887efd | ["Apache-2.0"] | count: 1 | 2020-11-07T15:56:06.000Z .. 2020-11-07T15:56:06.000Z
import sys
import subprocess
import time
import json
import krpc
import math
import scipy.integrate
import numpy as np
from PrePlanningChecklist import PrePlanningChecklist
from PlannerUiPanel import PlannerUiPanel
from MainUiPanel import MainUiPanel
from ConfigUiPanel import ConfigUiPanel
from AutopilotUiPanel import AutopilotUiPanel
from predict_orbit_BCBF import predict_orbit_BCBF
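# Hypothetical stub (the original main() is not part of this snippet): the
# retry loop below only needs something that connects and may raise RPCError.
def main():
    conn = krpc.connect(name='krpc_client')  # connection name is an assumption
    # ... build the UI panels and run the autopilot; omitted in this excerpt
    time.sleep(1.0)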
while True:
    try:
        main()
        # time.sleep(2.0)
    except krpc.error.RPCError:
        time.sleep(4.0)
    # except ValueError:
    #     time.sleep(4.0)
| 25.911111 | 76 | 0.736707 |
hexsha: de4fbddd1a8e5c3c47f15c39acb99e707f22e65b | size: 617 | ext: py | lang: Python
path: src/alerter.py
max_stars: Jawgo/DiscordBot @ 43dccce80aa8d8bd51b44c0de732fd70d9194672 | ["MIT"] | count: null | dates: null
max_issues: Jawgo/DiscordBot @ 43dccce80aa8d8bd51b44c0de732fd70d9194672 | ["MIT"] | count: null | dates: null
max_forks: Jawgo/DiscordBot @ 43dccce80aa8d8bd51b44c0de732fd70d9194672 | ["MIT"] | count: null | dates: null
import os
from discord import Webhook, RequestsWebhookAdapter, Colour, Embed
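# Hedged sketch (the rest of this file is truncated): a typical synchronous
# webhook alert with these imports; the WEBHOOK_URL variable is hypothetical.
#
# webhook = Webhook.from_url(os.environ['WEBHOOK_URL'], adapter=RequestsWebhookAdapter())
# embed = Embed(title='Alert', description='Something happened', colour=Colour.red())
# webhook.send(embed=embed)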
| 36.294118 | 110 | 0.666126 |
hexsha: de50a4c4fb04e2350cc10caa2aea9a7a75fcac8c | size: 4,593 | ext: py | lang: Python
path: dataset_preproc/preproc_video/face_extract.py
max_stars: RicardoP0/multimodal-matchmap @ aa44c574a57073833004172734394882889d8d3b | ["MIT"] | count: null | dates: null
max_issues: RicardoP0/multimodal-matchmap @ aa44c574a57073833004172734394882889d8d3b | ["MIT"] | count: null | dates: null
max_forks: RicardoP0/multimodal-matchmap @ aa44c574a57073833004172734394882889d8d3b | ["MIT"] | count: null | dates: null
#%%
#https://github.com/timesler/facenet-pytorch
from facenet_pytorch import MTCNN, extract_face
import torch
import numpy as np
import mmcv, cv2
import os
import matplotlib.pyplot as plt
from PIL import Image
# %%
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device,image_size=100)
video_dir = "VIDEO_FILES/"
dest_path = 'VIDEO_PROCESSED/'
dir_list = os.listdir(video_dir)
dir_list.sort()
if not os.path.exists(dest_path):
    os.makedirs(dest_path)
#%%
# %%
#iemocap
k = 1 #session to process
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
i=0
#%%
dir_list
path = 'datasets/IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
    os.makedirs(path)
dir_list
#%%
#divide each video and manually crop around face
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
path = 'IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
    os.makedirs(path)
for file_name in dir_list:
    print(file_name)
    video = mmcv.VideoReader(video_dir + '/' + file_name)
    if 'F_' in file_name:
        new_file_left = path + file_name[:-4] + '_F.avi'
        new_file_right = path + file_name[:-4] + '_M.avi'
    else:
        new_file_left = path + file_name[:-4] + '_M.avi'
        new_file_right = path + file_name[:-4] + '_F.avi'
    h, w, c = video[0].shape
    dim = (300, 280)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    # left
    video_tracked = cv2.VideoWriter(new_file_left, fourcc, 25.0, dim)
    i = 0
    for frame in video:
        h, w, c = frame.shape
        # left; different crop boxes for each session
        # box is a (left, upper, right, lower) tuple
        # ses1 [120:int(h-690), 120:int(w/2.4)]
        # ses2 [150:int(h-660), 120:int(w/2.4)]
        # ses5 [120:int(h-690), 120:int(w/2.4)]
        # [130:int(h/2.18), 120:int(w/2.4)]
        video_tracked.write(frame[100:h-100, :300])
    video_tracked.release()
    del video_tracked
    print(h, w, c)
    dim = (370, 280)
    # right
    video_tracked = cv2.VideoWriter(new_file_right, fourcc, 25.0, dim)
    for frame in video:
        h, w, c = frame.shape
        # right
        # ses1 [150:int(h-660), int(w/1.5):int(w-60)]
        # ses2 [150:int(h-660), int(w/1.5):int(w-60)]
        # ses5 [150:int(h-660), int(w/1.5):int(w-60)]
        video_tracked.write(frame[100:h-100, 350:])
    video_tracked.release()
    del video, video_tracked
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device,image_size=2000,margin=5)
i = 1
video_dir = "../../../../datasets/IEMOCAP/CLIPPED_VIDEOS/Session{}/".format(i)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
dir_list
#%%
file_list = dir_list
path = '../datasets/IEMOCAP/FACE_VIDEOS/Session{}/'.format(i)
if not os.path.exists(path):
    os.makedirs(path)
#%%
#%%
#track using mtcnn
for file_name in file_list:
    video = mmcv.VideoReader(video_dir + file_name)
    frames = [Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in video]
    frames_tracked = []
    for x, frame in enumerate(frames):
        # print('\rTracking frame: {}'.format(x + 1), end='')
        # Detect faces
        boxes, _ = mtcnn.detect(frame)
        if boxes is not None:
            # extract the first detected face (a CHW tensor)
            img = extract_face(frame, boxes[0], image_size=112, margin=50)
            frames_tracked.append(img)
        elif frames_tracked:
            # no detection: repeat the last tracked face so frame counts stay aligned
            frames_tracked.append(frames_tracked[-1])
    dim = frames_tracked[0].size
    print(len(frames), len(frames_tracked))
    new_file = path + '/' + file_name
    print(new_file)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    video_tracked = cv2.VideoWriter(new_file, fourcc, 25.0, dim)
    for frame in frames_tracked:
        video_tracked.write(cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
    video_tracked.release()
    del video, video_tracked, frames_tracked, frames
| 29.254777 | 95 | 0.642717 |
hexsha: de51709d96e27d7e3576d5ee6ad6f2ebabdc7ebc | size: 1,441 | ext: py | lang: Python
path: launch/gazebo.launch.py
max_stars: fly4future/fog_gazebo_resources @ 1af1aa2d3a5e7c67bf39605655ca96a154daa4b3 | ["BSD-3-Clause"] | count: null | dates: null
max_issues: fly4future/fog_gazebo_resources @ 1af1aa2d3a5e7c67bf39605655ca96a154daa4b3 | ["BSD-3-Clause"] | count: null | dates: null
max_forks: fly4future/fog_gazebo_resources @ 1af1aa2d3a5e7c67bf39605655ca96a154daa4b3 | ["BSD-3-Clause"] | count: null | dates: null
"""Launch Gazebo server and client with command line arguments."""
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.actions import ExecuteProcess
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python.packages import get_package_share_directory
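# Hedged sketch (the original file body is truncated here): the imports above
# are the usual recipe for including gazebo_ros' gzserver/gzclient launch
# files; the 'gui' argument below is an assumption.
def generate_launch_description():
    pkg_gazebo_ros = get_package_share_directory('gazebo_ros')
    return LaunchDescription([
        DeclareLaunchArgument('gui', default_value='true',
                              description='Also start gzclient'),
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(pkg_gazebo_ros + '/launch/gzserver.launch.py')),
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(pkg_gazebo_ros + '/launch/gzclient.launch.py'),
            condition=IfCondition(LaunchConfiguration('gui'))),
    ])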
| 38.945946 | 117 | 0.696738 |
hexsha: de5241403b212e20d0b5a9c1eb86d5461e49bad7 | size: 957 | ext: py | lang: Python
path: hlrl/torch/utils/contexts/training.py
max_stars: Chainso/HLRL @ 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | ["MIT"] | count: null | dates: null
max_issues: Chainso/HLRL @ 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | ["MIT"] | count: null | dates: null
max_forks: Chainso/HLRL @ 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | ["MIT"] | count: null | dates: null
from contextlib import contextmanager
import torch.nn as nn
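# Hedged sketch (the original file body is truncated here): given these
# imports, a typical training-mode context manager looks like this; the name
# and exact semantics are assumptions, not the repo's actual code.
@contextmanager
def training_mode(module: nn.Module, training: bool = True):
    """Temporarily set a module's train/eval mode, restoring it on exit."""
    was_training = module.training
    module.train(training)
    try:
        yield module
    finally:
        module.train(was_training)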
| 20.804348 | 66 | 0.6186 |
hexsha: de53cfe343832488633720622d964252c48b5617 | size: 3,180 | ext: py | lang: Python
path: test/test_postfix.py
max_stars: JoseTomasTocino/toptal-calculator @ baeb69fdeca81699d655e1f2f11f03f2a3972ab7 | ["Unlicense"] | count: null | dates: null
max_issues: JoseTomasTocino/toptal-calculator @ baeb69fdeca81699d655e1f2f11f03f2a3972ab7 | ["Unlicense"] | count: null | dates: null
max_forks: JoseTomasTocino/toptal-calculator @ baeb69fdeca81699d655e1f2f11f03f2a3972ab7 | ["Unlicense"] | count: null | dates: null
import unittest
from calculator import tokens, evaluator
from calculator.parser import tokenize, infix_to_postfix
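# Hedged sketch (the original tests are truncated here): assuming tokenize()
# returns a token list and infix_to_postfix() reorders it, a test would
# follow this shape; the class and assertion below are assumptions.
#
# class TestPostfix(unittest.TestCase):
#     def test_simple_addition(self):
#         postfix = infix_to_postfix(tokenize('1 + 2'))
#         self.assertTrue(postfix)  # exact evaluator API unknown in this excerpt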
| 31.485149 | 70 | 0.646226 |
hexsha: de55352cff35ae8596924966eb4c23a46054b461 | size: 1,124 | ext: py | lang: Python
path: Weather API/app.py
max_stars: TanushreeShaw/Weather @ 0bebe029536f579bbd9d28c07d3e33f3438a1a56 | ["MIT"] | count: null | dates: null
max_issues: TanushreeShaw/Weather @ 0bebe029536f579bbd9d28c07d3e33f3438a1a56 | ["MIT"] | count: null | dates: null
max_forks: TanushreeShaw/Weather @ 0bebe029536f579bbd9d28c07d3e33f3438a1a56 | ["MIT"] | count: null | dates: null
from flask import Flask, render_template, request
import requests
import json
import os
app = Flask(__name__)
picfolder = os.path.join('static','pics')
app.config['UPLOAD_FOLDER'] = picfolder
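# Hypothetical route (the original views are not included in this excerpt):
# a minimal index rendering a template with an image from the pics folder;
# 'index.html' and 'weather.png' are assumptions.
@app.route('/')
def index():
    pic = os.path.join(app.config['UPLOAD_FOLDER'], 'weather.png')
    return render_template('index.html', user_image=pic)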
if __name__ == '__main__':
    app.run(debug=True)
| 33.058824 | 152 | 0.670819 |
hexsha: de559c2b5884fa9c7d514b793b602e0875f672ea | size: 561 | ext: py | lang: Python
path: core/urls.py
max_stars: cybernetisk/internsystem @ b81faa0deef08153032e56d5740173e5a6cf3ad9 | ["MIT"] | count: null | dates: null
max_issues: cybernetisk/internsystem @ b81faa0deef08153032e56d5740173e5a6cf3ad9 | ["MIT"] | count: 38 | 2017-12-21T10:10:54.000Z .. 2022-03-07T20:54:37.000Z
max_forks: cybernetisk/internsystem @ b81faa0deef08153032e56d5740173e5a6cf3ad9 | ["MIT"] | count: 6 | 2018-06-01T21:04:34.000Z .. 2020-01-14T15:26:26.000Z
from django.conf.urls import url
from core.views import me
from core.rest import CardViewSet, UserViewSet, NfcCardViewSet, GroupViewSet
from core.utils import SharedAPIRootRouter
# SharedAPIRootRouter is automatically imported in global urls config
router = SharedAPIRootRouter()
router.register(r"core/users", UserViewSet, basename="users")
router.register(r"core/cards", CardViewSet, basename="voucher_cards")
router.register(r"core/nfc", NfcCardViewSet)
router.register(r"core/groups", GroupViewSet)
urlpatterns = [
url(r"^api/me$", me, name="me"),
]
| 33 | 76 | 0.787879 |
hexsha: de5df9efa200676cbee6ac7078451697101f76eb | size: 2,931 | ext: py | lang: Python
path: flora_tools/experiments/measure_time_irq_process.py
max_stars: Atokulus/flora-tools @ 6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0 | ["MIT"] | count: 1 | 2020-11-20T16:36:17.000Z .. 2020-11-20T16:36:17.000Z
max_issues: Atokulus/flora-tools @ 6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0 | ["MIT"] | count: null | dates: null
max_forks: Atokulus/flora-tools @ 6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0 | ["MIT"] | count: null | dates: null
from flora_tools.experiment import *
| 37.576923 | 111 | 0.588536 |
hexsha: de5e91c132fdc9f05dd13b11b8708a82b0c0f470 | size: 213 | ext: py | lang: Python
path: 6P/REDES/restAPI/main/serializers.py
max_stars: rwnicholas/fluffy-potato @ 52ccd25cf77f8cebce1420e7fe9028a277811986 | ["MIT"] | count: null | dates: null
max_issues: rwnicholas/fluffy-potato @ 52ccd25cf77f8cebce1420e7fe9028a277811986 | ["MIT"] | count: null | dates: null
max_forks: rwnicholas/fluffy-potato @ 52ccd25cf77f8cebce1420e7fe9028a277811986 | ["MIT"] | count: null | dates: null
from rest_framework import serializers
from main.models import Suco
| 26.625 | 55 | 0.704225 |
hexsha: de5f40f2fa117e9d234c38567381795609e6e892 | size: 183 | ext: py | lang: Python
path: gpytorch/kernels/keops/__init__.py
max_stars: wjmaddox/gpytorch @ 679f437fa71f8e15d98b3d256924ecf4b52c0448 | ["MIT"] | count: 1 | 2019-09-16T16:58:54.000Z .. 2019-09-16T16:58:54.000Z
max_issues: wjmaddox/gpytorch @ 679f437fa71f8e15d98b3d256924ecf4b52c0448 | ["MIT"] | count: null | dates: null
max_forks: wjmaddox/gpytorch @ 679f437fa71f8e15d98b3d256924ecf4b52c0448 | ["MIT"] | count: null | dates: null
from .matern_kernel import MaternKernel
from .rbf_kernel import RBFKernel
from .spectralgp_kernel import SpectralGPKernel
__all__ = ["MaternKernel", "RBFKernel", "SpectralGPKernel"]
| 30.5 | 59 | 0.825137 |
hexsha: de61a2c63bd8bf8c89dfa8db3b212f5ada8c9268 | size: 271 | ext: py | lang: Python
path: bc/recruitment/migrations/0018_merge_20200324_1630.py
max_stars: Buckinghamshire-Digital-Service/buckinghamshire-council @ bbbdb52b515bcdfc79a2bd9198dfa4828405370e | ["BSD-3-Clause"] | count: 1 | 2021-02-27T07:27:17.000Z .. 2021-02-27T07:27:17.000Z
max_issues: Buckinghamshire-Digital-Service/buckinghamshire-council @ bbbdb52b515bcdfc79a2bd9198dfa4828405370e | ["BSD-3-Clause"] | count: null | dates: null
max_forks: Buckinghamshire-Digital-Service/buckinghamshire-council @ bbbdb52b515bcdfc79a2bd9198dfa4828405370e | ["BSD-3-Clause"] | count: 1 | 2021-06-09T15:56:54.000Z .. 2021-06-09T15:56:54.000Z
# Generated by Django 2.2.10 on 2020-03-24 16:30
from django.db import migrations
| 19.357143 | 52 | 0.664207 |
hexsha: de61aeb69172f0bbf84a85482ba65c30efe863a2 | size: 1,901 | ext: py | lang: Python
path: main.py
max_stars: SHGoldfarb/fantastic-barnacle @ 64650155ef8172530a6f88be6e7361bfc7e6bfa2 | ["MIT"] | count: null | dates: null
max_issues: SHGoldfarb/fantastic-barnacle @ 64650155ef8172530a6f88be6e7361bfc7e6bfa2 | ["MIT"] | count: null | dates: null
max_forks: SHGoldfarb/fantastic-barnacle @ 64650155ef8172530a6f88be6e7361bfc7e6bfa2 | ["MIT"] | count: null | dates: null
import requests
import os
from datetime import datetime
import pandas as pd
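# Hypothetical stub (elided from this snippet): the guard below expects a
# main(); placeholder only, the real logic is unknown.
def main():
    print('started at', datetime.now())  # uses the imports above; real work elided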
if __name__ == "__main__":
main()
| 23.7625 | 78 | 0.711731 |
hexsha: de6435cdbc67360ee94636dc50bd704495e2b720 | size: 382 | ext: py | lang: Python
path: dump/yoloCarAccident/generate.py
max_stars: lovishchopra/ITRI-Car-Accident @ 96a1ffa25eacfb2885ea1fa0852a91c8bb5ec95d | ["MIT"] | count: null | dates: null
max_issues: lovishchopra/ITRI-Car-Accident @ 96a1ffa25eacfb2885ea1fa0852a91c8bb5ec95d | ["MIT"] | count: null | dates: null
max_forks: lovishchopra/ITRI-Car-Accident @ 96a1ffa25eacfb2885ea1fa0852a91c8bb5ec95d | ["MIT"] | count: null | dates: null
import os
import yoloCarAccident as yc
# yc.find('test.txt')
f1 = open('result2.txt','r')
i = 0
s = ""
for lines in f1:
    if i < 80000:
        s += lines
        i += 1
    else:
        with open('test.txt', 'w') as f2:
            f2.write(s)
        try:
            yc.find('test.txt')
        except ValueError:
            pass
        # start the next batch with the current line instead of dropping it
        s = lines
        i = 1
f1.close()
# flush the remaining lines that never filled a full 80000-line batch
if s:
    with open('test.txt', 'w') as f2:
        f2.write(s)
    try:
        yc.find('test.txt')
    except ValueError:
        pass
| 13.172414 | 28 | 0.557592 |
hexsha: de65eb26862ea6588043a83de4e49020ae4daf2c | size: 1,853 | ext: py | lang: Python
path: socketserver_extra.py
max_stars: sim642/pyqwebirc @ cd0cc120eacd3eea60b827ff7b2b157ab4a5dd1e | ["MIT"] | count: null | dates: null
max_issues: sim642/pyqwebirc @ cd0cc120eacd3eea60b827ff7b2b157ab4a5dd1e | ["MIT"] | count: 2 | 2017-01-04T18:24:00.000Z .. 2017-01-04T18:50:32.000Z
max_forks: sim642/pyqwebirc @ cd0cc120eacd3eea60b827ff7b2b157ab4a5dd1e | ["MIT"] | count: null | dates: null
import socketserver
import socket
| 33.089286 | 88 | 0.652455 |
hexsha: de681128c0eb4ded13f92d6720603223e15efc17 | size: 4,560 | ext: py | lang: Python
path: train_n_test/train_decoder.py
max_stars: kamieen03/style-transfer-net @ c9f56aa579553be8c72f37ce975ba88dbd775605 | ["BSD-2-Clause"] | count: 2 | 2019-12-14T14:59:22.000Z .. 2020-01-30T16:17:28.000Z
max_issues: kamieen03/style-transfer-net @ c9f56aa579553be8c72f37ce975ba88dbd775605 | ["BSD-2-Clause"] | count: null | dates: null
max_forks: kamieen03/style-transfer-net @ c9f56aa579553be8c72f37ce975ba88dbd775605 | ["BSD-2-Clause"] | count: 1 | 2020-01-16T20:03:35.000Z .. 2020-01-16T20:03:35.000Z
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.abspath(__file__ + "/../../")) # just so we can use 'libs'
import torch.utils.data
import torch.optim as optim
from torch import nn
import numpy as np
import torch
from libs.Loader import Dataset
from libs.shufflenetv2 import ShuffleNetV2AutoEncoder
BATCH_SIZE = 32
CROP_SIZE = 400
ENCODER_SAVE_PATH = f'models/regular/shufflenetv2_x1_encoder.pth'
DECODER_SAVE_PATH = f'models/regular/shufflenetv2_x1_decoder.pth'
EPOCHS = 20
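# Hypothetical stub (elided from this snippet): the guard below expects a
# main() that builds the autoencoder and runs the training loop; sketch only,
# the constructor arguments are assumptions.
def main():
    model = ShuffleNetV2AutoEncoder()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    criterion = nn.MSELoss()
    # dataset loading and the epoch loop are not part of this excerpt
    raise NotImplementedError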
if __name__ == '__main__':
    main()
| 35.905512 | 84 | 0.53114 |
hexsha: de69814605b1835959a1ffdafc1b9774d60d18ad | size: 75 | ext: py | lang: Python
path: utils/__init__.py
max_stars: bitst0rm-st3/AutomaticPackageReloader @ b48699420ccadb3c1a8796a1a7275f70089f0934 | ["MIT"] | count: null | dates: null
max_issues: bitst0rm-st3/AutomaticPackageReloader @ b48699420ccadb3c1a8796a1a7275f70089f0934 | ["MIT"] | count: null | dates: null
max_forks: bitst0rm-st3/AutomaticPackageReloader @ b48699420ccadb3c1a8796a1a7275f70089f0934 | ["MIT"] | count: null | dates: null
from .progress_bar import ProgressBar
from .read_config import read_config
| 25 | 37 | 0.866667 |
hexsha: de6c1a64c58a8aca902a8fc78dd2204b84031a65 | size: 2,871 | ext: py | lang: Python
path: src/main/create/c_chains_user_json.py
max_stars: WikiCommunityHealth/wikimedia-revert @ b584044d8b6a61a79d98656db356bf1f74d23ee0 | ["MIT"] | count: null | dates: null
max_issues: WikiCommunityHealth/wikimedia-revert @ b584044d8b6a61a79d98656db356bf1f74d23ee0 | ["MIT"] | count: null | dates: null
max_forks: WikiCommunityHealth/wikimedia-revert @ b584044d8b6a61a79d98656db356bf1f74d23ee0 | ["MIT"] | count: null | dates: null
#%%
# PAGE EXAMPLE
# {'title': 'Zuppa_di_pesce_(film)',
# 'chains': [{'revisions': ['95861493', '95861612', '95973728'],
# 'users': {'93.44.99.33': '', 'Kirk39': '63558', 'AttoBot': '482488'},
# 'len': 3,
# 'start': '2018-04-01 04:54:40.0',
# 'end': '2018-04-05 07:36:26.0'}],
# 'n_chains': 1,
# 'n_reverts': 3,
# 'mean': 3.0,
# 'longest': 3,
# 'M': 0,
# 'lunghezze': {'3': 1}}
import json
from datetime import datetime
import numpy as np
import pandas as pd
import os
import shutil
from utils import utils
import sys
language = sys.argv[1]
dataset_folder = f'/home/gandelli/dev/data/{language}/chains/page/'
output = f'/home/gandelli/dev/data/{language}/chains/user/'
#%% get users from the json page
# input a dict of users with the chains joined
#%%
shutil.rmtree(output)
os.mkdir(output)
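# Hypothetical reconstructions (the original definitions are not part of this
# excerpt): get_users() and compute_users() are called below; these sketches
# assume one JSON page object per line, shaped like the example above.
def get_users():
    users = {}
    for fname in os.listdir(dataset_folder):
        with open(dataset_folder + fname) as f:
            for line in f:
                page = json.loads(line)
                for chain in page['chains']:
                    for user in chain['users']:
                        users.setdefault(user, []).append(chain)
    return users

def compute_users(users):
    # placeholder: persist one summary entry per user into `output`
    with open(output + 'users.json', 'w') as f:
        json.dump({u: len(chains) for u, chains in users.items()}, f)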
users = get_users()
compute_users(users)
# %%
| 26.1 | 183 | 0.563915 |
hexsha: de6c4ab063a946c3b3fd6bbb89fa20997b2be723 | size: 5,105 | ext: py | lang: Python
path: src/carts/views.py
max_stars: dhaval6552/ecommerce-2 @ ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | ["MIT"] | count: null | dates: null
max_issues: dhaval6552/ecommerce-2 @ ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | ["MIT"] | count: null | dates: null
max_forks: dhaval6552/ecommerce-2 @ ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | ["MIT"] | count: null | dates: null
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin,DetailView
from django.shortcuts import render,get_object_or_404,redirect
from django.http import HttpResponseRedirect,Http404,JsonResponse
from django.views.generic.edit import FormMixin
from orders.forms import GuestCheckoutForm
from products.models import Variation
from carts.models import Cart,CartItem
# Create your views here.
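# Hedged sketch (the original views are truncated in this excerpt): a minimal
# cart view wired to the imports above; the template path is an assumption,
# not the project's actual implementation.
class CartView(DetailView):
    model = Cart
    template_name = 'carts/view.html'  # hypothetical template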
| 33.807947 | 92 | 0.582174 |
hexsha: de71e1c800cd0628725b2dd49b907881044e1b6d | size: 721 | ext: py | lang: Python
path: Python/PythonCgiMock03/src/maincgi/test/TestCgiMainXml.py
max_stars: tduoth/JsObjects @ eb3e2a8b1f47d0da53c8b1a85a7949269711932f | ["MIT"] | count: 22 | 2015-02-26T09:07:18.000Z .. 2020-05-10T16:22:05.000Z
max_issues: tduoth/JsObjects @ eb3e2a8b1f47d0da53c8b1a85a7949269711932f | ["MIT"] | count: 123 | 2016-04-05T18:32:41.000Z .. 2022-03-13T21:09:21.000Z
max_forks: tduoth/JsObjects @ eb3e2a8b1f47d0da53c8b1a85a7949269711932f | ["MIT"] | count: 56 | 2015-03-19T22:26:37.000Z .. 2021-12-06T02:52:02.000Z
#!/usr/bin/python
'''
Created on May 23, 2012
@author: Charlie
'''
import unittest
from mock import patch
import xml.etree.ElementTree as ET
from TestCgiMainBase import TestCgiMainBase
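# Hedged sketch (the original test class is truncated here): given these
# imports, the tests likely subclass TestCgiMainBase and assert on XML output;
# the helper names below are assumptions.
#
# class TestCgiMainXml(TestCgiMainBase):
#     def test_output_is_wellformed_xml(self):
#         with patch('sys.stdout'):
#             output = self.run_cgi()      # hypothetical helper
#         ET.fromstring(output)            # parses or raises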
if __name__ == "__main__":
unittest.main()
| 24.862069 | 71 | 0.647712 |