Dataset schema (one record per row; ⌀ = nullable):
hexsha: string, length 40–40
size: int64, 5–2.06M
ext: string, 11 classes
lang: string, 1 class
max_stars_repo_path: string, length 3–251
max_stars_repo_name: string, length 4–130
max_stars_repo_head_hexsha: string, length 40–78
max_stars_repo_licenses: list, length 1–10
max_stars_count: int64, 1–191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 3–251
max_issues_repo_name: string, length 4–130
max_issues_repo_head_hexsha: string, length 40–78
max_issues_repo_licenses: list, length 1–10
max_issues_count: int64, 1–116k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 3–251
max_forks_repo_name: string, length 4–130
max_forks_repo_head_hexsha: string, length 40–78
max_forks_repo_licenses: list, length 1–10
max_forks_count: int64, 1–105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 1–1.05M
avg_line_length: float64, 1–1.02M
max_line_length: int64, 3–1.04M
alphanum_fraction: float64, 0–1
hexsha: 1905b552f6b906092520144e21a33c6cfbd7fe0b | size: 883 | ext: py | lang: Python
repo (stars/issues/forks): src/save_docs.py @ j-c-m-code/gutenbergsearch, head b08f69d1d35fcca57e8ad0fcceaab614b9104abc, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
"""
Processes a folder of .txt files to Spacy docs then saves the docs
"""
# first import standard modules
import glob
import os
from pathlib import Path
# then import third-party modules
import spacy
# finally import my own code (PEP-8 convention)
from askdir import whichdir
nlp = spacy.load("en_core_web_lg")
source_directory = whichdir()
os.chdir(source_directory)
filelist = glob.glob("*")
output_directory = whichdir()
for filename in filelist:
with open(filename, "r", encoding="utf-8") as f:
novel = f.read()
# the novel is too long for the default, so increase allocated memory
nlp.max_length = len(novel) + 100
# Process a text
doc = nlp(novel)
short_name = Path(filename).stem
# r for raw string--no escape characters
# f for format string--allow me to pass in variable
doc.to_disk(rf"{output_directory}\{short_name}")
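# A companion sketch (not part of the original file): reload the docs saved
# above. spaCy needs a Vocab -- here taken from the same "en_core_web_lg"
# pipeline -- to reconstruct a serialized Doc. The glob path is a placeholder.
import glob
import spacy
from spacy.tokens import Doc

nlp = spacy.load("en_core_web_lg")
for path in glob.glob(r"C:\saved_docs\*"):  # placeholder output directory
    doc = Doc(nlp.vocab).from_disk(path)  # Doc.from_disk pairs with to_disk
    print(path, len(doc))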
avg_line_length: 23.864865 | max_line_length: 73 | alphanum_fraction: 0.711212
hexsha: 190623703fe56b71a26a9d008afda8919d9e105d | size: 253 | ext: py | lang: Python
repo (stars/issues/forks): output/models/nist_data/list_pkg/nmtoken/schema_instance/nistschema_sv_iv_list_nmtoken_max_length_2_xsd/__init__.py @ tefra/xsdata-w3c-tests, head b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, licenses ["MIT"]
max_stars_count: 1 (2021-08-14T17:59:21.000Z – 2021-08-14T17:59:21.000Z) | max_issues_count: 4 (2020-02-12T21:30:44.000Z – 2020-04-15T20:06:46.000Z) | max_forks_count: null
content:
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_max_length_2_xsd.nistschema_sv_iv_list_nmtoken_max_length_2 import NistschemaSvIvListNmtokenMaxLength2
__all__ = [
"NistschemaSvIvListNmtokenMaxLength2",
]
avg_line_length: 42.166667 | max_line_length: 194 | alphanum_fraction: 0.893281
hexsha: 190750f0b978b05cd4e96bab0a727296c6a7e5d0 | size: 462 | ext: py | lang: Python
repo (stars/issues/forks): jython/src/sample_src.py @ adrianpothuaud/Sikuli-WS, head 6210a949768fb4eb2b80693818ae3eb31ec9c406, licenses ["MIT"]
max_stars_count: 1 (2018-02-20T16:28:45.000Z – 2018-02-20T16:28:45.000Z) | max_issues_count: null | max_forks_count: null
content:
# -*- coding:utf-8 -*-
"""
file: src/sample.py
Sample Source file
==================
Description
-----------
Sample description ...
Content
-------
- say_hello_sikuli
Status
------
Test with: tests/sample.py
last verification date: xx/xx/xxxx
last verification status: XX
"""
from sikuli import *
def say_hello_sikuli():
    """
    :return:
    """
    popup("Hello World !", title="Sikuli")
avg_line_length: 12.486486 | max_line_length: 42 | alphanum_fraction: 0.515152
hexsha: 190774ef04a1a93a7a9832bb8db9d5fd37d72396 | size: 533 | ext: py | lang: Python
repo (stars/issues/forks): k8s/images/codalab/apps/chahub/provider.py @ abdulari/codalab-competitions, head fdfbb77ac62d56c6b4b9439935037f97ffcd1423, licenses ["Apache-2.0"]
max_stars_count: 333 (2015-12-29T22:49:40.000Z – 2022-03-27T12:01:57.000Z) | max_issues_count: 1,572 (2015-12-28T21:54:00.000Z – 2022-03-31T13:00:32.000Z) | max_forks_count: 107 (2016-01-08T03:46:07.000Z – 2022-03-16T08:43:57.000Z)
content:
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
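# NOTE: the ChaHubProvider class body is missing from this record; the file
# content appears truncated. A minimal hedged sketch following the usual
# django-allauth OAuth2Provider pattern (the id, name, and extract_uid key
# below are assumptions, not the original code):
class ChaHubAccount(ProviderAccount):
    pass


class ChaHubProvider(OAuth2Provider):
    id = 'chahub'
    name = 'ChaHub'
    account_class = ChaHubAccount

    def extract_uid(self, data):
        # assumes the provider returns a numeric 'id' field
        return str(data['id'])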
providers.registry.register(ChaHubProvider)
avg_line_length: 23.173913 | max_line_length: 74 | alphanum_fraction: 0.75985
hexsha: 1907beae999c84a846e911c9160f122031a33418 | size: 3,046 | ext: py | lang: Python
path: tools/perf/contrib/cluster_telemetry/screenshot_unittest.py, head 2b8388091c71e442910a21ada3d97ae8bc1845d3, licenses ["BSD-3-Clause"]
max_stars: repo zipated/src, count 2,151 (2020-04-18T07:31:17.000Z – 2022-03-31T08:39:18.000Z)
max_issues: repo cangulcan/src, count 395 (2020-04-18T08:22:18.000Z – 2021-12-08T13:04:49.000Z)
max_forks: repo cangulcan/src, count 338 (2020-04-18T08:03:10.000Z – 2022-03-29T12:33:22.000Z)
content:
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from telemetry.util import image_util
from contrib.cluster_telemetry import screenshot
avg_line_length: 41.726027 | max_line_length: 80 | alphanum_fraction: 0.738345
hexsha: 1908a3c6547cb1830569167b36fc11ceff479110 | size: 652 | ext: py | lang: Python
path: bosm2015/pcradmin_old/urls.py, head df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a, licenses ["MIT"]
max_stars: repo dvm-bitspilani/BITS-BOSM-2015, count 1 (2015-09-15T17:19:30.000Z – 2015-09-15T17:19:30.000Z)
max_issues: repo DVM-BITS-Pilani/BITS-BOSM-2015, count null
max_forks: repo DVM-BITS-Pilani/BITS-BOSM-2015, count 1 (2016-03-28T19:44:41.000Z – 2016-03-28T19:44:41.000Z)
content:
from pcradmin import views
from django.conf.urls import url, include
urlpatterns = [
    url(r'^(?P<pagename>\w+)/', views.index),
    # url(r'^sendmail$', views.sendmail),
    # url(r'^sentmail$', views.sentmail),
    url(r'^changelimit$', views.change_team_limits),
    url(r'^change_team_limit$', views.change_team_limit_list),
    url(r'^limit_changed$', views.change_limits),
    url(r'^changesportslimit$', views.change_sports_limits),
    url(r'^sports_limits_changed$', views.save_sports_limits),
    url(r'^setstatus', views.set_status),
    url(r'^showstatus', views.save_status),
    url(r'^emailsend', views.send_mail),
    url(r'^compose', views.compose),
]
avg_line_length: 38.352941 | max_line_length: 62 | alphanum_fraction: 0.71319
hexsha: 1908ece0bbcf8875b565e097e59669305dfcf236 | size: 354 | ext: py | lang: Python
repo (stars/issues/forks): _aulas/ex004.py @ CarlosJunn/Aprendendo_Python, head cddb29b5ee2058c3fb612574eb4af414770b7422, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
a = input('Type something: ')
print('The primitive type of this value:', type(a))
print('Is it only spaces?', a.isspace())
print('Is it a number?', a.isnumeric())
print('Is it alphabetic?', a.isalpha())
print('Is it alphanumeric?', a.isalnum())
print('Is it all uppercase?', a.isupper())
print('Is it all lowercase?', a.islower())
print('Is it capitalized?', a.istitle())
avg_line_length: 35.4 | max_line_length: 50 | alphanum_fraction: 0.675141
hexsha: 190ab0b7b7eed8792f426c4ad62cea8612750811 | size: 3,966 | ext: py | lang: Python
repo (stars/issues/forks): authentication/login.py @ ICTKevinWong/webservices-samples, head 35a8b8571d88276ff12ad60959192ce20ef5bf19, licenses ["BSD-3-Clause"]
max_stars_count: 6 (2018-01-03T14:13:57.000Z – 2021-07-28T21:12:35.000Z) | max_issues_count: 5 (2018-01-03T15:28:47.000Z – 2020-08-28T08:25:07.000Z) | max_forks_count: 6 (2017-10-17T19:37:44.000Z – 2021-08-19T13:10:16.000Z)
content:
"""
Examples of authenticating to the API.
Usage:
    login <username> <password> <server>
    login -h
Arguments:
    username  ID to provide for authentication
    password  Password corresponding to the specified username.
    server    API endpoint.
Options:
    -h --help     Show this screen.
    --version     Show version.
Description:
There are two ways that you can authenticate to the Web Services API. Both options are viable and are demonstrated
below with examples.
Basic-Authentication is probably the most popular option, especially for shorter/simpler usages of the API, mostly
because of its simplicity. The credentials are simply provided with each request.
There is a login endpoint (POST /devmgr/utils/login), that will allow you to explicitly authenticate with the API.
Upon authenticating, a JSESSIONID will be provided in the Response headers and as a Cookie that can be utilized
to create a persistent session (that will eventually timeout).
"""
import logging
import docopt
import requests
LOG = logging.getLogger(__name__)
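# NOTE: the login() definition is missing from this record; the content
# appears truncated. A hedged sketch of the two approaches described in the
# module docstring (the JSON payload keys are assumptions, not a documented
# API):
def login(server, username, password):
    # Option 1: Basic-Authentication -- credentials sent with each request.
    response = requests.get(server, auth=(username, password))
    LOG.debug("Basic-auth status: %s", response.status_code)
    # Option 2: the explicit login endpoint; the JSESSIONID returned in the
    # response headers/cookies can back a persistent session.
    session = requests.Session()
    response = session.post(server + '/devmgr/utils/login',
                            json={'userId': username, 'password': password})
    LOG.debug("Session cookies: %s", session.cookies.get_dict())
    return session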
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
    args = docopt.docopt(__doc__)
    login(args.get('<server>'), args.get('<username>'), args.get('<password>'))
avg_line_length: 47.783133 | max_line_length: 120 | alphanum_fraction: 0.72113
hexsha: 190b6799d93e741a949b082cb1fde511c62a4b57 | size: 487 | ext: py | lang: Python
repo (stars/issues/forks): Chapter08/qt08_winBkground03.py @ csy1993/PythonQt, head c100cd9e1327fc7731bf04c7754cafb8dd578fa5, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
'''
'''
from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QVBoxLayout, QPushButton, QMainWindow
from PyQt5.QtGui import QPalette, QBrush, QPixmap
from PyQt5.QtCore import Qt
import sys
app = QApplication(sys.argv)
win = QMainWindow()
win.setWindowTitle("")
win.resize(350, 250)
palette = QPalette()
palette.setColor(QPalette.Background, Qt.red)
win.setPalette(palette)
win.show()
sys.exit(app.exec_())
avg_line_length: 20.291667 | max_line_length: 99 | alphanum_fraction: 0.710472
hexsha: 190c0898a136d9b08e445150bbf358f595547ad3 | size: 8,349 | ext: py | lang: Python
repo (stars/issues/forks): tcsv.py @ eadanfahey/transform-csv, head 40d3aaf34b286fe9d6262fe69c7245e3a44a5b41, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import csv
avg_line_length: 31.387218 | max_line_length: 83 | alphanum_fraction: 0.542819
hexsha: 190dc436e49d1655496d4e4796285c2ff4464f81 | size: 13,074 | ext: py | lang: Python
repo (stars/issues/forks): selim/datasets/lidc.py @ tilacyn/dsb2018_topcoders, head e0f95ef70bc062d4dea321d2aa73231a9538cd63, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np
from tensorflow.keras.preprocessing.image import Iterator
import time
import os
import xml.etree.ElementTree as ET
import cv2
import pydicom as dicom
from os.path import join as opjoin
import json
from tqdm import tqdm
def parseXML(scan_path):
    '''
    parse xml file
    args:
        xml file path
    output:
        nodule list
        [{nodule_id, roi:[{z, sop_uid, xy:[[x1,y1],[x2,y2],...]}]}]
    '''
    file_list = os.listdir(scan_path)
    xml_file = None
    for file in file_list:
        if '.' in file and file.split('.')[1] == 'xml':
            xml_file = file
            break
    prefix = "{http://www.nih.gov}"
    if xml_file is None:
        # fail explicitly instead of crashing below with a None filename
        print('SCAN PATH: {}'.format(scan_path))
        raise FileNotFoundError('no .xml annotation file in {}'.format(scan_path))
    tree = ET.parse(scan_path + '/' + xml_file)
    root = tree.getroot()
    readingSession_list = root.findall(prefix + "readingSession")
    nodules = []
    for session in readingSession_list:
        # print(session)
        unblinded_list = session.findall(prefix + "unblindedReadNodule")
        for unblinded in unblinded_list:
            nodule_id = unblinded.find(prefix + "noduleID").text
            edgeMap_num = len(unblinded.findall(prefix + "roi/" + prefix + "edgeMap"))
            if edgeMap_num >= 1:
                # it's segmentation label
                nodule_info = {}
                nodule_info['nodule_id'] = nodule_id
                nodule_info['roi'] = []
                roi_list = unblinded.findall(prefix + "roi")
                for roi in roi_list:
                    roi_info = {}
                    # roi_info['z'] = float(roi.find(prefix + "imageZposition").text)
                    roi_info['sop_uid'] = roi.find(prefix + "imageSOP_UID").text
                    roi_info['xy'] = []
                    edgeMap_list = roi.findall(prefix + "edgeMap")
                    for edgeMap in edgeMap_list:
                        x = float(edgeMap.find(prefix + "xCoord").text)
                        y = float(edgeMap.find(prefix + "yCoord").text)
                        xy = [x, y]
                        roi_info['xy'].append(xy)
                    nodule_info['roi'].append(roi_info)
                nodules.append(nodule_info)
    return nodules
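# Hedged usage sketch for parseXML (the scan directory is a placeholder):
# nodules = parseXML('/data/LIDC-IDRI-0001/scan_dir')
# for nodule in nodules:
#     print(nodule['nodule_id'], len(nodule['roi']), 'ROIs')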
avg_line_length: 38.795252 | max_line_length: 106 | alphanum_fraction: 0.567998
hexsha: 190de2ec2acd9e5640757238ffbce83a69af9dc2 | size: 2,058 | ext: py | lang: Python
repo (stars/issues/forks): hexagon/__main__.py @ redbeestudios/hexagon, head dc906ae31a14eb750a3f9bde8dd0633d8e1af486, licenses ["Apache-2.0"]
max_stars_count: 8 (2021-06-27T21:46:04.000Z – 2022-02-26T18:03:10.000Z) | max_issues_count: 31 (2021-06-24T14:35:38.000Z – 2022-02-17T03:01:23.000Z) | max_forks_count: 1 (2021-08-16T16:15:16.000Z – 2021-08-16T16:15:16.000Z)
content:
from hexagon.support.hooks import HexagonHooks
from hexagon.support.execute.tool import select_and_execute_tool
from hexagon.support.update.cli import check_for_cli_updates
import sys
from hexagon.support.args import fill_args
from hexagon.domain import cli, tools, envs
from hexagon.support.help import print_help
from hexagon.support.tracer import tracer
from hexagon.support.printer import log
from hexagon.support.update.hexagon import check_for_hexagon_updates
from hexagon.support.storage import (
HexagonStorageKeys,
store_user_data,
)
from hexagon.plugins import collect_plugins
if __name__ == "__main__":
main()
avg_line_length: 27.078947 | max_line_length: 76 | alphanum_fraction: 0.614189
hexsha: 190e10d1d867b6f965986f63ffa52b804353b9e8 | size: 18,322 | ext: py | lang: Python
repo (stars/issues/forks): ligpy/ligpy_utils.py @ LigninTools/UWCAP_10, head 0f665d3a2895657d9dda8ea9cc395583f3437dcc, licenses ["BSD-2-Clause"]
max_stars_count: 7 (2016-06-30T18:14:14.000Z – 2020-04-20T22:18:47.000Z) | max_issues_count: null | max_forks_count: 5 (2016-07-30T04:05:29.000Z – 2021-08-14T13:58:11.000Z)
content:
"""
Misc utility functions required by several modules in the ligpy program.
"""
import os
import numpy as np
from constants import GAS_CONST, MW
def set_paths():
"""
Set the absolute path to required files on the current machine.
Returns
-------
reactionlist_path : str
path to the file `complete_reactionlist.dat`
rateconstantlist_path : str
path to the file `complete_rateconstantlist.dat`
compositionlist_path : str
path to the file `compositionlist.dat`
"""
module_dir = os.path.abspath(__file__).split('ligpy_utils')[0]
reactionlist_path = module_dir + 'data/complete_reaction_list.dat'
rateconstantlist_path = module_dir + 'data/complete_rateconstant_list.dat'
compositionlist_path = module_dir + 'data/compositionlist.dat'
return reactionlist_path, rateconstantlist_path, compositionlist_path
def get_specieslist(completereactionlist):
"""
Make a list of all the molecular species involved in the kinetic scheme.
Parameters
----------
completereactionlist : str
the path to the `complete_reaction_list.dat` file
Returns
-------
specieslist : list
a list of all the species in the kinetic scheme
"""
specieslist = []
for line in open(completereactionlist, 'r').readlines():
for spec in line.split(','):
# If the species has already been added to the list then move on.
if spec.split('_')[1].split()[0] in specieslist:
continue
else:
specieslist.append(spec.split('_')[1].split()[0])
specieslist.sort()
return specieslist
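# Worked example (format inferred from the parsing above and the '-1_ADIO'
# example later in this file; the line itself is a hypothetical illustration):
# a reaction line like "-1_PLIGC,1_ADIO" would add 'PLIGC' and 'ADIO' to
# specieslist.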
def get_speciesindices(specieslist):
"""
Create a dictionary to assign an arbitrary index to each of the species in
the kinetic scheme.
Parameters
----------
specieslist : list
a list of all the species in the model
Returns
-------
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
"""
speciesindices = {}
index = 0
for x in specieslist:
speciesindices[x] = index
index += 1
indices_to_species = dict(zip(speciesindices.values(),
speciesindices.keys()))
return speciesindices, indices_to_species
def define_initial_composition(compositionlist, species):
"""
Read the plant ID specified and define the initial composition of the
lignin polymer in terms of the three model components (PLIGC, PLIGH,
PLIGO).
Parameters
----------
compositionlist : str
the path of the `compositionlist.dat` file
species : str
the name of a lignin species that exists in the
`compositionlist.dat` file
Returns
-------
pligc_0 : float
The initial composition (mol/L) of PLIGC
pligh_0 : float
The initial composition (mol/L) of PLIGH
pligo_0 : float
The initial composition (mol/L) of PLIGO
"""
    for line in open(compositionlist, 'r').readlines():
if line.split(',')[0] == species:
# Initial compositions [mole fraction]
pligc_mol = float(line.split(',')[1])
pligh_mol = float(line.split(',')[2])
pligo_mol = float(line.split(',')[3])
# The weighted average molar mass of mixture [kg/mol]
weighted_m = (301*pligc_mol + 423*pligh_mol + 437*pligo_mol)/1000
# the density of the condensed phase [kg/L]
density = 0.75
# Initial compositions [mol/L]
pligc_0 = density/weighted_m * pligc_mol
pligh_0 = density/weighted_m * pligh_mol
pligo_0 = density/weighted_m * pligo_mol
break
return pligc_0, pligh_0, pligo_0
def build_k_matrix(rateconsts):
"""
Build a matrix of all the rate constant parameters (A, n, E).
Parameters
----------
rateconsts : str
the path to the file `complete_rateconstant_list.dat`
Returns
-------
kmatrix : list
a list of lists that defines a matrix. Each entry in the list
is A, n, E for a given reaction
"""
num_lines = sum(1 for line in open(rateconsts))
kmatrix = [None]*num_lines
for i, line in enumerate(open(rateconsts, 'r').readlines()):
kmatrix[i] = [line.split(' ')[0], line.split(' ')[1],
line.split(' ')[2].split()[0]]
return kmatrix
def get_k_value(T, reaction_index, kmatrix):
"""
Returns the value of the rate constant for a particular reaction index.
Parameters
----------
T : float
temperature in Kelvin
reaction_index : int
the index of the reaction for which you want the rate
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
k : float
the value of the rate constant for the given reaction at the given
temperature.
"""
    k = (float(kmatrix[reaction_index][0]) *
         T**float(kmatrix[reaction_index][1]) *
         np.exp(-1 * float(kmatrix[reaction_index][2]) / (GAS_CONST * T)))
return k
def get_k_value_list(T, kmatrix):
"""
Returns a list of all the k-values for a given temperature.
Parameters
----------
T : float
temperature in Kelvin
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
kvaluelist : list
a list of all the rate constant values for a given temperature
"""
kvaluelist = []
for index, row in enumerate(kmatrix):
kvaluelist.append(get_k_value(T, index, kmatrix))
return kvaluelist
def build_reactant_dict(completereactionlist, speciesindices):
"""
Build a dictionary of the reactants involved in each reaction,
along with their stoichiometric coefficients. The keys of the
dictionary are the reaction numbers, the values are lists of lists
[[reactant1index, -1*coeff1],...]
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
speciesindices : dict
the dictionary speciesindices from
get_speciesindices()
Returns
-------
reactant_dict : dict
a dictionary where keys are reaction numbers and values
are lists of lists with the reactants and their
stoichiometric coefficients for each reaction
"""
reactant_dict = {}
    for rxnindex, reaction in enumerate(open(completereactionlist, 'r')
                                        .readlines()):
reactants = []
# x is each coefficient_species set
for x in reaction.split(','):
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactants.append([speciesindices[x.split('_')[1].split()[0]],
-1*float(x.split('_')[0])])
# in preceding line: *-1 because I want the |stoich coeff|
reactant_dict[rxnindex] = reactants
return reactant_dict
def build_species_rxns_dict(completereactionlist):
"""
Build a dictionary where keys are species and values are lists with the
reactions that species is involved in, that reaction's sign in the net
rate equation, and the stoichiometric coefficient of the species in that
reaction.
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
Returns
-------
species_rxns : dict
keys are the species in the model; values are lists of
[reaction that species is involved in,
sign of that species in the net rate equation,
stoichiometric coefficient]
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = {}
for species in specieslist:
# This loop makes a list of which reactions "species" takes part in
# and what sign that term in the net rate eqn has
# and what the stoichiometric coefficient is
reactions_involved = []
        for rxnindex, line in enumerate(open(completereactionlist, 'r')
                                        .readlines()):
# example of x = '-1_ADIO'
for x in line.split(','):
# If the species being iterated over is part of this reaction
if species == x.split('_')[1].split()[0]:
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactions_involved.append(
[rxnindex, -1, x.split('_')[0]])
# if the species is a product
if float(x.split('_')[0]) > 0:
reactions_involved.append(
[rxnindex, 1, '+' + x.split('_')[0]])
species_rxns[species] = reactions_involved
return species_rxns
def build_rates_list(rateconstlist, reactionlist, speciesindices,
indices_to_species, human='no'):
""" This function writes the list of rate expressions for each reaction.
Parameters
----------
rateconstlist : str
the path to the file `complete_rateconstant_list.dat`
reactionlist : str
the path to the file `complete_reaction_list.dat`
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
rates_list : list
a list of the rate expressions for all the reactions in the
model
"""
kmatrix = build_k_matrix(rateconstlist)
reactant_dict = build_reactant_dict(reactionlist, speciesindices)
rates_list = []
for i, line in enumerate(kmatrix):
rate = 'rate[%s] = kvalue(T,%s) ' % (i, i)
concentrations = ''
for entry in reactant_dict[i]:
if entry == 'n': # if there is no reaction
concentrations = '* 0'
break
else:
if human == 'no':
concentrations += '* y[%s]**%s ' % (entry[0], entry[1])
elif human == 'yes':
concentrations += '* [%s]**%s ' % \
(indices_to_species[entry[0]], entry[1])
else:
raise ValueError('human must be a string: yes or no')
rate += concentrations
rates_list.append(rate)
return rates_list
def build_dydt_list(rates_list, specieslist, species_rxns, human='no'):
"""This function returns the list of dydt expressions generated for all
the reactions from rates_list.
Parameters
----------
rates_list : list
the output of build_rates_list()
specieslist : list
a list of all the species in the kinetic scheme
species_rxns : dict
dictionary where keys that are the model species and
values are the reactions they are involved in
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
dydt_expressions : list
expressions for the ODEs expressing the concentration
of each species with time
"""
dydt_expressions = []
for species in specieslist:
rate_formation = 'd[%s]/dt = ' % (species)
# "entry" is [reaction#, sign of that reaction, coefficient]
for entry in species_rxns[species]:
if human == 'no':
rate_formation += '%s*%s ' % \
(entry[2], rates_list[entry[0]].split(' = ')[1])
elif human == 'yes':
rate_formation += '%s*rate[%s] ' % (entry[2], entry[0])
else:
raise ValueError('human must be a string: yes or no')
dydt_expressions.append(rate_formation)
return dydt_expressions
def write_rates_and_odes(filename, rates, odes):
"""
Writes a file that contains the model equations to be solved (a list of
rate expressions, followed by a list of ODEs for each species). This
file is just for reference for humans to be able to look at the specific
reactions that are modeled, it is not actually used by the program. Users
should only need to generate this file if they've changed anything about
the kinetic scheme (it already exists in the data folder).
Parameters
----------
filename : str
the filename (including relative path if appropriate) of the
ratesandodes file to write
rates : list
the output of build_rates_list() with human='yes'
odes : list
the output of build_dydt_list() with human='yes'
Returns
-------
None
"""
    # text mode ('w'/'a'): these are str lines, not bytes
    with open(filename, 'w') as initialize:
        initialize.write('Reaction Rates:\n')
    with open(filename, 'a') as writer:
        for line in rates:
            writer.write(line + '\n')
        writer.write('\n\nODEs:\n')
        for line in odes:
            writer.write(line + '\n')
# These are some functions for checking the integrity of some model
# components, but they are not used except for exploratory or verification
# purposes
def check_species_in_MW(specieslist=None):
"""
Check to make sure that everything in the specieslist is in the MW
dictionary from `constants.py`.
Parameters
----------
specieslist : list, optional
a list of species to check against. If no list is
specified then the function get_specieslist() will be used
to generate the default list
Returns
-------
None
"""
    if specieslist is None:
        specieslist = get_specieslist(set_paths()[0])
    for item in MW.keys():
        if item in specieslist:
            print('%s is in specieslist' % ('{: <20}'.format(item)))
        else:
            print('********' + item)
    for item in specieslist:
        if item in MW.keys():
            print('%s is in MW dictionary' % ('{: <20}'.format(item)))
        else:
            print('********' + item)
    print('\n%s should equal %s' % (len(MW.keys()), len(specieslist)))
def check_mass_balance():
"""
Check for conservation of mass, and if mass is not conserved, see which
reactions are creating or losing mass.
Note that mass will not be wholly conserved in this model because
protons are not accounted for when radicals are involved in
non-Hydrogen-abstraction reactions, but all other reactions should
conserve mass.
Parameters
----------
None
Returns
-------
total_mass_balance : numpy array
an array with the amount of mass gained or lost
in each reaction
"""
specieslist = get_specieslist(set_paths()[0])
speciesindices = get_speciesindices(specieslist)[0]
kmatrix = build_k_matrix(set_paths()[1])
species_rxns = build_species_rxns_dict(set_paths()[0])
# Make vector of the MW's of each species, in the order from speciesindices
mw_vector = np.zeros((len(MW), 1))
for species in MW:
mw_vector[speciesindices[species]] = MW[species][0]
mw_vector = mw_vector.transpose()
# In this stoichiometric matrix, rows are species, columns are reactions
stoicmatrix = np.zeros((len(speciesindices), len(kmatrix)), dtype='float')
for species in species_rxns:
i = speciesindices[species]
for reaction in species_rxns[species]:
j = reaction[0]
stoicmatrix[i, j] += float(reaction[2])
# The result of this dot product should be a vector full of zeros.
# This will not be the case because protons are not accounted for when
# radicals are involved in non-H-abstraction rxns,
# but all other reactions should be 0
total_mass_balance = np.dot(mw_vector, stoicmatrix[:, :])
# Use this to look at which reactions are creating or losing mass
# (from missing Hydrogen)
h_sum = 0
    for i, value in enumerate(total_mass_balance[0, :]):
        if value != 0:
            print(i, value)
            h_sum += value
    print('\nNet mass change = %s' % h_sum)
return total_mass_balance
def check_species_fate():
"""
Check to see which species (if any) are only produced, but never
consumed in the model reactions (assuming that all reactions occur).
Parameters
----------
None
Returns
-------
fate_dict : dictionary
a dictionary with the fate of model species
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = build_species_rxns_dict(set_paths()[0])
fate_dict = {}
for species in specieslist:
fate_dict[species] = 'produced only'
for entry in species_rxns[species]:
if entry[1] < 0:
fate_dict[species] = 'consumed'
for species in specieslist:
if fate_dict[species] == 'consumed':
del fate_dict[species]
return fate_dict
avg_line_length: 35.370656 | max_line_length: 79 | alphanum_fraction: 0.581705
hexsha: ef6ac86677970e875c525f92de89d605e9c5d009 | size: 7,836 | ext: py | lang: Python
repo (stars/issues/forks): data_prep_helper.py @ mayofcumtb/PaperWrite, head 4a2154d68fa00e1912a3d4ce7b514364314c55e3, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import random
import tempfile
import shutil
mayan_debug = 1
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from caffe_utils import *
'''
@brief:
extract labels from rendered images
@input:
xxx/03790512_13a245da7b0567509c6d15186da929c5_a035_e009_t-01_d004.png
@output:
(35,9,359)
'''
def outspath2label(path):
    '''
    :param path: input_labels+=bookshelf_16_a007_e023_t359_d002px_216.00_py_499.00_bbwidth_280.00_bbheight_485.00.jpg===
    =====0======1==2=====3===4=====5======6====7===8=======9=======10======11======12=========
    :return:
    '''
    parts = os.path.basename(path).split('_')
    class_name = str(parts[0])
    cad_index = str(parts[1])
    azimuth = int(parts[2][1:])
    elevation = int(parts[3][1:])
    tilt = -int(parts[4][1:])
    distance = float(parts[5][1:-2])
    px = float(parts[6])
    py = float(parts[8])
    bbox_width = float(parts[10])
    bbox_height = float(parts[12][:-4])
    return (class_name, cad_index, azimuth, elevation, tilt, distance, px, py, bbox_width, bbox_height)
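# Worked example, derived from the filename in the docstring above:
# outspath2label('bookshelf_16_a007_e023_t359_d002px_216.00_py_499.00_bbwidth_280.00_bbheight_485.00.jpg')
# -> ('bookshelf', '16', 7, 23, -359, 2.0, 216.0, 499.0, 280.0, 485.0)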
'''
@brief:
get rendered image filenames and annotations, save to specified files.
@input:
shape_synset - like '02958343' for car
[train,test]_image_label_file - output file list filenames
train_ratio - ratio of training images vs. all images
@output:
save "<image_filepath> <class_idx> <azimuth> <elevation> <tilt>" to files.
'''
no_bkg = 0
'''
@brief:
combine lines from input files and save the shuffled version to output file.
@input:
input_file_list - a list of input file names
output_file - output filename
'''
'''
@brief:
convert 360 view degree to view estimation label
e.g. for bicycle with class_idx 1, label will be 360~719
'''
'''
@brief:
generate LMDB from files containing image filenames and labels
@input:
image_label_file - each line is <image_filepath> <class_idx> <azimuth> <elelvation> <tilt>
output_lmdb: LMDB pathname-prefix like xxx/xxxx_lmdb
image_resize_dim (D): resize image to DxD square
@output:
write TWO LMDB corresponding to images and labels,
i.e. xxx/xxxx_lmdb_label (each item is class_idx, azimuth, elevation, tilt) and xxx/xxxx_lmdb_image
'''
avg_line_length: 41.026178 | max_line_length: 185 | alphanum_fraction: 0.661179
hexsha: ef6c9cef1dd0ae3c36a242179a531b49d4c57a72 | size: 486 | ext: py | lang: Python
repo (stars/issues/forks): pyvisio/__init__.py @ i-wan/pyvisio, head 6beed5a18644793e5c6769c5a4fa5f64f9dc436b, licenses ["MIT"]
max_stars_count: 1 (2018-06-05T13:15:35.000Z – 2018-06-05T13:15:35.000Z) | max_issues_count: 1 (2017-06-05T18:17:16.000Z – 2017-06-05T18:17:16.000Z) | max_forks_count: 1 (2019-06-30T17:36:35.000Z – 2019-06-30T17:36:35.000Z)
content:
# -*- coding: utf-8 -*-
"""
PyVisio visDocuments - Visio Document manipulation library
See docstring for class VisDocument for usage
"""
#TODO docstring
__author__ = 'Ivo Velcovsky'
__email__ = 'velcovsky@email.cz'
__copyright__ = "Copyright (c) 2015"
__license__ = "MIT"
__status__ = "Development"
from .visCOM import *
from .documents import *
from .stencils import *
from .shapes import *
if __name__ == "__main__":
import doctest
doctest.testmod()
avg_line_length: 21.130435 | max_line_length: 59 | alphanum_fraction: 0.691358
hexsha: ef6ed1166f6e406d8fb8cc64a8cdbbcd50db4769 | size: 7,103 | ext: py | lang: Python
repo (stars/issues/forks): store/main.py @ Soemonewho2/pi-ware, head 86d2cd84ca85e36cbcdbc7511f6a4565b18e81d9, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python3
# Pi-Ware main UI
from tkinter import *
from tkinter.ttk import *
import tkinter as tk
import os
import webbrowser
from functools import partial
import getpass
#Set global var username
global username
username = getpass.getuser()
#Set global install/uninstall scripts
global install_script
global uninstall_script
#Import custom pi-ware functions
#import function
import classes
window = tk.Tk()
#Functions
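# NOTE: the function definitions are missing from this record (the content
# appears truncated after this header). show_desc is referenced below when
# binding the Treeview; a minimal hedged placeholder (the real behaviour is
# unknown -- it presumably shows the selected app's description):
def show_desc(app, event):
    print(f"Selected app: {app}")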
#Check if dev files exist
filepath = f"/home/{username}/pi-ware/.dev"
try:
    file_tst = open(filepath)
    file_tst.close()
except FileNotFoundError:
    IsDev = "False"
else:
    IsDev = "True"
#Set window icon
p1 = PhotoImage(file = f'/home/{username}/pi-ware/icons/logo.png')
window.iconphoto(False, p1)
#Main
window.resizable(0, 0)
window.geometry("330x500")
window.eval('tk::PlaceWindow . center')
window.title("Pi-Ware")
# Window tabs
tab_control = Notebook(window)
apps_tab = Frame(tab_control)
news_tab = Frame(tab_control)
credits_tab = Frame(tab_control)
DEV_tab = Frame(tab_control)
tab_control.add(apps_tab, text="Apps")
tab_control.add(news_tab, text="News")
tab_control.add(credits_tab, text="Credits")
#Show dev tab if dev files are found
if IsDev == "True":
tab_control.add(DEV_tab, text="Dev")
tab_control.pack(expand=0, fill="both")
#Show DEV stuff
PiWareVersionFile = open(f"/home/{username}/.local/share/pi-ware/version", "r")
PiWareVersioncontent = PiWareVersionFile.read()
files = folders = 0
for _, dirnames, filenames in os.walk(f"/home/{username}/pi-ware/apps"):
    files += len(filenames)
    folders += len(dirnames)
InstallibleApps = "{:,} installible Apps".format(folders)
PiWareVersion = tk.Label(DEV_tab, text=f"Pi-Ware Version:\n{PiWareVersioncontent}", font="Arial 11 bold")
PiWareInstallableApps = tk.Label(DEV_tab, text=f"{InstallibleApps}", font="Arial 11 bold")
PiWareVersion.pack()
PiWareInstallableApps.pack()
#Show latest news message
NewsMessagefile = open(f"/home/{username}/pi-ware/func/info/latestnewsmessage", "r")
NewsMessagecontent = NewsMessagefile.read()
NewsMessage = tk.Label(news_tab, text=f"Latest news:\n{NewsMessagecontent}", font="Arial 11 bold")
NewsMessage.pack()
#Show info message
InfoMessagefile = open(f"/home/{username}/pi-ware/func/info/infomessage", "r")
InfoMessagecontent = InfoMessagefile.read()
InfoMessage = tk.Label(credits_tab, text=f"{InfoMessagecontent}", font="Arial 11 bold")
InfoMessage.pack()
#Show commit links
commitmessage = tk.Label(credits_tab, text=f"To see commits, please go to the link below.", font="Arial 11 bold")
commitmessage.pack()
commit = classes.HyperLink(credits_tab, f"""https://github.com/piware14/pi-ware/graphs/contributors""");
commit.pack()
#Add pi-ware website
piwarewebsite = tk.Label(credits_tab, text=f"To vist the pi-ware website, click the link below.", font="Arial 11 bold")
piwarewebsite.pack()
Website = classes.HyperLink(credits_tab, f"""https://pi-ware.ml""");
Website.pack()
tree = Treeview(apps_tab)
tree.pack(expand=YES, fill=BOTH)
tree.column("#0", minwidth=0, width=330, stretch=NO)
s = Style()
s.configure('Treeview', rowheight=35)
ap = next(os.walk(f"/home/{username}/pi-ware/apps"))[1]
applist = sorted(ap)
print("Current apps:\n")
for app in applist:
    print(app)
    appb = ""
    for a in app:
        if a == " ":
            appb += "_"
        else:
            appb += a
    tree.bind("<<TreeviewSelect>>", partial(show_desc, app))
    exec(appb + """_button = PhotoImage(file=f'/home/{username}/pi-ware/apps/{app}/icon.png')""")
    exec("""tree.insert('', 'end', text=f"{app}",image=""" + appb + """_button)""")
ScrollForMore = tk.Label(apps_tab, text="Scroll down for more apps.", font="Arial 11 bold")
ScrollForMore.pack()
quitbutton = tk.Button(window, text="Quit", font="Arial 11 bold", width=200, bg="grey", fg="white", command=quit)
quitbutton.pack(side="bottom")
window.mainloop()
avg_line_length: 33.504717 | max_line_length: 147 | alphanum_fraction: 0.68464
hexsha: ef700e08b8631cf4f5d03872e7a2e1c13a5f31f4 | size: 50,478 | ext: py | lang: Python
repo (stars/issues/forks): shwirl/shaders/render_volume.py @ macrocosme/shwirl, head 87147ba1e99463e96b7f4295fd24ab57440d9981, licenses ["BSD-3-Clause"]
max_stars_count: 3 (2018-05-09T17:55:53.000Z – 2019-07-22T09:14:41.000Z) | max_issues_count: 9 (2017-04-07T01:44:15.000Z – 2018-12-16T20:47:08.000Z) | max_forks_count: null
content:
from __future__ import division
# This file implements a RenderVolumeVisual class. It is derived from the
# VolumeVisual class in vispy.visuals.volume, which is released under a BSD
# license included here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the (new) BSD license:
#
# Copyright (c) 2015, Dany Vohl
# All rights reserved.
#
# A copy of the license is available in the root directory of this project.
#
from ..extern.vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from ..extern.vispy.visuals import Visual
from ..extern.vispy.visuals.shaders import Function
from ..extern.vispy.color import get_colormap
from ..extern.vispy.scene.visuals import create_visual_node
from ..extern.vispy.io import load_spatial_filters
import numpy as np
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
// attribute vec3 a_texcoord;
uniform vec3 u_shape;
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
// v_texcoord = a_texcoord;
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
""" # noqa
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform vec3 u_resolution;
uniform float u_threshold;
uniform float u_relative_step_size;
//uniform int u_color_scale;
//uniform float u_data_min;
//uniform float u_data_max;
// Moving box filter variables
uniform int u_filter_size;
uniform float u_filter_coeff;
uniform int u_filter_arm;
uniform int u_filter_type;
uniform int u_use_gaussian_filter;
uniform int u_gaussian_filter_size;
//uniform int u_log_scale;
// Volume Stats
uniform float u_volume_mean;
uniform float u_volume_std;
//uniform float u_volume_madfm;
uniform float u_high_discard_filter_value;
uniform float u_low_discard_filter_value;
uniform float u_density_factor;
uniform int u_color_method;
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g;
}}
vec4 movingAverageFilter_line_of_sight(vec3 loc, vec3 step)
{{
// Initialise variables
vec4 partial_color = vec4(0.0, 0.0, 0.0, 0.0);
for ( int i=1; i<=u_filter_arm; i++ )
{{
partial_color += $sample(u_volumetex, loc-i*step);
partial_color += $sample(u_volumetex, loc+i*step);
}}
partial_color += $sample(u_volumetex, loc);
// Evaluate mean
partial_color *= u_filter_coeff;
return partial_color;
}}
vec4 Gaussian_5(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.3333333333333333 * direction;
color += color_original * 0.29411764705882354;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.35294117647058826;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.35294117647058826;
return color;
}}
vec4 Gaussian_9(vec4 color_original, vec3 loc, vec3 direction)
{{
vec4 color = vec4(0.0);
vec3 off1 = 1.3846153846 * direction;
vec3 off2 = 3.2307692308 * direction;
color += color_original * 0.2270270270;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.0702702703;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.0702702703;
return color;
}}
vec4 Gaussian_13(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.411764705882353 * direction;
vec3 off2 = 3.2941176470588234 * direction;
vec3 off3 = 5.176470588235294 * direction;
color += color_original * 0.1964825501511404;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc + (off3 * u_resolution)) * 0.010381362401148057;
color += $sample(u_volumetex, loc - (off3 * u_resolution)) * 0.010381362401148057;
return color;
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Edge detection Pass
// (adapted from https://www.shadertoy.com/view/MscSzf#)
// ----------------------------------------------------------------
float checkSame(vec4 center, vec4 sample, vec3 resolution) {{
vec2 centerNormal = center.xy;
float centerDepth = center.z;
vec2 sampleNormal = sample.xy;
float sampleDepth = sample.z;
vec2 sensitivity = (vec2(0.3, 1.5) * resolution.y / 50.0);
vec2 diffNormal = abs(centerNormal - sampleNormal) * sensitivity.x;
bool isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
float diffDepth = abs(centerDepth - sampleDepth) * sensitivity.y;
bool isSameDepth = diffDepth < 0.1;
return (isSameNormal && isSameDepth) ? 1.0 : 0.0;
}}
vec4 edge_detection(vec4 color_original, vec3 loc, vec3 step, vec3 resolution) {{
vec4 sample1 = $sample(u_volumetex, loc + (vec3(1., 1., 0.) / resolution));
vec4 sample2 = $sample(u_volumetex, loc + (vec3(-1., -1., 0.) / resolution));
vec4 sample3 = $sample(u_volumetex, loc + (vec3(-1., 1., 0.) / resolution));
vec4 sample4 = $sample(u_volumetex, loc + (vec3(1., -1., 0.) / resolution));
float edge = checkSame(sample1, sample2, resolution) *
checkSame(sample3, sample4, resolution);
return vec4(color_original.rgb, 1-edge);
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Used with iso surface
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Get color of the texture (albeido)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple light, define lights on viewvox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero devision)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by componing different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
//distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
// (u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
vec3 loc = start_loc;
int iter = 0;
float discard_ratio = 1.0 / (u_high_discard_filter_value - u_low_discard_filter_value);
float low_discard_ratio = 1.0 / u_low_discard_filter_value;
for (iter=0; iter<nsteps; iter++)
{{
// Get sample color
vec4 color;
if (u_filter_size == 1)
color = $sample(u_volumetex, loc);
else {{
color = movingAverageFilter_line_of_sight(loc, step);
}}
if (u_use_gaussian_filter==1) {{
vec4 temp_color;
vec3 direction;
if (u_gaussian_filter_size == 5){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_5(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_5(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_5(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 9){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_9(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_9(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_9(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 13){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_13(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_13(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_13(temp_color, loc, direction);
}}
color = temp_color;
}}
float val = color.g;
// To force activating the uniform - this should be done differently
float density_factor = u_density_factor;
if (u_filter_type == 1) {{
// Get rid of very strong signal values
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
// Don't consider noisy values
//if (val < u_volume_mean - 3*u_volume_std)
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
if (u_low_discard_filter_value == u_high_discard_filter_value)
{{
if (u_low_discard_filter_value != 0.)
{{
val *= low_discard_ratio;
}}
}}
else {{
val -= u_low_discard_filter_value;
val *= discard_ratio;
}}
}}
else {{
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
}}
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
{after_loop}
//gl_FragColor = edge_detection(gl_FragColor, loc, step, u_shape);
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
""" # noqa
MIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
int maxi = 0; // Where the maximum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
// Refine search for max value
loc = start_loc + step * (float(maxi) - 0.5);
for (int i=0; i<10; i++) {
maxval = max(maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (maxval > u_high_discard_filter_value || maxval < u_low_discard_filter_value)
{{
maxval = 0.;
}}
// Color is associated to voxel intensity
// Moment 0
if (u_color_method == 0) {
gl_FragColor = $cmap(maxval);
}
// Moment 1
else if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = maxval;
}
// Color is associated to RGB cube
else if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = maxval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (maxval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., maxval);
}
// < 3 sigmas
if ( (maxval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., maxval);
}
}
else {
// Moment 2
// TODO: verify implementation of MIP-mom2.
gl_FragColor = $cmap((maxval * ((maxval - loc.y) * (maxval - loc.y))) / maxval);
}
""",
)
MIP_FRAG_SHADER = FRAG_SHADER.format(**MIP_SNIPPETS)
LMIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
float local_maxval = -99999.0; // The local maximum encountered value
int maxi = 0; // Where the maximum value was encountered
int local_maxi = 0; // Where the local maximum value was encountered
bool local_max_found = false;
""",
in_loop="""
if( val > u_threshold && !local_max_found ) {
local_maxval = val;
local_maxi = iter;
local_max_found = true;
}
if( val > maxval) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
if (!local_max_found) {
local_maxval = maxval;
local_maxi = maxi;
}
// Refine search for max value
loc = start_loc + step * (float(local_maxi) - 0.5);
for (int i=0; i<10; i++) {
local_maxval = max(local_maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (local_maxval > u_high_discard_filter_value) {
local_maxval = 0.;
}
if (local_maxval < u_low_discard_filter_value) {
local_maxval = 0.;
}
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(local_maxval);
gl_FragColor.a = local_maxval;
}
// Color is associated to redshift/velocity
else {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = local_maxval;
}
""",
)
LMIP_FRAG_SHADER = FRAG_SHADER.format(**LMIP_SNIPPETS)
TRANSLUCENT_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
float mom0 = 0.;
float mom1 = 0.;
float ratio = 1/nsteps; // final average
float a1 = 0.;
float a2 = 0.;
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
/*color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;*/
color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
if (u_color_method == 2){
color.r = loc.y;
color.g = loc.z;
color.b = loc.x;
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 4: Mom2
// TODO: Finish implementation of mom2 (not correct in its present form).
else {
// mom0
a1 = mom0;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom0 *= a1 / alpha;
mom0 += val * a2 / alpha;
// mom1
a1 = mom1;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom1 *= a1 / alpha;
mom1 += loc.y * a2 / alpha;
}
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
if (u_color_method != 3){
gl_FragColor = integrated_color;
}
else {
gl_FragColor = $cmap((mom0 * ((mom0 - mom1) * (mom0 - mom1))) / mom0);
}
""",
)
TRANSLUCENT_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT_SNIPPETS)
TRANSLUCENT2_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
float ratio = 1.0 / float(nsteps); // final average (float division; nsteps is an int)
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
color = $cmap(val);
integrated_color = (val * density_factor + integrated_color.a * (1 - density_factor)) * color;
alpha = integrated_color.a;
//alpha = a1+a2;
// integrated_color *= a1 / alpha;
// integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
color.r = loc.x;
color.g = loc.z;
color.b = loc.y;
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
TRANSLUCENT2_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT2_SNIPPETS)
ADDITIVE_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
integrated_color = 1.0 - (1.0 - integrated_color) * (1.0 - color);
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
ADDITIVE_FRAG_SHADER = FRAG_SHADER.format(**ADDITIVE_SNIPPETS)
ISO_SNIPPETS = dict(
before_loop="""
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
""",
in_loop="""
if (val > u_threshold-0.2) {
// Take the last interval in smaller steps
vec3 iloc = loc - step;
for (int i=0; i<10; i++) {
val = $sample(u_volumetex, iloc).g;
if (val > u_threshold) {
color = $cmap(val);
gl_FragColor = calculateColor(color, iloc, dstep);
iter = nsteps;
break;
}
iloc += step * 0.1;
}
}
""",
after_loop="""
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
MINIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // maximum encountered
float minval = 99999.0; // The minimum encountered value
int mini = 0; // Where the minimum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
}
if( val < minval ) {
minval = val;
mini = iter;
}
""",
after_loop="""
// Refine search for min value
loc = start_loc + step * (float(mini) - 0.5);
for (int i=0; i<10; i++) {
minval = min(minval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (minval > u_high_discard_filter_value || minval < u_low_discard_filter_value)
{
minval = 0.;
}
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(minval);
//gl_FragColor.a = minval;
}
else{
// Color is associated to redshift/velocity
if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = 1.0 - minval;
}
// Color is associated to RGB cube
else {
if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = minval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (1.0 - minval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., 1.0 - minval);
}
// 3 to 4 sigmas
if ( (1.0 - minval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(1.0 - minval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., 1.0 - minval);
}
if ( (1.0 - minval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(1.0 - minval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., 1.0 - minval);
}
if ( (1.0 - minval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., 1.0 - minval);
}
}
// Case 4: Mom2
// TODO: verify implementation of MIP-mom2.
else {
gl_FragColor = $cmap((minval * ((minval - loc.y) * (minval - loc.y))) / minval);
}
}
}
""",
)
MINIP_FRAG_SHADER = FRAG_SHADER.format(**MINIP_SNIPPETS)
frag_dict = {
'mip': MIP_FRAG_SHADER,
'lmip': LMIP_FRAG_SHADER,
'iso': ISO_FRAG_SHADER,
'avip': TRANSLUCENT_FRAG_SHADER,
'minip': MINIP_FRAG_SHADER,
'translucent2': TRANSLUCENT2_FRAG_SHADER,
'additive': ADDITIVE_FRAG_SHADER,
}
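# Usage sketch (hypothetical, not part of this module): a volume visual
# would look up its fragment shader by render-method name and let vispy
# bind the $-prefixed template variables, e.g.
#   self.shared_program.frag = frag_dict['mip']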
# _interpolation_template = """
# #include "misc/spatial-filters.frag"
# vec4 texture_lookup_filtered(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return %s($texture, $shape, texcoord);
# }"""
#
# _texture_lookup = """
# vec4 texture_lookup(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return texture2D($texture, texcoord);
# }"""
RenderVolume = create_visual_node(RenderVolumeVisual)
| 33.252964
| 110
| 0.567297
|
ef703db82c659a484347e75656e30bf7c5cabb9f
| 854
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#TOFILL
if __name__ == '__main__':
param = [
(29,),
(13,),
(25,),
(65,),
(27,),
(42,),
(19,),
(50,),
(59,),
(13,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 21.897436
| 64
| 0.456674
|
ef70dbcac1c09bceb1d774bc2a9dbf1cb0f819da
| 3,342
|
py
|
Python
|
make3d.py
|
BritishMuseumDH/scaffold3D
|
314ee4ca5f52304c89fac71b8f293774341d6278
|
[
"CC0-1.0"
] | 4
|
2017-03-30T09:41:21.000Z
|
2021-10-01T09:18:02.000Z
|
make3d.py
|
BritishMuseumDH/scaffold3D
|
314ee4ca5f52304c89fac71b8f293774341d6278
|
[
"CC0-1.0"
] | null | null | null |
make3d.py
|
BritishMuseumDH/scaffold3D
|
314ee4ca5f52304c89fac71b8f293774341d6278
|
[
"CC0-1.0"
] | 3
|
2018-01-30T09:18:34.000Z
|
2019-06-16T17:55:24.000Z
|
import errno
import os
import shutil
from textwrap import dedent
import argparse
import subprocess
parser = argparse.ArgumentParser(description='This is a script to create 3D model folder structure')
parser.add_argument('-p', '--project', help='3D project name', required=True)
parser.add_argument('-wd', '--wd', help='Working directory', required=True)
args = parser.parse_args()
os.chdir(args.wd)
root_dir = os.path.join(args.wd, args.project)
if os.path.exists(root_dir) and os.listdir(root_dir):
# If the path already exists and it is not empty, raise an error
err_msg = '''
{directory} already exists and it is not empty.
Please try a different project name or root directory.
'''.format(directory=root_dir)
raise IOError(errno.EEXIST, dedent(err_msg))
else:
os.mkdir(root_dir) # Create the root directory
dirnames = ('images', 'masks', 'models')
# Create all the other directories
for item in dirnames:
path3D = os.path.join(args.wd, args.project, item)
os.mkdir(path3D)
write_readme(args.project, root_dir)
write_license(root_dir)
write_ignore(root_dir)
| 35.178947
| 153
| 0.680132
|
ef724512834ae77b7fe4b2559fc21eb34f4025f5
| 5,476
|
py
|
Python
|
integration/experiment/common_args.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
integration/experiment/common_args.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
integration/experiment/common_args.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Common command line arguments for experiments.
'''
def setup_run_args(parser):
"""Add common arguments for all run scripts:
--output-dir --node-count --trial-count --cool-off-time
"""
add_output_dir(parser)
add_node_count(parser)
add_trial_count(parser)
add_cool_off_time(parser)
add_enable_traces(parser)
add_enable_profile_traces(parser)
| 36.506667
| 95
| 0.611578
|
ef7445ff5f0dbb5c8605cf8ad95f6ecbcc7f04a5
| 10,466
|
py
|
Python
|
Source Code.py
|
S-AlMazrouai/Chess-king-last-position-finder
|
609346ee660655bd7aa2afe486c4ad074e3d33fc
|
[
"MIT"
] | 1
|
2022-02-04T11:14:13.000Z
|
2022-02-04T11:14:13.000Z
|
Source Code.py
|
S-AlMazrouai/Chess-king-last-position-finder
|
609346ee660655bd7aa2afe486c4ad074e3d33fc
|
[
"MIT"
] | null | null | null |
Source Code.py
|
S-AlMazrouai/Chess-king-last-position-finder
|
609346ee660655bd7aa2afe486c4ad074e3d33fc
|
[
"MIT"
] | null | null | null |
import requests
import json
import chess
import chess.pgn
import io
from collections import Counter
from openpyxl import load_workbook
import numpy
#API link: https://api.chess.com/pub/player/{user}/games/{year}/{month}/pgn
baseUrl='https://api.chess.com/pub/player/'
users=['Mazrouai'] # You can add one or more chess.com profiles; type the profile name(s) exactly as they are written on chess.com.
for user in users:
years = range(2000,2022) # Add the range of the years you want this code to analyze (from,to).
months = ['01','02','03','04','05','06','07','08','09','10','11','12'] # Keep this as it is.
count=0
winBlackKingPos=[] # Array to collect King position in the games won as black.
lossBlackKingPos=[] # Array to collect King position in the games lost as black.
winWhiteKingPos=[] # Array to collect King position in the games won as white.
lossWhiteKingPos=[] # Array to collect King position in the games lost as white.
for i in years: # For loop to iterate through the specified range of years.
for j in months: # For loop to iterate through the months of each year.
extension=str(str(user)+'/games/'+str(i)+'/'+str(j)+'/pgn') # Creates the extension for the baseUrl.
url=baseUrl+extension # Merges baseUrl with the extension.
response = requests.get(url)
pgns = io.StringIO(response.text)
if response.text == '': # Checks if pgn file is empty and if it is, it jumps to the next PGN file.
continue
while True:
games=chess.pgn.read_game(pgns) # Reads PGN file.
if games == None: # Checks if there is a game available to read inside the pgn file, if not it exits this loop to the next PGN file.
break
if games.headers['Black'] == '?': # Checks if game data is missing, if true it jumps to the next game.
continue
if games.headers['White'] == '?': # Checks if game data is missing, if true it jumps to the next game.
continue
board=games.board()
for move in games.mainline_moves(): # Moves to the last position in the game.
board.push(move)
map=board.piece_map() # Collects the positions of the pieces at their final move.
if games.headers['Black']== str(user): # Checks if the specified user is playing as black
for x,y in map.items():
if str(y) == 'k':
kingPos=chess.square_name(x) # Gets the black king position.
if games.headers['Result'] == '0-1': # Collects the king position in the games won as black.
winBlackKingPos.append(kingPos)
if games.headers['Result'] == '1-0': # Collects the king position in the games lost as black.
lossBlackKingPos.append(kingPos)
else: # If the if condition is not satisfied, then the specified user is playing as white.
for x,y in map.items():
if str(y) == 'K':
kingPos=chess.square_name(x) # Gets the white king position.
if games.headers['Result'] == '0-1': # Collects the king position in the games lost as white.
lossWhiteKingPos.append(kingPos)
if games.headers['Result'] == '1-0': # Collects the king position in the games won as white.
winWhiteKingPos.append(kingPos)
gamesWon=len(winBlackKingPos)+len(winWhiteKingPos) # Counts # of won games.
gamesLost=len(lossBlackKingPos)+len(lossWhiteKingPos) # Counts # of lost games.
gamesPlayed=gamesWon+gamesLost # counts # of analyzed games
print("Player: ",user) # Prints the name of the player.
print("games played: ",gamesPlayed) # Prints # of won games.
print("games won: ",gamesWon) # Prints # of lost games.
print("games lost: ",gamesLost) # Prints # of analyzed games
print("\n")
winWhiteKingPosCount= Counter(winWhiteKingPos) # Counts how often the winning white king ended on each square.
lossWhiteKingPosCount= Counter(lossWhiteKingPos) # Counts how often the losing white king ended on each square.
winBlackKingPosCount= Counter(winBlackKingPos) # Counts how often the winning black king ended on each square.
lossBlackKingPosCount= Counter(lossBlackKingPos) # Counts how often the losing black king ended on each square.
posCounts=[winWhiteKingPosCount,lossWhiteKingPosCount,winBlackKingPosCount,lossBlackKingPosCount] # Collects the counters into a list.
Data = load_workbook(filename='Data_Template.xlsx') # Opens the template Excel file.
sheets=Data.sheetnames # Records the sheet names.
cellLetters=[] # Array for the cell letters in the excel file.
cellNum=[] # Array for the cell numbers in the excel file.
for j in range(8): # Generates the cell letters (columns) this code will write to.
for i in range(66, 74):
cellLetters.append(chr(i))
for i in [10,9,8,7,6,5,4,3]: # Generates the cell numbers (rows) this code will write to.
for j in range(8):
cellNum.append(i)
c = 0 # Index used to step through the counters collected above.
for sheet in sheets: # For loop to iterate through the Excel sheets.
workSheet=Data[sheet]
posCount=posCounts[c] # Gets the position counter for this sheet.
c=c+1
for i in range(64): # For loop to go through the sheet cells and assign them the king recurrence value.
cell=str(cellLetters[i])+str(cellNum[i]) # Constructs the Excel cell name (e.g. B10).
count=posCount[chess.square_name(i)] # Gets the king position count that corresponds to this cell.
if count== 0: # If the king never ended on this square, set the cell to None.
count= None
workSheet[cell] = count # Sets the cell value to the king recurrence at that position.
Data.save(filename='Data_'+str(user)+'.xlsx') # Saves the data into a new xlsx file.
| 87.94958
| 228
| 0.391458
|
ef798894676eb6e1574bdd64f329e5761081b579
| 1,464
|
py
|
Python
|
src/FileHandler.py
|
mohitgupta07/tipr-1st-assgn
|
be2f742de69dbf7c300410c230eaa541d8d0eab8
|
[
"MIT"
] | null | null | null |
src/FileHandler.py
|
mohitgupta07/tipr-1st-assgn
|
be2f742de69dbf7c300410c230eaa541d8d0eab8
|
[
"MIT"
] | null | null | null |
src/FileHandler.py
|
mohitgupta07/tipr-1st-assgn
|
be2f742de69dbf7c300410c230eaa541d8d0eab8
|
[
"MIT"
] | 1
|
2019-02-15T16:44:02.000Z
|
2019-02-15T16:44:02.000Z
|
import csv
import numpy as np
| 32.533333
| 75
| 0.57377
|
ef7a8845996df8b5695e947565280cd90979fd06
| 1,840
|
py
|
Python
|
load.py
|
ontocord/create_pii_dataset
|
bfd246a8f8b443e238f260f307bd41d86adc3136
|
[
"Apache-2.0"
] | null | null | null |
load.py
|
ontocord/create_pii_dataset
|
bfd246a8f8b443e238f260f307bd41d86adc3136
|
[
"Apache-2.0"
] | null | null | null |
load.py
|
ontocord/create_pii_dataset
|
bfd246a8f8b443e238f260f307bd41d86adc3136
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright, 2021 Ontocord, LLC, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datasets import load_dataset
import os
import re
import itertools
from re import finditer
import glob
import random
import fsspec
import json
from random import randint, choice
from collections import Counter
import spacy, itertools
import langid
from nltk.corpus import stopwords
import fsspec, os, gzip
from faker import Faker
from faker.providers import person, company, geo, address
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, MarianMTModel, AutoTokenizer, pipeline
import torch
import sys
from tqdm import tqdm
model_name = 'Helsinki-NLP/opus-mt-en-hi'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_name = 'Helsinki-NLP/opus-mt-en-ar'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_name = 'Helsinki-NLP/opus-mt-en-zh'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
nlp = spacy.load('en_core_web_lg')
stopwords_en = set(stopwords.words('english'))
| 33.454545
| 112
| 0.807609
|
ef7b9c110a5e75cb118c0870480aa130248a1ef2
| 1,432
|
py
|
Python
|
piWriters/graphiteSender.py
|
shackledtodesk/piWeather
|
e0b4b4ded7ebd01fe7844807de6949a83aa3913f
|
[
"Apache-2.0"
] | null | null | null |
piWriters/graphiteSender.py
|
shackledtodesk/piWeather
|
e0b4b4ded7ebd01fe7844807de6949a83aa3913f
|
[
"Apache-2.0"
] | null | null | null |
piWriters/graphiteSender.py
|
shackledtodesk/piWeather
|
e0b4b4ded7ebd01fe7844807de6949a83aa3913f
|
[
"Apache-2.0"
] | null | null | null |
## Send data to a Graphite/Carbon Server
import traceback
import sys, time, socket, datetime
from datetime import datetime
| 31.130435
| 96
| 0.561453
|
ef7c2ae59e8e02d4a104708b8f76dca033259df6
| 479
|
py
|
Python
|
src/main/python/bots/b_jira.py
|
jceaser/gcmd_bot
|
2b2ae0631d69d9f95a3a23b04e12a4467a116ffa
|
[
"MIT"
] | null | null | null |
src/main/python/bots/b_jira.py
|
jceaser/gcmd_bot
|
2b2ae0631d69d9f95a3a23b04e12a4467a116ffa
|
[
"MIT"
] | null | null | null |
src/main/python/bots/b_jira.py
|
jceaser/gcmd_bot
|
2b2ae0631d69d9f95a3a23b04e12a4467a116ffa
|
[
"MIT"
] | null | null | null |
from b_bot import BBot
from rand_str import *
| 28.176471
| 74
| 0.553236
|
ef7c66788463fc4b72dcc5b29d43203643002b12
| 2,295
|
py
|
Python
|
preprocessors/neg_sample_from_run.py
|
felipemoraes/pyNeuIR
|
5256857387c8fe57d28167e42077ad1dcade1983
|
[
"MIT"
] | 4
|
2019-11-09T19:46:44.000Z
|
2022-01-03T07:58:20.000Z
|
preprocessors/neg_sample_from_run.py
|
felipemoraes/pyNeuIR
|
5256857387c8fe57d28167e42077ad1dcade1983
|
[
"MIT"
] | null | null | null |
preprocessors/neg_sample_from_run.py
|
felipemoraes/pyNeuIR
|
5256857387c8fe57d28167e42077ad1dcade1983
|
[
"MIT"
] | 3
|
2019-06-18T12:31:49.000Z
|
2020-11-22T08:35:07.000Z
|
"""Samples negative pairs from run."""
import argparse
from utils import load_qrels, load_run
import numpy as np
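# Hypothetical sketch of the elided entry point (the original body was
# stripped from this record); the flag names and the shapes returned by
# load_qrels/load_run are assumptions:
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--qrels', required=True, help='qrels file')
    parser.add_argument('--run', required=True, help='run file to sample from')
    parser.add_argument('--n-neg', type=int, default=10)
    args = parser.parse_args()
    qrels = load_qrels(args.qrels)
    run = load_run(args.run)
    for qid, docs in run.items():
        # Negative candidates: retrieved documents not judged relevant.
        negatives = [d for d in docs if d not in qrels.get(qid, {})]
        n = min(args.n_neg, len(negatives))
        for doc in np.random.choice(negatives, size=n, replace=False):
            print(qid, doc)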
if __name__ == "__main__":
main()
| 28.333333
| 114
| 0.525054
|
ef7cc313a84b2a9b9ea62469241644f5f1b9560b
| 1,123
|
py
|
Python
|
Search_Algorithms/testing/python_scripts/GridNet.py
|
JAOP1/GO
|
48c0275fd37bb552c0db4b968391a5a95ed6c860
|
[
"MIT"
] | null | null | null |
Search_Algorithms/testing/python_scripts/GridNet.py
|
JAOP1/GO
|
48c0275fd37bb552c0db4b968391a5a95ed6c860
|
[
"MIT"
] | null | null | null |
Search_Algorithms/testing/python_scripts/GridNet.py
|
JAOP1/GO
|
48c0275fd37bb552c0db4b968391a5a95ed6c860
|
[
"MIT"
] | 2
|
2019-12-12T18:55:35.000Z
|
2019-12-12T19:03:35.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Currently this only works for a 5x5 graph.
| 31.194444
| 92
| 0.577026
|
ef7d0ee9d64040c9087075b823e521c746835c31
| 3,436
|
py
|
Python
|
instances/game_instances.py
|
Napam/MayhemPacman
|
cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf
|
[
"MIT"
] | 1
|
2021-04-07T12:54:13.000Z
|
2021-04-07T12:54:13.000Z
|
instances/game_instances.py
|
Napam/MayhemPacman
|
cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf
|
[
"MIT"
] | null | null | null |
instances/game_instances.py
|
Napam/MayhemPacman
|
cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf
|
[
"MIT"
] | null | null | null |
'''
Module containing the in-game mayhem instances
such as the ship, planets, asteroid objects etc etc...
Written by Naphat Amundsen
'''
import numpy as np
import pygame as pg
import configparser
import sys
import os
sys.path.insert(0,'..')
from classes import spaceship
from classes import planet
from classes import maps
from classes import interface
import user_settings as user_cng
from instances import instance_config as icng
pg.font.init()
w_shape = user_cng.w_shape
w_norm = np.linalg.norm(w_shape)
COLORS = pg.colordict.THECOLORS
# The initial values of the objects
# are mostly just educated guesses
game_map = maps.game_map(
map_shape=(icng.map_shape)
)
minimap = maps.minimap(
gmap=game_map,
w_shape=w_shape,
w_norm=w_norm)
ship = spaceship.spaceship(
pos=(200,200),
init_dir=icng.RIGHT
)
sun = planet.planet(
pos=game_map.center,
init_vel=None,
init_dir=None,
rforce=None
)
earth = planet.rotating_planet(
pos=(game_map.shape[0]/2, 800),
init_vel=[-3,0],
init_dir=[1,0],
r_force=25000,
omega=0.25
)
venus = planet.rotating_planet(
pos=(game_map.shape[0]/2, 2000),
init_vel=[-5,0],
init_dir=[1,0],
r_force=40000,
omega=0.25
)
asteroids = [
planet.rotating_planet(
pos=(3000, 1000),
init_vel=[-8,2],
init_dir=[1,0],
r_force=150000,
omega=0.25
),
planet.rotating_planet(
pos=(1200, 1000),
init_vel=[10,1],
init_dir=[1,0],
r_force=390000,
omega=0.25
),
planet.rotating_planet(
pos=(500, 2000),
init_vel=[2,10],
init_dir=[1,0],
r_force=540000,
omega=0.25
),
planet.rotating_planet(
pos=(6500, 6000),
init_vel=[5,-15],
init_dir=[1,0],
r_force=1500000,
omega=0.5
),
planet.rotating_planet(
pos=(6000, 6000),
init_vel=[-15,1],
init_dir=[1,0],
r_force=1000000,
omega=0.5
),
planet.rotating_planet(
pos=(6000, 500),
init_vel=[-8,-2],
init_dir=[1,0],
r_force=600000,
omega=0.25
),
planet.rotating_planet(
pos=(5000, 2000),
init_vel=[-2,-8],
init_dir=[1,0],
r_force=200000,
omega=0.25
),
planet.rotating_planet(
pos=(game_map.shape[0]/2, 800),
init_vel=[15,0],
init_dir=[1,0],
r_force=590000,
omega=0.25
),
planet.rotating_planet(
pos=(5000, game_map.shape[1]/2),
init_vel=[0,10],
init_dir=[1,0],
r_force=150000,
omega=0.25
),
]
# For convenience
planets = [earth, venus]
all_celestials = planets + asteroids
minimap_colors = [
COLORS['white'],
COLORS['orange'],
COLORS['blue'],
COLORS['green']
]
minimap_sizes = [
1,
int(500/5000*minimap.shape[0]),
int(250/5000*minimap.shape[0]),
1
]
'''Minimap stuff for LAN-mayhem'''
minimap_colors_online = [
COLORS['white'],
COLORS['orange'],
COLORS['blue'],
COLORS['green'],
COLORS['red'],
]
minimap_sizes_online = [
1,
int(500/5000*minimap.shape[0]),
int(250/5000*minimap.shape[0]),
1,
3
]
| 20.093567
| 55
| 0.556752
|
ef7dd46d9034574570b5f449e1ddf8eb84731597
| 286
|
py
|
Python
|
test.py
|
RoyLQ/Advanced-_TCGAIntegrator
|
4767ab74b14e9d7e65e2c1ffe656619ef414148b
|
[
"MIT"
] | 2
|
2021-09-14T05:53:16.000Z
|
2021-12-01T23:59:18.000Z
|
test.py
|
RoyLQ/Advanced-_TCGAIntegrator
|
4767ab74b14e9d7e65e2c1ffe656619ef414148b
|
[
"MIT"
] | null | null | null |
test.py
|
RoyLQ/Advanced-_TCGAIntegrator
|
4767ab74b14e9d7e65e2c1ffe656619ef414148b
|
[
"MIT"
] | null | null | null |
import sys
import os
simp_path = 'TCGAIntegrator'
abs_path = os.path.abspath(simp_path)
sys.path.append(abs_path)
from TCGAIntegrator import TCGAData as TCGAData
if __name__ == '__main__':
main()
| 15.888889
| 47
| 0.716783
|
ef7f8e86f21851da0cc13ef9dc3a597eb38daaa9
| 1,649
|
py
|
Python
|
synergy/conf/global_context.py
|
mushkevych/scheduler
|
8228cde0f027c0025852cb63a6698cdd320838f1
|
[
"BSD-3-Clause"
] | 15
|
2015-02-01T09:20:23.000Z
|
2021-04-27T08:46:45.000Z
|
synergy/conf/global_context.py
|
mushkevych/scheduler
|
8228cde0f027c0025852cb63a6698cdd320838f1
|
[
"BSD-3-Clause"
] | 26
|
2015-01-12T22:28:40.000Z
|
2021-07-05T01:22:17.000Z
|
synergy/conf/global_context.py
|
mushkevych/scheduler
|
8228cde0f027c0025852cb63a6698cdd320838f1
|
[
"BSD-3-Clause"
] | 2
|
2016-07-21T03:02:46.000Z
|
2019-10-03T23:59:23.000Z
|
from synergy.db.model.queue_context_entry import queue_context_entry
from synergy.scheduler.scheduler_constants import PROCESS_GC, TOKEN_GC, PROCESS_MX, TOKEN_WERKZEUG, EXCHANGE_UTILS, \
PROCESS_SCHEDULER, TOKEN_SCHEDULER, QUEUE_UOW_STATUS, QUEUE_JOB_STATUS, PROCESS_LAUNCH_PY, TOKEN_LAUNCH_PY, \
ROUTING_IRRELEVANT
from synergy.supervisor.supervisor_constants import PROCESS_SUPERVISOR, TOKEN_SUPERVISOR
from synergy.db.model.daemon_process_entry import daemon_context_entry
process_context = {
PROCESS_LAUNCH_PY: daemon_context_entry(
process_name=PROCESS_LAUNCH_PY,
classname='',
token=TOKEN_LAUNCH_PY,
routing=ROUTING_IRRELEVANT,
exchange=EXCHANGE_UTILS),
PROCESS_MX: daemon_context_entry(
process_name=PROCESS_MX,
token=TOKEN_WERKZEUG,
classname=''),
PROCESS_GC: daemon_context_entry(
process_name=PROCESS_GC,
token=TOKEN_GC,
classname=''),
PROCESS_SCHEDULER: daemon_context_entry(
process_name=PROCESS_SCHEDULER,
classname='synergy.scheduler.synergy_scheduler.Scheduler.start',
token=TOKEN_SCHEDULER,
queue='',
routing='',
exchange=''),
PROCESS_SUPERVISOR: daemon_context_entry(
process_name=PROCESS_SUPERVISOR,
classname='synergy.supervisor.synergy_supervisor.Supervisor.start',
token=TOKEN_SUPERVISOR),
}
mq_queue_context = {
QUEUE_UOW_STATUS: queue_context_entry(exchange=EXCHANGE_UTILS, queue_name=QUEUE_UOW_STATUS),
QUEUE_JOB_STATUS: queue_context_entry(exchange=EXCHANGE_UTILS, queue_name=QUEUE_JOB_STATUS),
}
timetable_context = {
}
| 35.085106
| 117
| 0.753184
|
ef81335057d05cc62a7c03fc8a45db94b745d375
| 1,890
|
py
|
Python
|
fylesdk/apis/fyle_v3/fyle_v3.py
|
fylein/fyle-sdk-py
|
826f804ec4d94d5f95fb304254a373679a494238
|
[
"MIT"
] | 4
|
2019-05-07T07:38:27.000Z
|
2021-09-14T08:39:12.000Z
|
fylesdk/apis/fyle_v3/fyle_v3.py
|
snarayanank2/fyle-sdk-py
|
826f804ec4d94d5f95fb304254a373679a494238
|
[
"MIT"
] | 3
|
2019-09-23T11:50:31.000Z
|
2020-02-10T12:12:10.000Z
|
fylesdk/apis/fyle_v3/fyle_v3.py
|
fylein/fyle-sdk-py
|
826f804ec4d94d5f95fb304254a373679a494238
|
[
"MIT"
] | 12
|
2019-05-06T09:48:51.000Z
|
2020-11-13T10:00:26.000Z
|
"""
Fyle V3 APIs Base Class
"""
from .expenses import Expenses
from .reports import Reports
from .employees import Employees
from .orgs import Orgs
from .reimbursements import Reimbursements
from .cost_centes import CostCenters
from .categories import Categories
from .projects import Projects
from .refunds import Refunds
from .balance_transfers import BalanceTransfers
from .settlements import Settlements
from .advance_requests import AdvanceRequests
from .advances import Advances
from .bank_transactions import BankTransactions
from .trip_requests import TripRequests
from .expense_custom_properties import ExpenseCustomProperties
from .employee_custom_properties import EmployeeCustomProperties
from .advance_request_custom_properties import AdvanceRequestCustomProperties
from .trip_request_custom_properties import TripRequestCustomProperties
| 37.8
| 81
| 0.757672
|
ef8179e868198d6a8e03937bb76a29cb988fcda9
| 6,164
|
py
|
Python
|
67-2.py
|
paqul/ALX
|
0f397b53f8208df62ed3bc1f63f27a087799eb32
|
[
"MIT"
] | null | null | null |
67-2.py
|
paqul/ALX
|
0f397b53f8208df62ed3bc1f63f27a087799eb32
|
[
"MIT"
] | null | null | null |
67-2.py
|
paqul/ALX
|
0f397b53f8208df62ed3bc1f63f27a087799eb32
|
[
"MIT"
] | null | null | null |
from datetime import date as d
#---------------------------HOTEL---------------------------#
main()
| 39.512821
| 157
| 0.514114
|
ef8277ef3ae9c0ea2b164ecadf77b6f20ca717ce
| 598
|
py
|
Python
|
core/mayaScripts/SimpleCube.py
|
Bernardrouhi/HandFree
|
fbb9623cba0b8e7eb18649d29465393f06c2b9ee
|
[
"MIT"
] | null | null | null |
core/mayaScripts/SimpleCube.py
|
Bernardrouhi/HandFree
|
fbb9623cba0b8e7eb18649d29465393f06c2b9ee
|
[
"MIT"
] | null | null | null |
core/mayaScripts/SimpleCube.py
|
Bernardrouhi/HandFree
|
fbb9623cba0b8e7eb18649d29465393f06c2b9ee
|
[
"MIT"
] | null | null | null |
import sys
import maya.standalone
maya.standalone.initialize(name='python')
from maya import cmds
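# Hypothetical sketch of the elided run() (the original body was stripped
# from this record); cmds.polyCube is the standard Maya command for this:
def run():
    # polyCube returns [transform, history node]
    cube, _ = cmds.polyCube(width=1.0, height=1.0, depth=1.0)
    print('Created', cube)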
# try:
run()
maya.standalone.uninitialize()
# except Exception as e:
# sys.stdout.write(str(e))
| 19.933333
| 41
| 0.602007
|
ef841a52c1f626cc7c84690f06d3bbb17715d9c8
| 3,733
|
py
|
Python
|
GreedyGRASP/Solver_Greedy.py
|
HamidL/AMMM_Project
|
7679d1c336578464317b8326311c1ab4b69cbf11
|
[
"MIT"
] | null | null | null |
GreedyGRASP/Solver_Greedy.py
|
HamidL/AMMM_Project
|
7679d1c336578464317b8326311c1ab4b69cbf11
|
[
"MIT"
] | null | null | null |
GreedyGRASP/Solver_Greedy.py
|
HamidL/AMMM_Project
|
7679d1c336578464317b8326311c1ab4b69cbf11
|
[
"MIT"
] | null | null | null |
from GreedyGRASP.Solver import Solver
from GreedyGRASP.Solution import Solution
from GreedyGRASP.LocalSearch import LocalSearch
# Inherits from a parent abstract solver.
| 41.477778
| 130
| 0.625234
|
ef854c6d7447ee5fbf75a72fb0ffd6549ac302f6
| 5,654
|
py
|
Python
|
statslib/_lib/gmodel.py
|
ashubertt/statslib
|
5a35c0d10c3ca44c2d48f329c4f3790c91c385ac
|
[
"Apache-2.0"
] | null | null | null |
statslib/_lib/gmodel.py
|
ashubertt/statslib
|
5a35c0d10c3ca44c2d48f329c4f3790c91c385ac
|
[
"Apache-2.0"
] | 1
|
2021-04-06T10:55:34.000Z
|
2021-04-06T10:55:34.000Z
|
statslib/_lib/gmodel.py
|
ashubertt/statslib
|
5a35c0d10c3ca44c2d48f329c4f3790c91c385ac
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import math as _math
from copy import deepcopy
import matplotlib.pyplot as _plt
import numpy as np
import pandas as pd
import statsmodels.api as _sm
from statslib._lib.gcalib import CalibType
| 40.385714
| 107
| 0.561726
|
ef85c06ba18faf8168c199da975507a6176f5a0a
| 174
|
py
|
Python
|
Richard.py
|
Jpowell10/firstrepo
|
c41ac4a0526b6e56449df5adaa448091d930f731
|
[
"CC0-1.0"
] | null | null | null |
Richard.py
|
Jpowell10/firstrepo
|
c41ac4a0526b6e56449df5adaa448091d930f731
|
[
"CC0-1.0"
] | null | null | null |
Richard.py
|
Jpowell10/firstrepo
|
c41ac4a0526b6e56449df5adaa448091d930f731
|
[
"CC0-1.0"
] | null | null | null |
List1 = [1, 2, 3, 4]
List2 = ['I', 'tripped', 'over', 'and', 'hit', 'the', 'floor']
print(List1 + List2)
List3 = List1 + List2
print(List3)
fibs = (0, 1, 2, 3)
print(fibs[3])
| 24.857143
| 62
| 0.563218
|
ef86d428f2e17ef9b526fc491dcb0a17513a95ba
| 1,581
|
py
|
Python
|
app.py
|
Chen-Junbao/MalwareClassification
|
a2ef045c1e5f1f57ff183bfc6577275b14bf84d2
|
[
"MIT"
] | 4
|
2020-06-17T03:14:47.000Z
|
2022-03-29T12:15:33.000Z
|
app.py
|
Chen-Junbao/MalwareClassification
|
a2ef045c1e5f1f57ff183bfc6577275b14bf84d2
|
[
"MIT"
] | 1
|
2020-12-20T03:14:33.000Z
|
2021-02-01T17:13:44.000Z
|
app.py
|
Chen-Junbao/MalwareClassification
|
a2ef045c1e5f1f57ff183bfc6577275b14bf84d2
|
[
"MIT"
] | 1
|
2021-03-07T15:43:20.000Z
|
2021-03-07T15:43:20.000Z
|
import os
from flask import Flask, render_template, request, jsonify
from display.predict import predict_file
app = Flask(__name__, static_folder="./display/static", template_folder="./display/templates")
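# Hypothetical upload route restored as a sketch (the original view
# functions were stripped from this record); the form field name 'file'
# and the return shape of predict_file are assumptions:
@app.route('/predict', methods=['POST'])
def predict():
    f = request.files['file']
    path = os.path.join('./display/files', f.filename)
    f.save(path)
    return jsonify(predict_file(path))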
if __name__ == '__main__':
if not os.path.exists('./display/files'):
os.mkdir('./display/files')
app.run(debug=True)
| 29.277778
| 94
| 0.571157
|
ef873aee93350e545e2097a1a737710da0346193
| 886
|
py
|
Python
|
test_linprog_curvefit.py
|
drofp/linprog_curvefit
|
96ba704edae7cea42d768d7cc6d4036da2ba313a
|
[
"Apache-2.0"
] | null | null | null |
test_linprog_curvefit.py
|
drofp/linprog_curvefit
|
96ba704edae7cea42d768d7cc6d4036da2ba313a
|
[
"Apache-2.0"
] | 3
|
2019-11-22T08:04:18.000Z
|
2019-11-26T06:55:36.000Z
|
test_linprog_curvefit.py
|
drofp/linprog_curvefit
|
96ba704edae7cea42d768d7cc6d4036da2ba313a
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from ortools.linear_solver import pywraplp
| 40.272727
| 80
| 0.700903
|
ef87a851b0ff397ab056489c49ee4d54f1a8b8b0
| 14,278
|
py
|
Python
|
uno_ct_v3.py
|
simple-circuit/Component-Curve-Tracer
|
3842f1b0054230325f55296cbc88628b3f88fa88
|
[
"MIT"
] | 1
|
2021-08-04T03:08:07.000Z
|
2021-08-04T03:08:07.000Z
|
uno_ct_v3.py
|
simple-circuit/Component-Curve-Tracer
|
3842f1b0054230325f55296cbc88628b3f88fa88
|
[
"MIT"
] | null | null | null |
uno_ct_v3.py
|
simple-circuit/Component-Curve-Tracer
|
3842f1b0054230325f55296cbc88628b3f88fa88
|
[
"MIT"
] | 1
|
2021-08-29T14:05:42.000Z
|
2021-08-29T14:05:42.000Z
|
# Uno PWM bipolar curve tracer app by simple-circuit 12-22-19
# rev 3 1-13-20
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import font
import numpy as np
import serial
root = Tk()
default_font = font.nametofont("TkDefaultFont")
default_font.configure(size=9)
canvas = Canvas(root)
root.geometry("720x540")
root.title('Uno Curve Tracer in Python')
canvas.grid(column=0, row=0, sticky=(N,W,E,S))
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
xtvar = BooleanVar()
xtvar.set(False)
contvar = BooleanVar()
contvar.set(False)
crampvar = BooleanVar()
crampvar.set(False)
#ser = serial.Serial('/dev/ttyACM0', baudrate=115200, timeout = 1)
ser = serial.Serial('COM10', baudrate=115200, timeout = 1)
globalVar()
stepn = IntVar()
stepn.set(-1)
canvas.create_rectangle((0,0,512,512),fill='green')
for i in range(11):
canvas.create_line((51.2*i, 0, 51.2*i, 512), fill='black', width=1)
canvas.create_line((0,51.2*i, 512, 51.2*i), fill='black', width=1)
for i in range(50):
canvas.create_line((10.24*i, 254, 10.24*i, 258), fill='green', width=1)
canvas.create_line((254,10.24*i, 258, 10.24*i), fill='green', width=1)
canvas.create_line((612,10,612,480), fill='grey', width=3)
canvas.create_line((520,10,605,10), fill='grey', width=3)
canvas.create_line((520,115,605,115), fill='grey', width=3)
canvas.create_line((520,220,605,220), fill='grey', width=3)
canvas.create_line((520,247,605,247), fill='grey', width=3)
canvas.create_line((520,335,605,335), fill='grey', width=3)
canvas.create_line((520,430,605,430), fill='grey', width=3)
canvas.create_line((620,10,705,10), fill='grey', width=3)
canvas.create_line((620,75,705,75), fill='grey', width=3)
canvas.create_line((620,170,705,170), fill='grey', width=3)
canvas.create_line((620,275,705,275), fill='grey', width=3)
canvas.create_line((620,370,705,370), fill='grey', width=3)
canvas.create_line((520,515,705,515), fill='grey', width=3)
trcvar = IntVar(value=0) # initial value
trc = Spinbox(canvas, from_= 0, to = 4, increment = 1, width = 1, command = plotxy, textvariable=trcvar)
trc.place(x = 620, y = 20)
trcvar.set(0)
labeltrc = Label(canvas)
labeltrc.place(x = 655, y = 20)
labeltrc.config(text = 'Trace')
mtrcvar = IntVar(value=1) # initial value
mtrc = Spinbox(canvas, from_= 1, to = 5, increment = 1, width = 1, command = plotxy, textvariable=mtrcvar)
mtrc.place(x = 620, y = 45)
mtrcvar.set(1)
labelmtrc = Label(canvas)
labelmtrc.place(x = 655, y = 45)
labelmtrc.config(text = 'Multiple')
trcsave = ttk.Button(canvas, text="Save", command = runSave)
trcsave.place(x = 620, y = 90)
trcload = ttk.Button(canvas, text="Load", command = runLoad)
trcload.place(x = 620, y = 130)
startvar = DoubleVar(value=0.6) # initial value
startval = Spinbox(canvas, from_= 0.0, to = 5.0, increment = 0.02, width = 4, command = runStart, textvariable=startvar)
startval.place(x = 620, y = 185)
startvar.set(0.6)
labelstart = Label(canvas)
labelstart.place(x = 680, y = 185)
labelstart.config(text = 'Start')
stepvar = DoubleVar(value=0.88) # initial value
stepval = Spinbox(canvas, from_= 0.0, to = 5.0, increment = 0.02, width = 4, textvariable=stepvar)
stepval.place(x = 620, y = 205)
stepvar.set(0.88)
labelstep = Label(canvas)
labelstep.place(x = 680, y = 205)
labelstep.config(text = 'Step')
trcload = ttk.Button(canvas, text="Run Steps", command = runSteps)
trcload.place(x = 620, y = 235)
adcvar = IntVar(value=0) # initial value
adcval = Spinbox(canvas, from_= 0, to = 5, increment = 1, width = 2, textvariable=adcvar)
adcval.place(x = 620, y = 325)
adcvar.set(0)
labeladc = Label(canvas)
labeladc.place(x = 660, y = 325)
labeladc.config(text = '0.000V')
adcread = ttk.Button(canvas, text="Read ADC", command = runAdc)
adcread.place(x = 620, y = 295)
cts = ttk.Button(canvas, text="Sine")
cts.place(x = 520, y = 20)
cts.bind("<Button-1>", runSine)
magvar = DoubleVar(value=11.5) # initial value
sinmag = Spinbox(canvas, from_= 2.4, to = 11.5, increment = 0.1, width = 4, command = runMag, textvariable=magvar)
sinmag.place(x = 520, y = 50)
magvar.set(11.5)
sinmag.bind("<Return>", runMag)
labelmag = Label(canvas)
labelmag.place(x = 575, y = 50)
labelmag.config(text = 'Vp')
freqvar = DoubleVar(value=16) # initial value
freq = Spinbox(canvas, from_= 4, to =50, increment = 1.0, width = 2, command = runFreq, textvariable=freqvar)
freq.place(x = 520, y = 75)
freqvar.set(16)
labelfreq = Label(canvas)
labelfreq.place(x = 555, y = 75)
labelfreq.config(text = '60.2 Hz')
cnt = ttk.Checkbutton(canvas, text="Cont. Sine", variable=contvar, onvalue=True)
cnt.place(x = 520, y = 95)
ctr = ttk.Button(canvas, text="Ramp")
ctr.place(x = 520, y = 125)
ctr.bind("<Button-1>", runRamp)
posvar = DoubleVar(value=11.5) # initial value
posmag = Spinbox(canvas, from_= -11.5, to = 11.5, increment = 0.1, width = 4, command = runPos, textvariable=posvar)
posmag.place(x = 520, y = 155)
posvar.set(11.5)
posmag.bind("<Return>", runPos)
labelpos = Label(canvas)
labelpos.place(x = 572, y = 155)
labelpos.config(text = 'Vmax')
negvar = DoubleVar(value=-11.5) # initial value
negmag = Spinbox(canvas, from_= -11.5, to = 11.5, increment = 0.1, width = 4, command = runNeg, textvariable=negvar)
negmag.place(x = 520, y = 180)
negvar.set(-11.5)
negmag.bind("<Return>", runNeg)
labelneg = Label(canvas)
labelneg.place(x = 572, y = 180)
labelneg.config(text = 'Vmin')
cramp = ttk.Checkbutton(canvas, text="Cont. Ramp", variable=crampvar, onvalue=True)
cramp.place(x = 520, y = 200)
xt = ttk.Checkbutton(canvas, text="X-t Plot", variable=xtvar, command=runXt, onvalue=True)
xt.place(x = 520, y = 225)
curvar = IntVar(value= 0)
cursor = Spinbox(canvas, from_= 0, to = 255, width = 3, command = runPlot, textvariable = curvar)
cursor.place(x = 520, y = 255)
cursor.bind("<Return>", runPlot)
labelcur = Label(canvas)
labelcur.place(x = 565, y = 255)
labelcur.config(text = 'Cursor')
label1 = Label(canvas)
label1.place(x = 520, y = 275)
label1.config(text = 'V')
label2 = Label(canvas)
label2.place(x = 520, y = 295)
label2.config(text = 'mA')
label3 = Label(canvas)
label3.place(x = 520, y = 315)
label3.config(text = 'ms')
mrk = ttk.Button(canvas, text="Mark")
mrk.place(x = 520, y = 345, height = 22)
mrk.bind("<Button-1>", runMark)
labelm1 = Label(canvas)
labelm1.place(x = 520, y = 370)
labelm1.config(text = 'V')
labelm2 = Label(canvas)
labelm2.place(x = 520, y = 390)
labelm2.config(text = 'mA')
labelm3 = Label(canvas)
labelm3.place(x = 520, y = 410)
labelm3.config(text = 'ms')
labeldt = Label(canvas)
labeldt.place(x = 540, y = 433)
labeldt.config(text = 'Delta')
labeld1 = Label(canvas)
labeld1.place(x = 520, y = 450)
labeld1.config(text = 'V')
labeld2 = Label(canvas)
labeld2.place(x = 520, y = 470)
labeld2.config(text = 'mA')
labeld3 = Label(canvas)
labeld3.place(x = 520, y = 490)
labeld3.config(text = 'ms')
labelr = Label(canvas)
labelr.place(x = 520, y = 520)
labelr.config(text = 'ohms')
labelc = Label(canvas)
labelc.place(x = 640, y = 520)
labelc.config(text = 'C = uF')
canvas.bind("<Button-1>", runMouse)
plotxy()
root.after(0, runCont)
root.wm_protocol ("WM_DELETE_WINDOW", endserial)
root.mainloop()
| 29.745833
| 172
| 0.583975
|
ef8970f817cb168ae688aab739b624cb804e885d
| 665
|
py
|
Python
|
logger/TMP102.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | 1
|
2015-12-23T04:27:16.000Z
|
2015-12-23T04:27:16.000Z
|
logger/TMP102.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | null | null | null |
logger/TMP102.py
|
scsibug/Raspberry-Pi-Sensor-Node
|
606cf2a15a72ac1503c7318a39c9f3cc523a9c4a
|
[
"Unlicense"
] | null | null | null |
import time
import smbus
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# TMP102 Class
# ===========================================================================
| 24.62963
| 77
| 0.508271
|
ef8bac5da5b68c79dd574b7a205be89cb3f23f5d
| 178
|
py
|
Python
|
headlines.py
|
plamenbelev/headlines
|
49e5995042abf31b2f898ca1daaf7ee99005dde9
|
[
"MIT"
] | null | null | null |
headlines.py
|
plamenbelev/headlines
|
49e5995042abf31b2f898ca1daaf7ee99005dde9
|
[
"MIT"
] | null | null | null |
headlines.py
|
plamenbelev/headlines
|
49e5995042abf31b2f898ca1daaf7ee99005dde9
|
[
"MIT"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
if __name__ == '__main__':
app.run(port=5000, debug=True)
| 14.833333
| 34
| 0.662921
|
ef90aefef6921157afac229b23fbddf7cab99743
| 854
|
py
|
Python
|
help_desk/help_desk/doctype/department_name/department_name.py
|
shrikant9867/mycfohelpdesk
|
b285b156aec53ecff5873f4630638687ff5a0e92
|
[
"MIT"
] | null | null | null |
help_desk/help_desk/doctype/department_name/department_name.py
|
shrikant9867/mycfohelpdesk
|
b285b156aec53ecff5873f4630638687ff5a0e92
|
[
"MIT"
] | null | null | null |
help_desk/help_desk/doctype/department_name/department_name.py
|
shrikant9867/mycfohelpdesk
|
b285b156aec53ecff5873f4630638687ff5a0e92
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import string
from frappe.model.document import Document
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from erpnext.controllers.item_variant import get_variant, copy_attributes_to_variant, ItemVariantExistsError
| 32.846154
| 108
| 0.799766
|
ef911bdd33ff81cae4898bfd37e8a89b765f201c
| 2,565
|
py
|
Python
|
src/medical_test_service/medical_test.py
|
phamnam-mta/know-life
|
f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d
|
[
"MIT"
] | null | null | null |
src/medical_test_service/medical_test.py
|
phamnam-mta/know-life
|
f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d
|
[
"MIT"
] | null | null | null |
src/medical_test_service/medical_test.py
|
phamnam-mta/know-life
|
f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d
|
[
"MIT"
] | null | null | null |
import logging
from typing import Text, List
from src.utils.io import read_json
from src.utils.fuzzy import is_relevant_string
from src.utils.common import is_float
from src.utils.constants import (
MEDICAL_TEST_PATH,
QUANTITATIVE_PATH,
POSITIVE_TEXT,
TestResult
)
logger = logging.getLogger(__name__)
| 43.474576
| 163
| 0.474854
|
ef933f2244982928a2ce88206760be93146f1a77
| 1,064
|
py
|
Python
|
scam.py
|
TheToddLuci0/Tarkov-Scammer
|
5fced3952c6cec72fe3eb85384bc11f65ee6af9c
|
[
"BSD-3-Clause"
] | 2
|
2021-02-09T19:13:14.000Z
|
2021-02-23T08:41:14.000Z
|
scam.py
|
TheToddLuci0/Tarkov-Scammer
|
5fced3952c6cec72fe3eb85384bc11f65ee6af9c
|
[
"BSD-3-Clause"
] | null | null | null |
scam.py
|
TheToddLuci0/Tarkov-Scammer
|
5fced3952c6cec72fe3eb85384bc11f65ee6af9c
|
[
"BSD-3-Clause"
] | null | null | null |
import requests
import sys
from time import sleep
from tabulate import tabulate
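# Hypothetical sketch of the elided get_scams helper; the endpoint URL and
# the response fields below are assumptions, not the project's real API:
def get_scams(secret):
    resp = requests.get('https://example.invalid/api/scams',
                        headers={'Authorization': secret})
    rows = [(s.get('name'), s.get('reason')) for s in resp.json()]
    print(tabulate(rows, headers=['Name', 'Reason']))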
if __name__=='__main__':
try:
with open('.secret', 'r') as f:
secret = f.read().strip()
except IOError:
secret = sys.argv[1]
get_scams(secret)
| 34.322581
| 107
| 0.599624
|
ef955712af3bae4edd3cd451907984d2f75e38d0
| 101
|
py
|
Python
|
hair_segmentation/model.py
|
shoman2/mediapipe-models
|
c588321f3b5056f2239d834c603046de7901d02e
|
[
"Apache-2.0"
] | 28
|
2019-10-08T06:07:45.000Z
|
2021-06-12T07:01:32.000Z
|
hair_segmentation/model.py
|
shoman2/mediapipe-models
|
c588321f3b5056f2239d834c603046de7901d02e
|
[
"Apache-2.0"
] | null | null | null |
hair_segmentation/model.py
|
shoman2/mediapipe-models
|
c588321f3b5056f2239d834c603046de7901d02e
|
[
"Apache-2.0"
] | 8
|
2019-10-10T04:59:02.000Z
|
2021-03-28T16:11:09.000Z
|
# Real-time Hair Segmentation and Recoloring on Mobile GPUs (https://arxiv.org/abs/1907.06740)
# TODO
| 50.5
| 94
| 0.772277
|
ef978b1341d6c5d5f3129e9244bedb2f75765eb8
| 996
|
py
|
Python
|
blog/viewmixins.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 11
|
2018-04-02T16:36:19.000Z
|
2019-07-10T05:54:58.000Z
|
blog/viewmixins.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 22
|
2019-01-01T20:40:21.000Z
|
2022-02-10T08:06:39.000Z
|
blog/viewmixins.py
|
pincoin/rakmai
|
d9daa399aff50712a86b2dec9d94e622237b25b0
|
[
"MIT"
] | 4
|
2019-03-12T14:24:37.000Z
|
2022-01-07T16:20:22.000Z
|
import logging
from .forms import PostSearchForm
from .models import Blog
| 27.666667
| 77
| 0.684739
|
ef978c724ad463ecd7562dae7e149d5ae0ce4282
| 677
|
py
|
Python
|
mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py
|
mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep
|
b35f6c0b2e264e2913d0a1c432bf89c7b329bf52
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py
|
mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep
|
b35f6c0b2e264e2913d0a1c432bf89c7b329bf52
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py
|
mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep
|
b35f6c0b2e264e2913d0a1c432bf89c7b329bf52
|
[
"Apache-2.0"
] | null | null | null |
from opencmiss.utils.maths.algorithms import calculate_line_plane_intersection
| 33.85
| 78
| 0.713442
|
ef97b640d54812e21c1fdb002e84f00eb0d09eea
| 76
|
py
|
Python
|
antools/shared/data_validation/__init__.py
|
antonin-drozda/antools
|
550310a61aae8d11e50e088731211197b7ee790b
|
[
"MIT"
] | 1
|
2021-02-27T07:22:39.000Z
|
2021-02-27T07:22:39.000Z
|
antools/shared/data_validation/__init__.py
|
antonin-drozda/antools
|
550310a61aae8d11e50e088731211197b7ee790b
|
[
"MIT"
] | null | null | null |
antools/shared/data_validation/__init__.py
|
antonin-drozda/antools
|
550310a61aae8d11e50e088731211197b7ee790b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
SHARED - DATA VALIDATION
"""
# %% FILE IMPORT
| 9.5
| 24
| 0.526316
|
ef9836ec7a3a89d88130ef5b51f413cd84a57435
| 2,152
|
py
|
Python
|
test/test/host_test_default.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 29
|
2018-11-30T19:45:22.000Z
|
2022-03-29T17:02:16.000Z
|
test/test/host_test_default.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 160
|
2018-11-30T21:55:52.000Z
|
2022-01-18T10:58:09.000Z
|
test/test/host_test_default.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | 73
|
2018-11-30T21:34:41.000Z
|
2021-10-02T05:51:40.000Z
|
# Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mbed_os_tools.test.host_tests_runner.host_test_default import DefaultTestSelector
if __name__ == '__main__':
unittest.main()
| 39.127273
| 86
| 0.676115
|
ef9847d747aab77361f5e75e1a5b9c126c9e90f9
| 3,359
|
py
|
Python
|
lib/surface/debug/logpoints/delete.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/debug/logpoints/delete.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/debug/logpoints/delete.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:23:41.000Z
|
2020-07-25T12:23:41.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete command for gcloud debug logpoints command group."""
from googlecloudsdk.api_lib.debug import debug
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
| 36.51087
| 80
| 0.693659
|
ef99c583c045deb51df0a2fd8b0f81216762f3eb
| 3,844
|
py
|
Python
|
day-07/solution.py
|
wangjoshuah/Advent-Of-Code-2018
|
6bda7956bb7c6f9a54feffb19147961b56dc5d81
|
[
"MIT"
] | null | null | null |
day-07/solution.py
|
wangjoshuah/Advent-Of-Code-2018
|
6bda7956bb7c6f9a54feffb19147961b56dc5d81
|
[
"MIT"
] | null | null | null |
day-07/solution.py
|
wangjoshuah/Advent-Of-Code-2018
|
6bda7956bb7c6f9a54feffb19147961b56dc5d81
|
[
"MIT"
] | null | null | null |
# directed graph problem or breadth first search variant
from collections import defaultdict
import re
input_file = open("input.txt", "r")
input_lines = input_file.readlines()
letter_value = {
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'H': 8,
'I': 9,
'J': 10,
'K': 11,
'L': 12,
'M': 13,
'N': 14,
'O': 15,
'P': 16,
'Q': 17,
'R': 18,
'S': 19,
'T': 20,
'U': 21,
'V': 22,
'W': 23,
'X': 24,
'Y': 25,
'Z': 26
}
# construct graph of nodes and edges
# Read nodes and edges from input with regex
# A set of possible nodes to work on (starts with C)
# pick the first alphabetical node and remove it and its edges
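# A minimal sketch of the construction described above (hypothetical; the
# original definitions of construct_graph, work_nodes and
# work_nodes_in_parallel were stripped from this record):
def construct_graph(lines):
    nodes = set()
    edges = defaultdict(set)  # prerequisite -> steps it unlocks
    pattern = re.compile(r'Step (\w) must be finished before step (\w) can begin.')
    for line in lines:
        before, after = pattern.search(line).groups()
        nodes.update((before, after))
        edges[before].add(after)
    return nodes, edges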
# Part 1
# print(work_nodes(construct_graph(input_lines)))
# Part 2
nodes, edges = construct_graph(input_lines)
total_time = work_nodes_in_parallel(nodes, edges, 5, 60)
print(f"It took {total_time} seconds")
| 27.070423
| 105
| 0.601977
|
ef99d022220363214630da6ad916a3a41900d8d7
| 2,862
|
py
|
Python
|
src/infi/pypi_manager/scripts/compare_pypi_repos.py
|
Infinidat/infi.pypi_manager
|
7b5774b395ef47a23be2957a091b607b35a049f2
|
[
"BSD-3-Clause"
] | null | null | null |
src/infi/pypi_manager/scripts/compare_pypi_repos.py
|
Infinidat/infi.pypi_manager
|
7b5774b395ef47a23be2957a091b607b35a049f2
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T10:04:45.000Z
|
2020-11-05T11:03:25.000Z
|
src/infi/pypi_manager/scripts/compare_pypi_repos.py
|
Infinidat/infi.pypi_manager
|
7b5774b395ef47a23be2957a091b607b35a049f2
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
from .. import PyPI, DjangoPyPI, PackageNotFound
from prettytable import PrettyTable
from pkg_resources import parse_version, resource_filename
import requests
import re
try:
from urlparse import unquote
except ImportError:
# Python 3
from urllib.parse import unquote
| 40.885714
| 120
| 0.70615
|
ef9afc42c7347b259e757e59b46b756f7ac092fc
| 6,954
|
py
|
Python
|
src/GNLSE_specific.py
|
Computational-Nonlinear-Optics-ORC/Compare-CNLSE
|
9b56cedbca2a06af3baa9f64e46ebfd4263f86c2
|
[
"MIT"
] | null | null | null |
src/GNLSE_specific.py
|
Computational-Nonlinear-Optics-ORC/Compare-CNLSE
|
9b56cedbca2a06af3baa9f64e46ebfd4263f86c2
|
[
"MIT"
] | null | null | null |
src/GNLSE_specific.py
|
Computational-Nonlinear-Optics-ORC/Compare-CNLSE
|
9b56cedbca2a06af3baa9f64e46ebfd4263f86c2
|
[
"MIT"
] | 3
|
2018-06-04T18:43:03.000Z
|
2021-11-24T07:57:03.000Z
|
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.fftpack import fft
from combined_functions import check_ft_grid
from scipy.constants import pi, c, hbar
from numpy.fft import fftshift
from scipy.io import loadmat
from time import time
import sys
import matplotlib.pyplot as plt
from scipy.integrate import simps
def fv_creator(fp, df, F, int_fwm):
"""
Creates a frequency grid such that the estimated MI-FWM bands
lie on the grid, and extends it to avoid fft boundary problems.
Inputs::
lamp: wavelength of the pump (float)
lamda_c: wavelength at the zero dispersion point (ZDW) (float)
int_fwm: class that holds nt (number of points in each band)
betas: Taylor coefficients of beta around the ZDW (Array)
M : The M coefficient (or 1/A_eff) (float)
P_p: pump power
Df_band: band frequency bandwidth in THz (float)
Output::
fv: Frequency vector of bands (Array of shape [nt])
"""
f_centrals = [fp + i * F for i in range(-1, 2)]
fv1 = np.linspace(f_centrals[0], f_centrals[1],
int_fwm.nt//4 - 1, endpoint=False)
df = fv1[1] - fv1[0]  # recomputed from the grid; the incoming df argument is overridden
fv2 = np.linspace(f_centrals[1], f_centrals[2], int_fwm.nt//4)
try:
assert df == fv2[1] - fv2[0]
except AssertionError:
print(df, fv2[1] - fv2[0])
fv0, fv3 = np.zeros(int_fwm.nt//4 + 1), np.zeros(int_fwm.nt//4)
fv0[-1] = fv1[0] - df
fv3[0] = fv2[-1] + df
for i in range(1, len(fv3)):
fv3[i] = fv3[i - 1] + df
for i in range(len(fv0) - 2, -1, -1):
fv0[i] = fv0[i + 1] - df
assert not(np.any(fv0 == fv1))
assert not(np.any(fv1 == fv2))
assert not(np.any(fv2 == fv3))
fv = np.concatenate((fv0, fv1, fv2, fv3))
for i in range(3):
assert f_centrals[i] in fv
check_ft_grid(fv, df)
p_pos = np.where(np.abs(fv - fp) == np.min(np.abs(fv - fp)))[0]
return fv, p_pos, f_centrals
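# Minimal usage sketch (hypothetical values; int_fwm only needs an nt field):
# class _Params: nt = 1024
# fv, p_pos, f_centrals = fv_creator(fp=283.0, df=None, F=1.0, int_fwm=_Params())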
| 33.757282
| 77
| 0.560397
|
ef9b8a20ac811d824f51d3976e30e0eeef10150a
| 172
|
py
|
Python
|
recipe/run_test.py
|
regro-cf-autotick-bot/astropy-healpix-feedstock
|
c859aa66fbdcc397e82ef3bb1940a99da0deb8fc
|
[
"BSD-3-Clause"
] | null | null | null |
recipe/run_test.py
|
regro-cf-autotick-bot/astropy-healpix-feedstock
|
c859aa66fbdcc397e82ef3bb1940a99da0deb8fc
|
[
"BSD-3-Clause"
] | 24
|
2017-10-15T20:52:48.000Z
|
2021-11-11T00:45:54.000Z
|
recipe/run_test.py
|
regro-cf-autotick-bot/astropy-healpix-feedstock
|
c859aa66fbdcc397e82ef3bb1940a99da0deb8fc
|
[
"BSD-3-Clause"
] | 4
|
2017-10-15T20:37:19.000Z
|
2021-08-05T14:42:53.000Z
|
# The test suite runs in <20 seconds so is worth running here to
# make sure there are no issues with the C/Cython extensions
import astropy_healpix
astropy_healpix.test()
| 34.4
| 64
| 0.796512
|
ef9bbe1541d0c953af96d087b8ca600f95dd7284
| 45
|
py
|
Python
|
way2sms/__init__.py
|
shubhamc183/way2sms
|
33d8c9e69ab9b053e50501baf887191c718d2d2a
|
[
"MIT"
] | 38
|
2016-12-15T14:03:00.000Z
|
2022-03-22T01:28:29.000Z
|
way2sms/__init__.py
|
shubhamc183/way2sms
|
33d8c9e69ab9b053e50501baf887191c718d2d2a
|
[
"MIT"
] | 10
|
2017-11-18T08:13:18.000Z
|
2020-09-06T11:18:32.000Z
|
way2sms/__init__.py
|
shubhamc183/way2sms
|
33d8c9e69ab9b053e50501baf887191c718d2d2a
|
[
"MIT"
] | 41
|
2016-12-26T16:52:59.000Z
|
2022-03-22T01:31:40.000Z
|
"""
Way2sms
"""
from way2sms.app import Sms
| 7.5
| 27
| 0.666667
|
ef9be069a058d33204131a55950dcf855daf7d54
| 1,164
|
py
|
Python
|
example.py
|
jasonkatz/py-graphql-client
|
9f938f3d379a8f4d8810961c87baf25dbe35889d
|
[
"BSD-3-Clause"
] | 38
|
2019-03-22T16:27:08.000Z
|
2022-03-30T11:07:55.000Z
|
example.py
|
anthonyhiga/py-graphql-client
|
9c59b32bae5c5c6a12634b2bd6353f76328aa31a
|
[
"BSD-3-Clause"
] | 31
|
2019-03-25T20:28:40.000Z
|
2022-01-26T21:22:47.000Z
|
example.py
|
anthonyhiga/py-graphql-client
|
9c59b32bae5c5c6a12634b2bd6353f76328aa31a
|
[
"BSD-3-Clause"
] | 11
|
2019-03-25T18:54:32.000Z
|
2021-09-11T17:00:27.000Z
|
import time
from graphql_client import GraphQLClient
# some sample GraphQL server which supports websocket transport and subscription
client = GraphQLClient('ws://localhost:9001')
# Simple Query Example
# query example with GraphQL variables
query = """
query getUser($userId: Int!) {
users (id: $userId) {
id
username
}
}
"""
# This is a blocking call, you receive response in the `res` variable
print('Making a query first')
res = client.query(query, variables={'userId': 2})
print('query result', res)
# Subscription Example
subscription_query = """
subscription getUser {
users (id: 2) {
id
username
}
}
"""
# Our callback function, which will be called and passed data every time new data is available
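# A minimal assumed callback (the snippet uses `my_callback` without
# defining it); the callback is assumed to receive the subscription id
# and the payload:
def my_callback(op_id, data):
    print('got new data for subscription {}:'.format(op_id))
    print(data)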
print('Making a graphql subscription now...')
sub_id = client.subscribe(subscription_query, callback=my_callback)
print('Created subscription and waiting. Callback function is called whenever there is new data')
# do some operation while the subscription is running...
time.sleep(10)
client.stop_subscribe(sub_id)
client.close()
| 23.755102
| 97
| 0.734536
|
ef9c140412569fc3198bcf6324071fb38dea2030
| 2,465
|
py
|
Python
|
Scopus2Histcite.py
|
hengxyz/Scopus4HistCite
|
87395afe5d8a520b9c32a0efeed2288225430244
|
[
"Apache-2.0"
] | 2
|
2020-07-09T13:10:44.000Z
|
2020-07-10T13:00:52.000Z
|
Scopus2Histcite.py
|
hengxyz/Scopus4HistCite
|
87395afe5d8a520b9c32a0efeed2288225430244
|
[
"Apache-2.0"
] | null | null | null |
Scopus2Histcite.py
|
hengxyz/Scopus4HistCite
|
87395afe5d8a520b9c32a0efeed2288225430244
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
import os
import sys
if __name__ == '__main__':
Scopus2HistCite()
| 35.724638
| 80
| 0.416633
|
ef9d373a85947b14498743498aaf4ab814a074db
| 2,449
|
py
|
Python
|
mel_scale.py
|
zjlww/dsp
|
d7bcbf49bc8693560f3203c55b73956cc61dcd50
|
[
"MIT"
] | 9
|
2021-07-22T19:59:34.000Z
|
2021-12-16T06:37:27.000Z
|
mel_scale.py
|
zjlww/dsp
|
d7bcbf49bc8693560f3203c55b73956cc61dcd50
|
[
"MIT"
] | null | null | null |
mel_scale.py
|
zjlww/dsp
|
d7bcbf49bc8693560f3203c55b73956cc61dcd50
|
[
"MIT"
] | 2
|
2021-07-26T07:14:58.000Z
|
2021-12-16T06:37:30.000Z
|
"""
Mel-scale definition.
"""
import torch
from torch import Tensor
from typing import Union
import numpy as np
from math import log
import librosa
from librosa.filters import mel as mel_fn
def hz_to_mel(
frequencies: Union[float, int, Tensor, np.ndarray],
htk=False) -> Union[float, int, Tensor, np.ndarray]:
"""Convert Hz to Mels.
    Extending librosa.hz_to_mel to accept Tensors.
"""
if not isinstance(frequencies, Tensor):
return librosa.hz_to_mel(frequencies)
if htk:
return 2595.0 * torch.log10(1.0 + frequencies / 700.0)
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = log(6.4) / 27.0 # step size for log region
log_t = frequencies >= min_log_hz
mels[log_t] = min_log_mel + torch.log(frequencies[log_t] / min_log_hz) / \
logstep
return mels
def mel_to_hz(
mels: Union[int, float, Tensor, np.ndarray],
htk=False) -> Union[int, float, Tensor, np.ndarray]:
"""Convert mel bin numbers to frequencies."""
if not isinstance(mels, Tensor):
return librosa.mel_to_hz(mels, htk=htk)
if htk:
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = log(6.4) / 27.0 # step size for log region
log_t = mels >= min_log_mel
freqs[log_t] = min_log_hz * \
torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def linear_mel_matrix(
sampling_rate: int, fft_size: int, mel_size: int,
mel_min_f0: Union[int, float],
mel_max_f0: Union[int, float],
device: torch.device
) -> Tensor:
"""
Args:
sampling_rate: Sampling rate in Hertz.
fft_size: FFT size, must be an even number.
mel_size: Number of mel-filter banks.
mel_min_f0: Lowest frequency in the mel spectrogram.
mel_max_f0: Highest frequency in the mel spectrogram.
device: Target device of the transformation matrix.
Returns:
basis: [mel_size, fft_size // 2 + 1].
"""
basis = torch.FloatTensor(
mel_fn(sampling_rate, fft_size, mel_size, mel_min_f0, mel_max_f0)
).transpose(-1, -2)
return basis.to(device)
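# Quick usage sketch (assumed values): the Tensor path mirrors librosa's
# Slaney-style mel scale, so a round trip should recover the inputs.
if __name__ == "__main__":
    f = torch.tensor([440.0, 2000.0, 8000.0])
    m = hz_to_mel(f)
    print(m, mel_to_hz(m))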
| 31.805195
| 78
| 0.642303
|
ef9edac80f3106bed3243580dd908ece6900cb29
| 379
|
py
|
Python
|
order/urls.py
|
xxcfun/trip-api
|
a51c8b6033ba2a70cf0e400180f31809f4ce476a
|
[
"Apache-2.0"
] | 1
|
2021-06-18T03:03:40.000Z
|
2021-06-18T03:03:40.000Z
|
order/urls.py
|
xxcfun/trip-api
|
a51c8b6033ba2a70cf0e400180f31809f4ce476a
|
[
"Apache-2.0"
] | null | null | null |
order/urls.py
|
xxcfun/trip-api
|
a51c8b6033ba2a70cf0e400180f31809f4ce476a
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from order import views
urlpatterns = [
#
path('ticket/submit/', views.TicketOrderSubmitView.as_view(), name='ticket_submit'),
#
path('order/detail/<int:sn>/', views.OrderDetail.as_view(), name='order_detail'),
#
path('order/list/', views.OrderListView.as_view(), name='order_list')
]
| 29.153846
| 89
| 0.664908
|
ef9f39f03563135dc82bcc1a0e27d1ea6a62e525
| 349
|
py
|
Python
|
api/models.py
|
yaroshyk/todo
|
828d5afc9abd85cd7f8f25e4d01f90c765231357
|
[
"MIT"
] | 3
|
2021-05-30T19:04:37.000Z
|
2021-08-30T14:16:57.000Z
|
api/models.py
|
yaroshyk/todo
|
828d5afc9abd85cd7f8f25e4d01f90c765231357
|
[
"MIT"
] | null | null | null |
api/models.py
|
yaroshyk/todo
|
828d5afc9abd85cd7f8f25e4d01f90c765231357
|
[
"MIT"
] | null | null | null |
from django.db import models
| 23.266667
| 50
| 0.696275
|
ef9fbd19157663838a068e78c39ee7e40bade1b6
| 127
|
py
|
Python
|
delightfulsoup/utils/unminify.py
|
etpinard/delightfulsoup
|
6d8cf976bf216e0e311808ffbd871a5915ba7b09
|
[
"MIT"
] | null | null | null |
delightfulsoup/utils/unminify.py
|
etpinard/delightfulsoup
|
6d8cf976bf216e0e311808ffbd871a5915ba7b09
|
[
"MIT"
] | null | null | null |
delightfulsoup/utils/unminify.py
|
etpinard/delightfulsoup
|
6d8cf976bf216e0e311808ffbd871a5915ba7b09
|
[
"MIT"
] | null | null | null |
"""
unminify
========
"""
def unminify(soup, encoding='utf-8'):
"""
"""
return soup.prettify().encode(encoding)
| 10.583333
| 43
| 0.527559
|
ef9feaf45807510f4cf448436f428cc436b0de04
| 744
|
py
|
Python
|
main.py
|
bhaskar-nair2/Coded-Passwords
|
306d01e54bf43c46267ed12c907a49932326b931
|
[
"MIT"
] | null | null | null |
main.py
|
bhaskar-nair2/Coded-Passwords
|
306d01e54bf43c46267ed12c907a49932326b931
|
[
"MIT"
] | null | null | null |
main.py
|
bhaskar-nair2/Coded-Passwords
|
306d01e54bf43c46267ed12c907a49932326b931
|
[
"MIT"
] | null | null | null |
import hashlib
| 28.615385
| 57
| 0.555108
|
efa05bae4ae4b077bd16954d59ff3b20aac6edc2
| 17,709
|
py
|
Python
|
src/upper/utils.py
|
USArmyResearchLab/ARL-UPPER
|
2f79f25338f18655b2a19c8afe3fed267cc0f198
|
[
"Apache-2.0"
] | 4
|
2020-09-14T06:13:04.000Z
|
2020-11-21T07:10:36.000Z
|
src/upper/utils.py
|
USArmyResearchLab/ARL-UPPER
|
2f79f25338f18655b2a19c8afe3fed267cc0f198
|
[
"Apache-2.0"
] | null | null | null |
src/upper/utils.py
|
USArmyResearchLab/ARL-UPPER
|
2f79f25338f18655b2a19c8afe3fed267cc0f198
|
[
"Apache-2.0"
] | 2
|
2020-03-15T17:59:26.000Z
|
2020-09-14T06:13:05.000Z
|
from typing import Tuple
from rdkit import Chem
from rdkit.Chem import Draw
import re
import itertools
import numpy as np
import networkx as nx
import logging
import collections
def FindBreakingBonds(cnids: list, bids: list, bts: list, atomic_nums: list) -> list:
"""Returns bond ids to be broken. Check for double/triple bonds;
if exists, check if heteroatom; if heteroatom, bonds of that C atom
are not broken."""
x1s = []
rmflag = None
for (i, bt) in enumerate(bts):
for (j, x) in enumerate(bt):
if x == Chem.rdchem.BondType.DOUBLE or x == Chem.rdchem.BondType.TRIPLE:
if atomic_nums[cnids[i][j]] != 6 and atomic_nums[cnids[i][j]] != 1:
rmflag = True
break
if not rmflag:
x1s.append(i)
rmflag = None
return [bids[x1] for x1 in x1s]
def FragNeighborBreakingBondTypes(
neighbor_ids: list, fnids: list, faids: list, bond_type_matrix: list
) -> list:
"""Determine broken bond types between fragments and fragment neighbors."""
# neighbor ids of fragment neighbors
nids_of_fnids = [[neighbor_ids[x] for x in y] for y in fnids]
# atom ids 'bonded' to fragment neighbor
int_ = [
[Intersection(x, faids[i])[0] for x in y] for (i, y) in enumerate(nids_of_fnids)
]
return [
[bond_type_matrix[x[i]][y[i]] for (i, z) in enumerate(x)]
for (x, y) in zip(fnids, int_)
]
def EditFragNeighborIds(fnids: list, bbtps: list) -> list:
"""Remove fragment neighbor ids that are doubly/triply bonded to fragment."""
# not double/triple bonds
n23bonds = [
[
(x != Chem.rdchem.BondType.DOUBLE and x != Chem.rdchem.BondType.TRIPLE)
for x in y
]
for y in bbtps
]
# return new fragment neighbor ids
return [
[x for (j, x) in enumerate(y) if n23bonds[i][j]] for (i, y) in enumerate(fnids)
]
def num_atom_rings_1bond(atom_rings: tuple, bond_rings: tuple, num_atoms: int) -> list:
"""Number of rings each atoms is in. Only rings sharing at most
1 bond with neighboring rings are considered."""
# atom ids of rings that share at most 1 bond with neighboring rings
atom_rings_1bond = [
atom_rings[i]
for (i, y) in enumerate(bond_rings)
if not any(
IntersectionBoolean(x, y, 2)
for x in [z for (j, z) in enumerate(bond_rings) if i != j]
)
]
return [sum(i in x for x in atom_rings_1bond) for i in range(num_atoms)]
def UniqueElements(x: list) -> list:
"""Returns unique elements of a list (not order preserving)."""
keys = {}
for e in x:
keys[e] = 1
return list(keys.keys())
def NeighborIDs(neighbor_ids: list, atomic_nums: list, y: list) -> list:
"""Find neighbor ids of a list of atoms (Hs not included)."""
# neighbor ids
z = [neighbor_ids[x] for x in y]
# remove Hs
return [[x for x in y if atomic_nums[x] != 1] for y in z]
def GetFragments(
smiles: str,
mol: Chem.rdchem.Mol,
neighbor_ids: list,
atomic_nums: list,
bond_id_matrix: list,
bond_type_matrix: list,
) -> Tuple[list, list]:
"""Fragment the molecule with isolated carbons method, see
Lian and Yalkowsky, JOURNAL OF PHARMACEUTICAL SCIENCES 103:2710-2723."""
# carbons
cids = [i for (i, x) in enumerate(atomic_nums) if x == 6]
# carbon neighbor ids
cnids = NeighborIDs(neighbor_ids, atomic_nums, cids)
# bond ids
bids = [
[bond_id_matrix[cid][cnid] for cnid in cnids]
for (cid, cnids) in zip(cids, cnids)
]
# bond types
bts = [
[bond_type_matrix[cid][cnid] for cnid in cnids]
for (cid, cnids) in zip(cids, cnids)
]
# broken bond ids
bbids = FindBreakingBonds(cnids, bids, bts, atomic_nums)
# break bonds, get fragments
try:
fmol = Chem.FragmentOnBonds(
mol, UniqueElements(list(itertools.chain.from_iterable(bbids)))
)
    except Exception:
fmol = mol
logging.info("fragmentation exception: %s" % (smiles))
# draw fragments, debugging only, expensive
# Draw.MolToFile(fmol,'fmol.png')
# fragment atom ids
faids = [list(x) for x in Chem.rdmolops.GetMolFrags(fmol)]
# fragment smiles
fsmiles = [Chem.rdmolfiles.MolFragmentToSmiles(fmol, frag) for frag in faids]
# fragment smarts
fsmarts = [Chem.rdmolfiles.MolFragmentToSmarts(fmol, frag) for frag in faids]
return faids, fsmiles, fsmarts
def FragNeighborID(fsmile: str) -> list:
"""End atoms bonded to a fragment."""
    fnid = re.compile(r"(%s|%s)" % (r"\d+(?=\*)", r"\*[^\]]")).findall(fsmile)
fnid = fnid if fnid else ["-1"]
return [int(x) if "*" not in x else 0 for x in fnid]
def FragNeighborIDs(fsmiles: list) -> list:
"""End atoms bonded to fragments."""
fnids = list(map(FragNeighborID, fsmiles))
return [x if (-1 not in x) else [] for x in fnids]
def BondedFragNeighborIDs(true_faids: list, fnids: list) -> list:
"""Neighbor fragment ids (not atom ids)."""
return [[k for (k, x) in enumerate(true_faids) for j in i if j in x] for i in fnids]
def NumHybridizationType(htype: Chem.rdchem.HybridizationType, fnhybrds: list) -> list:
"""Number of specified hybridization type for each fragment."""
return [sum(x == htype for x in fnhybrd) for fnhybrd in fnhybrds]
def Intersection(x: list, y: list) -> list:
"""Elements that match between two lists."""
return list(set(x) & set(y))
def IntersectionBoolean(x: list, y: list, z: int) -> bool:
"""Returns whether or not two lists overlap with at least z common elements."""
return len(set(x) & set(y)) >= z
def FindIdsWithHtype(
fids: list, fnids: list, fnhybrds: list, htype: Chem.rdchem.HybridizationType
) -> list:
"""Find fragment neighbor ids with htype."""
fnhybrds_in_fids = [fnhybrds[x] for x in fids]
fnids_in_fids = [fnids[x] for x in fids]
hids = []
x1 = 0
for x in fnhybrds_in_fids:
x2 = 0
for y in x:
if y == htype:
hids.append(fnids_in_fids[x1][x2])
x2 += 1
x1 += 1
return hids
def AromaticRings(atom_ids_in_rings: list, bond_type_matrix: list) -> list:
"""Return if bonds in rings are aromatic."""
# atom ids in rings
atom_ids_in_rings = [np.array(x) for x in atom_ids_in_rings]
return [
[
(bond_type_matrix[int(x)][int(y)] == Chem.rdchem.BondType.AROMATIC)
for (x, y) in zip(z, z.take(range(1, len(z) + 1), mode="wrap"))
]
for z in atom_ids_in_rings
]
def TrueFragAtomIDs(num_atoms: int, faids: list) -> list:
"""Remove dummy atom ids from fragments."""
return [[x for x in y if x < num_atoms] for y in faids]
def FindCentralCarbonsOfBiphenyl(
biphenyl_substructs: list,
neighbor_ids: list,
atomic_nums: list,
bond_matrix: list,
bond_type_matrix: list,
) -> list:
"""Find central carbons of biphenyl substructures."""
# find one of the central carbons in biphenyl substructures
cc = []
for z in biphenyl_substructs:
for (x, y) in zip(z, z.take(range(1, len(z) + 1), mode="wrap")):
if not bond_matrix[int(x)][int(y)]:
cc.append(int(y))
break
# find carbon that is singly bonded - other central carbon
ccs = []
for (i, y) in enumerate(NeighborIDs(neighbor_ids, atomic_nums, cc)):
for x in y:
if bond_type_matrix[cc[i]][x] == Chem.rdchem.BondType.SINGLE:
ccs.append([cc[i], x])
break
return ccs
def Flatten(x: list) -> list:
"""Flatten a list."""
return list(itertools.chain.from_iterable(x))
def RemoveElements(x: list, y: list) -> list:
"""Remove elements (y) from a list (x)."""
for e in y:
x.remove(e)
return x
def Graph(x: tuple) -> nx.classes.graph.Graph:
"""Make graph structure from atom ids. Used to find independent ring systems."""
# initialize graph
graph = nx.Graph()
# add nodes and edges
for part in x:
graph.add_nodes_from(part)
graph.add_edges_from(zip(part[:-1], part[1:]))
return graph
def NumIndRings(x: tuple) -> int:
"""Number of independent single, fused, or conjugated rings."""
return len(list(nx.connected_components(Graph(x))))
def ReduceFsmarts(fsmarts: list) -> list:
"""Rewrite fragment smarts."""
return [re.sub(r"\d+\#", "#", x) for x in fsmarts]
def EndLabels(fnbbtps: list) -> list:
"""End label of group.
- : bonded to one neighbor and btype = single
= : one neighbor is bonded with btype = double
tri- : one neighbor is bonded with btype = triple
allenic : allenic atom, two neighbors are bonded with btype = double"""
l = ["" for x in fnbbtps]
for (i, x) in enumerate(fnbbtps):
if len(x) == 1 and x.count(Chem.rdchem.BondType.SINGLE) == 1:
l[i] = "-"
continue
if x.count(Chem.rdchem.BondType.DOUBLE) == 1:
l[i] = "="
continue
if x.count(Chem.rdchem.BondType.TRIPLE) == 1:
l[i] = "tri-"
continue
if x.count(Chem.rdchem.BondType.DOUBLE) == 2:
l[i] = "allenic-"
return l
def FragAtomBondTypeWithSp2(
fnhybrds: list,
fnids: list,
neighbor_ids: list,
atomic_nums: list,
faids: list,
bond_type_matrix: list,
) -> list:
"""Bond type between fragment atom and neighboring sp2 atom."""
# fragment ids bonded to one sp2 atom
fids = [
i
for i, x in enumerate(
NumHybridizationType(Chem.rdchem.HybridizationType.SP2, fnhybrds)
)
if x == 1
]
# atom id in fragments corresponding to the sp2 atom
sp2ids = FindIdsWithHtype(fids, fnids, fnhybrds, Chem.rdchem.HybridizationType.SP2)
# neighbor atom ids of sp2 atoms
sp2nids = NeighborIDs(neighbor_ids, atomic_nums, sp2ids)
# intersection between sp2nids and atom ids in fragments with one sp2 atom
faid = list(
itertools.chain.from_iterable(
[Intersection(x, y) for (x, y) in zip([faids[x] for x in fids], sp2nids)]
)
)
# bond type fragment atom and sp2 atom
bts = [bond_type_matrix[x][y] for (x, y) in zip(sp2ids, faid)]
# generate list with bond types for each fragment, zero for fragments without one sp2 atom
afbts = [0] * len(fnhybrds)
for (x, y) in zip(fids, bts):
afbts[x] = y
return afbts
symm_rules: dict = {
2: {
1: {
Chem.rdchem.HybridizationType.SP: 2,
Chem.rdchem.HybridizationType.SP2: 2,
Chem.rdchem.HybridizationType.SP3: 2,
},
2: {
Chem.rdchem.HybridizationType.SP: 1,
Chem.rdchem.HybridizationType.SP2: 1,
Chem.rdchem.HybridizationType.SP3: 1,
},
},
3: {
1: {Chem.rdchem.HybridizationType.SP2: 6, Chem.rdchem.HybridizationType.SP3: 3},
2: {Chem.rdchem.HybridizationType.SP2: 2, Chem.rdchem.HybridizationType.SP3: 1},
3: {Chem.rdchem.HybridizationType.SP2: 1, Chem.rdchem.HybridizationType.SP3: 1},
},
4: {
1: {Chem.rdchem.HybridizationType.SP3: 12},
2: {Chem.rdchem.HybridizationType.SP3: 0},
3: {Chem.rdchem.HybridizationType.SP3: 1},
4: {Chem.rdchem.HybridizationType.SP3: 1},
},
}
def Symm(
smiles: str,
num_attached_atoms: int,
num_attached_types: int,
center_hybrid: Chem.rdchem.HybridizationType,
count_rankings: collections.Counter,
) -> int:
"""Molecular symmetry."""
try:
symm = symm_rules[num_attached_atoms][num_attached_types][center_hybrid]
    except Exception:
logging.warning("symmetry exception: {}".format(smiles))
symm = np.nan
# special case
if symm == 0:
vals = list(count_rankings.values())
symm = 3 if (vals == [1, 3] or vals == [3, 1]) else 2
return symm
def DataReduction(y: dict, group_labels: list) -> None:
"""Remove superfluous data for single molecule."""
for l in group_labels:
y[l] = list(itertools.compress(zip(y["fsmarts"], range(y["num_frags"])), y[l]))
def NFragBadIndices(d: np.ndarray, group_labels: list, smiles: list) -> None:
"""Indices of compounds that do not have consistent number of fragments."""
def NFragCheck(y: dict) -> bool:
"""Check number of fragments and group contributions are consistent."""
num_frags = 0
for l in group_labels:
num_frags += len(y[l])
return num_frags != y["num_frags"]
x = list(map(NFragCheck, d))
indices = list(itertools.compress(range(len(x)), x))
logging.info(
"indices of molecules with inconsistent number of fragments:\n{}".format(
indices
)
)
logging.info("and their smiles:\n{}".format([smiles[x] for x in indices]))
def UniqueGroups(d: np.ndarray, num_mol: int, group_labels: list) -> list:
"""Unique fragments for each environmental group."""
# fragments for each group
groups = [[d[i][j] for i in range(num_mol)] for j in group_labels]
# eliminate fragment ids
groups = [[x[0] for x in Flatten(y)] for y in groups]
return [UniqueElements(x) for x in groups]
def UniqueLabelIndices(flabels: list) -> list:
"""Indices of unique fingerprint labels."""
sort_ = [sorted(x) for x in flabels]
tuple_ = [tuple(x) for x in sort_]
unique_labels = [list(x) for x in sorted(set(tuple_), key=tuple_.index)]
return [[i for (i, x) in enumerate(sort_) if x == y] for y in unique_labels]
def UniqueLabels(flabels: list, indices: list) -> list:
"""Unique fingerprint labels."""
return [flabels[x[0]] for x in indices]
def UniqueFingerprint(indices: list, fingerprint: np.ndarray) -> np.ndarray:
"""Reduce fingerprint according to unique labels."""
fp = np.zeros((fingerprint.shape[0], len(indices)))
for (j, x) in enumerate(indices):
fp[:, j] = np.sum(fingerprint[:, x], axis=1)
return fp
def UniqueLabelsAndFingerprint(
flabels: list, fingerprint: np.ndarray
) -> Tuple[list, np.ndarray]:
"""Reduced labels and fingerprint."""
uli = UniqueLabelIndices(flabels)
ul = UniqueLabels(flabels, uli)
fp = UniqueFingerprint(uli, fingerprint)
return ul, fp
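# Tiny usage sketch for the reduction above (made-up labels): columns 0 and 2
# carry the same label set up to ordering, so their counts are summed.
def _demo_unique_labels_and_fingerprint():
    flabels = [["C", "O"], ["C"], ["O", "C"]]
    fingerprint = np.array([[1.0, 2.0, 3.0]])
    ul, fp = UniqueLabelsAndFingerprint(flabels, fingerprint)
    assert ul == [["C", "O"], ["C"]]
    assert (fp == np.array([[4.0, 2.0]])).all()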
def CountGroups(fingerprint_groups: list, group_labels: list, d: dict) -> list:
"""Count groups for fingerprint."""
return [
[[x[0] for x in d[y]].count(z) for z in fingerprint_groups[i]]
for (i, y) in enumerate(group_labels)
]
def Concat(x: list, y: list) -> list:
"""Concatenate groups and singles in fingerprint."""
return x + y
def MakeFingerprint(
fingerprint_groups: list, labels: dict, d: np.ndarray, num_mol: int
) -> np.ndarray:
"""Make fingerprint."""
# count groups and make fingerprint
fp_groups = [
Flatten(CountGroups(fingerprint_groups, labels["groups"], d[:, 0][i]))
for i in range(num_mol)
]
# reduce singles to requested
fp_singles = [[d[:, 1][i][j] for j in labels["singles"]] for i in range(num_mol)]
# concat groups and singles
return np.array(list(map(Concat, fp_groups, fp_singles)))
def ReduceMultiCount(d: dict) -> None:
"""Ensure each fragment belongs to one environmental group.
Falsify Y, Z when YZ true
Falsify YY, Z when YYZ true
Falsify YYY, Z when YYYZ true
Falsify RG when AR true
..."""
def TrueIndices(group: str) -> list:
"""Return True indices."""
x = d[group]
return list(itertools.compress(range(len(x)), x))
def ReplaceTrue(replace_group: list, actual_group: list) -> None:
"""Replace True elements with False to avoid overcounting fragment contribution."""
replace_indices = list(map(TrueIndices, replace_group))
actual_indices = list(map(TrueIndices, actual_group))
for actual_index in actual_indices:
for (group, replace_index) in zip(replace_group, replace_indices):
int_ = Intersection(replace_index, actual_index)
for x in int_:
d[group][x] = False
replace_groups = [
["Y", "Z"],
["YY", "Z"],
["YYY", "Z"],
["RG"],
["X", "Y", "YY", "YYY", "YYYY", "YYYYY", "Z", "ZZ", "YZ", "YYZ"],
["RG", "AR"],
["AR", "BR2", "BR3", "FU"],
["RG", "AR"],
]
actual_groups = [
["YZ"],
["YYZ"],
["YYYZ"],
["AR"],
["RG", "AR"],
["BR2", "BR3"],
["BIP"],
["FU"],
]
list(map(ReplaceTrue, replace_groups, actual_groups))
def RewriteFsmarts(d: dict) -> None:
"""Rewrite fsmarts to 'fsmiles unique' fsmarts."""
def FsmartsDict(d: dict) -> dict:
"""Dict of original fsmarts to 'fsmiles unique' fsmarts."""
# unique smarts in dataset, mols
fsmarts = UniqueElements(Flatten([x[0]["fsmarts"] for x in d]))
fmols = [Chem.MolFromSmarts(x) for x in fsmarts]
# smiles, not necessarily unique
fsmiles = [Chem.MolToSmiles(x) for x in fmols]
# dict: original fsmarts to 'fsmiles unique' fsmarts
dict_ = collections.defaultdict(lambda: len(dict_))
fsmarts_dict = {}
for (i, x) in enumerate(fsmarts):
fsmarts_dict[x] = fsmarts[dict_[fsmiles[i]]]
return fsmarts_dict
fsmarts_dict = FsmartsDict(d)
# rewrite fsmarts
for (i, y) in enumerate(d):
d[i][0]["fsmarts"] = [fsmarts_dict[x] for x in y[0]["fsmarts"]]
| 28.65534
| 94
| 0.611666
|
efa2c84741d3637cb65c8fc32a0abc9a577fb053
| 3,317
|
py
|
Python
|
025_reverse-nodes-in-k-group.py
|
tasselcui/leetcode
|
5c32446b8b5bf3711cf28e465f448c6a0980f259
|
[
"MIT"
] | null | null | null |
025_reverse-nodes-in-k-group.py
|
tasselcui/leetcode
|
5c32446b8b5bf3711cf28e465f448c6a0980f259
|
[
"MIT"
] | null | null | null |
025_reverse-nodes-in-k-group.py
|
tasselcui/leetcode
|
5c32446b8b5bf3711cf28e465f448c6a0980f259
|
[
"MIT"
] | null | null | null |
# =============================================================================
# # -*- coding: utf-8 -*-
# """
# Created on Sun Aug 5 08:07:19 2018
#
# @author: lenovo
# """
# 25. Reverse Nodes in k-Group
# Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
#
# k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.
#
# Example:
#
# Given this linked list: 1->2->3->4->5
#
# For k = 2, you should return: 2->1->4->3->5
#
# For k = 3, you should return: 3->2->1->4->5
#
# Note:
#
# Only constant extra memory is allowed.
# You may not alter the values in the list's nodes, only nodes itself may be changed.
# =============================================================================
# =============================================================================
# difficulty: hard
# acceptance: 32.7%
# contributor: LeetCode
# =============================================================================
# reverseList(head, k)
# =============================================================================
# def reverseList(head, k):
# pre = None
# cur = head
# while cur and k:
# temp = cur.next
# cur.next = pre
# pre = cur
# cur = temp
# k -= 1
# return (cur, pre)
# =============================================================================
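# A minimal ListNode and Solution sketch (assumed; the accepted submission's
# code is not included above) so the test code below can run. Note this
# recursive version uses O(n/k) stack, unlike the constant-memory submission.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

class Solution(object):
    def reverseKGroup(self, head, k):
        # check that at least k nodes remain; otherwise leave them as-is
        node = head
        for _ in range(k):
            if node is None:
                return head
            node = node.next
        # reverse the rest first, then reverse this group onto it
        prev = self.reverseKGroup(node, k)
        cur = head
        for _ in range(k):
            nxt = cur.next
            cur.next = prev
            prev = cur
            cur = nxt
        return prev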
#------------------------------------------------------------------------------
# note: below is the test code
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
a.next = b
b.next = c
c.next = d
test = a
S = Solution()
result = S.reverseKGroup(test, 3)
#result = a
while result:
print(result.val)
result = result.next
#------------------------------------------------------------------------------
# note: below is the submission detail
# =============================================================================
# Submission Detail
# 81 / 81 test cases passed.
# Status: Accepted
# Runtime: 56 ms
# Submitted: 0 minutes ago
# beats 93.99% python3 submissions
# =============================================================================
| 28.843478
| 186
| 0.403075
|
efa5e69113b0347792c870829c3f62690cf050bb
| 2,708
|
py
|
Python
|
perma_web/perma/tests/test_views_common.py
|
leppert/perma
|
adb0cec29679c3d161d72330e19114f89f8c42ac
|
[
"MIT",
"Unlicense"
] | null | null | null |
perma_web/perma/tests/test_views_common.py
|
leppert/perma
|
adb0cec29679c3d161d72330e19114f89f8c42ac
|
[
"MIT",
"Unlicense"
] | null | null | null |
perma_web/perma/tests/test_views_common.py
|
leppert/perma
|
adb0cec29679c3d161d72330e19114f89f8c42ac
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from perma.urls import urlpatterns
from .utils import PermaTestCase
| 43.677419
| 93
| 0.642171
|
efa68642041c99f789a40f12b356c9ba93e64adc
| 1,708
|
py
|
Python
|
GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
#Note: distances in mm instead of in cm usually used in CMS
generator = cms.EDFilter("Pythia8PtAndLxyGun",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
PGunParameters = cms.PSet(
ParticleID = cms.vint32(-15, -15),
AddAntiParticle = cms.bool(True), # antiparticle has opposite momentum and production point symmetric wrt (0,0,0) compared to corresponding particle
MinPt = cms.double(15.00),
MaxPt = cms.double(300.00),
MinEta = cms.double(-2.5),
MaxEta = cms.double(2.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
LxyMin = cms.double(0.0),
LxyMax = cms.double(550.0), # most tau generated within TOB (55cm)
LzMax = cms.double(300.0),
dxyMax = cms.double(30.0),
dzMax = cms.double(120.0),
ConeRadius = cms.double(1000.0),
ConeH = cms.double(3000.0),
DistanceToAPEX = cms.double(850.0),
LxyBackFraction = cms.double(0.0), # fraction of particles going back towards to center at transverse plan; numbers outside the [0,1] range are set to 0 or 1
LzOppositeFraction = cms.double(0.0), # fraction of particles going in opposite direction wrt to center along beam-line than in transverse plane; numbers outside the [0,1] range are set to 0 or 1
),
Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
psethack = cms.string('displaced taus'),
firstRun = cms.untracked.uint32(1),
PythiaParameters = cms.PSet(parameterSets = cms.vstring())
)
| 47.444444
| 203
| 0.668618
|
efabd851d1c220194dc0597eebe6be9a8b117165
| 5,492
|
py
|
Python
|
questions/models.py
|
stkrizh/otus-django-hasker
|
9692b8060a789b0b66b4cf3591a78e32c8a10380
|
[
"MIT"
] | null | null | null |
questions/models.py
|
stkrizh/otus-django-hasker
|
9692b8060a789b0b66b4cf3591a78e32c8a10380
|
[
"MIT"
] | 10
|
2020-06-05T22:56:30.000Z
|
2022-02-10T08:54:18.000Z
|
questions/models.py
|
stkrizh/otus-django-hasker
|
9692b8060a789b0b66b4cf3591a78e32c8a10380
|
[
"MIT"
] | null | null | null |
import logging
from typing import List, Optional
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
VOTE_UP = 1
VOTE_DOWN = -1
VOTE_CHOICES = ((VOTE_UP, "Vote Up"), (VOTE_DOWN, "Vote Down"))
User = get_user_model()
logger = logging.getLogger(__name__)
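# Assumed sketch of a vote model matching the constants above (the original
# file's model definitions are not included in this snippet):
class Vote(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    value = models.SmallIntegerField(choices=VOTE_CHOICES)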
| 27.878173
| 76
| 0.619993
|
efac998014549cc9e8410daf8e8486e66ec92ef3
| 1,430
|
py
|
Python
|
backend/routers/bookmarks.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | 1
|
2022-02-08T19:35:22.000Z
|
2022-02-08T19:35:22.000Z
|
backend/routers/bookmarks.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
backend/routers/bookmarks.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from fastapi import APIRouter, Request
from ..classes.jwt_authenticator import JWTAuthenticator
from ..repositories import bookmark_repository
from ..schemas.bookmark_schema import RequestSchema, ResponseSchema
router = APIRouter(
prefix='/bookmarks',
tags=['bookmarks'],
)
# Index
# Store a new bookmark, or update if exists
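# Assumed sketch of the two endpoints described above (the original handler
# bodies are not included in this snippet; JWTAuthenticator.verify and the
# repository calls are hypothetical):
@router.get('/')
async def index(request: Request):
    user = JWTAuthenticator.verify(request)
    return bookmark_repository.find_all_by_user(user)

@router.post('/', response_model=ResponseSchema)
async def store(payload: RequestSchema, request: Request):
    user = JWTAuthenticator.verify(request)
    return bookmark_repository.upsert(user, payload, created_at=datetime.now())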
| 31.086957
| 112
| 0.706294
|
efae6e71bf3ea6317e5681aeac0b15d509089b29
| 2,044
|
py
|
Python
|
legos/input_type.py
|
kamongi/legos
|
0b4b5b1300af6677ae4e9c642a211ba3c96726a9
|
[
"MIT"
] | null | null | null |
legos/input_type.py
|
kamongi/legos
|
0b4b5b1300af6677ae4e9c642a211ba3c96726a9
|
[
"MIT"
] | null | null | null |
legos/input_type.py
|
kamongi/legos
|
0b4b5b1300af6677ae4e9c642a211ba3c96726a9
|
[
"MIT"
] | null | null | null |
import os, urllib, datetime, time, sys
import getpass
from franz.openrdf.sail.allegrographserver import AllegroGraphServer
from franz.openrdf.repository.repository import Repository
from franz.miniclient import repository
from franz.openrdf.query.query import QueryLanguage
from franz.openrdf.model import URI
from franz.openrdf.vocabulary.rdf import RDF
from franz.openrdf.vocabulary.rdfs import RDFS
from franz.openrdf.vocabulary.owl import OWL
from franz.openrdf.vocabulary.xmlschema import XMLSchema
from franz.openrdf.query.dataset import Dataset
from franz.openrdf.rio.rdfformat import RDFFormat
from franz.openrdf.rio.rdfwriter import NTriplesWriter
from franz.openrdf.rio.rdfxmlwriter import RDFXMLWriter
| 31.9375
| 119
| 0.741683
|
efaec7b2aeea24ccd064fbf8dcfa28faac52b446
| 1,635
|
py
|
Python
|
analysis/scripts/project_functions_Tom.py
|
data301-2020-winter2/course-project-group_1052
|
3733aacac0812811752d77e5f3d822ef5251c17b
|
[
"MIT"
] | null | null | null |
analysis/scripts/project_functions_Tom.py
|
data301-2020-winter2/course-project-group_1052
|
3733aacac0812811752d77e5f3d822ef5251c17b
|
[
"MIT"
] | 1
|
2021-03-24T17:16:52.000Z
|
2021-03-24T17:16:52.000Z
|
analysis/scripts/project_functions_Tom.py
|
data301-2020-winter2/course-project-group_1052
|
3733aacac0812811752d77e5f3d822ef5251c17b
|
[
"MIT"
] | null | null | null |
import pandas as pd
| 30.277778
| 157
| 0.55107
|
efaec9e129260471f4d26372fd487df99a205a00
| 4,887
|
py
|
Python
|
utils.py
|
loc-trinh/GrandmasterZero
|
58365890fe2b0145344f17be5fb59e08c8f1993a
|
[
"MIT"
] | null | null | null |
utils.py
|
loc-trinh/GrandmasterZero
|
58365890fe2b0145344f17be5fb59e08c8f1993a
|
[
"MIT"
] | null | null | null |
utils.py
|
loc-trinh/GrandmasterZero
|
58365890fe2b0145344f17be5fb59e08c8f1993a
|
[
"MIT"
] | null | null | null |
import pprint
import time
import chess.pgn
import IPython.display as display
import ipywidgets as widgets
| 32.58
| 79
| 0.575609
|
efaff942ac5c5e8e164b052efc98c4fab3a41b3b
| 1,401
|
py
|
Python
|
falco/svc/version_pb2_grpc.py
|
jasondellaluce/client-py
|
694780796289fdd20f1588d06e66c5a1b52ecb26
|
[
"Apache-2.0"
] | 20
|
2019-10-14T15:01:14.000Z
|
2021-08-09T19:13:08.000Z
|
falco/svc/version_pb2_grpc.py
|
jasondellaluce/client-py
|
694780796289fdd20f1588d06e66c5a1b52ecb26
|
[
"Apache-2.0"
] | 45
|
2019-10-14T14:55:30.000Z
|
2022-02-11T03:27:37.000Z
|
falco/svc/version_pb2_grpc.py
|
jasondellaluce/client-py
|
694780796289fdd20f1588d06e66c5a1b52ecb26
|
[
"Apache-2.0"
] | 11
|
2019-10-14T17:41:06.000Z
|
2022-02-21T05:40:44.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import version_pb2 as version__pb2
def add_serviceServicer_to_server(servicer, server):
rpc_method_handlers = {
'version': grpc.unary_unary_rpc_method_handler(
servicer.version,
request_deserializer=version__pb2.request.FromString,
response_serializer=version__pb2.response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'falco.version.service', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 28.591837
| 70
| 0.730193
|
efb0224997c2a73db24a06482baa1e76838ea1f0
| 2,904
|
py
|
Python
|
query.py
|
urmi-21/COVID-biorxiv
|
6dfe713c2634197b6c9983eb2aa3fa6676f7d045
|
[
"MIT"
] | 2
|
2020-06-29T16:55:17.000Z
|
2020-09-21T14:00:16.000Z
|
query.py
|
urmi-21/COVID-biorxiv
|
6dfe713c2634197b6c9983eb2aa3fa6676f7d045
|
[
"MIT"
] | null | null | null |
query.py
|
urmi-21/COVID-biorxiv
|
6dfe713c2634197b6c9983eb2aa3fa6676f7d045
|
[
"MIT"
] | 1
|
2020-09-21T14:00:23.000Z
|
2020-09-21T14:00:23.000Z
|
import sys
import json
import requests
import subprocess
from datetime import datetime
#dict storing data
collection={}
def update_collection():
'''
    Download the bioRxiv and medRxiv COVID-19 collections
'''
link='https://connect.biorxiv.org/relate/collection_json.php?grp=181'
outfile='collection.json'
print('Downloading ...')
for output in execute_commandRealtime(['curl','-o',outfile,link]):
print (output)
def read_collection():
'''
open file
'''
filename='collection.json'
with open(filename) as f:
data = json.load(f)
i=0
for key,value in data.items() :
#print (key,":",value)
        if key=='rels':
            val=data[key]
            print('{} records found'.format(len(val)))
            return val
def filter_date(res,startdate):
'''
keep results by date
'''
filtered=[]
for d in res:
if datetime.strptime(d['rel_date'], '%Y-%m-%d')>=startdate:
filtered.append(d)
return filtered
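#The helpers below are assumed sketches; the original file defines them but
#their bodies are not included in this snippet. Record field names follow
#the 'rel_' convention already used by rel_date above.
def execute_commandRealtime(cmd):
    '''
    run a command and yield its output line by line
    '''
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    for line in iter(popen.stdout.readline, ''):
        yield line
    popen.stdout.close()
    popen.wait()
def get_title(res):
    '''
    titles of the given records
    '''
    return [d['rel_title'] for d in res]
def searchall(terms):
    '''
    keep records whose title or abstract mentions any of the terms
    '''
    return [d for d in collection
            if any(t.lower() in (d.get('rel_title', '') + d.get('rel_abs', '')).lower()
                   for t in terms)]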
#step 1 update collection downloads around 15 MB .json data
#update_collection()
#read collection in memory
collection=read_collection()
#see available terms
#get_terms()
#perform search
#res=search(' RNA-seq')
tosearch=[' RNA-seq','transcriptom','express','sequencing']
res=searchall(tosearch)
print(len(res))
print(len(get_title(res)))
fdate=datetime.strptime('2020-06-25', '%Y-%m-%d')
print('filtering results before',fdate)
final_res=get_title(filter_date(res,fdate))
print(len(final_res))
print('\n'.join(final_res))
| 25.034483
| 82
| 0.64084
|
efb3562ab2f0bc0a7a96ac315758b6464fb9c4ea
| 1,336
|
py
|
Python
|
core/server/wx_handler.py
|
Maru-zhang/FilmHub-Tornado
|
870da52cec65920565439d2d5bb1424ae614665d
|
[
"Apache-2.0"
] | 2
|
2017-07-19T01:24:05.000Z
|
2017-07-19T09:12:46.000Z
|
core/server/wx_handler.py
|
Maru-zhang/FilmHub-Tornado
|
870da52cec65920565439d2d5bb1424ae614665d
|
[
"Apache-2.0"
] | null | null | null |
core/server/wx_handler.py
|
Maru-zhang/FilmHub-Tornado
|
870da52cec65920565439d2d5bb1424ae614665d
|
[
"Apache-2.0"
] | 1
|
2017-07-28T09:31:42.000Z
|
2017-07-28T09:31:42.000Z
|
import tornado.web
from core.logger_helper import logger
from core.server.wxauthorize import WxConfig
from core.server.wxauthorize import WxAuthorServer
from core.cache.tokencache import TokenCache
| 32.585366
| 91
| 0.569611
|
efb4030a249dafcb2be0137ce898a4f573bed62c
| 2,771
|
py
|
Python
|
Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py
|
Lotame/DataStream_Cookbook
|
3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6
|
[
"MIT"
] | 1
|
2022-02-28T10:40:53.000Z
|
2022-02-28T10:40:53.000Z
|
Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py
|
Lotame/DataStream_Cookbook
|
3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6
|
[
"MIT"
] | 2
|
2021-01-08T17:51:10.000Z
|
2021-03-29T11:36:07.000Z
|
Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py
|
Lotame/DataStream_Cookbook
|
3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6
|
[
"MIT"
] | 3
|
2020-01-26T23:31:23.000Z
|
2022-02-18T19:29:30.000Z
|
#!/usr/bin/python
#
# Written in Python 3.6
# Filename:
#
# Mapping_JsonToCsvExtractor.py
#
#
# Basic Usage:
#
# python Mapping_JsonToCsvExtractor.py /directory/containing/datastream/mapping/json/files
#
# Utilities
import sys
import os
import json
import argparse
# write a line to the target file
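# Assumed sketch of the missing helper and entry point (the original bodies
# are not included in this snippet); mapping files are assumed to be
# newline-delimited JSON:
def write_line(target_file, line):
    target_file.write(line + '\n')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('json_dir', help='directory containing DataStream mapping JSON files')
    args = parser.parse_args()
    for name in os.listdir(args.json_dir):
        if not name.endswith('.json'):
            continue
        src = os.path.join(args.json_dir, name)
        dst = src[:-len('.json')] + '.csv'
        with open(src) as fin, open(dst, 'w') as fout:
            for raw in fin:
                record = json.loads(raw)
                write_line(fout, ','.join(str(v) for v in record.values()))
    return 0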
if __name__ == '__main__':
sys.exit(main())
| 35.987013
| 125
| 0.631902
|
efb4a4c9d9efe6b7461d24bff10a128e9ce9296a
| 2,692
|
py
|
Python
|
shuttl/__init__.py
|
shuttl-io/shuttl-cms
|
50c85db0de42e901c371561270be6425cc65eccc
|
[
"MIT"
] | 2
|
2017-06-26T18:06:58.000Z
|
2017-10-11T21:45:29.000Z
|
shuttl/__init__.py
|
shuttl-io/shuttl-cms
|
50c85db0de42e901c371561270be6425cc65eccc
|
[
"MIT"
] | null | null | null |
shuttl/__init__.py
|
shuttl-io/shuttl-cms
|
50c85db0de42e901c371561270be6425cc65eccc
|
[
"MIT"
] | null | null | null |
import sys
from flask import Flask, redirect, request, session, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, current_user
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_wtf.csrf import CsrfProtect
from .sessions import ShuttlSessionInterface
app = Flask(__name__)
app.config.from_object("shuttl.settings.DevelopmentConfig")
app.session_interface = ShuttlSessionInterface()
csrf = CsrfProtect(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
from shuttl.MiddleWare import OrganizationMiddleware
from .Views import *
from .Models import *
from .misc import shuttl, shuttlOrgs
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
from .Commands.FillDB import FillDB
from .Commands.TestSuite import TestSuite
from .Commands.Add import Add
from .Commands.DemoFiller import DemoFiller
from .Commands.ResetPublishers import ResetPublishers
from .Commands.UploadToS3 import UploadS3
from .Templates.Tags import load_tags
# load_tags(app.jinja_env)
manager.add_command('test', TestSuite())
manager.add_command('add', Add())
manager.add_command('filldb', FillDB())
manager.add_command('demofiller', DemoFiller())
manager.add_command("resetQueue", ResetPublishers())
manager.add_command('upload', UploadS3)
app.register_blueprint(shuttl)
app.register_blueprint(shuttlOrgs)
from .Models.Reseller import Reseller, ResellerDoesNotExist
from .Models.organization import Organization, OrganizationDoesNotExistException
from .Models.FileTree.FileObjects.FileObject import FileObject
FileObject.LoadMapper()
| 28.041667
| 106
| 0.76523
|
efb55216c30cf2837e4576480260417e73138279
| 4,088
|
py
|
Python
|
main.py
|
DayvsonAlmeida/Programa-o-Gen-tica
|
6edaceab99c61f55f4157e81fcf7cbad580f81d1
|
[
"MIT"
] | null | null | null |
main.py
|
DayvsonAlmeida/Programa-o-Gen-tica
|
6edaceab99c61f55f4157e81fcf7cbad580f81d1
|
[
"MIT"
] | null | null | null |
main.py
|
DayvsonAlmeida/Programa-o-Gen-tica
|
6edaceab99c61f55f4157e81fcf7cbad580f81d1
|
[
"MIT"
] | null | null | null |
from utils import initialize
from pandas import DataFrame
from genetic import GA
import numpy as np
import argparse
import random
import time
import sys
sys.setrecursionlimit(2000)
random.seed(time.time())
parser = argparse.ArgumentParser()
parser.add_argument('--mr', help='Mutation Rate')
parser.add_argument('--cr', help='Crossover Rate')
parser.add_argument('--size', help='Population Size')
parser.add_argument('--ngen', help='Number of Generations')
parser.add_argument('--base', help='Base de Teste [Easy, Middle, Hard, Newton, Einstein, Pythagorean]')
args, unknown = parser.parse_known_args()
#cls && python main.py --mr 0.05 --cr 0.8 --size 100 --ngen 5000 --base Easy
#cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
mutation_rate = float(args.mr)
crossover_rate = float(args.cr)
size = int(args.size)
ngen = int(args.ngen)
test = args.base
# f(x) = 2*x
easy = {}
easy['x'] = {'a':np.array(np.arange(100), dtype='float64')}
easy['y'] = easy['x']['a']*2
easy['terminal_symb'] = ['a']
# f(x,y,z) = sqrt(x+y)+z
medium = {}
medium['x'] = {'x':np.array(np.arange(100), dtype='float64'),
'y':np.array(np.random.randint(100)),#, dtype='float64'),
'z':np.array(np.random.randint(100))}#, dtype='float64')}
medium['y'] = (medium['x']['x']+medium['x']['y'])**0.5 + medium['x']['z']
medium['terminal_symb'] = ['x','y','z']
# f(x,y,z) = sin(x)+sqrt(y)-tan(z+x)
hard = {}
hard['x'] = {'x':np.array(np.arange(100), dtype='float64'),
'y':np.array(np.random.randint(100), dtype='float64'),#, dtype='float64'),
'z':np.array(np.random.randint(100), dtype='float64')}#, dtype='float64')}
hard['y'] = np.sin(hard['x']['x']) + hard['x']['y']**0.5 - np.tan(hard['x']['z'] + hard['x']['x'])
hard['terminal_symb'] = ['x','y','z']
#Pythagorean Theorem
# c^2 = a^2 + b^2
pythagorean_theorem = {}
pythagorean_theorem['x'] = {'a': np.array(np.random.randint(100, size=100), dtype='float64'),
'b': np.array(np.arange(100), dtype='float64')}
pythagorean_theorem['y'] = pythagorean_theorem['x']['a']**2 +pythagorean_theorem['x']['b']**2
pythagorean_theorem['terminal_symb'] = ['a','b']
#Einstein's Theory of Relativity
# E = m*c^2
# c = 299.792.458 m/s
einstein_relativity = {}
einstein_relativity['x'] = {'m': np.random.random(100)}
einstein_relativity['y'] = einstein_relativity['x']['m']*(299792458**2) #c=89875517873681764
einstein_relativity['terminal_symb'] = ['m']
#Newton's Universal Law of Gravitation
# F = G*m1*m2/d^2
G = 6.674*10E-11
newton_law = {}
newton_law['x'] = {'m1': 10*np.array(np.random.random(100), dtype='float64'),
'm2': np.array(np.random.randint(100, size=100), dtype='float64'),
'd': np.array(np.random.randint(100, size=100)+np.random.rand(100)+10E-11, dtype='float64')}
newton_law['y'] = (newton_law['x']['m1']*newton_law['x']['m2']*G)/(newton_law['x']['d']**2)
newton_law['terminal_symb'] = ['m1','m2','d']
base = {'Easy': easy, 'Pythagorean':pythagorean_theorem,
'Middle': medium, 'Hard': hard,
'Newton': newton_law,
"Einstein": einstein_relativity}
#cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
results = {}
duration = {}
ngen = 2000
for test in ['Hard']:#,'Hard','Hard']:
for crossover_rate in [0.7, 0.8]:
for mutation_rate in [0.05]:#, 0.1, 0.2]:
for size in [10, 100]:
ga = GA(terminal_symb=base[test]['terminal_symb'], x=base[test]['x'], y=base[test]['y'], size=size,
num_generations=ngen, crossover_rate=crossover_rate, mutation_rate=mutation_rate, early_stop=0.1)
ga.run()
loss = ga.loss_history
loss = np.concatenate((loss, [loss[len(loss)-1] for i in range(ngen - len(loss))] ) )
results[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = loss
duration[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = [ga.duration]
df = DataFrame(results)
df.to_csv('Resultados Hard GA.csv', index=False, decimal=',', sep=';')
df = DataFrame(duration)
df.to_csv('Duração Hard GA.csv', index=False, decimal=',', sep=';')
| 40.88
| 107
| 0.634785
|
efb70cec858af5a7d681ffd1896f1dd46735a318
| 1,774
|
py
|
Python
|
Dictionary.py
|
edisoncast/DES-UOC
|
cd179e21c03ad780c9ea3876a6219c32b8e34cad
|
[
"MIT"
] | null | null | null |
Dictionary.py
|
edisoncast/DES-UOC
|
cd179e21c03ad780c9ea3876a6219c32b8e34cad
|
[
"MIT"
] | null | null | null |
Dictionary.py
|
edisoncast/DES-UOC
|
cd179e21c03ad780c9ea3876a6219c32b8e34cad
|
[
"MIT"
] | null | null | null |
#Created by Jhon Edison Castro Sánchez
#Dictionary taken from https://github.com/danielmiessler/SecLists/blob/bb915befb208fd900592bb5a25d0c5e4f869f8ea/Passwords/Leaked-Databases/rockyou.txt.tar.gz
#Used to reproduce the same behaviour as openssl crypt on Linux
#https://docs.python.org/2/library/crypt.html
import crypt
#Function that generates the hashes of the 8-character words
#Uses the hash scheme given in the PEC
#The output is a file with a pair of values per line:
#first the hash, then the password in plain text
#Function that takes the input and output files and calls another function
#that validates the length of the strings
#Function that checks each line and, if it is 8 characters long, writes it to a new file
#With this the hashes can be generated and compared.
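#Minimal sketches of the two helpers described above (assumed; the original
#file's implementations are not included in this snippet)
def DESDictionary(infile, outfile):
    #keep only the 8-character passwords from the wordlist
    with open(infile, 'r') as fin, open(outfile, 'w') as fout:
        for line in fin:
            word = line.strip()
            if len(word) == 8:
                fout.write(word + '\n')
def generateHash(infile, outfile, salt):
    #write one "hash plaintext" pair per line using DES crypt
    with open(infile, 'r') as fin, open(outfile, 'w') as fout:
        for line in fin:
            word = line.strip()
            fout.write(crypt.crypt(word, salt) + ' ' + word + '\n')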
#Calling the functions
if __name__ == "__main__":
with open('rockyou.txt','r')as f:
text = f.read()
DESDictionary('rockyou.txt','pec1.txt')
salt1='tl'
salt2='as'
generateHash('pec1.txt','hashed1.txt', salt1)
generateHash('pec1.txt','hashed2.txt', salt2)
| 39.422222
| 157
| 0.673055
|
efb866a60e5d0e5a7b79c81d5acd283c1c39df92
| 227
|
py
|
Python
|
.test/test/task1/aufgabe4.py
|
sowinski/testsubtree
|
d09b72e6b366e8e29e038445a1fa6987b2456625
|
[
"MIT"
] | null | null | null |
.test/test/task1/aufgabe4.py
|
sowinski/testsubtree
|
d09b72e6b366e8e29e038445a1fa6987b2456625
|
[
"MIT"
] | null | null | null |
.test/test/task1/aufgabe4.py
|
sowinski/testsubtree
|
d09b72e6b366e8e29e038445a1fa6987b2456625
|
[
"MIT"
] | null | null | null |
from nltk.book import *
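# Assumed sketch of letterFrequ, which the original snippet calls but does
# not define: relative frequency of each letter in an NLTK text.
def letterFrequ(text):
    letters = [ch.lower() for token in text for ch in token if ch.isalpha()]
    total = float(len(letters))
    return dict((ch, letters.count(ch) / total) for ch in sorted(set(letters)))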
print(letterFrequ(text1))
print(letterFrequ(text5))
| 15.133333
| 26
| 0.669604
|
efbe9e033668c8068ec57cb141083c350416dc90
| 1,668
|
py
|
Python
|
src/controllers/userController.py
|
gioliveirass/fatec-BDNR-MercadoLivre
|
dd2c407f6728e4f11e8292463cc2ba3ad562de1e
|
[
"MIT"
] | null | null | null |
src/controllers/userController.py
|
gioliveirass/fatec-BDNR-MercadoLivre
|
dd2c407f6728e4f11e8292463cc2ba3ad562de1e
|
[
"MIT"
] | null | null | null |
src/controllers/userController.py
|
gioliveirass/fatec-BDNR-MercadoLivre
|
dd2c407f6728e4f11e8292463cc2ba3ad562de1e
|
[
"MIT"
] | null | null | null |
import connectBD as connectDB
from pprint import pprint
| 30.327273
| 49
| 0.492206
|
efc074386633ac80149d8065fe9a27e3e95d188c
| 374
|
py
|
Python
|
models/sample.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | null | null | null |
models/sample.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | 2
|
2022-01-11T15:50:04.000Z
|
2022-01-13T01:53:53.000Z
|
models/sample.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | null | null | null |
from utils import hrsize
from time import time_ns
| 22
| 88
| 0.59893
|
efc4891e8e505e8dc24f5447323153c9667f9326
| 1,220
|
py
|
Python
|
file-convertors/pdf-to-image/pdf_to_image.py
|
fraserlove/python-productivity-scripts
|
4a667446250042b01e307c7e4be53defc905207e
|
[
"MIT"
] | null | null | null |
file-convertors/pdf-to-image/pdf_to_image.py
|
fraserlove/python-productivity-scripts
|
4a667446250042b01e307c7e4be53defc905207e
|
[
"MIT"
] | null | null | null |
file-convertors/pdf-to-image/pdf_to_image.py
|
fraserlove/python-productivity-scripts
|
4a667446250042b01e307c7e4be53defc905207e
|
[
"MIT"
] | null | null | null |
'''
PDF to Image Converter
Author: Fraser Love, me@fraser.love
Created: 2020-06-13
Latest Release: v1.0.1, 2020-06-21
Python: v3.6.9
Dependencies: pdf2image
Converts multiple PDFs to images (JPEG format) and stores them in a logical folder structure under the desired image directory.
Usage: Update the pdf_dir and img_dir paths to point to the directory that holds the PDF files and the directory that the
generated images should be placed under.
'''
from pdf2image import convert_from_path
import os
pdf_dir = 'pdfs/' # Include trailing forward slash
img_dir = 'images/'
first_page_only = True # Only convert the first page of the pdf to an image
pdf_names = [pdf_name.split('.')[0] for pdf_name in os.listdir(pdf_dir) if pdf_name[-4:] == ".pdf"]
for pdf_name in pdf_names:
pages = convert_from_path('{}{}.pdf'.format(pdf_dir, pdf_name))
if first_page_only:
pages[0].save('{}/{}.jpg'.format(img_dir, pdf_name), 'JPEG')
else:
directory = '{}{}'.format(img_dir, pdf_name)
if not os.path.exists(directory):
os.makedirs(directory)
for i, page in enumerate(pages):
page.save('{}{}/{}-{}.jpg'.format(img_dir, pdf_name, pdf_name, i), 'JPEG')
| 36.969697
| 128
| 0.694262
|
efc5229f2a8966dc64e04e1c67caf2f4bee4df93
| 4,217
|
py
|
Python
|
tests/test/search/test_references_searcher_string.py
|
watermelonwolverine/fvttmv
|
8689d47d1f904dd2bf0a083de515fda65713c460
|
[
"MIT"
] | 1
|
2022-03-30T19:12:14.000Z
|
2022-03-30T19:12:14.000Z
|
tests/test/search/test_references_searcher_string.py
|
watermelonwolverine/fvttmv
|
8689d47d1f904dd2bf0a083de515fda65713c460
|
[
"MIT"
] | null | null | null |
tests/test/search/test_references_searcher_string.py
|
watermelonwolverine/fvttmv
|
8689d47d1f904dd2bf0a083de515fda65713c460
|
[
"MIT"
] | null | null | null |
from fvttmv.exceptions import FvttmvException
from fvttmv.reference_tools import ReferenceTools
from fvttmv.search.__references_searcher_string import ReferencesSearcherString
from test.common import TestCase
| 40.548077
| 96
| 0.591653
|
efc64b0b3d469f8a4e23675a9039dc1fed37be48
| 4,999
|
py
|
Python
|
vtk.py
|
becklabs/geotag-gui
|
c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03
|
[
"MIT"
] | null | null | null |
vtk.py
|
becklabs/geotag-gui
|
c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03
|
[
"MIT"
] | null | null | null |
vtk.py
|
becklabs/geotag-gui
|
c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 19:50:43 2020
@author: beck
"""
import cv2
import datetime
import dateparser
import os
import sys
import pandas as pd
import pytz
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from PIL import Image
import numpy as np
import pytesseract
import imutils
import time
from GPSPhoto import gpsphoto
from threading import Thread
| 35.707143
| 102
| 0.619324
|
efc6952d49bfc96baa0e1e3a017cc887fba50c18
| 4,237
|
py
|
Python
|
ROS_fall_detection/src/detector.py
|
SeanChen0220/Posefall
|
f27eedc0a624cc2875d14ffa276cf96cdfc1b410
|
[
"MIT"
] | 15
|
2021-08-08T08:41:54.000Z
|
2022-03-30T10:12:49.000Z
|
ROS_fall_detection/src/detector.py
|
SeanChen0220/Posefall
|
f27eedc0a624cc2875d14ffa276cf96cdfc1b410
|
[
"MIT"
] | 1
|
2021-11-24T16:51:51.000Z
|
2021-12-03T06:20:11.000Z
|
ROS_fall_detection/src/detector.py
|
SeanChen0220/Posefall
|
f27eedc0a624cc2875d14ffa276cf96cdfc1b410
|
[
"MIT"
] | 3
|
2021-08-08T08:41:55.000Z
|
2022-03-15T07:28:53.000Z
|
#! /home/seanchen/anaconda3/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import sys
import rospy
from std_msgs.msg import String
import torch
import torch.nn.parallel
import torch.nn.functional as F
import numpy as np
import cv2
from LPN import LPN
from fall_net import Fall_Net
from pose_utils import Cropmyimage
from pose_utils import Drawkeypoints
import plot_sen
from time import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
global cam_image
if __name__ == '__main__':
rospy.init_node('detector', anonymous=True)
pub = rospy.Publisher('det_result', Image, queue_size=10)
rospy.Subscriber('cam_image', Image, callback)
    rate = rospy.Rate(50)  # 50 Hz
# model
pose_net = LPN(nJoints=17)
pose_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/pose_net_pred100.pth.tar'))
pose_net.cuda()
fall_net = Fall_Net(64, 48, 17, device=torch.device('cuda'))
fall_net.cuda().double()
fall_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/fall_net_pred5.pth.tar'))
pose_net.eval()
fall_net.eval()
print('Load successfully!')
bridge = CvBridge()
global cam_image
cam_image = np.array([])
fall_count = []
while not rospy.is_shutdown():
rate.sleep()
if not cam_image.any():
print('waiting!')
continue
start = time()
#
# image initialize
#photo_file = '/home/seanchen/robot_fall_det/fall1.jpg'
#input = cv2.imread(photo_file)# cv2 np.array(w,h,channel)
input = cam_image
bbox = [0, 0, input.shape[1], input.shape[0]]
input_image, details = Cropmyimage(input, bbox)
input_image = np.array([input_image.numpy()])
#print(input_image.shape)
input_image = torch.from_numpy(input_image)
#input_image.cuda()
# get posedetails
pose_out = pose_net(input_image.cuda())
fall_out, pose_cor = fall_net(pose_out)
#
#
neck = (pose_cor[:, 5:6, :] + pose_cor[:, 6:7, :]) / 2
pose_cor = torch.cat((pose_cor, neck), dim=1)
pose_cor = pose_cor * 4 + 2.
scale = torch.Tensor([[256, 192]]).cuda()
pose_cor = pose_cor / scale
scale = torch.Tensor([[details[3]-details[1], details[2]-details[0]]]).cuda()
pose_cor = pose_cor * scale
scale = torch.Tensor([[details[1], details[0]]]).cuda()
pose_cor = pose_cor + scale
#pose_cor_1 = (4*pose_cor[:, :, 0]+2.)/64*(details[3]-details[1])/4+details[1]
#pose_cor_2 = (4*pose_cor[:, :, 1]+2.)/48*(details[2]-details[0])/4+details[0]
pose_cor = torch.flip(pose_cor, dims=[2])
ones = torch.ones(1, 18, 1).cuda()
pose_cor = torch.cat((pose_cor, ones), dim=2).cpu().detach().numpy()
#det_result = torch.zeros(64, 48, 3).numpy()
det_result = plot_sen.plot_poses(input, pose_cor)
#print(det_result.shape)
#
#if fall_out.indices == 1:
# print('Down!')
#if fall_out.indices == 0:
# print('Not Down!')
fall_out = torch.max(F.softmax(fall_out, dim=0), dim=0)
fall_count.append(fall_out.indices)
fall_dis = sum(fall_count[len(fall_count)-30 : len(fall_count)])
#print(len(fall_count))
end = time()
run_time = end-start
if fall_dis > 24:
print('Normal!', 1. / run_time)
else:
print('Down!', 1. / run_time)
det_result = bridge.cv2_to_imgmsg(det_result, encoding="passthrough")
pub.publish(det_result)
#print(1. / run_time)
# spin() simply keeps python from exiting until this node is stopped
#rospy.spin()
#while True:
#pass
| 35.605042
| 99
| 0.630399
|
efc705c5b7dd44b358486c8f4931ee3c4faede41
| 3,696
|
py
|
Python
|
tensorflow1.x/sound_conv.py
|
wikeex/tensorflow-learning
|
a6ab7c99455711e9f3c015e0abb04fa58342e0cb
|
[
"MIT"
] | null | null | null |
tensorflow1.x/sound_conv.py
|
wikeex/tensorflow-learning
|
a6ab7c99455711e9f3c015e0abb04fa58342e0cb
|
[
"MIT"
] | null | null | null |
tensorflow1.x/sound_conv.py
|
wikeex/tensorflow-learning
|
a6ab7c99455711e9f3c015e0abb04fa58342e0cb
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from sound_lstm_test import data
batch_size = 10
x = tf.placeholder(tf.float32, [batch_size, 512, 80])
y_ = tf.placeholder(tf.float32, [batch_size, 59])
w_conv1 = tf.Variable(tf.truncated_normal([16, 2, 1, 64], stddev=0.1), name='conv1_w')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]), name='conv1_b')
x_image = tf.reshape(x, [-1, 512, 80, 1])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, w_conv1, strides=[1, 2, 1, 1], padding='VALID') + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
w_conv2 = tf.Variable(tf.truncated_normal([2, 16, 64, 128], stddev=0.1), name='conv2_w')
b_conv2 = tf.Variable(tf.constant(0.1, shape=[128]), name='conv2_b')
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, w_conv2, strides=[1, 1, 1, 1], padding='VALID') + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
w_fc1 = tf.Variable(tf.truncated_normal([61 * 12 * 128, 1024], stddev=0.1), name='fc1_w')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name='fc1_b')
h_pool2_flat = tf.reshape(h_pool2, [-1, 61 * 12 * 128])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
rate = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, rate=rate)
w_fc2 = tf.Variable(tf.truncated_normal([1024, 59], stddev=0.1), name='fc2_w')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[59]), name='fc2_b')
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
variables = tf.trainable_variables()
conv1_variable = [t for t in variables if t.name.startswith('conv1')]
conv2_variable = [t for t in variables if t.name.startswith('conv2')]
fc1_variable = [t for t in variables if t.name.startswith('fc1')]
fc2_variable = [t for t in variables if t.name.startswith('fc2')]
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))
grads_conv1, _ = tf.clip_by_global_norm(tf.gradients(loss, conv1_variable), clip_norm=5)
grads_conv2, _ = tf.clip_by_global_norm(tf.gradients(loss, conv2_variable), clip_norm=5)
grads_fc1, _ = tf.clip_by_global_norm(tf.gradients(loss, fc1_variable), clip_norm=5)
grads_fc2, _ = tf.clip_by_global_norm(tf.gradients(loss, fc2_variable), clip_norm=5)
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, variables), clip_norm=5)
conv1_optimizer = tf.train.AdamOptimizer(0.001)
conv2_optimizer = tf.train.AdamOptimizer(0.001)
fc1_optimizer = tf.train.AdamOptimizer(0.001)
fc2_optimizer = tf.train.AdamOptimizer(0.001)
optimizer = tf.train.AdamOptimizer(0.001)
conv1_op = conv1_optimizer.apply_gradients(zip(grads_conv1, conv1_variable))
conv2_op = conv2_optimizer.apply_gradients(zip(grads_conv2, conv2_variable))
fc1_op = fc1_optimizer.apply_gradients(zip(grads_fc1, fc1_variable))
fc2_op = fc2_optimizer.apply_gradients(zip(grads_fc2, fc2_variable))
op = optimizer.apply_gradients(zip(grads, variables))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
train_data = data.np_load(batch_size=10, batch_type='train/')
test_data = data.np_load(batch_size=10, batch_type='test/')
for i in range(1000):
for _ in range(100):
input_, label = next(train_data)
            # rate: 0 disables dropout; feed e.g. rate: 0.5 here to actually train with dropout
            sess.run([conv1_op, conv2_op, fc1_op, fc2_op], feed_dict={x: input_, y_: label, rate: 0})
test_total_accuracy = 0
        for j in range(10):  # renamed from i, which shadowed the epoch counter above
            test_input_, test_label = next(test_data)
            test_accuracy = sess.run(accuracy, feed_dict={x: test_input_, y_: test_label, rate: 0})
test_total_accuracy += test_accuracy
print('%.3f' % (test_total_accuracy / 10))
| 44.53012
| 116
| 0.717532
|
efc7713fd5edcdf52845e8c0b576613822945b28
| 2,213
|
py
|
Python
|
interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
'''
You're given 2 huge integers represented by linked lists. Each linked list element is a number from 0 to 9999 that represents a number with exactly 4 digits. The represented number might have leading zeros. Your task is to add up these huge integers and return the result in the same format.
Example
For a = [9876, 5432, 1999] and b = [1, 8001], the output should be
addTwoHugeNumbers(a, b) = [9876, 5434, 0].
Explanation: 987654321999 + 18001 = 987654340000.
For a = [123, 4, 5] and b = [100, 100, 100], the output should be
addTwoHugeNumbers(a, b) = [223, 104, 105].
Explanation: 12300040005 + 10001000100 = 22301040105.
Input/Output
[execution time limit] 4 seconds (py3)
[input] linkedlist.integer a
The first number, without its leading zeros.
Guaranteed constraints:
0 ≤ a size ≤ 10^4,
0 ≤ element value ≤ 9999.
[input] linkedlist.integer b
The second number, without its leading zeros.
Guaranteed constraints:
0 ≤ b size ≤ 10^4,
0 ≤ element value ≤ 9999.
[output] linkedlist.integer
The result of adding a and b together, returned without leading zeros in the same format.
'''
# Definition for singly-linked list:
class ListNode(object):
def __init__(self, x):
self.value = x
self.next = None
def addTwoHugeNumbers(a, b):
a = reverseList(a)
b = reverseList(b)
helper = ListNode(None)
r = helper
carry = 0
    while a is not None or b is not None or carry > 0:
        aValue = 0 if a is None else a.value
        bValue = 0 if b is None else b.value
total = aValue + bValue + carry
carry = total // 10000
total = total % 10000
r.next = ListNode(total)
r = r.next
        if a is not None:
            a = a.next
        if b is not None:
            b = b.next
return reverseList(helper.next)
def reverseList(a):
    if a is None:
        return None
stack = []
    while a is not None:
stack.append(a.value)
a = a.next
r = ListNode(stack.pop())
head = r
while len(stack) > 0:
r.next = ListNode(stack.pop())
r = r.next
return head
def printList(a):
    while a is not None:
        print(a.value)
        a = a.next
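# Usage sketch (buildList is a helper added here for illustration, not part
# of the original solution); reproduces the first docstring example:
# 987654321999 + 18001 = 987654340000.
def buildList(values):
    head = ListNode(values[0])
    node = head
    for v in values[1:]:
        node.next = ListNode(v)
        node = node.next
    return head

printList(addTwoHugeNumbers(buildList([9876, 5432, 1999]), buildList([1, 8001])))
# prints 9876, then 5434, then 0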
| 24.054348
| 291
| 0.615002
|
efc7a9d58bb127091a58a8679f3c1f9062aeca6a
| 3,123
|
py
|
Python
|
src/ensae_projects/datainc/data_medical.py
|
sdpython/ensae_projects
|
9647751da053c09fa35402527b294e02a4e6e2ad
|
[
"MIT"
] | 1
|
2020-11-22T10:24:54.000Z
|
2020-11-22T10:24:54.000Z
|
src/ensae_projects/datainc/data_medical.py
|
sdpython/ensae_projects
|
9647751da053c09fa35402527b294e02a4e6e2ad
|
[
"MIT"
] | 13
|
2017-11-20T00:20:45.000Z
|
2021-01-05T14:13:51.000Z
|
src/ensae_projects/datainc/data_medical.py
|
sdpython/ensae_projects
|
9647751da053c09fa35402527b294e02a4e6e2ad
|
[
"MIT"
] | null | null | null |
"""
@file
@brief Functions to handle data coming from
:epkg:`Cancer Imaging Archive`.
"""
import os
import pydicom
import pandas
import cv2
from pyquickhelper.filehelper.synchelper import explore_folder_iterfile # pylint: disable=C0411
def convert_dcm2png(folder, dest, fLOG=None):
"""
Converts all medical images in a folder from format
:epkg:`dcm` to :epkg:`png`.
@param folder source folder
@param dest destination folder
@param fLOG logging function
@return :epkg:`pandas:DataFrame` with many data
The function uses module :epkg:`pydicom`.
"""
if not os.path.exists(dest):
raise FileNotFoundError("Unable to find folder '{}'.".format(dest))
if fLOG is not None:
fLOG("[convert_dcm2png] convert dcm files from '{}'.".format(folder))
fLOG("[convert_dcm2png] into '{}'.".format(dest))
done = {}
rows = []
for name in explore_folder_iterfile(folder, ".*[.]dcm$"):
relname = os.path.relpath(name, folder)
if fLOG is not None:
fLOG("[convert_dcm2png] read {}: '{}'.".format(
len(rows) + 1, relname))
f1 = relname.replace("\\", "/").split("/")[0]
name_ = "img_%06d.png" % len(done)
if "_" in f1:
sub = f1.split('_')[0]
fsub = os.path.join(dest, sub)
if not os.path.exists(fsub):
if fLOG is not None:
fLOG("[convert_dcm2png] create folder '{}'.".format(sub))
os.mkdir(fsub)
new_name = os.path.join(sub, name_)
else:
new_name = name_
# read
ds = pydicom.dcmread(name)
# data
obs = dict(_src=relname, _dest=new_name, _size=len(ds.pixel_array))
_recurse_fill(obs, ds)
rows.append(obs)
# image
full_name = os.path.join(dest, new_name)
if os.path.exists(full_name):
done[name] = full_name
continue
pixel_array_numpy = ds.pixel_array
cv2.imwrite(full_name, pixel_array_numpy) # pylint: disable=E1101
done[name] = full_name
final = os.path.join(dest, "_summary.csv")
if fLOG is not None:
fLOG("[convert_dcm2png] converted {} images.".format(len(rows)))
fLOG("[convert_dcm2png] write '{}'.".format(final))
df = pandas.DataFrame(rows)
df.to_csv(final, index=False, encoding="utf-8")
return df
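# Minimal usage sketch (the folder names are placeholders, not from the
# original module): convert every .dcm under ./dicom_in to .png in ./png_out
# and keep the per-image metadata frame that is also written to _summary.csv.
# df = convert_dcm2png("./dicom_in", "./png_out", fLOG=print)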
| 33.945652
| 96
| 0.565802
|
efca70e500dca1e2e95cfa22dacbadf959220409
| 8,743
|
py
|
Python
|
preprocessing_data.py
|
sharathrao13/seq2seq
|
0768ea0b765ed93617a8e9e5cb907deae042c83d
|
[
"Apache-2.0"
] | 1
|
2021-02-12T00:01:45.000Z
|
2021-02-12T00:01:45.000Z
|
preprocessing_data.py
|
sharathrao13/seq2seq
|
0768ea0b765ed93617a8e9e5cb907deae042c83d
|
[
"Apache-2.0"
] | null | null | null |
preprocessing_data.py
|
sharathrao13/seq2seq
|
0768ea0b765ed93617a8e9e5cb907deae042c83d
|
[
"Apache-2.0"
] | null | null | null |
import re
import collections
import shutil
from tensorflow.python.platform import gfile
num_movie_scripts = 10
vocabulary_size = 10000
fraction_dev = 50
path_for_x_train = 'X_train.txt'
path_for_y_train = 'y_train.txt'
path_for_x_dev = 'X_dev.txt'
path_for_y_dev = 'y_dev.txt'
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
_WORD_SPLIT = re.compile(b"([.,!?\":;)(])")
_DIGIT_RE = re.compile(br"\d")
#FROM DATA UTILS
# Build the dictionary with word-IDs from self-made dictionary and replace rare words with UNK token.
def generate_encoded_files2(x_train_file, y_train_file, x_dev_file, y_dev_file, tokenized_sentences, dictionary):
"""Sentence A is in x_train, Sentence B in y_train"""
encoded_holder = []
unk_id = dictionary['_UNK']
for sentence in tokenized_sentences:
encoded_holder.append(encode_sentence(sentence, dictionary, unk_id))
f1 = open(x_train_file, 'w')
f2 = open(y_train_file, 'w')
    # Keep 1/fraction_dev of the pairs for dev; the original boundary of
    # len/fraction_dev put almost all of the data in dev instead of train.
    fraction = len(encoded_holder) - int(len(encoded_holder) / fraction_dev)
    if len(encoded_holder) % 2 == 0:
        end = len(encoded_holder)
    else:
        end = len(encoded_holder) - 1
for i in xrange(0,fraction,2):
f1.write(str(encoded_holder[i]) + '\n')
f2.write(str(encoded_holder[i+1]) + '\n')
f1.close()
f2.close()
d1 = open(x_dev_file, 'w')
d2 = open(y_dev_file, 'w')
for i in xrange(fraction, end, 2):
d1.write(str(encoded_holder[i]) + '\n')
d2.write(str(encoded_holder[i+1]) + '\n')
d1.close()
d2.close()
def generate_encoded_files(x_train_file, y_train_file, x_dev_file, y_dev_file, tokenized_sentences, dictionary):
"""Sentence A is in x_train and y_train, Sentence B in X_train and y_train"""
encoded_holder = []
f1 = open(x_train_file, 'w')
last_line = tokenized_sentences.pop()
first_line = tokenized_sentences.pop(0)
dev_counter = int(len(tokenized_sentences) - len(tokenized_sentences)/fraction_dev)
unk_id = dictionary['_UNK']
first_line_encoded = encode_sentence(first_line, dictionary, unk_id)
f1.write(first_line_encoded + '\n')
# Creates data for X_train
for x in xrange(dev_counter):
encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)
encoded_holder.append(encoded_sentence)
f1.write(encoded_sentence + '\n') # Write sentence to file
f1.close()
d1 = open(x_dev_file, 'w')
# Creates data for x_dev_file
for x in xrange(dev_counter, len(tokenized_sentences)):
encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)
encoded_holder.append(encoded_sentence)
d1.write(encoded_sentence + '\n') # Write sentence to file
d1.close()
# Creates data for y_train
f2 = open(y_train_file, 'w')
for x in xrange(dev_counter + 1):
f2.write(encoded_holder[x] + '\n') # Write sentence to file
f2.close()
# Creates data for y_dev
d2 = open(y_dev_file, 'w')
for x in xrange(dev_counter + 1, len(tokenized_sentences)):
d2.write(encoded_holder[x] + '\n') # Write sentence to file
last_line_encoded = encode_sentence(last_line, dictionary, unk_id)
d2.write(last_line_encoded + '\n')
d2.close()
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens"""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w]
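# e.g. basic_tokenizer("Hello, world!") returns ['Hello', ',', 'world', '!']
# (illustrative; punctuation listed in _WORD_SPLIT becomes its own token)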
def sentence_to_token_ids(sentence, vocabulary):
"""Convert a string to list of integers representing token-ids.
    For example, a sentence "I have a dog" may become tokenized into
    ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
    "a": 4, "dog": 7} this function will return [1, 2, 4, 7].
Returns:
a list of integers, the token-ids for the sentence.
"""
words = basic_tokenizer(sentence)
return [vocabulary.get(w, UNK_ID) for w in words]
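# Worked example mirroring the docstring (the vocabulary dict is illustrative):
# sentence_to_token_ids("I have a dog", {"I": 1, "have": 2, "a": 4, "dog": 7})
# returns [1, 2, 4, 7]; any word missing from the vocabulary maps to UNK_ID (3).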
# Reads data and puts every sentence in a TWO DIMENSIONAL array as tokens
# data_tokens[0] = ['This', 'is', 'a', 'sentence']
#-----------------------Printing methods----------------------------
| 32.501859
| 196
| 0.649663
|
efcb531829013e0d275069585a78eef303453aa5
| 851
|
py
|
Python
|
dfirtrack_api/serializers.py
|
0xflotus/dfirtrack
|
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
|
[
"MIT"
] | 4
|
2020-03-06T17:37:09.000Z
|
2020-03-17T07:50:55.000Z
|
dfirtrack_api/serializers.py
|
0xflotus/dfirtrack
|
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
|
[
"MIT"
] | null | null | null |
dfirtrack_api/serializers.py
|
0xflotus/dfirtrack
|
632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5
|
[
"MIT"
] | 1
|
2020-03-06T20:54:52.000Z
|
2020-03-06T20:54:52.000Z
|
from rest_framework import serializers
from dfirtrack_main.models import System, Systemtype
| 27.451613
| 89
| 0.654524
|
efcedab7fe4be160e4d524567ddc2da000250f7a
| 185
|
py
|
Python
|
exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py
|
exasol/advanced-analytics-framework
|
78cb9c92fa905132c346d289623598d39def480c
|
[
"MIT"
] | null | null | null |
exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py
|
exasol/advanced-analytics-framework
|
78cb9c92fa905132c346d289623598d39def480c
|
[
"MIT"
] | 12
|
2022-02-21T15:54:47.000Z
|
2022-03-30T08:35:52.000Z
|
exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py
|
exasol/advanced-analytics-framework
|
78cb9c92fa905132c346d289623598d39def480c
|
[
"MIT"
] | null | null | null |
from exasol_advanced_analytics_framework.interface.create_event_handler_udf \
import CreateEventHandlerUDF
udf = CreateEventHandlerUDF(exa)
| 20.555556
| 77
| 0.810811
|
efd02e3f34305859967db711ac4399efc0f26e99
| 7,489
|
py
|
Python
|
corrct/utils_proc.py
|
cicwi/PyCorrectedEmissionCT
|
424449e1879a03cdbb8910c806417962e5b9faff
|
[
"BSD-3-Clause"
] | 3
|
2020-12-08T17:09:08.000Z
|
2022-01-21T22:46:56.000Z
|
corrct/utils_proc.py
|
cicwi/PyCorrectedEmissionCT
|
424449e1879a03cdbb8910c806417962e5b9faff
|
[
"BSD-3-Clause"
] | 11
|
2021-03-19T11:34:34.000Z
|
2022-03-31T13:22:02.000Z
|
corrct/utils_proc.py
|
cicwi/PyCorrectedEmissionCT
|
424449e1879a03cdbb8910c806417962e5b9faff
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T18:27:48.000Z
|
2021-03-11T18:27:48.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 15:25:14 2020
@author: Nicola VIGANÒ, Computational Imaging group, CWI, The Netherlands,
and ESRF - The European Synchrotron, Grenoble, France
"""
import numpy as np
from . import operators
from . import solvers
def get_circular_mask(vol_shape, radius_offset=0, coords_ball=None, mask_drop_off="const", data_type=np.float32):
"""Computes a circular mask for the reconstruction volume.
:param vol_shape: The size of the volume.
:type vol_shape: numpy.array_like
:param radius_offset: The offset with respect to the volume edge.
:type radius_offset: float. Optional, default: 0
:param coords_ball: The coordinates to consider for the non-masked region.
:type coords_ball: list of dimensions. Optional, default: None
    :param mask_drop_off: The mask drop-off function: 'const' or 'sinc'.
    :type mask_drop_off: str. Optional, default: 'const'
    :param data_type: The mask data type.
    :type data_type: numpy.dtype. Optional, default: np.float32
:returns: The circular mask.
:rtype: (numpy.array_like)
"""
vol_shape = np.array(vol_shape, dtype=np.intp)
coords = [np.linspace(-(s - 1) / 2, (s - 1) / 2, s, dtype=data_type) for s in vol_shape]
coords = np.meshgrid(*coords, indexing="ij")
if coords_ball is None:
coords_ball = np.arange(-np.fmin(2, len(vol_shape)), 0, dtype=np.intp)
else:
coords_ball = np.array(coords_ball, dtype=np.intp)
radius = np.min(vol_shape[coords_ball]) / 2 + radius_offset
coords = np.stack(coords, axis=0)
if coords_ball.size == 1:
dists = np.abs(coords[coords_ball, ...])
else:
dists = np.sqrt(np.sum(coords[coords_ball, ...] ** 2, axis=0))
if mask_drop_off.lower() == "const":
return dists <= radius
elif mask_drop_off.lower() == "sinc":
cut_off = np.min(vol_shape[coords_ball]) / np.sqrt(2) - radius
        outer_region = 1 - (dists <= radius)
        outer_vals = 1 - np.sinc((dists - radius) / cut_off)
        return np.fmax(1 - outer_region * outer_vals, 0)
else:
raise ValueError("Unknown drop-off function: %s" % mask_drop_off)
def pad_sinogram(sinogram, width, pad_axis=-1, mode="edge", **kwds):
"""Pads the sinogram.
:param sinogram: The sinogram to pad.
:type sinogram: numpy.array_like
:param width: The width of the padding.
:type width: either an int or tuple(int, int)
:param pad_axis: The axis to pad.
:type pad_axis: int. Optional, default: -1
:param mode: The padding type (from numpy.pad).
:type mode: string. Optional, default: 'edge'.
:param kwds: The numpy.pad arguments.
:returns: The padded sinogram.
:rtype: (numpy.array_like)
"""
    pad_size = [(0, 0)] * len(sinogram.shape)
    if np.isscalar(width):
        # the docstring allows a plain int, for which len(width) would raise
        width = (width, width)
    elif len(width) == 1:
        width = (width[0], width[0])
    pad_size[pad_axis] = width
return np.pad(sinogram, pad_size, mode=mode, **kwds)
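# e.g. pad 16 detector pixels on both sides of the last axis with edge values
# (sino is a placeholder array): sino_p = pad_sinogram(sino, (16, 16))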
def apply_flat_field(projs, flats, darks=None, crop=None, data_type=np.float32):
"""Apply flat field.
:param projs: Projections
:type projs: numpy.array_like
:param flats: Flat fields
:type flats: numpy.array_like
:param darks: Dark noise, defaults to None
:type darks: numpy.array_like, optional
:param crop: Crop region, defaults to None
:type crop: numpy.array_like, optional
    :param data_type: Data type of the processed data, defaults to np.float32
    :type data_type: numpy.dtype, optional
    :return: Flat-field corrected and linearized projections
:rtype: numpy.array_like
"""
if crop is not None:
projs = projs[..., crop[0] : crop[2], crop[1] : crop[3]]
flats = flats[..., crop[0] : crop[2], crop[1] : crop[3]]
if darks is not None:
darks = darks[..., crop[0] : crop[2], crop[1] : crop[3]]
if darks is not None:
projs -= darks
flats -= darks
flats = np.mean(flats.astype(data_type), axis=0)
return projs.astype(data_type) / flats
def apply_minus_log(projs):
"""Apply -log.
:param projs: Projections
:type projs: numpy.array_like
    :return: Linearized (minus-log) projections
:rtype: numpy.array_like
"""
return np.fmax(-np.log(projs), 0.0)
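# Typical preprocessing sketch (array names are placeholders): flat-field
# correct the raw projections, then linearize them with -log.
# projs_lin = apply_minus_log(apply_flat_field(projs, flats, darks=darks))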
def denoise_image(
img, reg_weight=1e-2, stddev=None, error_norm="l2b", iterations=250, axes=(-2, -1), lower_limit=None, verbose=False
):
"""Image denoiser based on (simple, weighted or dead-zone) least-squares and wavelets.
The weighted least-squares requires the local pixel-wise standard deviations.
It can be used to denoise sinograms and projections.
:param img: The image or sinogram to denoise.
:type img: `numpy.array_like`
:param reg_weight: Weight of the regularization term, defaults to 1e-2
:type reg_weight: float, optional
:param stddev: The local standard deviations. If None, it performs a standard least-squares.
:type stddev: `numpy.array_like`, optional
:param error_norm: The error weighting mechanism. When using std_dev, options are: {'l2b'} | 'l1b' | 'hub' | 'wl2' \
(corresponding to: 'l2 dead-zone', 'l1 dead-zone', 'Huber', 'weighted least-squares').
:type error_norm: str, optional
:param iterations: Number of iterations, defaults to 250
:type iterations: int, optional
    :param axes: Axes along which the regularization should be done, defaults to (-2, -1)
    :type axes: int or tuple, optional
    :param lower_limit: Lower clipping limit of the image, defaults to None
    :type lower_limit: float, optional
:param verbose: Turn verbosity on, defaults to False
:type verbose: boolean, optional
:return: Denoised image or sinogram.
:rtype: `numpy.array_like`
"""
OpI = operators.TransformIdentity(img.shape)
if stddev is not None:
if error_norm.lower() == "l2b":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_l2b(img_weight)
elif error_norm.lower() == "l1b":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_l1b(img_weight)
elif error_norm.lower() == "hub":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_Huber(img_weight)
elif error_norm.lower() == "wl2":
(img_weight, reg_weight) = compute_wls_weights(stddev, OpI.T, reg_weight)
data_term = solvers.DataFidelity_wl2(img_weight)
else:
raise ValueError('Unknown error method: "%s". Options are: {"l2b"} | "l1b" | "hub" | "wl2"' % error_norm)
else:
data_term = error_norm
if isinstance(axes, int):
axes = (axes,)
reg_wl = solvers.Regularizer_l1swl(reg_weight, "bior4.4", 2, axes=axes, normalized=False)
sol_wls_wl = solvers.CP(verbose=verbose, regularizer=reg_wl, data_term=data_term)
(denoised_img, _) = sol_wls_wl(OpI, img, iterations, x0=img, lower_limit=lower_limit)
return denoised_img
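# Denoising sketch (parameter values are illustrative): weighted least-squares
# with pixel-wise standard deviations and the wavelet l1 prior set up above.
# img_den = denoise_image(img, reg_weight=1e-2, stddev=img_std, error_norm="wl2")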
| 37.633166
| 120
| 0.667646
|
efd15e7fb718ba74481d809759853c9e66bc24c0
| 80
|
py
|
Python
|
bcap/__init__.py
|
keioku/bcap-python
|
5f1c912fcac515d8f26bda113f644d55a38e15d6
|
[
"MIT"
] | null | null | null |
bcap/__init__.py
|
keioku/bcap-python
|
5f1c912fcac515d8f26bda113f644d55a38e15d6
|
[
"MIT"
] | null | null | null |
bcap/__init__.py
|
keioku/bcap-python
|
5f1c912fcac515d8f26bda113f644d55a38e15d6
|
[
"MIT"
] | null | null | null |
from .b_cap_client import BCapClient
from .b_cap_exception import BCapException
| 26.666667
| 42
| 0.875
|
efd1c5307f2a5343f619264248d49a40d7ec14ee
| 675
|
py
|
Python
|
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
84.py
|
gdmanandamohon/leetcode
|
a691a4e37ee1fdad69c710e3710c5faf8b0a7d76
|
[
"MIT"
] | null | null | null |
'''
@author: l4zyc0d3r
People who are happy make others happy. I am gonna finish it slowly but definitely.cdt
'''
| 29.347826
| 86
| 0.422222
|
efd28e21b75921adf9dd8a8cb27c1319019eacfc
| 402
|
py
|
Python
|
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | null | null | null |
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | 1
|
2021-04-30T20:59:15.000Z
|
2021-04-30T20:59:15.000Z
|
delete_event.py
|
garymcwilliams/py-google-calendar
|
546b412f0ffc1bdc9a81868bddf4de18a0c20899
|
[
"Apache-2.0"
] | null | null | null |
from cal_setup import get_calendar_service
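def main():
    # Hedged sketch: the body of main() is not shown in this dump. The
    # calendar id and event id below are placeholders.
    service = get_calendar_service()
    service.events().delete(calendarId='primary', eventId='EVENT_ID').execute()
    print('Event deleted')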
if __name__ == '__main__':
main()
| 23.647059
| 48
| 0.659204
|
efd293b0fad7a4595a31aa160b88ccb1aa88a456
| 37
|
py
|
Python
|
rses/src/flask_app/blueprints/client/__init__.py
|
iScrE4m/RSES
|
88299f105ded8838243eab8b25ab1626c97d1179
|
[
"MIT"
] | 1
|
2022-02-16T15:06:22.000Z
|
2022-02-16T15:06:22.000Z
|
rses/src/flask_app/blueprints/client/__init__.py
|
djetelina/RSES
|
88299f105ded8838243eab8b25ab1626c97d1179
|
[
"MIT"
] | null | null | null |
rses/src/flask_app/blueprints/client/__init__.py
|
djetelina/RSES
|
88299f105ded8838243eab8b25ab1626c97d1179
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""Client blueprint"""
| 18.5
| 22
| 0.675676
|
efd30ee41ca03d2e23b35a990fdeba3358b3d6c7
| 15,351
|
py
|
Python
|
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
pycdp/asyncio.py
|
HMaker/python-chrome-devtools-protocol
|
a9646a1c4e172ce458c15e2fcb3860ca8c9b4599
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import json
import asyncio
import itertools
import typing as t
from collections import defaultdict
from contextlib import asynccontextmanager
from aiohttp import ClientSession
from aiohttp.client import ClientWebSocketResponse
from aiohttp.http_websocket import WSMsgType, WSCloseCode
from aiohttp.client_exceptions import (
ClientResponseError, ClientConnectorError, ClientConnectionError, ServerDisconnectedError
)
from pycdp.utils import ContextLoggerMixin, LoggerMixin, SingleTaskWorker, retry_on
from pycdp import cdp
T = t.TypeVar('T')
_CLOSE_SENTINEL = object()  # unique sentinel instance; the bare class was being assigned
| 35.7
| 123
| 0.614162
|
efd3aea1c3cf0426d8d1f43ef851162a882e6a5f
| 7,680
|
py
|
Python
|
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1
|
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.os.gsfile import g_file
| 37.101449
| 78
| 0.552344
|
efd3f9d1de68654dbc76d3fbfef70bcad64b263b
| 585
|
py
|
Python
|
main/methods/analysis.py
|
hannxiao/autotrade2
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | null | null | null |
main/methods/analysis.py
|
hannxiao/autotrade2
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | 6
|
2020-06-06T01:05:02.000Z
|
2021-12-13T20:42:16.000Z
|
main/methods/analysis.py
|
hannxiao/autotrade
|
8e6f3d463334b6ea8a18074de58e25c0dab93f39
|
[
"MIT"
] | null | null | null |
from . import toolFuncs
def DefineTrend(data, K):
'''
    Filter out all trends whose range is less than K%
'''
pairs = list(zip(data['Date'], data['Close']))
is_extreme = toolFuncs.extreme_point(data['Close'], K, recognition_method='height')
output = [pairs[i] for i in range(len(is_extreme)) if is_extreme[i]]
return {'DefineTrend': {'name': 'Trend', 'data': output, 'position': 'main', 'type': 'line',
'lineStyle': {'normal': {'width': 3}, 'showSymbol':False}
}
}
| 32.5
| 98
| 0.529915
|
efd40da6f7f764459934c721ccc5ec880311c2e3
| 607
|
py
|
Python
|
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | 13
|
2018-05-23T07:07:28.000Z
|
2021-05-28T07:37:30.000Z
|
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | null | null | null |
FaceClassify/losses/TripletMarginLoss.py
|
CharlesPikachu/CharlesFace
|
90bfe38c58068228d0069dce43b55b2570acaa16
|
[
"MIT"
] | null | null | null |
# Author:
# Charles
# Function:
# Triplet loss function.
import torch
from torch.autograd import Function
import sys
sys.path.append('../')
from utils.utils import *
| 26.391304
| 66
| 0.744646
|