hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
47af52c1759d6ca7b81f36810e1191b6fa34e7eb
| 11,920
|
py
|
Python
|
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import ops
| 40.27027
| 127
| 0.560906
|
47b3262789a56b500e24bd5503fa34b4ab5f6bca
| 1,031
|
py
|
Python
|
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
import numpy as np
import imageio
from evaluation.eval_utils import to_img_padded, format_img, init_evaluation
# Entry point — main() is defined elsewhere in the original file (not visible
# in this chunk). NOTE(review): the call is unguarded by `if __name__ == "__main__"`,
# so it runs on import as well.
main()
| 34.366667
| 87
| 0.681862
|
47b4d5911e43771cc3188d71f2b90fe1b6287fdc
| 22,950
|
py
|
Python
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 10
|
2020-09-25T08:49:19.000Z
|
2022-03-30T01:29:22.000Z
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 14
|
2020-05-22T08:00:32.000Z
|
2020-12-24T23:38:05.000Z
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 5
|
2020-05-22T03:43:20.000Z
|
2021-05-25T06:54:59.000Z
|
#
# HttpServer.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Server to implement the http part of the oneM2M Mcx communication interface.
#
from __future__ import annotations
import logging, sys, traceback, urllib3
from copy import deepcopy
from typing import Any, Callable, Tuple, cast
import flask
from flask import Flask, Request, make_response, request
from urllib3.exceptions import RequestError
from Configuration import Configuration
from Constants import Constants as C
from Types import ReqResp, ResourceTypes as T, Result, ResponseCode as RC, JSON, Conditions
from Types import Operation, CSERequest, RequestHeaders, ContentSerializationType, RequestHandler, Parameters, RequestArguments, FilterUsage, FilterOperation, DesiredIdentifierResultType, ResultContentType, ResponseType
import CSE, Utils
from Logging import Logging as L, LogLevel
from resources.Resource import Resource
from werkzeug.wrappers import Response
from werkzeug.serving import WSGIRequestHandler
from werkzeug.datastructures import MultiDict
from webUI import WebUI
from helpers.BackgroundWorker import *
#
# Types definitions for the http server
#
# Signature every Flask route handler in this server conforms to:
# it receives the path string and returns a werkzeug Response.
FlaskHandler = Callable[[str], Response]
""" Type definition for flask handler. """
##########################################################################
#
# Own request handler.
# Actually only to redirect some logging of the http server.
# This handler does NOT handle requests.
#
| 39.43299
| 223
| 0.699695
|
47b6367947784f5f8c60ac4a630ae41b0271c546
| 2,855
|
py
|
Python
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2016-04-18T14:43:28.000Z
|
2016-04-18T14:43:28.000Z
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2015-03-19T20:09:31.000Z
|
2015-03-19T20:56:13.000Z
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2021-07-21T16:45:20.000Z
|
2021-07-21T16:45:20.000Z
|
"""
Tests for the sprockets.mixins.statsd package
"""
import mock
import socket
# Prefer the unittest2 backport when installed (Python 2.6-era feature
# backports); otherwise fall back to the standard library unittest.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
from tornado import httputil
from tornado import web
from sprockets.mixins import statsd as statsd
| 34.817073
| 77
| 0.633275
|
47bbb88fe7ad9a14195f7bde44006fac967ad0e2
| 2,044
|
py
|
Python
|
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from base import BaseObject
from base import CryptoBase
from base import FileIO
from datadb2.core.dmo import BaseDB2Client
def cendant(self) -> BaseDB2Client:
    """
    Open a DB2 connection using the 'cendant' entry of the loaded config.

    :return:
        a connected BaseDB2Client
        NOTE(review): relies on self._connect / self._values / self._config,
        which belong to the enclosing class — not visible in this chunk.
    """
    return self._connect(self._values(self._config['cendant']))
if __name__ == "__main__":
    # BuildDb2Url().wft_dev()
    # BuildDb2Url().wft_prod()
    # smoke-test the 'cendant' connection (BuildDb2Url is defined elsewhere)
    BuildDb2Url().cendant()
| 28.788732
| 81
| 0.581703
|
47bbc3f593c6dfe99cc6291d9534d485f7b0f42d
| 3,462
|
py
|
Python
|
nussl/transformers/transformer_deep_clustering.py
|
KingStorm/nussl
|
78edfdaad16845fc705cefb336a7e6e5923fbcd4
|
[
"MIT"
] | 1
|
2018-10-22T19:30:45.000Z
|
2018-10-22T19:30:45.000Z
|
dataHelper/nussl/transformers/transformer_deep_clustering.py
|
AleXander-Tsui/Audio-Localization-and-Seperation
|
17d40e72b406d62ca5cb695938b50c6412f9524a
|
[
"MIT"
] | null | null | null |
dataHelper/nussl/transformers/transformer_deep_clustering.py
|
AleXander-Tsui/Audio-Localization-and-Seperation
|
17d40e72b406d62ca5cb695938b50c6412f9524a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Deep Clustering modeller class
"""
from .. import torch_imported
# torch is an optional dependency: import it only when the package-level
# probe (torch_imported) says it is available.
if torch_imported:
    import torch
    import torch.nn as nn
import numpy as np
| 33.941176
| 94
| 0.625361
|
47bcdf89bfb403747fce6b37d8765b1f6f980172
| 431
|
py
|
Python
|
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
""" 67 - Faa um programa que mostre a tabuada de vrios nmeros, um de cada vez, para cada valor digitado pelo
usurio. O programa ser interrompido quando o nmero solicitado for negativo. """
while True:
n = int(input('Informe um nmero para ver sua tabuada: '))
if n < 0:
break
print('-' * 13)
for m in range(1, 11):
print(f'{n} x {m} = {n*m}')
print('-' * 13)
print('Programa encerrado.')
| 33.153846
| 111
| 0.62645
|
47be1f989acf928be71983840ea1023cdafbcb67
| 1,569
|
py
|
Python
|
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image,Category,Location
# def delete_image(request, pk):
# gallery = get_object_or_404(Cat, pk=pk)
# if request.method == 'POST':
# gallery.delete()
# return redirect('/')
# return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery})
| 34.108696
| 105
| 0.662843
|
47bf6e3c9c36dabf9fe1d3cb252c2d9d2f56f9af
| 843
|
py
|
Python
|
tests/tests_query_operations/table_models.py
|
Robinson04/StructNoSQL
|
335c63593025582336bb67ad0b0ed39d30800b74
|
[
"MIT"
] | 3
|
2020-10-30T23:31:26.000Z
|
2022-03-30T21:48:40.000Z
|
tests/tests_query_operations/table_models.py
|
Robinson04/StructNoSQL
|
335c63593025582336bb67ad0b0ed39d30800b74
|
[
"MIT"
] | 42
|
2020-09-16T15:23:11.000Z
|
2021-09-20T13:00:50.000Z
|
tests/tests_query_operations/table_models.py
|
Robinson04/StructNoSQL
|
335c63593025582336bb67ad0b0ed39d30800b74
|
[
"MIT"
] | 2
|
2021-01-03T21:37:22.000Z
|
2021-08-12T20:28:52.000Z
|
from typing import Dict
from StructNoSQL import TableDataModel, BaseField, MapModel
| 42.15
| 104
| 0.778173
|
47c00f796dfbf64fa498d1661e0430227f50240a
| 3,301
|
py
|
Python
|
__init__.py
|
mchorse/io_export_bobj
|
2de7a55c59a5e4ece5ae047cceaa16da94272685
|
[
"CNRI-Python"
] | 2
|
2021-10-04T17:03:20.000Z
|
2021-12-07T20:20:49.000Z
|
__init__.py
|
mchorse/io_export_bobj
|
2de7a55c59a5e4ece5ae047cceaa16da94272685
|
[
"CNRI-Python"
] | null | null | null |
__init__.py
|
mchorse/io_export_bobj
|
2de7a55c59a5e4ece5ae047cceaa16da94272685
|
[
"CNRI-Python"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# Blender add-on metadata block read by Blender at install/registration time.
bl_info = {
    "name": "Blockbuster extended OBJ format",
    "author": "Campbell Barton, Bastien Montagne, McHorse",
    "version": (0, 1, 0),
    "blender": (2, 77, 0),
    "location": "File > Export",
    "description": "Export Blockbuster OBJ models (meshes, armatures and keyframes)",
    "warning": "",
    "category": "Export"
}
import bpy
from bpy.props import (BoolProperty, FloatProperty, StringProperty, EnumProperty)
from bpy_extras.io_utils import (ExportHelper, orientation_helper_factory, path_reference_mode, axis_conversion)
IOOBJOrientationHelper = orientation_helper_factory("IOOBJOrientationHelper", axis_forward='Z', axis_up='Y')
# Export panel
# Register and stuff
def menu_func_export(self, context):
    """Menu callback: add the Blockbuster OBJ exporter to File > Export.

    NOTE(review): ExportOBJ is not defined in the visible part of this file
    (the "Export panel" section appears trimmed) — confirm it exists.
    """
    self.layout.operator(ExportOBJ.bl_idname, text="Blockbuster OBJ (.bobj)")
def register():
    """Register this module's classes with Blender and append the export
    entry to the File > Export menu."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Undo register(): drop this module's classes and remove the
    File > Export menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
if "bpy" in locals():
import importlib
if "export_bobj" in locals():
importlib.reload(export_bobj)
if __name__ == "__main__":
register()
| 39.771084
| 131
| 0.723417
|
47c2fc4cc67997a7602b32b94d673235ee2e4478
| 1,303
|
py
|
Python
|
dashboard/migrations/0010_auto_20191214_1611.py
|
BDALab/GENEActiv-sleep-analyses-system
|
f0458de041153f2dee240a53571149827de00a2e
|
[
"MIT"
] | null | null | null |
dashboard/migrations/0010_auto_20191214_1611.py
|
BDALab/GENEActiv-sleep-analyses-system
|
f0458de041153f2dee240a53571149827de00a2e
|
[
"MIT"
] | null | null | null |
dashboard/migrations/0010_auto_20191214_1611.py
|
BDALab/GENEActiv-sleep-analyses-system
|
f0458de041153f2dee240a53571149827de00a2e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-12-14 15:11
import django.utils.timezone
from django.db import migrations, models
| 32.575
| 161
| 0.593246
|
47c5b5de088c43f83c5e3e066561ed05afd513fb
| 1,666
|
py
|
Python
|
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 3
|
2019-05-08T14:49:10.000Z
|
2021-01-20T13:22:45.000Z
|
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | null | null | null |
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 2
|
2020-01-28T14:37:06.000Z
|
2020-04-03T13:37:14.000Z
|
# -*- coding=utf-8 -*-
import pygame
from bf_form import BFForm
from bf_button import BFButton
from globals import LanguageConfigParser, LanguageLib
| 35.446809
| 137
| 0.660264
|
47c64ebe5bf8c8e7b695f55fd8ecece7fcce4585
| 3,084
|
py
|
Python
|
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 14:09:44 2018
@author: VeNoMzZxHD
"""
import tkinter
from tkinter.filedialog import askopenfilename
from collections import Counter
import re
import string
# Returns the contents of the corpus text file as a single string.
def readFile():
    """Read the training corpus ('big.txt') and return its full text.

    (A tkinter file picker was used here previously; the filename is now
    hard-coded.)
    """
    corpus_path = 'big.txt'
    with open(corpus_path) as corpus_file:
        return corpus_file.read()
# Returns Counter dictionary containing words and their number of occurences within the input file
# Returns list of possible permutations of removing a single char from input word.
# NOTE(review): countWords and findCorrection are defined elsewhere in the
# original file; their definitions are not visible in this chunk.
countDict = countWords(readFile())
# Interactive loop: ask for words until the user types 'quit'.
while True:
    inword = input("Type quit to exit, or input word: \n")
    if inword.lower() == 'quit':
        break
    correction = findCorrection(inword)
    # findCorrection apparently signals "no suggestion" with an empty string
    if correction=="":
        print("No correction found")
    else:
        print(correction)
| 28.293578
| 97
| 0.648508
|
47c7ce2b3e6297aeb01c2a6dd339609f2dbc4c40
| 9,826
|
py
|
Python
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 2
|
2018-01-23T22:08:32.000Z
|
2018-03-11T18:32:53.000Z
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 4
|
2017-10-24T19:13:40.000Z
|
2018-06-05T22:16:45.000Z
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 2
|
2017-10-24T18:55:45.000Z
|
2018-01-26T05:11:38.000Z
|
#!/usr/bin/env python
# **************************************** #
#############
# IMPORTS #
#############
# standard python packages
import ConfigParser, copy, inspect, logging, os, sys
from Node import Node
# Make the bundled iapyx sources importable (needed by the
# `from utils import tools` import that follows).
if not os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) in sys.path :
  sys.path.append( os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) )
from utils import tools
# **************************************** #
#########
# EOF #
#########
| 32.006515
| 134
| 0.479849
|
47c7ee324c762d85e146cce680e1d27dab07ca7e
| 219
|
py
|
Python
|
freezegame/ladder.py
|
mattfister/pybacon
|
c864e5f5c872f92b3c694f0ef83feb0f20f93193
|
[
"MIT"
] | 2
|
2017-02-06T14:49:48.000Z
|
2021-03-20T08:19:01.000Z
|
freezegame/ladder.py
|
mattfister/pybacon
|
c864e5f5c872f92b3c694f0ef83feb0f20f93193
|
[
"MIT"
] | null | null | null |
freezegame/ladder.py
|
mattfister/pybacon
|
c864e5f5c872f92b3c694f0ef83feb0f20f93193
|
[
"MIT"
] | 2
|
2017-11-04T10:13:59.000Z
|
2020-04-24T05:15:33.000Z
|
from freezegame.sprite import Sprite
| 31.285714
| 120
| 0.657534
|
47ca2154dad4d9f3a8ceb261cf0f46981b5b61af
| 2,656
|
py
|
Python
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 1
|
2019-01-18T03:50:46.000Z
|
2019-01-18T03:50:46.000Z
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 50
|
2018-01-24T18:04:08.000Z
|
2019-01-03T03:30:30.000Z
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 2
|
2018-02-12T15:20:52.000Z
|
2019-01-18T03:51:52.000Z
|
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.blocks import StructBlock, CharBlock
from wagtail.core.fields import StreamField
from wagtail.core.models import Page
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtailmarkdown.blocks import MarkdownBlock
from invest.blocks.location import LocationAccordionItemBlock
from invest.blocks.markdown import MarkdownAccordionItemBlock
| 29.511111
| 73
| 0.657756
|
47ca616f814b4735648b4fa4271fc547d28d5fca
| 44
|
py
|
Python
|
serve.py
|
xsblanket/sweetie
|
cce71db39961fa017f888afef756f3522f549716
|
[
"MIT"
] | null | null | null |
serve.py
|
xsblanket/sweetie
|
cce71db39961fa017f888afef756f3522f549716
|
[
"MIT"
] | 2
|
2021-03-16T10:28:33.000Z
|
2021-03-17T09:11:37.000Z
|
serve.py
|
xsblanket/sweetie
|
cce71db39961fa017f888afef756f3522f549716
|
[
"MIT"
] | 1
|
2021-03-16T10:03:19.000Z
|
2021-03-16T10:03:19.000Z
|
from utility.sweetie import serve
# Invoke the imported entry point immediately when this script runs.
serve()
| 14.666667
| 34
| 0.772727
|
47ca87bbbe5378196163b9f006e09077555d7b34
| 985
|
py
|
Python
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
# Namespace constant used by the generated dataclasses — presumably the XML
# target namespace of the source schema (xsdata-generated module).
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
| 22.906977
| 68
| 0.636548
|
47cc63cf4b5393de155d0003d5754fcb3e06068b
| 889
|
py
|
Python
|
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | 4
|
2021-01-11T02:14:59.000Z
|
2022-02-15T09:20:25.000Z
|
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | null | null | null |
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | 2
|
2021-12-14T10:20:00.000Z
|
2022-02-20T01:05:18.000Z
|
#!/usr/bin/env python3
# pylint: disable=no-self-use
"""Test unit."""
import unittest
if __name__ == "__main__":
unittest.main()
| 25.4
| 70
| 0.651294
|
47cd00f1c6e6fe88e15b29bda7971944f1ec4024
| 2,127
|
py
|
Python
|
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
''' This file creates the objects used to access the scrobbling service API.
No actual credentials should be stored here!
This module will be imported and used by the main code.
'''
import os
import sys
import pylast
# API credentials: prefer environment variables, fall back to placeholder
# strings. (The "my_apy_secret" typo is a runtime value, preserved as-is.)
try:
    API_KEY = os.environ["LASTFM_API_KEY"]
    API_SECRET = os.environ["LASTFM_API_SECRET"]
except KeyError:
    API_KEY = "my_api_key"
    API_SECRET = "my_apy_secret"
# Same pattern for user credentials; the fallback branch prints what has been
# resolved so far to help diagnose a missing environment.
try:
    lastfm_username = os.environ["LASTFM_USERNAME"]
    lastfm_password_hash = os.environ["LASTFM_PASSWORD_HASH"]
    print("Environment variables for user OK")
except KeyError:
    # In order to perform a write operation you need to authenticate yourself
    lastfm_username = "my_username"
    # You can use either use the password, or find the hash once and use that
    lastfm_password_hash = pylast.md5("my_password")
    print(lastfm_password_hash)
    # lastfm_password_hash = "my_password_hash"
    print("Environment variables for user missing! So far:")
    print(f"API_KEY: {API_KEY}")
    print(f"API_SECRET: {API_SECRET}")
    print(f"LFM USER: {lastfm_username}")
    print(f"LPW HASH: {lastfm_password_hash}")
# Shared network handle the rest of the code uses to talk to Last.fm.
lastfm_network = pylast.LastFMNetwork(
    api_key=API_KEY,
    api_secret=API_SECRET,
    username=lastfm_username,
    password_hash=lastfm_password_hash,
)
# presumably the "Artist - Title" separator — confirm against callers
TRACK_SEPARATOR = " - "
| 29.136986
| 77
| 0.686883
|
47cd0ed87b30c0eeeb6aca7161bf214f8970893c
| 4,802
|
py
|
Python
|
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
import os
import urwid
from .globalBase import *
from .urwidHelper import *
from .tool import *
#import dc
from .myutil import *
| 27.44
| 106
| 0.659933
|
47cf4c7848eb2692961ae1f8fb2074a86bde0da7
| 8,595
|
py
|
Python
|
Code to apply on BS output/Python/makeTtests.py
|
albertocottica/community-management-simulator
|
e942f854f41705fcb114a79308536a2765896e60
|
[
"MIT"
] | null | null | null |
Code to apply on BS output/Python/makeTtests.py
|
albertocottica/community-management-simulator
|
e942f854f41705fcb114a79308536a2765896e60
|
[
"MIT"
] | null | null | null |
Code to apply on BS output/Python/makeTtests.py
|
albertocottica/community-management-simulator
|
e942f854f41705fcb114a79308536a2765896e60
|
[
"MIT"
] | null | null | null |
# runs t-tests over the null hypothesis
# avg_gini if (priority == "newer") == avg_gini if (priority == "more active")
import csv
import numpy as np
from scipy.stats import ttest_ind
from scipy.special import stdtr
def readCsvFile(fileName):
    '''
    (string) => list of dicts
    Load the CSV file called fileName and return every row as a dict
    keyed by the header columns.
    '''
    with open(fileName) as handle:
        return list(csv.DictReader(handle))
def computeTtests (data):
    '''
    (list of dicts) => list of dicts

    For every point of the simulated parameter space, run t-tests over the
    null hypothesis that the 'newer' and 'more active' priorities produce
    the same average outcome variables.

    Returns one dict per parameter combination, e.g.:
    [
        {
            'globalchattiness': value, 'intimacystrength': value,
            'randomisedchattiness': value, 'policy': value,
            '<var>_n_mean': ..., '<var>_ma_mean': ...,
            '<var>_t': ..., '<var>_pVal': ...,
            'ms_gini_*': ..., 'nc_gini_*': ...
        },
        ...
    ]
    '''
    results = []
    # sweep the parameter space explored by the simulation
    for globChat in [.1, .2, .4]:
        for intStren in [1, 5, 11]:
            for randChat in ["true", "false"]:
                for pol in ["engage", "both"]:
                    # keep track of parameters' values
                    result = {
                        'globalchattiness': globChat,
                        'intimacystrength': intStren,
                        'randomisedchattiness': randChat,
                        'policy': pol,
                    }

                    def matches(row):
                        # rows belonging to this parameter combination
                        # (CSV fields arrive as strings, hence the casts)
                        return (float(row['globalchattiness']) == globChat and
                                int(row['intimacystrength']) == intStren and
                                row['randomisedchattiness'] == randChat and
                                row['policy'] == pol)

                    # Non-Gini variables: collect the raw per-run values,
                    # separated by priority, and run a two-sample t-test.
                    for nonGiniVar in ['dropouts', 'totalmembershipstrength', 'totalcomments', 'mgmteffort']:
                        moreActiveArray = []
                        newerArray = []
                        for row in data:
                            if matches(row):
                                if row['priority'] == 'newer':
                                    newerArray.append(float(row[nonGiniVar]))
                                elif row['priority'] == 'more active':
                                    moreActiveArray.append(float(row[nonGiniVar]))
                        # save the means relative to the moreActive and newer cases
                        result[nonGiniVar + '_n_mean'] = float(sum(newerArray)) / len(newerArray)
                        result[nonGiniVar + '_ma_mean'] = float(sum(moreActiveArray)) / len(moreActiveArray)
                        # When T is positive, moreActive > newer.
                        # BUG FIX: the original passed equal_var='False' — a
                        # non-empty string, hence truthy — which silently ran a
                        # pooled-variance Student t-test. Pass the boolean to
                        # get Welch's t-test as intended.
                        thisTest = ttest_ind(moreActiveArray, newerArray, equal_var=False)
                        result[nonGiniVar + '_t'] = float(thisTest[0])
                        result[nonGiniVar + '_pVal'] = float(thisTest[1])

                    # Gini variables: the CSV already carries per-block means
                    # and in-block standard errors, so build the t statistic
                    # manually from those summary statistics.
                    for giniVar in ['ms', 'nc']:
                        for row in data:
                            if matches(row):
                                if row['priority'] == 'newer':
                                    newerMean = float(row[giniVar + '_avg_gini'])
                                    newerSE = float(row[giniVar + '_inblockse'])
                                elif row['priority'] == 'more active':
                                    moreActiveMean = float(row[giniVar + '_avg_gini'])
                                    moreActiveSE = float(row[giniVar + '_inblockse'])
                        # save mean values
                        result[giniVar + '_gini_n_mean'] = newerMean
                        result[giniVar + '_gini_ma_mean'] = moreActiveMean
                        # 24 is presumably the number of runs per block — TODO confirm
                        tStat = (moreActiveMean - newerMean) / np.sqrt((moreActiveSE**2 + newerSE**2)/24)
                        result[giniVar + '_gini_t'] = tStat
                        # Welch-Satterthwaite-style degrees of freedom, then a
                        # two-sided p-value via the Student t CDF (stdtr)
                        dof = (moreActiveSE/24 + newerSE/24)**2 / (moreActiveSE**2/(24**2*23) + newerSE**2/(24**2*23))
                        result[giniVar + '_gini_pVal'] = 2*stdtr(dof, -np.abs(tStat))
                    results.append(result)
    return results
def saveCsvFile(data, filename):
    '''
    (list of dicts. str) => NoneType
    Write the list of dicts to a CSV file called filename, with a header
    built from the sorted keys of the first record.
    '''
    columns = sorted(data[0].keys())
    with open(filename, 'w') as outfile:
        writer = csv.DictWriter(outfile, fieldnames=columns)
        writer.writeheader()
        writer.writerows(data)
def findTrends(data):
    '''
    (list of dicts) => NoneType
    Print, for each priority, in how many parameter combinations that
    priority shows a statistically significant advantage on each outcome
    variable (t-sign plus p-value threshold).
    NOTE: this function uses Python 2 print statements — the module is
    Python 2 code.
    '''
    # counters: combinations where "more active" wins on each outcome
    moreActiveMoreInclusivity1 = 0
    moreActiveMoreInclusivity2 = 0
    moreActiveMoreActivity = 0
    moreActiveMoreDiversity = 0
    moreActiveMoreLoyalty = 0
    for row in data:
        if row['dropouts_t'] < 0 and row['dropouts_pVal'] < .01:
            moreActiveMoreInclusivity1 += 1
        if row['ms_gini_t'] < 0 and row['ms_gini_pVal'] < .01:
            moreActiveMoreInclusivity2 += 1
        if row['totalcomments_t'] > 0 and row['totalcomments_pVal'] < .01:
            moreActiveMoreActivity += 1
        # NOTE(review): threshold here is 0.1, not .01 as for the other
        # variables — confirm this is intentional
        if row['nc_gini_t'] < 0 and row['nc_gini_pVal'] < 0.1:
            moreActiveMoreDiversity += 1
        if row['totalmembershipstrength_t'] > 0 and row['totalmembershipstrength_pVal'] < .01:
            moreActiveMoreLoyalty += 1
    print 'Priority "more active" has FEWER dropouts: ' + str(moreActiveMoreInclusivity1)
    print 'Priority "more active" has MORE inclusivity (lower Gini on ms): ' + str(moreActiveMoreInclusivity2)
    print 'Priority "more active" has MORE comments: ' + str(moreActiveMoreActivity)
    print 'Priority "more active" has MORE diversity (lower gini on nc): ' + str(moreActiveMoreDiversity)
    print 'Priority "more active" has MORE loyalty (higher total membership strength): ' + str(moreActiveMoreLoyalty)
    # mirror counters: combinations where "newer" wins (opposite t-signs)
    newerMoreInclusivity1 = 0
    newerMoreInclusivity2 = 0
    newerMoreActivity = 0
    newerMoreDiversity = 0
    newerMoreLoyalty = 0
    for row in data:
        if row['dropouts_t'] > 0 and row['dropouts_pVal'] < .01:
            newerMoreInclusivity1 += 1
        if row['ms_gini_t'] > 0 and row['ms_gini_pVal'] < .01:
            newerMoreInclusivity2 += 1
        if row['totalcomments_t'] < 0 and row['totalcomments_pVal'] < .01:
            newerMoreActivity += 1
        if row['nc_gini_t'] > 0 and row['nc_gini_pVal'] < 0.1:
            newerMoreDiversity += 1
        if row['totalmembershipstrength_t'] < 0 and row['totalmembershipstrength_pVal'] < .01:
            newerMoreLoyalty += 1
    print 'Priority "newer" has FEWER dropouts: ' + str(newerMoreInclusivity1)
    print 'Priority "newer" has MORE inclusivity (lower Gini on ms): ' + str(newerMoreInclusivity2)
    print 'Priority "newer" has MORE comments: ' + str(newerMoreActivity)
    print 'Priority "newer" has MORE diversity (lower gini on nc): ' + str(newerMoreDiversity)
    print 'Priority "newer" has MORE loyalty (higher total membership strength): ' + str(newerMoreLoyalty)
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute path to the author's machine —
    # must be edited to run elsewhere.
    dirPath = '/Users/albertocottica/github/local/community-management-simulator/Data/'
    allData = readCsvFile(dirPath + 'ready-4-tTest.csv')
    results = computeTtests(allData)
    saveCsvFile(results, dirPath + 'tTestsResults.csv')
    findTrends(results)
| 47.225275
| 118
| 0.532984
|
47d18703506147df7e77ebf700589e58f57e4508
| 350
|
py
|
Python
|
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
'''
Given an array nums, we define a running sum of the array as
runningSum[i] = sum(nums[0] + nums[1] + ... + nums[i]).
Return the running sum of nums.
'''
| 21.875
| 97
| 0.58
|
47d20ca2d18b88c21b0a0f588589e8dcfe03114e
| 81
|
py
|
Python
|
examples/import.py
|
vic/typhon
|
72b8ceb34f431d93321fee6046b08094afbc213c
|
[
"BSD-3-Clause"
] | 14
|
2015-01-06T10:59:09.000Z
|
2021-01-09T17:57:52.000Z
|
examples/import.py
|
vic/typhon
|
72b8ceb34f431d93321fee6046b08094afbc213c
|
[
"BSD-3-Clause"
] | 1
|
2017-04-08T17:35:03.000Z
|
2017-04-08T17:35:03.000Z
|
examples/import.py
|
vic/typhon
|
72b8ceb34f431d93321fee6046b08094afbc213c
|
[
"BSD-3-Clause"
] | 3
|
2015-05-09T15:16:37.000Z
|
2016-01-26T07:57:59.000Z
|
# -*- coding: utf-8 -*-
import imported
import foo.bar
print(imported.__doc__)
| 11.571429
| 23
| 0.691358
|
47d397381f542a9f03743386be3039bce2cd0248
| 9,576
|
py
|
Python
|
participants_codes/konnectomics/utils/submission.py
|
orlandi/connectomicsPerspectivesPaper
|
98060e613d58c8e1ef9d14eb213439ae4cb8272b
|
[
"MIT"
] | null | null | null |
participants_codes/konnectomics/utils/submission.py
|
orlandi/connectomicsPerspectivesPaper
|
98060e613d58c8e1ef9d14eb213439ae4cb8272b
|
[
"MIT"
] | null | null | null |
participants_codes/konnectomics/utils/submission.py
|
orlandi/connectomicsPerspectivesPaper
|
98060e613d58c8e1ef9d14eb213439ae4cb8272b
|
[
"MIT"
] | null | null | null |
# Copyright 2014 Alistair Muldal <alistair.muldal@pharm.ox.ac.uk>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import warnings
import datetime
import subprocess
from matplotlib import pyplot as plt
from itertools import izip
from sklearn import metrics
def adjacency2vec(M):
"""
Unpack an n-by-n directed adjacency matrix to a 1D vector of connection
weights
Arguments
----------
M: 2D float array
adjacency matrix, where: M[i, j] corresponds to w(i->j)
Returns
----------
ij: 2D int array
2-by-npairs array of row/column indices
w_ij: 1D int array
corresponding weights, i.e. w(i->j)
"""
ncells = M.shape[0]
ij = all_directed_connections(ncells)
# sanity check
# npairs = ncells * (ncells - 1)
# assert ij.shape[1] == npairs
i, j = ij
w_ij = M[i, j]
return ij, w_ij
def vec2adjacency(ij, connected):
"""
Pack a 1D vector of connection weights into an n-by-n directed adjacency
matrix
Arguments
----------
ij: 2D int array
2-by-npairs array of row/column indices
w_ij: 1D int array
corresponding weights, i.e. w(i->j)
Returns
----------
M: 2D float array
adjacency matrix, where: M[i, j] corresponds to w(i->j)
M[j, i] corresponds to w(j->i)
"""
npairs = connected.size
# 0 = ncells**2 - ncells -npairs
roots = np.roots((1, -1, -npairs))
ncells = int(roots[roots > 0])
M = np.zeros((ncells, ncells), dtype=connected.dtype)
for (ii, jj), cc in izip(ij.T, connected):
M[ii, jj] = cc
return M
def real2dense(real_connections, n=None, adj=False):
"""
The network data provided for the challenge lists connections weighted '-1'
(which aren't actually present in the simulation), and does not list any
weights for pairs of nodes that are not connected.
This function converts the provided data into a more convenient dense vector
format compatible with adjacency2vec and roc, where every possible directed
pair of nodes has a True/False weight.
Arguments:
-----------
real_connections: 2D np.ndarray or tables.(C)Array
npairs-by-3 array, whose columns represent (i, j, connected(i->j)).
i, j are assumed to follow MATLAB indexing convenions (i.e. they
start at 1).
n: positive int, optional
the total number of nodes (cells). if unspecified, this is taken to
be the maximum index in the first two columns of real_connections
plus 1.
Returns:
----------
ij: 2D int array
2-by-npairs array of row/column indices
connected:
boolean vector, True where i->j is connected
"""
if n is None:
n = int(real_connections[:, :2].max())
if np.any(real_connections[:, :2] > n):
raise ValueError('real_connections contains indices > n')
# handle CArrays
real_connections = real_connections[:]
# cast to integers
real_connections = real_connections.astype(np.int)
# find the indices of the cells that are genuinely connected ('1' means
# connection, either '-1', '0' or omission means no connection).
ij_con = real_connections[(real_connections[:, 2] == 1), :2].T
# we subtract 1 from the indices because MATLAB-style indexing starts at 1,
# whereas Python indexing starts at 0
ij_con -= 1
# we'll do this the lazy way - construct an adjacency matrix from the
# connected indices ...
M = np.zeros((n, n), dtype=np.bool)
M[ij_con[0, :], ij_con[1, :]] = True
if adj:
return M
else:
# ... then convert this directly to the desired format
ij, connected = adjacency2vec(M)
return ij, connected
def all_directed_connections(n):
"""
For an n-by-n adjacency matrix, return the indices of the nodes for every
possible directed connection, i.e. (i->j) and (j->i), but not (i->i) or
(j->j)
Arguments:
n: int
number of nodes
Returns:
idx: 2D int array
[2, n * (n - 1)] array of i, j indices
"""
# all possible pairs of indices (including repeated indices)
all_idx = np.indices((n, n)).T.reshape(-1, 2).T
# remove repeated indices
repeats = (all_idx[0, :] == all_idx[1, :])
idx = all_idx[:, ~repeats]
return idx
def roc(weights, ground_truth, nsteps=None, do_plot=False, show_progress=True):
"""
Compute ROC curve and performance metrics for a given set of posterior
connection probabilities and the set of ground-truth connections
Arguments:
----------
weights: 1D float array
vector of posterior probabilities for each possible pairwise
connection
ground_truth: 1D bool array
vector of ground-truth connections
nsteps: int, optional
number of linear steps between the minimum and maximum values of
weights at which to compute the FPR and TPR. if unspecified, every
unique value of weights is used, so that the ROC curve is computed
exactly
do_plot: bool, optional
make a pretty plot
show_progress: bool, optional
show a pretty progress bar
Returns:
---------
thresh: 1D float array
vector of threshold values used for computing the ROC curve
fpr: 1D float array
false-positive rate at each threshold value
tpr: 1D float array
true-positive rate at each threshold value
pl10: float
10% performance level (tpr at the threshold value that gives 10%
false-positives)
auc: float
area under the ROC curve
"""
# make sure we're dealing with 1D arrays
weights = weights.ravel()
ground_truth = ground_truth.ravel()
if weights.size != ground_truth.size:
raise ValueError('Input vectors must have the same number of elements')
fpr, tpr, thresh = metrics.roc_curve(ground_truth, weights, pos_label=True)
auc = metrics.roc_auc_score(ground_truth, weights)
# 'performance level' is defined as the fraction of true positives at 10%
# false-positives
pl10 = tpr[fpr.searchsorted(0.1, side='left')]
if do_plot:
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.hold(True)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_aspect('equal')
ax.plot(fpr, tpr, '-b', lw=2)
ax.set_xlabel('False-positive rate')
ax.set_ylabel('True-positive rate')
ax.set_title('ROC')
bbox_props = dict(boxstyle='round', fc='w', ec='0.5')
arrow_props = dict(arrowstyle='->', color='k', linewidth=2)
ax.annotate('AUC = %.4g' % auc, xy=(0.9, 0.1),
xycoords='axes fraction', ha='right', va='bottom',
bbox=bbox_props, fontsize=16)
plt.show()
return thresh, fpr, tpr, pl10, auc
| 29.018182
| 80
| 0.611529
|
47d3c6d2f3f9ad6b0e3ffc64b6de5590845ebff4
| 30,949
|
py
|
Python
|
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | 19
|
2022-01-27T11:12:43.000Z
|
2022-03-06T00:09:47.000Z
|
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | null | null | null |
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | 6
|
2022-01-28T15:51:19.000Z
|
2022-02-20T17:39:46.000Z
|
#!/usr/bin/env python3
# by affggh
# Apcache 2.0
import os
import sys
import shutil
import zipfile
import subprocess
import platform
import requests
if os.name == 'nt':
import tkinter as tk
if os.name == 'posix':
from mttkinter import mtTkinter as tk
# While Load some need thread funcion on Linux it will failed
# Just use mttkinter replace regular tkinter
from tkinter.filedialog import *
from tkinter import ttk
from tkinter import *
#import ttkbootstrap as ttk
import time
import webbrowser
import threading
# Hide console , need ```pip install pywin32```
# import win32gui, win32con
# the_program_to_hide = win32gui.GetForegroundWindow()
# win32gui.ShowWindow(the_program_to_hide, win32con.SW_HIDE)
if __name__=='__main__':
main()
| 40.509162
| 267
| 0.534751
|
47d6eebb03b0ea6e42bdb7a3c49d5f0b5a409c1e
| 49
|
py
|
Python
|
__init__.py
|
faridfibrianto/pyrex
|
bedd088370e90bceefa45788cddf952c03bea945
|
[
"MIT"
] | null | null | null |
__init__.py
|
faridfibrianto/pyrex
|
bedd088370e90bceefa45788cddf952c03bea945
|
[
"MIT"
] | null | null | null |
__init__.py
|
faridfibrianto/pyrex
|
bedd088370e90bceefa45788cddf952c03bea945
|
[
"MIT"
] | 1
|
2021-07-03T04:49:53.000Z
|
2021-07-03T04:49:53.000Z
|
from .helpers import *
from .decorators import *
| 16.333333
| 25
| 0.755102
|
47d719fa6ddaa13236b1671d0f097880df05054a
| 3,010
|
py
|
Python
|
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("..")
from gym_snake.envs.node import Node
from gym_snake.envs.snake_env import action_to_vector
from gym_snake.envs.snake_env import SnakeAction
from gym_snake.envs.snake_env import SnakeCellState
from gym_snake.envs.snake_env import rotate_action_clockwise
from gym_snake.envs.snake_env import rotate_action_counter_clockwise
from gym_snake.envs.snake_env import invert_action
from gym_snake.queue import Queue
| 38.589744
| 168
| 0.639535
|
47d7a401f53299346b73e5c7c5fe542392290c13
| 22,070
|
py
|
Python
|
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | null | null | null |
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | 2
|
2021-03-31T18:38:57.000Z
|
2021-12-13T19:46:50.000Z
|
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import asyncio
import logging
import sys
import requests
import datetime
from bs4 import BeautifulSoup
from util import *
from wowapi import WowApi, WowApiException, WowApiConfigException
from killpoints import KillPoints
from math import ceil
base_wow_progress = "http://www.wowprogress.com"
base_wow_armory = "http://us.battle.net/wow/en/character/{0}/{1}/advanced"
base_wc_logs = "https://www.warcraftlogs.com:443/v1"
class_array = [ "Warrior", "Paladin", "Hunter", "Rogue", "Priest", "Death Knight",
"Shaman", "Mage", "Warlock", "Monk", "Druid", "Demon Hunter" ]
race_map = {
1: "Human", 2: "Orc", 3: "Dwarf", 4: "Night Elf", 5: "Undead", 6: "Tauren", 7: "Gnome",
8: "Troll", 9: "Goblin", 10: "Blood Elf", 11: "Draenei", 22: "Worgen",
24:"Pandaren", 25:"Pandaren", 26:"Pandaren"
}
artifactLevelCost = {
1: { "cost": 100, "total": 100 },
2: { "cost": 300, "total": 400 },
3: { "cost": 325, "total": 725 },
4: { "cost": 350, "total": 1075 },
5: { "cost": 375, "total": 1450 },
6: { "cost": 400, "total": 1850 },
7: { "cost": 425, "total": 2275 },
8: { "cost": 450, "total": 3250 },
9: { "cost": 525, "total": 3875 },
10: { "cost": 625, "total": 4625 },
11: { "cost": 750, "total": 4625 },
12: { "cost": 875, "total": 5500 },
13: { "cost": 1000, "total": 6500 },
14: { "cost": 6840, "total": 13340 },
15: { "cost": 8830, "total": 22170 },
16: { "cost": 11280, "total": 33450 },
17: { "cost": 14400, "total": 47850 },
18: { "cost": 18620, "total": 66470 },
19: { "cost": 24000, "total": 90470 },
20: { "cost": 30600, "total": 121070 },
21: { "cost": 39520, "total": 160590 },
22: { "cost": 50880, "total": 211470 },
23: { "cost": 64800, "total": 276270 },
24: { "cost": 82500, "total": 358770 },
25: { "cost": 105280, "total": 464050 },
26: { "cost": 138650, "total": 602700 },
27: { "cost": 182780, "total": 785480 },
28: { "cost": 240870, "total": 1026350 },
29: { "cost": 315520, "total": 1341870 },
30: { "cost": 417560, "total": 1759430 },
31: { "cost": 546000, "total": 2305430 },
32: { "cost": 718200, "total": 3023630 },
33: { "cost": 946660, "total": 3970290 },
34: { "cost": 1245840, "total": 5216130 },
35: { "cost": 1635200, "total": 6851330 },
36: { "cost": 1915000, "total": 8766330 },
37: { "cost": 2010000, "total": 10776330 },
38: { "cost": 2110000, "total": 12886330 },
39: { "cost": 2215000, "total": 15101330 },
40: { "cost": 2325000, "total": 17426330 },
41: { "cost": 2440000, "total": 19866330 },
42: { "cost": 2560000, "total": 22426330 },
43: { "cost": 2690000, "total": 25116330 },
44: { "cost": 2825000, "total": 27941330 },
45: { "cost": 2965000, "total": 30906330 },
46: { "cost": 3115000, "total": 34021330 },
47: { "cost": 3270000, "total": 37291330 },
48: { "cost": 3435000, "total": 40726330 },
49: { "cost": 3605000, "total": 44331330 },
50: { "cost": 3785000, "total": 48116330 },
51: { "cost": 3975000, "total": 52091330 },
52: { "cost": 4175000, "total": 56266330 },
53: { "cost": 4385000, "total": 60651330 },
54: { "cost": 4605000, "total": 65256330 }
}
artifactKnowledge = {
0: 1,
1: 1.25,
2: 1.5,
3: 1.9,
4: 2.4,
5: 3,
6: 3.75,
7: 4.75,
8: 6,
9: 7.5,
10: 9.5,
11: 12,
12: 15,
13: 18.75,
14: 23.5,
15: 29.5,
16: 37,
17: 46.5,
18: 58,
19: 73,
20: 91,
21: 114,
22: 143,
23: 179,
24: 224,
25: 250
}
apRewards = {
"+2-3": 500,
"+4-6": 800,
"+7-9": 1000,
"10+": 1200,
}
set_wow_api_key()
set_wclogs_api_key()
# Logger info
discord_logger = logging.getLogger('discord')
discord_logger.setLevel(logging.CRITICAL)
bot = commands.Bot(command_prefix='!', description ='Progress Bot')
"""
Events Region
"""
"""
Commands Region
"""
bot.run('MjczNTgyNTAwNTk1MzY3OTM2.C2lq3A.imEczu1BMAqrOYJfZEBTPJavOvc')
| 37.343486
| 161
| 0.631899
|
47d9292775bb73955a326acc7b317a3683aeeec2
| 10,974
|
py
|
Python
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | 1
|
2019-01-09T14:38:44.000Z
|
2019-01-09T14:38:44.000Z
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | 4
|
2021-03-09T00:03:21.000Z
|
2022-02-12T05:33:21.000Z
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Module for reading and parsing values from Fingrid APIs."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo,
# Teemu Laukkarinen ja Ulla-Talvikki Virta
import copy
import csv
import datetime
import json
import requests
import time
try:
import adapters.common_utils as common_utils
import adapters.rest_utils as rest_utils
except:
# used when running the module directly
import common_utils
import rest_utils
| 43.896
| 118
| 0.609714
|
47de6098d15918068f7f92c961c579b6396d5610
| 1,222
|
py
|
Python
|
scripts/acr_make_overview.py
|
vogelbac/LAB-QA2GO-
|
be434da7399d396413309f947f4b634d8fae9a17
|
[
"BSD-3-Clause"
] | 14
|
2019-02-07T10:50:58.000Z
|
2021-09-03T16:11:00.000Z
|
scripts/acr_make_overview.py
|
vogelbac/LAB-QA2GO-
|
be434da7399d396413309f947f4b634d8fae9a17
|
[
"BSD-3-Clause"
] | 6
|
2019-01-28T09:19:27.000Z
|
2021-09-09T06:56:42.000Z
|
scripts/acr_make_overview.py
|
vogelbac/LAB-QA2GO
|
be434da7399d396413309f947f4b634d8fae9a17
|
[
"BSD-3-Clause"
] | 4
|
2019-01-28T09:00:58.000Z
|
2021-05-25T13:54:40.000Z
|
# script to generate an overview file of all measurentoverview files
import os
import sys
| 31.333333
| 504
| 0.720949
|
47de90ccb81d9e53366bae7e288742ccd559ea5d
| 328
|
py
|
Python
|
tests/test_protein.py
|
kalekundert/autosnapgene
|
cc019b89f7ab8842d95fd268c24987aabbe1c0b6
|
[
"MIT"
] | null | null | null |
tests/test_protein.py
|
kalekundert/autosnapgene
|
cc019b89f7ab8842d95fd268c24987aabbe1c0b6
|
[
"MIT"
] | null | null | null |
tests/test_protein.py
|
kalekundert/autosnapgene
|
cc019b89f7ab8842d95fd268c24987aabbe1c0b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import autosnapgene as snap
| 23.428571
| 49
| 0.707317
|
47e1291a9d383474886f3b6cb416cfcb840ff9bb
| 1,514
|
py
|
Python
|
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
from typing import Set, List
from inspectable import Inspectable
from interactable import Interactable
from items import Batteries, Hammer
from usable import Usable
| 36.926829
| 118
| 0.640687
|
47e13e680106c821ea95e6afefa8c3825aa0febc
| 97
|
py
|
Python
|
holagit.py
|
fvenya7/practica1
|
2c7084629f0c5e3788a377f8c9d916c28fe188f4
|
[
"MIT"
] | null | null | null |
holagit.py
|
fvenya7/practica1
|
2c7084629f0c5e3788a377f8c9d916c28fe188f4
|
[
"MIT"
] | null | null | null |
holagit.py
|
fvenya7/practica1
|
2c7084629f0c5e3788a377f8c9d916c28fe188f4
|
[
"MIT"
] | null | null | null |
print("aprendiendo git")
a=12
print("ya qued")
print("actualizacion 1")
print("catualizacion 2")
| 19.4
| 24
| 0.742268
|
47e169f6fbed0c98822c2408dc1e36d39f35b41d
| 463
|
py
|
Python
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 27
|
2018-11-23T21:37:14.000Z
|
2021-11-22T08:44:35.000Z
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 6
|
2019-07-09T16:26:56.000Z
|
2021-05-17T17:29:42.000Z
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 4
|
2019-06-11T06:44:30.000Z
|
2021-02-27T14:49:02.000Z
|
import argparse
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--frames-path", type=str)
parser.add_argument("--output-path", type=str)
args = parser.parse_args()
frame_paths = [p for p in Path(args.frames_path).iterdir()]
with open(args.output_path, "w") as f:
for p in sorted(Path(args.frames_path).iterdir()):
f.write('{"image_path": "%s"}\n' % str(p))
| 33.071429
| 63
| 0.652268
|
47e20a2e721763c69d92b54d367291736f3e69c7
| 26,979
|
py
|
Python
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 2
|
2020-05-07T13:58:31.000Z
|
2021-01-27T09:33:07.000Z
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 47
|
2019-09-17T19:20:07.000Z
|
2022-03-20T12:33:28.000Z
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 1
|
2019-10-02T10:42:35.000Z
|
2019-10-02T10:42:35.000Z
|
import _thread
import sqlalchemy
from app.document import bp
from flask_login import login_required, current_user
from flask import render_template, redirect, url_for, request, send_file, flash, jsonify
from flask import current_app
from app.document.general import create_document, check_and_remove_document, save_image, \
get_collaborators_select_data, save_collaborators, is_document_owner, is_user_owner_or_collaborator,\
remove_image, get_document_images, get_page_layout, get_page_layout_text, update_confidences, is_user_trusted,\
is_granted_acces_for_page, is_granted_acces_for_document, get_line_image_by_id, get_sucpect_lines_ids, \
compute_scores_of_doc, skip_textline, get_line, is_granted_acces_for_line, create_string_response, \
update_baselines, make_image_preview, find_textlines, get_documents_with_granted_acces, \
check_and_change_public_document, is_document_public
from werkzeug.exceptions import NotFound
from app.db.general import get_requests
from app.db.general import get_user_documents, get_document_by_id, get_user_by_email, get_all_documents,\
get_previews_for_documents, get_image_by_id, get_public_documents
from app.document.forms import CreateDocumentForm
from app.document.annotation_statistics import get_document_annotation_statistics, get_user_annotation_statistics, get_document_annotation_statistics_by_day
from io import BytesIO
import dateutil.parser
import zipfile
import time
import os
import json
import re
from natsort import natsorted
def image_preview(image_id=None, public_access=False):
if image_id is None:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
try:
db_image = get_image_by_id(image_id)
except (sqlalchemy.exc.StatementError, sqlalchemy.orm.exc.NoResultFound):
return "Image does not exist.", 404
document_id = db_image.document_id
if public_access:
db_document = get_document_by_id(db_image.document_id)
if not db_document.is_public:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
else:
if not is_granted_acces_for_document(document_id, current_user):
return send_file('static/img/missing_page.png', cache_timeout=10000000)
image_preview_path = os.path.join(current_app.config['PREVIEW_IMAGES_FOLDER'], str(document_id), str(image_id) + '.jpg')
if not os.path.isfile(image_preview_path):
make_image_preview(db_image)
return send_file(image_preview_path, cache_timeout=0)
def get_image_common(image_id, public=False):
try:
image_db = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if public:
if not image_db.document.is_public:
return "Image is not public.", 403
elif not is_granted_acces_for_page(image_id, current_user):
return "You do not have access to the requested images.", 403
image_path = os.path.join(current_app.config['UPLOADED_IMAGES_FOLDER'], image_db.path)
if not os.path.isfile(image_path):
print("ERROR: Could not find image on disk. image id: {}, image path: {}.".format(image_id, image_path))
raise NotFound()
return send_file(image_path, as_attachment=True, attachment_filename=image_db.filename, cache_timeout=10000000)
| 38.376956
| 156
| 0.725305
|
47e38cc73a4ca6342b90794377e31733e0fe8cef
| 4,898
|
py
|
Python
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | 1
|
2020-03-30T13:49:29.000Z
|
2020-03-30T13:49:29.000Z
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | 5
|
2020-03-30T14:32:48.000Z
|
2020-03-31T12:01:02.000Z
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | null | null | null |
"""
This module defines a packet structure
(composed of: canari, payload, payload_size, and eventually an extra payload).
You'll find a 'pack' functions allowing you to create a packet
from a payload (btyes object) you want to send, and an 'unpack' function
that can extract a payload from a packet (as a bytes object too) after
validating the packet structure (canari, checksum, length).
packet[64]: abcd abcdefghabcdefghabcd abcdefgh
^ ^ ^
canari[4] checksum[20] payload_size[8]
payload_size
<------------------------------------------->
abcdefghabcdefghabcdefghabcdefgh [...]
^ ^
payload[32] extra_payload
"""
import collections
import struct
import hashlib
CANARI = 0xdeadbeef
CANARI_SIZE = 4 # unsigned int
CHECKSUM_SIZE = 20 # sha1
INT_SIZE = 8 # unsigned long long
PAYLOAD_MIN_SIZE = 32 # TODO: tweak me based on DbClient requests size: 256-32
PACKET_MIN_SIZE = (
CANARI_SIZE + CHECKSUM_SIZE + INT_SIZE
+ PAYLOAD_MIN_SIZE
) # 64
CHECKSUM_OFFSET = CANARI_SIZE + CHECKSUM_SIZE # we'll start hasing from there
STRUCT_FORMAT = (
"!"
"I" # canari
f"{CHECKSUM_SIZE}s" # checksum
"Q" # payload_size
"{payload_size}s" # payload: complete its size using format
)
Packet = collections.namedtuple(
"Packet",
["canari", "checksum", "payload_size", "payload"]
)
def _checksum(bytes_buf):
"""Return the sha1 digest of the given 'bytes_buf'."""
return hashlib.sha1(bytes_buf[CHECKSUM_OFFSET:]).digest()
def _verify_checksum(ctrl_checksum, bytes_buf):
"""
Return True if the given 'ctrl_checksum' matches the checksum
of 'bytes_buf', otherwise throw a ValueError.
"""
if ctrl_checksum != _checksum(bytes_buf):
raise ValueError("packet: invalid checksum")
return True
def pack(payload, with_checksum=True):
"""
Create a packet from the given 'payload' byte object that you want to send.
If the 'with_checksum' argument is True, the checksum of the payload will
be calculated and inserted in the packet, otherwise the checksum will be
set to zeros.
Returns a bytes object of the created packet (ready to send).
"""
packet = Packet(
canari=CANARI,
checksum=b"\x00" * CHECKSUM_SIZE,
payload_size=len(payload),
payload=payload.ljust(PAYLOAD_MIN_SIZE, b"\x00")
)
payload_size = max(packet.payload_size, PAYLOAD_MIN_SIZE)
try:
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
except struct.error as e:
raise ValueError(f"packet: {e}")
if with_checksum:
packet = packet._replace(checksum=_checksum(bytes_buf))
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
return bytes_buf
def unpack(bytes_buf, with_checksum=True):
"""
Extract the payload (as a bytes object) from the given 'bytes_buf' packet.
If the 'with_checksum' argument is True, the checksum in the packet will be
checked against a calculated checksum of the packet payload. Otherwise it
will just be ignored.
Returns a bytes object of the extracted payload.
A ValueError will be thrown if an invalid packet is given as 'bytes_buf'
(invalid canari, checksum, payload length)
"""
# first, we try to unpack as if it was a 64 bytes packet
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=PAYLOAD_MIN_SIZE),
bytes_buf[:PACKET_MIN_SIZE]
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if packet.canari != CANARI:
raise ValueError("packet: the canari is dead")
# payload can fit in a 64 bytes packet: just verify checksum, then job done
if packet.payload_size <= PAYLOAD_MIN_SIZE:
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
packet = packet._replace(
payload=packet.payload[:packet.payload_size]
)
return packet
# packet is actually bigger than 64 bytes (extra_payload)
if len(bytes_buf) <= PACKET_MIN_SIZE:
return packet # the payload is incomplete, and checksum not verified
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=packet.payload_size),
bytes_buf
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
return packet # complete packet with extra payload
| 32.223684
| 79
| 0.636178
|
47e43b3b4e3f0031df6f61702eae33c0a872be24
| 1,095
|
py
|
Python
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 11
|
2017-01-30T21:53:20.000Z
|
2020-05-29T22:39:19.000Z
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 139
|
2016-03-09T19:09:59.000Z
|
2021-09-03T17:14:00.000Z
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 10
|
2016-12-19T22:39:42.000Z
|
2021-03-09T19:23:15.000Z
|
"""
API interfaces for swagger operations.
"""
from typing import (
Any,
Iterable,
Mapping,
Tuple,
)
from marshmallow import Schema
from marshmallow.fields import Field
from microcosm_flask.swagger.parameters import Parameters
from microcosm_flask.swagger.schemas import Schemas
def build_schema(schema: Schema, strict_enums: bool = True) -> Mapping[str, Any]:
"""
Build JSON schema from a marshmallow schema.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.build(schema)
def iter_schemas(schema: Schema, strict_enums: bool = True) -> Iterable[Tuple[str, Any]]:
"""
Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.iter_schemas(schema)
def build_parameter(field: Field, **kwargs) -> Mapping[str, Any]:
"""
Build JSON parameter from a marshmallow field.
"""
builder = Parameters(**kwargs)
return builder.build(field)
| 23.804348
| 89
| 0.717808
|
47e4a20a59666f230f44fc593c648ca410af9651
| 1,286
|
py
|
Python
|
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | 1
|
2020-07-04T01:09:00.000Z
|
2020-07-04T01:09:00.000Z
|
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | null | null | null |
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | null | null | null |
import sys
import struct
if(len(sys.argv) != 5):
print("Usage: python converter.py <num_pixels_x> <num_pixels_y> <input file> <output file>\n")
else:
num_pixels_x = int(sys.argv[1])
num_pixels_y = int(sys.argv[2])
print(num_pixels_x)
has_alpha_channel = False
infile = open(sys.argv[3], "rb")
data = infile.read()
data_len = len(data)
if((data_len != (num_pixels_x * num_pixels_y * 4)) or
(data_len != (num_pixels_x * num_pixels_y * 3))):
AssertionError(
"File size does not match given resolution, or does not use 8bpp.")
if(data_len == (num_pixels_x * num_pixels_y * 4)):
has_alpha_channel = True
outfile = open(sys.argv[4], "wb")
infile.seek(0)
for y in range(num_pixels_y):
for x in range(num_pixels_x):
r = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
g = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
b = (int.from_bytes(infile.read(1), 'little') >> 6) & 0x3
if(has_alpha_channel):
# Alpha channel, we don't use this
_ = infile.read(1)
pixel = (b << 6) | (g << 3) | r
outfile.write(pixel.to_bytes(1, 'little'))
infile.close()
outfile.close()
| 30.619048
| 98
| 0.573872
|
47e4f4051c67291e2bfd6264123e2f3ba68f0903
| 319
|
py
|
Python
|
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
# import require package
import os
upload_dir = './uploads'
remove_content(upload_dir)
images_dir = './static/images'
remove_content(images_dir)
| 18.764706
| 44
| 0.705329
|
47e633e7aabb9cbd31dd0cb29459787e531f57cc
| 15,271
|
py
|
Python
|
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
"""
Fit step for gross alignment and scale.
"""
from opencmiss.utils.zinc.field import assignFieldParameters, createFieldsDisplacementGradients
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.field import Field, FieldFindMeshLocation
from opencmiss.zinc.optimisation import Optimisation
from opencmiss.zinc.result import RESULT_OK
from scaffoldfitter.fitterstep import FitterStep
| 49.26129
| 185
| 0.71017
|
47e8c93b45a616423efc06f0610ea0b349b67a78
| 261
|
py
|
Python
|
src/structlog_to_seq/abs_processor.py
|
gjedlicska/structlog-to-seq
|
44d8eb536db2cf0a5bbd8561a545b39f7584f372
|
[
"MIT"
] | null | null | null |
src/structlog_to_seq/abs_processor.py
|
gjedlicska/structlog-to-seq
|
44d8eb536db2cf0a5bbd8561a545b39f7584f372
|
[
"MIT"
] | null | null | null |
src/structlog_to_seq/abs_processor.py
|
gjedlicska/structlog-to-seq
|
44d8eb536db2cf0a5bbd8561a545b39f7584f372
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from typing import Any
| 21.75
| 56
| 0.708812
|
47e9940c13fbcbe44daf1d6db6129df93471f1e5
| 2,965
|
py
|
Python
|
scripts/ch4_correlation_plot.py
|
mathkann/understanding-random-forests
|
d2c5e0174d1a778be37a495083d756b2829160ec
|
[
"BSD-3-Clause"
] | 353
|
2015-01-03T13:34:03.000Z
|
2022-03-25T05:16:30.000Z
|
scripts/ch4_correlation_plot.py
|
mathkann/understanding-random-forests
|
d2c5e0174d1a778be37a495083d756b2829160ec
|
[
"BSD-3-Clause"
] | 1
|
2016-06-29T05:43:41.000Z
|
2016-06-29T05:43:41.000Z
|
scripts/ch4_correlation_plot.py
|
mathkann/understanding-random-forests
|
d2c5e0174d1a778be37a495083d756b2829160ec
|
[
"BSD-3-Clause"
] | 153
|
2015-01-14T03:46:42.000Z
|
2021-12-26T10:13:51.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
blue = (0, 0, 1.0)
green = (0, 0.8, 0)
red = (1.0, 0, 0)
red_alpha = (1.0, 0, 0, 0.001)
gray = (0.7, 0.7, 0.7)
results = [[],[],
["RandomForestRegressor-K=1",3.527128,2.820386,0.706743,0.063868,0.009973,0.286104,0.420639],
["RandomForestRegressor-K=2",3.036291,2.333874,0.702417,0.075537,0.011347,0.314841,0.387576],
["RandomForestRegressor-K=3",2.823907,2.109897,0.714009,0.087809,0.012335,0.349486,0.364523],
["RandomForestRegressor-K=4",2.715613,1.979086,0.736527,0.102472,0.014302,0.391750,0.344778],
["RandomForestRegressor-K=5",2.643232,1.887080,0.756151,0.111790,0.015411,0.421380,0.334772],
["RandomForestRegressor-K=6",2.642354,1.851498,0.790856,0.125342,0.016268,0.466556,0.324300],
["RandomForestRegressor-K=7",2.636296,1.822316,0.813980,0.134200,0.017159,0.495746,0.318234],
["RandomForestRegressor-K=8",2.623646,1.784344,0.839303,0.146081,0.018631,0.531100,0.308202],
["RandomForestRegressor-K=9",2.645439,1.780447,0.864992,0.152977,0.019492,0.558601,0.306390],
["RandomForestRegressor-K=10",2.638901,1.753437,0.885464,0.160371,0.020184,0.583494,0.301970],
["ExtraTreesRegressor-K=1",3.376099,2.723586,0.652514,0.051864,0.009532,0.230752,0.421761],
["ExtraTreesRegressor-K=2",2.801100,2.146534,0.654566,0.060858,0.011926,0.258086,0.396480],
["ExtraTreesRegressor-K=3",2.536644,1.886837,0.649807,0.067322,0.012756,0.273424,0.376383],
["ExtraTreesRegressor-K=4",2.409943,1.745583,0.664360,0.076519,0.016511,0.302962,0.361399],
["ExtraTreesRegressor-K=5",2.330165,1.651706,0.678459,0.086137,0.017063,0.331515,0.346944],
["ExtraTreesRegressor-K=6",2.285386,1.597063,0.688323,0.092147,0.019216,0.349667,0.338655],
["ExtraTreesRegressor-K=7",2.263983,1.553772,0.710211,0.100322,0.020510,0.378116,0.332094],
["ExtraTreesRegressor-K=8",2.246997,1.528167,0.718831,0.107167,0.021703,0.396323,0.322507],
["ExtraTreesRegressor-K=9",2.236845,1.495768,0.741077,0.115699,0.023020,0.423894,0.317183],
["ExtraTreesRegressor-K=10",2.232862,1.469781,0.763081,0.123849,0.024420,0.451778,0.311304]]
max_features = range(1, 10+1)
ax = plt.subplot(1, 2, 1)
plt.plot(max_features, [results[1+k][1] for k in max_features], 'o-', color=blue, label='Random Forest')
plt.plot(max_features, [results[1+k][2] for k in max_features], 'o--', color=blue)
plt.plot(max_features, [results[1+k][3] for k in max_features], 'o:', color=blue)
plt.plot(max_features, [results[11+k][1] for k in max_features], 'o-', color=red, label='Extremely Randomized Trees')
plt.plot(max_features, [results[11+k][2] for k in max_features], 'o--', color=red)
plt.plot(max_features, [results[11+k][3] for k in max_features], 'o:', color=red)
plt.legend(loc="best")
plt.xlabel("$K$")
plt.subplot(1, 2, 2, sharex=ax)
plt.plot(max_features, [results[1+k][4] for k in max_features], 'o-', color=blue)
plt.plot(max_features, [results[11+k][4] for k in max_features], 'o-', color=red)
plt.xlabel("$K$")
plt.ylabel("$\\rho$")
plt.show()
| 57.019231
| 117
| 0.725801
|
47e9c55cbdb85ad05b1bd86a08b95251624c0eb6
| 4,029
|
py
|
Python
|
Run_Vfree-Synthetic_Flat.py
|
Fernandez-Trincado/DataReductionPy
|
f06eb975067dc80cac038a47d3b9a9dde43bfdb6
|
[
"FSFAP"
] | 1
|
2020-01-25T06:28:40.000Z
|
2020-01-25T06:28:40.000Z
|
Run_Vfree-Synthetic_Flat.py
|
Fernandez-Trincado/DataReductionPy
|
f06eb975067dc80cac038a47d3b9a9dde43bfdb6
|
[
"FSFAP"
] | null | null | null |
Run_Vfree-Synthetic_Flat.py
|
Fernandez-Trincado/DataReductionPy
|
f06eb975067dc80cac038a47d3b9a9dde43bfdb6
|
[
"FSFAP"
] | null | null | null |
#!/usr/bin/python
# Created by: Jose G. Fernandez Trincado
# Date: 2013 June 28
# Program: This program correct the imagen .fit (Science) by Syntethic Flat
# 1 m Reflector telescope, National Astronomical Observatory of Venezuela
# Mode f/5, 21 arcmin x 21 arcmin
# Project: Omega Centauri, Tidal Tails.
# The program Astrometry_V1.py defined was developed by J. G. Fernandez Trincado at the Centro de Investigaciones de Astronomia "Francisco J. Duarte".
# If you have any problems, please contact J. G. Fernandez Trincado, jfernandez@cida.ve / jfernandezt87@gmail.com
import numpy as np
import scipy as sc
import pyfits
import sys, os
from pyraf import iraf
#run, program.
#Example:
# Next program: ./Run_Vfree-Synthetic_Flat.py GrupoX.dat
# >>> GrupoX.dat/XXX.XX.XXX.XX.XXXX.hlv*
location='/home/jfernandez/Escritorio/Tesis_2013-2014_CIDA_ULA/Data_Tesis_2013_2014_CIDA-ULA/Reflector/'
if len(sys.argv[:]) < 2.:
print '***************************************************'
print 'Warning: ./Run_Vfree-Synthetic_Flat.py GrupoX.dat'
print '***************************************************'
else:
#Combine images MEDIAN
#TASK IRAF: images.immatch.imcombine
#Function to combine images for generates Master Flat
data=sc.genfromtxt(sys.argv[1],dtype=str)
#Lee lista dentro de los directorios, estas listas contienen la ruta de las imagenes ya clasificadas por filtro y tiempo de exposicion.
for i in np.arange(len(data)):
temp='/Initial_list_Syntethic_flat_'
os.system('ls '+data[i]+temp+'* >temporal_classified.dat')
data_clas=sc.genfromtxt('temporal_classified.dat',dtype=str)
for j in np.arange(len(data_clas)):
if data_clas[j] == data[i]+temp+'I60':
os.system('cat '+data[i]+temp+'I60 >> MasterFlat_I60_Good.dat')
elif data_clas[j] == data[i]+temp+'I90':
os.system('cat '+data[i]+temp+'I90 >> MasterFlat_I90_Good.dat')
elif data_clas[j] == data[i]+temp+'V60':
os.system('cat '+data[i]+temp+'V60 >> MasterFlat_V60_Good.dat')
elif data_clas[j] == data[i]+temp+'V90':
os.system('cat '+data[i]+temp+'V90 >> MasterFlat_V90_Good.dat')
else:
pass
os.system('rm temporal_classified.dat')
os.system('ls MasterFlat_*_Good.dat >list_temp_gen.dat')
data_end=sc.genfromtxt('list_temp_gen.dat',dtype=str)
for k in np.arange(len(data_end)):
print 'Generating Master Flat: '+data_end[k]
print ''
Master_combina('@'+data_end[k],data_end[k]+'.fit')
print 'End of the process'
print ''
for h in np.arange(len(data)):
os.system('cp '+data_end[k]+'.fit '+data[h]+'/')
os.system('rm '+data_end[k]+'.fit')
os.system('rm list_temp_gen.dat MasterFlat_*_Good.dat')
#END
| 28.778571
| 150
| 0.70489
|
47e9dce96f4661d34f10811dc840595ffed5833b
| 829
|
py
|
Python
|
abing/backend/abing/models/feature.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 5
|
2020-12-04T14:15:26.000Z
|
2020-12-30T09:11:09.000Z
|
abing/backend/abing/models/feature.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 8
|
2020-12-20T16:33:30.000Z
|
2021-01-06T01:56:55.000Z
|
abing/backend/abing/models/feature.py
|
dohyungp/abitrary
|
4dc3f4c79a433a2debe1f1e151d00400a2225e9c
|
[
"MIT"
] | 1
|
2021-01-06T15:25:19.000Z
|
2021-01-06T15:25:19.000Z
|
from typing import TYPE_CHECKING
from sqlalchemy import Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from abing.db.base_class import Base
if TYPE_CHECKING:
from .arm import Arm # noqa: F401
| 30.703704
| 79
| 0.731001
|
47eb8cdb6e6b5599e5209b828d0aacfe3eb4df25
| 555
|
py
|
Python
|
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
mlevel = 1
elevel = 1
mclass_bonus_a = 20
eclass_bonus_a = 0
mclass_bonus_b = 60
eclass_bonus_b = 0
mclass_power = 3
eclass_power = 2
print(damage_exp())
print(defeat_exp())
print(defeat_exp(2))
print(kill_exp())
| 24.130435
| 103
| 0.715315
|
47eb8f87adf534b765a9c50c9659d9424a7c2ade
| 1,315
|
py
|
Python
|
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | null | null | null |
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | 1
|
2021-09-11T18:06:33.000Z
|
2021-09-11T18:06:33.000Z
|
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from pymongo import MongoClient, collection
client = MongoClient("mongodb://localhost:27017")
database = client["temp"]
states_districts = database["states_districts"]
states_districts.remove({})
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
}
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/states", headers=headers
)
states = json.loads(response.text)["states"]
custom_state_id=1
for state in states:
state_id = state["state_id"]
state_name = state["state_name"].strip()
print(state_name)
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/districts/" + str(state_id),
headers=headers,
)
custom_district_id=1
temp=[]
districts = json.loads(response.text)["districts"]
for district in districts:
district_id = district["district_id"]
district_name = district["district_name"].strip()
data={"state_name":state_name,"custom_state_id":custom_state_id,"district_name":district_name,"custom_district_id":custom_district_id,"actual_district_id":district_id}
states_districts.insert_one(data)
custom_district_id+=1
custom_state_id+=1
| 35.540541
| 175
| 0.719392
|
47ecac75bfa5b5456323216191e97427a888010b
| 3,989
|
py
|
Python
|
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
| 40.292929
| 124
| 0.552018
|
47ed721213e9d40abe12f67af339888e2d8b6e5e
| 12,488
|
py
|
Python
|
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | 1
|
2019-01-08T16:41:54.000Z
|
2019-01-08T16:41:54.000Z
|
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | null | null | null |
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | null | null | null |
import numpy as np
# import matplotlib.pyplot as plt
# import pylab
import re
import itertools
import json
import collections
import multiprocessing as mp
import random
import sys
#sys.path.append("./src/")
#from proto import io as protoio
#from utils.multiprocessor_cpu import MultiProcessorCPU
'''
some general pre/post processing tips:
1. should strip the space at the begining or end
2. consider the influence of punctuation at the end
3. be careful about empty string when using lib re functions
'''
def LoadJsonData(filePath):
'''
Load the file.
@param filePath: filePath string
'''
with open(filePath) as dataFile:
data = json.load(dataFile)
return data
def DumpJsonPrediction(filePath, predictions):
'''
currently only support top 1 prediction.
the output put goes in the following format:
{id : answer string}
'''
predDict = dict()
for title in predictions.keys():
for pred in predictions[title]:
if len(pred["prediction"] ) == 0:
continue
predDict[pred["id"] ] = pred["prediction"][0]
with open(filePath, "w") as outFile:
json.dump(predDict, outFile)
def ParseJsonData(data):
'''
@param data is a json object. This is the version before
visualization functionality.
'''
dataPerArticle = dict()
for article in data:
text = ""
# process articles to a list of sentences represented by list of words
for paragraph in article["paragraphs"]:
text += paragraph["context"].strip() + " "
textInSentences = TextToSentence(text)
queries = list()
answers = list()
qaIds = list()
for paragraph in article["paragraphs"]:
for qaPair in paragraph["qas"]:
# turn everything into lower cases
queries.append(StripPunct(qaPair["question"].lower().strip() ) )
answers.append(StripPunct(qaPair["answers"][0]["text"].lower().strip() ) )
qaIds.append(qaPair["id"] )
dataPerArticle[article["title"] ] = { \
"textInSentences": textInSentences,
"queries": queries,
"answers": answers,
"qaIds": qaIds
}
return dataPerArticle
def TextToSentence(text):
'''
cut document into sentences with the given delimiters
@param delimiters: delimiters to cut doc to sentences as a list of char
@return sentences: list of full string of sentences
'''
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co|Corp)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
numbers = "([-+]?)([0-9]+)(\.)([0-9]+)"
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd> \\3<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
text = re.sub(numbers, "\\g<1>\\g<2><prd>\\g<4>", text)
# # specific to current SQUAD dataset
text = text.lower()
suffixesSupp = "(\.)([a-z]+)"
text = re.sub(suffixesSupp,"<prd>\\2",text)
text = text.replace("...", "<elli>")
text = text.replace("i.e.", "i<prd>e<prd>")
text = text.replace("etc.", "etc<prd>")
text = text.replace("u.s.", "u<prd>s<prd>")
text = text.replace("v.s.", "v<prd>s<prd>")
text = text.replace("vs.", "vs<prd>")
text = text.replace(" v. ", " v<prd> ")
text = text.replace("med.sc.d", "med<prd>sc<prd>d")
text = text.replace("ecl.", "ecl<prd>")
text = text.replace("hma.", "hma<prd>")
text = text.replace("(r.", "(r<prd>") # for some year related staff
text = text.replace("(d.", "(d<prd>")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
text = text.replace("<elli>", "...")
sentences = text.split("<stop>")
sentences = [s.strip() \
for s in sentences if s.strip() != '']
return sentences
def SentenceToWord(sentences):
'''
cut sentences to list of words
@param sentences: a list of sentences
@return sentencesInWords: a list containing list of words
'''
delimiters = "[ ,;\"\n\(\)]+"
sentencesInWords = list()
for sentence in sentences:
sentence = StripPunct(sentence)
sentence = sentence.replace("...", " ...")
sentencesInWords.append(re.split(delimiters, sentence) )
# omit the empty word produced by re.split
sentencesInWords[-1] = [s.strip().lower() for s in sentencesInWords[-1] if s.strip() != '']
return sentencesInWords
############### helper to multiprocess per article task with MultiprocessorCPU
def MultipleProcess(agent, titleList, targetFunc, conservative=True, debug=False):
'''
target function is the one we want to execute
for each article. When conservative == True, the num of threads
is equal to the number of cores on the machine
'''
procs = []
manager = mp.Manager()
returnDict = manager.dict()
if debug:
for title in titleList:
targetFunc(agent, title, returnDict)
else:
for title in titleList:
p = mp.Process(target=targetFunc,
args=(agent, title, returnDict) )
procs.append(p)
processor = MultiProcessorCPU(procs)
processor.run(conservative)
return returnDict
################ helpers for protobuf based dataset#################
def ReconstructStrFromSpan(tokens, span=None):
'''
@param tokens: a protobuf object representing a list of tokens
@param span: a pair (beginId, endId). Note endId is excluded.
'''
if span is None:
span = (0, len(tokens))
string = ""
beginId, endId = span
for i in range(beginId, endId):
string += tokens[i].word + tokens[i].after
string = string.strip()
return string
def GetContextBigram(article):
'''
article is an protobuf object for apecific article
'''
bigram = []
for paragraph in article.paragraphs:
bigramByPara = list()
for s in paragraph.context.sentence:
bigramByPara.append(GetBigramBySentence(s.token) )
bigram.append(bigramByPara)
return bigram
def GetBigramBySentence(tokens):
'''
tokens is a list of proto message object tokens
'''
bigram = []
for i in range(len(tokens) - 1):
bigram.append( (tokens[i].word.lower(), tokens[i + 1].word.lower() ) )
return bigram
def GetContextConstituentSpan(article):
'''
@return span: the spans are organized by the following hierarchy
span = [spanByPara1, spanByPara2, ...] Where
spanByPara1 = [spanBySentence1, spanBySentence2, ...]
spanBySentence1 is a list of spans extracted from the parsing tree
'''
span = []
for paragraph in article.paragraphs:
spanByPara = list()
for s in paragraph.context.sentence:
# tokens = [token.word for token in s.token]
spanBySentence = GetConstituentSpanBySentence(s.parseTree)
spanByPara.append(spanBySentence)
span.append(spanByPara)
return span
def GetConstituentSpanBySentence(parseTree):
'''
@param parseTree: a protobuf object
extract span represented by nodes in the parsing trees
'''
def AddSpanToParseTree(parseTree, nextLeaf):
'''
@param parseTree: a protobuf object
fill in the yieldBeginIndex and yieldEndIndex fields for parsing trees
'''
if len(parseTree.child) == 0:
parseTree.yieldBeginIndex = nextLeaf
parseTree.yieldEndIndex = nextLeaf + 1
return parseTree, nextLeaf + 1
else:
for i in range(len(parseTree.child) ):
child, nextLeaf = \
AddSpanToParseTree(parseTree.child[i], nextLeaf)
parseTree.child[i].CopyFrom(child)
parseTree.yieldBeginIndex = parseTree.child[0].yieldBeginIndex
parseTree.yieldEndIndex = parseTree.child[-1].yieldEndIndex
return parseTree, nextLeaf
parseTree, _ = AddSpanToParseTree(parseTree, nextLeaf=0)
spans = list()
visitList = list()
visitList.append(parseTree)
tokenList = list()
while len(visitList) != 0:
node = visitList.pop(0)
spans.append( (node.yieldBeginIndex, node.yieldEndIndex) )
for subTree in node.child:
visitList.append(subTree)
spansUniq = []
[spansUniq.append(span) for span in spans if span not in spansUniq]
return spansUniq
# some functions for debug
def GetCandidateAnsListInStr(candDataPerArticle, origDataPerArtice, ids, predId):
'''
for detailed use browse to prediction function of context rnn
'''
ansList = list()
for idx in ids:
predInfo = candDataPerArticle.candidateAnswers[idx]
predParaId = predInfo.paragraphIndex
predSenId = predInfo.sentenceIndex
predSpanStart = predInfo.spanBeginIndex
predSpanEnd = predInfo.spanBeginIndex + predInfo.spanLength
tokens = origDataPerArticle.paragraphs[predParaId].context.sentence[predSenId].token[predSpanStart:predSpanEnd]
predStr = ReconstructStrFromSpan(tokens, (0, len(tokens) ) )
ansList.append(predStr)
return ansList
# for serializing complex results
# display proto tokens
# remove the and . from tokens
def UnkrizeData(data, rate, padId, unkId):
'''
artificially set non-<pad> tokens to <unk>. The portion of
the artificial <unk> is indicated by rate.
'''
mask = np.random.uniform(low=0.0, high=1.0, size=data.shape)
mask = np.logical_and( (data != padId), (mask >= (1 - rate) ) )
data[mask] = unkId
return data
| 32.605744
| 119
| 0.596493
|
47ee5fdbd7a2fd709f968f5597836efd8e182df3
| 207
|
py
|
Python
|
software/test/sample.py
|
technovus-sfu/swarmbots
|
6a50193a78056c0359c426b097b96e1c37678a55
|
[
"MIT"
] | null | null | null |
software/test/sample.py
|
technovus-sfu/swarmbots
|
6a50193a78056c0359c426b097b96e1c37678a55
|
[
"MIT"
] | 3
|
2018-02-05T23:21:02.000Z
|
2018-05-03T02:58:50.000Z
|
software/test/sample.py
|
technovus-sfu/swarmbots
|
6a50193a78056c0359c426b097b96e1c37678a55
|
[
"MIT"
] | null | null | null |
# print("Hello World!")
# sum = 2 + 2
# print(sum)
# for i in range(10,-10,-1):
# if i % 2 == 0:
# print(i)
# else:
# pass
# while(1):
# val = input("Enter ")
# print(val)
| 13.8
| 28
| 0.434783
|
47f00c4575c588196fb02578a13c75df9196c8ba
| 476
|
py
|
Python
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 5
|
2021-05-02T00:06:41.000Z
|
2021-11-30T10:34:08.000Z
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 3
|
2021-05-06T09:31:49.000Z
|
2021-05-11T05:14:32.000Z
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 1
|
2021-05-06T15:34:24.000Z
|
2021-05-06T15:34:24.000Z
|
# -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, jsonify
from flask_login import login_required
from super_nft.extensions import csrf_protect
datasprint_bp = Blueprint("datasprint", __name__, url_prefix="/datasprint", static_folder="../static")
| 28
| 102
| 0.682773
|
47f07bbd0388ba3dd47eb8f252a382985372ec31
| 548
|
py
|
Python
|
CursoemVideoPython/Desafio 34.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 34.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 34.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
'''
Escreva um programa que pergunte o salrio de um funcionrio e calcule o valor do seu
aumento.
Para salrios superiores a R$1.250,00, calcule um aumento de 10%
Para os inferiores ou iguais, o aumento de 15%.
'''
salario = float(input('Digite o salrio: R$ '))
if salario < 0:
print('Valor invlido!')
else:
if salario <= 1250:
print(f'O aumento ser de R${salario*0.15} e passar a ser R${salario + salario*0.15}')
else:
print(f'O aumento ser de R${salario * 0.10} e passar a ser R${salario + salario * 0.10}')
| 34.25
| 99
| 0.669708
|
47f0f24b25872b88a91afd63b72991904ea663bc
| 658
|
py
|
Python
|
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
cores = {'vermelho': '\033[31m',
'azul': '\033[34m',
'amarelo': '\033[33m',
'branco': '\033[30m',
'roxo': '\033[35m',
'verde': '\033[32m',
'ciano': '\033[36m',
'limpa': '\033[m',
'preto e branco': '\033[7;30;m'}
pintar = str(input('Deseja pintar o seu texto com qual cor? ')).lower()
while pintar not in cores:
pintar = str(input('Erro! Essa cor no existe. Tente novamente: '))
if pintar in cores:
break
texto = str(input('Digite seu texto: '))
linhas(cor=pintar, txt=texto)
| 26.32
| 71
| 0.542553
|
47f182f38e59b731af6d6326b1c317ab14b2b7e5
| 992
|
py
|
Python
|
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
# Listing_20-2.py
# Copyright Warren & Csrter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Adding an event handler for the button
import sys
from PyQt4 import QtCore, QtGui, uic
form_class = uic.loadUiType("MyFirstGui.ui")[0]
# Class definition for the main window
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass()
myWindow.show()
app.exec_()
| 31
| 90
| 0.654234
|
47f231b8a668477769e2a9abd3723ae4eedc3e54
| 1,072
|
py
|
Python
|
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | 16
|
2015-04-04T15:26:01.000Z
|
2019-10-15T16:13:03.000Z
|
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | null | null | null |
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | 7
|
2015-10-12T21:23:12.000Z
|
2021-10-13T02:41:25.000Z
|
from serial_comminication import *
from utils.decorators import Override
__author__ = 'Danyang'
| 38.285714
| 118
| 0.602612
|
47f249b23a7b5f7230bfbb222ddcb290a2a7adde
| 5,259
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from botocore.paginate import Paginator
| 40.145038
| 224
| 0.487165
|
47f2d05914db9e80d9759d21867bc5761abeee91
| 1,550
|
py
|
Python
|
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
# Best : O(n + k)
# Avg : O(n + k)
# Worst O(n + k)
# Space worst : O(k) - CAN GET VERY BIG BIG
# k - range of values in array
# Take every number in arr then add += 1 to index of that number in temporary arrays, then
# for every index in temporary arrays add to final_arr that amount of that index number of
# how big number at that index is - if arr[23] = 3 then add 23 23 23 and same for negatives
# 1. The whole thing can get very ineffective if numbers in arr are big
# 2. Possibility of sorting negatives greatly increase time/space complexity
# create array len of min(to_sort), do the same as positives, then for final result multiply by -1 to get negatives
# and reverse them because we were counting them as positive => [1,5,10] => [-1,-5,-10] => [-10,-5,-1] and
# add that array at beginning of positive_sorted
to_sort = [52, 63, 12, 6, 631, 6, 24, 637,
64, 421, 74, 124, 0, -5, 523, -10, -529]
print(counting_sort(to_sort))
| 34.444444
| 119
| 0.642581
|
47f4980d53b9e0ce1e873da3c9bbca1b3052a8de
| 5,881
|
py
|
Python
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 67
|
2020-09-07T11:50:03.000Z
|
2022-03-31T04:09:08.000Z
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 7
|
2020-09-27T01:41:59.000Z
|
2022-03-25T05:16:43.000Z
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 12
|
2020-10-13T14:31:01.000Z
|
2022-02-14T05:44:38.000Z
|
import os
import random
import re
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import sys; sys.path.append("../../..")
from job_table import JobTable
| 38.188312
| 100
| 0.550757
|
47f4fb021dc13ce9ce0d5ff354639ce8927eaf9b
| 883
|
py
|
Python
|
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | 8
|
2020-09-05T16:04:31.000Z
|
2022-02-27T09:57:51.000Z
|
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
"""
Move Zeroes - https://leetcode.com/problems/move-zeroes/
Given an integer array nums, move all 0's to the end of it while maintaining the relative order of the non-zero elements.
Note that you must do this in-place without making a copy of the array.
Example 1:
Input: nums = [0,1,0,3,12]
Output: [1,3,12,0,0]
Example 2:
Input: nums = [0]
Output: [0]
Constraints:
1 <= nums.length <= 104
-231 <= nums[i] <= 231 - 1
Follow up: Could you minimize the total number of operations done?
"""
| 20.534884
| 121
| 0.583239
|
47f6c684577e0c1a7c425c6ef180e8ab456e667e
| 1,503
|
py
|
Python
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 36
|
2015-01-13T00:21:07.000Z
|
2017-11-07T11:45:25.000Z
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 55
|
2015-01-07T09:53:50.000Z
|
2017-02-07T00:31:20.000Z
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 24
|
2015-01-06T16:17:33.000Z
|
2017-04-21T14:00:16.000Z
|
from django.contrib.auth import login as django_login
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url
from django.conf import settings
from .backends import StormpathIdSiteBackend
ID_SITE_STATUS_AUTHENTICATED = 'AUTHENTICATED'
ID_SITE_STATUS_LOGOUT = 'LOGOUT'
ID_SITE_STATUS_REGISTERED = 'REGISTERED'
ID_SITE_AUTH_BACKEND = 'django_stormpath.backends.StormpathIdSiteBackend'
_handle_registered = _handle_authenticated
CALLBACK_ACTIONS = {
ID_SITE_STATUS_AUTHENTICATED: _handle_authenticated,
ID_SITE_STATUS_LOGOUT: _handle_logout,
ID_SITE_STATUS_REGISTERED: _handle_registered,
}
| 28.358491
| 73
| 0.809714
|
47f782c40ce2bf55510e810deac00bf9b89ac029
| 445
|
py
|
Python
|
dp/sequence/perfect-square.py
|
windowssocket/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 3
|
2018-05-29T02:29:40.000Z
|
2020-02-05T03:28:16.000Z
|
dp/sequence/perfect-square.py
|
xidongc/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 1
|
2019-03-08T13:22:32.000Z
|
2019-03-08T13:22:32.000Z
|
dp/sequence/perfect-square.py
|
xidongc/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 3
|
2018-05-29T11:50:24.000Z
|
2018-11-27T12:31:01.000Z
|
# https://leetcode.com/problems/perfect-squares/description/
# dp alg, time complexity: O(n^2)
| 22.25
| 60
| 0.438202
|
47f8383750414c949b888bd3081dff4a804800b1
| 1,416
|
py
|
Python
|
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
# Programa para sorteio
from tkinter import *
'''import PySimpleGUI as sg'''
'''
#Layout
layout = [
[sg.Text('Nome:'), sg.Input()],
[sg.Button('OK')]
]
#Janela
janela = sg.Window('Janela teste', layout)
#Interao
eventos, valores = janela.Read()
#Mensagem
print(f'Ol {valores[0]}, obrigado por usar PySimpleGUI!')
#Encerramento da janela
janela.close()
'''
'''
cont = 0
participantes = dict()
for cont in range(0, 2):
participantes["nome"] = str(input('Digite o nome do participante: '))
participantes["numero"] = int(input('Digite o nmero do participante: '))
cont += 1
print(f'{cont} pessoas concorrendo ao sorteio!')
print(participantes)
'''
'''theme_name_list = sg.theme_list()
print(theme_name_list)'''
# Sempre inicia com:
janela = Tk()
janela.title('Sorteio T-force')
janela.geometry("400x400")
# Texto de orientao:
texto_de_orientacao = Label(janela, text='Clique no boto para ver as cotaes das moedas')
# Posio do texto:
texto_de_orientacao.grid(column=0, row=0, padx=10, pady=10)
# Boto + funo
botao = Button(janela, text="Buscar cotaes Dlar, Euro e BTC", command=pegar_cotacoes)
botao.grid(column=0, row=1, padx=10, pady=10)
# Texto das cotaes:
texto_cotacoes = Label(janela, text="")
texto_cotacoes.grid(column=0, row=2, padx=10, pady=10)
# Sempre termina com:
janela.mainloop()
| 24.413793
| 91
| 0.699859
|
47fa30bd997d1a1670c7fee27600bd53764e519c
| 481
|
py
|
Python
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:07:56.000Z
|
2018-12-19T22:07:56.000Z
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 12
|
2020-03-14T05:32:26.000Z
|
2022-03-12T00:08:49.000Z
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:08:00.000Z
|
2018-12-19T22:08:00.000Z
|
"""
@author: magician
@file: __init__.py.py
@date: 2020/9/7
"""
from flask import Flask
from flask_tutorial.flask_sqlite3.flask_sqlite3 import SQLite3
app = Flask(__name__)
app.config.from_pyfile('the-config.cfg')
db = SQLite3(app)
| 17.814815
| 62
| 0.636175
|
47fd0f1fa3538b0a659731489a61f441085833ad
| 516
|
py
|
Python
|
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
"""Transformando um string de numeros, em uma lista
com conjunto de numeros separads por \n"""
matrix = "1 2 3 4\n4 5 6 5\n7 8 9 6\n8 7 6 7"
print(matrix)
matrix = matrix.split("\n")
print(matrix)
matrix2 = []
for n in range(len(matrix)):
matrix[n] = matrix[n].split()
matrix[n] = list(map(int, matrix[n])) # EX: Tranforma a string '1' em um inteiro
matrix2.append(matrix[n][0]) # Pegar o primeiro elemento/numero de cada indice
for index in range(4):
print(matrix[index])
print()
print(matrix2)
| 28.666667
| 85
| 0.672481
|
47fd4fd6cf5d22ab4c0d8b28debf25aa2a0236a1
| 517
|
py
|
Python
|
Programs/__init__.py
|
el-vida/ITE_Classification_Using_GMMs
|
8c1e751fac2c0aa873d41dbae45776b540db0889
|
[
"MIT"
] | null | null | null |
Programs/__init__.py
|
el-vida/ITE_Classification_Using_GMMs
|
8c1e751fac2c0aa873d41dbae45776b540db0889
|
[
"MIT"
] | null | null | null |
Programs/__init__.py
|
el-vida/ITE_Classification_Using_GMMs
|
8c1e751fac2c0aa873d41dbae45776b540db0889
|
[
"MIT"
] | null | null | null |
from A_1_Add_columns_participant_info_multiprocessing_new import *
from A_2_Merge_new_logs import *
from B_1_Add_columns_user_performance_multiprocessing_new import *
from B_2_Merge_new_logs_with_user_groups import *
from C_1_Post_processing_log_new import *
from C_2_Merge_new_logs_post_processed import *
from D_1_Recalculate_new_levenshtein import *
from D_2_Merge_new_logs_new_levenshtein import *
from E_1_Correct_ites_new import *
from E_2_Merge_new_logs_corrected import *
from F_Finalize_dataset_slim import *
| 47
| 66
| 0.895551
|
47fd66e778b3e447ec3e01b548b142f968b5fb7f
| 599
|
py
|
Python
|
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | null | null | null |
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | null | null | null |
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | 2
|
2016-12-27T12:12:09.000Z
|
2020-09-24T18:06:58.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='xl-helper',
version='1.0.5',
description='XL Deploy helper',
long_description='This tool helps with installation and upgrade of XL Deploy and plugins',
author='Mike Kotsur',
author_email='mkotsur@xebialabs.com',
url='http://xebialabs.com/',
packages=find_packages(where=".", exclude=["tests*"]),
package_data={'xl_helper': ['deployit.conf', '.xl-helper.defaults']},
include_package_data=True,
install_requires=['jenkinsapi', 'argparse', 'pytz'],
scripts=['xl-helper']
)
| 31.526316
| 94
| 0.684474
|
47ff4464ecaa8b0b0480823b9cf4bf43b54abcec
| 3,520
|
py
|
Python
|
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | 5
|
2018-05-10T19:50:29.000Z
|
2018-05-10T20:07:08.000Z
|
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | null | null | null |
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import defaultdict
from PyPDF2 import PdfFileReader
from PyPDF2.pdf import PageObject, ContentStream, TextStringObject, u_, i, b_
PageObject.extractText_with_separator = extractText_with_separator
KEYWORDS = ["procesos electorales"]
def find_in_pdf(pdf_path, keywords):
"""
Try to find a word list into pdf file.
.. note:
The line number is approximately, not exactly.
:param pdf_path: path to pdf
:type pdf_path: str
:param keywords: list of keyword to search
:type keywords: list(str)
:return: a structure like this: { PAGE_NUM: { LINE_NUM: TEXT_OF_LINE}
:rtype: dict(str: dict(int: str))
"""
pdf = PdfFileReader(open(pdf_path, 'rb'))
matches = defaultdict(dict)
for page_no, page in enumerate(pdf.pages, 1):
text = page.extractText_with_separator()
line_no = 1
# search
for keyword in keywords:
for line in text.split("\n"):
if not line:
continue
line_no += 1
if keyword in line.lower():
matches["page_%s" % page_no][line_no] = line
return matches
if __name__ == '__main__':
r = find_in_pdf("BOE.pdf", KEYWORDS)
print(r)
| 23.311258
| 77
| 0.658523
|
9a003487767445f7e574b64c73392ed111a08837
| 493
|
py
|
Python
|
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | 8
|
2016-06-21T21:56:17.000Z
|
2021-10-06T17:28:00.000Z
|
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | null | null | null |
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | 1
|
2018-10-19T21:57:54.000Z
|
2018-10-19T21:57:54.000Z
|
from setuptools import setup, find_packages
setup(
name = "django-netpromoterscore",
version = '0.0.2',
description = "Model, Tests, and API for collecting promoter score from users.",
author = "Austin Brennan",
author_email = "ab@epantry.com",
url = "https://github.com/epantry/django-netpromoterscore",
keywords = ["promoter score", "net promoter score", "django"],
install_requires = [],
packages = find_packages(),
include_package_data=True,
)
| 32.866667
| 84
| 0.677485
|
9a0110a8361459c9dacb7bcdc22b39b60eeea30e
| 731
|
py
|
Python
|
DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py
|
ai4eu/tutorials
|
68eb2208716e655d2aa8b950a0d7d73bf6f20f3a
|
[
"Apache-2.0"
] | 8
|
2020-04-21T13:29:04.000Z
|
2021-12-13T08:59:09.000Z
|
DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py
|
ai4eu/tutorials
|
68eb2208716e655d2aa8b950a0d7d73bf6f20f3a
|
[
"Apache-2.0"
] | 3
|
2021-04-27T11:03:04.000Z
|
2021-05-24T18:22:57.000Z
|
DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py
|
ai4eu/tutorials
|
68eb2208716e655d2aa8b950a0d7d73bf6f20f3a
|
[
"Apache-2.0"
] | 6
|
2020-07-06T08:23:25.000Z
|
2021-11-24T10:39:34.000Z
|
import flask
| 24.366667
| 65
| 0.653899
|
9a014ede8ba8180a42bf7abbdb2da472afc22d73
| 228
|
py
|
Python
|
pynemo/core/base/property/string.py
|
SSripilaipong/pynemo
|
f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f
|
[
"MIT"
] | null | null | null |
pynemo/core/base/property/string.py
|
SSripilaipong/pynemo
|
f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f
|
[
"MIT"
] | null | null | null |
pynemo/core/base/property/string.py
|
SSripilaipong/pynemo
|
f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f
|
[
"MIT"
] | null | null | null |
from pynemo.core.base.abstract.property import Property
| 19
| 55
| 0.666667
|
9a0316a49bbe3e0c8ccbf65e47f3d0ad6d7d1eaf
| 6,299
|
py
|
Python
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 1
|
2022-03-30T07:48:33.000Z
|
2022-03-30T07:48:33.000Z
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 76
|
2022-02-04T16:36:49.000Z
|
2022-03-31T11:20:29.000Z
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 1
|
2022-02-02T17:19:05.000Z
|
2022-02-02T17:19:05.000Z
|
import logging
import sys
from pathlib import Path
import time
from folio_uuid.folio_namespaces import FOLIONamespaces
| 42.275168
| 98
| 0.680743
|
9a03f4a30283fc5811ab209f5fab981571d780d6
| 6,064
|
py
|
Python
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 8
|
2021-04-09T18:00:18.000Z
|
2022-03-11T01:13:13.000Z
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 1
|
2021-08-18T04:59:42.000Z
|
2021-12-08T00:24:24.000Z
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 3
|
2021-11-05T15:42:31.000Z
|
2022-03-03T07:38:46.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The tree aggregation protocol for noise addition in DP-FTRL."""
import torch
from collections import namedtuple
from absl import app
Element = namedtuple('Element', 'height value')
if __name__ == '__main__':
app.run(main)
| 32.956522
| 121
| 0.59812
|
9a04e9a41ace038d4a501f35036632f201b9f71d
| 2,782
|
py
|
Python
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-30T21:07:20.000Z
|
2021-01-08T13:52:14.000Z
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-05T22:48:14.000Z
|
2021-12-12T17:29:16.000Z
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 5
|
2016-10-12T16:24:09.000Z
|
2019-11-26T10:16:44.000Z
|
from django.test import TestCase
from ladder.models import Player, Result, League, Season
from django.db.models import Avg
| 36.605263
| 107
| 0.638749
|
9a06d9877e200d7e5cdcb16fe42f60b4884f0200
| 6,779
|
py
|
Python
|
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
"""Google OpenID Connect identity provider."""
from uuid import UUID
from fastapi import Depends, status, Request
from pydantic import BaseModel
from .base import IdPRouter, oauth
from ..http import get_edgedb_pool
from ..models import IdPClient, Identity as BaseIdentity, Href, User
from ..orm import ExtendedComputableProperty, ExclusiveConstraint, with_block
idp = IdPRouter("google")
class IdentityOut(BaseModel):
iss: str # "https://accounts.google.com"
hd: str # "edgedb.com"
email: str
email_verified: bool
name: str
picture: str # URL
given_name: str
family_name: str
locale: str # "en"
| 27.445344
| 95
| 0.621478
|
9a07ac994b1953108c37e98bccdd7052124320ff
| 1,192
|
py
|
Python
|
src/holocron/_processors/import_processors.py
|
ikalnytskyi/holocron
|
f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3
|
[
"BSD-3-Clause"
] | 6
|
2016-11-27T11:53:18.000Z
|
2021-02-08T00:37:59.000Z
|
src/holocron/_processors/import_processors.py
|
ikalnytskyi/holocron
|
f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3
|
[
"BSD-3-Clause"
] | 25
|
2017-04-12T15:27:55.000Z
|
2022-01-21T23:37:37.000Z
|
src/holocron/_processors/import_processors.py
|
ikalnytskyi/holocron
|
f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3
|
[
"BSD-3-Clause"
] | 1
|
2020-11-15T17:49:36.000Z
|
2020-11-15T17:49:36.000Z
|
"""Import processors from 3rd party sources."""
import contextlib
import sys
import pkg_resources
from ._misc import parameters
| 29.073171
| 79
| 0.64849
|
9a0859c884c636f6f47e39ee23feff85000d7d1d
| 656
|
py
|
Python
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | 1
|
2019-03-26T13:49:14.000Z
|
2019-03-26T13:49:14.000Z
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | 5
|
2020-01-04T15:13:06.000Z
|
2020-08-31T14:20:23.000Z
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | null | null | null |
#
# @lc app=leetcode.cn id=412 lang=python3
#
# [412] Fizz Buzz
#
# Accepted
# 8/8 cases passed (48 ms)
# Your runtime beats 76.37 % of python3 submissions
# Your memory usage beats 25 % of python3 submissions (14.5 MB)
# @lc code=start
# @lc code=end
| 22.62069
| 63
| 0.532012
|
9a08f540ce3f12537d5b6d4be1caf8051f4c1c27
| 5,875
|
py
|
Python
|
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | 2
|
2019-04-26T19:40:31.000Z
|
2019-10-12T15:18:29.000Z
|
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.model_selection import check_cv
from sklearn.exceptions import NotFittedError
from sklearn.base import clone, is_classifier
from robusta.importance import get_importance
from robusta.crossval import crossval
from .base import _Selector
# Original: sklearn.feature_selection.SelectFromModel
def _check_max_features(importances, max_features):
"""Interpret the max_features value"""
n_features = len(importances)
if max_features is None:
max_features = n_features
elif isinstance(max_features, int):
max_features = min(n_features, max_features)
elif isinstance(max_features, float):
max_features = int(n_features * max_features)
return max_features
def _check_threshold(importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
threshold = -np.inf
elif isinstance(threshold, str):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
| 30.440415
| 79
| 0.637787
|
9a0a7b6a486f4199dd2e8181f3e83788c1d07d18
| 1,875
|
py
|
Python
|
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch_optimizer as optim
import pandas as pd
# customized libs
import criterions
import models
import datasets
| 29.296875
| 99
| 0.670933
|
9a0b750755a4f2eb69f71eb1f7890678edaaee12
| 1,733
|
py
|
Python
|
falmer/search/queries.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 2
|
2017-04-27T19:35:59.000Z
|
2017-06-13T16:19:33.000Z
|
falmer/search/queries.py
|
sussexstudent/falmer
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 975
|
2017-04-13T11:31:07.000Z
|
2022-02-10T07:46:18.000Z
|
falmer/search/queries.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 3
|
2018-05-09T06:42:25.000Z
|
2020-12-10T18:29:30.000Z
|
import graphene
from fuzzywuzzy import process
from falmer.search.types import SearchQuery
from falmer.search.utils import get_falmer_results_for_term, get_msl_results_for_term, \
SearchTermResponseData
| 28.883333
| 88
| 0.623774
|
9a0ef79b4f00de681e34f8ae67dfe78a084e7151
| 700
|
py
|
Python
|
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
#Shape of heart:
def for_heart():
"""printing shape of'heart' using for loop"""
for row in range(6):
for col in range(7):
if row-col==2 or row+col==8 or col%3!=0 and row==0 or col%3==0 and row==1:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_heart():
"""printing shape of'heart' using while loop"""
i=0
while i<6:
j=0
while j<7:
if i-j==2 or i+j==8 or j%3!=0 and i==0 or j%3==0 and i==1:
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
print()
i+=1
| 24.137931
| 87
| 0.4
|
9a0f75acc0d453f223e437b74a0cfe99d7909068
| 338
|
py
|
Python
|
test_core.py
|
DominikPutz/lecture-spring-2021
|
cce0970e261d45cbc16b3955d0659ca295ed8fc2
|
[
"Apache-2.0"
] | null | null | null |
test_core.py
|
DominikPutz/lecture-spring-2021
|
cce0970e261d45cbc16b3955d0659ca295ed8fc2
|
[
"Apache-2.0"
] | null | null | null |
test_core.py
|
DominikPutz/lecture-spring-2021
|
cce0970e261d45cbc16b3955d0659ca295ed8fc2
|
[
"Apache-2.0"
] | 3
|
2021-03-23T14:48:38.000Z
|
2022-01-13T09:45:08.000Z
|
from core import add
from core import sub
def test_add():
"""Check that `add()` works as expected"""
assert add(2, 3) == 5
def test_add_z():
"""Check that `add()` works as expected"""
assert add(2, 3, 1) == 6
def test_sub():
"""Check that `sub()` works as expected"""
assert sub(3, 1) == 2
| 16.9
| 46
| 0.553254
|
9a1128153dbcf8a364098445381dc767e17a1621
| 73
|
py
|
Python
|
setup.py
|
AnnaUstiuzhanina/flake8_extension
|
4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383
|
[
"MIT"
] | null | null | null |
setup.py
|
AnnaUstiuzhanina/flake8_extension
|
4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383
|
[
"MIT"
] | null | null | null |
setup.py
|
AnnaUstiuzhanina/flake8_extension
|
4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from setuptools import setup
setup()
| 14.6
| 34
| 0.835616
|
9a11a05b881b07a6c93ac169600004f78ada2754
| 434
|
py
|
Python
|
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
#9. Faaumam funo que receba a altura e o raio de um cilindro circular e retorne o volume
#do cilindro. O volume de um cilindro circular calculado por meio da seguinte frmula:
#V =pi*raio^2 x altura, onde pi = 3.141592.
r=float(input("Informe o raio do cilindro: "))
alt=float(input("Informe a altura do cilindro: "))
volume=volCilindro(3,2)
print(volume)
| 43.4
| 91
| 0.751152
|
9a122e1fac70741e43cd706c1bfea367874d0fa7
| 1,714
|
py
|
Python
|
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from sachima import conf
| 32.961538
| 78
| 0.449242
|
9a13bf9dda86cde96d1e704297f9ca1d15b1b6aa
| 3,254
|
pyw
|
Python
|
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | null | null | null |
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
import tkinter
if __name__ == '__main__':
application = tkinter.Tk()
application.title("Mediator Sample")
window = Main(application)
application.protocol("WM_DELETE_WINDOW", window.quit)
application.mainloop()
| 39.682927
| 106
| 0.567609
|
9a14a2d004a0836d3daffc7ee2ad09d95986fb4d
| 2,190
|
py
|
Python
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 4
|
2015-09-28T10:06:45.000Z
|
2019-09-20T05:53:03.000Z
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 8
|
2015-06-15T13:06:43.000Z
|
2018-12-23T13:37:20.000Z
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 2
|
2015-09-23T05:07:00.000Z
|
2015-10-20T15:43:19.000Z
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
urlpatterns = [
]
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'statictemplate',
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
},
LANGUAGES=(
('en-us', 'English'),
('it', 'Italian'),
),
ROOT_URLCONF='runtests',
SITE_ID=1,
MIDDLEWARE_CLASSES=[
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
if __name__ == '__main__':
runtests()
| 28.815789
| 70
| 0.622374
|
9a1583710b1d1ad4cc13f28020664d7f22387e1e
| 585
|
py
|
Python
|
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
x = input()
z = input()
splitter = [x[i:] for i in range(len(x))]
found_splitter = False
next_z = ""
for i in range(1, len(x) + 1):
if z[:i] in splitter:
found_splitter = True
next_z = z[i:]
if next_z[: len(x)] == x:
break
if i == len(z):
break
if next_z == "":
if found_splitter is False:
print(z)
else:
if found_splitter is True:
while True:
if next_z[0 : len(x)] == x:
next_z = next_z.replace(x, "", 1)
else:
print(next_z)
break
| 19.5
| 49
| 0.471795
|
9a163185c3befcd4de02a4c3e143213f59c12c77
| 117
|
py
|
Python
|
first_request.py
|
sgriffith3/2021_05_10_pyna
|
d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5
|
[
"MIT"
] | null | null | null |
first_request.py
|
sgriffith3/2021_05_10_pyna
|
d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5
|
[
"MIT"
] | null | null | null |
first_request.py
|
sgriffith3/2021_05_10_pyna
|
d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5
|
[
"MIT"
] | null | null | null |
import urllib.request
url = "https://google.com"
data = urllib.request.urlopen(url)
print(data)
print(data.read())
| 14.625
| 34
| 0.726496
|
9a1676b9866c375100521ac48277fdcc219264ce
| 1,592
|
py
|
Python
|
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | 1
|
2015-11-02T09:11:12.000Z
|
2015-11-02T09:11:12.000Z
|
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls.defaults import patterns, url
from datawinners.blue import view
from datawinners.blue.view import new_xform_submission_post, edit_xform_submission_post, get_attachment, attachment_download, guest_survey, public_survey
from datawinners.blue.view import ProjectUpload, ProjectUpdate
from datawinners.blue.view import new_xform_submission_get
from datawinners.project.views.submission_views import edit_xform_submission_get
urlpatterns = patterns('',
url(r'^guest_survey/(?P<link_uid>.+?)/$', guest_survey, name='guest_survey'),
url(r'^survey/(?P<org_id>.+?)/(?P<anonymous_link_id>.+?)/*$', public_survey, name='public_survey'),
url(r'^xlsform/upload/$', ProjectUpload.as_view(), name="import_project"),
url(r'^xlsform/download/$', view.project_download),
url(r'^xlsform/upload/update/(?P<project_id>\w+?)/$', ProjectUpdate.as_view(), name="update_project"),
url(r'^xlsform/(?P<project_id>.+?)/web_submission/(?P<survey_response_id>[^\\/]+?)/$', edit_xform_submission_get, name="edit_xform_submission"),
url(r'^xlsform/(?P<project_id>\w+?)/web_submission/$', new_xform_submission_get, name="xform_web_questionnaire"),
url(r'^xlsform/web_submission/(?P<survey_response_id>.+?)/$', edit_xform_submission_post, name="update_web_submission"),
url(r'^xlsform/web_submission/$', new_xform_submission_post, name="new_web_submission"),
url(r'^attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', get_attachment),
url(r'^download/attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', attachment_download)
)
| 61.230769
| 153
| 0.741834
|
9a16fe83f8b00c1ae4b19a89510efa6538193e44
| 95
|
py
|
Python
|
test2.py
|
marionettenspieler/pyneta
|
56a2dba736daf57464b06978c80383787a736ced
|
[
"Apache-2.0"
] | null | null | null |
test2.py
|
marionettenspieler/pyneta
|
56a2dba736daf57464b06978c80383787a736ced
|
[
"Apache-2.0"
] | null | null | null |
test2.py
|
marionettenspieler/pyneta
|
56a2dba736daf57464b06978c80383787a736ced
|
[
"Apache-2.0"
] | null | null | null |
print('hello man')
print('hello man')
print('hello man')
print('hello man')
print('hello man')
| 15.833333
| 18
| 0.684211
|
9a1837e6b67fec245ab3af4f52d7d449ca21cff5
| 4,013
|
py
|
Python
|
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import cudf
import numpy as np
import pyarrow.parquet as pq
try:
import cupy as cp
except ImportError:
import numpy as cp
| 32.104
| 82
| 0.595315
|
9a18608a6d3310b926afa6ca71ff25504d52035f
| 481
|
py
|
Python
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | 2
|
2015-09-24T07:38:07.000Z
|
2015-11-05T18:33:43.000Z
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | 2
|
2015-11-04T17:34:14.000Z
|
2015-11-09T02:05:31.000Z
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | null | null | null |
ROUTE = '/push'
PORT = 4242
IMAGE_NAME = 'globidocker/github-hook'
import docker
cli = docker.Client()
from lib.git import clone_tmp
| 19.24
| 58
| 0.634096
|
9a1861ac2df97b1bcfbdb3654e5d9c31f32e9e49
| 12,403
|
py
|
Python
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | 5
|
2021-11-17T07:12:54.000Z
|
2022-03-16T15:06:39.000Z
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | 25
|
2021-09-09T07:39:13.000Z
|
2022-01-23T13:00:19.000Z
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Solver test suite runner script, used for
# * regression tests (default)
# * test-init runs (with --test-init option)
# * performance evaluation (with --performance option)
#
# 1. Regression tests (the default)
# - runs set of projects and compares physical results and solver stats
# - meant to be run with either sequential or parallel solver
# - performance is monitored, but not so important (very short tests!)
# - expects jobs to have reference result directory, otherwise warning is issued
# and simulation is skipped (with --run-always option all simulations are done even without
# reference result dirs)
# - result of script:
# for each job show old/new stats and metrics
# show summary table with timings for all successful jobs
#
# 2. Initialization tests
# - checks if solver can initialize set of project files
# - script parses directory structure, generates list of test-init jobs
# and executes test initialization
# - result of script:
# for each job result status and time needed for test init (only for information)
#
# 3. Performance tests
# - collects list of jobs, runs each job 3 times and stores timings for all cases
# - result of script:
# for each job print individual timings and best evalualtion time in a table
#
# License:
# BSD License
#
# Authors:
# Andreas Nicolai <andreas.nicolai@tu-dresden.de>
#
# Syntax:
# > python run_tests.py --path <path/to/testsuite> --solver <path/to/solver/binary> --extension <project file extension>
#
# Example:
# > python run_tests.py --path ../../data/tests --solver ./DelphinSolver --extension d6p
# > python run_tests.py -p ../../data/tests -s ./DelphinSolver -e d6p
#
# Returns:
# 0 - if all tests could be simulated successfully and if all solver results/metrics match those of reference results
# 1 - if anything failed
#
# Note: if run with --run-all option, test cases without reference results will always be accepted.
#
import subprocess # import the module for calling external programs (creating subprocesses)
import sys
import os
import os.path
import shutil
import filecmp # for result file comparison
import argparse
import platform # to detect current OS
from colorama import *
from SolverStats import *
from print_funcs import *
from config import USE_COLORS
def configCommandLineArguments(argv=None):
	"""
	This method sets the available input parameters and parses them.

	Arguments:
	* argv - optional list of argument strings to parse; defaults to None,
	  in which case argparse reads sys.argv[1:] (backward compatible with
	  the original zero-argument call).

	Returns a parsed argparse.Namespace object.
	"""
	parser = argparse.ArgumentParser("run_tests.py")
	parser.description = '''
Runs the regression test suite. Can be used for init-tests (--test-init)
or performance evaluation (--performance) as well.'''
	parser.add_argument('-p', '--path', dest='path', required=True, type=str,
	                    help='Path to test suite root directory.')
	parser.add_argument('-s', '--solver', dest='solver', required=True, type=str,
	                    help='Path to solver binary.')
	parser.add_argument('-e', '--extension', dest="extension", required=True, type=str,
	                    help='Project file extension.')
	parser.add_argument('--no-colors', dest="no_colors", action='store_true',
	                    help='Disables colored console output.')
	parser.add_argument('--test-init', dest="test_init", action='store_true',
	                    help='Enables test-initialization mode (runs solvers with --test-init argument and '
	                    'skips result evaluation).')
	parser.add_argument('--performance', dest="performance", action='store_true',
	                    help='Enables performance evaluation mode (runs solvers three times '
	                    'without result evaluation and dumps timings of all cases and best-of-three timings).')
	parser.add_argument('--run-all', dest="run_all", action='store_true',
	                    help='If set (in regression test mode), also the test cases without reference results '
	                    'are simulated (can be used to generate reference results for all cases).')

	return parser.parse_args(argv)
def checkResults(dir1, dir2, evalTimes):
	"""
	Compares two result directories for equal contents.
	Compared are:

	- solver counters (<dir>/log/summary.txt)
	- physical results (<dir>/results), if reference result files exist

	This function uses IBK.SolverStats

	Arguments:
	* dir1 (reference results) and dir2 (computed results)
	* evalTimes is a dictionary with filepath (key) and wall clock time (value),
	  new entries are always added to the dictionary on success

	Returns: True on success, False on any mismatch or comparison error
	"""
	try:
		# open stat files and compare them; a summary file that cannot be
		# read counts as a failed comparison
		stats1 = SolverStats()
		if not stats1.read(dir1 + "/log/summary.txt"):
			return False
		stats2 = SolverStats()
		if not stats2.read(dir2 + "/log/summary.txt"):
			return False

		if not SolverStats.compareStats(stats1, stats2, []):
			printError("Mismatching statistics.")
			return False

		# compare all result files (d60, tsv), if any reference result files exist
		if os.path.exists(dir1 + "/results"):
			if not SolverStats.compareResults(dir1 + "/results", dir2 + "/results"):
				printError("Mismatching values.")
				return False

		# all checks passed - remember wall clock time for the summary table
		evalTimes[dir2] = stats2.timers['WallClockTime']
	except Exception as e:
		printError("Error comparing simulation results, error: {}".format(e))
		# bug fix: the original fell through to 'return True' here, so an
		# exception during comparison was silently reported as success
		return False
	return True
# *** main script ***

args = configCommandLineArguments()

if not args.no_colors:
	init()  # init ANSI code filtering for windows
	config.USE_COLORS = True
	printNotification("Enabling colored console output")

if args.test_init and args.performance:
	printError("Either use --test-init or --performance, but not both together.")
	exit(1)

# map the current OS onto the compiler ID used as suffix of the
# reference-result directories
currentOS = platform.system()
compilerID = None
if currentOS == "Linux":
	compilerID = "gcc_linux"
elif currentOS == "Windows":
	compilerID = "VC14_win64"
elif currentOS == "Darwin":
	compilerID = "gcc_mac"

# fix: identity comparison for None (was 'compilerID == None')
if compilerID is None:
	printError("Unknown/unsupported platform")
	exit(1)
else:
	print("Compiler ID : " + compilerID)

print("Test suite : " + args.path)
print("Solver : " + args.solver)
print("Project file extension : " + args.extension)

# walk all subdirectories (except .svn) within testsuite and collect project file names
projects = []
for root, dirs, files in os.walk(args.path, topdown=False):
	for name in files:
		if name.endswith('.' + args.extension):
			projectFilePath = os.path.join(root, name)
			projects.append(projectFilePath)

projects.sort()
print("Number of projects : {}\n".format(len(projects)))

# performance tests?
if args.performance:
	# NOTE(review): run_performance_evaluation is not defined in the visible
	# part of this file - presumably provided by one of the star imports;
	# verify, otherwise this branch raises NameError.
	res = run_performance_evaluation(args, projects)
	exit(res)

failed_projects = []
eval_times = dict()  # key - file path to project, value - eval time in [s]

for project in projects:
	print(project)
	path, fname = os.path.split(project)

	# compose path of result folder: project file path without extension
	resultsFolder = project[:-(1 + len(args.extension))]

	# remove entire directory with previous results
	if os.path.exists(resultsFolder):
		shutil.rmtree(resultsFolder)

	cmdline = [args.solver, project]

	# if in test-init mode, append --test-init to command line and skip
	# result evaluation for all cases
	if args.test_init:
		cmdline.append("--test-init")
		skipResultCheck = True
		args.run_all = True
	else:
		skipResultCheck = False

	referenceFolder = resultsFolder + "." + compilerID
	if not os.path.exists(referenceFolder):
		if not args.run_all:
			failed_projects.append(project)
			printError("Missing reference data directory '{}'".format(os.path.split(referenceFolder)[1]))
			continue
		else:
			skipResultCheck = True

	try:
		# run solver; discard its console output unless in test-init mode
		# (fix: use subprocess.DEVNULL instead of opening os.devnull on every
		# loop iteration without ever closing the file handle)
		if platform.system() == "Windows":
			cmdline.append("-x")
			cmdline.append("--verbosity-level=0")
			retcode = subprocess.call(cmdline, creationflags=subprocess.CREATE_NEW_CONSOLE)
		else:
			if args.test_init:
				# in test-init mode we want to see the output
				retcode = subprocess.call(cmdline)
			else:
				retcode = subprocess.call(cmdline, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)

		# check return code
		if retcode == 0:
			# successful run
			if not skipResultCheck:
				# now check against reference results
				if not checkResults(referenceFolder, resultsFolder, eval_times):
					if not project in failed_projects:
						failed_projects.append(project)  # mark as failed
					printError("Mismatching results.")
		else:
			# mark project as failed
			failed_projects.append(project)
			# and print error message
			printError("Simulation failed, see screenlog file {}".format(os.path.join(os.getcwd(),
			           resultsFolder + "/log/screenlog.txt")))
	except OSError as e:
		printError("Error starting solver executable '{}', error: {}".format(args.solver, e))
		exit(1)

# print summary table with wall clock times of all successful jobs
print("\nSuccessful projects:\n")
print("{:80s} {}".format("Project path", "Wall clock time [s]"))
filenames = eval_times.keys()
filenames = sorted(filenames)
for filename in filenames:
	fname = os.path.basename(filename)
	onedir = os.path.join(os.path.basename(os.path.dirname(filename)), os.path.basename(filename))
	printNotification("{:80s} {:>10.3f}".format(onedir, eval_times[filename]))

if len(failed_projects) > 0:
	print("\nFailed projects:")
	for p in failed_projects:
		printError(p)
	print("\n")
	printError("*** Failure ***")
	exit(1)

printNotification("*** Success ***")
exit(0)
| 33.252011
| 120
| 0.689511
|
9a1b2360ed8259c0fd8d46c53ed3e0ed659879cf
| 4,815
|
py
|
Python
|
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
| 37.038462
| 119
| 0.627414
|
9a1b3e2dbb66fc996ec081ab5ef13e302246dd49
| 1,410
|
py
|
Python
|
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
import logging
import datetime
import pathlib
import pytz
import requests
import pandas as pd
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = logging.getLogger(__name__)
# Script entry point: configure INFO-level logging, then run the updater.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): CovidTrackingDataUpdater is neither defined nor imported
    # in the visible part of this file - confirm it is defined above/elsewhere,
    # otherwise this raises NameError at runtime.
    CovidTrackingDataUpdater().update()
| 30
| 75
| 0.685816
|
9a1e2ae93bc1197259db93644405545d0b6670ce
| 1,100
|
py
|
Python
|
src/chatty/auth/auth.py
|
CITIZENSHIP-CHATTY/backend
|
8982a0f3cff8ba2efe6a903bb4ab47f9c6044487
|
[
"MIT"
] | null | null | null |
src/chatty/auth/auth.py
|
CITIZENSHIP-CHATTY/backend
|
8982a0f3cff8ba2efe6a903bb4ab47f9c6044487
|
[
"MIT"
] | 4
|
2020-04-19T09:25:46.000Z
|
2020-05-07T20:20:04.000Z
|
src/chatty/auth/auth.py
|
CITIZENSHIP-CHATTY/backend
|
8982a0f3cff8ba2efe6a903bb4ab47f9c6044487
|
[
"MIT"
] | null | null | null |
from chatty import utils
from aiohttp import web
from chatty.auth.models import User
| 32.352941
| 99
| 0.708182
|
9a21fd4ed5ae1c86fb6e590a1edd2f37df8e132c
| 1,220
|
py
|
Python
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 36
|
2015-11-18T22:35:39.000Z
|
2022-03-21T10:13:23.000Z
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 23
|
2016-02-02T06:09:16.000Z
|
2020-03-06T22:54:55.000Z
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 82
|
2015-11-22T11:46:33.000Z
|
2022-03-18T02:46:48.000Z
|
import os, sys
import imp
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *
# Load the shared sample-code constants module from the working directory.
# NOTE(review): the 'imp' module is deprecated (removed in Python 3.12);
# consider importlib - flagged only, not changed here.
constants = imp.load_source('modulename', 'constants.py')

# Run only when this file is executed directly (same effect as the usual
# __name__ == "__main__" guard, expressed via the script name).
if(os.path.basename(__file__) == os.path.basename(sys.argv[0])):
    # NOTE(review): delete_customer_profile is not defined in the visible part
    # of this file - confirm it is defined above, otherwise this raises NameError.
    delete_customer_profile(constants.customerProfileId)
| 38.125
| 120
| 0.788525
|
9a25b6f7b3f250cb0ca3c95cee4acba5e53203f1
| 3,659
|
py
|
Python
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 31
|
2021-01-27T15:03:18.000Z
|
2021-12-13T11:09:58.000Z
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 20
|
2021-02-01T16:42:17.000Z
|
2022-01-26T10:48:59.000Z
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 12
|
2021-01-27T14:50:11.000Z
|
2021-08-10T22:13:46.000Z
|
# Copyright 2021 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
from pyspark.sql.dataframe import DataFrame
from py4j.java_collections import MapConverter
| 38.114583
| 108
| 0.625854
|
9a25e9fa72dd391d4676f6e0a6bb06f9710db5d6
| 1,837
|
py
|
Python
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 45
|
2021-01-28T04:12:21.000Z
|
2022-02-24T13:15:50.000Z
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 32
|
2021-03-02T18:45:16.000Z
|
2022-03-12T00:53:10.000Z
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 10
|
2020-12-25T15:02:40.000Z
|
2021-12-30T11:40:15.000Z
|
from kafka.consumer import KafkaConsumer
from json import loads
from mongoengine import *
from matilda.data_pipeline import object_model
# Create a Kafka consumer that reads JSON-encoded messages from the 'numtest'
# topic and persists each one to MongoDB.
consumer = KafkaConsumer(
    'numtest',  # kafka topic
    bootstrap_servers=['localhost:9092'],  # same as our producer
    # It handles where the consumer restarts reading after breaking down or being turned off and can be set either
    # to earliest or latest. When set to latest, the consumer starts reading at the end of the log.
    # When set to earliest, the consumer starts reading at the latest committed offset.
    auto_offset_reset='earliest',
    enable_auto_commit=True,  # makes sure the consumer commits its read offset every interval.
    # join a consumer group for dynamic partition assignment and offset commits
    # a consumer needs to be part of a consumer group to make the auto commit work.
    # otherwise, need to do it manually i.e. consumer.assign([TopicPartition('foobar', 2)]); msg = next(consumer)
    group_id='my-group',
    # deserialize encoded values
    value_deserializer=lambda x: loads(x.decode('utf-8')))

# SECURITY(review): database credentials are hardcoded here - move them to
# environment variables or a secrets store before any real deployment.
# NOTE(review): get_atlas_db_url is not imported in the visible part of this
# file - confirm it comes from one of the project imports.
atlas_url = get_atlas_db_url(username='AlainDaccache', password='qwerty98', dbname='matilda-db')
db = connect(host=atlas_url)

# The consumer iterator returns ConsumerRecords, which are simple namedtuples
# that expose basic message attributes: topic, partition, offset, key, and value:
for message in consumer:
    message = message.value  # deserialized JSON payload (dict)
    print(message)
    # persist the received number; assumes payload has a 'number' key - TODO confirm
    object_model.Test(number=message['number']).save()
    print('{} added to db'.format(message))

# # Then to check whats in it:
# for doc in object_model.Test.objects:
#     print(doc._data)
| 42.72093
| 116
| 0.740338
|
9a26ea77dac1512349aaac759f21f3e326122e27
| 746
|
py
|
Python
|
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | 2
|
2021-05-31T13:01:33.000Z
|
2021-12-20T19:48:18.000Z
|
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | null | null | null |
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | null | null | null |
from graph import Graph
| 21.314286
| 69
| 0.548257
|